185 files changed, 2873 insertions, 1955 deletions
diff --git a/.gitignore b/.gitignore index c40a3c4800b..a2265d5d5b7 100644 --- a/.gitignore +++ b/.gitignore @@ -126,6 +126,7 @@ scripts/mytop scripts/wsrep_sst_common scripts/wsrep_sst_mysqldump scripts/wsrep_sst_rsync +scripts/wsrep_sst_rsync_wan scripts/wsrep_sst_mariabackup scripts/wsrep_sst_xtrabackup scripts/wsrep_sst_xtrabackup-v2 @@ -494,3 +495,9 @@ UpgradeLog*.htm # Microsoft Fakes FakesAssemblies/ + +# macOS garbage +.DS_Store + +# QtCreator && CodeBlocks +*.cbp diff --git a/client/mysqldump.c b/client/mysqldump.c index 9a89c805955..f64acb044f5 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -5008,6 +5008,14 @@ static int dump_selected_tables(char *db, char **table_names, int tables) if (opt_xml) print_xml_tag(md_result_file, "", "\n", "database", "name=", db, NullS); + + /* obtain dump of routines (procs/functions) */ + if (opt_routines && mysql_get_server_version(mysql) >= 50009) + { + DBUG_PRINT("info", ("Dumping routines for database %s", db)); + dump_routines_for_db(db); + } + if (opt_single_transaction && mysql_get_server_version(mysql) >= 50500) { verbose_msg("-- Setting savepoint...\n"); @@ -5017,7 +5025,6 @@ static int dump_selected_tables(char *db, char **table_names, int tables) DBUG_RETURN(1); } } - /* Dump each selected table */ for (pos= dump_tables; pos < end; pos++) { @@ -5079,12 +5086,6 @@ static int dump_selected_tables(char *db, char **table_names, int tables) DBUG_PRINT("info", ("Dumping events for database %s", db)); dump_events_for_db(db); } - /* obtain dump of routines (procs/functions) */ - if (opt_routines && mysql_get_server_version(mysql) >= 50009) - { - DBUG_PRINT("info", ("Dumping routines for database %s", db)); - dump_routines_for_db(db); - } free_root(&glob_root, MYF(0)); if (opt_xml) { diff --git a/client/mysqlslap.c b/client/mysqlslap.c index 05bc9430541..20215b17bdc 100644 --- a/client/mysqlslap.c +++ b/client/mysqlslap.c @@ -853,7 +853,7 @@ build_table_string(void) if (auto_generate_sql_guid_primary) { - dynstr_append(&table_string, "id varchar(32) primary key"); + dynstr_append(&table_string, "id varchar(36) primary key"); if (num_int_cols || num_char_cols || auto_generate_sql_guid_primary) dynstr_append(&table_string, ","); @@ -868,7 +868,7 @@ build_table_string(void) if (count) /* Except for the first pass we add a comma */ dynstr_append(&table_string, ","); - if (snprintf(buf, HUGE_STRING_LENGTH, "id%d varchar(32) unique key", count) + if (snprintf(buf, HUGE_STRING_LENGTH, "id%d varchar(36) unique key", count) > HUGE_STRING_LENGTH) { fprintf(stderr, "Memory Allocation error in create table\n"); diff --git a/client/mysqltest.cc b/client/mysqltest.cc index b962813f558..aadb46da0c1 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -8747,6 +8747,7 @@ void init_re(void) "[[:space:]]*SELECT[[:space:]]|" "[[:space:]]*CREATE[[:space:]]+TABLE[[:space:]]|" "[[:space:]]*DO[[:space:]]|" + "[[:space:]]*HANDLER[[:space:]]+.*[[:space:]]+READ[[:space:]]|" "[[:space:]]*SET[[:space:]]+OPTION[[:space:]]|" "[[:space:]]*DELETE[[:space:]]+MULTI[[:space:]]|" "[[:space:]]*UPDATE[[:space:]]+MULTI[[:space:]]|" diff --git a/debian/control b/debian/control index 20ccd4c163e..4b5ed47c606 100644 --- a/debian/control +++ b/debian/control @@ -351,7 +351,7 @@ Depends: bsdutils, findutils, galera-3 (>=25.3), gawk, - iproute2, + iproute | iproute2, libdbi-perl, lsb-base (>= 3.0-10), lsof, diff --git a/include/my_global.h b/include/my_global.h index cdd4b503e70..65546a37700 100644 --- a/include/my_global.h +++ b/include/my_global.h @@ -598,6 +598,8 @@ 
typedef SOCKET_SIZE_TYPE size_socket; #endif #ifndef SOCK_CLOEXEC #define SOCK_CLOEXEC 0 +#else +#define HAVE_SOCK_CLOEXEC #endif /* additional file share flags for win32 */ diff --git a/include/mysql/psi/mysql_socket.h b/include/mysql/psi/mysql_socket.h index 64531e5c35e..6bc14e984fd 100644 --- a/include/mysql/psi/mysql_socket.h +++ b/include/mysql/psi/mysql_socket.h @@ -561,6 +561,12 @@ inline_mysql_socket_socket (key, (const my_socket*)&mysql_socket.fd, NULL, 0); } #endif + + /* SOCK_CLOEXEC isn't always a number - can't preprocessor compare */ +#if defined(HAVE_FCNTL) && defined(FD_CLOEXEC) && !defined(HAVE_SOCK_CLOEXEC) + (void) fcntl(mysql_socket.fd, F_SETFD, FD_CLOEXEC); +#endif + return mysql_socket; } diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index f505f40bedc..4f4e184937e 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -5698,7 +5698,7 @@ sub lldb_arguments { $input = $input ? "< $input" : ""; # write init file for mysqld or client - mtr_tofile($lldb_init_file, "set args $str $input\n"); + mtr_tofile($lldb_init_file, "process launch --stop-at-entry -- $str $input\n"); print "\nTo start lldb for $type, type in another window:\n"; print "cd $glob_mysql_test_dir && lldb -s $lldb_init_file $$exe\n"; diff --git a/mysql-test/r/alter_table.result b/mysql-test/r/alter_table.result index 2bea055df4c..968e8d1f599 100644 --- a/mysql-test/r/alter_table.result +++ b/mysql-test/r/alter_table.result @@ -2237,6 +2237,29 @@ t1 CREATE TABLE `t1` ( ) ENGINE=InnoDB DEFAULT CHARSET=latin1 DROP TABLE t1; # +# MDEV-14668 ADD PRIMARY KEY IF NOT EXISTS on composite key +# +CREATE TABLE t1 ( +`ID` BIGINT(20) NOT NULL, +`RANK` MEDIUMINT(4) NOT NULL, +`CHECK_POINT` BIGINT(20) NOT NULL, +UNIQUE INDEX `HORIZON_UIDX01` (`ID`, `RANK`) +) ENGINE=InnoDB; +ALTER TABLE t1 ADD PRIMARY KEY IF NOT EXISTS (`ID`, `CHECK_POINT`); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `ID` bigint(20) NOT NULL, + `RANK` mediumint(4) NOT NULL, + `CHECK_POINT` bigint(20) NOT NULL, + PRIMARY KEY (`ID`,`CHECK_POINT`), + UNIQUE KEY `HORIZON_UIDX01` (`ID`,`RANK`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +ALTER TABLE t1 ADD PRIMARY KEY IF NOT EXISTS (`ID`, `CHECK_POINT`); +Warnings: +Note 1061 Multiple primary key defined +DROP TABLE t1; +# # End of 10.0 tests # # diff --git a/mysql-test/r/analyze_stmt_orderby.result b/mysql-test/r/analyze_stmt_orderby.result index 8d904a4419b..4c422608738 100644 --- a/mysql-test/r/analyze_stmt_orderby.result +++ b/mysql-test/r/analyze_stmt_orderby.result @@ -306,7 +306,7 @@ ANALYZE "r_rows": 10, "r_total_time_ms": "REPLACED", "filtered": 100, - "r_filtered": 1, + "r_filtered": 100, "attached_condition": "t0.a is not null" } } diff --git a/mysql-test/r/auto_increment.result b/mysql-test/r/auto_increment.result index 660a93b1b30..bd61d73f08c 100644 --- a/mysql-test/r/auto_increment.result +++ b/mysql-test/r/auto_increment.result @@ -538,6 +538,35 @@ pk 1 drop table t1; # +# End of 5.3 tests +# +# +# MDEV-16534 PPC64: Unexpected error with a negative values into auto-increment columns in HEAP, MyISAM, ARIA +# +CREATE TABLE t1 ( +id TINYINT NOT NULL AUTO_INCREMENT, +name CHAR(30) NOT NULL, +PRIMARY KEY (id) +) ENGINE=MyISAM; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `id` tinyint(4) NOT NULL AUTO_INCREMENT, + `name` char(30) NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +INSERT INTO t1 (name) VALUES ('dog'); +UPDATE t1 SET id=-1 WHERE id=1; +INSERT INTO t1 (name) VALUES 
('cat'); +SELECT * FROM t1; +id name +-1 dog +2 cat +DROP TABLE t1; +# +# End of 5.5 tests +# +# # MDEV-15352 AUTO_INCREMENT breaks after updating a column value to a negative number # SET @engine='MyISAM'; @@ -657,3 +686,6 @@ id name -1 dog 2 cat DROP PROCEDURE autoinc_mdev15353_one; +# +# End of 10.2 tests +# diff --git a/mysql-test/r/having.result b/mysql-test/r/having.result index c64d0579962..d22f5563bbe 100644 --- a/mysql-test/r/having.result +++ b/mysql-test/r/having.result @@ -711,6 +711,23 @@ a ct set sql_mode=@save_sql_mode; drop table t1; # +# mdev-16235: impossible HAVING in query without aggregation +# +explain extended +select * from mysql.help_topic where example = 'foo' having description is null; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible HAVING +Warnings: +Note 1003 select `mysql`.`help_topic`.`help_topic_id` AS `help_topic_id`,`mysql`.`help_topic`.`name` AS `name`,`mysql`.`help_topic`.`help_category_id` AS `help_category_id`,`mysql`.`help_topic`.`description` AS `description`,`mysql`.`help_topic`.`example` AS `example`,`mysql`.`help_topic`.`url` AS `url` from `mysql`.`help_topic` where `mysql`.`help_topic`.`example` = 'foo' having 0 +select * from mysql.help_topic where example = 'foo' having description is null; +help_topic_id name help_category_id description example url +# +# End of 5. tests +# +# +# Start of 10.0 tests +# +# # Bug mdev-5160: two-way join with HAVING over the second table # CREATE TABLE t1 (c1 varchar(6)) ENGINE=MyISAM; diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result index 6f411eb8639..5d2d960ad6f 100644 --- a/mysql-test/r/information_schema.result +++ b/mysql-test/r/information_schema.result @@ -1472,7 +1472,7 @@ USE test; End of 5.0 tests. 
select * from information_schema.engines WHERE ENGINE="MyISAM"; ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -MyISAM DEFAULT MyISAM storage engine NO NO NO +MyISAM DEFAULT Non-transactional engine with good performance and small data footprint NO NO NO grant select on *.* to user3148@localhost; connect con3148,localhost,user3148,,test; connection con3148; diff --git a/mysql-test/r/limit.result b/mysql-test/r/limit.result index 064fa5a18a7..b47644eb40d 100644 --- a/mysql-test/r/limit.result +++ b/mysql-test/r/limit.result @@ -146,3 +146,19 @@ a 16 DROP TABLE t1; End of 5.1 tests +# +# mdev-16235: SELECT over a table with LIMIT 0 +# +EXPLAIN +SELECT * FROM mysql.slow_log WHERE sql_text != 'foo' LIMIT 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Zero limit +SELECT * FROM mysql.slow_log WHERE sql_text != 'foo' LIMIT 0; +start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text thread_id rows_affected +EXPLAIN +SELECT * FROM mysql.help_topic WHERE help_category_id != example LIMIT 0; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Zero limit +SELECT * FROM mysql.help_topic WHERE help_category_id != example LIMIT 0; +help_topic_id name help_category_id description example url +End of 5.5 tests diff --git a/mysql-test/r/lock.result b/mysql-test/r/lock.result index e3fd16ee59a..0805fd45864 100644 --- a/mysql-test/r/lock.result +++ b/mysql-test/r/lock.result @@ -406,7 +406,7 @@ LOCK TABLE t1 WRITE; HANDLER t1 OPEN; ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction HANDLER t1 READ FIRST; -ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction +Got one of the listed errors HANDLER t1 CLOSE; ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction UNLOCK TABLES; diff --git a/mysql-test/r/mysqldump.result b/mysql-test/r/mysqldump.result index 7f6107db5e4..77ea6f9ea2f 100644 --- a/mysql-test/r/mysqldump.result +++ b/mysql-test/r/mysqldump.result @@ -5598,6 +5598,21 @@ DROP DATABASE db1; DROP DATABASE db2; FOUND 1 /Database: mysql/ in bug11505.sql # +# MDEV-15021: Fix the order in which routines are called +# +use test; +CREATE FUNCTION f() RETURNS INT RETURN 1; +CREATE VIEW v1 AS SELECT f(); +# Running mysqldump -uroot test --routines --tables v1 > **vardir**/test.dmp +DROP VIEW v1; +DROP FUNCTION f; +# Running mysql -uroot test < **vardir**/test.dmp +# +# Cleanup after succesful import. 
+# +DROP VIEW v1; +DROP FUNCTION f; +# # Test for --add-drop-trigger # use test; diff --git a/mysql-test/r/mysqlslap.result b/mysql-test/r/mysqlslap.result index d3c5107dee3..791cb5ac6b3 100644 --- a/mysql-test/r/mysqlslap.result +++ b/mysql-test/r/mysqlslap.result @@ -255,3 +255,6 @@ Benchmark # MDEV-4684 - Enhancement request: --init-command support for mysqlslap # DROP TABLE t1; +# +# Bug MDEV-15789 (Upstream: #80329): MYSQLSLAP OPTIONS --AUTO-GENERATE-SQL-GUID-PRIMARY and --AUTO-GENERATE-SQL-SECONDARY-INDEXES DONT WORK +# diff --git a/mysql-test/r/olap.result b/mysql-test/r/olap.result index b10f175b63e..24140583d13 100644 --- a/mysql-test/r/olap.result +++ b/mysql-test/r/olap.result @@ -766,3 +766,25 @@ b NULL DROP TABLE t1, t2; End of 5.0 tests +# +# Start of 10.1 tests +# +# +# MDEV-16190 Server crashes in Item_null_result::field_type on SELECT with time field, ROLLUP and HAVING +# +CREATE TABLE t1 (t TIME) ENGINE=MyISAM; +INSERT INTO t1 VALUES ('12:12:12'); +SELECT t, COUNT(*) FROM t1 GROUP BY t WITH ROLLUP HAVING t > '00:00:00'; +t COUNT(*) +12:12:12 1 +DROP TABLE t1; +CREATE TABLE t1 (t TIME) ENGINE=MyISAM; +INSERT INTO t1 VALUES ('12:12:12'),('12:12:13'); +SELECT t, COUNT(*) FROM t1 GROUP BY t WITH ROLLUP HAVING t > '00:00:00'; +t COUNT(*) +12:12:12 1 +12:12:13 1 +DROP TABLE t1; +# +# End of 10.1 tests +# diff --git a/mysql-test/r/ps.result b/mysql-test/r/ps.result index ac96dd05a2a..4a1922e23f8 100644 --- a/mysql-test/r/ps.result +++ b/mysql-test/r/ps.result @@ -4365,7 +4365,46 @@ LINE3 3 drop table t1; # End of 5.5 tests # -# Start of 10.2 tests +# End of 10.0 tests +# +# +# MDEV-12060 Crash in EXECUTE IMMEDIATE with an expression returning a GRANT command +# (the 10.1 part) +# +CREATE PROCEDURE p2 () +BEGIN +SET STATEMENT join_cache_level=CAST(CONCAT(_utf8'6',_latin1'') AS INT) FOR PREPARE stmt FROM 'SELECT 1'; +EXECUTE stmt; +DEALLOCATE PREPARE stmt; +END; +/ +CALL p2(); +1 +1 +DROP PROCEDURE p2; +BEGIN NOT ATOMIC +SET STATEMENT join_cache_level=CAST(CONCAT(_utf8'6',_latin1'') AS INT) FOR PREPARE stmt FROM 'SELECT 1'; +EXECUTE stmt; +DEALLOCATE PREPARE stmt; +END; +/ +1 +1 +BEGIN NOT ATOMIC +SET STATEMENT join_cache_level=CAST(CONCAT(_utf8'6',_latin1'') AS INT) FOR PREPARE stmt FROM 'SELECT 1'; +DEALLOCATE PREPARE stmt; +END; +/ +BEGIN NOT ATOMIC +PREPARE stmt FROM 'SELECT 1'; +SET STATEMENT join_cache_level=CAST(CONCAT(_utf8'6',_latin1'') AS INT) FOR EXECUTE stmt; +DEALLOCATE PREPARE stmt; +END; +/ +1 +1 +# +# End of 10.1 tests # # # MDEV-10709 Expressions as parameters to Dynamic SQL diff --git a/mysql-test/r/read_only_innodb.result b/mysql-test/r/read_only_innodb.result index b6e294b633c..abfc5322ed0 100644 --- a/mysql-test/r/read_only_innodb.result +++ b/mysql-test/r/read_only_innodb.result @@ -237,14 +237,6 @@ a a 5 10 DROP TABLE temp1, temp2; -# MDEV-14185 CREATE TEMPORARY TABLE AS SELECT causes error 1290 with read_only and InnoDB. 
- -CREATE TEMPORARY TABLE temp1 ENGINE=INNODB AS SELECT a FROM t1; -SELECT * FROM temp1; -a -1 -DROP TABLE temp1; - # Disconnect and cleanup disconnect con1; diff --git a/mysql-test/r/selectivity.result b/mysql-test/r/selectivity.result index 64199f983ee..5f3f2bb5064 100644 --- a/mysql-test/r/selectivity.result +++ b/mysql-test/r/selectivity.result @@ -356,13 +356,13 @@ and o_orderkey = l_orderkey group by c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice order by o_totalprice desc, o_orderdate; id select_type table type possible_keys key key_len ref rows filtered Extra -1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 6005 0.00 Using temporary; Using filesort -1 PRIMARY orders eq_ref PRIMARY,i_o_custkey PRIMARY 4 <subquery2>.l_orderkey 1 100.00 Using where +1 PRIMARY orders ALL PRIMARY,i_o_custkey NULL NULL NULL 1500 100.00 Using where; Using temporary; Using filesort +1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 dbt3_s001.orders.o_orderkey 1 100.00 1 PRIMARY customer eq_ref PRIMARY PRIMARY 4 dbt3_s001.orders.o_custkey 1 100.00 -1 PRIMARY lineitem ref PRIMARY,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 <subquery2>.l_orderkey 4 100.00 +1 PRIMARY lineitem ref PRIMARY,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey_quantity 4 dbt3_s001.orders.o_orderkey 4 100.00 Using index 2 MATERIALIZED lineitem index NULL i_l_orderkey_quantity 13 NULL 6005 100.00 Using index Warnings: -Note 1003 select `dbt3_s001`.`customer`.`c_name` AS `c_name`,`dbt3_s001`.`customer`.`c_custkey` AS `c_custkey`,`dbt3_s001`.`orders`.`o_orderkey` AS `o_orderkey`,`dbt3_s001`.`orders`.`o_orderDATE` AS `o_orderdate`,`dbt3_s001`.`orders`.`o_totalprice` AS `o_totalprice`,sum(`dbt3_s001`.`lineitem`.`l_quantity`) AS `sum(l_quantity)` from <materialize> (select `dbt3_s001`.`lineitem`.`l_orderkey` from `dbt3_s001`.`lineitem` group by `dbt3_s001`.`lineitem`.`l_orderkey` having sum(`dbt3_s001`.`lineitem`.`l_quantity`) > 250) join `dbt3_s001`.`customer` join `dbt3_s001`.`orders` join `dbt3_s001`.`lineitem` where `dbt3_s001`.`customer`.`c_custkey` = `dbt3_s001`.`orders`.`o_custkey` and `dbt3_s001`.`orders`.`o_orderkey` = `<subquery2>`.`l_orderkey` and `dbt3_s001`.`lineitem`.`l_orderkey` = `<subquery2>`.`l_orderkey` group by `dbt3_s001`.`customer`.`c_name`,`dbt3_s001`.`customer`.`c_custkey`,`dbt3_s001`.`orders`.`o_orderkey`,`dbt3_s001`.`orders`.`o_orderDATE`,`dbt3_s001`.`orders`.`o_totalprice` order by `dbt3_s001`.`orders`.`o_totalprice` desc,`dbt3_s001`.`orders`.`o_orderDATE` +Note 1003 select `dbt3_s001`.`customer`.`c_name` AS `c_name`,`dbt3_s001`.`customer`.`c_custkey` AS `c_custkey`,`dbt3_s001`.`orders`.`o_orderkey` AS `o_orderkey`,`dbt3_s001`.`orders`.`o_orderDATE` AS `o_orderdate`,`dbt3_s001`.`orders`.`o_totalprice` AS `o_totalprice`,sum(`dbt3_s001`.`lineitem`.`l_quantity`) AS `sum(l_quantity)` from <materialize> (select `dbt3_s001`.`lineitem`.`l_orderkey` from `dbt3_s001`.`lineitem` group by `dbt3_s001`.`lineitem`.`l_orderkey` having sum(`dbt3_s001`.`lineitem`.`l_quantity`) > 250) join `dbt3_s001`.`customer` join `dbt3_s001`.`orders` join `dbt3_s001`.`lineitem` where `dbt3_s001`.`customer`.`c_custkey` = `dbt3_s001`.`orders`.`o_custkey` and `<subquery2>`.`l_orderkey` = `dbt3_s001`.`orders`.`o_orderkey` and `dbt3_s001`.`lineitem`.`l_orderkey` = `dbt3_s001`.`orders`.`o_orderkey` group by `dbt3_s001`.`customer`.`c_name`,`dbt3_s001`.`customer`.`c_custkey`,`dbt3_s001`.`orders`.`o_orderkey`,`dbt3_s001`.`orders`.`o_orderDATE`,`dbt3_s001`.`orders`.`o_totalprice` order by `dbt3_s001`.`orders`.`o_totalprice` 
desc,`dbt3_s001`.`orders`.`o_orderDATE` select c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice, sum(l_quantity) from customer, orders, lineitem @@ -1530,6 +1530,68 @@ t 10:00:00 11:00:00 DROP TABLE t1; +# +# MDEV-16374: filtered shows 0 for materilization scan for a semi join, which makes optimizer +# always pick materialization scan over materialization lookup +# +create table t0(a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 (a int, b int); +insert into t1 values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10), +(11,11),(12,12),(13,13),(14,14),(15,15); +set @@optimizer_use_condition_selectivity=2; +explain extended select * from t1 where a in (select max(a) from t1 group by b); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 16 100.00 Using where +1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00 +2 MATERIALIZED t1 ALL NULL NULL NULL NULL 16 100.00 Using temporary +Warnings: +Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from <materialize> (select max(`test`.`t1`.`a`) from `test`.`t1` group by `test`.`t1`.`b`) join `test`.`t1` where `<subquery2>`.`max(a)` = `test`.`t1`.`a` +select * from t1 where a in (select max(a) from t1 group by b); +a b +0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +11 11 +12 12 +13 13 +14 14 +15 15 +set @@optimizer_use_condition_selectivity=1; +explain extended select * from t1 where a in (select max(a) from t1 group by b); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 16 100.00 Using where +1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00 +2 MATERIALIZED t1 ALL NULL NULL NULL NULL 16 100.00 Using temporary +Warnings: +Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from <materialize> (select max(`test`.`t1`.`a`) from `test`.`t1` group by `test`.`t1`.`b`) join `test`.`t1` where `<subquery2>`.`max(a)` = `test`.`t1`.`a` +select * from t1 where a in (select max(a) from t1 group by b); +a b +0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +11 11 +12 12 +13 13 +14 14 +15 15 +drop table t1,t0; set histogram_size=@save_histogram_size; set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity; set use_stat_tables=@save_use_stat_tables; diff --git a/mysql-test/r/selectivity_innodb.result b/mysql-test/r/selectivity_innodb.result index a05c14c8e71..849217116ea 100644 --- a/mysql-test/r/selectivity_innodb.result +++ b/mysql-test/r/selectivity_innodb.result @@ -359,13 +359,13 @@ and o_orderkey = l_orderkey group by c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice order by o_totalprice desc, o_orderdate; id select_type table type possible_keys key key_len ref rows filtered Extra -1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 6005 0.00 Using temporary; Using filesort -1 PRIMARY orders eq_ref PRIMARY,i_o_custkey PRIMARY 4 <subquery2>.l_orderkey 1 100.00 Using where +1 PRIMARY orders ALL PRIMARY,i_o_custkey NULL NULL NULL 1500 100.00 Using where; Using temporary; Using filesort +1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 dbt3_s001.orders.o_orderkey 1 100.00 1 PRIMARY customer eq_ref PRIMARY PRIMARY 4 dbt3_s001.orders.o_custkey 1 100.00 -1 PRIMARY lineitem ref PRIMARY,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 <subquery2>.l_orderkey 4 100.00 +1 PRIMARY lineitem ref PRIMARY,i_l_orderkey,i_l_orderkey_quantity 
i_l_orderkey_quantity 4 dbt3_s001.orders.o_orderkey 4 100.00 Using index 2 MATERIALIZED lineitem index NULL PRIMARY 8 NULL 6005 100.00 Warnings: -Note 1003 select `dbt3_s001`.`customer`.`c_name` AS `c_name`,`dbt3_s001`.`customer`.`c_custkey` AS `c_custkey`,`dbt3_s001`.`orders`.`o_orderkey` AS `o_orderkey`,`dbt3_s001`.`orders`.`o_orderDATE` AS `o_orderdate`,`dbt3_s001`.`orders`.`o_totalprice` AS `o_totalprice`,sum(`dbt3_s001`.`lineitem`.`l_quantity`) AS `sum(l_quantity)` from <materialize> (select `dbt3_s001`.`lineitem`.`l_orderkey` from `dbt3_s001`.`lineitem` group by `dbt3_s001`.`lineitem`.`l_orderkey` having sum(`dbt3_s001`.`lineitem`.`l_quantity`) > 250) join `dbt3_s001`.`customer` join `dbt3_s001`.`orders` join `dbt3_s001`.`lineitem` where `dbt3_s001`.`customer`.`c_custkey` = `dbt3_s001`.`orders`.`o_custkey` and `dbt3_s001`.`orders`.`o_orderkey` = `<subquery2>`.`l_orderkey` and `dbt3_s001`.`lineitem`.`l_orderkey` = `<subquery2>`.`l_orderkey` group by `dbt3_s001`.`customer`.`c_name`,`dbt3_s001`.`customer`.`c_custkey`,`dbt3_s001`.`orders`.`o_orderkey`,`dbt3_s001`.`orders`.`o_orderDATE`,`dbt3_s001`.`orders`.`o_totalprice` order by `dbt3_s001`.`orders`.`o_totalprice` desc,`dbt3_s001`.`orders`.`o_orderDATE` +Note 1003 select `dbt3_s001`.`customer`.`c_name` AS `c_name`,`dbt3_s001`.`customer`.`c_custkey` AS `c_custkey`,`dbt3_s001`.`orders`.`o_orderkey` AS `o_orderkey`,`dbt3_s001`.`orders`.`o_orderDATE` AS `o_orderdate`,`dbt3_s001`.`orders`.`o_totalprice` AS `o_totalprice`,sum(`dbt3_s001`.`lineitem`.`l_quantity`) AS `sum(l_quantity)` from <materialize> (select `dbt3_s001`.`lineitem`.`l_orderkey` from `dbt3_s001`.`lineitem` group by `dbt3_s001`.`lineitem`.`l_orderkey` having sum(`dbt3_s001`.`lineitem`.`l_quantity`) > 250) join `dbt3_s001`.`customer` join `dbt3_s001`.`orders` join `dbt3_s001`.`lineitem` where `dbt3_s001`.`customer`.`c_custkey` = `dbt3_s001`.`orders`.`o_custkey` and `<subquery2>`.`l_orderkey` = `dbt3_s001`.`orders`.`o_orderkey` and `dbt3_s001`.`lineitem`.`l_orderkey` = `dbt3_s001`.`orders`.`o_orderkey` group by `dbt3_s001`.`customer`.`c_name`,`dbt3_s001`.`customer`.`c_custkey`,`dbt3_s001`.`orders`.`o_orderkey`,`dbt3_s001`.`orders`.`o_orderDATE`,`dbt3_s001`.`orders`.`o_totalprice` order by `dbt3_s001`.`orders`.`o_totalprice` desc,`dbt3_s001`.`orders`.`o_orderDATE` select c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice, sum(l_quantity) from customer, orders, lineitem @@ -1540,6 +1540,68 @@ t 10:00:00 11:00:00 DROP TABLE t1; +# +# MDEV-16374: filtered shows 0 for materilization scan for a semi join, which makes optimizer +# always pick materialization scan over materialization lookup +# +create table t0(a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 (a int, b int); +insert into t1 values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10), +(11,11),(12,12),(13,13),(14,14),(15,15); +set @@optimizer_use_condition_selectivity=2; +explain extended select * from t1 where a in (select max(a) from t1 group by b); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 16 100.00 Using where +1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00 +2 MATERIALIZED t1 ALL NULL NULL NULL NULL 16 100.00 Using temporary +Warnings: +Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from <materialize> (select max(`test`.`t1`.`a`) from `test`.`t1` group by `test`.`t1`.`b`) join `test`.`t1` where `<subquery2>`.`max(a)` = `test`.`t1`.`a` +select 
* from t1 where a in (select max(a) from t1 group by b); +a b +0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +11 11 +12 12 +13 13 +14 14 +15 15 +set @@optimizer_use_condition_selectivity=1; +explain extended select * from t1 where a in (select max(a) from t1 group by b); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 16 100.00 Using where +1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00 +2 MATERIALIZED t1 ALL NULL NULL NULL NULL 16 100.00 Using temporary +Warnings: +Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from <materialize> (select max(`test`.`t1`.`a`) from `test`.`t1` group by `test`.`t1`.`b`) join `test`.`t1` where `<subquery2>`.`max(a)` = `test`.`t1`.`a` +select * from t1 where a in (select max(a) from t1 group by b); +a b +0 0 +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +11 11 +12 12 +13 13 +14 14 +15 15 +drop table t1,t0; set histogram_size=@save_histogram_size; set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity; set use_stat_tables=@save_use_stat_tables; diff --git a/mysql-test/r/subselect_mat_cost_bugs.result b/mysql-test/r/subselect_mat_cost_bugs.result index ffd93c125ed..f19cf0d4095 100644 --- a/mysql-test/r/subselect_mat_cost_bugs.result +++ b/mysql-test/r/subselect_mat_cost_bugs.result @@ -334,7 +334,7 @@ SELECT * FROM t1 WHERE (f1) IN (SELECT f1 FROM t2) LIMIT 0; id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Zero limit 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where SELECT * FROM t1 WHERE (f1) IN (SELECT f1 FROM t2) diff --git a/mysql-test/r/subselect_sj2_mat.result b/mysql-test/r/subselect_sj2_mat.result index 3ff0e619188..88ff82e72d6 100644 --- a/mysql-test/r/subselect_sj2_mat.result +++ b/mysql-test/r/subselect_sj2_mat.result @@ -1670,3 +1670,150 @@ id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 11 func 1 2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 Using where DROP TABLE t1,t2; +# +# MDEV-16225: wrong resultset from query with semijoin=on +# +CREATE TABLE t1 ( +`id` int(10) NOT NULL AUTO_INCREMENT, +`local_name` varchar(64) NOT NULL, +PRIMARY KEY (`id`) +) ENGINE=InnoDB AUTO_INCREMENT=14 DEFAULT CHARSET=latin1; +insert into t1(`id`,`local_name`) values +(1,'Cash Advance'), +(2,'Cash Advance'), +(3,'Rollover'), +(4,'AL Installment'), +(5,'AL Installment'), +(6,'AL Installment'), +(7,'AL Installment'), +(8,'AL Installment'), +(9,'AL Installment'), +(10,'Internet Payday'), +(11,'Rollover - Internet Payday'), +(12,'AL Monthly Installment'), +(13,'AL Semi-Monthly Installment'); +explain +SELECT SQL_NO_CACHE t.id +FROM t1 t +WHERE ( +t.id IN (SELECT A.id FROM t1 AS A WHERE A.local_name IN (SELECT B.local_name FROM t1 AS B WHERE B.id IN (0,4,12,13,1,10,3,11))) +OR +(t.id IN (0,4,12,13,1,10,3,11)) +); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t index PRIMARY PRIMARY 4 NULL 13 Using where; Using index +2 MATERIALIZED <subquery3> ALL distinct_key NULL NULL NULL 8 +2 MATERIALIZED A ALL PRIMARY NULL NULL NULL 13 Using where; Using join buffer (flat, BNL join) +3 MATERIALIZED B ALL PRIMARY NULL NULL NULL 13 Using where +SELECT SQL_NO_CACHE t.id +FROM t1 t +WHERE ( +t.id IN (SELECT A.id FROM t1 AS A WHERE A.local_name IN (SELECT B.local_name FROM t1 AS B WHERE B.id IN 
(0,4,12,13,1,10,3,11))) +OR +(t.id IN (0,4,12,13,1,10,3,11)) +); +id +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +drop table t1; +# +# MDEV-15247: Crash when SET NAMES 'utf8' is set +# +CREATE TABLE t1 ( +id_category int unsigned, +id_product int unsigned, +PRIMARY KEY (id_category,id_product) +) ENGINE=MyISAM; +INSERT INTO `t1` VALUES (31,216), (31,215), (31,214), (31,213), (31,212), (32,211), (32,210), (32,209), (32,208), (29,207), (30,315372), (2,161), (2,132), (33,315380), (31,315371), (29,315370), (29,315373), (29,315369), (29,315374), (29,315368), (29,315375), (29,315367), (29,183), (29,182), (30,177), (29,315376), (13,315365), (2,167), (2,315357), (2,164), (2,159), (2,131), (2,127), (14,315364), (27,315363), (29,205), (29,204), (29,203), (29,202), (29,201), (29,200), (29,199), (29,198), (29,197), (29,196), (29,195), (29,194), (29,193), (29,192), (29,191), (29,190), (29,189), (14,188), (29,187), (29,186), (29,185), (29,184), (29,315377), (29,315378), (29,181), (33,315379), (29,179), (30,178), (29,180), (30,176), (30,175), (30,174), (30,173), (30,172), (11,171), (27,315357), (23,108), (23,102); +CREATE TABLE t2 ( +id_product int, +id_t2 int, +KEY id_t2 (id_t2), +KEY id_product (id_product) +) ENGINE=MyISAM; +INSERT INTO `t2` VALUES (11,31), (11,31), (11,31), (11,32), (11,32), +(11,32), (10,26), (11,32), (10,28), (11,32), (10,29), (11,33), (10,26), +(11,33), (10,27), (9,23), (11,32), (10,26), (8,18), (7,15), (11,32), +(10,28), (11,32), (10,28), (11,32), (10,29), (11,32), (10,29), (8,19), +(7,16), (8,18), (7,16), (8,20), (7,16), (11,32), (10,28), (8,19), +(7,16), (8,20), (7,16), (11,32), (10,29), (8,19), (7,16), (8,20), +(7,16), (10,27), (9,23), (10,27), (9,23), (10,27), (9,23), (11,32), +(10,27), (11,32), (10,27), (8,18), (7,15), (10,26), (9,24), (8,19), +(7,16), (10,26), (9,23), (8,19), (7,16), (8,18), (7,16), (8,18), (7,16), +(9,23), (8,18), (9,23), (8,19), (7,16), (7,16), (8,19), (7,16), (11,31), +(10,27), (9,24), (11,31), (10,27), (9,23), (8,19), (11,31), (10,26), (9,24), +(8,19), (11,31), (10,26), (9,25), (8,18), (11,31), (10,26), (9,23), (8,19), +(11,31), (10,26), (9,23), (8,18), (11,31), (10,30), (9,23), (8,18), (11,31), +(10,30), (9,23), (8,19), (11,31), (10,26), (9,25), (8,19), (8,21), (11,32), +(10,26), (9,22), (8,19), (11,32), (10,26), (9,22), (8,18), (11,32), (10,26), +(9,22), (8,20), (11,33), (10,26), (9,22), (8,19), (11,33), (10,26), (9,22), +(8,18), (11,33), (10,26), (9,22), (8,20), (11,32), (10,26), (9,24), (8,19), +(11,32), (10,26), (9,25), (8,19), (11,32), (10,26), (9,25), (8,18), (11,32), +(10,26), (9,23), (8,18), (11,32), (10,30), (9,23), (8,18), (11,32), (10,30), +(9,23), (8,19), (11,32), (10,26), (9,23), (8,19), (11,32), (10,27), (9,23), +(11,32), (10,27), (9,23), (11,32), (10,27), (9,23), (10,26), (9,22), (8,19), +(7,15), (10,26), (9,22), (8,20), (7,15), (10,26), (9,22), (8,18), (7,15), +(8,19), (10,26), (10,26), (11,33), (10,26), (11,33), (10,26), (11,33), +(10,27), (11,33), (10,27), (11,31), (10,26), (11,31), (10,26), (8,18), +(7,15), (9,23), (9,23), (9,24), (8,21), (7,15), (7,15), (7,15), (7,15), +(7,15), (7,15), (7,15), (7,15), (7,15), (8,18), (7,17), (8,18), (7,17), (8,19), (8,19); +CREATE TABLE t3 ( +id_product int unsigned, +PRIMARY KEY (id_product) +) ENGINE=MyISAM; +INSERT INTO t3 VALUES +(102),(103),(104),(105),(106),(107),(108),(109),(110), +(315371),(315373),(315374),(315375),(315376),(315377), +(315378),(315379),(315380); +CREATE TABLE t4 ( +id_product int not null, +id_shop int, +PRIMARY KEY (id_product,id_shop) +) ENGINE=MyISAM ; +INSERT INTO t4 
VALUES +(202,1),(201,1),(200,1),(199,1),(198,1),(197,1),(196,1),(195,1), +(194,1),(193,1),(192,1),(191,1),(190,1),(189,1),(188,1),(187,1), +(186,1),(185,1),(184,1),(183,1),(182,1),(181,1),(179,1),(178,1), +(177,1),(176,1),(126,1),(315380,1); +CREATE TABLE t5 (id_product int) ENGINE=MyISAM; +INSERT INTO `t5` VALUES +(652),(668),(669),(670),(671),(673),(674),(675),(676), +(677),(679),(680),(681),(682),(683),(684),(685),(686); +explain +SELECT * FROM t3 +JOIN t4 ON (t4.id_product = t3.id_product AND t4.id_shop = 1) +JOIN t1 ON (t1.id_product = t3.id_product) +LEFT JOIN t5 ON (t5.id_product = t3.id_product) +WHERE 1=1 +AND t3.id_product IN (SELECT id_product FROM t2 t2_1 WHERE t2_1.id_t2 = 32) +AND t3.id_product IN (SELECT id_product FROM t2 t2_2 WHERE t2_2.id_t2 = 15) +AND t3.id_product IN (SELECT id_product FROM t2 t2_3 WHERE t2_3.id_t2 = 18 OR t2_3.id_t2 = 19) +AND t3.id_product IN (SELECT id_product FROM t2 t2_4 WHERE t2_4.id_t2 = 34 OR t2_4.id_t2 = 23) +AND t3.id_product IN (SELECT id_product FROM t2 t2_5 WHERE t2_5.id_t2 = 29 OR t2_5.id_t2 = 28 OR t2_5.id_t2 = 26); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 index NULL PRIMARY 8 NULL 73 Using index +1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t1.id_product 1 Using index +1 PRIMARY <subquery5> eq_ref distinct_key distinct_key 4 func 1 Using where +1 PRIMARY <subquery4> eq_ref distinct_key distinct_key 4 func 1 Using where +1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1 Using where +1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 Using where +1 PRIMARY t4 eq_ref PRIMARY PRIMARY 8 test.t1.id_product,const 1 Using where; Using index +1 PRIMARY <subquery6> eq_ref distinct_key distinct_key 4 func 1 Using where +1 PRIMARY t5 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join) +5 MATERIALIZED t2_4 range id_t2,id_product id_t2 5 NULL 18 Using index condition; Using where +4 MATERIALIZED t2_3 range id_t2,id_product id_t2 5 NULL 32 Using index condition; Using where +3 MATERIALIZED t2_2 ref id_t2,id_product id_t2 5 const 12 +2 MATERIALIZED t2_1 ref id_t2,id_product id_t2 5 const 50 +6 MATERIALIZED t2_5 range id_t2,id_product id_t2 5 NULL 30 Using index condition; Using where +drop table t1,t2,t3,t4,t5; diff --git a/mysql-test/r/subselect_sj_nonmerged.result b/mysql-test/r/subselect_sj_nonmerged.result index c7e04225ffe..47970668ae5 100644 --- a/mysql-test/r/subselect_sj_nonmerged.result +++ b/mysql-test/r/subselect_sj_nonmerged.result @@ -77,9 +77,9 @@ explain select * from t4 where t4.a in (select max(t2.a) from t1, t2 group by t2.b) and t4.b in (select max(t2.a) from t1, t2 group by t2.b); id select_type table type possible_keys key key_len ref rows Extra -1 PRIMARY <subquery3> ALL distinct_key NULL NULL NULL 5 -1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 5 Using join buffer (flat, BNL join) -1 PRIMARY t4 ref a a 10 <subquery2>.max(t2.a),<subquery3>.max(t2.a) 12 +1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 5 +1 PRIMARY t4 ref a a 5 <subquery2>.max(t2.a) 12 Using index condition +1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 test.t4.b 1 3 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 Using temporary 3 MATERIALIZED t1 ALL NULL NULL NULL NULL 10 Using join buffer (flat, BNL join) 2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 Using temporary diff --git a/mysql-test/std_data/frm/t1.frm b/mysql-test/std_data/frm/t1.frm Binary files differnew file mode 100644 index 00000000000..a998f54ec67 --- /dev/null +++ 
b/mysql-test/std_data/frm/t1.frm diff --git a/mysql-test/suite/encryption/disabled.def b/mysql-test/suite/encryption/disabled.def index 9777a4ac99a..d92d3495cb8 100644 --- a/mysql-test/suite/encryption/disabled.def +++ b/mysql-test/suite/encryption/disabled.def @@ -12,5 +12,3 @@ innodb_scrub : MDEV-8139 scrubbing does not work reliably innodb_scrub_background : MDEV-8139 scrubbing does not work reliably -innodb-redo-badkey : MDEV-13893 / MDEV-12699 Improve crash recovery of corrupted data pages - diff --git a/mysql-test/suite/encryption/r/innodb-redo-nokeys.result b/mysql-test/suite/encryption/r/innodb-redo-nokeys.result index 251ea73cd4c..cc9d385bbbd 100644 --- a/mysql-test/suite/encryption/r/innodb-redo-nokeys.result +++ b/mysql-test/suite/encryption/r/innodb-redo-nokeys.result @@ -1,7 +1,7 @@ call mtr.add_suppression("mysqld: File .*"); call mtr.add_suppression("Plugin 'file_key_management' .*"); call mtr.add_suppression("InnoDB: cannot enable encryption, encryption plugin is not available"); -call mtr.add_suppression("Plugin 'InnoDB' init function returned error."); +call mtr.add_suppression("Plugin 'InnoDB' init function returned error\\."); call mtr.add_suppression("Plugin 'InnoDB' registration as a STORAGE ENGINE failed."); call mtr.add_suppression("InnoDB: The page \\[page id: space=[0-9]+, page number=[0-9]+\\] in file '.*test.t[1-4]\\.ibd' cannot be decrypted\\."); call mtr.add_suppression("failed to read or decrypt \\[page id: space=[1-9][0-9]*, page number=[1-9][0-9]*\\]"); diff --git a/mysql-test/suite/encryption/t/innodb-discard-import.test b/mysql-test/suite/encryption/t/innodb-discard-import.test index e105cf82b67..62dafec4eba 100644 --- a/mysql-test/suite/encryption/t/innodb-discard-import.test +++ b/mysql-test/suite/encryption/t/innodb-discard-import.test @@ -1,10 +1,6 @@ -- source include/have_innodb.inc +-- source include/innodb_page_size_small.inc -- source include/have_file_key_management_plugin.inc -# embedded does not support restart --- source include/not_embedded.inc --- source include/not_valgrind.inc -# Avoid CrashReporter popup on Mac --- source include/not_crashrep.inc # # MDEV-8770: Incorrect error message when importing page compressed tablespace @@ -28,7 +24,9 @@ create table t1(c1 bigint not null, b char(200)) engine=innodb encrypted=yes en show warnings; create table t2(c1 bigint not null, b char(200)) engine=innodb page_compressed=1 encrypted=yes encryption_key_id=4; show warnings; -create table t3(c1 bigint not null, b char(200)) engine=innodb row_format=compressed encrypted=yes encryption_key_id=4; +let $kbs= `select floor(@@global.innodb_page_size/1024)`; +--replace_regex / key_block_size=\d+//i +eval create table t3(c1 bigint not null, b char(200)) engine=innodb row_format=compressed encrypted=yes encryption_key_id=4 key_block_size=$kbs; show warnings; create table t4(c1 bigint not null, b char(200)) engine=innodb page_compressed=1; show warnings; @@ -89,6 +87,7 @@ ALTER TABLE t2 IMPORT TABLESPACE; SHOW CREATE TABLE t2; SELECT COUNT(*) FROM t2; ALTER TABLE t3 IMPORT TABLESPACE; +--replace_regex / key_block_size=\d+//i SHOW CREATE TABLE t3; SELECT COUNT(*) FROM t3; ALTER TABLE t4 IMPORT TABLESPACE; @@ -118,5 +117,5 @@ DROP TABLE t1,t2,t3,t4; # reset system --disable_query_log -EVAL SET GLOBAL innodb_compression_algorithm = $innodb_compression_algo; +eval SET GLOBAL innodb_compression_algorithm = $innodb_compression_algo; --enable_query_log diff --git a/mysql-test/suite/encryption/t/innodb-missing-key.test 
b/mysql-test/suite/encryption/t/innodb-missing-key.test index 87c617c7aed..37ac3b00343 100644 --- a/mysql-test/suite/encryption/t/innodb-missing-key.test +++ b/mysql-test/suite/encryption/t/innodb-missing-key.test @@ -60,5 +60,3 @@ SELECT COUNT(1) FROM t2; SELECT COUNT(1) FROM t3; DROP TABLE t1, t2, t3; - - diff --git a/mysql-test/suite/encryption/t/innodb-redo-nokeys.test b/mysql-test/suite/encryption/t/innodb-redo-nokeys.test index 37565ab4827..66720eb8585 100644 --- a/mysql-test/suite/encryption/t/innodb-redo-nokeys.test +++ b/mysql-test/suite/encryption/t/innodb-redo-nokeys.test @@ -6,7 +6,7 @@ call mtr.add_suppression("mysqld: File .*"); call mtr.add_suppression("Plugin 'file_key_management' .*"); call mtr.add_suppression("InnoDB: cannot enable encryption, encryption plugin is not available"); -call mtr.add_suppression("Plugin 'InnoDB' init function returned error."); +call mtr.add_suppression("Plugin 'InnoDB' init function returned error\\."); call mtr.add_suppression("Plugin 'InnoDB' registration as a STORAGE ENGINE failed."); call mtr.add_suppression("InnoDB: The page \\[page id: space=[0-9]+, page number=[0-9]+\\] in file '.*test.t[1-4]\\.ibd' cannot be decrypted\\."); call mtr.add_suppression("failed to read or decrypt \\[page id: space=[1-9][0-9]*, page number=[1-9][0-9]*\\]"); diff --git a/mysql-test/suite/federated/have_federatedx.inc b/mysql-test/suite/federated/have_federatedx.inc index 56ce31f5b2f..2250dd205bd 100644 --- a/mysql-test/suite/federated/have_federatedx.inc +++ b/mysql-test/suite/federated/have_federatedx.inc @@ -1,5 +1,5 @@ if (!`SELECT count(*) FROM information_schema.plugins WHERE plugin_name = 'federated' AND plugin_status = 'active' AND - plugin_description LIKE '%FederatedX%'`){ + plugin_description LIKE '%transactions%'`){ skip Need FederatedX engine; } diff --git a/mysql-test/suite/funcs_1/r/is_engines_archive.result b/mysql-test/suite/funcs_1/r/is_engines_archive.result index 2772992495c..52802b17acd 100644 --- a/mysql-test/suite/funcs_1/r/is_engines_archive.result +++ b/mysql-test/suite/funcs_1/r/is_engines_archive.result @@ -2,7 +2,7 @@ SELECT * FROM information_schema.engines WHERE ENGINE = 'ARCHIVE'; ENGINE ARCHIVE SUPPORT YES -COMMENT Archive storage engine +COMMENT gzip-compresses tables for a low storage footprint TRANSACTIONS NO XA NO SAVEPOINTS NO diff --git a/mysql-test/suite/funcs_1/r/is_engines_csv.result b/mysql-test/suite/funcs_1/r/is_engines_csv.result index 2a7e61ee4d3..7e413b9af6f 100644 --- a/mysql-test/suite/funcs_1/r/is_engines_csv.result +++ b/mysql-test/suite/funcs_1/r/is_engines_csv.result @@ -2,7 +2,7 @@ SELECT * FROM information_schema.engines WHERE ENGINE = 'CSV'; ENGINE CSV SUPPORT YES -COMMENT CSV storage engine +COMMENT Stores tables as CSV files TRANSACTIONS NO XA NO SAVEPOINTS NO diff --git a/mysql-test/suite/funcs_1/r/is_engines_federated.result b/mysql-test/suite/funcs_1/r/is_engines_federated.result index 8057a0266c5..20926458ed0 100644 --- a/mysql-test/suite/funcs_1/r/is_engines_federated.result +++ b/mysql-test/suite/funcs_1/r/is_engines_federated.result @@ -2,7 +2,7 @@ SELECT * FROM information_schema.engines WHERE ENGINE = 'FEDERATED'; ENGINE FEDERATED SUPPORT YES -COMMENT FederatedX pluggable storage engine +COMMENT Allows to access tables on other MariaDB servers, supports transactions and more TRANSACTIONS YES XA NO SAVEPOINTS YES diff --git a/mysql-test/suite/funcs_1/r/is_engines_myisam.result b/mysql-test/suite/funcs_1/r/is_engines_myisam.result index 7e42c864187..d307ce4be6a 100644 --- 
a/mysql-test/suite/funcs_1/r/is_engines_myisam.result +++ b/mysql-test/suite/funcs_1/r/is_engines_myisam.result @@ -2,7 +2,7 @@ SELECT * FROM information_schema.engines WHERE ENGINE = 'MyISAM'; ENGINE MyISAM SUPPORT DEFAULT -COMMENT MyISAM storage engine +COMMENT Non-transactional engine with good performance and small data footprint TRANSACTIONS NO XA NO SAVEPOINTS NO diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index 14aa22727c2..72a27277116 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -27,6 +27,9 @@ galera_ssl_upgrade : MDEV-13549 Galera test failures galera.MW-329 : wsrep_local_replays not stable galera.MW-328A : have_deadlocks test not stable query_cache : MDEV-15805 Test failure on galera.query_cache +MW-416 : MDEV-13549 Galera test failures +galera_wan : MDEV-13549 Galera test failures +MW-388 : MDEV-13549 Galera test failures galera.MW-44 : MDEV-15809 Test failure on galera.MW-44 galera.galera_pc_ignore_sb : MDEV-15811 Test failure on galera_pc_ignore_sb galera_kill_applier : race condition at the start of the test diff --git a/mysql-test/suite/galera/galera_2nodes_as_master_slave.cnf b/mysql-test/suite/galera/galera_2nodes_as_master_slave.cnf deleted file mode 100644 index f68fe524904..00000000000 --- a/mysql-test/suite/galera/galera_2nodes_as_master_slave.cnf +++ /dev/null @@ -1,83 +0,0 @@ - -# -# Let's understand the topology. -# * Independent Master with server-id = 1 -# * Galera cluster with 2 nodes: node#1 and node#2 with server-id = 2, 3 -# node#1 act as slave to Independent Master with server-id = 1 -# * Independent Slave with server-id = 4 replicating from galera node#2 -# - -# Use default setting for mysqld processes -!include include/default_mysqld.cnf - -[mysqld] -log-slave-updates -log-bin=mysqld-bin -binlog-format=row -gtid-mode=on -enforce-gtid-consistency=true - -[mysqld.1] -server-id=1 - -[mysqld.2] -server-id=2 - -wsrep_provider=@ENV.WSREP_PROVIDER -wsrep_cluster_address='gcomm://' -wsrep_provider_options='base_port=@mysqld.2.#galera_port;evs.install_timeout = PT15S; evs.max_install_timeouts=1;' - -# enforce read-committed characteristics across the cluster -wsrep_causal_reads=ON -wsrep_sync_wait = 15 - -wsrep_node_address=127.0.0.1 -wsrep_sst_receive_address=127.0.0.2:@mysqld.2.#sst_port -wsrep_node_incoming_address=127.0.0.1:@mysqld.2.port - -# Required for Galera -innodb_autoinc_lock_mode=2 - -innodb_flush_log_at_trx_commit=2 - -[mysqld.3] -server-id=3 - -wsrep_provider=@ENV.WSREP_PROVIDER -wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.2.#galera_port' -wsrep_provider_options='base_port=@mysqld.3.#galera_port;evs.install_timeout = PT15S; evs.max_install_timeouts = 1;' - -# enforce read-committed characteristics across the cluster -wsrep_causal_reads=ON -wsrep_sync_wait = 15 - -wsrep_node_address=127.0.0.1 -wsrep_sst_receive_address=127.0.0.2:@mysqld.3.#sst_port -wsrep_node_incoming_address=127.0.0.1:@mysqld.3.port - -# Required for Galera -innodb_autoinc_lock_mode=2 - -innodb_flush_log_at_trx_commit=2 - -[mysqld.4] -server-id=4 - -[ENV] -NODE_MYPORT_1= @mysqld.1.port -NODE_MYSOCK_1= @mysqld.1.socket - -NODE_MYPORT_2= @mysqld.2.port -NODE_MYSOCK_2= @mysqld.2.socket - -NODE_MYPORT_3= @mysqld.3.port -NODE_MYSOCK_3= @mysqld.3.socket - -NODE_MYPORT_4= @mysqld.4.port -NODE_MYSOCK_4= @mysqld.4.socket - -NODE_GALERAPORT_2= @mysqld.2.#galera_port -NODE_GALERAPORT_3= @mysqld.3.#galera_port - -NODE_SSTPORT_2= @mysqld.2.#sst_port -NODE_SSTPORT_3= @mysqld.3.#sst_port diff --git 
a/mysql-test/suite/galera/galera_2nodes_as_master_with_repl_filter.cnf b/mysql-test/suite/galera/galera_2nodes_as_master_with_repl_filter.cnf deleted file mode 100644 index d5490280ab2..00000000000 --- a/mysql-test/suite/galera/galera_2nodes_as_master_with_repl_filter.cnf +++ /dev/null @@ -1,87 +0,0 @@ -# -# This .cnf file creates a setup with a 2-node Galera cluster and one stand-alone MySQL server, to be used as a slave -# - -# Use default setting for mysqld processes -!include include/default_mysqld.cnf - -[mysqld] -default-storage-engine=InnoDB - -[mysqld.1] -server-id=1 -binlog-format=row -log-bin=mysqld-bin -log_slave_updates -gtid-mode=on -enforce-gtid-consistency=true -event-scheduler=1 - -wsrep_provider=@ENV.WSREP_PROVIDER -wsrep_cluster_address='gcomm://' -wsrep_provider_options='base_port=@mysqld.1.#galera_port' - -# enforce read-committed characteristics across the cluster -wsrep_causal_reads=ON -wsrep_sync_wait = 15 - -wsrep_node_address=127.0.0.1 -wsrep_sst_receive_address=127.0.0.2:@mysqld.1.#sst_port -wsrep_node_incoming_address=127.0.0.1:@mysqld.1.port - -# Required for Galera -innodb_autoinc_lock_mode=2 - -innodb_flush_log_at_trx_commit=2 - -[mysqld.2] -server-id=2 -binlog-format=row -log-bin=mysqld-bin -log_slave_updates -gtid-mode=on -enforce-gtid-consistency=true -event-scheduler=1 - -wsrep_provider=@ENV.WSREP_PROVIDER -wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.1.#galera_port' -wsrep_provider_options='base_port=@mysqld.2.#galera_port' - -# enforce read-committed characteristics across the cluster -wsrep_causal_reads=ON -wsrep_sync_wait = 15 - -wsrep_node_address=127.0.0.1 -wsrep_sst_receive_address=127.0.0.2:@mysqld.2.#sst_port -wsrep_node_incoming_address=127.0.0.1:@mysqld.2.port - -# Required for Galera -innodb_autoinc_lock_mode=2 - -innodb_flush_log_at_trx_commit=2 - -[mysqld.3] -server-id=3 -replicate-ignore-db=test -replicate-wild-ignore-table=test.% -log-bin=mysqld-bin -log_slave_updates -gtid-mode=on -enforce-gtid-consistency=true -event-scheduler=1 - -[ENV] -NODE_MYPORT_1= @mysqld.1.port -NODE_MYSOCK_1= @mysqld.1.socket - -NODE_MYPORT_2= @mysqld.2.port -NODE_MYSOCK_2= @mysqld.2.socket - -NODE_MYPORT_3= @mysqld.3.port -NODE_MYSOCK_3= @mysqld.3.socket - -NODE_GALERAPORT_1= @mysqld.1.#galera_port -NODE_GALERAPORT_2= @mysqld.2.#galera_port - -NODE_SSTPORT_1= @mysqld.1.#sst_port -NODE_SSTPORT_2= @mysqld.2.#sst_port diff --git a/mysql-test/suite/galera/r/MW-416.result b/mysql-test/suite/galera/r/MW-416.result new file mode 100644 index 00000000000..05399b213a8 --- /dev/null +++ b/mysql-test/suite/galera/r/MW-416.result @@ -0,0 +1,114 @@ +CREATE USER 'userMW416'@'localhost'; +GRANT SELECT, INSERT, UPDATE ON test.* TO 'userMW416'@'localhost'; +SHOW GLOBAL STATUS LIKE 'wsrep_replicated'; +Variable_name Value +wsrep_replicated 2 +ALTER DATABASE db CHARACTER SET = utf8; +ERROR 42000: Access denied for user 'userMW416'@'localhost' to database 'db' +ALTER EVENT ev1 RENAME TO ev2; +ERROR 42000: Access denied for user 'userMW416'@'localhost' to database 'test' +ALTER FUNCTION fun1 COMMENT 'foo'; +ERROR 42000: alter routine command denied to user 'userMW416'@'localhost' for routine 'test.fun1' +ALTER LOGFILE GROUP lfg ADD UNDOFILE 'file' ENGINE=InnoDB; +Got one of the listed errors +ALTER PROCEDURE proc1 COMMENT 'foo'; +Got one of the listed errors +ALTER SERVER srv OPTIONS (USER 'sally'); +Got one of the listed errors +ALTER TABLE tbl DROP COLUMN col; +Got one of the listed errors +ALTER TABLESPACE tblspc DROP DATAFILE 'file' ENGINE=innodb; +Got one of the listed 
errors +ALTER VIEW vw AS SELECT 1; +Got one of the listed errors +CREATE DATABASE db; +Got one of the listed errors +CREATE EVENT ev1 ON SCHEDULE AT CURRENT_TIMESTAMP DO SELECT 1; +Got one of the listed errors +CREATE FUNCTION fun1() RETURNS int RETURN(1); +Got one of the listed errors +CREATE FUNCTION fun1 RETURNS STRING SONAME 'funlib.so'; +Got one of the listed errors +CREATE PROCEDURE proc1() BEGIN END; +Got one of the listed errors +CREATE INDEX idx ON tbl(id); +Got one of the listed errors +CREATE LOGFILE GROUP lfg ADD UNDOFILE 'undofile' ENGINE innodb; +Got one of the listed errors +CREATE SERVER srv FOREIGN DATA WRAPPER 'fdw' OPTIONS (USER 'user'); +Got one of the listed errors +CREATE TABLE t (i int); +Got one of the listed errors +CREATE TABLESPACE tblspc ADD DATAFILE 'file' ENGINE=innodb; +Got one of the listed errors +CREATE TRIGGER trg BEFORE UPDATE ON t FOR EACH ROW BEGIN END; +Got one of the listed errors +CREATE VIEW vw AS SELECT 1; +Got one of the listed errors +DROP DATABASE db; +Got one of the listed errors +DROP EVENT ev; +Got one of the listed errors +DROP FUNCTION fun1; +Got one of the listed errors +DROP INDEX idx ON t0; +Got one of the listed errors +DROP LOGFILE GROUP lfg; +Got one of the listed errors +DROP PROCEDURE proc1; +Got one of the listed errors +DROP SERVEr srv; +Got one of the listed errors +DROP TABLE t0; +Got one of the listed errors +DROP TABLESPACE tblspc; +Got one of the listed errors +DROP TRIGGER trg; +Got one of the listed errors +DROP VIEW vw; +Got one of the listed errors +RENAME TABLE t0 TO t1; +Got one of the listed errors +TRUNCATE TABLE t0; +Got one of the listed errors +ALTER USER myuser PASSWORD EXPIRE; +Got one of the listed errors +CREATE USER myuser IDENTIFIED BY 'pass'; +Got one of the listed errors +DROP USER myuser; +Got one of the listed errors +GRANT ALL ON *.* TO 'myuser'; +Got one of the listed errors +RENAME USER myuser TO mariauser; +Got one of the listed errors +REVOKE SELECT ON test FROM myuser; +Got one of the listed errors +REVOKE ALL, GRANT OPTION FROM myuser; +Got one of the listed errors +REVOKE PROXY ON myuser FROM myuser; +Got one of the listed errors +ANALYZE TABLE db.tbl; +Got one of the listed errors +CHECK TABLE db.tbl; +Got one of the listed errors +CHECKSUM TABLE db.tbl; +Got one of the listed errors +OPTIMIZE TABLE db.tbl; +Got one of the listed errors +REPAIR TABLE db.tbl; +Got one of the listed errors +INSTALL PLUGIN plg SONAME 'plg.so'; +Got one of the listed errors +UNINSTALL PLUGIN plg; +Got one of the listed errors +DROP USER 'userMW416'@'localhost'; +SHOW DATABASES; +Database +information_schema +mtr +mysql +performance_schema +test +SHOW GLOBAL STATUS LIKE 'wsrep_replicated'; +Variable_name Value +wsrep_replicated 3 diff --git a/mysql-test/suite/galera/r/galera#500.result b/mysql-test/suite/galera/r/galera#500.result new file mode 100644 index 00000000000..6a07d0359a4 --- /dev/null +++ b/mysql-test/suite/galera/r/galera#500.result @@ -0,0 +1,10 @@ +SET SESSION wsrep_sync_wait = 0; +SET GLOBAL wsrep_provider_options="gmcast.isolate=2"; +SET SESSION wsrep_sync_wait = 0; +SHOW STATUS LIKE 'wsrep_cluster_status'; +Variable_name Value +wsrep_cluster_status non-Primary +SET SESSION wsrep_sync_wait = default; +SET GLOBAL wsrep_provider_options="pc.bootstrap=1"; +SET SESSION wsrep_on=0; +CALL mtr.add_suppression("WSREP: exception from gcomm, backend must be restarted: Gcomm backend termination was requested by setting gmcast.isolate=2."); diff --git 
a/mysql-test/suite/galera/r/galera_sst_mysqldump_with_key.result b/mysql-test/suite/galera/r/galera_sst_mysqldump_with_key.result index 0a88b290e59..93ab4a3f3d4 100644 --- a/mysql-test/suite/galera/r/galera_sst_mysqldump_with_key.result +++ b/mysql-test/suite/galera/r/galera_sst_mysqldump_with_key.result @@ -1,13 +1,21 @@ Setting SST method to mysqldump ... +call mtr.add_suppression("WSREP: wsrep_sst_method is set to 'mysqldump' yet mysqld bind_address is set to '127.0.0.1'"); +call mtr.add_suppression("Failed to load slave replication state from table mysql.gtid_slave_pos"); +connection node_1; CREATE USER 'sst'; GRANT ALL PRIVILEGES ON *.* TO 'sst'; SET GLOBAL wsrep_sst_auth = 'sst:'; +connection node_2; SET GLOBAL wsrep_sst_method = 'mysqldump'; +connection node_1; +connection node_2; +connection node_1; CREATE USER sslsst; GRANT ALL PRIVILEGES ON *.* TO sslsst; GRANT USAGE ON *.* TO sslsst REQUIRE SSL; SET GLOBAL wsrep_sst_auth = 'sslsst:'; Performing State Transfer on a server that has been temporarily disconnected +connection node_1; CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB; SET AUTOCOMMIT=OFF; START TRANSACTION; @@ -17,6 +25,7 @@ INSERT INTO t1 VALUES ('node1_committed_before'); INSERT INTO t1 VALUES ('node1_committed_before'); INSERT INTO t1 VALUES ('node1_committed_before'); COMMIT; +connection node_2; SET AUTOCOMMIT=OFF; START TRANSACTION; INSERT INTO t1 VALUES ('node2_committed_before'); @@ -27,6 +36,7 @@ INSERT INTO t1 VALUES ('node2_committed_before'); COMMIT; Unloading wsrep provider ... SET GLOBAL wsrep_provider = 'none'; +connection node_1; SET AUTOCOMMIT=OFF; START TRANSACTION; INSERT INTO t1 VALUES ('node1_committed_during'); @@ -41,6 +51,7 @@ INSERT INTO t1 VALUES ('node1_to_be_committed_after'); INSERT INTO t1 VALUES ('node1_to_be_committed_after'); INSERT INTO t1 VALUES ('node1_to_be_committed_after'); INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +connect node_1a_galera_st_disconnect_slave, 127.0.0.1, root, , test, $NODE_MYPORT_1; SET AUTOCOMMIT=OFF; START TRANSACTION; INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); @@ -48,6 +59,7 @@ INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +connection node_2; Loading wsrep provider ... 
SET AUTOCOMMIT=OFF; START TRANSACTION; @@ -57,6 +69,7 @@ INSERT INTO t1 VALUES ('node2_committed_after'); INSERT INTO t1 VALUES ('node2_committed_after'); INSERT INTO t1 VALUES ('node2_committed_after'); COMMIT; +connection node_1; INSERT INTO t1 VALUES ('node1_to_be_committed_after'); INSERT INTO t1 VALUES ('node1_to_be_committed_after'); INSERT INTO t1 VALUES ('node1_to_be_committed_after'); @@ -71,6 +84,7 @@ INSERT INTO t1 VALUES ('node1_committed_after'); INSERT INTO t1 VALUES ('node1_committed_after'); INSERT INTO t1 VALUES ('node1_committed_after'); COMMIT; +connection node_1a_galera_st_disconnect_slave; INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); @@ -85,6 +99,7 @@ COUNT(*) = 0 1 COMMIT; SET AUTOCOMMIT=ON; +connection node_1; SELECT COUNT(*) = 35 FROM t1; COUNT(*) = 35 1 @@ -94,8 +109,10 @@ COUNT(*) = 0 DROP TABLE t1; COMMIT; SET AUTOCOMMIT=ON; +connection node_1; CALL mtr.add_suppression("Slave SQL: Error 'The MySQL server is running with the --skip-grant-tables option so it cannot execute this statement' on query"); DROP USER sst; +connection node_2; CALL mtr.add_suppression("Slave SQL: Error 'The MySQL server is running with the --skip-grant-tables option so it cannot execute this statement' on query"); CALL mtr.add_suppression("InnoDB: Error: Table \"mysql\"\\.\"innodb_index_stats\" not found"); CALL mtr.add_suppression("Can't open and lock time zone table"); @@ -103,5 +120,3 @@ CALL mtr.add_suppression("Can't open and lock privilege tables"); CALL mtr.add_suppression("Info table is not ready to be used"); CALL mtr.add_suppression("Native table .* has the wrong structure"); DROP USER sslsst; -SET GLOBAL general_log = ON; -SET GLOBAL slow_query_log = ON; diff --git a/mysql-test/suite/galera/r/galera_var_dirty_reads.result b/mysql-test/suite/galera/r/galera_var_dirty_reads.result index 020efb7b8f1..049aa5be3cc 100644 --- a/mysql-test/suite/galera/r/galera_var_dirty_reads.result +++ b/mysql-test/suite/galera/r/galera_var_dirty_reads.result @@ -1,5 +1,7 @@ connection node_1; connection node_2; +connection node_1; +connection node_2; connection node_2; CREATE TABLE t1(i INT) ENGINE=INNODB; INSERT INTO t1 VALUES(1); diff --git a/mysql-test/suite/galera/t/MW-416.test b/mysql-test/suite/galera/t/MW-416.test new file mode 100644 index 00000000000..df4fa35abc7 --- /dev/null +++ b/mysql-test/suite/galera/t/MW-416.test @@ -0,0 +1,134 @@ +--source include/galera_cluster.inc +--source include/have_innodb.inc + +--source include/wait_until_ready.inc + +CREATE USER 'userMW416'@'localhost'; +GRANT SELECT, INSERT, UPDATE ON test.* TO 'userMW416'@'localhost'; + +SHOW GLOBAL STATUS LIKE 'wsrep_replicated'; + +--connect userMW416, localhost, userMW416,, test, $NODE_MYPORT_1 +--connection userMW416 + +# DDL + +--error 1044 +ALTER DATABASE db CHARACTER SET = utf8; +--error 1044 +ALTER EVENT ev1 RENAME TO ev2; +--error 1370 +ALTER FUNCTION fun1 COMMENT 'foo'; +#--error 1044,1227 +#ALTER INSTANCE ROTATE INNODB MASTER KEY; +--error 1044,1227 +ALTER LOGFILE GROUP lfg ADD UNDOFILE 'file' ENGINE=InnoDB; +--error 1044,1227,1370 +ALTER PROCEDURE proc1 COMMENT 'foo'; +--error 1044,1227,1370 +ALTER SERVER srv OPTIONS (USER 'sally'); +--error 1044,1142,1227,1370 +ALTER TABLE tbl DROP COLUMN col; +--error 1044,1227,1370 +ALTER TABLESPACE tblspc DROP DATAFILE 'file' ENGINE=innodb; +--error 1044,1142,1227,1370 +ALTER VIEW vw AS SELECT 1; + +--error 1044,1227,1370 +CREATE DATABASE db; +--error 
1044,1227,1370 +CREATE EVENT ev1 ON SCHEDULE AT CURRENT_TIMESTAMP DO SELECT 1; +--error 1044,1227,1370 +CREATE FUNCTION fun1() RETURNS int RETURN(1); +--error 1044,1227,1370 +CREATE FUNCTION fun1 RETURNS STRING SONAME 'funlib.so'; +--error 1044,1227,1370 +CREATE PROCEDURE proc1() BEGIN END; +--error 1044,1142,1227,1370 +CREATE INDEX idx ON tbl(id); +--error 1044,1142,1227,1370 +CREATE LOGFILE GROUP lfg ADD UNDOFILE 'undofile' ENGINE innodb; +--error 1044,1142,1227,1370 +CREATE SERVER srv FOREIGN DATA WRAPPER 'fdw' OPTIONS (USER 'user'); +--error 1044,1142,1227,1370 +CREATE TABLE t (i int); +--error 1044,1142,1227,1370 +CREATE TABLESPACE tblspc ADD DATAFILE 'file' ENGINE=innodb; +--error 1044,1142,1227,1370 +CREATE TRIGGER trg BEFORE UPDATE ON t FOR EACH ROW BEGIN END; +--error 1044,1142,1227,1370 +CREATE VIEW vw AS SELECT 1; + + + +--error 1044,1142,1227,1370 +DROP DATABASE db; +--error 1044,1142,1227,1370 +DROP EVENT ev; +--error 1044,1142,1227,1370 +DROP FUNCTION fun1; +--error 1044,1142,1227,1370 +DROP INDEX idx ON t0; +--error 1044,1142,1227,1370,1064 +DROP LOGFILE GROUP lfg; +--error 1044,1142,1227,1370 +DROP PROCEDURE proc1; +--error 1044,1142,1227,1370 +DROP SERVEr srv; +--error 1044,1142,1227,1370 +DROP TABLE t0; +--error 1044,1142,1227,1370,1064 +DROP TABLESPACE tblspc; +--error 1044,1142,1227,1360,1370 +DROP TRIGGER trg; +--error 1044,1142,1227,1370 +DROP VIEW vw; + +--error 1044,1142,1227,1370 +RENAME TABLE t0 TO t1; + +--error 1044,1142,1227,1370 +TRUNCATE TABLE t0; + +# DCL + +# account management +--error 1044,1142,1227,1370,1064 +ALTER USER myuser PASSWORD EXPIRE; +--error 1044,1142,1227,1370 +CREATE USER myuser IDENTIFIED BY 'pass'; +--error 1044,1142,1227,1370 +DROP USER myuser; +--error 1044,1045,1142,1227,1370 +GRANT ALL ON *.* TO 'myuser'; +--error 1044,1142,1227,1370 +RENAME USER myuser TO mariauser; +--error 1044,1142,1227,1370 +REVOKE SELECT ON test FROM myuser; +--error 1044,1142,1227,1370,1698 +REVOKE ALL, GRANT OPTION FROM myuser; +--error 1044,1142,1227,1370,1698 +REVOKE PROXY ON myuser FROM myuser; + +# table maintenance +--error 1044,1142,1227,1370 +ANALYZE TABLE db.tbl; +--error 1044,1142,1227,1370 +CHECK TABLE db.tbl; +--error 1044,1142,1227,1370 +CHECKSUM TABLE db.tbl; +--error 1044,1142,1227,1370 +OPTIMIZE TABLE db.tbl; +--error 1044,1142,1227,1370 +REPAIR TABLE db.tbl; + +# plugin and user defined functions +--error 1044,1142,1227,1370 +INSTALL PLUGIN plg SONAME 'plg.so'; +--error 1044,1142,1227,1370 +UNINSTALL PLUGIN plg; + +--connection node_1 +DROP USER 'userMW416'@'localhost'; +SHOW DATABASES; +SHOW GLOBAL STATUS LIKE 'wsrep_replicated'; diff --git a/mysql-test/suite/galera/t/galera#500.test b/mysql-test/suite/galera/t/galera#500.test new file mode 100644 index 00000000000..3c8490b6907 --- /dev/null +++ b/mysql-test/suite/galera/t/galera#500.test @@ -0,0 +1,38 @@ +# +# The purpose of this test is to verify that if an exception is +# thrown from gcomm background thread, the provider terminates properly +# and wsrep_ready becomes 0. +# + +--source include/have_innodb.inc +--source include/galera_cluster.inc +--source include/galera_have_debug_sync.inc + +# Force node_2 gcomm background thread to terminate via exception. +--connection node_2 +--let $wsrep_cluster_address = `SELECT @@wsrep_cluster_address` +# Setting gmcast.isolate=2 will force gcomm background thread to +# throw exception. +SET SESSION wsrep_sync_wait = 0; +SET GLOBAL wsrep_provider_options="gmcast.isolate=2"; + +# Wait until wsrep_ready becomes 0. 
+--let $wait_condition = SELECT VARIABLE_VALUE = 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME ='wsrep_ready' +--source include/wait_condition.inc + +# Wait until node_1 ends up in non-prim and rebootstrap the cluster. +--connection node_1 +SET SESSION wsrep_sync_wait = 0; +--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME ='wsrep_cluster_size' +--source include/wait_condition.inc +SHOW STATUS LIKE 'wsrep_cluster_status'; +SET SESSION wsrep_sync_wait = default; +SET GLOBAL wsrep_provider_options="pc.bootstrap=1"; + +# Restart node_2 +--connection node_2 +SET SESSION wsrep_on=0; +--source include/restart_mysqld.inc + +--connection node_2 +CALL mtr.add_suppression("WSREP: exception from gcomm, backend must be restarted: Gcomm backend termination was requested by setting gmcast.isolate=2."); diff --git a/mysql-test/suite/galera/t/galera_gcache_recover_manytrx.test b/mysql-test/suite/galera/t/galera_gcache_recover_manytrx.test index 3bfcdc9f117..08165f30f7d 100644 --- a/mysql-test/suite/galera/t/galera_gcache_recover_manytrx.test +++ b/mysql-test/suite/galera/t/galera_gcache_recover_manytrx.test @@ -5,6 +5,7 @@ --source include/galera_cluster.inc --source include/big_test.inc +--source include/have_log_bin.inc SET SESSION wsrep_sync_wait = 0; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 LONGBLOB) ENGINE=InnoDB; @@ -93,6 +94,8 @@ END| DELIMITER ;| +--let $wsrep_last_committed_before = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'` + --connect node_1_insert_simple, 127.0.0.1, root, , test, $NODE_MYPORT_1 --connect node_1_insert_multi, 127.0.0.1, root, , test, $NODE_MYPORT_1 --connect node_1_insert_transaction, 127.0.0.1, root, , test, $NODE_MYPORT_1 @@ -124,6 +127,13 @@ DELIMITER ;| --connection node_2 SET SESSION wsrep_sync_wait = 0; + +# Make sure that node_2 is not killed while TOIs are applied. +# Otherwhise we risk that grastate file is marked unsafe, and +# as a consequence the node cannot rejoin with IST. 
+--let $wait_condition = SELECT VARIABLE_VALUE > $wsrep_last_committed_before FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed' +--source include/wait_condition.inc + --source include/kill_galera.inc --sleep 10 @@ -172,9 +182,8 @@ SET SESSION wsrep_sync_wait = 0; --source include/start_mysqld.inc --connection node_1 ---source include/wait_until_connected_again.inc ---source include/galera_wait_ready.inc --let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; +--source include/wait_condition.inc --let $diff_servers = 1 2 --source include/diff_servers.inc diff --git a/mysql-test/suite/galera/t/galera_many_tables_pk.test b/mysql-test/suite/galera/t/galera_many_tables_pk.test index 551307b123f..73c5fc1622c 100644 --- a/mysql-test/suite/galera/t/galera_many_tables_pk.test +++ b/mysql-test/suite/galera/t/galera_many_tables_pk.test @@ -16,7 +16,7 @@ if (!`SELECT @@open_files_limit >= 1024`){ while ($count) { --disable_query_log - --let $ddl_var = `SELECT CONCAT("CREATE TABLE t", $count, " (f1 INTEGER AUTO_INCREMENT PRIMARY KEY) ENGINE=InnoDB")` + --let $ddl_var = `SELECT CONCAT("CREATE TABLE t", $count, " (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB")` --eval $ddl_var --enable_query_log --dec $count @@ -37,7 +37,7 @@ START TRANSACTION; while ($count) { --disable_query_log - --let $ddl_var = `SELECT CONCAT("INSERT INTO t", $count, " VALUES (DEFAULT)")` + --let $ddl_var = `SELECT CONCAT("INSERT INTO t", $count, " VALUES (1)")` --eval $ddl_var --enable_query_log --dec $count diff --git a/mysql-test/suite/galera/t/galera_sst_mariabackup.test b/mysql-test/suite/galera/t/galera_sst_mariabackup.test index 0e7ac487700..bcb9ade3a25 100644 --- a/mysql-test/suite/galera/t/galera_sst_mariabackup.test +++ b/mysql-test/suite/galera/t/galera_sst_mariabackup.test @@ -1,3 +1,4 @@ +--source include/big_test.inc --source include/galera_cluster.inc --source include/have_innodb.inc --source include/have_mariabackup.inc diff --git a/mysql-test/suite/galera/t/galera_sst_mysqldump.test b/mysql-test/suite/galera/t/galera_sst_mysqldump.test index 390e9815b20..835fac94a68 100644 --- a/mysql-test/suite/galera/t/galera_sst_mysqldump.test +++ b/mysql-test/suite/galera/t/galera_sst_mysqldump.test @@ -1,3 +1,4 @@ +--source include/big_test.inc --source include/galera_cluster.inc --source suite/galera/include/galera_sst_set_mysqldump.inc diff --git a/mysql-test/suite/galera/t/galera_sst_mysqldump_with_key.cnf b/mysql-test/suite/galera/t/galera_sst_mysqldump_with_key.cnf index e108484b248..44e5573b3e6 100644 --- a/mysql-test/suite/galera/t/galera_sst_mysqldump_with_key.cnf +++ b/mysql-test/suite/galera/t/galera_sst_mysqldump_with_key.cnf @@ -12,10 +12,6 @@ wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.size=1;pc.ignore [mysqld] wsrep_debug=ON -ssl-ca=@ENV.MYSQL_TEST_DIR/std_data/galera-cert.pem -ssl-cert=@ENV.MYSQL_TEST_DIR/std_data/galera-cert.pem -ssl-key=@ENV.MYSQL_TEST_DIR/std_data/galera-key.pem - [client] ssl-ca=@ENV.MYSQL_TEST_DIR/std_data/cacert.pem ssl-cert=@ENV.MYSQL_TEST_DIR/std_data/client-cert.pem diff --git a/mysql-test/suite/galera/t/galera_sst_mysqldump_with_key.test b/mysql-test/suite/galera/t/galera_sst_mysqldump_with_key.test index c813e04169f..0dbc63b531c 100644 --- a/mysql-test/suite/galera/t/galera_sst_mysqldump_with_key.test +++ b/mysql-test/suite/galera/t/galera_sst_mysqldump_with_key.test @@ -5,9 +5,12 @@ --source include/big_test.inc --source include/galera_cluster.inc --source 
include/have_innodb.inc ---source include/have_openssl.inc +--source include/have_ssl_communication.inc --source suite/galera/include/galera_sst_set_mysqldump.inc +--let $node_1=node_1 +--let $node_2=node_2 +--source include/auto_increment_offset_save.inc --connection node_1 CREATE USER sslsst; @@ -18,12 +21,7 @@ SET GLOBAL wsrep_sst_auth = 'sslsst:'; --source suite/galera/include/galera_st_disconnect_slave.inc +--source include/auto_increment_offset_restore.inc --source suite/galera/include/galera_sst_restore.inc -DROP USER sslsst; - ---connection node_2 -# We have to manually restore global_log and slow_query_log due to mysql-wsrep#108 -# Otherwise MTR's check_testcases complains -SET GLOBAL general_log = ON; -SET GLOBAL slow_query_log = ON; +DROP USER sslsst; diff --git a/mysql-test/suite/galera/t/galera_sst_rsync.test b/mysql-test/suite/galera/t/galera_sst_rsync.test index f796356cac7..5c08707e870 100644 --- a/mysql-test/suite/galera/t/galera_sst_rsync.test +++ b/mysql-test/suite/galera/t/galera_sst_rsync.test @@ -1,3 +1,4 @@ +--source include/big_test.inc --source include/galera_cluster.inc --let $node_1=node_1 diff --git a/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2-options.cnf b/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2-options.cnf index 1e29673c0ff..3abf2549aae 100644 --- a/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2-options.cnf +++ b/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2-options.cnf @@ -8,7 +8,7 @@ wsrep_debug=ON [xtrabackup] backup-locks close-files -compact +#compact - disabled in xtrabackup 2.4, https://bugs.launchpad.net/percona-xtrabackup/+bug/1192834/comments/29 # compression requires qpress from the Percona repositories # compress # compress-threads=2 diff --git a/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2.test b/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2.test index f1fd0f3ddf3..c270e4d0b19 100644 --- a/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2.test +++ b/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2.test @@ -1,3 +1,4 @@ +--source include/big_test.inc --source include/galera_cluster.inc --source include/have_innodb.inc --source include/have_xtrabackup.inc diff --git a/mysql-test/suite/galera/t/galera_var_dirty_reads.test b/mysql-test/suite/galera/t/galera_var_dirty_reads.test index 3e2108868af..1f01c4aac07 100644 --- a/mysql-test/suite/galera/t/galera_var_dirty_reads.test +++ b/mysql-test/suite/galera/t/galera_var_dirty_reads.test @@ -11,6 +11,11 @@ --let $node_2=node_2 --source include/auto_increment_offset_save.inc +# Save original auto_increment_offset values. 
+--let $node_1=node_1 +--let $node_2=node_2 +--source include/auto_increment_offset_save.inc + --connection node_2 --let $wsrep_cluster_address_saved = `SELECT @@global.wsrep_cluster_address` diff --git a/mysql-test/suite/galera_3nodes/r/galera_evs_suspect_timeout.result b/mysql-test/suite/galera_3nodes/r/galera_evs_suspect_timeout.result index 1464222a079..7e0d282ec7f 100644 --- a/mysql-test/suite/galera_3nodes/r/galera_evs_suspect_timeout.result +++ b/mysql-test/suite/galera_3nodes/r/galera_evs_suspect_timeout.result @@ -11,6 +11,7 @@ SET SESSION wsrep_sync_wait = 0; SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; VARIABLE_VALUE = 2 1 +SET SESSION wsrep_sync_wait = DEFAULT; SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 diff --git a/mysql-test/suite/galera_3nodes/t/galera_evs_suspect_timeout.test b/mysql-test/suite/galera_3nodes/t/galera_evs_suspect_timeout.test index a87f19ac94e..03236a3cb93 100644 --- a/mysql-test/suite/galera_3nodes/t/galera_evs_suspect_timeout.test +++ b/mysql-test/suite/galera_3nodes/t/galera_evs_suspect_timeout.test @@ -50,6 +50,7 @@ SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_N --enable_query_log --source include/wait_until_connected_again.inc +SET SESSION wsrep_sync_wait = DEFAULT; SELECT COUNT(*) = 1 FROM t1; DROP TABLE t1; diff --git a/mysql-test/suite/handler/handler.inc b/mysql-test/suite/handler/handler.inc index 2432ff13e55..c29ee0c693d 100644 --- a/mysql-test/suite/handler/handler.inc +++ b/mysql-test/suite/handler/handler.inc @@ -373,7 +373,9 @@ connection con2; send optimize table t1; --sleep 1 connection default; +--disable_ps_protocol handler t1 read next; +--enable_ps_protocol handler t1 close; connection con2; reap; diff --git a/mysql-test/suite/handler/ps.result b/mysql-test/suite/handler/ps.result new file mode 100644 index 00000000000..54685f9156b --- /dev/null +++ b/mysql-test/suite/handler/ps.result @@ -0,0 +1,9 @@ +create table t1 (i int); +handler test.t1 open handler_a; +flush status; +handler handler_a read first; +i +show status like 'Com_stmt_prepare%'; +Variable_name Value +Com_stmt_prepare OK +drop table t1; diff --git a/mysql-test/suite/handler/ps.test b/mysql-test/suite/handler/ps.test new file mode 100644 index 00000000000..68091190c85 --- /dev/null +++ b/mysql-test/suite/handler/ps.test @@ -0,0 +1,11 @@ +# +# MDEV-15729 Server crashes in Field::make_field upon HANDLER READ executed with PS protocol +# +create table t1 (i int); +handler test.t1 open handler_a; +flush status; +handler handler_a read first; +# handler...read must be prepared in --ps-protocol mode +--replace_result $PS_PROTOCOL OK +show status like 'Com_stmt_prepare%'; +drop table t1; diff --git a/mysql-test/suite/heap/heap_auto_increment.result b/mysql-test/suite/heap/heap_auto_increment.result index d6504349d92..e3fd377d4ef 100644 --- a/mysql-test/suite/heap/heap_auto_increment.result +++ b/mysql-test/suite/heap/heap_auto_increment.result @@ -40,6 +40,32 @@ _rowid _rowid skey sval 2 2 2 hey drop table t1; # +# MDEV-16534 PPC64: Unexpected error with a negative values into auto-increment columns in HEAP, MyISAM, ARIA +# +CREATE TABLE t1 ( +id TINYINT NOT NULL AUTO_INCREMENT, +name CHAR(30) NOT NULL, +PRIMARY KEY (id) +) ENGINE=MEMORY; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `id` tinyint(4) NOT NULL AUTO_INCREMENT, + `name` char(30) NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=MEMORY DEFAULT CHARSET=latin1 +INSERT INTO t1 (name) VALUES ('dog'); +UPDATE t1 
SET id=-1 WHERE id=1; +INSERT INTO t1 (name) VALUES ('cat'); +SELECT * FROM t1; +id name +-1 dog +2 cat +DROP TABLE t1; +# +# End of 5.5 tests +# +# # MDEV-15352 AUTO_INCREMENT breaks after updating a column value to a negative number # SET @engine='MEMORY'; @@ -159,3 +185,6 @@ id name -1 dog 2 cat DROP PROCEDURE autoinc_mdev15353_one; +# +# End of 10.2 tests +# diff --git a/mysql-test/suite/heap/heap_auto_increment.test b/mysql-test/suite/heap/heap_auto_increment.test index 6ec150f48da..a480251f3e8 100644 --- a/mysql-test/suite/heap/heap_auto_increment.test +++ b/mysql-test/suite/heap/heap_auto_increment.test @@ -35,8 +35,33 @@ drop table t1; # End of 4.1 tests --echo # +--echo # MDEV-16534 PPC64: Unexpected error with a negative values into auto-increment columns in HEAP, MyISAM, ARIA +--echo # + +CREATE TABLE t1 ( + id TINYINT NOT NULL AUTO_INCREMENT, + name CHAR(30) NOT NULL, + PRIMARY KEY (id) +) ENGINE=MEMORY; +SHOW CREATE TABLE t1; +INSERT INTO t1 (name) VALUES ('dog'); +UPDATE t1 SET id=-1 WHERE id=1; +INSERT INTO t1 (name) VALUES ('cat'); +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # End of 5.5 tests +--echo # + +--echo # --echo # MDEV-15352 AUTO_INCREMENT breaks after updating a column value to a negative number --echo # SET @engine='MEMORY'; --source include/autoinc_mdev15353.inc + +--echo # +--echo # End of 10.2 tests +--echo # + diff --git a/mysql-test/suite/innodb/t/innodb_bug27216817.test b/mysql-test/suite/innodb/t/innodb_bug27216817.test deleted file mode 100644 index a93932b4a04..00000000000 --- a/mysql-test/suite/innodb/t/innodb_bug27216817.test +++ /dev/null @@ -1,28 +0,0 @@ -# -# BUG#27216817: INNODB: FAILING ASSERTION: -# PREBUILT->TABLE->N_MYSQL_HANDLES_OPENED == 1 -# - -source include/have_innodb.inc; -create table t1 (a int not null, b int not null) engine=innodb; -insert t1 values (1,2),(3,4); - -lock table t1 write, t1 tr read; -flush status; -alter table t1 add primary key (b); -show status like 'Handler_read_rnd_next'; -unlock tables; -alter table t1 drop primary key; - -lock table t1 write; -flush status; -alter table t1 add primary key (b); -show status like 'Handler_read_rnd_next'; -unlock tables; -alter table t1 drop primary key; - -flush status; -alter table t1 add primary key (b); -show status like 'Handler_read_rnd_next'; - -drop table t1; diff --git a/mysql-test/suite/maria/maria.result b/mysql-test/suite/maria/maria.result index 32c7af632c7..03f53b26a56 100644 --- a/mysql-test/suite/maria/maria.result +++ b/mysql-test/suite/maria/maria.result @@ -2745,6 +2745,35 @@ INSERT INTO t1 (b) VALUES (''); ALTER TABLE t1 ENABLE KEYS; DROP TABLE t1; # +# Start of 5.5 tests +# +# +# MDEV-16534 PPC64: Unexpected error with a negative values into auto-increment columns in HEAP, MyISAM, ARIA +# +CREATE TABLE t1 ( +id TINYINT NOT NULL AUTO_INCREMENT, +name CHAR(30) NOT NULL, +PRIMARY KEY (id) +) ENGINE=ARIA; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `id` tinyint(4) NOT NULL AUTO_INCREMENT, + `name` char(30) NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=0 +INSERT INTO t1 (name) VALUES ('dog'); +UPDATE t1 SET id=-1 WHERE id=1; +INSERT INTO t1 (name) VALUES ('cat'); +SELECT * FROM t1; +id name +-1 dog +2 cat +DROP TABLE t1; +# +# End of 5.5 tests +# +# # BUG#47444 - --myisam_repair_threads > 1 can result in all index # cardinalities=1 # diff --git a/mysql-test/suite/maria/maria.test b/mysql-test/suite/maria/maria.test index 79cd3fe3ad9..34eac473304 100644 --- a/mysql-test/suite/maria/maria.test 
+++ b/mysql-test/suite/maria/maria.test @@ -1990,6 +1990,30 @@ ALTER TABLE t1 ENABLE KEYS; DROP TABLE t1; --echo # +--echo # Start of 5.5 tests +--echo # + +--echo # +--echo # MDEV-16534 PPC64: Unexpected error with a negative values into auto-increment columns in HEAP, MyISAM, ARIA +--echo # + +CREATE TABLE t1 ( + id TINYINT NOT NULL AUTO_INCREMENT, + name CHAR(30) NOT NULL, + PRIMARY KEY (id) +) ENGINE=ARIA; +SHOW CREATE TABLE t1; +INSERT INTO t1 (name) VALUES ('dog'); +UPDATE t1 SET id=-1 WHERE id=1; +INSERT INTO t1 (name) VALUES ('cat'); +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # End of 5.5 tests +--echo # + +--echo # --echo # BUG#47444 - --myisam_repair_threads > 1 can result in all index --echo # cardinalities=1 --echo # diff --git a/mysql-test/suite/plugins/r/processlist.result b/mysql-test/suite/plugins/r/processlist.result new file mode 100644 index 00000000000..22515c8174b --- /dev/null +++ b/mysql-test/suite/plugins/r/processlist.result @@ -0,0 +1,11 @@ +create table t1 (a int) engine=innodb; +start transaction; +insert t1 values (1); +connect con2,localhost,root; +state from show engine innodb status + +state from show processlist + +disconnect con2; +connection default; +drop table t1; diff --git a/mysql-test/suite/plugins/t/processlist.test b/mysql-test/suite/plugins/t/processlist.test new file mode 100644 index 00000000000..5aacef317b9 --- /dev/null +++ b/mysql-test/suite/plugins/t/processlist.test @@ -0,0 +1,18 @@ +# +# MDEV-15359 Thread stay in "cleaning up" status after finishing +# +source include/have_innodb.inc; + +create table t1 (a int) engine=innodb; +start transaction; +insert t1 values (1); +let id=`select connection_id()`; +connect con2,localhost,root; +let s=query_get_value(show engine innodb status,Status,1); +disable_query_log; +eval select regexp_replace("$s", '(?s)^.*MySQL thread id $id,.*root([^\n]*)\n.*', '\\\\1') as `state from show engine innodb status`; +eval select state as `state from show processlist` from information_schema.processlist where id = $id; +enable_query_log; +disconnect con2; +connection default; +drop table t1; diff --git a/mysql-test/suite/rpl/r/rpl_parallel_optimistic.result b/mysql-test/suite/rpl/r/rpl_parallel_optimistic.result index 7c8a2a94104..07b080a72c3 100644 --- a/mysql-test/suite/rpl/r/rpl_parallel_optimistic.result +++ b/mysql-test/suite/rpl/r/rpl_parallel_optimistic.result @@ -610,8 +610,39 @@ a b 57 7 58 8 59 9 +connection server_1; +DELETE FROM t1; +DELETE FROM t2; +include/save_master_gtid.inc +connection server_2; +include/sync_with_master_gtid.inc +connection server_2; +set global log_warnings=2; +BEGIN; +INSERT INTO t1 SET a=1; +connection server_1; +SET @save.binlog_format=@@session.binlog_format; +SET @@SESSION.binlog_format=row; +BEGIN; +INSERT INTO t1 SET a=1; +INSERT INTO t2 SET a=1; +COMMIT; +BEGIN; +DELETE FROM t2; +COMMIT; +connection server_2; +connection server_2; +ROLLBACK; +connection server_1; +SET @@SESSION.binlog_format= @save.binlog_format; +DELETE FROM t1; +DELETE FROM t2; +include/save_master_gtid.inc +connection server_2; +include/sync_with_master_gtid.inc connection server_2; include/stop_slave.inc +set global log_warnings=default; SET GLOBAL slave_parallel_mode=@old_parallel_mode; SET GLOBAL slave_parallel_threads=@old_parallel_threads; include/start_slave.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_optimistic.test b/mysql-test/suite/rpl/t/rpl_parallel_optimistic.test index b5d78033088..a49d59c4eb4 100644 --- a/mysql-test/suite/rpl/t/rpl_parallel_optimistic.test +++ 
b/mysql-test/suite/rpl/t/rpl_parallel_optimistic.test @@ -484,10 +484,69 @@ SELECT * FROM t2 WHERE a >= 40 ORDER BY a; SELECT * FROM t1 WHERE a >= 40 ORDER BY a; SELECT * FROM t2 WHERE a >= 40 ORDER BY a; -# Clean up. +# partial cleanup to reuse the tables by following tests +--connection server_1 +DELETE FROM t1; +DELETE FROM t2; +--source include/save_master_gtid.inc + +--connection server_2 +--source include/sync_with_master_gtid.inc + +# +# MDEV-13577 optimistic parallel slave errors out to error log unnecessary +# + +# The 1st of the following two trx:s a blocker on slave +--connection server_2 +set global log_warnings=2; +BEGIN; +INSERT INTO t1 SET a=1; + +--connection server_1 +SET @save.binlog_format=@@session.binlog_format; +SET @@SESSION.binlog_format=row; + +BEGIN; + INSERT INTO t1 SET a=1; + INSERT INTO t2 SET a=1; +COMMIT; +# This transaction is going to win optimistical race with above INSERT +# on slave while being depend on it. That means it will face a kind of temporary error +# and then will retry to succeed. +BEGIN; + DELETE FROM t2; +COMMIT; + +# First make sure DELETE raced indeed to get stuck at retrying stage +# where it runs "realistically" now. There is nomore optimistic error +# in the errorlog, which is downgraded to the warning level (when +# --log-warnings > 1), see above suppression. +--connection server_2 +--let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE state = "Waiting for prior transaction to commit" +--source include/wait_condition.inc + +# Next release the 1st trx to commit. +--connection server_2 +ROLLBACK; + +# MDEV-13577 local cleanup: +--connection server_1 +SET @@SESSION.binlog_format= @save.binlog_format; +DELETE FROM t1; +DELETE FROM t2; +--source include/save_master_gtid.inc + +--connection server_2 +--source include/sync_with_master_gtid.inc + +# +# Clean up. +# --connection server_2 --source include/stop_slave.inc +set global log_warnings=default; SET GLOBAL slave_parallel_mode=@old_parallel_mode; SET GLOBAL slave_parallel_threads=@old_parallel_threads; --source include/start_slave.inc diff --git a/mysql-test/suite/unit/suite.pm b/mysql-test/suite/unit/suite.pm index c92363a8601..c8180c59240 100644 --- a/mysql-test/suite/unit/suite.pm +++ b/mysql-test/suite/unit/suite.pm @@ -40,7 +40,7 @@ sub start_test { my $bin=$ENV{MTR_BINDIR} || '..'; return "Not run for embedded server" if $::opt_embedded_server; return "Not configured to run ctest" unless -f "$bin/CTestTestfile.cmake"; - my ($ctest_vs)= $opt_vs_config ? "-C $opt_vs_config" : ""; + my ($ctest_vs)= $::opt_vs_config ? 
"-C ".substr($::opt_vs_config,1) : ""; my (@ctest_list)= `cd "$bin" && ctest $ctest_vs --show-only --verbose`; return "No ctest" if $?; diff --git a/mysql-test/suite/vcol/r/vcol_misc.result b/mysql-test/suite/vcol/r/vcol_misc.result index c5cd05129f2..62ae344365c 100644 --- a/mysql-test/suite/vcol/r/vcol_misc.result +++ b/mysql-test/suite/vcol/r/vcol_misc.result @@ -352,6 +352,16 @@ a b c DROP TABLE t1; SET sql_mode=DEFAULT; # +# MDEV-15834 The code in TABLE_SHARE::init_from_binary_frm_image() is not safe +# +SHOW TABLES; +Tables_in_test +t1 +SHOW CREATE TABLE t1; +ERROR HY000: Incorrect information in file: './test/t1.frm' +ALTER TABLE t1; +ERROR HY000: Incorrect information in file: './test/t1.frm' +# # End of 5.5 tests # # diff --git a/mysql-test/suite/vcol/t/vcol_misc.test b/mysql-test/suite/vcol/t/vcol_misc.test index 2244967a968..3efbaa0a376 100644 --- a/mysql-test/suite/vcol/t/vcol_misc.test +++ b/mysql-test/suite/vcol/t/vcol_misc.test @@ -1,5 +1,7 @@ --source include/have_ucs2.inc +let $MYSQLD_DATADIR= `select @@datadir`; + --disable_warnings drop table if exists t1,t2; --enable_warnings @@ -321,6 +323,20 @@ SELECT * FROM t1; DROP TABLE t1; SET sql_mode=DEFAULT; + +--echo # +--echo # MDEV-15834 The code in TABLE_SHARE::init_from_binary_frm_image() is not safe +--echo # + +--copy_file std_data/frm/t1.frm $MYSQLD_DATADIR/test/t1.frm +SHOW TABLES; +--error ER_NOT_FORM_FILE +SHOW CREATE TABLE t1; +--error ER_NOT_FORM_FILE +ALTER TABLE t1; +--remove_file $MYSQLD_DATADIR/test/t1.frm + + --echo # --echo # End of 5.5 tests --echo # diff --git a/mysql-test/suite/wsrep/r/variables.result b/mysql-test/suite/wsrep/r/variables.result index 30b97e02af3..b6f22828532 100644 --- a/mysql-test/suite/wsrep/r/variables.result +++ b/mysql-test/suite/wsrep/r/variables.result @@ -207,6 +207,16 @@ Threads_connected 1 SHOW STATUS LIKE 'wsrep_thread_count'; Variable_name Value wsrep_thread_count 11 +set wsrep_on=0; +set wsrep_on=1; +create user test@localhost; +connect con1,localhost,test; +set auto_increment_increment=10; +set wsrep_on=0; +ERROR 42000: Access denied; you need (at least one of) the SUPER privilege(s) for this operation +disconnect con1; +connection default; +drop user test@localhost; # # MDEV#6411: Setting set @@global.wsrep_sst_auth=NULL causes crash # diff --git a/mysql-test/suite/wsrep/t/variables.test b/mysql-test/suite/wsrep/t/variables.test index 1769f567848..1315f090d5c 100644 --- a/mysql-test/suite/wsrep/t/variables.test +++ b/mysql-test/suite/wsrep/t/variables.test @@ -118,6 +118,20 @@ sleep 3; SHOW STATUS LIKE 'threads_connected'; SHOW STATUS LIKE 'wsrep_thread_count'; +# +# privileges for wsrep_on +# +set wsrep_on=0; +set wsrep_on=1; +create user test@localhost; +connect con1,localhost,test; +set auto_increment_increment=10; +--error ER_SPECIFIC_ACCESS_DENIED_ERROR +set wsrep_on=0; +disconnect con1; +connection default; +drop user test@localhost; + --echo # --echo # MDEV#6411: Setting set @@global.wsrep_sst_auth=NULL causes crash --echo # diff --git a/mysql-test/t/alter_table.test b/mysql-test/t/alter_table.test index 092569dcadb..97cef0b5794 100644 --- a/mysql-test/t/alter_table.test +++ b/mysql-test/t/alter_table.test @@ -1839,6 +1839,21 @@ SHOW CREATE TABLE t1; DROP TABLE t1; --echo # +--echo # MDEV-14668 ADD PRIMARY KEY IF NOT EXISTS on composite key +--echo # +CREATE TABLE t1 ( + `ID` BIGINT(20) NOT NULL, + `RANK` MEDIUMINT(4) NOT NULL, + `CHECK_POINT` BIGINT(20) NOT NULL, + UNIQUE INDEX `HORIZON_UIDX01` (`ID`, `RANK`) + ) ENGINE=InnoDB; + +ALTER TABLE t1 ADD PRIMARY KEY IF 
NOT EXISTS (`ID`, `CHECK_POINT`); +SHOW CREATE TABLE t1; +ALTER TABLE t1 ADD PRIMARY KEY IF NOT EXISTS (`ID`, `CHECK_POINT`); +DROP TABLE t1; + +--echo # --echo # End of 10.0 tests --echo # diff --git a/mysql-test/t/auto_increment.test b/mysql-test/t/auto_increment.test index 6f678ed309f..1c359ec5268 100644 --- a/mysql-test/t/auto_increment.test +++ b/mysql-test/t/auto_increment.test @@ -398,6 +398,29 @@ select last_insert_id(); select * from t1; drop table t1; +--echo # +--echo # End of 5.3 tests +--echo # + +--echo # +--echo # MDEV-16534 PPC64: Unexpected error with a negative values into auto-increment columns in HEAP, MyISAM, ARIA +--echo # + +CREATE TABLE t1 ( + id TINYINT NOT NULL AUTO_INCREMENT, + name CHAR(30) NOT NULL, + PRIMARY KEY (id) +) ENGINE=MyISAM; +SHOW CREATE TABLE t1; +INSERT INTO t1 (name) VALUES ('dog'); +UPDATE t1 SET id=-1 WHERE id=1; +INSERT INTO t1 (name) VALUES ('cat'); +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # End of 5.5 tests +--echo # --echo # --echo # MDEV-15352 AUTO_INCREMENT breaks after updating a column value to a negative number @@ -405,3 +428,8 @@ drop table t1; SET @engine='MyISAM'; --source include/autoinc_mdev15353.inc + +--echo # +--echo # End of 10.2 tests +--echo # + diff --git a/mysql-test/t/having.test b/mysql-test/t/having.test index c9231fef3be..179af14559f 100644 --- a/mysql-test/t/having.test +++ b/mysql-test/t/having.test @@ -746,6 +746,23 @@ set sql_mode=@save_sql_mode; drop table t1; --echo # +--echo # mdev-16235: impossible HAVING in query without aggregation +--echo # + +explain extended +select * from mysql.help_topic where example = 'foo' having description is null; + +select * from mysql.help_topic where example = 'foo' having description is null; + +--echo # +--echo # End of 5. 
tests +--echo # + +--echo # +--echo # Start of 10.0 tests +--echo # + +--echo # --echo # Bug mdev-5160: two-way join with HAVING over the second table --echo # diff --git a/mysql-test/t/limit.test b/mysql-test/t/limit.test index 4dbe13096d4..668d3b74518 100644 --- a/mysql-test/t/limit.test +++ b/mysql-test/t/limit.test @@ -115,3 +115,17 @@ SELECT a FROM t1 ORDER BY a LIMIT 2 OFFSET 14; DROP TABLE t1; --echo End of 5.1 tests + +--echo # +--echo # mdev-16235: SELECT over a table with LIMIT 0 +--echo # + +EXPLAIN +SELECT * FROM mysql.slow_log WHERE sql_text != 'foo' LIMIT 0; +SELECT * FROM mysql.slow_log WHERE sql_text != 'foo' LIMIT 0; + +EXPLAIN +SELECT * FROM mysql.help_topic WHERE help_category_id != example LIMIT 0; +SELECT * FROM mysql.help_topic WHERE help_category_id != example LIMIT 0; + +--echo End of 5.5 tests diff --git a/mysql-test/t/lock.test b/mysql-test/t/lock.test index 2c2c5d42783..92ab8294273 100644 --- a/mysql-test/t/lock.test +++ b/mysql-test/t/lock.test @@ -474,7 +474,7 @@ LOCK TABLE t1 WRITE; --echo # HANDLER commands are not allowed in LOCK TABLES mode --error ER_LOCK_OR_ACTIVE_TRANSACTION HANDLER t1 OPEN; ---error ER_LOCK_OR_ACTIVE_TRANSACTION +--error ER_LOCK_OR_ACTIVE_TRANSACTION,ER_UNKNOWN_TABLE HANDLER t1 READ FIRST; --error ER_LOCK_OR_ACTIVE_TRANSACTION HANDLER t1 CLOSE; diff --git a/mysql-test/t/mysqldump.test b/mysql-test/t/mysqldump.test index 9f74f4ad9c0..abb0085157f 100644 --- a/mysql-test/t/mysqldump.test +++ b/mysql-test/t/mysqldump.test @@ -2636,6 +2636,28 @@ exec $MYSQL_DUMP mysql func > $SEARCH_FILE; source include/search_pattern_in_file.inc; --echo # +--echo # MDEV-15021: Fix the order in which routines are called +--echo # +use test; +CREATE FUNCTION f() RETURNS INT RETURN 1; +CREATE VIEW v1 AS SELECT f(); + +--echo # Running mysqldump -uroot test --routines --tables v1 > **vardir**/test.dmp +--exec $MYSQL_DUMP -uroot test --routines --tables v1 > $MYSQLTEST_VARDIR/test.dmp + +DROP VIEW v1; +DROP FUNCTION f; + +--echo # Running mysql -uroot test < **vardir**/test.dmp +--exec $MYSQL -uroot test < $MYSQLTEST_VARDIR/test.dmp + +--echo # +--echo # Cleanup after succesful import. 
+--echo # +DROP VIEW v1; +DROP FUNCTION f; + +--echo # --echo # Test for --add-drop-trigger --echo # use test; diff --git a/mysql-test/t/mysqlslap.test b/mysql-test/t/mysqlslap.test index c49c4ab3d7d..81115d59d09 100644 --- a/mysql-test/t/mysqlslap.test +++ b/mysql-test/t/mysqlslap.test @@ -80,3 +80,11 @@ DROP DATABASE bug58090; --exec $MYSQL_SLAP --create-schema=test --init-command="CREATE TABLE t1(a INT)" --silent --concurrency=1 --iterations=1 DROP TABLE t1; + +--echo # +--echo # Bug MDEV-15789 (Upstream: #80329): MYSQLSLAP OPTIONS --AUTO-GENERATE-SQL-GUID-PRIMARY and --AUTO-GENERATE-SQL-SECONDARY-INDEXES DONT WORK +--echo # + +--exec $MYSQL_SLAP --concurrency=1 --silent --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-guid-primary --create-schema=slap + +--exec $MYSQL_SLAP --concurrency=1 --silent --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-secondary-indexes=1 --create-schema=slap diff --git a/mysql-test/t/olap.test b/mysql-test/t/olap.test index fec5df1a1c7..4a61cebdc0d 100644 --- a/mysql-test/t/olap.test +++ b/mysql-test/t/olap.test @@ -404,3 +404,26 @@ SELECT DISTINCT b FROM t1, t2 GROUP BY a, b WITH ROLLUP; DROP TABLE t1, t2; --echo End of 5.0 tests + + +--echo # +--echo # Start of 10.1 tests +--echo # + +--echo # +--echo # MDEV-16190 Server crashes in Item_null_result::field_type on SELECT with time field, ROLLUP and HAVING +--echo # +CREATE TABLE t1 (t TIME) ENGINE=MyISAM; +INSERT INTO t1 VALUES ('12:12:12'); +SELECT t, COUNT(*) FROM t1 GROUP BY t WITH ROLLUP HAVING t > '00:00:00'; +DROP TABLE t1; + +CREATE TABLE t1 (t TIME) ENGINE=MyISAM; +INSERT INTO t1 VALUES ('12:12:12'),('12:12:13'); +SELECT t, COUNT(*) FROM t1 GROUP BY t WITH ROLLUP HAVING t > '00:00:00'; +DROP TABLE t1; + + +--echo # +--echo # End of 10.1 tests +--echo # diff --git a/mysql-test/t/ps.test b/mysql-test/t/ps.test index 1c8595936cd..c826bff4ea8 100644 --- a/mysql-test/t/ps.test +++ b/mysql-test/t/ps.test @@ -3875,7 +3875,58 @@ drop table t1; --echo # End of 5.5 tests --echo # ---echo # Start of 10.2 tests +--echo # End of 10.0 tests +--echo # + +--echo # +--echo # MDEV-12060 Crash in EXECUTE IMMEDIATE with an expression returning a GRANT command +--echo # (the 10.1 part) +--echo # + +DELIMITER /; +CREATE PROCEDURE p2 () +BEGIN + SET STATEMENT join_cache_level=CAST(CONCAT(_utf8'6',_latin1'') AS INT) FOR PREPARE stmt FROM 'SELECT 1'; + EXECUTE stmt; + DEALLOCATE PREPARE stmt; +END; +/ +DELIMITER ;/ +CALL p2(); +DROP PROCEDURE p2; + + +DELIMITER /; +BEGIN NOT ATOMIC + SET STATEMENT join_cache_level=CAST(CONCAT(_utf8'6',_latin1'') AS INT) FOR PREPARE stmt FROM 'SELECT 1'; + EXECUTE stmt; + DEALLOCATE PREPARE stmt; +END; +/ +DELIMITER ;/ + + +DELIMITER /; +BEGIN NOT ATOMIC + SET STATEMENT join_cache_level=CAST(CONCAT(_utf8'6',_latin1'') AS INT) FOR PREPARE stmt FROM 'SELECT 1'; + DEALLOCATE PREPARE stmt; +END; +/ +DELIMITER ;/ + + +DELIMITER /; +BEGIN NOT ATOMIC + PREPARE stmt FROM 'SELECT 1'; + SET STATEMENT join_cache_level=CAST(CONCAT(_utf8'6',_latin1'') AS INT) FOR EXECUTE stmt; + DEALLOCATE PREPARE stmt; +END; +/ +DELIMITER ;/ + + +--echo # +--echo # End of 10.1 tests --echo # --echo # @@ -4696,7 +4747,6 @@ DROP PROCEDURE p2; DROP PROCEDURE p1; DROP ROLE testrole; - --echo # --echo # End of 10.2 tests --echo # diff --git a/mysql-test/t/read_only_innodb.test b/mysql-test/t/read_only_innodb.test index a9310a1a78e..9ba3ccaca07 100644 --- a/mysql-test/t/read_only_innodb.test +++ b/mysql-test/t/read_only_innodb.test 
@@ -241,15 +241,6 @@ SELECT * FROM temp1, temp2; DROP TABLE temp1, temp2; --echo ---echo # MDEV-14185 CREATE TEMPORARY TABLE AS SELECT causes error 1290 with read_only and InnoDB. ---echo - -CREATE TEMPORARY TABLE temp1 ENGINE=INNODB AS SELECT a FROM t1; -SELECT * FROM temp1; -DROP TABLE temp1; - - ---echo --echo # Disconnect and cleanup --echo disconnect con1; diff --git a/mysql-test/t/selectivity.test b/mysql-test/t/selectivity.test index 3e60f242083..cf12bdaea21 100644 --- a/mysql-test/t/selectivity.test +++ b/mysql-test/t/selectivity.test @@ -1045,6 +1045,24 @@ SELECT * FROM (SELECT t FROM t1 WHERE d IS NULL) sq; DROP TABLE t1; +--echo # +--echo # MDEV-16374: filtered shows 0 for materilization scan for a semi join, which makes optimizer +--echo # always pick materialization scan over materialization lookup +--echo # + +create table t0(a int); +insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); +create table t1 (a int, b int); +insert into t1 values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10), +(11,11),(12,12),(13,13),(14,14),(15,15); +set @@optimizer_use_condition_selectivity=2; +explain extended select * from t1 where a in (select max(a) from t1 group by b); +select * from t1 where a in (select max(a) from t1 group by b); +set @@optimizer_use_condition_selectivity=1; +explain extended select * from t1 where a in (select max(a) from t1 group by b); +select * from t1 where a in (select max(a) from t1 group by b); +drop table t1,t0; + set histogram_size=@save_histogram_size; set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity; set use_stat_tables=@save_use_stat_tables; diff --git a/mysql-test/t/subselect_sj2_mat.test b/mysql-test/t/subselect_sj2_mat.test index cfb6c8c2819..68a888012f2 100644 --- a/mysql-test/t/subselect_sj2_mat.test +++ b/mysql-test/t/subselect_sj2_mat.test @@ -303,3 +303,132 @@ eval $q; eval explain $q; DROP TABLE t1,t2; + +--echo # +--echo # MDEV-16225: wrong resultset from query with semijoin=on +--echo # + +CREATE TABLE t1 ( + `id` int(10) NOT NULL AUTO_INCREMENT, + `local_name` varchar(64) NOT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB AUTO_INCREMENT=14 DEFAULT CHARSET=latin1; + +insert into t1(`id`,`local_name`) values +(1,'Cash Advance'), +(2,'Cash Advance'), +(3,'Rollover'), +(4,'AL Installment'), +(5,'AL Installment'), +(6,'AL Installment'), +(7,'AL Installment'), +(8,'AL Installment'), +(9,'AL Installment'), +(10,'Internet Payday'), +(11,'Rollover - Internet Payday'), +(12,'AL Monthly Installment'), +(13,'AL Semi-Monthly Installment'); + +explain +SELECT SQL_NO_CACHE t.id +FROM t1 t +WHERE ( + t.id IN (SELECT A.id FROM t1 AS A WHERE A.local_name IN (SELECT B.local_name FROM t1 AS B WHERE B.id IN (0,4,12,13,1,10,3,11))) + OR + (t.id IN (0,4,12,13,1,10,3,11)) +); +SELECT SQL_NO_CACHE t.id +FROM t1 t +WHERE ( + t.id IN (SELECT A.id FROM t1 AS A WHERE A.local_name IN (SELECT B.local_name FROM t1 AS B WHERE B.id IN (0,4,12,13,1,10,3,11))) + OR + (t.id IN (0,4,12,13,1,10,3,11)) +); +drop table t1; + +--echo # +--echo # MDEV-15247: Crash when SET NAMES 'utf8' is set +--echo # + +CREATE TABLE t1 ( + id_category int unsigned, + id_product int unsigned, + PRIMARY KEY (id_category,id_product) +) ENGINE=MyISAM; + +INSERT INTO `t1` VALUES (31,216), (31,215), (31,214), (31,213), (31,212), (32,211), (32,210), (32,209), (32,208), (29,207), (30,315372), (2,161), (2,132), (33,315380), (31,315371), (29,315370), (29,315373), (29,315369), (29,315374), (29,315368), (29,315375), (29,315367), (29,183), (29,182), 
(30,177), (29,315376), (13,315365), (2,167), (2,315357), (2,164), (2,159), (2,131), (2,127), (14,315364), (27,315363), (29,205), (29,204), (29,203), (29,202), (29,201), (29,200), (29,199), (29,198), (29,197), (29,196), (29,195), (29,194), (29,193), (29,192), (29,191), (29,190), (29,189), (14,188), (29,187), (29,186), (29,185), (29,184), (29,315377), (29,315378), (29,181), (33,315379), (29,179), (30,178), (29,180), (30,176), (30,175), (30,174), (30,173), (30,172), (11,171), (27,315357), (23,108), (23,102); + +CREATE TABLE t2 ( + id_product int, + id_t2 int, + KEY id_t2 (id_t2), + KEY id_product (id_product) +) ENGINE=MyISAM; + +INSERT INTO `t2` VALUES (11,31), (11,31), (11,31), (11,32), (11,32), +(11,32), (10,26), (11,32), (10,28), (11,32), (10,29), (11,33), (10,26), +(11,33), (10,27), (9,23), (11,32), (10,26), (8,18), (7,15), (11,32), +(10,28), (11,32), (10,28), (11,32), (10,29), (11,32), (10,29), (8,19), +(7,16), (8,18), (7,16), (8,20), (7,16), (11,32), (10,28), (8,19), +(7,16), (8,20), (7,16), (11,32), (10,29), (8,19), (7,16), (8,20), +(7,16), (10,27), (9,23), (10,27), (9,23), (10,27), (9,23), (11,32), +(10,27), (11,32), (10,27), (8,18), (7,15), (10,26), (9,24), (8,19), +(7,16), (10,26), (9,23), (8,19), (7,16), (8,18), (7,16), (8,18), (7,16), +(9,23), (8,18), (9,23), (8,19), (7,16), (7,16), (8,19), (7,16), (11,31), +(10,27), (9,24), (11,31), (10,27), (9,23), (8,19), (11,31), (10,26), (9,24), +(8,19), (11,31), (10,26), (9,25), (8,18), (11,31), (10,26), (9,23), (8,19), +(11,31), (10,26), (9,23), (8,18), (11,31), (10,30), (9,23), (8,18), (11,31), +(10,30), (9,23), (8,19), (11,31), (10,26), (9,25), (8,19), (8,21), (11,32), +(10,26), (9,22), (8,19), (11,32), (10,26), (9,22), (8,18), (11,32), (10,26), +(9,22), (8,20), (11,33), (10,26), (9,22), (8,19), (11,33), (10,26), (9,22), +(8,18), (11,33), (10,26), (9,22), (8,20), (11,32), (10,26), (9,24), (8,19), +(11,32), (10,26), (9,25), (8,19), (11,32), (10,26), (9,25), (8,18), (11,32), +(10,26), (9,23), (8,18), (11,32), (10,30), (9,23), (8,18), (11,32), (10,30), +(9,23), (8,19), (11,32), (10,26), (9,23), (8,19), (11,32), (10,27), (9,23), +(11,32), (10,27), (9,23), (11,32), (10,27), (9,23), (10,26), (9,22), (8,19), +(7,15), (10,26), (9,22), (8,20), (7,15), (10,26), (9,22), (8,18), (7,15), +(8,19), (10,26), (10,26), (11,33), (10,26), (11,33), (10,26), (11,33), +(10,27), (11,33), (10,27), (11,31), (10,26), (11,31), (10,26), (8,18), +(7,15), (9,23), (9,23), (9,24), (8,21), (7,15), (7,15), (7,15), (7,15), +(7,15), (7,15), (7,15), (7,15), (7,15), (8,18), (7,17), (8,18), (7,17), (8,19), (8,19); + +CREATE TABLE t3 ( + id_product int unsigned, + PRIMARY KEY (id_product) +) ENGINE=MyISAM; + +INSERT INTO t3 VALUES +(102),(103),(104),(105),(106),(107),(108),(109),(110), +(315371),(315373),(315374),(315375),(315376),(315377), +(315378),(315379),(315380); + +CREATE TABLE t4 ( + id_product int not null, + id_shop int, + PRIMARY KEY (id_product,id_shop) +) ENGINE=MyISAM ; + +INSERT INTO t4 VALUES +(202,1),(201,1),(200,1),(199,1),(198,1),(197,1),(196,1),(195,1), +(194,1),(193,1),(192,1),(191,1),(190,1),(189,1),(188,1),(187,1), +(186,1),(185,1),(184,1),(183,1),(182,1),(181,1),(179,1),(178,1), +(177,1),(176,1),(126,1),(315380,1); + +CREATE TABLE t5 (id_product int) ENGINE=MyISAM; +INSERT INTO `t5` VALUES +(652),(668),(669),(670),(671),(673),(674),(675),(676), +(677),(679),(680),(681),(682),(683),(684),(685),(686); + +explain +SELECT * FROM t3 + JOIN t4 ON (t4.id_product = t3.id_product AND t4.id_shop = 1) + JOIN t1 ON (t1.id_product = t3.id_product) +LEFT JOIN t5 ON 
(t5.id_product = t3.id_product) +WHERE 1=1 +AND t3.id_product IN (SELECT id_product FROM t2 t2_1 WHERE t2_1.id_t2 = 32) +AND t3.id_product IN (SELECT id_product FROM t2 t2_2 WHERE t2_2.id_t2 = 15) +AND t3.id_product IN (SELECT id_product FROM t2 t2_3 WHERE t2_3.id_t2 = 18 OR t2_3.id_t2 = 19) +AND t3.id_product IN (SELECT id_product FROM t2 t2_4 WHERE t2_4.id_t2 = 34 OR t2_4.id_t2 = 23) +AND t3.id_product IN (SELECT id_product FROM t2 t2_5 WHERE t2_5.id_t2 = 29 OR t2_5.id_t2 = 28 OR t2_5.id_t2 = 26); + +drop table t1,t2,t3,t4,t5; diff --git a/mysys/my_symlink.c b/mysys/my_symlink.c index 06f6a29e4a0..8580fac3595 100644 --- a/mysys/my_symlink.c +++ b/mysys/my_symlink.c @@ -244,7 +244,7 @@ const char *my_open_parent_dir_nosymlinks(const char *pathname, int *pdfd) return pathname + (s - buf); } - fd = openat(dfd, s, O_NOFOLLOW | O_PATH); + fd = openat(dfd, s, O_NOFOLLOW | O_PATH | O_CLOEXEC); if (fd < 0) goto err; diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt index fb09e459f87..e68284a7d58 100644 --- a/scripts/CMakeLists.txt +++ b/scripts/CMakeLists.txt @@ -331,6 +331,22 @@ ELSE() COMPONENT ${${file}_COMPONENT} ) ENDFOREACH() + SET (wsrep_sst_rsync_wan ${CMAKE_CURRENT_BINARY_DIR}/wsrep_sst_rsync_wan) + ADD_CUSTOM_COMMAND( + OUTPUT ${wsrep_sst_rsync_wan} + COMMAND ${CMAKE_COMMAND} ARGS -E create_symlink + wsrep_sst_rsync + wsrep_sst_rsync_wan + ) + ADD_CUSTOM_TARGET(symlink_wsrep_sst_rsync + ALL + DEPENDS ${wsrep_sst_rsync_wan} + ) + INSTALL( + FILES ${wsrep_sst_rsync_wan} + DESTINATION ${INSTALL_BINDIR} + COMPONENT Server + ) FOREACH(file ${WSREP_SOURCE}) CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/${file}.sh @@ -344,7 +360,6 @@ ELSE() COMPONENT ${${file}_COMPONENT} ) ENDFOREACH() - ENDIF() # Install libgcc as mylibgcc.a diff --git a/scripts/wsrep_sst_common.sh b/scripts/wsrep_sst_common.sh index edf7b2d71aa..3367f3e244e 100644 --- a/scripts/wsrep_sst_common.sh +++ b/scripts/wsrep_sst_common.sh @@ -35,12 +35,24 @@ case "$1" in # # Break address string into host:port/path parts # - readonly WSREP_SST_OPT_HOST=${WSREP_SST_OPT_ADDR%%[:/]*} - readonly WSREP_SST_OPT_HOST_UNESCAPED=`echo "$WSREP_SST_OPT_HOST"|awk '{if(match($0,/^\[.*\]$/)) $0=substr($0,2,RLENGTH-2);print}'` - readonly WSREP_SST_OPT_ADDR_PORT=$(echo $WSREP_SST_OPT_ADDR | \ - cut -d ']' -f 2 | cut -s -d ':' -f 2 | cut -d '/' -f 1) + case "${WSREP_SST_OPT_ADDR}" in + \[*) + # IPv6 + addr_no_bracket=${WSREP_SST_OPT_ADDR#\[} + readonly WSREP_SST_OPT_HOST_UNESCAPED=${addr_no_bracket%%\]*} + readonly WSREP_SST_OPT_HOST="[${WSREP_SST_OPT_HOST_UNESCAPED}]" + ;; + *) + readonly WSREP_SST_OPT_HOST=${WSREP_SST_OPT_ADDR%%[:/]*} + readonly WSREP_SST_OPT_HOST_UNESCAPED=$WSREP_SST_OPT_HOST + ;; + esac + remain=${WSREP_SST_OPT_ADDR#${WSREP_SST_OPT_HOST}} + remain=${remain#:} + readonly WSREP_SST_OPT_ADDR_PORT=${remain%%/*} + remain=${remain#*/} + readonly WSREP_SST_OPT_MODULE=${remain%%/*} readonly WSREP_SST_OPT_PATH=${WSREP_SST_OPT_ADDR#*/} - readonly WSREP_SST_OPT_MODULE=${WSREP_SST_OPT_PATH%%/*} remain=${WSREP_SST_OPT_PATH#*/} readonly WSREP_SST_OPT_LSN=${remain%%/*} remain=${remain#*/} @@ -120,10 +132,10 @@ done readonly WSREP_SST_OPT_BYPASS readonly WSREP_SST_OPT_BINLOG -if [ -n "${WSREP_SST_OPT_ADDR:-}" ]; then +if [ -n "${WSREP_SST_OPT_ADDR_PORT:-}" ]; then if [ -n "${WSREP_SST_OPT_PORT:-}" ]; then - if [ -n "$WSREP_SST_OPT_ADDR_PORT" -a "$WSREP_SST_OPT_PORT" != "$WSREP_SST_OPT_ADDR_PORT" ]; then - wsrep_log_error "port in --port=$WSREP_SST_OPT_PORT differs from port in --address=$WSREP_SST_OPT_ADDR" + if [ "$WSREP_SST_OPT_PORT" 
!= "$WSREP_SST_OPT_ADDR_PORT" ]; then + echo "WSREP_SST: [ERROR] port in --port=$WSREP_SST_OPT_PORT differs from port in --address=$WSREP_SST_OPT_ADDR" >&2 exit 2 fi else diff --git a/scripts/wsrep_sst_mysqldump.sh b/scripts/wsrep_sst_mysqldump.sh index cfe4838fbc4..faa3f10639b 100644 --- a/scripts/wsrep_sst_mysqldump.sh +++ b/scripts/wsrep_sst_mysqldump.sh @@ -108,11 +108,6 @@ then DROP PREPARE stmt;" fi -# Retrieve the donor's @@global.gtid_binlog_state. -GTID_BINLOG_STATE=$(echo "SHOW GLOBAL VARIABLES LIKE 'gtid_binlog_state'" |\ -$MYSQL_CLIENT $AUTH -S$WSREP_SST_OPT_SOCKET --disable-reconnect --connect_timeout=10 |\ -tail -1 | awk -F ' ' '{ print $2 }') - MYSQL="$MYSQL_CLIENT $WSREP_SST_OPT_CONF "\ "$AUTH -h${WSREP_SST_OPT_HOST_UNESCAPED} "\ "-P$WSREP_SST_OPT_PORT --disable-reconnect --connect_timeout=10" @@ -126,12 +121,16 @@ tail -1 | awk -F ' ' '{ print $2 }') SERVER_VERSION=$(echo "set statement wsrep_sync_wait=0 for SHOW VARIABLES LIKE 'version'" | $MYSQL |\ tail -1 | awk -F ' ' '{ print $2 }') +# Retrieve the donor's @@global.gtid_binlog_state. +GTID_BINLOG_STATE=$(echo "SHOW GLOBAL VARIABLES LIKE 'gtid_binlog_state'" | $MYSQL |\ +tail -1 | awk -F ' ' '{ print $2 }') + RESET_MASTER="" SET_GTID_BINLOG_STATE="" SQL_LOG_BIN_OFF="" # Safety check -if echo $SERVER_VERSION | grep '^10.' > /dev/null +if [ "${SERVER_VERSION%%.*}" != '5' ] then # If binary logging is enabled on the joiner node, we need to copy donor's # gtid_binlog_state to joiner. In order to do that, a RESET MASTER must be @@ -146,7 +145,7 @@ then fi # NOTE: we don't use --routines here because we're dumping mysql.proc table -MYSQLDUMP="$MYSQLDUMP $AUTH -S$WSREP_SST_OPT_SOCKET \ +MYSQLDUMP="$MYSQLDUMP $WSREP_SST_OPT_CONF $AUTH -S$WSREP_SST_OPT_SOCKET \ --add-drop-database --add-drop-table --skip-add-locks --create-options \ --disable-keys --extended-insert --skip-lock-tables --quick --set-charset \ --skip-comments --flush-privileges --all-databases --events" diff --git a/sql-common/client.c b/sql-common/client.c index 90a2b2a33c3..81244de7a74 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -2522,6 +2522,10 @@ static int send_client_reply_packet(MCPVIO_EXT *mpvio, mysql->client_flag|= CLIENT_MULTI_RESULTS; #ifdef HAVE_OPENSSL + if (mysql->options.ssl_key || mysql->options.ssl_cert || + mysql->options.ssl_ca || mysql->options.ssl_capath || + mysql->options.ssl_cipher) + mysql->options.use_ssl = 1; if (mysql->options.use_ssl) mysql->client_flag|= CLIENT_SSL; #endif /* HAVE_OPENSSL */ diff --git a/sql/events.cc b/sql/events.cc index 07cfdf1028a..69894c85f68 100644 --- a/sql/events.cc +++ b/sql/events.cc @@ -462,8 +462,7 @@ Events::update_event(THD *thd, Event_parse_data *parse_data, if (check_access(thd, EVENT_ACL, parse_data->dbname.str, NULL, NULL, 0, 0)) DBUG_RETURN(TRUE); - - WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL) + WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL); if (lock_object_name(thd, MDL_key::EVENT, parse_data->dbname.str, parse_data->name.str)) @@ -592,8 +591,7 @@ Events::drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name, bool if_exists) if (check_access(thd, EVENT_ACL, dbname.str, NULL, NULL, 0, 0)) DBUG_RETURN(TRUE); - - WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL) + WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL); /* Turn off row binlogging of this statement and use statement-based so @@ -619,7 +617,7 @@ Events::drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name, bool if_exists) #ifdef WITH_WSREP error: DBUG_RETURN(TRUE); -#endif +#endif /* WITH_WSREP */ 
} diff --git a/sql/handler.cc b/sql/handler.cc index 06c407b572a..87803846010 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -3654,7 +3654,7 @@ void handler::print_error(int error, myf errflag) */ errflag|= ME_NOREFRESH; } - } + } /* if we got an OS error from a file-based engine, specify a path of error */ if (error < HA_ERR_FIRST && bas_ext()[0]) @@ -4383,6 +4383,7 @@ handler::ha_create_partitioning_metadata(const char *name, DBUG_ASSERT(m_lock_type == F_UNLCK || (!old_name && strcmp(name, table_share->path.str))); + return create_partitioning_metadata(name, old_name, action_flag); } @@ -6185,6 +6186,12 @@ void ha_fake_trx_id(THD *thd) DBUG_VOID_RETURN; } + if (thd->wsrep_ws_handle.trx_id != WSREP_UNDEFINED_TRX_ID) + { + WSREP_DEBUG("fake trx id skipped: %lu", thd->wsrep_ws_handle.trx_id); + DBUG_VOID_RETURN; + } + /* Try statement transaction if standard one is not set. */ THD_TRANS *trans= (thd->transaction.all.ha_list) ? &thd->transaction.all : &thd->transaction.stmt; diff --git a/sql/item.cc b/sql/item.cc index 4bc6fe7e5bc..a501ff6fdc6 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -1360,7 +1360,7 @@ bool Item::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate) ltime, fuzzydate, field_name_or_null())) goto err; - break; + return null_value= false; } case REAL_RESULT: { @@ -1368,7 +1368,7 @@ bool Item::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate) if (null_value || double_to_datetime_with_warn(value, ltime, fuzzydate, field_name_or_null())) goto err; - break; + return null_value= false; } case DECIMAL_RESULT: { @@ -1377,7 +1377,7 @@ bool Item::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate) decimal_to_datetime_with_warn(res, ltime, fuzzydate, field_name_or_null())) goto err; - break; + return null_value= false; } case STRING_RESULT: { @@ -1387,15 +1387,20 @@ bool Item::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate) str_to_datetime_with_warn(res->charset(), res->ptr(), res->length(), ltime, fuzzydate)) goto err; - break; + return null_value= false; } default: + null_value= true; DBUG_ASSERT(0); } - return null_value= 0; - err: + return null_value|= make_zero_date(ltime, fuzzydate); +} + + +bool Item::make_zero_date(MYSQL_TIME *ltime, ulonglong fuzzydate) +{ /* if the item was not null and convertion failed, we return a zero date if allowed, otherwise - null. @@ -1417,7 +1422,7 @@ err: */ ltime->time_type= MYSQL_TIMESTAMP_TIME; } - return null_value|= !(fuzzydate & TIME_FUZZY_DATES); + return !(fuzzydate & TIME_FUZZY_DATES); } bool Item::get_seconds(ulonglong *sec, ulong *sec_part) @@ -3366,6 +3371,15 @@ my_decimal *Item_null::val_decimal(my_decimal *decimal_value) } +bool Item_null::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate) +{ + // following assert is redundant, because fixed=1 assigned in constructor + DBUG_ASSERT(fixed == 1); + make_zero_date(ltime, fuzzydate); + return (null_value= true); +} + + Item *Item_null::safe_charset_converter(THD *thd, CHARSET_INFO *tocs) { return this; diff --git a/sql/item.h b/sql/item.h index 5ed45c69e96..ce07f993dc4 100644 --- a/sql/item.h +++ b/sql/item.h @@ -705,6 +705,12 @@ protected: bool fixed_length, bool set_blob_packlength); Field *create_tmp_field(bool group, TABLE *table, uint convert_int_length); + /* + This method is used if the item was not null but convertion to + TIME/DATE/DATETIME failed. We return a zero date if allowed, + otherwise - null. 
+ */ + bool make_zero_date(MYSQL_TIME *ltime, ulonglong fuzzydate); void push_note_converted_to_negative_complement(THD *thd); void push_note_converted_to_positive_complement(THD *thd); @@ -2766,6 +2772,7 @@ public: longlong val_int(); String *val_str(String *str); my_decimal *val_decimal(my_decimal *); + bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate); int save_in_field(Field *field, bool no_conversions); int save_safe_in_field(Field *field); bool send(Protocol *protocol, String *str); diff --git a/sql/log.cc b/sql/log.cc index 973aecb16e8..923810aa945 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -2565,7 +2565,7 @@ bool MYSQL_LOG::open( File file= -1; my_off_t seek_offset; bool is_fifo = false; - int open_flags= O_CREAT | O_BINARY; + int open_flags= O_CREAT | O_BINARY | O_CLOEXEC; DBUG_ENTER("MYSQL_LOG::open"); DBUG_PRINT("enter", ("log_type: %d", (int) log_type_arg)); @@ -3288,7 +3288,7 @@ bool MYSQL_BIN_LOG::open_index_file(const char *index_file_name_arg, ".index", opt); if ((index_file_nr= mysql_file_open(m_key_file_log_index, index_file_name, - O_RDWR | O_CREAT | O_BINARY, + O_RDWR | O_CREAT | O_BINARY | O_CLOEXEC, MYF(MY_WME))) < 0 || mysql_file_sync(index_file_nr, MYF(MY_WME)) || init_io_cache(&index_file, index_file_nr, @@ -8966,14 +8966,14 @@ int TC_LOG_MMAP::open(const char *opt_name) tc_log_page_size= my_getpagesize(); fn_format(logname,opt_name,mysql_data_home,"",MY_UNPACK_FILENAME); - if ((fd= mysql_file_open(key_file_tclog, logname, O_RDWR, MYF(0))) < 0) + if ((fd= mysql_file_open(key_file_tclog, logname, O_RDWR | O_CLOEXEC, MYF(0))) < 0) { if (my_errno != ENOENT) goto err; if (using_heuristic_recover()) return 1; if ((fd= mysql_file_create(key_file_tclog, logname, CREATE_MODE, - O_RDWR, MYF(MY_WME))) < 0) + O_RDWR | O_CLOEXEC, MYF(MY_WME))) < 0) goto err; inited=1; file_length= opt_tc_log_size; diff --git a/sql/log_event.cc b/sql/log_event.cc index 7de9df9ebe6..4b70c4124f7 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -5216,22 +5216,6 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi, else thd->variables.collation_database= thd->db_charset; - { - const CHARSET_INFO *cs= thd->charset(); - /* - We cannot ask for parsing a statement using a character set - without state_maps (parser internal data). - */ - if (!cs->state_map) - { - rli->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, - ER_THD(thd, ER_SLAVE_FATAL_ERROR), - "character_set cannot be parsed"); - thd->is_slave_error= true; - goto end; - } - } - /* Record any GTID in the same transaction, so slave state is transactionally consistent. @@ -13036,6 +13020,16 @@ void issue_long_find_row_warning(Log_event_type type, } +/* + HA_ERR_KEY_NOT_FOUND is a fatal error normally, but it's an expected + error in speculate optimistic mode, so use something non-fatal instead +*/ +static int row_not_found_error(rpl_group_info *rgi) +{ + return rgi->speculation != rpl_group_info::SPECULATE_OPTIMISTIC + ? HA_ERR_KEY_NOT_FOUND : HA_ERR_RECORD_CHANGED; +} + /** Locate the current row in event's table. 
@@ -13123,8 +13117,8 @@ int Rows_log_event::find_row(rpl_group_info *rgi) if (error) { DBUG_PRINT("info",("rnd_pos returns error %d",error)); - if (error == HA_ERR_RECORD_DELETED) - error= HA_ERR_KEY_NOT_FOUND; + if (error == HA_ERR_RECORD_DELETED || error == HA_ERR_KEY_NOT_FOUND) + error= row_not_found_error(rgi); table->file->print_error(error, MYF(0)); } DBUG_RETURN(error); @@ -13189,8 +13183,8 @@ int Rows_log_event::find_row(rpl_group_info *rgi) HA_READ_KEY_EXACT))) { DBUG_PRINT("info",("no record matching the key found in the table")); - if (error == HA_ERR_RECORD_DELETED) - error= HA_ERR_KEY_NOT_FOUND; + if (error == HA_ERR_RECORD_DELETED || error == HA_ERR_KEY_NOT_FOUND) + error= row_not_found_error(rgi); table->file->print_error(error, MYF(0)); table->file->ha_index_end(); goto end; diff --git a/sql/mysql_install_db.cc b/sql/mysql_install_db.cc index 5960346c60e..17e83126bc9 100644 --- a/sql/mysql_install_db.cc +++ b/sql/mysql_install_db.cc @@ -199,7 +199,7 @@ int main(int argc, char **argv) die("database creation failed"); } - printf("Creation of the database was successfull"); + printf("Creation of the database was successful"); return 0; } diff --git a/sql/mysql_upgrade_service.cc b/sql/mysql_upgrade_service.cc index 36de05e54e4..9bb60809205 100644 --- a/sql/mysql_upgrade_service.cc +++ b/sql/mysql_upgrade_service.cc @@ -146,6 +146,11 @@ static void die(const char *fmt, ...) exit(1); } +#define WRITE_LOG(fmt,...) {\ + char log_buf[1024]; \ + snprintf(log_buf,sizeof(log_buf), fmt, __VA_ARGS__);\ + WriteFile(logfile_handle,log_buf, strlen(log_buf), 0 , 0);\ +} /* spawn-like function to run subprocesses. @@ -187,17 +192,22 @@ static intptr_t run_tool(int wait_flag, const char *program,...) { char tmpdir[FN_REFLEN]; GetTempPath(FN_REFLEN, tmpdir); - sprintf_s(logfile_path, "%s\\mysql_upgrade_service.%s.log", tmpdir, + sprintf_s(logfile_path, "%smysql_upgrade_service.%s.log", tmpdir, opt_service); - logfile_handle= CreateFile(logfile_path, GENERIC_WRITE, FILE_SHARE_READ, - NULL, TRUNCATE_EXISTING, 0, NULL); - if (!logfile_handle) + SECURITY_ATTRIBUTES attr= {0}; + attr.nLength= sizeof(SECURITY_ATTRIBUTES); + attr.bInheritHandle= TRUE; + logfile_handle= CreateFile(logfile_path, FILE_APPEND_DATA, + FILE_SHARE_READ|FILE_SHARE_WRITE, &attr, CREATE_ALWAYS, 0, NULL); + if (logfile_handle == INVALID_HANDLE_VALUE) { die("Cannot open log file %s, windows error %u", logfile_path, GetLastError()); } } + WRITE_LOG("Executing %s\r\n", cmdline); + /* Start child process */ STARTUPINFO si= {0}; si.cb= sizeof(si); @@ -458,7 +468,7 @@ int main(int argc, char **argv) log("Phase 3/8: Starting mysqld for upgrade"); mysqld_process= (HANDLE)run_tool(P_NOWAIT, mysqld_path, defaults_file_param, "--skip-networking", "--skip-grant-tables", - "--enable-named-pipe", socket_param, NULL); + "--enable-named-pipe", socket_param,"--skip-slave-start", NULL); if (mysqld_process == INVALID_HANDLE_VALUE) { diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc index 90416e691d4..36d0116696a 100644 --- a/sql/opt_subselect.cc +++ b/sql/opt_subselect.cc @@ -5947,6 +5947,7 @@ bool JOIN::choose_tableless_subquery_plan() functions produce empty subquery result. There is no need to further rewrite the subquery because it will not be executed at all. */ + exec_const_cond= 0; return FALSE; } @@ -5978,6 +5979,6 @@ bool JOIN::choose_tableless_subquery_plan() tmp_having= having; } } - exec_const_cond= conds; + exec_const_cond= zero_result_cause ? 
0 : conds; return FALSE; } diff --git a/sql/set_var.h b/sql/set_var.h index 8dd2cb073b7..5a7f8c2d8a3 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -438,6 +438,7 @@ int sys_var_init(); uint sys_var_elements(); int sys_var_add_options(DYNAMIC_ARRAY *long_options, int parse_flags); void sys_var_end(void); +bool check_has_super(sys_var *self, THD *thd, set_var *var); #endif diff --git a/sql/slave.cc b/sql/slave.cc index 713a668bfe8..7e52822ca51 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -5295,7 +5295,7 @@ err_during_init: */ if (WSREP_ON && wsrep_node_dropped && wsrep_restart_slave) { - if (wsrep_ready) + if (wsrep_ready_get()) { WSREP_INFO("Slave error due to node temporarily non-primary" "SQL slave will continue"); diff --git a/sql/sql_alter.cc b/sql/sql_alter.cc index 3a9ba07d7a1..dbb138ed9ab 100644 --- a/sql/sql_alter.cc +++ b/sql/sql_alter.cc @@ -304,9 +304,6 @@ bool Sql_cmd_alter_table::execute(THD *thd) create_info.data_file_name= create_info.index_file_name= NULL; thd->enable_slow_log= opt_log_slow_admin_statements; -#ifdef WITH_PARTITION_STORAGE_ENGINE - thd->work_part_info= 0; -#endif #ifdef WITH_WSREP if ((!thd->is_current_stmt_binlog_format_row() || diff --git a/sql/sql_analyze_stmt.cc b/sql/sql_analyze_stmt.cc index ac3797aae60..8e67267f6a0 100644 --- a/sql/sql_analyze_stmt.cc +++ b/sql/sql_analyze_stmt.cc @@ -45,7 +45,7 @@ void Filesort_tracker::print_json_members(Json_writer *writer) else if (r_limit == 0) writer->add_str(varied_str); else - writer->add_ll((longlong) rint(r_limit/get_r_loops())); + writer->add_ll((longlong) rint(r_limit)); } writer->add_member("r_used_priority_queue"); diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 9dfaf82758c..83ab24fc27d 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -5345,6 +5345,9 @@ find_field_in_natural_join(THD *thd, TABLE_LIST *table_ref, const char *name, column reference. See create_view_field() for details. */ item= nj_col->create_item(thd); + if (!item) + DBUG_RETURN(NULL); + /* *ref != NULL means that *ref contains the item that we need to replace. If the item was aliased by the user, set the alias to diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 7e15eff4dd4..8590a0fe179 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -515,103 +515,6 @@ extern "C" } -/** - Dumps a text description of a thread, its security context - (user, host) and the current query. - - @param thd thread context - @param buffer pointer to preferred result buffer - @param length length of buffer - @param max_query_len how many chars of query to copy (0 for all) - - @return Pointer to string -*/ - -extern "C" -char *thd_get_error_context_description(THD *thd, char *buffer, - unsigned int length, - unsigned int max_query_len) -{ - String str(buffer, length, &my_charset_latin1); - const Security_context *sctx= &thd->main_security_ctx; - char header[256]; - int len; - - mysql_mutex_lock(&LOCK_thread_count); - - /* - The pointers thd->query and thd->proc_info might change since they are - being modified concurrently. This is acceptable for proc_info since its - values doesn't have to very accurate and the memory it points to is static, - but we need to attempt a snapshot on the pointer values to avoid using NULL - values. The pointer to thd->query however, doesn't point to static memory - and has to be protected by thd->LOCK_thd_data or risk pointing to - uninitialized memory. 
- */ - const char *proc_info= thd->proc_info; - - len= my_snprintf(header, sizeof(header), - "MySQL thread id %lu, OS thread handle %lu, query id %lu", - thd->thread_id, (ulong) thd->real_id, (ulong) thd->query_id); - str.length(0); - str.append(header, len); - - if (sctx->host) - { - str.append(' '); - str.append(sctx->host); - } - - if (sctx->ip) - { - str.append(' '); - str.append(sctx->ip); - } - - if (sctx->user) - { - str.append(' '); - str.append(sctx->user); - } - - if (proc_info) - { - str.append(' '); - str.append(proc_info); - } - - /* Don't wait if LOCK_thd_data is used as this could cause a deadlock */ - if (!mysql_mutex_trylock(&thd->LOCK_thd_data)) - { - if (thd->query()) - { - if (max_query_len < 1) - len= thd->query_length(); - else - len= MY_MIN(thd->query_length(), max_query_len); - str.append('\n'); - str.append(thd->query(), len); - } - mysql_mutex_unlock(&thd->LOCK_thd_data); - } - mysql_mutex_unlock(&LOCK_thread_count); - - if (str.c_ptr_safe() == buffer) - return buffer; - - /* - We have to copy the new string to the destination buffer because the string - was reallocated to a larger buffer to be able to fit. - */ - DBUG_ASSERT(buffer != NULL); - length= MY_MIN(str.length(), length-1); - memcpy(buffer, str.c_ptr_quick(), length); - /* Make sure that the new string is null terminated */ - buffer[length]= '\0'; - return buffer; -} - - #if MARIA_PLUGIN_INTERFACE_VERSION < 0x0200 /** TODO: This function is for API compatibility, remove it eventually. diff --git a/sql/sql_class.h b/sql/sql_class.h index a12015ecfe4..72cbd03cd1b 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -3119,6 +3119,7 @@ public: query_id_t first_query_id; } binlog_evt_union; + mysql_cond_t COND_wsrep_thd; /** Internal parser state. Note that since the parser is not re-entrant, we keep only one parser diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc index e2daad12eae..fe51f8e1d64 100644 --- a/sql/sql_explain.cc +++ b/sql/sql_explain.cc @@ -1641,7 +1641,7 @@ void Explain_table_access::print_explain_json(Explain_query *query, { /* Get r_filtered value from filesort */ if (pre_join_sort->tracker.get_r_loops()) - writer->add_double(pre_join_sort->tracker.get_r_filtered()); + writer->add_double(pre_join_sort->tracker.get_r_filtered()*100); else writer->add_null(); } @@ -2460,4 +2460,3 @@ void Explain_range_checked_fer::print_json(Json_writer *writer, writer->end_object(); } } - diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 2a3454703f4..4c98c9cefdb 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -936,6 +936,7 @@ SQL_HANDLER *mysql_ha_read_prepare(THD *thd, TABLE_LIST *tables, if (!(handler= mysql_ha_find_handler(thd, tables->alias))) DBUG_RETURN(0); tables->table= handler->table; // This is used by fix_fields + handler->table->pos_in_table_list= tables; if (mysql_ha_fix_cond_and_key(handler, mode, keyname, key_expr, cond, 1)) DBUG_RETURN(0); DBUG_RETURN(handler); diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index c24503677ca..bdb2d6caf25 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -4466,12 +4466,44 @@ bool select_create::send_eof() */ if (!table->s->tmp_table) { +#ifdef WITH_WSREP + if (WSREP_ON) + { + /* + append table level exclusive key for CTAS + */ + wsrep_key_arr_t key_arr= {0, 0}; + wsrep_prepare_keys_for_isolation(thd, + create_table->db, + create_table->table_name, + table_list, + &key_arr); + int rcode = wsrep->append_key( + wsrep, + &thd->wsrep_ws_handle, + key_arr.keys, //&wkey, + key_arr.keys_len, + WSREP_KEY_EXCLUSIVE, + false); + 
wsrep_keys_free(&key_arr); + if (rcode) { + DBUG_PRINT("wsrep", ("row key failed: %d", rcode)); + WSREP_ERROR("Appending table key for CTAS failed: %s, %d", + (wsrep_thd_query(thd)) ? + wsrep_thd_query(thd) : "void", rcode); + return true; + } + /* If commit fails, we should be able to reset the OK status. */ + thd->get_stmt_da()->set_overwrite_status(TRUE); + } +#endif /* WITH_WSREP */ trans_commit_stmt(thd); if (!(thd->variables.option_bits & OPTION_GTID_BEGIN)) trans_commit_implicit(thd); #ifdef WITH_WSREP if (WSREP_ON) { + thd->get_stmt_da()->set_overwrite_status(FALSE); mysql_mutex_lock(&thd->LOCK_thd_data); if (thd->wsrep_conflict_state != NO_CONFLICT) { diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index a5973b9b08d..db7c3b5a7dc 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -3406,6 +3406,7 @@ mysql_execute_command(THD *thd) { WSREP_SYNC_WAIT(thd, WSREP_SYNC_WAIT_BEFORE_SHOW); execute_show_status(thd, all_tables); + break; } case SQLCOM_SHOW_EXPLAIN: @@ -3811,7 +3812,6 @@ mysql_execute_command(THD *thd) } #ifdef WITH_PARTITION_STORAGE_ENGINE - thd->work_part_info= 0; { partition_info *part_info= thd->lex->part_info; if (part_info && !(part_info= part_info->get_clone(thd))) @@ -4464,7 +4464,7 @@ end_with_restore_list: case SQLCOM_INSERT_SELECT: { WSREP_SYNC_WAIT(thd, WSREP_SYNC_WAIT_BEFORE_INSERT_REPLACE); - select_result *sel_result; + select_insert *sel_result; bool explain= MY_TEST(lex->describe); DBUG_ASSERT(first_table == all_tables && first_table != 0); if (WSREP_CLIENT(thd) && diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 4c258acd487..58dd9c95f10 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -2132,8 +2132,7 @@ bool mysql_install_plugin(THD *thd, const LEX_STRING *name, tables.init_one_table("mysql", 5, "plugin", 6, "plugin", TL_WRITE); if (!opt_noacl && check_table_access(thd, INSERT_ACL, &tables, FALSE, 1, FALSE)) DBUG_RETURN(TRUE); - - WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL) + WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL); /* need to open before acquiring LOCK_plugin or it will deadlock */ if (! (table = open_ltable(thd, &tables, TL_WRITE, diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 10e9e65c0c4..1fa21437266 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1427,10 +1427,18 @@ JOIN::optimize_inner() if (cond_value == Item::COND_FALSE || having_value == Item::COND_FALSE || (!unit->select_limit_cnt && !(select_options & OPTION_FOUND_ROWS))) { /* Impossible cond */ - DBUG_PRINT("info", (having_value == Item::COND_FALSE ? - "Impossible HAVING" : "Impossible WHERE")); - zero_result_cause= having_value == Item::COND_FALSE ? - "Impossible HAVING" : "Impossible WHERE"; + if (unit->select_limit_cnt) + { + DBUG_PRINT("info", (having_value == Item::COND_FALSE ? + "Impossible HAVING" : "Impossible WHERE")); + zero_result_cause= having_value == Item::COND_FALSE ? 
+ "Impossible HAVING" : "Impossible WHERE"; + } + else + { + DBUG_PRINT("info", ("Zero limit")); + zero_result_cause= "Zero limit"; + } table_count= top_join_tab_count= 0; error= 0; goto setup_subq_exit; @@ -9916,7 +9924,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) table_map current_map; i= join->const_tables; for (tab= first_depth_first_tab(join); tab; - tab= next_depth_first_tab(join, tab), i++) + tab= next_depth_first_tab(join, tab)) { bool is_hj; @@ -10401,6 +10409,8 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) } first_inner_tab= first_inner_tab->first_upper; } + if (!tab->bush_children) + i++; } } DBUG_RETURN(0); @@ -25670,21 +25680,18 @@ void JOIN::set_allowed_join_cache_types() void JOIN::save_query_plan(Join_plan_state *save_to) { - if (keyuse.elements) - { - DYNAMIC_ARRAY tmp_keyuse; - /* Swap the current and the backup keyuse internal arrays. */ - tmp_keyuse= keyuse; - keyuse= save_to->keyuse; /* keyuse is reset to an empty array. */ - save_to->keyuse= tmp_keyuse; + DYNAMIC_ARRAY tmp_keyuse; + /* Swap the current and the backup keyuse internal arrays. */ + tmp_keyuse= keyuse; + keyuse= save_to->keyuse; /* keyuse is reset to an empty array. */ + save_to->keyuse= tmp_keyuse; - for (uint i= 0; i < table_count; i++) - { - save_to->join_tab_keyuse[i]= join_tab[i].keyuse; - join_tab[i].keyuse= NULL; - save_to->join_tab_checked_keys[i]= join_tab[i].checked_keys; - join_tab[i].checked_keys.clear_all(); - } + for (uint i= 0; i < table_count; i++) + { + save_to->join_tab_keyuse[i]= join_tab[i].keyuse; + join_tab[i].keyuse= NULL; + save_to->join_tab_checked_keys[i]= join_tab[i].checked_keys; + join_tab[i].checked_keys.clear_all(); } memcpy((uchar*) save_to->best_positions, (uchar*) best_positions, sizeof(POSITION) * (table_count + 1)); @@ -25722,20 +25729,17 @@ void JOIN::reset_query_plan() void JOIN::restore_query_plan(Join_plan_state *restore_from) { - if (restore_from->keyuse.elements) - { - DYNAMIC_ARRAY tmp_keyuse; - tmp_keyuse= keyuse; - keyuse= restore_from->keyuse; - restore_from->keyuse= tmp_keyuse; - - for (uint i= 0; i < table_count; i++) - { - join_tab[i].keyuse= restore_from->join_tab_keyuse[i]; - join_tab[i].checked_keys= restore_from->join_tab_checked_keys[i]; - } + DYNAMIC_ARRAY tmp_keyuse; + tmp_keyuse= keyuse; + keyuse= restore_from->keyuse; + restore_from->keyuse= tmp_keyuse; + for (uint i= 0; i < table_count; i++) + { + join_tab[i].keyuse= restore_from->join_tab_keyuse[i]; + join_tab[i].checked_keys= restore_from->join_tab_checked_keys[i]; } + memcpy((uchar*) best_positions, (uchar*) restore_from->best_positions, sizeof(POSITION) * (table_count + 1)); /* Restore SJM nests */ diff --git a/sql/sql_show.cc b/sql/sql_show.cc index f8448c7328f..b9c90f7fb4b 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -2522,16 +2522,16 @@ static const char *thread_state_info(THD *tmp) else return "Reading from net"; } - else +#else + if (tmp->get_command() == COM_SLEEP) + return ""; #endif - { - if (tmp->proc_info) - return tmp->proc_info; - else if (tmp->mysys_var && tmp->mysys_var->current_cond) - return "Waiting on cond"; - else - return NULL; - } + if (tmp->proc_info) + return tmp->proc_info; + else if (tmp->mysys_var && tmp->mysys_var->current_cond) + return "Waiting on cond"; + else + return NULL; } void mysqld_list_processes(THD *thd,const char *user, bool verbose) @@ -9899,3 +9899,88 @@ static void get_cs_converted_string_value(THD *thd, return; } #endif + +/** + Dumps a text description of a thread, its security context + (user, host) and 
the current query. + + @param thd thread context + @param buffer pointer to preferred result buffer + @param length length of buffer + @param max_query_len how many chars of query to copy (0 for all) + + @return Pointer to string +*/ + +extern "C" +char *thd_get_error_context_description(THD *thd, char *buffer, + unsigned int length, + unsigned int max_query_len) +{ + String str(buffer, length, &my_charset_latin1); + const Security_context *sctx= &thd->main_security_ctx; + char header[256]; + int len; + + mysql_mutex_lock(&LOCK_thread_count); + + len= my_snprintf(header, sizeof(header), + "MySQL thread id %lu, OS thread handle %lu, query id %lu", + thd->thread_id, (ulong) thd->real_id, (ulong) thd->query_id); + str.length(0); + str.append(header, len); + + if (sctx->host) + { + str.append(' '); + str.append(sctx->host); + } + + if (sctx->ip) + { + str.append(' '); + str.append(sctx->ip); + } + + if (sctx->user) + { + str.append(' '); + str.append(sctx->user); + } + + /* Don't wait if LOCK_thd_data is used as this could cause a deadlock */ + if (!mysql_mutex_trylock(&thd->LOCK_thd_data)) + { + if (const char *info= thread_state_info(thd)) + { + str.append(' '); + str.append(info); + } + + if (thd->query()) + { + if (max_query_len < 1) + len= thd->query_length(); + else + len= MY_MIN(thd->query_length(), max_query_len); + str.append('\n'); + str.append(thd->query(), len); + } + mysql_mutex_unlock(&thd->LOCK_thd_data); + } + mysql_mutex_unlock(&LOCK_thread_count); + + if (str.c_ptr_safe() == buffer) + return buffer; + + /* + We have to copy the new string to the destination buffer because the string + was reallocated to a larger buffer to be able to fit. + */ + DBUG_ASSERT(buffer != NULL); + length= MY_MIN(str.length(), length-1); + memcpy(buffer, str.c_ptr_quick(), length); + /* Make sure that the new string is null terminated */ + buffer[length]= '\0'; + return buffer; +} diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 9a87b0e7461..73f62f2f2c8 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -6027,8 +6027,11 @@ drop_create_field: continue; /* Check if the table already has a PRIMARY KEY */ - bool dup_primary_key= key->type == Key::PRIMARY && - table->s->primary_key != MAX_KEY; + bool dup_primary_key= + key->type == Key::PRIMARY && + table->s->primary_key != MAX_KEY && + (keyname= table->s->key_info[table->s->primary_key].name) && + my_strcasecmp(system_charset_info, keyname, primary_key_name) == 0; if (dup_primary_key) goto remove_key; @@ -6128,7 +6131,6 @@ remove_key: } #ifdef WITH_PARTITION_STORAGE_ENGINE - DBUG_ASSERT(thd->work_part_info == 0); partition_info *tab_part_info= table->part_info; thd->work_part_info= thd->lex->part_info; if (tab_part_info) @@ -8730,10 +8732,6 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, { DBUG_ENTER("mysql_alter_table"); -#ifdef WITH_PARTITION_STORAGE_ENGINE - thd->work_part_info= 0; // Used by partitioning -#endif - /* Check if we attempt to alter mysql.slow_log or mysql.general_log table and return an error if diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index 32dc1dd15e0..01ceb34a577 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -453,7 +453,6 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create) my_error(ER_BINLOG_CREATE_ROUTINE_NEED_SUPER, MYF(0)); DBUG_RETURN(TRUE); } - WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL) if (!create) { @@ -515,6 +514,7 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create) if (err_status) goto end; } + 
WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL); /* We should have only one table in table list. */ DBUG_ASSERT(tables->next_global == 0); diff --git a/sql/sql_view.cc b/sql/sql_view.cc index e3bdde77ee3..d9cc470961e 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -436,7 +436,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, goto err; } - WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL) + WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL); /* ignore lock specs for CREATE statement @@ -700,15 +700,15 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views, lex->link_first_table_back(view, link_to_local); DBUG_RETURN(0); +#ifdef WITH_WSREP + error: + res= TRUE; +#endif /* WITH_WSREP */ err: THD_STAGE_INFO(thd, stage_end); lex->link_first_table_back(view, link_to_local); unit->cleanup(); DBUG_RETURN(res || thd->is_error()); -#ifdef WITH_WSREP - error: - DBUG_RETURN(true); -#endif /* WITH_WSREP */ } diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index c3d786af945..de546175da8 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -430,7 +430,7 @@ error_if_in_trans_or_substatement(THD *thd, int in_substatement_error, return false; } -static bool check_has_super(sys_var *self, THD *thd, set_var *var) +bool check_has_super(sys_var *self, THD *thd, set_var *var) { DBUG_ASSERT(self->scope() != sys_var::GLOBAL);// don't abuse check_has_super() #ifndef NO_EMBEDDED_ACCESS_CHECKS diff --git a/sql/table.cc b/sql/table.cc index f7aee3eae7d..917d194047c 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1795,7 +1795,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, if ((uchar)field_type == (uchar)MYSQL_TYPE_VIRTUAL) { - DBUG_ASSERT(interval_nr); // Expect non-null expression + if (!interval_nr) // Expect non-null expression + goto err; /* MariaDB version 10.0 version. The interval_id byte in the .frm file stores the length of the @@ -5824,6 +5825,8 @@ Item *create_view_field(THD *thd, TABLE_LIST *view, Item **field_ref, Item *item= (new (thd->mem_root) Item_direct_view_ref(thd, context, field_ref, view->alias, name, view)); + if (!item) + return NULL; /* Force creation of nullable item for the result tmp table for outer joined views/derived tables. 
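
Two of the table.cc hunks above share one theme: conditions that were previously trusted (a DBUG_ASSERT on interval_nr while reading the binary .frm image, and an unchecked Item allocation in create_view_field) are now handled as runtime failures, so corrupted on-disk metadata or an out-of-memory condition produces an error instead of a crash. A minimal sketch of that pattern, with hypothetical names standing in for the server's parsing and Item-creation calls:

    #include <stdlib.h>

    /* Illustration only; names do not match the server's Item API. */
    struct item { int dummy; };

    static struct item *make_view_item(void)
    {
      return (struct item *) calloc(1, sizeof(struct item)); /* may return NULL */
    }

    /* Returns 0 on success, non-zero on error; never asserts on bad input. */
    static int decode_field(unsigned interval_nr, struct item **out)
    {
      if (!interval_nr)                /* corrupted on-disk metadata: report it */
        return 1;
      if (!(*out = make_view_item()))  /* allocation failed: propagate upwards */
        return 1;
      return 0;
    }
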
@@ -8092,7 +8095,15 @@ int TABLE_LIST::fetch_number_of_rows() { int error= 0; if (jtbm_subselect) + { + if (jtbm_subselect->is_jtbm_merged) + { + table->file->stats.records= (ha_rows)jtbm_subselect->jtbm_record_count; + set_if_bigger(table->file->stats.records, 2); + table->used_stat_records= table->file->stats.records; + } return 0; + } if (is_materialized_derived() && !fill_me) { diff --git a/sql/wsrep_hton.cc b/sql/wsrep_hton.cc index 4672f286982..1ef4465f173 100644 --- a/sql/wsrep_hton.cc +++ b/sql/wsrep_hton.cc @@ -117,7 +117,7 @@ void wsrep_post_commit(THD* thd, bool all) switch (thd->wsrep_exec_mode) { - case LOCAL_COMMIT: + case LOCAL_COMMIT: { DBUG_ASSERT(thd->wsrep_trx_meta.gtid.seqno != WSREP_SEQNO_UNDEFINED); if (wsrep && wsrep->post_commit(wsrep, &thd->wsrep_ws_handle)) @@ -129,18 +129,30 @@ void wsrep_post_commit(THD* thd, bool all) wsrep_cleanup_transaction(thd); break; } - case LOCAL_STATE: - { - /* - Non-InnoDB statements may have populated events in stmt cache => cleanup - */ - WSREP_DEBUG("cleanup transaction for LOCAL_STATE: %s", thd->query()); - wsrep_cleanup_transaction(thd); - break; - } - default: break; + case LOCAL_STATE: + { + /* non-InnoDB statements may have populated events in stmt cache + => cleanup + */ + WSREP_DEBUG("cleanup transaction for LOCAL_STATE"); + /* + Run post-rollback hook to clean up in the case if + some keys were populated for the transaction in provider + but during commit time there was no write set to replicate. + This may happen when client sets the SAVEPOINT and immediately + rolls back to savepoint after first operation. + */ + if (all && thd->wsrep_conflict_state != MUST_REPLAY && + wsrep && wsrep->post_rollback(wsrep, &thd->wsrep_ws_handle)) + { + WSREP_WARN("post_rollback fail: %llu %d", + (long long)thd->thread_id, thd->get_stmt_da()->status()); + } + wsrep_cleanup_transaction(thd); + break; + } + default: break; } - } /* diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index a5cd269fc87..eeb1be72ebb 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -13,6 +13,7 @@ along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */ +#include <sql_plugin.h> // SHOW_MY_BOOL #include <mysqld.h> #include <sql_class.h> #include <sql_parse.h> @@ -301,8 +302,7 @@ wsrep_view_handler_cb (void* app_ctx, if (memcmp(&cluster_uuid, &view->state_id.uuid, sizeof(wsrep_uuid_t))) { - memcpy((wsrep_uuid_t*)&cluster_uuid, &view->state_id.uuid, - sizeof(cluster_uuid)); + memcpy(&cluster_uuid, &view->state_id.uuid, sizeof(cluster_uuid)); wsrep_uuid_print (&cluster_uuid, cluster_uuid_str, sizeof(cluster_uuid_str)); @@ -348,7 +348,7 @@ wsrep_view_handler_cb (void* app_ctx, // version change if (view->proto_ver != wsrep_protocol_version) { - my_bool wsrep_ready_saved= wsrep_ready; + my_bool wsrep_ready_saved= wsrep_ready_get(); wsrep_ready_set(FALSE); WSREP_INFO("closing client connections for " "protocol change %ld -> %d", @@ -463,16 +463,34 @@ out: return WSREP_CB_SUCCESS; } -void wsrep_ready_set (my_bool x) +my_bool wsrep_ready_set (my_bool x) { WSREP_DEBUG("Setting wsrep_ready to %d", x); if (mysql_mutex_lock (&LOCK_wsrep_ready)) abort(); - if (wsrep_ready != x) + my_bool ret= (wsrep_ready != x); + if (ret) { wsrep_ready= x; mysql_cond_signal (&COND_wsrep_ready); } mysql_mutex_unlock (&LOCK_wsrep_ready); + return ret; +} + +my_bool wsrep_ready_get (void) +{ + if (mysql_mutex_lock (&LOCK_wsrep_ready)) abort(); + my_bool ret= wsrep_ready; + mysql_mutex_unlock 
(&LOCK_wsrep_ready); + return ret; +} + +int wsrep_show_ready(THD *thd, SHOW_VAR *var, char *buff) +{ + var->type= SHOW_MY_BOOL; + var->value= buff; + *((my_bool *)buff)= wsrep_ready_get(); + return 0; } // Wait until wsrep has reached ready state @@ -491,17 +509,8 @@ void wsrep_ready_wait () static void wsrep_synced_cb(void* app_ctx) { WSREP_INFO("Synchronized with group, ready for connections"); - bool signal_main= false; - if (mysql_mutex_lock (&LOCK_wsrep_ready)) abort(); - if (!wsrep_ready) - { - wsrep_ready= TRUE; - mysql_cond_signal (&COND_wsrep_ready); - signal_main= true; - - } + my_bool signal_main= wsrep_ready_set(TRUE); wsrep_config_state->set(WSREP_MEMBER_SYNCED); - mysql_mutex_unlock (&LOCK_wsrep_ready); if (signal_main) { @@ -983,6 +992,8 @@ bool wsrep_must_sync_wait (THD* thd, uint mask) { return (thd->variables.wsrep_sync_wait & mask) && thd->variables.wsrep_on && + !(thd->variables.wsrep_dirty_reads && + !is_update_query(thd->lex->sql_command)) && !thd->in_active_multi_stmt_transaction() && thd->wsrep_conflict_state != REPLAYING && thd->wsrep_sync_wait_gtid.seqno == WSREP_SEQNO_UNDEFINED; @@ -1030,17 +1041,7 @@ bool wsrep_sync_wait (THD* thd, uint mask) return false; } -/* - * Helpers to deal with TOI key arrays - */ -typedef struct wsrep_key_arr -{ - wsrep_key_t* keys; - size_t keys_len; -} wsrep_key_arr_t; - - -static void wsrep_keys_free(wsrep_key_arr_t* key_arr) +void wsrep_keys_free(wsrep_key_arr_t* key_arr) { for (size_t i= 0; i < key_arr->keys_len; ++i) { @@ -1105,11 +1106,11 @@ static bool wsrep_prepare_key_for_isolation(const char* db, } /* Prepare key list from db/table and table_list */ -static bool wsrep_prepare_keys_for_isolation(THD* thd, - const char* db, - const char* table, - const TABLE_LIST* table_list, - wsrep_key_arr_t* ka) +bool wsrep_prepare_keys_for_isolation(THD* thd, + const char* db, + const char* table, + const TABLE_LIST* table_list, + wsrep_key_arr_t* ka) { ka->keys= 0; ka->keys_len= 0; @@ -1865,7 +1866,7 @@ bool wsrep_grant_mdl_exception(MDL_context *requestor_ctx, { if (request_thd->lex->sql_command == SQLCOM_DROP_TABLE) { - WSREP_DEBUG("DROP caused BF abort"); + WSREP_DEBUG("DROP caused BF abort, conf %d", granted_thd->wsrep_conflict_state); } else if (granted_thd->wsrep_query_state == QUERY_COMMITTING) { diff --git a/sql/wsrep_mysqld.h b/sql/wsrep_mysqld.h index 130c7e5ad39..8d92621001b 100644 --- a/sql/wsrep_mysqld.h +++ b/sql/wsrep_mysqld.h @@ -233,6 +233,7 @@ extern wsrep_seqno_t wsrep_locked_seqno; #define WSREP_QUERY(thd) (thd->query()) +extern my_bool wsrep_ready_get(); extern void wsrep_ready_wait(); class Ha_trx_info; @@ -325,6 +326,18 @@ bool wsrep_create_like_table(THD* thd, TABLE_LIST* table, bool wsrep_node_is_donor(); bool wsrep_node_is_synced(); +typedef struct wsrep_key_arr +{ + wsrep_key_t* keys; + size_t keys_len; +} wsrep_key_arr_t; +bool wsrep_prepare_keys_for_isolation(THD* thd, + const char* db, + const char* table, + const TABLE_LIST* table_list, + wsrep_key_arr_t* ka); +void wsrep_keys_free(wsrep_key_arr_t* key_arr); + #define WSREP_BINLOG_FORMAT(my_format) \ ((wsrep_forced_binlog_format != BINLOG_FORMAT_UNSPEC) ? 
\ wsrep_forced_binlog_format : my_format) @@ -359,6 +372,5 @@ bool wsrep_node_is_synced(); #define wsrep_thr_deinit() do {} while(0) #define wsrep_running_threads (0) #define WSREP_BINLOG_FORMAT(my_format) my_format - #endif /* WITH_WSREP */ #endif /* WSREP_MYSQLD_H */ diff --git a/sql/wsrep_priv.h b/sql/wsrep_priv.h index 97307fcc948..222a49cc2ab 100644 --- a/sql/wsrep_priv.h +++ b/sql/wsrep_priv.h @@ -26,7 +26,7 @@ #include <pthread.h> #include <cstdio> -void wsrep_ready_set (my_bool x); +my_bool wsrep_ready_set (my_bool x); ssize_t wsrep_sst_prepare (void** msg); wsrep_cb_status wsrep_sst_donate_cb (void* app_ctx, diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc index e6fdf6cde43..a4832ce913b 100644 --- a/sql/wsrep_sst.cc +++ b/sql/wsrep_sst.cc @@ -605,11 +605,11 @@ static ssize_t sst_prepare_other (const char* method, ret= snprintf (cmd_str(), cmd_len, "wsrep_sst_%s " - WSREP_SST_OPT_ROLE" 'joiner' " - WSREP_SST_OPT_ADDR" '%s' " - WSREP_SST_OPT_DATA" '%s' " + WSREP_SST_OPT_ROLE " 'joiner' " + WSREP_SST_OPT_ADDR " '%s' " + WSREP_SST_OPT_DATA " '%s' " " %s " - WSREP_SST_OPT_PARENT" '%d'" + WSREP_SST_OPT_PARENT " '%d'" " %s '%s' ", method, addr_in, mysql_real_data_home, wsrep_defaults_file, @@ -891,13 +891,13 @@ static int sst_donate_mysqldump (const char* addr, int ret= snprintf (cmd_str(), cmd_len, "wsrep_sst_mysqldump " - WSREP_SST_OPT_ADDR" '%s' " - WSREP_SST_OPT_PORT" '%d' " - WSREP_SST_OPT_LPORT" '%u' " - WSREP_SST_OPT_SOCKET" '%s' " - " '%s' " - WSREP_SST_OPT_GTID" '%s:%lld' " - WSREP_SST_OPT_GTID_DOMAIN_ID" '%d'" + WSREP_SST_OPT_ADDR " '%s' " + WSREP_SST_OPT_PORT " '%d' " + WSREP_SST_OPT_LPORT " '%u' " + WSREP_SST_OPT_SOCKET " '%s' " + " %s " + WSREP_SST_OPT_GTID " '%s:%lld' " + WSREP_SST_OPT_GTID_DOMAIN_ID " '%d'" "%s", addr, port, mysqld_port, mysqld_unix_port, wsrep_defaults_file, uuid_str, @@ -1254,14 +1254,14 @@ static int sst_donate_other (const char* method, ret= snprintf (cmd_str(), cmd_len, "wsrep_sst_%s " - WSREP_SST_OPT_ROLE" 'donor' " - WSREP_SST_OPT_ADDR" '%s' " - WSREP_SST_OPT_SOCKET" '%s' " - WSREP_SST_OPT_DATA" '%s' " + WSREP_SST_OPT_ROLE " 'donor' " + WSREP_SST_OPT_ADDR " '%s' " + WSREP_SST_OPT_SOCKET " '%s' " + WSREP_SST_OPT_DATA " '%s' " " %s " " %s '%s' " - WSREP_SST_OPT_GTID" '%s:%lld' " - WSREP_SST_OPT_GTID_DOMAIN_ID" '%d'" + WSREP_SST_OPT_GTID " '%s:%lld' " + WSREP_SST_OPT_GTID_DOMAIN_ID " '%d'" "%s", method, addr, mysqld_unix_port, mysql_real_data_home, wsrep_defaults_file, diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc index 38d727fb470..0bca5d1177d 100644 --- a/sql/wsrep_var.cc +++ b/sql/wsrep_var.cc @@ -62,6 +62,9 @@ bool wsrep_on_check(sys_var *self, THD* thd, set_var* var) { bool new_wsrep_on= (bool)var->save_result.ulonglong_value; + if (check_has_super(self, thd, var)) + return true; + if (new_wsrep_on && innodb_lock_schedule_algorithm != 0) { my_message(ER_WRONG_ARGUMENTS, " WSREP (galera) can't be enabled " "if innodb_lock_schedule_algorithm=VATS. 
Please configure" diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index 2b6079d4a29..edbb5d358fa 100644 --- a/storage/archive/ha_archive.cc +++ b/storage/archive/ha_archive.cc @@ -1908,7 +1908,7 @@ maria_declare_plugin(archive) &archive_storage_engine, "ARCHIVE", "Brian Aker, MySQL AB", - "Archive storage engine", + "gzip-compresses tables for a low storage footprint", PLUGIN_LICENSE_GPL, archive_db_init, /* Plugin Init */ NULL, /* Plugin Deinit */ diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc index 7e0c61ff634..6659e529e97 100644 --- a/storage/csv/ha_tina.cc +++ b/storage/csv/ha_tina.cc @@ -1788,7 +1788,7 @@ maria_declare_plugin(csv) &csv_storage_engine, "CSV", "Brian Aker, MySQL AB", - "CSV storage engine", + "Stores tables as CSV files", PLUGIN_LICENSE_GPL, tina_init_func, /* Plugin Init */ tina_done_func, /* Plugin Deinit */ diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc index ec05127c475..35a68a80f27 100644 --- a/storage/federated/ha_federated.cc +++ b/storage/federated/ha_federated.cc @@ -3410,7 +3410,7 @@ maria_declare_plugin(federated) &federated_storage_engine, "FEDERATED", "Patrick Galbraith and Brian Aker, MySQL AB", - "Federated MySQL storage engine", + "Allows to access tables on other MariaDB servers", PLUGIN_LICENSE_GPL, federated_db_init, /* Plugin Init */ federated_done, /* Plugin Deinit */ diff --git a/storage/federatedx/ha_federatedx.cc b/storage/federatedx/ha_federatedx.cc index 8f9b499c611..840a5a68885 100644 --- a/storage/federatedx/ha_federatedx.cc +++ b/storage/federatedx/ha_federatedx.cc @@ -3645,7 +3645,7 @@ maria_declare_plugin(federatedx) &federatedx_storage_engine, "FEDERATED", "Patrick Galbraith", - "FederatedX pluggable storage engine", + "Allows to access tables on other MariaDB servers, supports transactions and more", PLUGIN_LICENSE_GPL, federatedx_db_init, /* Plugin Init */ federatedx_done, /* Plugin Deinit */ diff --git a/storage/heap/hp_hash.c b/storage/heap/hp_hash.c index eaf284c2015..54f5d93d0c6 100644 --- a/storage/heap/hp_hash.c +++ b/storage/heap/hp_hash.c @@ -1006,7 +1006,7 @@ void heap_update_auto_increment(HP_INFO *info, const uchar *record) switch (info->s->auto_key_type) { case HA_KEYTYPE_INT8: - s_value= (longlong) *(char*)key; + s_value= (longlong) *(const signed char*) key; break; case HA_KEYTYPE_BINARY: value=(ulonglong) *(uchar*) key; diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 7ac7495e221..198255a30ef 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -2541,8 +2541,12 @@ innobase_mysql_tmpfile( } } #else +#ifdef F_DUPFD_CLOEXEC + fd2 = fcntl(fd, F_DUPFD_CLOEXEC, 0); +#else fd2 = dup(fd); #endif +#endif if (fd2 < 0) { char errbuf[MYSYS_STRERROR_SIZE]; DBUG_PRINT("error",("Got error %d on dup",fd2)); @@ -8484,13 +8488,12 @@ report_error: error, m_prebuilt->table->flags, m_user_thd); #ifdef WITH_WSREP - if (!error_result && - wsrep_thd_exec_mode(m_user_thd) == LOCAL_STATE && - wsrep_on(m_user_thd) && - !wsrep_consistency_check(m_user_thd) && - !wsrep_thd_ignore_table(m_user_thd)) { - if (wsrep_append_keys(m_user_thd, false, record, NULL)) - { + if (!error_result + && wsrep_on(m_user_thd) + && wsrep_thd_exec_mode(m_user_thd) == LOCAL_STATE + && !wsrep_consistency_check(m_user_thd) + && !wsrep_thd_ignore_table(m_user_thd)) { + if (wsrep_append_keys(m_user_thd, false, record, NULL)) { DBUG_PRINT("wsrep", ("row key failed")); error_result = HA_ERR_INTERNAL_ERROR; goto 
wsrep_error; @@ -19671,8 +19674,10 @@ wsrep_innobase_kill_one_trx( thd_get_thread_id(thd), victim_trx->id); - WSREP_DEBUG("Aborting query: %s", - (thd && wsrep_thd_query(thd)) ? wsrep_thd_query(thd) : "void"); + WSREP_DEBUG("Aborting query: %s conf %d trx: %lu", + (thd && wsrep_thd_query(thd)) ? wsrep_thd_query(thd) : "void", + wsrep_thd_conflict_state(thd, FALSE), + wsrep_thd_ws_handle(thd)->trx_id); wsrep_thd_LOCK(thd); DBUG_EXECUTE_IF("sync.wsrep_after_BF_victim_lock", @@ -19735,7 +19740,7 @@ wsrep_innobase_kill_one_trx( wsrep_t *wsrep= get_wsrep(); rcode = wsrep->abort_pre_commit( wsrep, bf_seqno, - (wsrep_trx_id_t)victim_trx->id + (wsrep_trx_id_t)wsrep_thd_ws_handle(thd)->trx_id ); switch (rcode) { @@ -19860,12 +19865,14 @@ wsrep_abort_transaction( my_bool signal) { DBUG_ENTER("wsrep_innobase_abort_thd"); - trx_t* victim_trx = thd_to_trx(victim_thd); - trx_t* bf_trx = (bf_thd) ? thd_to_trx(bf_thd) : NULL; + + trx_t* victim_trx = thd_to_trx(victim_thd); + trx_t* bf_trx = (bf_thd) ? thd_to_trx(bf_thd) : NULL; - WSREP_DEBUG("abort transaction: BF: %s victim: %s", - wsrep_thd_query(bf_thd), - wsrep_thd_query(victim_thd)); + WSREP_DEBUG("abort transaction: BF: %s victim: %s victim conf: %d", + wsrep_thd_query(bf_thd), + wsrep_thd_query(victim_thd), + wsrep_thd_conflict_state(victim_thd, FALSE)); if (victim_trx) { lock_mutex_enter(); @@ -19917,8 +19924,8 @@ innobase_wsrep_get_checkpoint( XID* xid) { DBUG_ASSERT(hton == innodb_hton_ptr); - trx_sys_read_wsrep_checkpoint(xid); - return 0; + trx_sys_read_wsrep_checkpoint(xid); + return 0; } static diff --git a/storage/innobase/include/fil0fil.ic b/storage/innobase/include/fil0fil.ic index 9505cc0bd69..023a48a5066 100644 --- a/storage/innobase/include/fil0fil.ic +++ b/storage/innobase/include/fil0fil.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2015, 2017, MariaDB Corporation. +Copyright (c) 2015, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h index 6697c1f37ed..368f0daa201 100644 --- a/storage/innobase/include/fsp0fsp.h +++ b/storage/innobase/include/fsp0fsp.h @@ -743,7 +743,7 @@ fsp_flags_convert_from_101(ulint flags) /* Bits 13..16 are the wrong position for PAGE_SSIZE, and they should contain one of the values 3,4,6,7, that is, be of the form - 0b0011 or 0b01xx (except 0b0110). + 0b0011 or 0b01xx (except 0b0101). 
In correct versions, these bits should be 0bc0se where c is the MariaDB COMPRESSED flag and e is the MySQL 5.7 ENCRYPTION flag diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc index 613a3cad0e9..568239b79ed 100644 --- a/storage/innobase/os/os0file.cc +++ b/storage/innobase/os/os0file.cc @@ -2700,7 +2700,7 @@ os_file_create_simple_func( bool retry; do { - file = open(name, create_flag, os_innodb_umask); + file = open(name, create_flag | O_CLOEXEC, os_innodb_umask); if (file == -1) { *success = false; @@ -3002,7 +3002,7 @@ os_file_create_func( bool retry; do { - file = open(name, create_flag, os_innodb_umask); + file = open(name, create_flag | O_CLOEXEC, os_innodb_umask); if (file == -1) { const char* operation; @@ -3136,7 +3136,7 @@ os_file_create_simple_no_error_handling_func( return(OS_FILE_CLOSED); } - file = open(name, create_flag, os_innodb_umask); + file = open(name, create_flag | O_CLOEXEC, os_innodb_umask); *success = (file != -1); diff --git a/storage/maria/ma_control_file.c b/storage/maria/ma_control_file.c index 1ccb67d5698..99c934c479d 100644 --- a/storage/maria/ma_control_file.c +++ b/storage/maria/ma_control_file.c @@ -277,7 +277,7 @@ CONTROL_FILE_ERROR ma_control_file_open(my_bool create_if_missing, " file is probably in use by another process"; uint new_cf_create_time_size, new_cf_changeable_size, new_block_size; my_off_t file_size; - int open_flags= O_BINARY | /*O_DIRECT |*/ O_RDWR; + int open_flags= O_BINARY | /*O_DIRECT |*/ O_RDWR | O_CLOEXEC; int error= CONTROL_FILE_UNKNOWN_ERROR; DBUG_ENTER("ma_control_file_open"); diff --git a/storage/maria/ma_key.c b/storage/maria/ma_key.c index 6f3e17ed80d..8010d9aa147 100644 --- a/storage/maria/ma_key.c +++ b/storage/maria/ma_key.c @@ -728,7 +728,7 @@ ulonglong ma_retrieve_auto_increment(const uchar *key, uint8 key_type) switch (key_type) { case HA_KEYTYPE_INT8: - s_value= (longlong) *(const char*)key; + s_value= (longlong) *(const signed char*) key; break; case HA_KEYTYPE_BINARY: value=(ulonglong) *key; diff --git a/storage/maria/ma_loghandler.c b/storage/maria/ma_loghandler.c index e30ddb756a6..9410e5332e3 100644 --- a/storage/maria/ma_loghandler.c +++ b/storage/maria/ma_loghandler.c @@ -78,6 +78,32 @@ typedef union uchar buffer[TRANSLOG_PAGE_SIZE]; } TRANSLOG_PAGE_SIZE_BUFF; +#define MAX_TRUNSLOG_USED_BUFFERS 3 + +typedef struct +{ + struct st_translog_buffer *buff[MAX_TRUNSLOG_USED_BUFFERS]; + uint8 wrt_ptr; + uint8 unlck_ptr; +} TRUNSLOG_USED_BUFFERS; + +static void +used_buffs_init(TRUNSLOG_USED_BUFFERS *buffs) +{ + buffs->unlck_ptr= buffs->wrt_ptr= 0; +} + +static void +used_buffs_add(TRUNSLOG_USED_BUFFERS *buffs, + struct st_translog_buffer *buff); + +static void +used_buffs_register_unlock(TRUNSLOG_USED_BUFFERS *buffs, + struct st_translog_buffer *buff); + +static void +used_buffs_urgent_unlock(TRUNSLOG_USED_BUFFERS *buffs); + /* min chunk length */ #define TRANSLOG_MIN_CHUNK 3 /* @@ -156,7 +182,28 @@ struct st_translog_buffer TRANSLOG_FILE *file; /* Threads which are waiting for buffer filling/freeing */ mysql_cond_t waiting_filling_buffer; - /* Number of records which are in copy progress */ + /* + Number of records which are in copy progress. + + Controlled via translog_buffer_increase_writers() and + translog_buffer_decrease_writers(). + + 1 Simple case: translog_force_current_buffer_to_finish both called in + the same procedure. 
+ + 2 Simple case: translog_write_variable_record_1group: + translog_advance_pointer() increase writer of the buffer and + translog_buffer_decrease_writers() decrease it. + + Usual case: + 1) translog_advance_pointer (i.e. reserve place for future writing) + increase writers for all buffers where place reserved. + Simpliest case: just all space reserved in one buffer + complex case: end of the first buffer, all second buffer, beginning + of the third buffer. + 2) When we finish with writing translog_chaser_page_next() will be + called and unlock the buffer by decreasing number of writers. + */ uint copy_to_buffer_in_progress; /* list of waiting buffer ready threads */ struct st_my_thread_var *waiting_flush; @@ -214,6 +261,7 @@ struct st_translog_buffer struct st_buffer_cursor { + TRUNSLOG_USED_BUFFERS buffs; /* pointer into the buffer */ uchar *ptr; /* current buffer */ @@ -935,7 +983,7 @@ static File create_logfile_by_number_no_cache(uint32 file_no) /* TODO: add O_DIRECT to open flags (when buffer is aligned) */ if ((file= mysql_file_create(key_file_translog, translog_filename_by_fileno(file_no, path), - 0, O_BINARY | O_RDWR, MYF(MY_WME))) < 0) + 0, O_BINARY | O_RDWR | O_CLOEXEC, MYF(MY_WME))) < 0) { DBUG_PRINT("error", ("Error %d during creating file '%s'", errno, path)); translog_stop_writing(); @@ -973,7 +1021,7 @@ static File open_logfile_by_number_no_cache(uint32 file_no) /* TODO: use mysql_file_create() */ if ((file= mysql_file_open(key_file_translog, translog_filename_by_fileno(file_no, path), - log_descriptor.open_flags, + log_descriptor.open_flags | O_CLOEXEC, MYF(MY_WME))) < 0) { DBUG_PRINT("error", ("Error %d during opening file '%s'", errno, path)); @@ -1648,15 +1696,12 @@ static my_bool translog_create_new_file() DBUG_PRINT("info", ("file_no: %lu", (ulong)file_no)); if (translog_write_file_header()) - DBUG_RETURN(1); + goto error; if (ma_control_file_write_and_force(last_checkpoint_lsn, file_no, max_trid_in_control_file, recovery_failures)) - { - translog_stop_writing(); - DBUG_RETURN(1); - } + goto error; DBUG_RETURN(0); @@ -1697,10 +1742,6 @@ static void translog_buffer_lock(struct st_translog_buffer *buffer) SYNOPSIS translog_buffer_unlock() buffer This buffer which should be unlocked - - RETURN - 0 OK - 1 Error */ static void translog_buffer_unlock(struct st_translog_buffer *buffer) @@ -1894,7 +1935,10 @@ static void translog_finish_page(TRANSLOG_ADDRESS *horizon, (uint) cursor->buffer->size, (uint) (cursor->ptr -cursor->buffer->buffer), (uint) cursor->current_page_fill, (uint) left)); - DBUG_ASSERT(LSN_FILE_NO(*horizon) == LSN_FILE_NO(cursor->buffer->offset)); + DBUG_ASSERT(LSN_FILE_NO(*horizon) == LSN_FILE_NO(cursor->buffer->offset) + || translog_status == TRANSLOG_UNINITED); + if ((LSN_FILE_NO(*horizon) != LSN_FILE_NO(cursor->buffer->offset))) + DBUG_VOID_RETURN; // everything wrong do not write to awoid more problems translog_check_cursor(cursor); if (cursor->protected) { @@ -3243,7 +3287,7 @@ static my_bool translog_get_last_page_addr(TRANSLOG_ADDRESS *addr, File fd; if ((fd= mysql_file_open(key_file_translog, translog_filename_by_fileno(file_no, path), - O_RDONLY, (no_errors ? MYF(0) : MYF(MY_WME)))) < 0) + O_RDONLY | O_CLOEXEC, (no_errors ? 
MYF(0) : MYF(MY_WME)))) < 0) { my_errno= errno; DBUG_PRINT("error", ("Error %d during opening file #%d", @@ -4588,6 +4632,7 @@ static my_bool translog_chaser_page_next(TRANSLOG_ADDRESS *horizon, { translog_buffer_lock(buffer_to_flush); translog_buffer_decrease_writers(buffer_to_flush); + used_buffs_register_unlock(&cursor->buffs, buffer_to_flush); if (!rc) rc= translog_buffer_flush(buffer_to_flush); translog_buffer_unlock(buffer_to_flush); @@ -4692,7 +4737,8 @@ translog_write_variable_record_chunk3_page(struct st_translog_parts *parts, 1 Error */ -static my_bool translog_advance_pointer(int pages, uint16 last_page_data) +static my_bool translog_advance_pointer(int pages, uint16 last_page_data, + TRUNSLOG_USED_BUFFERS *buffs) { translog_size_t last_page_offset= (log_descriptor.page_overhead + last_page_data); @@ -4709,6 +4755,8 @@ static my_bool translog_advance_pointer(int pages, uint16 last_page_data) (uint) last_page_data)); translog_lock_assert_owner(); + used_buffs_init(buffs); + if (pages == -1) { /* @@ -4786,8 +4834,10 @@ static my_bool translog_advance_pointer(int pages, uint16 last_page_data) translog_wait_for_buffer_free(new_buffer); #ifndef DBUG_OFF /* We keep the handler locked so nobody can start this new buffer */ - DBUG_ASSERT(offset == new_buffer->offset && new_buffer->file == NULL && - (file == NULL ? ver : (uint8)(ver + 1)) == new_buffer->ver); + DBUG_ASSERT((offset == new_buffer->offset && new_buffer->file == NULL && + (file == NULL ? ver : (uint8)(ver + 1)) == + new_buffer->ver) || + translog_status == TRANSLOG_READONLY); } #endif @@ -4808,6 +4858,8 @@ static my_bool translog_advance_pointer(int pages, uint16 last_page_data) DBUG_ASSERT(log_descriptor.bc.buffer->buffer_no == log_descriptor.bc.buffer_no); translog_buffer_increase_writers(log_descriptor.bc.buffer); + // register for case of error + used_buffs_add(buffs, log_descriptor.bc.buffer); if (file_end_offset <= buffer_end_offset) { @@ -4818,6 +4870,10 @@ static my_bool translog_advance_pointer(int pages, uint16 last_page_data) (ulong) LSN_FILE_NO(log_descriptor.horizon))); if (translog_create_new_file()) { + struct st_translog_buffer *ob= log_descriptor.bc.buffer; + translog_buffer_unlock(ob); + used_buffs_urgent_unlock(buffs); + translog_buffer_lock(ob); DBUG_RETURN(1); } } @@ -4839,6 +4895,7 @@ end: log_descriptor.bc.ptr+= offset; log_descriptor.bc.buffer->size+= offset; translog_buffer_increase_writers(log_descriptor.bc.buffer); + used_buffs_add(buffs, log_descriptor.bc.buffer); log_descriptor.horizon+= offset; /* offset increasing */ log_descriptor.bc.current_page_fill= last_page_offset; DBUG_PRINT("info", ("NewP buffer #%u: %p chaser: %d Size: %lu (%lu) " @@ -4859,6 +4916,56 @@ end: DBUG_RETURN(0); } +static void +used_buffs_add(TRUNSLOG_USED_BUFFERS *buffs, + struct st_translog_buffer *buff) +{ + DBUG_ENTER("used_buffs_add"); + DBUG_PRINT("enter", ("ADD buffs: %p unlk %u (%p) wrt_ptr: %u (%p)" + " buff %p (%u)", + buffs, + buffs->wrt_ptr, buffs->buff[buffs->wrt_ptr], + buffs->unlck_ptr, buffs->buff[buffs->unlck_ptr], + buff, buff->buffer_no)); + DBUG_ASSERT(buffs->wrt_ptr < MAX_TRUNSLOG_USED_BUFFERS); + buffs->buff[buffs->wrt_ptr++]= buff; + DBUG_VOID_RETURN; +} + +static void +used_buffs_register_unlock(TRUNSLOG_USED_BUFFERS *buffs, + struct st_translog_buffer *buff + __attribute__((unused)) ) +{ + DBUG_ENTER("used_buffs_register_unlock"); + DBUG_PRINT("enter", ("SUB buffs: %p unlk %u (%p) wrt_ptr: %u (%p)" + " buff %p (%u)", + buffs, + buffs->wrt_ptr, buffs->buff[buffs->wrt_ptr], + buffs->unlck_ptr, 
buffs->buff[buffs->unlck_ptr], + buff, buff->buffer_no)); + DBUG_ASSERT(buffs->buff[buffs->unlck_ptr] == buff); + buffs->unlck_ptr++; + DBUG_VOID_RETURN; +} +static void used_buffs_urgent_unlock(TRUNSLOG_USED_BUFFERS *buffs) +{ + uint i; + DBUG_ENTER("used_buffs_urgent_unlock"); + translog_lock(); + translog_stop_writing(); + translog_unlock(); + for (i= buffs->unlck_ptr; i < buffs->wrt_ptr; i++) + { + struct st_translog_buffer *buf= buffs->buff[i]; + translog_buffer_lock(buf); + translog_buffer_decrease_writers(buf); + translog_buffer_unlock(buf); + buffs->buff[i]= NULL; + } + used_buffs_init(buffs); + DBUG_VOID_RETURN; +} /* Get page rest @@ -4997,6 +5104,11 @@ translog_write_variable_record_1group(LSN *lsn, lsn, hook_arg))) { translog_unlock(); + if (buffer_to_flush != NULL) + { + translog_buffer_flush(buffer_to_flush); + translog_buffer_unlock(buffer_to_flush); + } DBUG_RETURN(1); } cursor= log_descriptor.bc; @@ -5027,8 +5139,9 @@ translog_write_variable_record_1group(LSN *lsn, (log_descriptor.page_capacity_chunk_2 - 1), record_rest, parts->record_length)); /* record_rest + 3 is chunk type 3 overhead + record_rest */ - rc|= translog_advance_pointer((int)(full_pages + additional_chunk3_page), - (record_rest ? record_rest + 3 : 0)); + rc= translog_advance_pointer((int)(full_pages + additional_chunk3_page), + (record_rest ? record_rest + 3 : 0), + &cursor.buffs); log_descriptor.bc.buffer->last_lsn= *lsn; DBUG_PRINT("info", ("last_lsn set to " LSN_FMT " buffer: %p", LSN_IN_PARTS(log_descriptor.bc.buffer->last_lsn), @@ -5047,7 +5160,11 @@ translog_write_variable_record_1group(LSN *lsn, translog_buffer_unlock(buffer_to_flush); } if (rc) + { + //translog_advance_pointer decreased writers so it is OK + DBUG_ASSERT(cursor.buffs.unlck_ptr == cursor.buffs.wrt_ptr); DBUG_RETURN(1); + } translog_write_variable_record_1group_header(parts, type, short_trid, header_length, chunk0_header); @@ -5062,7 +5179,7 @@ translog_write_variable_record_1group(LSN *lsn, for (i= 0; i < full_pages; i++) { if (translog_write_variable_record_chunk2_page(parts, &horizon, &cursor)) - DBUG_RETURN(1); + goto error; DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT, LSN_IN_PARTS(log_descriptor.horizon), @@ -5075,7 +5192,7 @@ translog_write_variable_record_1group(LSN *lsn, log_descriptor. 
page_capacity_chunk_2 - 2, &horizon, &cursor)) - DBUG_RETURN(1); + goto error; DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT, LSN_IN_PARTS(log_descriptor.horizon), LSN_IN_PARTS(horizon))); @@ -5085,17 +5202,22 @@ translog_write_variable_record_1group(LSN *lsn, if (translog_write_variable_record_chunk3_page(parts, record_rest, &horizon, &cursor)) - DBUG_RETURN(1); - DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT, - (uint) LSN_FILE_NO(log_descriptor.horizon), - (uint) LSN_OFFSET(log_descriptor.horizon), - (uint) LSN_FILE_NO(horizon), - (uint) LSN_OFFSET(horizon))); + goto error; + DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT, + (uint) LSN_FILE_NO(log_descriptor.horizon), + (uint) LSN_OFFSET(log_descriptor.horizon), + (uint) LSN_FILE_NO(horizon), + (uint) LSN_OFFSET(horizon))); translog_buffer_lock(cursor.buffer); translog_buffer_decrease_writers(cursor.buffer); + used_buffs_register_unlock(&cursor.buffs, cursor.buffer); translog_buffer_unlock(cursor.buffer); - DBUG_RETURN(rc); + DBUG_ASSERT(cursor.buffs.unlck_ptr == cursor.buffs.wrt_ptr); + DBUG_RETURN(0); +error: + used_buffs_urgent_unlock(&cursor.buffs); + DBUG_RETURN(1); } @@ -5149,7 +5271,8 @@ translog_write_variable_record_1chunk(LSN *lsn, lsn, hook_arg))) { translog_unlock(); - DBUG_RETURN(1); + rc= 1; + goto err; } rc= translog_write_parts_on_page(&log_descriptor.horizon, @@ -5165,6 +5288,7 @@ translog_write_variable_record_1chunk(LSN *lsn, check if we switched buffer and need process it (current buffer is unlocked already => we will not delay other threads */ +err: if (buffer_to_flush != NULL) { if (!rc) @@ -5504,9 +5628,11 @@ translog_write_variable_record_mgroup(LSN *lsn, uint file_of_the_first_group; int pages_to_skip; struct st_translog_buffer *buffer_of_last_lsn; + my_bool external_buffer_to_flush= TRUE; DBUG_ENTER("translog_write_variable_record_mgroup"); translog_lock_assert_owner(); + used_buffs_init(&cursor.buffs); chunk2_header[0]= TRANSLOG_CHUNK_NOHDR; if (my_init_dynamic_array(&groups, @@ -5514,6 +5640,11 @@ translog_write_variable_record_mgroup(LSN *lsn, 10, 10, MYF(0))) { translog_unlock(); + if (buffer_to_flush != NULL) + { + translog_buffer_flush(buffer_to_flush); + translog_buffer_unlock(buffer_to_flush); + } DBUG_PRINT("error", ("init array failed")); DBUG_RETURN(1); } @@ -5540,6 +5671,7 @@ translog_write_variable_record_mgroup(LSN *lsn, translog_mark_file_unfinished(file_of_the_first_group); do { + DBUG_ASSERT(cursor.buffs.unlck_ptr == cursor.buffs.wrt_ptr); group.addr= horizon= log_descriptor.horizon; cursor= log_descriptor.bc; cursor.chaser= 1; @@ -5572,21 +5704,26 @@ translog_write_variable_record_mgroup(LSN *lsn, (ulong)(parts->record_length - (first_page - 1 + buffer_rest) - done))); - rc|= translog_advance_pointer((int)full_pages, 0); + rc= translog_advance_pointer((int)full_pages, 0, &cursor.buffs); translog_unlock(); if (buffer_to_flush != NULL) { - translog_buffer_decrease_writers(buffer_to_flush); + if (!external_buffer_to_flush) + translog_buffer_decrease_writers(buffer_to_flush); if (!rc) rc= translog_buffer_flush(buffer_to_flush); translog_buffer_unlock(buffer_to_flush); buffer_to_flush= NULL; } + external_buffer_to_flush= FALSE; + if (rc) { DBUG_PRINT("error", ("flush of unlock buffer failed")); + //translog_advance_pointer decreased writers so it is OK + DBUG_ASSERT(cursor.buffs.unlck_ptr == cursor.buffs.wrt_ptr); goto err; } @@ -5623,6 +5760,7 @@ translog_write_variable_record_mgroup(LSN *lsn, } translog_buffer_lock(cursor.buffer); 
translog_buffer_decrease_writers(cursor.buffer); + used_buffs_register_unlock(&cursor.buffs, cursor.buffer); translog_buffer_unlock(cursor.buffer); translog_lock(); @@ -5637,6 +5775,11 @@ translog_write_variable_record_mgroup(LSN *lsn, first_page= translog_get_current_page_rest(); } buffer_rest= translog_get_current_group_size(); + + if (buffer_to_flush) + used_buffs_register_unlock(&cursor.buffs, + buffer_to_flush); // will be unlocked + } while ((translog_size_t)(first_page + buffer_rest) < (translog_size_t)(parts->record_length - done)); @@ -5732,17 +5875,21 @@ translog_write_variable_record_mgroup(LSN *lsn, (ulong) full_pages * log_descriptor.page_capacity_chunk_2, chunk3_pages, (uint) chunk3_size, (uint) record_rest)); + + DBUG_ASSERT(cursor.buffs.unlck_ptr == cursor.buffs.wrt_ptr); rc= translog_advance_pointer(pages_to_skip + (int)(chunk0_pages - 1), record_rest + header_fixed_part + (groups.elements - ((page_capacity - header_fixed_part) / (7 + 1)) * - (chunk0_pages - 1)) * (7 + 1)); + (chunk0_pages - 1)) * (7 + 1), + &cursor.buffs); buffer_of_last_lsn= log_descriptor.bc.buffer; translog_unlock(); if (buffer_to_flush != NULL) { + DBUG_ASSERT(!external_buffer_to_flush); translog_buffer_decrease_writers(buffer_to_flush); if (!rc) rc= translog_buffer_flush(buffer_to_flush); @@ -5906,8 +6053,10 @@ translog_write_variable_record_mgroup(LSN *lsn, } while (chunk0_pages != 0); translog_buffer_lock(cursor.buffer); translog_buffer_decrease_writers(cursor.buffer); + used_buffs_register_unlock(&cursor.buffs, cursor.buffer); translog_buffer_unlock(cursor.buffer); rc= 0; + DBUG_ASSERT(cursor.buffs.unlck_ptr == cursor.buffs.wrt_ptr); if (translog_set_lsn_for_files(file_of_the_first_group, LSN_FILE_NO(*lsn), *lsn, FALSE)) @@ -5916,17 +6065,22 @@ translog_write_variable_record_mgroup(LSN *lsn, translog_mark_file_finished(file_of_the_first_group); delete_dynamic(&groups); - DBUG_RETURN(rc); + DBUG_RETURN(0); err_unlock: translog_unlock(); err: + + if (cursor.buffs.unlck_ptr != cursor.buffs.wrt_ptr) + used_buffs_urgent_unlock(&cursor.buffs); + if (buffer_to_flush != NULL) { /* This is to prevent locking buffer forever in case of error */ - translog_buffer_decrease_writers(buffer_to_flush); + if (!external_buffer_to_flush) + translog_buffer_decrease_writers(buffer_to_flush); if (!rc) rc= translog_buffer_flush(buffer_to_flush); translog_buffer_unlock(buffer_to_flush); @@ -7480,7 +7634,8 @@ static void translog_force_current_buffer_to_finish() DBUG_ASSERT(log_descriptor.bc.ptr !=NULL); DBUG_ASSERT(LSN_FILE_NO(log_descriptor.horizon) == - LSN_FILE_NO(old_buffer->offset)); + LSN_FILE_NO(old_buffer->offset) || + translog_status == TRANSLOG_READONLY ); translog_check_cursor(&log_descriptor.bc); DBUG_ASSERT(left < TRANSLOG_PAGE_SIZE); if (left) diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c index d05e2313891..a781b7e00d3 100644 --- a/storage/maria/ma_open.c +++ b/storage/maria/ma_open.c @@ -334,13 +334,13 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags) }); DEBUG_SYNC_C("mi_open_kfile"); if ((kfile=mysql_file_open(key_file_kfile, name_buff, - (open_mode=O_RDWR) | O_SHARE | O_NOFOLLOW, + (open_mode=O_RDWR) | O_SHARE | O_NOFOLLOW | O_CLOEXEC, MYF(MY_NOSYMLINKS))) < 0) { if ((errno != EROFS && errno != EACCES) || mode != O_RDONLY || (kfile=mysql_file_open(key_file_kfile, name_buff, - (open_mode=O_RDONLY) | O_SHARE | O_NOFOLLOW, + (open_mode=O_RDONLY) | O_SHARE | O_NOFOLLOW | O_CLOEXEC, MYF(MY_NOSYMLINKS))) < 0) goto err; } @@ -1949,7 +1949,7 @@ int _ma_open_datafile(MARIA_HA 
*info, MARIA_SHARE *share) DEBUG_SYNC_C("mi_open_datafile"); info->dfile.file= share->bitmap.file.file= mysql_file_open(key_file_dfile, share->data_file_name.str, - share->mode | O_SHARE, MYF(flags)); + share->mode | O_SHARE | O_CLOEXEC, MYF(flags)); return info->dfile.file >= 0 ? 0 : 1; } @@ -1963,7 +1963,7 @@ int _ma_open_keyfile(MARIA_SHARE *share) mysql_mutex_lock(&share->intern_lock); share->kfile.file= mysql_file_open(key_file_kfile, share->unique_file_name.str, - share->mode | O_SHARE | O_NOFOLLOW, + share->mode | O_SHARE | O_NOFOLLOW | O_CLOEXEC, MYF(MY_WME | MY_NOSYMLINKS)); mysql_mutex_unlock(&share->intern_lock); return (share->kfile.file < 0); diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index a70f464ef4a..622e5c66d4f 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -2568,7 +2568,7 @@ maria_declare_plugin(myisam) &myisam_storage_engine, "MyISAM", "MySQL AB", - "MyISAM storage engine", + "Non-transactional engine with good performance and small data footprint", PLUGIN_LICENSE_GPL, myisam_init, /* Plugin Init */ NULL, /* Plugin Deinit */ diff --git a/storage/myisam/mi_key.c b/storage/myisam/mi_key.c index 9a2526ad2cf..18ecc9e8ba3 100644 --- a/storage/myisam/mi_key.c +++ b/storage/myisam/mi_key.c @@ -553,7 +553,7 @@ ulonglong retrieve_auto_increment(MI_INFO *info,const uchar *record) switch (keyseg->type) { case HA_KEYTYPE_INT8: - s_value= (longlong) *(char*)key; + s_value= (longlong) *(const signed char*) key; break; case HA_KEYTYPE_BINARY: value=(ulonglong) *(uchar*) key; diff --git a/storage/myisam/mi_open.c b/storage/myisam/mi_open.c index 41b0e18da02..85fb270869b 100644 --- a/storage/myisam/mi_open.c +++ b/storage/myisam/mi_open.c @@ -139,13 +139,13 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) DEBUG_SYNC_C("mi_open_kfile"); if ((kfile= mysql_file_open(mi_key_file_kfile, name_buff, - (open_mode= O_RDWR) | O_SHARE | O_NOFOLLOW, + (open_mode= O_RDWR) | O_SHARE | O_NOFOLLOW | O_CLOEXEC, MYF(MY_NOSYMLINKS))) < 0) { if ((errno != EROFS && errno != EACCES) || mode != O_RDONLY || (kfile= mysql_file_open(mi_key_file_kfile, name_buff, - (open_mode= O_RDONLY) | O_SHARE| O_NOFOLLOW, + (open_mode= O_RDONLY) | O_SHARE| O_NOFOLLOW | O_CLOEXEC, MYF(MY_NOSYMLINKS))) < 0) goto err; } @@ -1270,7 +1270,7 @@ int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share) myf flags= MY_WME | (share->mode & O_NOFOLLOW ? MY_NOSYMLINKS: 0); DEBUG_SYNC_C("mi_open_datafile"); info->dfile= mysql_file_open(mi_key_file_dfile, share->data_file_name, - share->mode | O_SHARE, MYF(flags)); + share->mode | O_SHARE | O_CLOEXEC, MYF(flags)); return info->dfile >= 0 ? 
0 : 1; } @@ -1279,7 +1279,7 @@ int mi_open_keyfile(MYISAM_SHARE *share) { if ((share->kfile= mysql_file_open(mi_key_file_kfile, share->unique_file_name, - share->mode | O_SHARE | O_NOFOLLOW, + share->mode | O_SHARE | O_NOFOLLOW | O_CLOEXEC, MYF(MY_NOSYMLINKS | MY_WME))) < 0) return 1; return 0; diff --git a/storage/tokudb/CMakeLists.txt b/storage/tokudb/CMakeLists.txt index 22c2cd0d490..8facf6c3f10 100644 --- a/storage/tokudb/CMakeLists.txt +++ b/storage/tokudb/CMakeLists.txt @@ -75,6 +75,7 @@ IF(INSTALL_SYSCONFDIR) COMPONENT tokudb-engine) ENDIF() +MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-shadow") MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-vla" DEBUG) MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-implicit-fallthrough") diff --git a/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake b/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake index 385723aebc7..50d35ee4906 100644 --- a/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake +++ b/storage/tokudb/PerconaFT/cmake_modules/TokuSetupCompiler.cmake @@ -149,7 +149,7 @@ set_cflags_if_supported( -Wmissing-prototypes -Wmissing-declarations -Wpointer-arith - -Wshadow + #-Wshadow will fail with GCC-8 ${OPTIONAL_CFLAGS} ## other flags to try: #-Wunsafe-loop-optimizations diff --git a/storage/tokudb/PerconaFT/ft/ft-ops.cc b/storage/tokudb/PerconaFT/ft/ft-ops.cc index a98768158dd..aefd6f0ec22 100644 --- a/storage/tokudb/PerconaFT/ft/ft-ops.cc +++ b/storage/tokudb/PerconaFT/ft/ft-ops.cc @@ -822,22 +822,22 @@ int toku_ftnode_fetch_callback(CACHEFILE UU(cachefile), fprintf( stderr, "%s:%d:toku_ftnode_fetch_callback - " - "file[%s], blocknum[%ld], toku_deserialize_ftnode_from " + "file[%s], blocknum[%lld], toku_deserialize_ftnode_from " "failed with a checksum error.\n", __FILE__, __LINE__, toku_cachefile_fname_in_env(cachefile), - blocknum.b); + (longlong)blocknum.b); } else { fprintf( stderr, "%s:%d:toku_ftnode_fetch_callback - " - "file[%s], blocknum[%ld], toku_deserialize_ftnode_from " + "file[%s], blocknum[%lld], toku_deserialize_ftnode_from " "failed with %d.\n", __FILE__, __LINE__, toku_cachefile_fname_in_env(cachefile), - blocknum.b, + (longlong)blocknum.b, r); } // make absolutely sure we crash before doing anything else. 
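
[Note on the fprintf changes in ft-ops.cc above and in the serializer files that follow: they pair the "%lld" conversion with an explicit (longlong) cast of the 64-bit block number, so the format string and the argument width agree whether the platform's int64_t is long (LP64 Linux) or long long (LLP64 Windows). A minimal sketch of the idea, assuming a longlong typedef equivalent to MariaDB's; it is an illustration, not code from the patch:]

    #include <cstdio>
    #include <cstdint>

    typedef long long longlong;   // assumed to match the MariaDB typedef

    int main() {
        int64_t blocknum = 1234567890123LL;
        // Casting to longlong and printing with %lld is portable;
        // a bare %ld is only correct where int64_t happens to be long.
        std::printf("blocknum[%lld]\n", (longlong) blocknum);
        return 0;
    }
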
diff --git a/storage/tokudb/PerconaFT/ft/serialize/ft-serialize.cc b/storage/tokudb/PerconaFT/ft/serialize/ft-serialize.cc index b24d72a5dff..0d6573972d7 100644 --- a/storage/tokudb/PerconaFT/ft/serialize/ft-serialize.cc +++ b/storage/tokudb/PerconaFT/ft/serialize/ft-serialize.cc @@ -656,20 +656,20 @@ exit: fprintf(stderr, \ "%s:%d toku_deserialize_ft_from: " \ "filename[%s] " \ - "r[%d] max_acceptable_lsn[%lu]" \ - "r0[%d] checkpoint_lsn_0[%lu] checkpoint_count_0[%lu] " \ - "r1[%d] checkpoint_lsn_1[%lu] checkpoint_count_1[%lu]\n", \ + "r[%d] max_acceptable_lsn[%llu]" \ + "r0[%d] checkpoint_lsn_0[%llu] checkpoint_count_0[%llu] " \ + "r1[%d] checkpoint_lsn_1[%llu] checkpoint_count_1[%llu]\n", \ __FILE__, \ __LINE__, \ fn, \ r, \ - max_acceptable_lsn.lsn, \ + (ulonglong)max_acceptable_lsn.lsn, \ r0, \ - checkpoint_lsn_0.lsn, \ - checkpoint_count_0, \ + (ulonglong)checkpoint_lsn_0.lsn, \ + (ulonglong)checkpoint_count_0, \ r1, \ - checkpoint_lsn_1.lsn, \ - checkpoint_count_1); + (ulonglong)checkpoint_lsn_1.lsn, \ + (ulonglong)checkpoint_count_1); int toku_deserialize_ft_from(int fd, const char *fn, diff --git a/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc b/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc index 22a562ae24c..f3b31eb31be 100644 --- a/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc +++ b/storage/tokudb/PerconaFT/ft/serialize/ft_node-serialize.cc @@ -1170,11 +1170,11 @@ int verify_ftnode_sub_block(struct sub_block *sb, fprintf( stderr, "%s:%d:verify_ftnode_sub_block - " - "file[%s], blocknum[%ld], stored_xsum[%u] != actual_xsum[%u]\n", + "file[%s], blocknum[%lld], stored_xsum[%u] != actual_xsum[%u]\n", __FILE__, __LINE__, fname ? fname : "unknown", - blocknum.b, + (longlong)blocknum.b, stored_xsum, actual_xsum); dump_bad_block((Bytef *) sb->uncompressed_ptr, sb->uncompressed_size); @@ -1197,11 +1197,11 @@ static int deserialize_ftnode_info(struct sub_block *sb, FTNODE node) { fprintf( stderr, "%s:%d:deserialize_ftnode_info - " - "file[%s], blocknum[%ld], verify_ftnode_sub_block failed with %d\n", + "file[%s], blocknum[%lld], verify_ftnode_sub_block failed with %d\n", __FILE__, __LINE__, fname ? fname : "unknown", - node->blocknum.b, + (longlong)node->blocknum.b, r); dump_bad_block(static_cast<unsigned char *>(sb->uncompressed_ptr), sb->uncompressed_size); @@ -1253,11 +1253,11 @@ static int deserialize_ftnode_info(struct sub_block *sb, FTNODE node) { fprintf( stderr, "%s:%d:deserialize_ftnode_info - " - "file[%s], blocknum[%ld], data_size[%d] != rb.ndone[%d]\n", + "file[%s], blocknum[%lld], data_size[%d] != rb.ndone[%d]\n", __FILE__, __LINE__, fname ? fname : "unknown", - node->blocknum.b, + (longlong)node->blocknum.b, data_size, rb.ndone); dump_bad_block(rb.buf, rb.size); @@ -1388,12 +1388,12 @@ static int deserialize_ftnode_partition( if (r != 0) { fprintf(stderr, "%s:%d:deserialize_ftnode_partition - " - "file[%s], blocknum[%ld], " + "file[%s], blocknum[%lld], " "verify_ftnode_sub_block failed with %d\n", __FILE__, __LINE__, fname ? fname : "unknown", - node->blocknum.b, + (longlong)node->blocknum.b, r); goto exit; } @@ -1410,12 +1410,12 @@ static int deserialize_ftnode_partition( if (ch != FTNODE_PARTITION_MSG_BUFFER) { fprintf(stderr, "%s:%d:deserialize_ftnode_partition - " - "file[%s], blocknum[%ld], ch[%d] != " + "file[%s], blocknum[%lld], ch[%d] != " "FTNODE_PARTITION_MSG_BUFFER[%d]\n", __FILE__, __LINE__, fname ? 
fname : "unknown", - node->blocknum.b, + (longlong)node->blocknum.b, ch, FTNODE_PARTITION_MSG_BUFFER); dump_bad_block(rb.buf, rb.size); @@ -1433,12 +1433,12 @@ static int deserialize_ftnode_partition( if (ch != FTNODE_PARTITION_DMT_LEAVES) { fprintf(stderr, "%s:%d:deserialize_ftnode_partition - " - "file[%s], blocknum[%ld], ch[%d] != " + "file[%s], blocknum[%lld], ch[%d] != " "FTNODE_PARTITION_DMT_LEAVES[%d]\n", __FILE__, __LINE__, fname ? fname : "unknown", - node->blocknum.b, + (longlong)node->blocknum.b, ch, FTNODE_PARTITION_DMT_LEAVES); dump_bad_block(rb.buf, rb.size); @@ -1457,11 +1457,11 @@ static int deserialize_ftnode_partition( if (rb.ndone != rb.size) { fprintf(stderr, "%s:%d:deserialize_ftnode_partition - " - "file[%s], blocknum[%ld], rb.ndone[%d] != rb.size[%d]\n", + "file[%s], blocknum[%lld], rb.ndone[%d] != rb.size[%d]\n", __FILE__, __LINE__, fname ? fname : "unknown", - node->blocknum.b, + (longlong)node->blocknum.b, rb.ndone, rb.size); dump_bad_block(rb.buf, rb.size); @@ -1485,12 +1485,12 @@ static int decompress_and_deserialize_worker(struct rbuf curr_rbuf, const char *fname = toku_ftnode_get_cachefile_fname_in_env(node); fprintf(stderr, "%s:%d:decompress_and_deserialize_worker - " - "file[%s], blocknum[%ld], read_and_decompress_sub_block failed " + "file[%s], blocknum[%lld], read_and_decompress_sub_block failed " "with %d\n", __FILE__, __LINE__, fname ? fname : "unknown", - node->blocknum.b, + (longlong)node->blocknum.b, r); dump_bad_block(curr_rbuf.buf, curr_rbuf.size); goto exit; @@ -1502,12 +1502,12 @@ static int decompress_and_deserialize_worker(struct rbuf curr_rbuf, const char *fname = toku_ftnode_get_cachefile_fname_in_env(node); fprintf(stderr, "%s:%d:decompress_and_deserialize_worker - " - "file[%s], blocknum[%ld], deserialize_ftnode_partition failed " + "file[%s], blocknum[%lld], deserialize_ftnode_partition failed " "with %d\n", __FILE__, __LINE__, fname ? fname : "unknown", - node->blocknum.b, + (longlong)node->blocknum.b, r); dump_bad_block(curr_rbuf.buf, curr_rbuf.size); goto exit; @@ -1582,11 +1582,11 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough( fprintf( stderr, "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - " - "file[%s], blocknum[%ld], rb->size[%u] < 24\n", + "file[%s], blocknum[%lld], rb->size[%u] < 24\n", __FILE__, __LINE__, fname ? fname : "unknown", - blocknum.b, + (longlong)blocknum.b, rb->size); dump_bad_block(rb->buf, rb->size); // TODO: What error do we return here? @@ -1602,12 +1602,12 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough( fprintf( stderr, "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - " - "file[%s], blocknum[%ld], unrecognized magic number " + "file[%s], blocknum[%lld], unrecognized magic number " "%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", __FILE__, __LINE__, fname ? fname : "unknown", - blocknum.b, + (longlong)blocknum.b, static_cast<const uint8_t*>(magic)[0], static_cast<const uint8_t*>(magic)[1], static_cast<const uint8_t*>(magic)[2], @@ -1627,12 +1627,12 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough( fprintf( stderr, "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - " - "file[%s], blocknum[%ld], node->layout_version_read_from_disk[%d] " + "file[%s], blocknum[%lld], node->layout_version_read_from_disk[%d] " "< FT_FIRST_LAYOUT_VERSION_WITH_BASEMENT_NODES[%d]\n", __FILE__, __LINE__, fname ? 
fname : "unknown", - blocknum.b, + (longlong)blocknum.b, node->layout_version_read_from_disk, FT_FIRST_LAYOUT_VERSION_WITH_BASEMENT_NODES); dump_bad_block(rb->buf, rb->size); @@ -1667,11 +1667,11 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough( fprintf( stderr, "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - " - "file[%s], blocknum[%ld], needed_size[%d] > rb->size[%d]\n", + "file[%s], blocknum[%lld], needed_size[%d] > rb->size[%d]\n", __FILE__, __LINE__, fname ? fname : "unknown", - blocknum.b, + (longlong)blocknum.b, needed_size, rb->size); dump_bad_block(rb->buf, rb->size); @@ -1695,11 +1695,11 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough( fprintf( stderr, "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - " - "file[%s], blocknum[%ld], stored_checksum[%d] != checksum[%d]\n", + "file[%s], blocknum[%lld], stored_checksum[%d] != checksum[%d]\n", __FILE__, __LINE__, fname ? fname : "unknown", - blocknum.b, + (longlong)blocknum.b, stored_checksum, checksum); dump_bad_block(rb->buf, rb->size); @@ -1717,12 +1717,12 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough( fprintf( stderr, "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - " - "file[%s], blocknum[%ld], rb->size[%d] - rb->ndone[%d] < " + "file[%s], blocknum[%lld], rb->size[%d] - rb->ndone[%d] < " "sb_node_info.compressed_size[%d] + 8\n", __FILE__, __LINE__, fname ? fname : "unknown", - blocknum.b, + (longlong)blocknum.b, rb->size, rb->ndone, sb_node_info.compressed_size); @@ -1744,11 +1744,11 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough( fprintf( stderr, "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - " - "file[%s], blocknum[%ld], sb_node_info.xsum[%d] != actual_xsum[%d]\n", + "file[%s], blocknum[%lld], sb_node_info.xsum[%d] != actual_xsum[%d]\n", __FILE__, __LINE__, fname ? fname : "unknown", - blocknum.b, + (longlong)blocknum.b, sb_node_info.xsum, actual_xsum); dump_bad_block(rb->buf, rb->size); @@ -1774,12 +1774,12 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough( fprintf( stderr, "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - " - "file[%s], blocknum[%ld], deserialize_ftnode_info failed with " + "file[%s], blocknum[%lld], deserialize_ftnode_info failed with " "%d\n", __FILE__, __LINE__, fname ? fname : "unknown", - blocknum.b, + (longlong)blocknum.b, r); dump_bad_block( static_cast<unsigned char *>(sb_node_info.uncompressed_ptr), @@ -1812,12 +1812,12 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough( fprintf( stderr, "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - " - "file[%s], blocknum[%ld], toku_ftnode_pf_callback failed with " + "file[%s], blocknum[%lld], toku_ftnode_pf_callback failed with " "%d\n", __FILE__, __LINE__, fname ? fname : "unknown", - blocknum.b, + (longlong)blocknum.b, r); dump_bad_block(rb->buf, rb->size); goto cleanup; @@ -2164,12 +2164,12 @@ static int deserialize_and_upgrade_ftnode(FTNODE node, const char* fname = toku_cachefile_fname_in_env(bfe->ft->cf); fprintf(stderr, "%s:%d:deserialize_and_upgrade_ftnode - " - "file[%s], blocknum[%ld], " + "file[%s], blocknum[%lld], " "read_and_decompress_block_from_fd_into_rbuf failed with %d\n", __FILE__, __LINE__, fname ? 
fname : "unknown", - blocknum.b, + (longlong)blocknum.b, r); goto exit; } @@ -2190,12 +2190,12 @@ static int deserialize_and_upgrade_ftnode(FTNODE node, const char* fname = toku_cachefile_fname_in_env(bfe->ft->cf); fprintf(stderr, "%s:%d:deserialize_and_upgrade_ftnode - " - "file[%s], blocknum[%ld], version[%d] > " + "file[%s], blocknum[%lld], version[%d] > " "FT_LAYOUT_VERSION_14[%d]\n", __FILE__, __LINE__, fname ? fname : "unknown", - blocknum.b, + (longlong)blocknum.b, version, FT_LAYOUT_VERSION_14); dump_bad_block(rb.buf, rb.size); @@ -2278,12 +2278,12 @@ static int deserialize_ftnode_from_rbuf(FTNODE *ftnode, memcmp(magic, "tokunode", 8) != 0) { fprintf(stderr, "%s:%d:deserialize_ftnode_from_rbuf - " - "file[%s], blocknum[%ld], unrecognized magic number " + "file[%s], blocknum[%lld], unrecognized magic number " "%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n", __FILE__, __LINE__, fname ? fname : "unknown", - blocknum.b, + (longlong)blocknum.b, static_cast<const uint8_t *>(magic)[0], static_cast<const uint8_t *>(magic)[1], static_cast<const uint8_t *>(magic)[2], @@ -2309,12 +2309,12 @@ static int deserialize_ftnode_from_rbuf(FTNODE *ftnode, if (r != 0) { fprintf(stderr, "%s:%d:deserialize_ftnode_from_rbuf - " - "file[%s], blocknum[%ld], deserialize_and_upgrade_ftnode " + "file[%s], blocknum[%lld], deserialize_and_upgrade_ftnode " "failed with %d\n", __FILE__, __LINE__, fname ? fname : "unknown", - blocknum.b, + (longlong)blocknum.b, r); dump_bad_block(rb->buf, rb->size); goto cleanup; @@ -2355,11 +2355,11 @@ static int deserialize_ftnode_from_rbuf(FTNODE *ftnode, fprintf( stderr, "%s:%d:deserialize_ftnode_from_rbuf - " - "file[%s], blocknum[%ld], stored_checksum[%d] != checksum[%d]\n", + "file[%s], blocknum[%lld], stored_checksum[%d] != checksum[%d]\n", __FILE__, __LINE__, fname ? fname : "unknown", - blocknum.b, + (longlong)blocknum.b, stored_checksum, checksum); dump_bad_block(rb->buf, rb->size); @@ -2377,12 +2377,12 @@ static int deserialize_ftnode_from_rbuf(FTNODE *ftnode, fprintf( stderr, "%s:%d:deserialize_ftnode_from_rbuf - " - "file[%s], blocknum[%ld], read_and_decompress_sub_block failed " + "file[%s], blocknum[%lld], read_and_decompress_sub_block failed " "with %d\n", __FILE__, __LINE__, fname ? fname : "unknown", - blocknum.b, + (longlong)blocknum.b, r); dump_bad_block( static_cast<unsigned char *>(sb_node_info.uncompressed_ptr), @@ -2398,12 +2398,12 @@ static int deserialize_ftnode_from_rbuf(FTNODE *ftnode, fprintf( stderr, "%s:%d:deserialize_ftnode_from_rbuf - " - "file[%s], blocknum[%ld], deserialize_ftnode_info failed with " + "file[%s], blocknum[%lld], deserialize_ftnode_info failed with " "%d\n", __FILE__, __LINE__, fname ? fname : "unknown", - blocknum.b, + (longlong)blocknum.b, r); dump_bad_block(rb->buf, rb->size); goto cleanup; @@ -2470,12 +2470,12 @@ static int deserialize_ftnode_from_rbuf(FTNODE *ftnode, fprintf( stderr, "%s:%d:deserialize_ftnode_from_rbuf - " - "file[%s], blocknum[%ld], childnum[%d], " + "file[%s], blocknum[%lld], childnum[%d], " "decompress_and_deserialize_worker failed with %d\n", __FILE__, __LINE__, fname ? fname : "unknown", - blocknum.b, + (longlong)blocknum.b, i, r); dump_bad_block(rb->buf, rb->size); @@ -2490,13 +2490,13 @@ static int deserialize_ftnode_from_rbuf(FTNODE *ftnode, fprintf( stderr, "%s:%d:deserialize_ftnode_from_rbuf - " - "file[%s], blocknum[%ld], childnum[%d], " + "file[%s], blocknum[%lld], childnum[%d], " "check_and_copy_compressed_sub_block_worker failed with " "%d\n", __FILE__, __LINE__, fname ? 
fname : "unknown", - blocknum.b, + (longlong)blocknum.b, i, r); dump_bad_block(rb->buf, rb->size); @@ -2641,12 +2641,12 @@ int toku_deserialize_bp_from_compressed(FTNODE node, const char* fname = toku_cachefile_fname_in_env(bfe->ft->cf); fprintf(stderr, "%s:%d:toku_deserialize_bp_from_compressed - " - "file[%s], blocknum[%ld], " + "file[%s], blocknum[%lld], " "deserialize_ftnode_partition failed with %d\n", __FILE__, __LINE__, fname ? fname : "unknown", - node->blocknum.b, + (longlong)node->blocknum.b, r); dump_bad_block(static_cast<unsigned char *>(curr_sb->compressed_ptr), curr_sb->compressed_size); @@ -2689,12 +2689,12 @@ static int deserialize_ftnode_from_fd(int fd, fprintf( stderr, "%s:%d:deserialize_ftnode_from_fd - " - "file[%s], blocknum[%ld], deserialize_ftnode_from_rbuf failed with " + "file[%s], blocknum[%lld], deserialize_ftnode_from_rbuf failed with " "%d\n", __FILE__, __LINE__, fname ? fname : "unknown", - blocknum.b, + (longlong)blocknum.b, r); dump_bad_block(rb.buf, rb.size); } diff --git a/storage/tokudb/PerconaFT/portability/memory.h b/storage/tokudb/PerconaFT/portability/memory.h index 5ae652d39fc..851e4d69e03 100644 --- a/storage/tokudb/PerconaFT/portability/memory.h +++ b/storage/tokudb/PerconaFT/portability/memory.h @@ -107,7 +107,7 @@ size_t toku_malloc_usable_size(void *p) __attribute__((__visibility__("default") #define XMALLOC(v) CAST_FROM_VOIDP(v, toku_xmalloc(sizeof(*v))) #define XMALLOC_N(n,v) CAST_FROM_VOIDP(v, toku_xmalloc((n)*sizeof(*v))) #define XCALLOC_N(n,v) CAST_FROM_VOIDP(v, toku_xcalloc((n), (sizeof(*v)))) -#define XCALLOC(v) XCALLOC_N(1,(v)) +#define XCALLOC(v) XCALLOC_N(1,v) #define XREALLOC(v,s) CAST_FROM_VOIDP(v, toku_xrealloc(v, s)) #define XREALLOC_N(n,v) CAST_FROM_VOIDP(v, toku_xrealloc(v, (n)*sizeof(*v))) diff --git a/storage/tokudb/PerconaFT/portability/toku_debug_sync.h b/storage/tokudb/PerconaFT/portability/toku_debug_sync.h index b5394e58d68..493075c36c3 100644 --- a/storage/tokudb/PerconaFT/portability/toku_debug_sync.h +++ b/storage/tokudb/PerconaFT/portability/toku_debug_sync.h @@ -62,9 +62,6 @@ inline void toku_debug_sync(struct tokutxn *txn, const char *sync_point_name) { void *client_extra; THD *thd; - if (likely(!opt_debug_sync_timeout)) - return; - toku_txn_get_client_id(txn, &client_id, &client_extra); thd = reinterpret_cast<THD *>(client_extra); DEBUG_SYNC(thd, sync_point_name); diff --git a/storage/tokudb/PerconaFT/portability/toku_pthread.h b/storage/tokudb/PerconaFT/portability/toku_pthread.h index e3bd3bce598..a0dfcc246a7 100644 --- a/storage/tokudb/PerconaFT/portability/toku_pthread.h +++ b/storage/tokudb/PerconaFT/portability/toku_pthread.h @@ -162,10 +162,20 @@ typedef struct toku_mutex_aligned { #define ZERO_COND_INITIALIZER \ { 0 } #elif defined(__APPLE__) +#if TOKU_PTHREAD_DEBUG +#define ZERO_COND_INITIALIZER \ + { \ + { 0 , { 0 } }, \ + nullptr, \ + 0 \ + } +#else #define ZERO_COND_INITIALIZER \ { \ - { 0 } \ + { 0 , { 0 } }, \ + nullptr \ } +#endif #else // __linux__, at least #define ZERO_COND_INITIALIZER \ {} diff --git a/storage/tokudb/PerconaFT/src/CMakeLists.txt b/storage/tokudb/PerconaFT/src/CMakeLists.txt index 65bf4814cf8..bae37389004 100644 --- a/storage/tokudb/PerconaFT/src/CMakeLists.txt +++ b/storage/tokudb/PerconaFT/src/CMakeLists.txt @@ -18,7 +18,7 @@ set(tokudb_srcs ## make the shared library add_library(${LIBTOKUDB} SHARED ${tokudb_srcs}) add_dependencies(${LIBTOKUDB} install_tdb_h generate_log_code) -target_link_libraries(${LIBTOKUDB} LINK_PRIVATE locktree_static ft_static util_static lzma snappy 
${LIBTOKUPORTABILITY}) +target_link_libraries(${LIBTOKUDB} LINK_PRIVATE locktree_static ft_static util_static lzma snappy dbug ${LIBTOKUPORTABILITY}) target_link_libraries(${LIBTOKUDB} LINK_PUBLIC ${ZLIB_LIBRARY} ) ## make the static library diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/autogen.sh b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/autogen.sh index f0195ecadd3..f0195ecadd3 100644..100755 --- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/autogen.sh +++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/autogen.sh diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/compile b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/compile index ec64c622026..ec64c622026 100644..100755 --- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/compile +++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/compile diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.guess b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.guess index 7501b1bee01..7501b1bee01 100644..100755 --- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.guess +++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.guess diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.rpath b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.rpath index c492a93b663..c492a93b663 100644..100755 --- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.rpath +++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.rpath diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.sub b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.sub index a39437d0158..a39437d0158 100644..100755 --- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.sub +++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.sub diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/depcomp b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/depcomp index df8eea7e4ce..df8eea7e4ce 100644..100755 --- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/depcomp +++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/depcomp diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/install-sh b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/install-sh index 6781b987bdb..6781b987bdb 100644..100755 --- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/install-sh +++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/install-sh diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/ltmain.sh b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/ltmain.sh index b36c4ad366c..b36c4ad366c 100644..100755 --- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/ltmain.sh +++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/ltmain.sh diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/missing b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/missing index 28055d2ae6f..28055d2ae6f 100644..100755 --- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/missing +++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/missing diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/extra/7z2lzma/7z2lzma.bash 
b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/extra/7z2lzma/7z2lzma.bash index 35ea4dae973..35ea4dae973 100644..100755 --- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/extra/7z2lzma/7z2lzma.bash +++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/extra/7z2lzma/7z2lzma.bash diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_compress.sh b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_compress.sh index ff0cb304df4..ff0cb304df4 100644..100755 --- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_compress.sh +++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_compress.sh diff --git a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_files.sh b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_files.sh index 7dd9a3901bf..7dd9a3901bf 100644..100755 --- a/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_files.sh +++ b/storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_files.sh diff --git a/storage/tokudb/PerconaFT/util/dmt.cc b/storage/tokudb/PerconaFT/util/dmt.cc index b5b94982487..642c9367d7e 100644 --- a/storage/tokudb/PerconaFT/util/dmt.cc +++ b/storage/tokudb/PerconaFT/util/dmt.cc @@ -80,8 +80,8 @@ void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::create_from_sorted_memory_of_fix paranoid_invariant(numvalues > 0); void *ptr = toku_mempool_malloc(&this->mp, aligned_memsize); paranoid_invariant_notnull(ptr); - uint8_t * const CAST_FROM_VOIDP(dest, ptr); - const uint8_t * const CAST_FROM_VOIDP(src, mem); + uint8_t * CAST_FROM_VOIDP(dest, ptr); + const uint8_t * CAST_FROM_VOIDP(src, mem); if (pad_bytes == 0) { paranoid_invariant(aligned_memsize == mem_length); memcpy(dest, src, aligned_memsize); diff --git a/storage/tokudb/PerconaFT/util/omt.h b/storage/tokudb/PerconaFT/util/omt.h index c7ed2ca546f..36946401381 100644 --- a/storage/tokudb/PerconaFT/util/omt.h +++ b/storage/tokudb/PerconaFT/util/omt.h @@ -127,7 +127,7 @@ public: paranoid_invariant(index != NODE_NULL); m_index = index; } -} __attribute__((__packed__,aligned(4))); +} ; template<> class subtree_templated<true> { @@ -184,7 +184,7 @@ public: inline void disable_bit(void) { m_bitfield &= MASK_INDEX; } -} __attribute__((__packed__)) ; +} ; template<typename omtdata_t, bool subtree_supports_marks> class omt_node_templated { @@ -197,7 +197,7 @@ public: // this needs to be in both implementations because we don't have // a "static if" the caller can use inline void clear_stolen_bits(void) {} -} __attribute__((__packed__,aligned(4))); +} ; template<typename omtdata_t> class omt_node_templated<omtdata_t, true> { @@ -234,7 +234,7 @@ public: this->unset_marked_bit(); this->unset_marks_below_bit(); } -} __attribute__((__packed__,aligned(4))); +} ; } diff --git a/storage/tokudb/tokudb_thread.h b/storage/tokudb/tokudb_thread.h index dec58f3fd35..5df0159901f 100644 --- a/storage/tokudb/tokudb_thread.h +++ b/storage/tokudb/tokudb_thread.h @@ -111,7 +111,6 @@ public: // wait for the event to become signalled void wait(void); - int wait(ulonglong microseconds); // signal the event void signal(void); @@ -152,7 +151,6 @@ public: // wait for the semaphore to become signalled E_WAIT wait(void); - E_WAIT wait(ulonglong microseconds); // signal the semaphore to increase the count // return true if signalled, false if ignored due to count @@ -372,28 +370,6 @@ inline void event_t::wait(void) { assert_debug(r == 0); return; } -inline int event_t::wait(ulonglong microseconds) { - timespec waittime = 
time::offset_timespec(microseconds); - int r = pthread_mutex_timedlock(&_mutex, &waittime); - if (r == ETIMEDOUT) return ETIMEDOUT; - assert_debug(r == 0); - while (_signalled == false && _pulsed == false) { - r = pthread_cond_timedwait(&_cond, &_mutex, &waittime); - if (r == ETIMEDOUT) { - r = pthread_mutex_unlock(&_mutex); - assert_debug(r == 0); - return ETIMEDOUT; - } - assert_debug(r == 0); - } - if (_manual_reset == false) - _signalled = false; - if (_pulsed) - _pulsed = false; - r = pthread_mutex_unlock(&_mutex); - assert_debug(r == 0); - return 0; -} inline void event_t::signal(void) { int r MY_ATTRIBUTE((unused)) = pthread_mutex_lock(&_mutex); assert_debug(r == 0); @@ -479,31 +455,6 @@ inline semaphore_t::E_WAIT semaphore_t::wait(void) { assert_debug(r == 0); return ret; } -inline semaphore_t::E_WAIT semaphore_t::wait(ulonglong microseconds) { - E_WAIT ret; - timespec waittime = time::offset_timespec(microseconds); - int r = pthread_mutex_timedlock(&_mutex, &waittime); - if (r == ETIMEDOUT) return E_TIMEDOUT; - assert_debug(r == 0); - while (_signalled == 0 && _interrupted == false) { - r = pthread_cond_timedwait(&_cond, &_mutex, &waittime); - if (r == ETIMEDOUT) { - r = pthread_mutex_unlock(&_mutex); - assert_debug(r == 0); - return E_TIMEDOUT; - } - assert_debug(r == 0); - } - if (_interrupted) { - ret = E_INTERRUPTED; - } else { - _signalled--; - ret = E_SIGNALLED; - } - r = pthread_mutex_unlock(&_mutex); - assert_debug(r == 0); - return ret; -} inline bool semaphore_t::signal(void) { bool ret = false; int r MY_ATTRIBUTE((unused)) = pthread_mutex_lock(&_mutex); diff --git a/storage/xtradb/buf/buf0buf.cc b/storage/xtradb/buf/buf0buf.cc index b49ee86f78f..ce4dd4715db 100644 --- a/storage/xtradb/buf/buf0buf.cc +++ b/storage/xtradb/buf/buf0buf.cc @@ -77,15 +77,6 @@ Created 11/5/1995 Heikki Tuuri #include "snappy-c.h" #endif -/** Decrypt a page. -@param[in,out] bpage Page control block -@param[in,out] space tablespace -@return whether the operation was successful */ -static -bool -buf_page_decrypt_after_read(buf_page_t* bpage, fil_space_t* space) - MY_ATTRIBUTE((nonnull)); - /********************************************************************//** Mark a table with the specified space pointed by bpage->space corrupted. Also remove the bpage from LRU list. @@ -390,6 +381,143 @@ on the io_type */ ? (counter##_READ) \ : (counter##_WRITTEN)) + +/** Reserve a buffer slot for encryption, decryption or page compression. +@param[in,out] buf_pool buffer pool +@return reserved buffer slot */ +static buf_tmp_buffer_t* buf_pool_reserve_tmp_slot(buf_pool_t* buf_pool) +{ + for (ulint i = 0; i < buf_pool->tmp_arr->n_slots; i++) { + buf_tmp_buffer_t* slot = &buf_pool->tmp_arr->slots[i]; + if (slot->acquire()) { + return slot; + } + } + + /* We assume that free slot is found */ + ut_error; + return NULL; +} + +/** Reserve a buffer for encryption, decryption or decompression. +@param[in,out] slot reserved slot */ +static void buf_tmp_reserve_crypt_buf(buf_tmp_buffer_t* slot) +{ + if (!slot->crypt_buf) { + slot->crypt_buf = static_cast<byte*>( + aligned_malloc(srv_page_size, srv_page_size)); + } +} + +/** Reserve a buffer for compression. +@param[in,out] slot reserved slot */ +static void buf_tmp_reserve_compression_buf(buf_tmp_buffer_t* slot) +{ + if (!slot->comp_buf) { + /* Both snappy and lzo compression methods require that + output buffer used for compression is bigger than input + buffer. Increase the allocated buffer size accordingly. 
*/ + ulint size = srv_page_size; +#ifdef HAVE_LZO + size += LZO1X_1_15_MEM_COMPRESS; +#elif defined HAVE_SNAPPY + size = snappy_max_compressed_length(size); +#endif + slot->comp_buf = static_cast<byte*>( + aligned_malloc(size, srv_page_size)); + } +} + +/** Decrypt a page. +@param[in,out] bpage Page control block +@param[in,out] space tablespace +@return whether the operation was successful */ +static bool buf_page_decrypt_after_read(buf_page_t* bpage, fil_space_t* space) +{ + ut_ad(space->n_pending_ios > 0); + ut_ad(space->id == bpage->space); + + byte* dst_frame = bpage->zip.data ? bpage->zip.data : + ((buf_block_t*) bpage)->frame; + bool page_compressed = fil_page_is_compressed(dst_frame); + buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); + + if (bpage->offset == 0) { + /* File header pages are not encrypted/compressed */ + return true; + } + + /* Page is encrypted if encryption information is found from + tablespace and page contains used key_version. This is true + also for pages first compressed and then encrypted. */ + + buf_tmp_buffer_t* slot; + + if (page_compressed) { + /* the page we read is unencrypted */ + /* Find free slot from temporary memory array */ +decompress: + slot = buf_pool_reserve_tmp_slot(buf_pool); + /* For decompression, use crypt_buf. */ + buf_tmp_reserve_crypt_buf(slot); +decompress_with_slot: + ut_d(fil_page_type_validate(dst_frame)); + + bpage->write_size = fil_page_decompress(slot->crypt_buf, + dst_frame); + slot->release(); + + ut_ad(!bpage->write_size || fil_page_type_validate(dst_frame)); + ut_ad(space->n_pending_ios > 0); + return bpage->write_size != 0; + } + + if (space->crypt_data + && mach_read_from_4(FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + + dst_frame)) { + /* Verify encryption checksum before we even try to + decrypt. */ + if (!fil_space_verify_crypt_checksum( + dst_frame, buf_page_get_zip_size(bpage), NULL, + bpage->offset)) { +decrypt_failed: + /* Mark page encrypted in case it should be. */ + if (space->crypt_data->type + != CRYPT_SCHEME_UNENCRYPTED) { + bpage->encrypted = true; + } + + return false; + } + + /* Find free slot from temporary memory array */ + slot = buf_pool_reserve_tmp_slot(buf_pool); + buf_tmp_reserve_crypt_buf(slot); + + ut_d(fil_page_type_validate(dst_frame)); + + /* decrypt using crypt_buf to dst_frame */ + if (!fil_space_decrypt(space, slot->crypt_buf, + dst_frame, &bpage->encrypted)) { + slot->release(); + goto decrypt_failed; + } + + ut_d(fil_page_type_validate(dst_frame)); + + if (fil_page_is_compressed_encrypted(dst_frame)) { + goto decompress_with_slot; + } + + slot->release(); + } else if (fil_page_is_compressed_encrypted(dst_frame)) { + goto decompress; + } + + ut_ad(space->n_pending_ios > 0); + return true; +} + /********************************************************************//** Gets the smallest oldest_modification lsn for any page in the pool. Returns zero if all modified pages have been flushed to disk. 
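
[Note on the new buf_pool_reserve_tmp_slot() above: it claims a slot via a per-slot acquire()/release() pair instead of flipping slot->reserved under the buffer-pool mutex, as the removed version further down did. The buf_tmp_buffer_t definition is not part of this diff, so the sketch below only shows one plausible lock-free shape for such an acquire/release API, using an atomic flag; member and function names are illustrative assumptions:]

    #include <atomic>
    #include <cstddef>

    // Hypothetical stand-in for buf_tmp_buffer_t; the real definition is not
    // shown in this diff, so the member names here are illustrative only.
    struct tmp_slot {
        std::atomic<bool> reserved{false};
        unsigned char*    crypt_buf{nullptr};
        unsigned char*    comp_buf{nullptr};

        // Try to claim the slot without holding any buffer-pool mutex.
        bool acquire() {
            bool expected = false;
            return reserved.compare_exchange_strong(expected, true);
        }
        // Hand the slot back once decryption/decompression is done.
        void release() { reserved.store(false); }
    };

    // Linear scan over a small slot array, as in buf_pool_reserve_tmp_slot().
    tmp_slot* reserve_slot(tmp_slot* slots, std::size_t n_slots) {
        for (std::size_t i = 0; i < n_slots; i++) {
            if (slots[i].acquire()) {
                return &slots[i];
            }
        }
        return nullptr;   // the caller asserts a free slot always exists
    }
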
@@ -4696,9 +4824,9 @@ buf_page_check_corrupt(buf_page_t* bpage, fil_space_t* space) ib_logf(IB_LOG_LEVEL_ERROR, "The page [page id: space=%u" ", page number=%u]" - " in file %s cannot be decrypted.", + " in file '%s' cannot be decrypted.", bpage->space, bpage->offset, - space->name); + space->chain.start->name); ib_logf(IB_LOG_LEVEL_INFO, "However key management plugin or used key_version " ULINTPF @@ -4733,7 +4861,6 @@ buf_page_io_complete(buf_page_t* bpage) const ibool uncompressed = (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE); bool have_LRU_mutex = false; - byte* frame = NULL; dberr_t err = DB_SUCCESS; ut_a(buf_page_in_file(bpage)); @@ -4751,19 +4878,18 @@ buf_page_io_complete(buf_page_t* bpage) ulint read_page_no = 0; ulint read_space_id = 0; uint key_version = 0; - - ut_ad(bpage->zip.data || ((buf_block_t*)bpage)->frame); + byte* frame = bpage->zip.data + ? bpage->zip.data + : reinterpret_cast<buf_block_t*>(bpage)->frame; + ut_ad(frame); fil_space_t* space = fil_space_acquire_for_io(bpage->space); if (!space) { return(DB_TABLESPACE_DELETED); } - buf_page_decrypt_after_read(bpage, space); - - if (buf_page_get_zip_size(bpage)) { - frame = bpage->zip.data; - } else { - frame = ((buf_block_t*) bpage)->frame; + if (!buf_page_decrypt_after_read(bpage, space)) { + err = DB_DECRYPTION_FAILED; + goto database_corrupted; } if (buf_page_get_zip_size(bpage)) { @@ -4944,7 +5070,7 @@ database_corrupted: /* io_type == BUF_IO_WRITE */ if (bpage->slot) { /* Mark slot free */ - bpage->slot->reserved = false; + bpage->slot->release(); bpage->slot = NULL; } } @@ -6243,66 +6369,6 @@ buf_pool_mutex_exit( mutex_exit(&buf_pool->LRU_list_mutex); } -/********************************************************************//** -Reserve unused slot from temporary memory array and allocate necessary -temporary memory if not yet allocated. -@return reserved slot */ -UNIV_INTERN -buf_tmp_buffer_t* -buf_pool_reserve_tmp_slot( -/*======================*/ - buf_pool_t* buf_pool, /*!< in: buffer pool where to - reserve */ - bool compressed) /*!< in: is file space compressed */ -{ - buf_tmp_buffer_t *free_slot=NULL; - - /* Array is protected by buf_pool mutex */ - buf_pool_mutex_enter(buf_pool); - - for(ulint i = 0; i < buf_pool->tmp_arr->n_slots; i++) { - buf_tmp_buffer_t *slot = &buf_pool->tmp_arr->slots[i]; - - if(slot->reserved == false) { - free_slot = slot; - break; - } - } - - /* We assume that free slot is found */ - ut_a(free_slot != NULL); - free_slot->reserved = true; - /* Now that we have reserved this slot we can release - buf_pool mutex */ - buf_pool_mutex_exit(buf_pool); - - /* Allocate temporary memory for encryption/decryption */ - if (free_slot->crypt_buf == NULL) { - free_slot->crypt_buf = static_cast<byte*>(aligned_malloc(UNIV_PAGE_SIZE, UNIV_PAGE_SIZE)); - memset(free_slot->crypt_buf, 0, UNIV_PAGE_SIZE); - } - - /* For page compressed tables allocate temporary memory for - compression/decompression */ - if (compressed && free_slot->comp_buf == NULL) { - ulint size = UNIV_PAGE_SIZE; - - /* Both snappy and lzo compression methods require that - output buffer used for compression is bigger than input - buffer. Increase the allocated buffer size accordingly. 
*/ -#if HAVE_SNAPPY - size = snappy_max_compressed_length(size); -#endif -#if HAVE_LZO - size += LZO1X_1_15_MEM_COMPRESS; -#endif - free_slot->comp_buf = static_cast<byte*>(aligned_malloc(size, UNIV_PAGE_SIZE)); - memset(free_slot->comp_buf, 0, size); - } - - return (free_slot); -} - /** Encryption and page_compression hook that is called just before a page is written to disk. @param[in,out] space tablespace @@ -6352,16 +6418,18 @@ buf_page_encrypt_before_write( } ulint zip_size = buf_page_get_zip_size(bpage); - ulint page_size = (zip_size) ? zip_size : UNIV_PAGE_SIZE; + ut_ad(!zip_size || !page_compressed); buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); /* Find free slot from temporary memory array */ - buf_tmp_buffer_t* slot = buf_pool_reserve_tmp_slot(buf_pool, page_compressed); + buf_tmp_buffer_t* slot = buf_pool_reserve_tmp_slot(buf_pool); slot->out_buf = NULL; bpage->slot = slot; + buf_tmp_reserve_crypt_buf(slot); byte *dst_frame = slot->crypt_buf; if (!page_compressed) { +not_compressed: /* Encrypt page content */ byte* tmp = fil_space_encrypt(space, bpage->offset, @@ -6369,32 +6437,28 @@ buf_page_encrypt_before_write( src_frame, dst_frame); - bpage->real_size = page_size; + bpage->real_size = UNIV_PAGE_SIZE; slot->out_buf = dst_frame = tmp; ut_d(fil_page_type_validate(tmp)); } else { /* First we compress the page content */ - ulint out_len = 0; - - byte *tmp = fil_compress_page( - space, - (byte *)src_frame, - slot->comp_buf, - page_size, + buf_tmp_reserve_compression_buf(slot); + byte* tmp = slot->comp_buf; + ulint out_len = fil_page_compress( + src_frame, tmp, fsp_flags_get_page_compression_level(space->flags), fil_space_get_block_size(space, bpage->offset), - encrypted, - &out_len); + encrypted); + if (!out_len) { + goto not_compressed; + } bpage->real_size = out_len; -#ifdef UNIV_DEBUG - fil_page_type_validate(tmp); -#endif - - if(encrypted) { + ut_d(fil_page_type_validate(tmp)); + if (encrypted) { /* And then we encrypt the page content */ tmp = fil_space_encrypt(space, bpage->offset, @@ -6406,130 +6470,8 @@ buf_page_encrypt_before_write( slot->out_buf = dst_frame = tmp; } -#ifdef UNIV_DEBUG - fil_page_type_validate(dst_frame); -#endif + ut_d(fil_page_type_validate(dst_frame)); // return dst_frame which will be written return dst_frame; } - -/** Decrypt a page. -@param[in,out] bpage Page control block -@param[in,out] space tablespace -@return whether the operation was successful */ -static -bool -buf_page_decrypt_after_read(buf_page_t* bpage, fil_space_t* space) -{ - ut_ad(space->n_pending_ios > 0); - ut_ad(space->id == bpage->space); - - ulint zip_size = buf_page_get_zip_size(bpage); - ulint size = (zip_size) ? zip_size : UNIV_PAGE_SIZE; - - byte* dst_frame = (zip_size) ? bpage->zip.data : - ((buf_block_t*) bpage)->frame; - unsigned key_version = - mach_read_from_4(dst_frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION); - bool page_compressed = fil_page_is_compressed(dst_frame); - bool page_compressed_encrypted = fil_page_is_compressed_encrypted(dst_frame); - buf_pool_t* buf_pool = buf_pool_from_bpage(bpage); - bool success = true; - - if (bpage->offset == 0) { - /* File header pages are not encrypted/compressed */ - return (true); - } - - /* Page is encrypted if encryption information is found from - tablespace and page contains used key_version. This is true - also for pages first compressed and then encrypted. 
*/ - if (!space->crypt_data) { - key_version = 0; - } - - if (page_compressed) { - /* the page we read is unencrypted */ - /* Find free slot from temporary memory array */ - buf_tmp_buffer_t* slot = buf_pool_reserve_tmp_slot(buf_pool, page_compressed); - -#ifdef UNIV_DEBUG - fil_page_type_validate(dst_frame); -#endif - - /* decompress using comp_buf to dst_frame */ - fil_decompress_page(slot->comp_buf, - dst_frame, - ulong(size), - &bpage->write_size); - - /* Mark this slot as free */ - slot->reserved = false; - key_version = 0; - -#ifdef UNIV_DEBUG - fil_page_type_validate(dst_frame); -#endif - } else { - buf_tmp_buffer_t* slot = NULL; - - if (key_version) { - /* Verify encryption checksum before we even try to - decrypt. */ - if (!fil_space_verify_crypt_checksum(dst_frame, - zip_size, NULL, bpage->offset)) { - - /* Mark page encrypted in case it should - be. */ - if (space->crypt_data->type - != CRYPT_SCHEME_UNENCRYPTED) { - bpage->encrypted = true; - } - - return (false); - } - - /* Find free slot from temporary memory array */ - slot = buf_pool_reserve_tmp_slot(buf_pool, page_compressed); - -#ifdef UNIV_DEBUG - fil_page_type_validate(dst_frame); -#endif - - /* decrypt using crypt_buf to dst_frame */ - if (!fil_space_decrypt(space, slot->crypt_buf, - dst_frame, &bpage->encrypted)) { - success = false; - } - -#ifdef UNIV_DEBUG - fil_page_type_validate(dst_frame); -#endif - } - - if (page_compressed_encrypted && success) { - if (!slot) { - slot = buf_pool_reserve_tmp_slot(buf_pool, page_compressed); - } - -#ifdef UNIV_DEBUG - fil_page_type_validate(dst_frame); -#endif - /* decompress using comp_buf to dst_frame */ - fil_decompress_page(slot->comp_buf, - dst_frame, - ulong(size), - &bpage->write_size); - ut_d(fil_page_type_validate(dst_frame)); - } - - /* Mark this slot as free */ - if (slot) { - slot->reserved = false; - } - } - - ut_ad(space->n_pending_ios > 0); - return (success); -} diff --git a/storage/xtradb/buf/buf0dblwr.cc b/storage/xtradb/buf/buf0dblwr.cc index 4995df98165..f9cf710728f 100644 --- a/storage/xtradb/buf/buf0dblwr.cc +++ b/storage/xtradb/buf/buf0dblwr.cc @@ -510,10 +510,11 @@ buf_dblwr_process() "Restoring possible half-written data pages " "from the doublewrite buffer..."); - unaligned_read_buf = static_cast<byte*>(ut_malloc(2 * UNIV_PAGE_SIZE)); + unaligned_read_buf = static_cast<byte*>(ut_malloc(3 * UNIV_PAGE_SIZE)); read_buf = static_cast<byte*>( ut_align(unaligned_read_buf, UNIV_PAGE_SIZE)); + byte* const buf = read_buf + UNIV_PAGE_SIZE; for (std::list<byte*>::iterator i = recv_dblwr.pages.begin(); i != recv_dblwr.pages.end(); ++i, ++page_no_dblwr ) { @@ -562,24 +563,26 @@ buf_dblwr_process() ignore this page (there should be redo log records to initialize it). */ } else { - if (fil_page_is_compressed_encrypted(read_buf) || - fil_page_is_compressed(read_buf)) { - /* Decompress the page before - validating the checksum. */ - fil_decompress_page( - NULL, read_buf, srv_page_size, - NULL, true); + /* Decompress the page before + validating the checksum. */ + ulint decomp = fil_page_decompress(buf, read_buf); + if (!decomp) { + goto bad; + } + if (!decomp || (decomp != srv_page_size && zip_size)) { + goto bad; } if (fil_space_verify_crypt_checksum( - read_buf, zip_size, NULL, page_no) - || !buf_page_is_corrupted( - true, read_buf, zip_size, space())) { + read_buf, zip_size, NULL, page_no) + || !buf_page_is_corrupted( + true, read_buf, zip_size, space())) { /* The page is good; there is no need to consult the doublewrite buffer. 
*/ continue; } +bad: /* We intentionally skip this message for is_all_zero pages. */ ib_logf(IB_LOG_LEVEL_INFO, @@ -588,18 +591,15 @@ buf_dblwr_process() space_id, page_no); } - /* Next, validate the doublewrite page. */ - if (fil_page_is_compressed_encrypted(page) || - fil_page_is_compressed(page)) { - /* Decompress the page before - validating the checksum. */ - fil_decompress_page( - NULL, page, srv_page_size, NULL, true); + ulint decomp = fil_page_decompress(buf, page); + if (!decomp || (decomp != srv_page_size && zip_size)) { + goto bad_doublewrite; } - - if (!fil_space_verify_crypt_checksum(page, zip_size, NULL, page_no) + if (!fil_space_verify_crypt_checksum(page, zip_size, NULL, + page_no) && buf_page_is_corrupted(true, page, zip_size, space)) { if (!is_all_zero) { +bad_doublewrite: ib_logf(IB_LOG_LEVEL_WARN, "A doublewrite copy of page " ULINTPF ":" ULINTPF " is corrupted.", diff --git a/storage/xtradb/fil/fil0crypt.cc b/storage/xtradb/fil/fil0crypt.cc index 93778bf471f..bf7c24ecaae 100644 --- a/storage/xtradb/fil/fil0crypt.cc +++ b/storage/xtradb/fil/fil0crypt.cc @@ -708,60 +708,39 @@ fil_space_encrypt( #ifdef UNIV_DEBUG if (tmp) { /* Verify that encrypted buffer is not corrupted */ - byte* tmp_mem = (byte *)malloc(UNIV_PAGE_SIZE); dberr_t err = DB_SUCCESS; byte* src = src_frame; bool page_compressed_encrypted = (mach_read_from_2(tmp+FIL_PAGE_TYPE) == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED); - byte* comp_mem = NULL; - byte* uncomp_mem = NULL; + byte uncomp_mem[UNIV_PAGE_SIZE_MAX]; + byte tmp_mem[UNIV_PAGE_SIZE_MAX]; ulint size = (zip_size) ? zip_size : UNIV_PAGE_SIZE; if (page_compressed_encrypted) { - comp_mem = (byte *)malloc(UNIV_PAGE_SIZE); - uncomp_mem = (byte *)malloc(UNIV_PAGE_SIZE); - memcpy(comp_mem, src_frame, UNIV_PAGE_SIZE); - fil_decompress_page(uncomp_mem, comp_mem, - srv_page_size, NULL); - src = uncomp_mem; + memcpy(uncomp_mem, src, srv_page_size); + ulint unzipped1 = fil_page_decompress( + tmp_mem, uncomp_mem); + ut_ad(unzipped1); + if (unzipped1 != srv_page_size) { + src = uncomp_mem; + } } - bool corrupted1 = buf_page_is_corrupted(true, src, zip_size, space); - bool ok = fil_space_decrypt(crypt_data, tmp_mem, size, tmp, &err); + ut_ad(!buf_page_is_corrupted(true, src, zip_size, space)); + ut_ad(fil_space_decrypt(crypt_data, tmp_mem, size, tmp, &err)); + ut_ad(err == DB_SUCCESS); /* Need to decompress the page if it was also compressed */ if (page_compressed_encrypted) { - memcpy(comp_mem, tmp_mem, UNIV_PAGE_SIZE); - fil_decompress_page(tmp_mem, comp_mem, - srv_page_size, NULL); + byte buf[UNIV_PAGE_SIZE_MAX]; + memcpy(buf, tmp_mem, srv_page_size); + ulint unzipped2 = fil_page_decompress(tmp_mem, buf); + ut_ad(unzipped2); } - bool corrupted = buf_page_is_corrupted(true, tmp_mem, zip_size, space); - memcpy(tmp_mem+FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, src+FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, 8); - bool different = memcmp(src, tmp_mem, size); - - if (!ok || corrupted || corrupted1 || err != DB_SUCCESS || different) { - fprintf(stderr, "ok %d corrupted %d corrupted1 %d err %d different %d\n", - ok , corrupted, corrupted1, err, different); - fprintf(stderr, "src_frame\n"); - buf_page_print(src_frame, zip_size); - fprintf(stderr, "encrypted_frame\n"); - buf_page_print(tmp, zip_size); - fprintf(stderr, "decrypted_frame\n"); - buf_page_print(tmp_mem, zip_size); - ut_ad(0); - } - - free(tmp_mem); - - if (comp_mem) { - free(comp_mem); - } - - if (uncomp_mem) { - free(uncomp_mem); - } + memcpy(tmp_mem + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, + src + 
FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, 8); + ut_ad(!memcmp(src, tmp_mem, size)); } - #endif /* UNIV_DEBUG */ return tmp; diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc index c656dc37071..4ad572697cd 100644 --- a/storage/xtradb/fil/fil0fil.cc +++ b/storage/xtradb/fil/fil0fil.cc @@ -351,19 +351,6 @@ fil_space_get_by_id( return(space); } -/****************************************************************//** -Get space id from fil node */ -ulint -fil_node_get_space_id( -/*==================*/ - fil_node_t* node) /*!< in: Compressed node*/ -{ - ut_ad(node); - ut_ad(node->space); - - return (node->space->id); -} - /*******************************************************************//** Returns the table space by a given name, NULL if not found. */ fil_space_t* diff --git a/storage/xtradb/fil/fil0pagecompress.cc b/storage/xtradb/fil/fil0pagecompress.cc index edc932f36f5..25cd8e28a91 100644 --- a/storage/xtradb/fil/fil0pagecompress.cc +++ b/storage/xtradb/fil/fil0pagecompress.cc @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (C) 2013, 2017, MariaDB Corporation. All Rights Reserved. +Copyright (C) 2013, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -80,73 +80,26 @@ static ulint srv_data_read, srv_data_written; #include "snappy-c.h" #endif -/* Used for debugging */ -//#define UNIV_PAGECOMPRESS_DEBUG 1 - -/****************************************************************//** -For page compressed pages compress the page before actual write -operation. -@return compressed page to be written*/ -UNIV_INTERN -byte* -fil_compress_page( -/*==============*/ - fil_space_t* space, /*!< in,out: tablespace (NULL during IMPORT) */ - byte* buf, /*!< in: buffer from which to write; in aio - this must be appropriately aligned */ - byte* out_buf, /*!< out: compressed buffer */ - ulint len, /*!< in: length of input buffer.*/ - ulint level, /* in: compression level */ - ulint block_size, /*!< in: block size */ - bool encrypted, /*!< in: is page also encrypted */ - ulint* out_len) /*!< out: actual length of compressed - page */ +/** Compress a page_compressed page before writing to a data file. +@param[in] buf page to be compressed +@param[out] out_buf compressed page +@param[in] level compression level +@param[in] block_size file system block size +@param[in] encrypted whether the page will be subsequently encrypted +@return actual length of compressed page +@retval 0 if the page was not compressed */ +UNIV_INTERN ulint fil_page_compress(const byte* buf, byte* out_buf, ulint level, + ulint block_size, bool encrypted) { - int err = Z_OK; - int comp_level = level; + int comp_level = int(level); ulint header_len = FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE; - ulint write_size = 0; -#if HAVE_LZO - lzo_uint write_size_lzo = write_size; -#endif /* Cache to avoid change during function execution */ ulint comp_method = innodb_compression_algorithm; - bool allocated = false; - - /* page_compression does not apply to tables or tablespaces - that use ROW_FORMAT=COMPRESSED */ - ut_ad(!space || !FSP_FLAGS_GET_ZIP_SSIZE(space->flags)); if (encrypted) { header_len += FIL_PAGE_COMPRESSION_METHOD_SIZE; } - if (!out_buf) { - allocated = true; - ulint size = UNIV_PAGE_SIZE; - - /* Both snappy and lzo compression methods require that - output buffer used for compression is bigger than input - buffer. 
Increase the allocated buffer size accordingly. */ -#if HAVE_SNAPPY - if (comp_method == PAGE_SNAPPY_ALGORITHM) { - size = snappy_max_compressed_length(size); - } -#endif -#if HAVE_LZO - if (comp_method == PAGE_LZO_ALGORITHM) { - size += LZO1X_1_15_MEM_COMPRESS; - } -#endif - - out_buf = static_cast<byte *>(ut_malloc(size)); - } - - ut_ad(buf); - ut_ad(out_buf); - ut_ad(len); - ut_ad(out_len); - /* Let's not compress file space header or extent descriptor */ switch (fil_page_get_type(buf)) { @@ -154,8 +107,7 @@ fil_compress_page( case FIL_PAGE_TYPE_FSP_HDR: case FIL_PAGE_TYPE_XDES: case FIL_PAGE_PAGE_COMPRESSED: - *out_len = len; - goto err_exit; + return 0; } /* If no compression level was provided to this table, use system @@ -164,204 +116,113 @@ fil_compress_page( comp_level = page_zip_level; } - DBUG_PRINT("compress", - ("Preparing for space " ULINTPF " '%s' len " ULINTPF, - space ? space->id : 0, - space ? space->name : "(import)", - len)); - - write_size = UNIV_PAGE_SIZE - header_len; + ulint write_size = srv_page_size - header_len; - switch(comp_method) { + switch (comp_method) { + default: + ut_ad(!"unknown compression method"); + /* fall through */ + case PAGE_UNCOMPRESSED: + return 0; + case PAGE_ZLIB_ALGORITHM: + { + ulong len = uLong(write_size); + if (Z_OK == compress2( + out_buf + header_len, &len, + buf, uLong(srv_page_size), comp_level)) { + write_size = len; + goto success; + } + } + break; #ifdef HAVE_LZ4 case PAGE_LZ4_ALGORITHM: - -#ifdef HAVE_LZ4_COMPRESS_DEFAULT - err = LZ4_compress_default((const char *)buf, - (char *)out_buf+header_len, len, write_size); -#else - err = LZ4_compress_limitedOutput((const char *)buf, - (char *)out_buf+header_len, len, write_size); -#endif /* HAVE_LZ4_COMPRESS_DEFAULT */ - write_size = err; - - if (err == 0) { - /* If error we leave the actual page as it was */ - -#ifndef UNIV_PAGECOMPRESS_DEBUG - if (space && !space->printed_compression_failure) { - space->printed_compression_failure = true; -#endif - ib_logf(IB_LOG_LEVEL_WARN, - "Compression failed for space " ULINTPF - " name %s len " ULINTPF - " err %d write_size " ULINTPF ".", - space->id, space->name, len, - err, write_size); -#ifndef UNIV_PAGECOMPRESS_DEBUG - } -#endif - srv_stats.pages_page_compression_error.inc(); - *out_len = len; - goto err_exit; +# ifdef HAVE_LZ4_COMPRESS_DEFAULT + write_size = LZ4_compress_default( + reinterpret_cast<const char*>(buf), + reinterpret_cast<char*>(out_buf) + header_len, + int(srv_page_size), int(write_size)); +# else + write_size = LZ4_compress_limitedOutput( + reinterpret_cast<const char*>(buf), + reinterpret_cast<char*>(out_buf) + header_len, + int(srv_page_size), int(write_size)); +# endif + + if (write_size) { + goto success; } break; #endif /* HAVE_LZ4 */ #ifdef HAVE_LZO - case PAGE_LZO_ALGORITHM: - err = lzo1x_1_15_compress( - buf, len, out_buf+header_len, &write_size_lzo, out_buf+UNIV_PAGE_SIZE); - - write_size = write_size_lzo; - - if (err != LZO_E_OK || write_size > UNIV_PAGE_SIZE-header_len) { - if (space && !space->printed_compression_failure) { - space->printed_compression_failure = true; - ib_logf(IB_LOG_LEVEL_WARN, - "Compression failed for space " ULINTPF - " name %s len " ULINTPF - " err %d write_size " ULINTPF ".", - space->id, space->name, len, - err, write_size); - } - - srv_stats.pages_page_compression_error.inc(); - *out_len = len; - goto err_exit; + case PAGE_LZO_ALGORITHM: { + lzo_uint len = write_size; + + if (LZO_E_OK == lzo1x_1_15_compress( + buf, srv_page_size, + out_buf + header_len, &len, + out_buf + srv_page_size) 
+ && len <= write_size) { + write_size = len; + goto success; } - break; + } #endif /* HAVE_LZO */ #ifdef HAVE_LZMA case PAGE_LZMA_ALGORITHM: { - size_t out_pos=0; - - err = lzma_easy_buffer_encode( - comp_level, - LZMA_CHECK_NONE, - NULL, /* No custom allocator, use malloc/free */ - reinterpret_cast<uint8_t*>(buf), - len, - reinterpret_cast<uint8_t*>(out_buf + header_len), - &out_pos, - (size_t)write_size); - - if (err != LZMA_OK || out_pos > UNIV_PAGE_SIZE-header_len) { - if (space && !space->printed_compression_failure) { - space->printed_compression_failure = true; - ib_logf(IB_LOG_LEVEL_WARN, - "Compression failed for space " ULINTPF - " name %s len " ULINTPF - " err %d write_size " ULINTPF ".", - space->id, space->name, len, - err, out_pos); - } - - srv_stats.pages_page_compression_error.inc(); - *out_len = len; - goto err_exit; + size_t out_pos = 0; + + if (LZMA_OK == lzma_easy_buffer_encode( + comp_level, LZMA_CHECK_NONE, NULL, + buf, srv_page_size, out_buf + header_len, + &out_pos, write_size) + && out_pos <= write_size) { + write_size = out_pos; + goto success; } - - write_size = out_pos; - break; } #endif /* HAVE_LZMA */ #ifdef HAVE_BZIP2 case PAGE_BZIP2_ALGORITHM: { - - err = BZ2_bzBuffToBuffCompress( - (char *)(out_buf + header_len), - (unsigned int *)&write_size, - (char *)buf, - len, - 1, - 0, - 0); - - if (err != BZ_OK || write_size > UNIV_PAGE_SIZE-header_len) { - if (space && !space->printed_compression_failure) { - space->printed_compression_failure = true; - ib_logf(IB_LOG_LEVEL_WARN, - "Compression failed for space " ULINTPF - " name %s len " ULINTPF - " err %d write_size " ULINTPF ".", - space->id, space->name, len, - err, write_size); - } - - srv_stats.pages_page_compression_error.inc(); - *out_len = len; - goto err_exit; + unsigned len = unsigned(write_size); + if (BZ_OK == BZ2_bzBuffToBuffCompress( + reinterpret_cast<char*>(out_buf + header_len), + &len, + const_cast<char*>( + reinterpret_cast<const char*>(buf)), + unsigned(srv_page_size), 1, 0, 0) + && len <= write_size) { + write_size = len; + goto success; } break; } #endif /* HAVE_BZIP2 */ #ifdef HAVE_SNAPPY - case PAGE_SNAPPY_ALGORITHM: - { - snappy_status cstatus; - write_size = snappy_max_compressed_length(UNIV_PAGE_SIZE); - - cstatus = snappy_compress( - (const char *)buf, - (size_t)len, - (char *)(out_buf+header_len), - (size_t*)&write_size); - - if (cstatus != SNAPPY_OK || write_size > UNIV_PAGE_SIZE-header_len) { - if (space && !space->printed_compression_failure) { - space->printed_compression_failure = true; - ib_logf(IB_LOG_LEVEL_WARN, - "Compression failed for space " ULINTPF - " name %s len " ULINTPF - " err %d write_size " ULINTPF ".", - space->id, space->name, len, - (int)cstatus, write_size); - } - - srv_stats.pages_page_compression_error.inc(); - *out_len = len; - goto err_exit; + case PAGE_SNAPPY_ALGORITHM: { + size_t len = snappy_max_compressed_length(srv_page_size); + + if (SNAPPY_OK == snappy_compress( + reinterpret_cast<const char*>(buf), + srv_page_size, + reinterpret_cast<char*>(out_buf) + header_len, + &len) + && len <= write_size) { + write_size = len; + goto success; } break; } #endif /* HAVE_SNAPPY */ - - case PAGE_ZLIB_ALGORITHM: - err = compress2(out_buf+header_len, (ulong*)&write_size, buf, - uLong(len), comp_level); - - if (err != Z_OK) { - /* If error we leave the actual page as it was */ - - if (space && !space->printed_compression_failure) { - space->printed_compression_failure = true; - ib_logf(IB_LOG_LEVEL_WARN, - "Compression failed for space " ULINTPF - " name %s len " 
ULINTPF - " rt %d write_size " ULINTPF ".", - space->id, space->name, len, - err, write_size); - } - - srv_stats.pages_page_compression_error.inc(); - *out_len = len; - goto err_exit; - } - break; - - case PAGE_UNCOMPRESSED: - *out_len = len; - return (buf); - break; - default: - ut_error; - break; } + srv_stats.pages_page_compression_error.inc(); + return 0; +success: /* Set up the page header */ memcpy(out_buf, buf, FIL_PAGE_DATA); /* Set up the checksum */ @@ -392,22 +253,11 @@ fil_compress_page( /* Verify that page can be decompressed */ { - byte *comp_page; - byte *uncomp_page; - - comp_page = static_cast<byte *>(ut_malloc(UNIV_PAGE_SIZE)); - uncomp_page = static_cast<byte *>(ut_malloc(UNIV_PAGE_SIZE)); - memcpy(comp_page, out_buf, UNIV_PAGE_SIZE); - - fil_decompress_page(uncomp_page, comp_page, ulong(len), NULL); - - if (buf_page_is_corrupted(false, uncomp_page, 0, space)) { - buf_page_print(uncomp_page, 0); - ut_ad(0); - } - - ut_free(comp_page); - ut_free(uncomp_page); + page_t tmp_buf[UNIV_PAGE_SIZE_MAX]; + page_t page[UNIV_PAGE_SIZE_MAX]; + memcpy(page, out_buf, srv_page_size); + ut_ad(fil_page_decompress(tmp_buf, page)); + ut_ad(!buf_page_is_corrupted(false, page, 0, NULL)); } #endif /* UNIV_DEBUG */ @@ -431,324 +281,144 @@ fil_compress_page( #endif } - DBUG_PRINT("compress", - ("Succeeded for space " ULINTPF - " '%s' len " ULINTPF " out_len " ULINTPF, - space ? space->id : 0, - space ? space->name : "(import)", - len, write_size)); - - srv_stats.page_compression_saved.add((len - write_size)); + srv_stats.page_compression_saved.add(srv_page_size - write_size); srv_stats.pages_page_compressed.inc(); /* If we do not persistently trim rest of page, we need to write it all */ if (!srv_use_trim) { - memset(out_buf+write_size,0,len-write_size); - write_size = len; - } - - *out_len = write_size; - - if (allocated) { - /* TODO: reduce number of memcpy's */ - memcpy(buf, out_buf, len); - } else { - return(out_buf); - } - -err_exit: - if (allocated) { - ut_free(out_buf); + memset(out_buf + write_size, 0, srv_page_size - write_size); } - return (buf); - + return write_size; } -/****************************************************************//** -For page compressed pages decompress the page after actual read -operation. */ -UNIV_INTERN -void -fil_decompress_page( -/*================*/ - byte* page_buf, /*!< in: preallocated buffer or NULL */ - byte* buf, /*!< out: buffer from which to read; in aio - this must be appropriately aligned */ - ulong len, /*!< in: length of output buffer.*/ - ulint* write_size, /*!< in/out: Actual payload size of - the compressed data. */ - bool return_error) /*!< in: true if only an error should - be produced when decompression fails. - By default this parameter is false. */ +/** Decompress a page that may be subject to page_compressed compression. 
+@param[in,out] tmp_buf temporary buffer (of innodb_page_size) +@param[in,out] buf possibly compressed page buffer +@return size of the compressed data +@retval 0 if decompression failed +@retval srv_page_size if the page was not compressed */ +UNIV_INTERN ulint fil_page_decompress(byte* tmp_buf, byte* buf) { - int err = 0; - ulint actual_size = 0; - ulint compression_alg = 0; - byte *in_buf; - ulint ptype; - ulint header_len = FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE; - - ut_ad(buf); - ut_ad(len); - - ptype = mach_read_from_2(buf+FIL_PAGE_TYPE); - - if (ptype == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED) { - header_len += FIL_PAGE_COMPRESSION_METHOD_SIZE; - } - - /* Do not try to uncompressed pages that are not compressed */ - if (ptype != FIL_PAGE_PAGE_COMPRESSED && - ptype != FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED && - ptype != FIL_PAGE_TYPE_COMPRESSED) { - return; - } - - // If no buffer was given, we need to allocate temporal buffer - if (page_buf == NULL) { - in_buf = static_cast<byte *>(ut_malloc(UNIV_PAGE_SIZE)); - memset(in_buf, 0, UNIV_PAGE_SIZE); - } else { - in_buf = page_buf; + const unsigned ptype = mach_read_from_2(buf+FIL_PAGE_TYPE); + ulint header_len; + ib_uint64_t compression_alg; + switch (ptype) { + case FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED: + header_len = FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE + + FIL_PAGE_COMPRESSION_METHOD_SIZE; + compression_alg = mach_read_from_2( + FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE + buf); + break; + case FIL_PAGE_PAGE_COMPRESSED: + header_len = FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE; + compression_alg = mach_read_from_8( + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + buf); + break; + default: + return srv_page_size; } - /* Before actual decompress, make sure that page type is correct */ - - if (mach_read_from_4(buf+FIL_PAGE_SPACE_OR_CHKSUM) != BUF_NO_CHECKSUM_MAGIC || - (ptype != FIL_PAGE_PAGE_COMPRESSED && - ptype != FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED)) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Corruption: We try to uncompress corrupted page" - " CRC " ULINTPF " type " ULINTPF " len " ULINTPF ".", - mach_read_from_4(buf+FIL_PAGE_SPACE_OR_CHKSUM), - mach_read_from_2(buf+FIL_PAGE_TYPE), len); - - fflush(stderr); - if (return_error) { - goto error_return; - } - ut_error; + if (mach_read_from_4(buf + FIL_PAGE_SPACE_OR_CHKSUM) + != BUF_NO_CHECKSUM_MAGIC) { + return 0; } - /* Get compression algorithm */ - if (ptype == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED) { - compression_alg = mach_read_from_2(buf+FIL_PAGE_DATA+FIL_PAGE_COMPRESSED_SIZE); - } else { - compression_alg = mach_read_from_8(buf+FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION); - } + ulint actual_size = mach_read_from_2(buf + FIL_PAGE_DATA); - /* Get the actual size of compressed page */ - actual_size = mach_read_from_2(buf+FIL_PAGE_DATA); /* Check if payload size is corrupted */ - if (actual_size == 0 || actual_size > UNIV_PAGE_SIZE) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Corruption: We try to uncompress corrupted page" - " actual size " ULINTPF " compression %s.", - actual_size, fil_get_compression_alg_name(compression_alg)); - fflush(stderr); - if (return_error) { - goto error_return; - } - ut_error; + if (actual_size == 0 || actual_size > srv_page_size - header_len) { + return 0; } - /* Store actual payload size of the compressed data. This pointer - points to buffer pool. 
*/ - if (write_size) { - *write_size = actual_size; - } - - DBUG_PRINT("compress", - ("Preparing for decompress for len " ULINTPF ".", - actual_size)); - - switch(compression_alg) { + switch (compression_alg) { + default: + ib_logf(IB_LOG_LEVEL_ERROR, + "Unknown compression algorithm " UINT64PF, + compression_alg); + return 0; case PAGE_ZLIB_ALGORITHM: - err= uncompress(in_buf, &len, buf+header_len, (unsigned long)actual_size); - - /* If uncompress fails it means that page is corrupted */ - if (err != Z_OK) { - - ib_logf(IB_LOG_LEVEL_ERROR, - "Corruption: Page is marked as compressed" - " but uncompress failed with error %d " - " size " ULINTPF " len " ULINTPF ".", - err, actual_size, len); - - fflush(stderr); - - if (return_error) { - goto error_return; + { + uLong len = srv_page_size; + if (Z_OK != uncompress(tmp_buf, &len, + buf + header_len, + uLong(actual_size)) + && len != srv_page_size) { + return 0; } - ut_error; } break; - #ifdef HAVE_LZ4 case PAGE_LZ4_ALGORITHM: - err = LZ4_decompress_fast((const char *)buf+header_len, (char *)in_buf, len); - - if (err != (int)actual_size) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Corruption: Page is marked as compressed" - " but uncompress failed with error %d " - " size " ULINTPF " len " ULINTPF ".", - err, actual_size, len); - - fflush(stderr); - - if (return_error) { - goto error_return; - } - ut_error; + if (LZ4_decompress_safe(reinterpret_cast<const char*>(buf) + + header_len, + reinterpret_cast<char*>(tmp_buf), + actual_size, srv_page_size) + == int(srv_page_size)) { + break; } - break; + return 0; #endif /* HAVE_LZ4 */ #ifdef HAVE_LZO case PAGE_LZO_ALGORITHM: { - ulint olen = 0; - lzo_uint olen_lzo = olen; - err = lzo1x_decompress((const unsigned char *)buf+header_len, - actual_size,(unsigned char *)in_buf, &olen_lzo, NULL); - - olen = olen_lzo; - - if (err != LZO_E_OK || (olen == 0 || olen > UNIV_PAGE_SIZE)) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Corruption: Page is marked as compressed" - " but uncompress failed with error %d " - " size " ULINTPF " len " ULINTPF ".", - err, actual_size, len); - - fflush(stderr); - - if (return_error) { - goto error_return; - } - ut_error; + lzo_uint len_lzo = srv_page_size; + if (LZO_E_OK == lzo1x_decompress_safe( + buf + header_len, + actual_size, tmp_buf, &len_lzo, NULL) + && len_lzo == srv_page_size) { + break; } - break; + return 0; } #endif /* HAVE_LZO */ #ifdef HAVE_LZMA case PAGE_LZMA_ALGORITHM: { - - lzma_ret ret; size_t src_pos = 0; size_t dst_pos = 0; uint64_t memlimit = UINT64_MAX; - ret = lzma_stream_buffer_decode( - &memlimit, - 0, - NULL, - buf+header_len, - &src_pos, - actual_size, - in_buf, - &dst_pos, - len); - - - if (ret != LZMA_OK || (dst_pos == 0 || dst_pos > UNIV_PAGE_SIZE)) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Corruption: Page is marked as compressed" - " but decompression read only %ld bytes" - " size " ULINTPF "len " ULINTPF ".", - dst_pos, actual_size, len); - fflush(stderr); - - if (return_error) { - goto error_return; - } - ut_error; + if (LZMA_OK == lzma_stream_buffer_decode( + &memlimit, 0, NULL, buf + header_len, + &src_pos, actual_size, tmp_buf, &dst_pos, + srv_page_size) + && dst_pos == srv_page_size) { + break; } - - break; + return 0; } #endif /* HAVE_LZMA */ #ifdef HAVE_BZIP2 case PAGE_BZIP2_ALGORITHM: { - unsigned int dst_pos = UNIV_PAGE_SIZE; - - err = BZ2_bzBuffToBuffDecompress( - (char *)in_buf, - &dst_pos, - (char *)(buf+header_len), - actual_size, - 1, - 0); - - if (err != BZ_OK || (dst_pos == 0 || dst_pos > UNIV_PAGE_SIZE)) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Corruption: 
Page is marked as compressed" - " but decompression read only %du bytes" - " size " ULINTPF " len " ULINTPF " err %d.", - dst_pos, actual_size, len, err); - fflush(stderr); - - if (return_error) { - goto error_return; - } - ut_error; + unsigned int dst_pos = srv_page_size; + if (BZ_OK == BZ2_bzBuffToBuffDecompress( + reinterpret_cast<char*>(tmp_buf), + &dst_pos, + reinterpret_cast<char*>(buf) + header_len, + actual_size, 1, 0) + && dst_pos == srv_page_size) { + break; } - break; + return 0; } #endif /* HAVE_BZIP2 */ #ifdef HAVE_SNAPPY - case PAGE_SNAPPY_ALGORITHM: - { - snappy_status cstatus; - ulint olen = UNIV_PAGE_SIZE; - - cstatus = snappy_uncompress( - (const char *)(buf+header_len), - (size_t)actual_size, - (char *)in_buf, - (size_t*)&olen); - - if (cstatus != SNAPPY_OK || olen != UNIV_PAGE_SIZE) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Corruption: Page is marked as compressed" - " but decompression read only " ULINTPF " bytes" - " size " ULINTPF " len " ULINTPF " err %d.", - olen, actual_size, len, (int)cstatus); - fflush(stderr); - - if (return_error) { - goto error_return; - } - ut_error; + case PAGE_SNAPPY_ALGORITHM: { + size_t olen = srv_page_size; + + if (SNAPPY_OK == snappy_uncompress( + reinterpret_cast<const char*>(buf) + header_len, + actual_size, + reinterpret_cast<char*>(tmp_buf), &olen) + && olen == srv_page_size) { + break; } - - break; + return 0; } #endif /* HAVE_SNAPPY */ - default: - ib_logf(IB_LOG_LEVEL_ERROR, - "Corruption: Page is marked as compressed" - " but compression algorithm %s" - " is not known." - ,fil_get_compression_alg_name(compression_alg)); - - fflush(stderr); - if (return_error) { - goto error_return; - } - ut_error; - break; } srv_stats.pages_page_decompressed.inc(); - - /* Copy the uncompressed page to the buffer pool, not - really any other options. 
*/ - memcpy(buf, in_buf, len); - -error_return: - if (page_buf != in_buf) { - ut_free(in_buf); - } + memcpy(buf, tmp_buf, srv_page_size); + return actual_size; } diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 97de34b9083..30fabbdb089 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -2710,8 +2710,12 @@ innobase_mysql_tmpfile( } } #else +#ifdef F_DUPFD_CLOEXEC + fd2 = fcntl(fd, F_DUPFD_CLOEXEC, 0); +#else fd2 = dup(fd); #endif +#endif if (fd2 < 0) { DBUG_PRINT("error",("Got error %d on dup",fd2)); my_errno=errno; @@ -8984,14 +8988,12 @@ report_error: user_thd); #ifdef WITH_WSREP - if (!error_result && - wsrep_thd_exec_mode(user_thd) == LOCAL_STATE && - wsrep_on(user_thd) && - !wsrep_consistency_check(user_thd) && - !wsrep_thd_ignore_table(user_thd)) - { - if (wsrep_append_keys(user_thd, false, record, NULL)) - { + if (!error_result + && wsrep_on(user_thd) + && wsrep_thd_exec_mode(user_thd) == LOCAL_STATE + && !wsrep_consistency_check(user_thd) + && !wsrep_thd_ignore_table(user_thd)) { + if (wsrep_append_keys(user_thd, false, record, NULL)) { DBUG_PRINT("wsrep", ("row key failed")); error_result = HA_ERR_INTERNAL_ERROR; goto wsrep_error; diff --git a/storage/xtradb/ibuf/ibuf0ibuf.cc b/storage/xtradb/ibuf/ibuf0ibuf.cc index b169916c34e..96a86eea4c1 100644 --- a/storage/xtradb/ibuf/ibuf0ibuf.cc +++ b/storage/xtradb/ibuf/ibuf0ibuf.cc @@ -5218,6 +5218,10 @@ ibuf_check_bitmap_on_import( bitmap_page = ibuf_bitmap_get_map_page( space_id, page_no, zip_size, &mtr); + if (!bitmap_page) { + mutex_exit(&ibuf_mutex); + return DB_CORRUPTION; + } for (i = FSP_IBUF_BITMAP_OFFSET + 1; i < page_size; i++) { const ulint offset = page_no + i; diff --git a/storage/xtradb/include/buf0buf.h b/storage/xtradb/include/buf0buf.h index 7661ba1785d..0944b5d4067 100644 --- a/storage/xtradb/include/buf0buf.h +++ b/storage/xtradb/include/buf0buf.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2013, 2017, MariaDB Corporation. +Copyright (c) 2013, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -38,6 +38,7 @@ Created 11/5/1995 Heikki Tuuri #include "ut0rbt.h" #include "os0proc.h" #include "log0log.h" +#include "my_atomic.h" /** @name Modes for buf_page_get_gen */ /* @{ */ @@ -1528,45 +1529,16 @@ buf_page_encrypt_before_write( buf_page_t* bpage, byte* src_frame); -/********************************************************************** -The hook that is called after page is written to disk. -The function releases any resources needed for encryption that was allocated -in buf_page_encrypt_before_write */ -UNIV_INTERN -ibool -buf_page_encrypt_after_write( -/*=========================*/ - buf_page_t* page); /*!< in/out: buffer page that was flushed */ - -/********************************************************************//** -The hook that is called just before a page is read from disk. 
-The function allocates memory that is used to temporarily store disk content -before getting decrypted */ -UNIV_INTERN -byte* -buf_page_decrypt_before_read( -/*=========================*/ - buf_page_t* page, /*!< in/out: buffer page read from disk */ - ulint zip_size); /*!< in: compressed page size, or 0 */ - -/********************************************************************//** -The hook that is called just after a page is read from disk. -The function decrypt disk content into buf_page_t and releases the -temporary buffer that was allocated in buf_page_decrypt_before_read */ -UNIV_INTERN -bool -buf_page_decrypt_after_read( -/*========================*/ - buf_page_t* page); /*!< in/out: buffer page read from disk */ - /** @brief The temporary memory structure. NOTE! The definition appears here only for other modules of this directory (buf) to see it. Do not use from outside! */ typedef struct { - bool reserved; /*!< true if this slot is reserved +private: + int32 reserved; /*!< true if this slot is reserved */ +public: byte* crypt_buf; /*!< for encryption the data needs to be copied to a separate buffer before it's encrypted&written. this as a page can be @@ -1577,6 +1549,21 @@ typedef struct { byte* out_buf; /*!< resulting buffer after encryption/compression. This is a pointer and not allocated. */ + + /** Release the slot */ + void release() + { + my_atomic_store32_explicit(&reserved, false, + MY_MEMORY_ORDER_RELAXED); + } + + /** Acquire the slot + @return whether the slot was acquired */ + bool acquire() + { + return !my_atomic_fas32_explicit(&reserved, true, + MY_MEMORY_ORDER_RELAXED); + } } buf_tmp_buffer_t; /** The common buffer control block structure diff --git a/storage/xtradb/include/fil0fil.h b/storage/xtradb/include/fil0fil.h index a0db48f7037..7b8339fec7d 100644 --- a/storage/xtradb/include/fil0fil.h +++ b/storage/xtradb/include/fil0fil.h @@ -170,8 +170,7 @@ extern fil_addr_t fil_addr_null; #define FIL_PAGE_TYPE_BLOB 10 /*!< Uncompressed BLOB page */ #define FIL_PAGE_TYPE_ZBLOB 11 /*!< First compressed BLOB page */ #define FIL_PAGE_TYPE_ZBLOB2 12 /*!< Subsequent compressed BLOB page */ -#define FIL_PAGE_TYPE_COMPRESSED 13 /*!< Compressed page */ -#define FIL_PAGE_TYPE_LAST FIL_PAGE_TYPE_COMPRESSED +#define FIL_PAGE_TYPE_LAST FIL_PAGE_TYPE_ZBLOB2 /*!< Last page type */ /* @} */ @@ -341,9 +340,6 @@ struct fil_space_t { corrupted page. */ bool is_corrupt; /*!< true if tablespace corrupted */ - bool printed_compression_failure; - /*!< true if we have already printed - compression failure */ fil_space_crypt_t* crypt_data; /*!< tablespace crypt data or NULL */ ulint file_block_size; diff --git a/storage/xtradb/include/fil0fil.ic b/storage/xtradb/include/fil0fil.ic index 6c2504c9f8c..3f21c529308 100644 --- a/storage/xtradb/include/fil0fil.ic +++ b/storage/xtradb/include/fil0fil.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (c) 2015, 2017, MariaDB Corporation. +Copyright (c) 2015, 2018, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -65,12 +65,9 @@ fil_get_page_type_name( return "ZBLOB"; case FIL_PAGE_TYPE_ZBLOB2: return "ZBLOB2"; - case FIL_PAGE_TYPE_COMPRESSED: - return "ORACLE PAGE COMPRESSED"; } return "PAGE TYPE CORRUPTED"; - } /****************************************************************//** @@ -112,8 +109,7 @@ fil_page_type_validate( page_type == FIL_PAGE_TYPE_XDES || page_type == FIL_PAGE_TYPE_BLOB || page_type == FIL_PAGE_TYPE_ZBLOB || - page_type == FIL_PAGE_TYPE_ZBLOB2 || - page_type == FIL_PAGE_TYPE_COMPRESSED))) { + page_type == FIL_PAGE_TYPE_ZBLOB2))) { ulint key_version = mach_read_from_4(page + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION); bool page_compressed = (page_type == FIL_PAGE_PAGE_COMPRESSED); diff --git a/storage/xtradb/include/fil0pagecompress.h b/storage/xtradb/include/fil0pagecompress.h index 03e16699ce3..934372c55b2 100644 --- a/storage/xtradb/include/fil0pagecompress.h +++ b/storage/xtradb/include/fil0pagecompress.h @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (C) 2013, 2017 MariaDB Corporation. All Rights Reserved. +Copyright (C) 2013, 2018 MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -30,70 +30,26 @@ atomic writes information to table space. Created 11/12/2013 Jan Lindström jan.lindstrom@skysql.com ***********************************************************************/ -/*******************************************************************//** -Find out wheather the page is index page or not -@return true if page type index page, false if not */ -UNIV_INLINE -ibool -fil_page_is_index_page( -/*===================*/ - byte *buf); /*!< in: page */ - -/****************************************************************//** -Get the name of the compression algorithm used for page -compression. -@return compression algorithm name or "UNKNOWN" if not known*/ -UNIV_INLINE -const char* -fil_get_compression_alg_name( -/*=========================*/ - ulint comp_alg); /*!<in: compression algorithm number */ - -/****************************************************************//** -For page compressed pages compress the page before actual write -operation. -@return compressed page to be written*/ -UNIV_INTERN -byte* -fil_compress_page( -/*==============*/ - fil_space_t* space, /*!< in,out: tablespace (NULL during IMPORT) */ - byte* buf, /*!< in: buffer from which to write; in aio - this must be appropriately aligned */ - byte* out_buf, /*!< out: compressed buffer */ - ulint len, /*!< in: length of input buffer.*/ - ulint level, /* in: compression level */ - ulint block_size, /*!< in: block size */ - bool encrypted, /*!< in: is page also encrypted */ - ulint* out_len); /*!< out: actual length of compressed - page */ - -/****************************************************************//** -For page compressed pages decompress the page after actual read -operation. */ -UNIV_INTERN -void -fil_decompress_page( -/*================*/ - byte* page_buf, /*!< in: preallocated buffer or NULL */ - byte* buf, /*!< out: buffer from which to read; in aio - this must be appropriately aligned */ - ulong len, /*!< in: length of output buffer.*/ - ulint* write_size, /*!< in/out: Actual payload size of - the compressed data. 
*/ - bool return_error=false); - /*!< in: true if only an error should - be produced when decompression fails. - By default this parameter is false. */ - -/****************************************************************//** -Get space id from fil node -@return space id*/ -UNIV_INTERN -ulint -fil_node_get_space_id( -/*==================*/ - fil_node_t* node); /*!< in: Node where to get space id*/ +/** Compress a page_compressed page before writing to a data file. +@param[in] buf page to be compressed +@param[out] out_buf compressed page +@param[in] level compression level +@param[in] block_size file system block size +@param[in] encrypted whether the page will be subsequently encrypted +@return actual length of compressed page +@retval 0 if the page was not compressed */ +UNIV_INTERN ulint fil_page_compress(const byte* buf, byte* out_buf, ulint level, + ulint block_size, bool encrypted) + MY_ATTRIBUTE((nonnull, warn_unused_result)); + +/** Decompress a page that may be subject to page_compressed compression. +@param[in,out] tmp_buf temporary buffer (of innodb_page_size) +@param[in,out] buf compressed page buffer +@return size of the compressed data +@retval 0 if decompression failed +@retval srv_page_size if the page was not compressed */ +UNIV_INTERN ulint fil_page_decompress(byte* tmp_buf, byte* buf) + MY_ATTRIBUTE((nonnull, warn_unused_result)); /****************************************************************//** Get block size from fil node @@ -120,13 +76,4 @@ ibool fil_page_is_compressed_encrypted( /*=============================*/ byte* buf); /*!< in: page */ - -/*******************************************************************//** -Find out wheather the page is page compressed with lzo method -@return true if page is page compressed with lzo method*/ -UNIV_INLINE -ibool -fil_page_is_lzo_compressed( -/*=======================*/ - byte* buf); /*!< in: page */ #endif diff --git a/storage/xtradb/include/fsp0fsp.h b/storage/xtradb/include/fsp0fsp.h index 715572199ab..f9aca33a71e 100644 --- a/storage/xtradb/include/fsp0fsp.h +++ b/storage/xtradb/include/fsp0fsp.h @@ -915,7 +915,7 @@ fsp_flags_convert_from_101(ulint flags) /* Bits 13..16 are the wrong position for PAGE_SSIZE, and they should contain one of the values 3,4,6,7, that is, be of the form - 0b0011 or 0b01xx (except 0b0110). + 0b0011 or 0b01xx (except 0b0101). In correct versions, these bits should be 0bc0se where c is the MariaDB COMPRESSED flag and e is the MySQL 5.7 ENCRYPTION flag diff --git a/storage/xtradb/include/fsp0pagecompress.ic b/storage/xtradb/include/fsp0pagecompress.ic index 14f968e319e..99d0dfb3c7c 100644 --- a/storage/xtradb/include/fsp0pagecompress.ic +++ b/storage/xtradb/include/fsp0pagecompress.ic @@ -1,6 +1,6 @@ /***************************************************************************** -Copyright (C) 2013, 2017, MariaDB Corporation. All Rights Reserved. +Copyright (C) 2013, 2018, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -50,18 +50,6 @@ fsp_flags_get_atomic_writes( } /*******************************************************************//** -Find out wheather the page is index page or not -@return true if page type index page, false if not */ -UNIV_INLINE -ibool -fil_page_is_index_page( -/*===================*/ - byte* buf) /*!< in: page */ -{ - return(mach_read_from_2(buf+FIL_PAGE_TYPE) == FIL_PAGE_INDEX); -} - -/*******************************************************************//** Find out wheather the page is page compressed @return true if page is page compressed, false if not */ UNIV_INLINE @@ -84,59 +72,3 @@ fil_page_is_compressed_encrypted( { return(mach_read_from_2(buf+FIL_PAGE_TYPE) == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED); } - -/****************************************************************//** -Get the name of the compression algorithm used for page -compression. -@return compression algorithm name or "UNKNOWN" if not known*/ -UNIV_INLINE -const char* -fil_get_compression_alg_name( -/*=========================*/ - ulint comp_alg) /*!<in: compression algorithm number */ -{ - switch(comp_alg) { - case PAGE_UNCOMPRESSED: - return ("uncompressed"); - break; - case PAGE_ZLIB_ALGORITHM: - return ("ZLIB"); - break; - case PAGE_LZ4_ALGORITHM: - return ("LZ4"); - break; - case PAGE_LZO_ALGORITHM: - return ("LZO"); - break; - case PAGE_LZMA_ALGORITHM: - return ("LZMA"); - break; - case PAGE_BZIP2_ALGORITHM: - return ("BZIP2"); - break; - case PAGE_SNAPPY_ALGORITHM: - return ("SNAPPY"); - break; - /* No default to get compiler warning */ - } - - return ("NULL"); -} - -#ifndef UNIV_INNOCHECKSUM -/*******************************************************************//** -Find out wheather the page is page compressed with lzo method -@return true if page is page compressed with lzo method, false if not */ -UNIV_INLINE -ibool -fil_page_is_lzo_compressed( -/*=======================*/ - byte* buf) /*!< in: page */ -{ - return((mach_read_from_2(buf+FIL_PAGE_TYPE) == FIL_PAGE_PAGE_COMPRESSED && - mach_read_from_8(buf+FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION) == PAGE_LZO_ALGORITHM) || - (mach_read_from_2(buf+FIL_PAGE_TYPE) == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED && - mach_read_from_2(buf+FIL_PAGE_DATA+FIL_PAGE_COMPRESSED_SIZE) == PAGE_LZO_ALGORITHM)); -} - -#endif /* UNIV_INNOCHECKSUM */ diff --git a/storage/xtradb/os/os0file.cc b/storage/xtradb/os/os0file.cc index da8548f0fa8..2e172a7524c 100644 --- a/storage/xtradb/os/os0file.cc +++ b/storage/xtradb/os/os0file.cc @@ -1509,7 +1509,7 @@ os_file_create_simple_func( } do { - file = ::open(name, create_flag, os_innodb_umask); + file = ::open(name, create_flag | O_CLOEXEC, os_innodb_umask); if (file == -1) { *success = FALSE; @@ -1731,7 +1731,7 @@ os_file_create_simple_no_error_handling_func( return(file); } - file = open(name, create_flag, os_innodb_umask); + file = ::open(name, create_flag | O_CLOEXEC , os_innodb_umask); *success = file != -1; @@ -2146,7 +2146,7 @@ os_file_create_func( #endif /* O_SYNC */ do { - file = open(name, create_flag, os_innodb_umask); + file = ::open(name, create_flag | O_CLOEXEC, os_innodb_umask); if (file == -1) { const char* operation; @@ -2359,6 +2359,24 @@ loop: #endif } +/** Handle RENAME error. 
+@param name old name of the file +@param new_name new name of the file */ +static void os_file_handle_rename_error(const char* name, const char* new_name) +{ + if (os_file_get_last_error(true) != OS_FILE_DISK_FULL) { + ib_logf(IB_LOG_LEVEL_ERROR, "Cannot rename file '%s' to '%s'", + name, new_name); + } else if (!os_has_said_disk_full) { + os_has_said_disk_full = true; + /* Disk full error is reported irrespective of the + on_error_silent setting. */ + ib_logf(IB_LOG_LEVEL_ERROR, + "Full disk prevents renaming file '%s' to '%s'", + name, new_name); + } +} + /***********************************************************************//** NOTE! Use the corresponding macro os_file_rename(), not directly this function! Renames a file (can also move it to another directory). It is safest that the @@ -2394,7 +2412,7 @@ os_file_rename_func( return(TRUE); } - os_file_handle_error_no_exit(oldpath, "rename", FALSE, __FILE__, __LINE__); + os_file_handle_rename_error(oldpath, newpath); return(FALSE); #else @@ -2404,7 +2422,7 @@ os_file_rename_func( ret = rename(oldpath, newpath); if (ret != 0) { - os_file_handle_error_no_exit(oldpath, "rename", FALSE, __FILE__, __LINE__); + os_file_handle_rename_error(oldpath, newpath); return(FALSE); } @@ -3718,7 +3736,7 @@ os_file_get_status( access = !srv_read_only_mode ? O_RDWR : O_RDONLY; - fh = ::open(path, access, os_innodb_umask); + fh = ::open(path, access | O_CLOEXEC, os_innodb_umask); if (fh == -1) { stat_info->rw_perm = false; @@ -4147,7 +4165,7 @@ os_aio_native_aio_supported(void) strcpy(name + dirnamelen, "ib_logfile0"); - fd = ::open(name, O_RDONLY); + fd = ::open(name, O_RDONLY | O_CLOEXEC); if (fd == -1) { diff --git a/storage/xtradb/row/row0import.cc b/storage/xtradb/row/row0import.cc index 2315384649d..d4d2c4028fd 100644 --- a/storage/xtradb/row/row0import.cc +++ b/storage/xtradb/row/row0import.cc @@ -42,6 +42,12 @@ Created 2012-02-08 by Sunny Bains. #include "srv0start.h" #include "row0quiesce.h" #include "fil0pagecompress.h" +#ifdef HAVE_LZO +#include "lzo/lzo1x.h" +#endif +#ifdef HAVE_SNAPPY +#include "snappy-c.h" +#endif #include <vector> @@ -427,12 +433,9 @@ public: updated then its state must be set to BUF_PAGE_NOT_USED. For compressed tables the page descriptor memory will be at offset: block->frame + UNIV_PAGE_SIZE; - @param offset - physical offset within the file - @param block - block read from file, note it is not from the buffer pool + @param block block read from file, note it is not from the buffer pool @retval DB_SUCCESS or error code. */ - virtual dberr_t operator()( - os_offset_t offset, - buf_block_t* block) UNIV_NOTHROW = 0; + virtual dberr_t operator()(buf_block_t* block) UNIV_NOTHROW = 0; /** @return the space id of the tablespace */ @@ -689,12 +692,9 @@ struct FetchIndexRootPages : public AbstractCallback { /** Called for each block as it is read from the file. - @param offset - physical offset in the file - @param block - block to convert, it is not from the buffer pool. + @param block block to convert, it is not from the buffer pool. @retval DB_SUCCESS or error code. */ - virtual dberr_t operator() ( - os_offset_t offset, - buf_block_t* block) UNIV_NOTHROW; + dberr_t operator()(buf_block_t* block) UNIV_NOTHROW; /** Update the import configuration that will be used to import the tablespace. */ @@ -712,13 +712,9 @@ Called for each block as it is read from the file. Check index pages to determine the exact row format. We can't get that from the tablespace header flags alone. 
-@param offset - physical offset in the file -@param block - block to convert, it is not from the buffer pool. +@param block block to convert, it is not from the buffer pool. @retval DB_SUCCESS or error code. */ -dberr_t -FetchIndexRootPages::operator() ( - os_offset_t offset, - buf_block_t* block) UNIV_NOTHROW +dberr_t FetchIndexRootPages::operator()(buf_block_t* block) UNIV_NOTHROW { if (is_interrupted()) return DB_INTERRUPTED; @@ -726,15 +722,7 @@ FetchIndexRootPages::operator() ( ulint page_type = fil_page_get_type(page); - if (block->page.offset * m_page_size != offset) { - ib_logf(IB_LOG_LEVEL_ERROR, - "Page offset doesn't match file offset: " - "page offset: %u, file offset: " ULINTPF, - block->page.offset, - (ulint) (offset / m_page_size)); - - return DB_CORRUPTION; - } else if (page_type == FIL_PAGE_TYPE_XDES) { + if (page_type == FIL_PAGE_TYPE_XDES) { return set_current_xdes(block->page.offset, page); } else if (page_type == FIL_PAGE_INDEX && !is_free(block->page.offset) @@ -877,12 +865,9 @@ public: /** Called for each block as it is read from the file. - @param offset - physical offset in the file - @param block - block to convert, it is not from the buffer pool. + @param block block to convert, it is not from the buffer pool. @retval DB_SUCCESS or error code. */ - virtual dberr_t operator() ( - os_offset_t offset, - buf_block_t* block) UNIV_NOTHROW; + dberr_t operator()(buf_block_t* block) UNIV_NOTHROW; private: /** Update the page, set the space id, max trx id and index id. @@ -2038,10 +2023,9 @@ PageConverter::update_page( /** Called for every page in the tablespace. If the page was not updated then its state must be set to BUF_PAGE_NOT_USED. -@param block - block read from file, note it is not from the buffer pool +@param block block read from file, note it is not from the buffer pool @retval DB_SUCCESS or error code. */ -dberr_t -PageConverter::operator() (os_offset_t, buf_block_t* block) UNIV_NOTHROW +dberr_t PageConverter::operator()(buf_block_t* block) UNIV_NOTHROW { /* If we already had an old page with matching number in the buffer pool, evict it now, because @@ -3386,15 +3370,30 @@ fil_iterate( os_offset_t offset; ulint n_bytes = iter.n_io_buffers * iter.page_size; + const ulint buf_size = srv_page_size +#ifdef HAVE_LZO + + LZO1X_1_15_MEM_COMPRESS +#elif defined HAVE_SNAPPY + + snappy_max_compressed_length(srv_page_size) +#endif + ; + byte* page_compress_buf = static_cast<byte*>( + ut_malloc_low(buf_size, false)); ut_ad(!srv_read_only_mode); + if (!page_compress_buf) { + return DB_OUT_OF_MEMORY; + } + /* TODO: For ROW_FORMAT=COMPRESSED tables we do a lot of useless copying for non-index pages. 
Unfortunately, it is required by buf_zip_decompress() */ + dberr_t err = DB_SUCCESS; for (offset = iter.start; offset < iter.end; offset += n_bytes) { if (callback.is_interrupted()) { - return DB_INTERRUPTED; + err = DB_INTERRUPTED; + goto func_exit; } byte* io_buffer = iter.io_buffer; @@ -3425,30 +3424,20 @@ fil_iterate( if (!os_file_read_no_error_handling(iter.file, readptr, offset, n_bytes)) { ib_logf(IB_LOG_LEVEL_ERROR, "os_file_read() failed"); - return DB_IO_ERROR; + err = DB_IO_ERROR; + goto func_exit; } bool updated = false; - os_offset_t page_off = offset; - ulint n_pages_read = (ulint) n_bytes / iter.page_size; const ulint size = iter.page_size; - block->page.offset = page_off / size; + ulint n_pages_read = ulint(n_bytes) / size; + block->page.offset = offset / size; for (ulint i = 0; i < n_pages_read; - ++i, page_off += size, block->frame += size, - block->page.offset++) { - bool decrypted = false; - dberr_t err = DB_SUCCESS; + ++i, block->frame += size, block->page.offset++) { byte* src = readptr + (i * size); - byte* dst = io_buffer + (i * size); - bool frame_changed = false; - ulint page_type = mach_read_from_2(src+FIL_PAGE_TYPE); - const bool page_compressed - = page_type - == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED - || page_type == FIL_PAGE_PAGE_COMPRESSED; const ulint page_no = page_get_page_no(src); - if (!page_no && page_off) { + if (!page_no && block->page.offset) { const ulint* b = reinterpret_cast<const ulint*> (src); const ulint* const e = b + size / sizeof *b; @@ -3463,55 +3452,85 @@ fil_iterate( continue; } - if (page_no != page_off / size) { + if (page_no != block->page.offset) { +page_corrupted: + ib_logf(IB_LOG_LEVEL_WARN, + "%s: Page %lu at offset " + UINT64PF " looks corrupted.", + callback.filename(), + ulong(offset / size), offset); + err = DB_CORRUPTION; + goto func_exit; + } + + bool decrypted = false; + byte* dst = io_buffer + (i * size); + bool frame_changed = false; + ulint page_type = mach_read_from_2(src+FIL_PAGE_TYPE); + const bool page_compressed + = page_type + == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED + || page_type == FIL_PAGE_PAGE_COMPRESSED; + + if (page_compressed && block->page.zip.data) { goto page_corrupted; } - if (encrypted) { + if (!encrypted) { + } else if (!mach_read_from_4( + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + + src)) { +not_encrypted: + if (!page_compressed + && !block->page.zip.data) { + block->frame = src; + frame_changed = true; + } else { + ut_ad(dst != src); + memcpy(dst, src, size); + } + } else { + if (!fil_space_verify_crypt_checksum( + src, callback.get_zip_size(), + NULL, block->page.offset)) { + goto page_corrupted; + } + decrypted = fil_space_decrypt( iter.crypt_data, dst, iter.page_size, src, &err); if (err != DB_SUCCESS) { - return err; + goto func_exit; } - if (decrypted) { - updated = true; - } else { - if (!page_compressed - && !block->page.zip.data) { - block->frame = src; - frame_changed = true; - } else { - ut_ad(dst != src); - memcpy(dst, src, size); - } + if (!decrypted) { + goto not_encrypted; } + + updated = true; } /* If the original page is page_compressed, we need to decompress it before adjusting further. */ if (page_compressed) { - fil_decompress_page(NULL, dst, ulong(size), - NULL); + ulint compress_length = fil_page_decompress( + page_compress_buf, dst); + ut_ad(compress_length != srv_page_size); + if (compress_length == 0) { + goto page_corrupted; + } updated = true; } else if (buf_page_is_corrupted( false, encrypted && !frame_changed ? 
dst : src, callback.get_zip_size(), NULL)) { -page_corrupted: - ib_logf(IB_LOG_LEVEL_WARN, - "%s: Page %lu at offset " - UINT64PF " looks corrupted.", - callback.filename(), - ulong(offset / size), offset); - return DB_CORRUPTION; + goto page_corrupted; } - if ((err = callback(page_off, block)) != DB_SUCCESS) { - return err; + if ((err = callback(block)) != DB_SUCCESS) { + goto func_exit; } else if (!updated) { updated = buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE; @@ -3561,19 +3580,17 @@ page_corrupted: src = io_buffer + (i * size); if (page_compressed) { - ulint len = 0; - - fil_compress_page( - NULL, - src, - NULL, - size, - 0,/* FIXME: compression level */ - 512,/* FIXME: use proper block size */ - encrypted, - &len); - updated = true; + if (fil_page_compress( + src, + page_compress_buf, + 0,/* FIXME: compression level */ + 512,/* FIXME: proper block size */ + encrypted)) { + /* FIXME: remove memcpy() */ + memcpy(src, page_compress_buf, + srv_page_size); + } } /* If tablespace is encrypted, encrypt page before we @@ -3583,19 +3600,15 @@ page_corrupted: buffer pool is not being used at all! */ if (decrypted && encrypted) { byte *dest = writeptr + (i * size); - ulint space = mach_read_from_4( - src + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID); - ulint offset = mach_read_from_4(src + FIL_PAGE_OFFSET); - ib_uint64_t lsn = mach_read_from_8(src + FIL_PAGE_LSN); byte* tmp = fil_encrypt_buf( - iter.crypt_data, - space, - offset, - lsn, - src, - iter.page_size == UNIV_PAGE_SIZE ? 0 : iter.page_size, - dest); + iter.crypt_data, + callback.get_space_id(), + block->page.offset, + mach_read_from_8(src + FIL_PAGE_LSN), + src, + callback.get_zip_size(), + dest); if (tmp == src) { /* TODO: remove unnecessary memcpy's */ @@ -3614,11 +3627,14 @@ page_corrupted: offset, (ulint) n_bytes)) { ib_logf(IB_LOG_LEVEL_ERROR, "os_file_write() failed"); - return DB_IO_ERROR; + err = DB_IO_ERROR; + goto func_exit; } } - return DB_SUCCESS; +func_exit: + ut_free(page_compress_buf); + return err; } /********************************************************************//** diff --git a/storage/xtradb/row/row0upd.cc b/storage/xtradb/row/row0upd.cc index 1b9f87e233f..93ccc07c9af 100644 --- a/storage/xtradb/row/row0upd.cc +++ b/storage/xtradb/row/row0upd.cc @@ -1806,6 +1806,23 @@ row_upd_store_row( } } +#ifdef WITH_WSREP +/** Determine if a FOREIGN KEY constraint needs to be processed. +@param[in] node query node +@param[in] trx transaction +@return whether the node cannot be ignored */ + +inline bool wsrep_must_process_fk(const upd_node_t* node, const trx_t* trx) +{ + if (!wsrep_on_trx(trx)) { + return false; + } + return que_node_get_type(node->common.parent) != QUE_NODE_UPDATE + || static_cast<upd_node_t*>(node->common.parent)->cascade_node + != node; +} +#endif /* WITH_WSREP */ + /***********************************************************//** Updates a secondary index entry of a row. 
@return DB_SUCCESS if operation successfully completed, else error @@ -1836,7 +1853,7 @@ row_upd_sec_index_entry( referenced = row_upd_index_is_referenced(index, trx); #ifdef WITH_WSREP - ibool foreign = wsrep_row_upd_index_is_foreign(index, trx); + bool foreign = wsrep_row_upd_index_is_foreign(index, trx); #endif /* WITH_WSREP */ heap = mem_heap_create(1024); @@ -1968,61 +1985,61 @@ row_upd_sec_index_entry( row_ins_sec_index_entry() below */ if (!rec_get_deleted_flag( rec, dict_table_is_comp(index->table))) { -#ifdef WITH_WSREP - que_node_t *parent = que_node_get_parent(node); -#endif /* WITH_WSREP */ err = btr_cur_del_mark_set_sec_rec( 0, btr_cur, TRUE, thr, &mtr); - if (err == DB_SUCCESS && referenced) { - - ulint* offsets; - - offsets = rec_get_offsets( - rec, index, NULL, ULINT_UNDEFINED, - &heap); - - /* NOTE that the following call loses - the position of pcur ! */ - err = row_upd_check_references_constraints( - node, &pcur, index->table, - index, offsets, thr, &mtr); + if (err != DB_SUCCESS) { + break; } + #ifdef WITH_WSREP - if (err == DB_SUCCESS && !referenced && - wsrep_on_trx(trx) && - !wsrep_thd_is_BF(trx->mysql_thd, FALSE) && - !(parent && que_node_get_type(parent) == - QUE_NODE_UPDATE && - ((upd_node_t*)parent)->cascade_node == node) && - foreign - ) { - ulint* offsets = - rec_get_offsets( + if (!referenced && foreign + && wsrep_must_process_fk(node, trx) + && !wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { + ulint* offsets = rec_get_offsets( rec, index, NULL, ULINT_UNDEFINED, &heap); + err = wsrep_row_upd_check_foreign_constraints( node, &pcur, index->table, index, offsets, thr, &mtr); + switch (err) { case DB_SUCCESS: case DB_NO_REFERENCED_ROW: err = DB_SUCCESS; break; case DB_DEADLOCK: - if (wsrep_debug) fprintf (stderr, - "WSREP: sec index FK check fail for deadlock"); + if (wsrep_debug) { + ib_logf(IB_LOG_LEVEL_WARN, + "WSREP: sec index FK check fail for deadlock: " + " index %s table %s", index->name, index->table->name); + } break; default: - fprintf (stderr, - "WSREP: referenced FK check fail: %d", - (int)err); + ib_logf(IB_LOG_LEVEL_ERROR, + "WSREP: referenced FK check fail: %s index %s table %s", + ut_strerr(err), index->name, index->table->name); break; } } #endif /* WITH_WSREP */ } - break; + + if (referenced) { + + ulint* offsets; + + offsets = rec_get_offsets( + rec, index, NULL, ULINT_UNDEFINED, + &heap); + + /* NOTE that the following call loses + the position of pcur ! 
*/ + err = row_upd_check_references_constraints( + node, &pcur, index->table, + index, offsets, thr, &mtr); + } } btr_pcur_close(&pcur); @@ -2191,9 +2208,6 @@ row_upd_clust_rec_by_insert( rec_t* rec; ulint* offsets = NULL; -#ifdef WITH_WSREP - que_node_t *parent = que_node_get_parent(node); -#endif /* WITH_WSREP */ ut_ad(node); ut_ad(dict_index_is_clust(index)); @@ -2278,35 +2292,31 @@ err_exit: if (err != DB_SUCCESS) { goto err_exit; } - } #ifdef WITH_WSREP - if (!referenced && wsrep_on_trx(trx) && - !(parent && que_node_get_type(parent) == QUE_NODE_UPDATE && - ((upd_node_t*)parent)->cascade_node == node) && - foreign - ) { + } else if ((foreign && wsrep_must_process_fk(node, trx))) { err = wsrep_row_upd_check_foreign_constraints( node, pcur, table, index, offsets, thr, mtr); + switch (err) { case DB_SUCCESS: case DB_NO_REFERENCED_ROW: err = DB_SUCCESS; break; case DB_DEADLOCK: - if (wsrep_debug) fprintf (stderr, - "WSREP: insert FK check fail for deadlock"); + if (wsrep_debug) { + ib_logf(IB_LOG_LEVEL_WARN, + "WSREP: sec index FK check fail for deadlock: " + " index %s table %s", index->name, index->table->name); + } break; default: - fprintf (stderr, - "WSREP: referenced FK check fail: %d", - (int)err); + ib_logf(IB_LOG_LEVEL_ERROR, + "WSREP: referenced FK check fail: %s index %s table %s", + ut_strerr(err), index->name, index->table->name); break; } - if (err != DB_SUCCESS) { - goto err_exit; - } - } #endif /* WITH_WSREP */ + } } mtr_commit(mtr); @@ -2512,7 +2522,7 @@ row_upd_del_mark_clust_rec( dberr_t err; #ifdef WITH_WSREP rec_t* rec; - que_node_t *parent = que_node_get_parent(node); + trx_t* trx = thr_get_trx(thr) ; #endif /* WITH_WSREP */ ut_ad(node); @@ -2541,38 +2551,37 @@ row_upd_del_mark_clust_rec( btr_cur_get_block(btr_cur), btr_cur_get_rec(btr_cur), #endif /* WITH_WSREP */ index, offsets, thr, mtr); - if (err == DB_SUCCESS && referenced) { + if (err != DB_SUCCESS) { + } else if (referenced) { /* NOTE that the following call loses the position of pcur ! */ err = row_upd_check_references_constraints( node, pcur, index->table, index, offsets, thr, mtr); - } #ifdef WITH_WSREP - trx_t* trx = thr_get_trx(thr) ; - if (err == DB_SUCCESS && !referenced && wsrep_on_trx(trx) && - !(parent && que_node_get_type(parent) == QUE_NODE_UPDATE && - ((upd_node_t*)parent)->cascade_node == node) && - foreign - ) { + } else if (foreign && wsrep_must_process_fk(node, trx)) { err = wsrep_row_upd_check_foreign_constraints( node, pcur, index->table, index, offsets, thr, mtr); + switch (err) { case DB_SUCCESS: case DB_NO_REFERENCED_ROW: err = DB_SUCCESS; break; case DB_DEADLOCK: - if (wsrep_debug) fprintf (stderr, - "WSREP: clust rec FK check fail for deadlock"); + if (wsrep_debug) { + ib_logf(IB_LOG_LEVEL_WARN, + "WSREP: sec index FK check fail for deadlock: " + " index %s table %s", index->name, index->table->name); + } break; default: - fprintf (stderr, - "WSREP: clust rec referenced FK check fail: %d", - (int)err); + ib_logf(IB_LOG_LEVEL_ERROR, + "WSREP: referenced FK check fail: %s index %s table %s", + ut_strerr(err), index->name, index->table->name); break; } - } #endif /* WITH_WSREP */ + } mtr_commit(mtr); diff --git a/support-files/mysql.server.sh b/support-files/mysql.server.sh index 342929d2ecd..0a81b71171a 100644 --- a/support-files/mysql.server.sh +++ b/support-files/mysql.server.sh @@ -25,7 +25,6 @@ # Description: MariaDB is a very fast and reliable SQL database engine. 
### END INIT INFO -# If you install MariaDB on some other places than @prefix@, then you # have to do one of the following things for this script to work: # # - Run this script from within the MariaDB installation directory diff --git a/support-files/policy/selinux/mariadb-server.fc b/support-files/policy/selinux/mariadb-server.fc index 1a69ecc2c40..409f72923aa 100644 --- a/support-files/policy/selinux/mariadb-server.fc +++ b/support-files/policy/selinux/mariadb-server.fc @@ -1,4 +1,4 @@ -# This SELinux file contexts (.fc) file has been copied under BSD License from +# This SELinux file contexts (.fc) file has been copied under New BSD License from # Percona XtraDB Cluster. /etc/init\.d/rc\.d/mysql -- gen_context(system_u:object_r:mysqld_initrc_exec_t,s0) diff --git a/support-files/policy/selinux/mariadb-server.te b/support-files/policy/selinux/mariadb-server.te index 34d79326b10..45ef40f4153 100644 --- a/support-files/policy/selinux/mariadb-server.te +++ b/support-files/policy/selinux/mariadb-server.te @@ -1,4 +1,4 @@ -# This SELinux type enforcement (.te) file has been copied under BSD License +# This SELinux type enforcement (.te) file has been copied under New BSD License # from Percona XtraDB Cluster, along with some additions. module mariadb-server 1.0; diff --git a/support-files/wsrep.cnf.sh b/support-files/wsrep.cnf.sh index a5390855ca1..51ce3dca2dd 100644 --- a/support-files/wsrep.cnf.sh +++ b/support-files/wsrep.cnf.sh @@ -30,6 +30,9 @@ bind-address=0.0.0.0 ## WSREP options ## +# Enable wsrep +wsrep_on=1 + # Full path to wsrep provider library or 'none' wsrep_provider=none |
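The patch above changes the page_compressed I/O helpers so that fil_page_compress() reports success purely through its return value (the compressed payload length, or 0 when the page should be written uncompressed) and fil_page_decompress() only succeeds when the payload inflates back to exactly srv_page_size. The following standalone sketch is not MariaDB code: the page and header sizes are stand-in constants and the helper names are hypothetical, but it mirrors that convention for the ZLIB branch using zlib's compress2()/uncompress().

#include <zlib.h>
#include <cstddef>
#include <cstdio>
#include <vector>

// Stand-ins for srv_page_size and the page_compressed header length;
// the real values come from the server configuration and fil0fil.h.
static const std::size_t PAGE_SIZE  = 16384;
static const std::size_t HEADER_LEN = 40;

// Returns the compressed payload length, or 0 if the page does not fit,
// mirroring the "0 on failure" convention of fil_page_compress();
// on 0 the caller would simply write the page uncompressed.
static std::size_t page_compress(const unsigned char* page,
                                 unsigned char* out, int level)
{
    uLong len = uLong(PAGE_SIZE - HEADER_LEN);
    if (compress2(out + HEADER_LEN, &len, page, uLong(PAGE_SIZE), level)
        != Z_OK) {
        return 0;
    }
    return std::size_t(len);
}

// Succeeds only if the payload inflates back to exactly one full page,
// mirroring the size check in the new fil_page_decompress().
static bool page_decompress(const unsigned char* out, std::size_t payload,
                            unsigned char* tmp)
{
    uLong len = uLong(PAGE_SIZE);
    return uncompress(tmp, &len, out + HEADER_LEN, uLong(payload)) == Z_OK
        && len == PAGE_SIZE;
}

int main()
{
    std::vector<unsigned char> page(PAGE_SIZE, 'x');
    std::vector<unsigned char> out(PAGE_SIZE);
    std::vector<unsigned char> tmp(PAGE_SIZE);

    const std::size_t payload = page_compress(page.data(), out.data(), 6);
    if (payload && page_decompress(out.data(), payload, tmp.data())) {
        std::printf("round trip OK, payload %zu bytes\n", payload);
    }
    return 0;
}

The same convention (a zero return instead of logging and a goto err_exit) is what allows fil_iterate() in the patched row0import.cc to treat any failed decompression as page corruption rather than aborting the server.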