From 4652260d656ea871fe1ff31176b6d85eeffde932 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Wed, 25 Jan 2023 11:46:28 -0800 Subject: MDEV-28616 Crash when using derived table over union with order by clause This bug manifested itself when the server processed a query containing a derived table over union whose ORDER BY clause included a subquery with unresolvable column reference. For such a query the server crashed when trying to resolve column references in the ORDER BY clause used by union. For any union with ORDER BY clause an extra SELECT_LEX structure is created and it is attached to SELECT_LEX_UNIT structure of the union via the field fake_select_lex. The outer context for fake_select_lex must be the same as for other selects of the union. If the union is used in the FROM list of a derived table then the outer context for fake_select_lex must be set to NULL in line with other selects of the union. It was not done and it caused a crash when searching for possible resolution of an unresolvable column reference occurred in a subquery used in the ORDER BY clause. Approved by Oleksandr Byelkin --- mysql-test/main/derived.result | 23 +++++++++++++++++++++++ mysql-test/main/derived.test | 30 ++++++++++++++++++++++++++++++ sql/sql_derived.cc | 3 +++ 3 files changed, 56 insertions(+) diff --git a/mysql-test/main/derived.result b/mysql-test/main/derived.result index 2761fdfa287..0cb029fa7a4 100644 --- a/mysql-test/main/derived.result +++ b/mysql-test/main/derived.result @@ -1327,5 +1327,28 @@ a b DROP VIEW v1; DROP TABLE t1; # +# MDEV-28616: derived table over union with order by clause that +# contains subquery with unresolvable column reference +# +SELECT 1 FROM ( +SELECT 1 UNION SELECT 2 ORDER BY (SELECT 1 FROM DUAL WHERE xxx = 0) +) dt; +ERROR 42S22: Unknown column 'xxx' in 'where clause' +create table t1 (a int, b int); +insert into t1 values (3,8), (7,2), (1,4), (5,9); +create table t2 (a int, b int); +insert into t2 values (9,1), (7,3), (2,6); +create table t3 (c int, d int); +insert into t3 values (7,8), (1,2), (3,8); +select * from +( +select a,b from t1 where t1.a > 3 +union +select a,b from t2 where t2.b < 6 +order by (a - b / (select a + max(c) from t3 where d = x)) +) dt; +ERROR 42S22: Unknown column 'x' in 'where clause' +drop table t1,t2,t3; +# # End of 10.3 tests # diff --git a/mysql-test/main/derived.test b/mysql-test/main/derived.test index 6a831000e57..dca7243febb 100644 --- a/mysql-test/main/derived.test +++ b/mysql-test/main/derived.test @@ -1137,6 +1137,36 @@ SELECT * FROM v1 WHERE b > 0; DROP VIEW v1; DROP TABLE t1; +--echo # +--echo # MDEV-28616: derived table over union with order by clause that +--echo # contains subquery with unresolvable column reference +--echo # + +--error ER_BAD_FIELD_ERROR +SELECT 1 FROM ( + SELECT 1 UNION SELECT 2 ORDER BY (SELECT 1 FROM DUAL WHERE xxx = 0) +) dt; + +create table t1 (a int, b int); +insert into t1 values (3,8), (7,2), (1,4), (5,9); + +create table t2 (a int, b int); +insert into t2 values (9,1), (7,3), (2,6); + +create table t3 (c int, d int); +insert into t3 values (7,8), (1,2), (3,8); + +--error ER_BAD_FIELD_ERROR +select * from +( + select a,b from t1 where t1.a > 3 + union + select a,b from t2 where t2.b < 6 + order by (a - b / (select a + max(c) from t3 where d = x)) +) dt; + +drop table t1,t2,t3; + --echo # --echo # End of 10.3 tests --echo # diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 93dc62828ac..8177ee27943 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -771,6 +771,9 @@ bool 
mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived) cursor->outer_join|= JOIN_TYPE_OUTER; } } + // Prevent it for possible ORDER BY clause + if (unit->fake_select_lex) + unit->fake_select_lex->context.outer_context= 0; /* Above cascade call of prepare is important for PS protocol, but after it -- cgit v1.2.1 From b1043ea0ed01a7caa398d4a066b415d6eeebb08e Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Thu, 26 Jan 2023 10:57:01 +0400 Subject: Revert "MDEV-30151 parse error 1=2 not between/in" This reverts commit eba099184e1f6704894694ea41f97f216eae5f21. A different patch with less shift-reduce conflicts is coming. --- mysql-test/main/parser.result | 11 ----------- mysql-test/main/parser.test | 8 -------- sql/sql_yacc.yy | 6 +++--- sql/sql_yacc_ora.yy | 6 +++--- 4 files changed, 6 insertions(+), 25 deletions(-) diff --git a/mysql-test/main/parser.result b/mysql-test/main/parser.result index f44478727ae..0bb4e82c8b8 100644 --- a/mysql-test/main/parser.result +++ b/mysql-test/main/parser.result @@ -1866,15 +1866,4 @@ ERROR 42000: You have an error in your SQL syntax; check the manual that corresp EXECUTE IMMEDIATE 'CREATE PROCEDURE p() UPDATE t SET c=\'\'"abc'; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '"abc' at line 1 SET @@sql_mode=@save_sql_mode; -# -# MDEV-30151 parse error 1=2 not between/in -# -select 1=2 not in (3,4); -1=2 not in (3,4) -1 -select 1=2 not between 3 and 4; -1=2 not between 3 and 4 -1 -# # End of 10.3 tests -# diff --git a/mysql-test/main/parser.test b/mysql-test/main/parser.test index cfe4f9d6f53..9df18c50ee3 100644 --- a/mysql-test/main/parser.test +++ b/mysql-test/main/parser.test @@ -1673,12 +1673,4 @@ EXECUTE IMMEDIATE 'CREATE PROCEDURE p() UPDATE t SET c=\'\'"abc'; SET @@sql_mode=@save_sql_mode; ---echo # ---echo # MDEV-30151 parse error 1=2 not between/in ---echo # -select 1=2 not in (3,4); -select 1=2 not between 3 and 4; - ---echo # --echo # End of 10.3 tests ---echo # diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 3025d93de0f..7766049c104 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -899,7 +899,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); /* We should not introduce any further shift/reduce conflicts. */ -%expect 96 +%expect 85 /* Comments for TOKENS. @@ -1687,7 +1687,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %left PREC_BELOW_NOT -%nonassoc LOW_PRIORITY_NOT +%nonassoc NOT_SYM %left '=' EQUAL_SYM GE '>' LE '<' NE %nonassoc IS %right BETWEEN_SYM @@ -9840,7 +9840,7 @@ expr: MYSQL_YYABORT; } } - | NOT_SYM expr %prec LOW_PRIORITY_NOT + | NOT_SYM expr %prec NOT_SYM { $$= negate_expression(thd, $2); if (unlikely($$ == NULL)) diff --git a/sql/sql_yacc_ora.yy b/sql/sql_yacc_ora.yy index df90ba6c634..a5ee1892e5e 100644 --- a/sql/sql_yacc_ora.yy +++ b/sql/sql_yacc_ora.yy @@ -293,7 +293,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); /* We should not introduce any further shift/reduce conflicts. */ -%expect 98 +%expect 87 /* Comments for TOKENS. 
@@ -1081,7 +1081,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %left PREC_BELOW_NOT -%nonassoc LOW_PRIORITY_NOT +%nonassoc NOT_SYM %left '=' EQUAL_SYM GE '>' LE '<' NE %nonassoc IS %right BETWEEN_SYM @@ -9797,7 +9797,7 @@ expr: MYSQL_YYABORT; } } - | NOT_SYM expr %prec LOW_PRIORITY_NOT + | NOT_SYM expr %prec NOT_SYM { $$= negate_expression(thd, $2); if (unlikely($$ == NULL)) -- cgit v1.2.1 From 895673dae52d36c20caf900e6179694de1c7699b Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Mon, 12 Dec 2022 17:45:48 +0400 Subject: MDEV-30151 parse error 1=2 not between/in This patch fixes the problem by adding a new rule booleat_test. This makes the grammar clearer and less conflicting. Additionally, fixing %prec in this grammar branch: - | boolean_test IS NULL_SYM %prec PREC_BELOW_NOT + | boolean_test IS NULL_SYM %prec IS to have consistently "%prec IS" in all grammar branches starting with "boolean_test IS ...". It's not clear why these three rules needed different %prec before the fix: - boolean_test IS TRUE - boolean_test IS UNKNOWN - boolean_test IS NULL --- mysql-test/main/parser.result | 31 +++++++++++++++++++++++++ mysql-test/main/parser.test | 21 +++++++++++++++++ sql/sql_yacc.yy | 54 +++++++++++++++++++++++++++++++------------ sql/sql_yacc_ora.yy | 54 +++++++++++++++++++++++++++++++------------ 4 files changed, 130 insertions(+), 30 deletions(-) diff --git a/mysql-test/main/parser.result b/mysql-test/main/parser.result index 0bb4e82c8b8..a8ee4440b5e 100644 --- a/mysql-test/main/parser.result +++ b/mysql-test/main/parser.result @@ -1866,4 +1866,35 @@ ERROR 42000: You have an error in your SQL syntax; check the manual that corresp EXECUTE IMMEDIATE 'CREATE PROCEDURE p() UPDATE t SET c=\'\'"abc'; ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '"abc' at line 1 SET @@sql_mode=@save_sql_mode; +# +# MDEV-30151 parse error 1=2 not between/in +# +SELECT 1=2 NOT IN (3,4); +1=2 NOT IN (3,4) +1 +SELECT 1=2 NOT BETWEEN 3 AND 4; +1=2 NOT BETWEEN 3 AND 4 +1 +CREATE TABLE t1 ( f INT AS ( 1 IN ( 2 NOT BETWEEN 3 AND 4 ) ) ); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f` int(11) GENERATED ALWAYS AS (1 = 2 not between 3 and 4) VIRTUAL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +DROP TABLE t1; +CREATE TABLE t1 ( f INT, CHECK ( 1 IN ( 2 NOT BETWEEN 3 AND 4 ) ) ); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f` int(11) DEFAULT NULL, + CONSTRAINT `CONSTRAINT_1` CHECK (1 = 2 not between 3 and 4) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +DROP TABLE t1; +CREATE VIEW v1 AS SELECT 1 IN ( 2 NOT BETWEEN 3 AND 4 ); +SHOW CREATE VIEW v1; +View Create View character_set_client collation_connection +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select 1 = 2 not between 3 and 4 AS `1 IN ( 2 NOT BETWEEN 3 AND 4 )` latin1 latin1_swedish_ci +DROP VIEW v1; +# # End of 10.3 tests +# diff --git a/mysql-test/main/parser.test b/mysql-test/main/parser.test index 9df18c50ee3..9e46f859d5c 100644 --- a/mysql-test/main/parser.test +++ b/mysql-test/main/parser.test @@ -1673,4 +1673,25 @@ EXECUTE IMMEDIATE 'CREATE PROCEDURE p() UPDATE t SET c=\'\'"abc'; SET @@sql_mode=@save_sql_mode; +--echo # +--echo # MDEV-30151 parse error 1=2 not between/in +--echo # + +SELECT 1=2 NOT IN (3,4); +SELECT 1=2 NOT BETWEEN 3 AND 4; + +CREATE TABLE t1 ( f INT AS ( 1 IN ( 2 NOT BETWEEN 
3 AND 4 ) ) ); +SHOW CREATE TABLE t1; +DROP TABLE t1; + +CREATE TABLE t1 ( f INT, CHECK ( 1 IN ( 2 NOT BETWEEN 3 AND 4 ) ) ); +SHOW CREATE TABLE t1; +DROP TABLE t1; + +CREATE VIEW v1 AS SELECT 1 IN ( 2 NOT BETWEEN 3 AND 4 ); +SHOW CREATE VIEW v1; +DROP VIEW v1; + +--echo # --echo # End of 10.3 tests +--echo # diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 7766049c104..e03bde31832 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -899,7 +899,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); /* We should not introduce any further shift/reduce conflicts. */ -%expect 85 +%expect 78 /* Comments for TOKENS. @@ -1687,7 +1687,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %left PREC_BELOW_NOT -%nonassoc NOT_SYM +/* The precendence of boolean NOT is in fact here. See the comment below. */ + %left '=' EQUAL_SYM GE '>' LE '<' NE %nonassoc IS %right BETWEEN_SYM @@ -1699,6 +1700,24 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %left '*' '/' '%' DIV_SYM MOD_SYM %left '^' %left MYSQL_CONCAT_SYM +/* + Boolean negation has a special branch in "expr" starting with NOT_SYM. + The precedence of logical negation is determined by the grammar itself + (without using Bison terminal symbol precedence) in this order + - Boolean factor (i.e. logical AND) + - Boolean NOT + - Boolean test (such as '=', IS NULL, IS TRUE) + + But we also need a precedence for NOT_SYM in other contexts, + to shift (without reduce) in these cases: + predicate NOT IN ... + predicate NOT BETWEEN ... + predicate NOT LIKE ... + predicate NOT REGEXP ... + If the precedence of NOT_SYM was low, it would reduce immediately + after scanning "predicate" and then produce a syntax error on "NOT". +*/ +%nonassoc NOT_SYM %nonassoc NEG '~' NOT2_SYM BINARY %nonassoc COLLATE_SYM @@ -1938,6 +1957,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); literal insert_ident order_ident temporal_literal simple_ident expr sum_expr in_sum_expr variable variable_aux + boolean_test predicate bit_expr parenthesized_expr table_wild simple_expr column_default_non_parenthesized_expr udf_expr primary_expr string_factor_expr mysql_concatenation_expr @@ -9840,79 +9860,83 @@ expr: MYSQL_YYABORT; } } - | NOT_SYM expr %prec NOT_SYM + | NOT_SYM expr { $$= negate_expression(thd, $2); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr IS TRUE_SYM %prec IS + | boolean_test %prec PREC_BELOW_NOT + ; + +boolean_test: + boolean_test IS TRUE_SYM %prec IS { $$= new (thd->mem_root) Item_func_istrue(thd, $1); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr IS not TRUE_SYM %prec IS + | boolean_test IS not TRUE_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnottrue(thd, $1); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr IS FALSE_SYM %prec IS + | boolean_test IS FALSE_SYM %prec IS { $$= new (thd->mem_root) Item_func_isfalse(thd, $1); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr IS not FALSE_SYM %prec IS + | boolean_test IS not FALSE_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnotfalse(thd, $1); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr IS UNKNOWN_SYM %prec IS + | boolean_test IS UNKNOWN_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnull(thd, $1); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr IS not UNKNOWN_SYM %prec IS + | boolean_test IS not UNKNOWN_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnotnull(thd, $1); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr IS NULL_SYM %prec PREC_BELOW_NOT + | boolean_test IS NULL_SYM %prec IS { $$= new 
(thd->mem_root) Item_func_isnull(thd, $1); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr IS not NULL_SYM %prec IS + | boolean_test IS not NULL_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnotnull(thd, $1); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr EQUAL_SYM predicate %prec EQUAL_SYM + | boolean_test EQUAL_SYM predicate %prec EQUAL_SYM { $$= new (thd->mem_root) Item_func_equal(thd, $1, $3); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr comp_op predicate %prec '=' + | boolean_test comp_op predicate %prec '=' { $$= (*$2)(0)->create(thd, $1, $3); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr comp_op all_or_any '(' subselect ')' %prec '=' + | boolean_test comp_op all_or_any '(' subselect ')' %prec '=' { $$= all_any_subquery_creator(thd, $1, $2, $3, $5); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | predicate + | predicate %prec BETWEEN_SYM ; predicate: diff --git a/sql/sql_yacc_ora.yy b/sql/sql_yacc_ora.yy index a5ee1892e5e..89f7412ea89 100644 --- a/sql/sql_yacc_ora.yy +++ b/sql/sql_yacc_ora.yy @@ -293,7 +293,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); /* We should not introduce any further shift/reduce conflicts. */ -%expect 87 +%expect 80 /* Comments for TOKENS. @@ -1081,7 +1081,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %left PREC_BELOW_NOT -%nonassoc NOT_SYM +/* The precendence of boolean NOT is in fact here. See the comment below. */ + %left '=' EQUAL_SYM GE '>' LE '<' NE %nonassoc IS %right BETWEEN_SYM @@ -1093,6 +1094,24 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %left '*' '/' '%' DIV_SYM MOD_SYM %left '^' %left MYSQL_CONCAT_SYM +/* + Boolean negation has a special branch in "expr" starting with NOT_SYM. + The precedence of logical negation is determined by the grammar itself + (without using Bison terminal symbol precedence) in this order + - Boolean factor (i.e. logical AND) + - Boolean NOT + - Boolean test (such as '=', IS NULL, IS TRUE) + + But we also need a precedence for NOT_SYM in other contexts, + to shift (without reduce) in these cases: + predicate NOT IN ... + predicate NOT BETWEEN ... + predicate NOT LIKE ... + predicate NOT REGEXP ... + If the precedence of NOT_SYM was low, it would reduce immediately + after scanning "predicate" and then produce a syntax error on "NOT". 
+*/ +%nonassoc NOT_SYM %nonassoc NEG '~' NOT2_SYM BINARY %nonassoc COLLATE_SYM @@ -1339,6 +1358,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); literal insert_ident order_ident temporal_literal simple_ident expr sum_expr in_sum_expr variable variable_aux + boolean_test predicate bit_expr parenthesized_expr table_wild simple_expr column_default_non_parenthesized_expr udf_expr primary_expr string_factor_expr mysql_concatenation_expr @@ -9797,79 +9817,83 @@ expr: MYSQL_YYABORT; } } - | NOT_SYM expr %prec NOT_SYM + | NOT_SYM expr { $$= negate_expression(thd, $2); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr IS TRUE_SYM %prec IS + | boolean_test %prec PREC_BELOW_NOT + ; + +boolean_test: + boolean_test IS TRUE_SYM %prec IS { $$= new (thd->mem_root) Item_func_istrue(thd, $1); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr IS not TRUE_SYM %prec IS + | boolean_test IS not TRUE_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnottrue(thd, $1); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr IS FALSE_SYM %prec IS + | boolean_test IS FALSE_SYM %prec IS { $$= new (thd->mem_root) Item_func_isfalse(thd, $1); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr IS not FALSE_SYM %prec IS + | boolean_test IS not FALSE_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnotfalse(thd, $1); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr IS UNKNOWN_SYM %prec IS + | boolean_test IS UNKNOWN_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnull(thd, $1); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr IS not UNKNOWN_SYM %prec IS + | boolean_test IS not UNKNOWN_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnotnull(thd, $1); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr IS NULL_SYM %prec PREC_BELOW_NOT + | boolean_test IS NULL_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnull(thd, $1); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr IS not NULL_SYM %prec IS + | boolean_test IS not NULL_SYM %prec IS { $$= new (thd->mem_root) Item_func_isnotnull(thd, $1); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr EQUAL_SYM predicate %prec EQUAL_SYM + | boolean_test EQUAL_SYM predicate %prec EQUAL_SYM { $$= new (thd->mem_root) Item_func_equal(thd, $1, $3); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr comp_op predicate %prec '=' + | boolean_test comp_op predicate %prec '=' { $$= (*$2)(0)->create(thd, $1, $3); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | expr comp_op all_or_any '(' subselect ')' %prec '=' + | boolean_test comp_op all_or_any '(' subselect ')' %prec '=' { $$= all_any_subquery_creator(thd, $1, $2, $3, $5); if (unlikely($$ == NULL)) MYSQL_YYABORT; } - | predicate + | predicate %prec BETWEEN_SYM ; predicate: -- cgit v1.2.1 From f812f8e1ab7acc5dd17daf5ab5f73495c7963af5 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Thu, 26 Jan 2023 12:22:38 +0100 Subject: MDEV-30475 Windows, mtr - Remove outdated instructions on how to install post-mortem debugger Also, use standard C:\symbols location for OS debugging symbols cache, rather than own invention C:\cdb_symbols. --- mysql-test/lib/My/CoreDump.pm | 33 +++------------------------------ 1 file changed, 3 insertions(+), 30 deletions(-) diff --git a/mysql-test/lib/My/CoreDump.pm b/mysql-test/lib/My/CoreDump.pm index 05b6edf1385..be6d21146d1 100644 --- a/mysql-test/lib/My/CoreDump.pm +++ b/mysql-test/lib/My/CoreDump.pm @@ -310,16 +310,8 @@ sub cdb_check { `cdb -? 2>&1`; if ($? >> 8) { - print "Cannot find cdb. 
Please Install Debugging tools for Windows\n"; - print "from http://www.microsoft.com/whdc/devtools/debugging/"; - if($ENV{'ProgramW6432'}) - { - print "install64bit.mspx (native x64 version)\n"; - } - else - { - print "installx86.mspx\n"; - } + print "Cannot find the cdb debugger. Please install Debugging tools for Windows\n"; + print "and set PATH environment variable to include location of cdb.exe"; } } @@ -328,25 +320,6 @@ sub _cdb { my ($core_name, $format)= @_; print "\nTrying 'cdb' to get a backtrace\n"; return unless -f $core_name; - - # Try to set environment for debugging tools for Windows - if ($ENV{'PATH'} !~ /Debugging Tools/) - { - if ($ENV{'ProgramW6432'}) - { - # On x64 computer - $ENV{'PATH'}.= ";".$ENV{'ProgramW6432'}."\\Debugging Tools For Windows (x64)"; - } - else - { - # On x86 computer. Newest versions of Debugging tools are installed in the - # directory with (x86) suffix, older versions did not have this suffix. - $ENV{'PATH'}.= ";".$ENV{'ProgramFiles'}."\\Debugging Tools For Windows (x86)"; - $ENV{'PATH'}.= ";".$ENV{'ProgramFiles'}."\\Debugging Tools For Windows"; - } - } - - # Read module list, find out the name of executable and # build symbol path (required by cdb if executable was built on # different machine) @@ -384,7 +357,7 @@ sub _cdb { if (!$ENV{'_NT_SYMBOL_PATH'}) { my $windir= $ENV{'windir'}; - my $symbol_cache= substr($windir ,0, index($windir,'\\'))."\\cdb_symbols"; + my $symbol_cache= substr($windir ,0, index($windir,'\\'))."\\symbols"; print "OS debug symbols will be downloaded and stored in $symbol_cache.\n"; print "You can control the location of symbol cache with _NT_SYMBOL_PATH\n"; -- cgit v1.2.1 From 2a78c3ef6fd6663d6731dd5cec2f462420b61123 Mon Sep 17 00:00:00 2001 From: Salman Mohammadi Date: Sun, 8 Jan 2023 20:14:58 +0100 Subject: MDEV-30509: mariadb-plugin-connect: introduce curl as recommends in order to be able to retrieve files using REST queries. Otherwise, `ERROR 1105 (HY000): Curl not installed.` will be thrown. --- debian/control | 1 + 1 file changed, 1 insertion(+) diff --git a/debian/control b/debian/control index aed073e5c8c..09e0fd74d57 100644 --- a/debian/control +++ b/debian/control @@ -525,6 +525,7 @@ Depends: libxml2, unixodbc, ${misc:Depends}, ${shlibs:Depends} +Recommends: curl Breaks: mariadb-connect-engine-10.1, mariadb-connect-engine-10.2, mariadb-connect-engine-10.3 -- cgit v1.2.1 From 9b32e4b192303421ca26625153ae1190429e307f Mon Sep 17 00:00:00 2001 From: Nayuta Yanagisawa Date: Tue, 27 Sep 2022 15:22:57 +0900 Subject: MDEV-29644 a potential bug of null pointer dereference in spider_db_mbase::print_warnings() The function spider_db_mbase::print_warnings() can potentially result in a null pointer dereference. Remove the null pointer dereference by cleaning up the function. Some small changes to the original commit 422fb63a9bbee35c50b6c7be19d199afe0bc98fa. 
Co-Authored-By: Yuchen Pei --- .../mysql-test/spider/bugfix/r/mdev_29644.result | 41 ++++++++++ .../mysql-test/spider/bugfix/t/mdev_29644.cnf | 3 + .../mysql-test/spider/bugfix/t/mdev_29644.test | 56 ++++++++++++++ storage/spider/spd_db_mysql.cc | 88 +++++++++------------- storage/spider/spd_db_mysql.h | 4 +- 5 files changed, 136 insertions(+), 56 deletions(-) create mode 100644 storage/spider/mysql-test/spider/bugfix/r/mdev_29644.result create mode 100644 storage/spider/mysql-test/spider/bugfix/t/mdev_29644.cnf create mode 100644 storage/spider/mysql-test/spider/bugfix/t/mdev_29644.test diff --git a/storage/spider/mysql-test/spider/bugfix/r/mdev_29644.result b/storage/spider/mysql-test/spider/bugfix/r/mdev_29644.result new file mode 100644 index 00000000000..b52cecc5bb7 --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/r/mdev_29644.result @@ -0,0 +1,41 @@ +# +# MDEV-29644 a potential bug of null pointer dereference in spider_db_mbase::print_warnings() +# +for master_1 +for child2 +child2_1 +child2_2 +child2_3 +for child3 +connection child2_1; +CREATE DATABASE auto_test_remote; +USE auto_test_remote; +CREATE TABLE tbl_a ( +a CHAR(5) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +SET GLOBAL sql_mode=''; +connection master_1; +CREATE DATABASE auto_test_local; +USE auto_test_local; +CREATE TABLE tbl_a ( +a CHAR(255) +) ENGINE=Spider DEFAULT CHARSET=utf8 COMMENT='table "tbl_a", srv "s_2_1"'; +SET sql_mode=''; +INSERT INTO tbl_a VALUES ("this will be truncated"); +NOT FOUND /\[WARN SPIDER RESULT\].* Warning 1265 Data truncated for column 'a' at row 1.*/ in mysqld.1.1.err +SET GLOBAL spider_log_result_errors=4; +INSERT INTO tbl_a VALUES ("this will be truncated"); +FOUND 1 /\[WARN SPIDER RESULT\].* Warning 1265 Data truncated for column 'a' at row 1.*/ in mysqld.1.1.err +connection master_1; +SET GLOBAL spider_log_result_errors=DEFAULT; +SET sql_mode=DEFAULT; +DROP DATABASE IF EXISTS auto_test_local; +connection child2_1; +SET GLOBAL sql_mode=DEFAULT; +DROP DATABASE IF EXISTS auto_test_remote; +for master_1 +for child2 +child2_1 +child2_2 +child2_3 +for child3 diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_29644.cnf b/storage/spider/mysql-test/spider/bugfix/t/mdev_29644.cnf new file mode 100644 index 00000000000..05dfd8a0bce --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_29644.cnf @@ -0,0 +1,3 @@ +!include include/default_mysqld.cnf +!include ../my_1_1.cnf +!include ../my_2_1.cnf diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_29644.test b/storage/spider/mysql-test/spider/bugfix/t/mdev_29644.test new file mode 100644 index 00000000000..3a8fbb251e1 --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_29644.test @@ -0,0 +1,56 @@ +--echo # +--echo # MDEV-29644 a potential bug of null pointer dereference in spider_db_mbase::print_warnings() +--echo # + +# The test case below does not cause the potential null pointer dereference. +# It is just for checking spider_db_mbase::fetch_and_print_warnings() works. 
+ +--disable_query_log +--disable_result_log +--source ../../t/test_init.inc +--enable_result_log +--enable_query_log + +--connection child2_1 +CREATE DATABASE auto_test_remote; +USE auto_test_remote; +eval CREATE TABLE tbl_a ( + a CHAR(5) +) $CHILD2_1_ENGINE $CHILD2_1_CHARSET; + +SET GLOBAL sql_mode=''; + +--connection master_1 +CREATE DATABASE auto_test_local; +USE auto_test_local; +eval CREATE TABLE tbl_a ( + a CHAR(255) +) $MASTER_1_ENGINE $MASTER_1_CHARSET COMMENT='table "tbl_a", srv "s_2_1"'; + +SET sql_mode=''; + +let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.1.err; +let SEARCH_PATTERN= \[WARN SPIDER RESULT\].* Warning 1265 Data truncated for column 'a' at row 1.*; + +INSERT INTO tbl_a VALUES ("this will be truncated"); +--source include/search_pattern_in_file.inc # should not find + +SET GLOBAL spider_log_result_errors=4; + +INSERT INTO tbl_a VALUES ("this will be truncated"); +--source include/search_pattern_in_file.inc # should find + +--connection master_1 +SET GLOBAL spider_log_result_errors=DEFAULT; +SET sql_mode=DEFAULT; +DROP DATABASE IF EXISTS auto_test_local; + +--connection child2_1 +SET GLOBAL sql_mode=DEFAULT; +DROP DATABASE IF EXISTS auto_test_remote; + +--disable_query_log +--disable_result_log +--source ../t/test_deinit.inc +--enable_query_log +--enable_result_log diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc index e942d1d9063..b1c222d193a 100644 --- a/storage/spider/spd_db_mysql.cc +++ b/storage/spider/spd_db_mysql.cc @@ -2090,7 +2090,7 @@ int spider_db_mbase::exec_query( db_conn->affected_rows, db_conn->insert_id, db_conn->server_status, db_conn->warning_count); if (spider_param_log_result_errors() >= 3) - print_warnings(l_time); + fetch_and_print_warnings(l_time); } else if (log_result_errors >= 4) { time_t cur_time = (time_t) time((time_t*) 0); @@ -2172,61 +2172,43 @@ bool spider_db_mbase::is_xa_nota_error( DBUG_RETURN(xa_nota); } -void spider_db_mbase::print_warnings( - struct tm *l_time -) { - DBUG_ENTER("spider_db_mbase::print_warnings"); - DBUG_PRINT("info",("spider this=%p", this)); - if (db_conn->status == MYSQL_STATUS_READY) +void spider_db_mbase::fetch_and_print_warnings(struct tm *l_time) +{ + DBUG_ENTER("spider_db_mbase::fetch_and_print_warnings"); + + if (spider_param_dry_access() || db_conn->status != MYSQL_STATUS_READY || + db_conn->server_status & SERVER_MORE_RESULTS_EXISTS) + DBUG_VOID_RETURN; + + if (mysql_real_query(db_conn, SPIDER_SQL_SHOW_WARNINGS_STR, + SPIDER_SQL_SHOW_WARNINGS_LEN)) + DBUG_VOID_RETURN; + + MYSQL_RES *res= mysql_store_result(db_conn); + if (!res) + DBUG_VOID_RETURN; + + uint num_fields= mysql_num_fields(res); + if (num_fields != 3) { -#if MYSQL_VERSION_ID < 50500 - if (!(db_conn->last_used_con->server_status & SERVER_MORE_RESULTS_EXISTS)) -#else - if (!(db_conn->server_status & SERVER_MORE_RESULTS_EXISTS)) -#endif - { - if ( - spider_param_dry_access() || - !mysql_real_query(db_conn, SPIDER_SQL_SHOW_WARNINGS_STR, - SPIDER_SQL_SHOW_WARNINGS_LEN) - ) { - MYSQL_RES *res = NULL; - MYSQL_ROW row = NULL; - uint num_fields; - if ( - spider_param_dry_access() || - !(res = mysql_store_result(db_conn)) || - !(row = mysql_fetch_row(res)) - ) { - if (mysql_errno(db_conn)) - { - if (res) - mysql_free_result(res); - DBUG_VOID_RETURN; - } - /* no record is ok */ - } - num_fields = mysql_num_fields(res); - if (num_fields != 3) - { - mysql_free_result(res); - DBUG_VOID_RETURN; - } - while (row) - { - fprintf(stderr, "%04d%02d%02d %02d:%02d:%02d [WARN SPIDER RESULT] " - "from [%s] %ld to %ld: %s %s %s\n", + 
mysql_free_result(res); + DBUG_VOID_RETURN; + } + + MYSQL_ROW row= mysql_fetch_row(res); + while (row) + { + fprintf(stderr, + "%04d%02d%02d %02d:%02d:%02d [WARN SPIDER RESULT] from [%s] %ld " + "to %ld: %s %s %s\n", l_time->tm_year + 1900, l_time->tm_mon + 1, l_time->tm_mday, - l_time->tm_hour, l_time->tm_min, l_time->tm_sec, - conn->tgt_host, (ulong) db_conn->thread_id, - (ulong) current_thd->thread_id, row[0], row[1], row[2]); - row = mysql_fetch_row(res); - } - if (res) - mysql_free_result(res); - } - } + l_time->tm_hour, l_time->tm_min, l_time->tm_sec, conn->tgt_host, + (ulong) db_conn->thread_id, (ulong) current_thd->thread_id, row[0], + row[1], row[2]); + row= mysql_fetch_row(res); } + mysql_free_result(res); + DBUG_VOID_RETURN; } diff --git a/storage/spider/spd_db_mysql.h b/storage/spider/spd_db_mysql.h index 4d5327b7533..576162b2b55 100644 --- a/storage/spider/spd_db_mysql.h +++ b/storage/spider/spd_db_mysql.h @@ -392,9 +392,7 @@ public: bool is_xa_nota_error( int error_num ); - void print_warnings( - struct tm *l_time - ); + void fetch_and_print_warnings(struct tm *l_time); spider_db_result *store_result( spider_db_result_buffer **spider_res_buf, st_spider_db_request_key *request_key, -- cgit v1.2.1 From e62947f38bb9c15f7fa8d3faa60b1852c4fecb80 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 9 Mar 2023 15:32:24 +0100 Subject: bump the VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 876e7c96e80..26166cc8151 100644 --- a/VERSION +++ b/VERSION @@ -1,4 +1,4 @@ MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=3 -MYSQL_VERSION_PATCH=38 +MYSQL_VERSION_PATCH=39 SERVER_MATURITY=stable -- cgit v1.2.1 From fb7d5881535574c0e33fa8338eaed9ecc7bc65c6 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 9 Mar 2023 11:32:18 +0100 Subject: main.bootstrap test cleanup --- mysql-test/main/bootstrap.result | 42 ++++++++++++++-- mysql-test/main/bootstrap.test | 102 +++++++++++++++++++-------------------- 2 files changed, 89 insertions(+), 55 deletions(-) diff --git a/mysql-test/main/bootstrap.result b/mysql-test/main/bootstrap.result index 0ba87be092e..7ea00923823 100644 --- a/mysql-test/main/bootstrap.result +++ b/mysql-test/main/bootstrap.result @@ -1,13 +1,26 @@ -drop table if exists t1; +# +# test mysqld in bootstrap mode +# +# +# Check that --bootstrap reads from stdin +# drop table t1; +# +# Check that --bootstrap of file with SQL error returns error +# drop table t1; ERROR 42S02: Unknown table 'test.t1' +# +# Bootstrap with a large thd->net.max_packet +# set @my_max_allowed_packet= @@max_allowed_packet; set @@global.max_allowed_packet= greatest(1073741824, @@max_allowed_packet); set @max_allowed_packed=@@global.max_allowed_packet; set global max_allowed_packet=@my_max_allowed_packet; drop table t1; -End of 5.1 tests +# +# End of 5.1 tests +# # # Bug #11766306: 59393: HAVE_INNODB=YES WHEN MYSQLD # STARTED WITH --SKIP-INNODB @@ -15,7 +28,21 @@ End of 5.1 tests SELECT 'bug' as '' FROM INFORMATION_SCHEMA.ENGINES WHERE engine='innodb' and SUPPORT='YES'; -End of 5.5 tests +# +# MDEV-13063 Server crashes in intern_plugin_lock or assertion `plugin_ptr->ref_count == 1' fails in plugin_init +# +# +# MDEV-19349 mysql_install_db: segfault at tmp_file_prefix check +# +# +# End of 5.5 tests +# +# +# Check that --bootstrap can install and uninstall plugins +# +# +# Check that installed plugins are *not* automatically loaded in --bootstrap +# flush tables; show create table t1; Table Create Table @@ -27,3 +54,12 @@ select * from 
mysql.plugin; name dl EXAMPLE ha_example.so truncate table mysql.plugin; +# +# MDEV-9969 mysql_install_db error processing ignore_db_dirs. +# +# +# MDEV-13397 MariaDB upgrade fail when using default_time_zone +# +# +# End of 10.3 tests +# diff --git a/mysql-test/main/bootstrap.test b/mysql-test/main/bootstrap.test index 683033979fe..318842b550b 100644 --- a/mysql-test/main/bootstrap.test +++ b/mysql-test/main/bootstrap.test @@ -1,16 +1,20 @@ -# -# test mysqld in bootstrap mode -# ---disable_warnings -drop table if exists t1; ---enable_warnings +--echo # +--echo # test mysqld in bootstrap mode +--echo # +--source include/not_windows_embedded.inc +--source include/have_example_plugin.inc + +--let test_bootstrap=$MYSQLTEST_VARDIR/tmp/test_bootstrap.sql +--write_file $test_bootstrap +use test; +EOF # Add the datadir to the bootstrap command let $MYSQLD_DATADIR= `select @@datadir`; let $MYSQLD_BOOTSTRAP_CMD= $MYSQLD_BOOTSTRAP_CMD --datadir=$MYSQLD_DATADIR --tmpdir=$MYSQL_TMP_DIR --default-storage-engine=MyISAM --loose-skip-innodb --plugin-maturity=unknown; -# -# Check that --bootstrap reads from stdin -# +--echo # +--echo # Check that --bootstrap reads from stdin +--echo # --write_file $MYSQLTEST_VARDIR/tmp/bootstrap_test.sql use test; CREATE TABLE t1(a int); @@ -18,9 +22,9 @@ EOF --exec $MYSQLD_BOOTSTRAP_CMD < $MYSQLTEST_VARDIR/tmp/bootstrap_test.sql >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1 drop table t1; remove_file $MYSQLTEST_VARDIR/tmp/bootstrap_test.sql; -# -# Check that --bootstrap of file with SQL error returns error -# +--echo # +--echo # Check that --bootstrap of file with SQL error returns error +--echo # --write_file $MYSQLTEST_VARDIR/tmp/bootstrap_error.sql use test; CREATE TABLE t1; @@ -32,9 +36,9 @@ EOF drop table t1; remove_file $MYSQLTEST_VARDIR/tmp/bootstrap_error.sql; -# -# Bootstrap with a large thd->net.max_packet -# +--echo # +--echo # Bootstrap with a large thd->net.max_packet +--echo # set @my_max_allowed_packet= @@max_allowed_packet; set @@global.max_allowed_packet= greatest(1073741824, @@max_allowed_packet); set @max_allowed_packed=@@global.max_allowed_packet; @@ -49,7 +53,9 @@ remove_file $MYSQLTEST_VARDIR/tmp/long_query.sql; set global max_allowed_packet=@my_max_allowed_packet; drop table t1; ---echo End of 5.1 tests +--echo # +--echo # End of 5.1 tests +--echo # --echo # --echo # Bug #11766306: 59393: HAVE_INNODB=YES WHEN MYSQLD @@ -60,28 +66,24 @@ drop table t1; SELECT 'bug' as '' FROM INFORMATION_SCHEMA.ENGINES WHERE engine='innodb' and SUPPORT='YES'; -# -# MDEV-13063 Server crashes in intern_plugin_lock or assertion `plugin_ptr->ref_count == 1' fails in plugin_init -# +--echo # +--echo # MDEV-13063 Server crashes in intern_plugin_lock or assertion `plugin_ptr->ref_count == 1' fails in plugin_init +--echo # --error 1 --exec $MYSQLD_BOOTSTRAP_CMD --myisam_recover_options=NONE -# -# MDEV-19349 mysql_install_db: segfault at tmp_file_prefix check -# ---write_file $MYSQLTEST_VARDIR/tmp/1 -use test; -EOF ---exec $MYSQLD_BOOTSTRAP_CMD < $MYSQLTEST_VARDIR/tmp/1 >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1 ---remove_file $MYSQLTEST_VARDIR/tmp/1 +--echo # +--echo # MDEV-19349 mysql_install_db: segfault at tmp_file_prefix check +--echo # +--exec $MYSQLD_BOOTSTRAP_CMD < $test_bootstrap >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1 ---echo End of 5.5 tests +--echo # +--echo # End of 5.5 tests +--echo # ---source include/not_windows_embedded.inc ---source include/have_example_plugin.inc -# -# Check that --bootstrap can install and uninstall plugins -# +--echo # +--echo # Check 
that --bootstrap can install and uninstall plugins +--echo # let $PLUGIN_DIR=`select @@plugin_dir`; --write_file $MYSQLTEST_VARDIR/tmp/install_plugin.sql install soname 'ha_example'; @@ -90,9 +92,9 @@ EOF --exec $MYSQLD_BOOTSTRAP_CMD --plugin-dir=$PLUGIN_DIR < $MYSQLTEST_VARDIR/tmp/install_plugin.sql >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1 --remove_file $MYSQLTEST_VARDIR/tmp/install_plugin.sql -# -# Check that installed plugins are *not* automatically loaded in --bootstrap -# +--echo # +--echo # Check that installed plugins are *not* automatically loaded in --bootstrap +--echo # --write_file $MYSQLTEST_VARDIR/tmp/bootstrap_plugins.sql SET SQL_MODE=""; use test; @@ -107,21 +109,17 @@ drop table t1; select * from mysql.plugin; truncate table mysql.plugin; +--echo # +--echo # MDEV-9969 mysql_install_db error processing ignore_db_dirs. +--echo # +--exec $MYSQLD_BOOTSTRAP_CMD --ignore-db-dirs='some_dir' --ignore-db-dirs='some_dir' < $test_bootstrap >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1 -# -# MDEV-9969 mysql_install_db error processing ignore_db_dirs. -# ---write_file $MYSQLTEST_VARDIR/tmp/bootstrap_9969.sql -use test; -EOF ---exec $MYSQLD_BOOTSTRAP_CMD --ignore-db-dirs='some_dir' --ignore-db-dirs='some_dir' < $MYSQLTEST_VARDIR/tmp/bootstrap_9969.sql >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1 ---remove_file $MYSQLTEST_VARDIR/tmp/bootstrap_9969.sql +--echo # +--echo # MDEV-13397 MariaDB upgrade fail when using default_time_zone +--echo # +--exec $MYSQLD_BOOTSTRAP_CMD --default-time-zone=Europe/Moscow < $test_bootstrap >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1 -# -# MDEV-13397 MariaDB upgrade fail when using default_time_zone -# ---write_file $MYSQLTEST_VARDIR/tmp/bootstrap_9969.sql -use test; -EOF ---exec $MYSQLD_BOOTSTRAP_CMD --default-time-zone=Europe/Moscow < $MYSQLTEST_VARDIR/tmp/bootstrap_9969.sql >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1 ---remove_file $MYSQLTEST_VARDIR/tmp/bootstrap_9969.sql +--echo # +--echo # End of 10.3 tests +--echo # +--remove_file $test_bootstrap -- cgit v1.2.1 From 4c4939bbf619d7e516131c0b3e5691b1c2d2ff8f Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 9 Mar 2023 11:22:41 +0100 Subject: MDEV-30818 invalid ssl prevents bootstrap in bootstrap the server reads stdin and does not listen to network. 
it won't use ssl anyway --- mysql-test/main/bootstrap.result | 3 +++ mysql-test/main/bootstrap.test | 5 +++++ sql/mysqld.cc | 5 ++++- 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/mysql-test/main/bootstrap.result b/mysql-test/main/bootstrap.result index 7ea00923823..0b256263a29 100644 --- a/mysql-test/main/bootstrap.result +++ b/mysql-test/main/bootstrap.result @@ -61,5 +61,8 @@ truncate table mysql.plugin; # MDEV-13397 MariaDB upgrade fail when using default_time_zone # # +# MDEV-30818 invalid ssl prevents bootstrap +# +# # End of 10.3 tests # diff --git a/mysql-test/main/bootstrap.test b/mysql-test/main/bootstrap.test index 318842b550b..e3c121f201f 100644 --- a/mysql-test/main/bootstrap.test +++ b/mysql-test/main/bootstrap.test @@ -119,6 +119,11 @@ truncate table mysql.plugin; --echo # --exec $MYSQLD_BOOTSTRAP_CMD --default-time-zone=Europe/Moscow < $test_bootstrap >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1 +--echo # +--echo # MDEV-30818 invalid ssl prevents bootstrap +--echo # +--exec $MYSQLD_BOOTSTRAP_CMD --ssl-ca=/dev/nonexistent < $test_bootstrap >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1 + --echo # --echo # End of 10.3 tests --echo # diff --git a/sql/mysqld.cc b/sql/mysqld.cc index ea3849ed9f9..68f66c37b6c 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -5026,7 +5026,10 @@ static void init_ssl() { sql_print_error("Failed to setup SSL"); sql_print_error("SSL error: %s", sslGetErrString(error)); - unireg_abort(1); + if (!opt_bootstrap) + unireg_abort(1); + opt_use_ssl = 0; + have_ssl= SHOW_OPTION_DISABLED; } if (global_system_variables.log_warnings > 0) { -- cgit v1.2.1 From 79e27a6bf9e05eb2296e59a4ea4cd1334195faca Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 5 Apr 2023 23:34:03 +0200 Subject: MDEV-25887 "Got notification message from PID xxxx, but reception only permitted for main PID yyyy" in systemd during SST server has systemd support and calls sd_notify() to communicate the status to systemd. mariabackup links the whole server in, but it should not notify systemd, because it's not started or managed by systemd. --- extra/mariabackup/xtrabackup.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index 5e01e64f490..ad35749d0ff 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -113,6 +113,12 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA #define MB_CORRUPTED_PAGES_FILE "innodb_corrupted_pages" +// disable server's systemd notification code +extern "C" { +int sd_notify() { return 0; } +int sd_notifyf() { return 0; } +} + int sys_var_init(); /* === xtrabackup specific options === */ -- cgit v1.2.1 From 54715a1074a0c9fc12a5d52152df73826a484df7 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Thu, 6 Apr 2023 09:57:58 +0400 Subject: MDEV-30072 Wrong ORDER BY for a partitioned prefix key + NOPAD This problem was earlier fixed by MDEV-30034. Adding MTR tests only. 
--- mysql-test/main/ctype_uca_partitions.result | 40 +++++++++++++++++++++++++++++ mysql-test/main/ctype_uca_partitions.test | 32 +++++++++++++++++++++++ 2 files changed, 72 insertions(+) diff --git a/mysql-test/main/ctype_uca_partitions.result b/mysql-test/main/ctype_uca_partitions.result index d7b79046b34..373fe914527 100644 --- a/mysql-test/main/ctype_uca_partitions.result +++ b/mysql-test/main/ctype_uca_partitions.result @@ -84,3 +84,43 @@ O P Y DROP TABLE t1; +# +# Start of 10.4 tests +# +# +# MDEV-30072 Wrong ORDER BY for a partitioned prefix key + NOPAD +# +SET NAMES utf8mb4; +CREATE TABLE t1 +( +id INT, +data VARCHAR(20), +KEY data_id (data,id) +) COLLATE utf8mb3_unicode_nopad_ci ENGINE=MyISAM +PARTITION BY RANGE COLUMNS (id) +( +PARTITION p10 VALUES LESS THAN (20), +PARTITION p20 VALUES LESS THAN MAXVALUE +); +INSERT INTO t1 VALUES (30, 'ss '), (10, 'ß '); +SELECT id FROM t1 WHERE data='ss ' ORDER BY id; +id +10 +30 +SELECT id FROM t1 WHERE data='ss ' ORDER BY id DESC; +id +30 +10 +ALTER TABLE t1 DROP KEY data_id, ADD KEY data_id2(data(10),id); +SELECT id FROM t1 WHERE data='ss ' ORDER BY id; +id +10 +30 +SELECT id FROM t1 WHERE data='ss ' ORDER BY id DESC; +id +30 +10 +DROP TABLE t1; +# +# End of 10.4 tests +# diff --git a/mysql-test/main/ctype_uca_partitions.test b/mysql-test/main/ctype_uca_partitions.test index 5734bb52008..81f1a091574 100644 --- a/mysql-test/main/ctype_uca_partitions.test +++ b/mysql-test/main/ctype_uca_partitions.test @@ -36,3 +36,35 @@ SELECT * FROM t1 PARTITION (p0) ORDER BY c1; SELECT * FROM t1 PARTITION (p1) ORDER BY c1; SELECT * FROM t1 PARTITION (p2) ORDER BY c1; DROP TABLE t1; + +--echo # +--echo # Start of 10.4 tests +--echo # + +--echo # +--echo # MDEV-30072 Wrong ORDER BY for a partitioned prefix key + NOPAD +--echo # + +SET NAMES utf8mb4; +CREATE TABLE t1 +( + id INT, + data VARCHAR(20), + KEY data_id (data,id) +) COLLATE utf8mb3_unicode_nopad_ci ENGINE=MyISAM +PARTITION BY RANGE COLUMNS (id) +( + PARTITION p10 VALUES LESS THAN (20), + PARTITION p20 VALUES LESS THAN MAXVALUE +); +INSERT INTO t1 VALUES (30, 'ss '), (10, 'ß '); +SELECT id FROM t1 WHERE data='ss ' ORDER BY id; +SELECT id FROM t1 WHERE data='ss ' ORDER BY id DESC; +ALTER TABLE t1 DROP KEY data_id, ADD KEY data_id2(data(10),id); +SELECT id FROM t1 WHERE data='ss ' ORDER BY id; +SELECT id FROM t1 WHERE data='ss ' ORDER BY id DESC; +DROP TABLE t1; + +--echo # +--echo # End of 10.4 tests +--echo # -- cgit v1.2.1 From ed2adc8c6f986f7e9c81d7a99f85cad0e2d46d80 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Thu, 6 Apr 2023 14:50:26 +0400 Subject: MDEV-28190 sql_mode makes MDEV-371 virtual column expressions nondeterministic This problem was fixed earlier by MDEV-27653. Adding MTR tests only. 
--- .../include/sql_mode_pad_char_to_full_length.inc | 31 +++++++ .../main/sql_mode_pad_char_to_full_length.result | 94 ++++++++++++++++++++++ .../main/sql_mode_pad_char_to_full_length.test | 19 +++++ .../r/sql_mode_pad_char_to_full_length.result | 51 ++++++++++++ .../innodb/t/sql_mode_pad_char_to_full_length.test | 18 +++++ 5 files changed, 213 insertions(+) create mode 100644 mysql-test/include/sql_mode_pad_char_to_full_length.inc create mode 100644 mysql-test/main/sql_mode_pad_char_to_full_length.result create mode 100644 mysql-test/main/sql_mode_pad_char_to_full_length.test create mode 100644 mysql-test/suite/innodb/r/sql_mode_pad_char_to_full_length.result create mode 100644 mysql-test/suite/innodb/t/sql_mode_pad_char_to_full_length.test diff --git a/mysql-test/include/sql_mode_pad_char_to_full_length.inc b/mysql-test/include/sql_mode_pad_char_to_full_length.inc new file mode 100644 index 00000000000..df03c4dbc28 --- /dev/null +++ b/mysql-test/include/sql_mode_pad_char_to_full_length.inc @@ -0,0 +1,31 @@ +--echo # +--echo # MDEV-28190 sql_mode makes MDEV-371 virtual column expressions nondeterministic +--echo # + +CREATE TABLE t1 (a INT,b CHAR(20)); +SHOW CREATE TABLE t1; +CREATE UNIQUE INDEX bi USING HASH ON t1 (b); +INSERT INTO t1 VALUES (0,0); +SET sql_mode='pad_char_to_full_length'; +DELETE FROM t1; +DROP TABLE t1; + + +SET sql_mode=''; +CREATE TABLE t1 (a INT,b CHAR(20)); +SHOW CREATE TABLE t1; +CREATE UNIQUE INDEX bi USING HASH ON t1 (b); +SET sql_mode='pad_char_to_full_length'; +INSERT INTO t1 VALUES (0,0); +DELETE FROM t1; +DROP TABLE t1; + + +SET sql_mode=''; +CREATE OR REPLACE TABLE t1 (a CHAR(20),b CHAR(20)); +SHOW CREATE TABLE t1; +CREATE UNIQUE INDEX bi USING HASH ON t1 (b); +INSERT INTO t1 VALUES (0,0); +SET sql_mode='pad_char_to_full_length'; +DELETE FROM t1; +DROP TABLE t1; diff --git a/mysql-test/main/sql_mode_pad_char_to_full_length.result b/mysql-test/main/sql_mode_pad_char_to_full_length.result new file mode 100644 index 00000000000..6f68aade613 --- /dev/null +++ b/mysql-test/main/sql_mode_pad_char_to_full_length.result @@ -0,0 +1,94 @@ +# +# Start of 10.4 tests +# +# +# MDEV-28190 sql_mode makes MDEV-371 virtual column expressions nondeterministic +# +SET default_storage_engine=MyISAM; +# +# MDEV-28190 sql_mode makes MDEV-371 virtual column expressions nondeterministic +# +CREATE TABLE t1 (a INT,b CHAR(20)); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(20) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +CREATE UNIQUE INDEX bi USING HASH ON t1 (b); +INSERT INTO t1 VALUES (0,0); +SET sql_mode='pad_char_to_full_length'; +DELETE FROM t1; +DROP TABLE t1; +SET sql_mode=''; +CREATE TABLE t1 (a INT,b CHAR(20)); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(20) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +CREATE UNIQUE INDEX bi USING HASH ON t1 (b); +SET sql_mode='pad_char_to_full_length'; +INSERT INTO t1 VALUES (0,0); +DELETE FROM t1; +DROP TABLE t1; +SET sql_mode=''; +CREATE OR REPLACE TABLE t1 (a CHAR(20),b CHAR(20)); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` char(20) DEFAULT NULL, + `b` char(20) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +CREATE UNIQUE INDEX bi USING HASH ON t1 (b); +INSERT INTO t1 VALUES (0,0); +SET sql_mode='pad_char_to_full_length'; +DELETE FROM t1; +DROP TABLE t1; +SET 
default_storage_engine=MEMORY; +# +# MDEV-28190 sql_mode makes MDEV-371 virtual column expressions nondeterministic +# +CREATE TABLE t1 (a INT,b CHAR(20)); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(20) DEFAULT NULL +) ENGINE=MEMORY DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +CREATE UNIQUE INDEX bi USING HASH ON t1 (b); +INSERT INTO t1 VALUES (0,0); +SET sql_mode='pad_char_to_full_length'; +DELETE FROM t1; +DROP TABLE t1; +SET sql_mode=''; +CREATE TABLE t1 (a INT,b CHAR(20)); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(20) DEFAULT NULL +) ENGINE=MEMORY DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +CREATE UNIQUE INDEX bi USING HASH ON t1 (b); +SET sql_mode='pad_char_to_full_length'; +INSERT INTO t1 VALUES (0,0); +DELETE FROM t1; +DROP TABLE t1; +SET sql_mode=''; +CREATE OR REPLACE TABLE t1 (a CHAR(20),b CHAR(20)); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` char(20) DEFAULT NULL, + `b` char(20) DEFAULT NULL +) ENGINE=MEMORY DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +CREATE UNIQUE INDEX bi USING HASH ON t1 (b); +INSERT INTO t1 VALUES (0,0); +SET sql_mode='pad_char_to_full_length'; +DELETE FROM t1; +DROP TABLE t1; +SET default_storage_engine=DEFAULT; +# +# End of 10.4 tests +# diff --git a/mysql-test/main/sql_mode_pad_char_to_full_length.test b/mysql-test/main/sql_mode_pad_char_to_full_length.test new file mode 100644 index 00000000000..4d492bc1b70 --- /dev/null +++ b/mysql-test/main/sql_mode_pad_char_to_full_length.test @@ -0,0 +1,19 @@ +--echo # +--echo # Start of 10.4 tests +--echo # + +--echo # +--echo # MDEV-28190 sql_mode makes MDEV-371 virtual column expressions nondeterministic +--echo # + +SET default_storage_engine=MyISAM; +--source include/sql_mode_pad_char_to_full_length.inc + +SET default_storage_engine=MEMORY; +--source include/sql_mode_pad_char_to_full_length.inc + +SET default_storage_engine=DEFAULT; + +--echo # +--echo # End of 10.4 tests +--echo # diff --git a/mysql-test/suite/innodb/r/sql_mode_pad_char_to_full_length.result b/mysql-test/suite/innodb/r/sql_mode_pad_char_to_full_length.result new file mode 100644 index 00000000000..09c1cf57497 --- /dev/null +++ b/mysql-test/suite/innodb/r/sql_mode_pad_char_to_full_length.result @@ -0,0 +1,51 @@ +SET default_storage_engine=InnoDB; +# +# Start of 10.4 tests +# +# +# MDEV-28190 sql_mode makes MDEV-371 virtual column expressions nondeterministic +# +# +# MDEV-28190 sql_mode makes MDEV-371 virtual column expressions nondeterministic +# +CREATE TABLE t1 (a INT,b CHAR(20)); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(20) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +CREATE UNIQUE INDEX bi USING HASH ON t1 (b); +INSERT INTO t1 VALUES (0,0); +SET sql_mode='pad_char_to_full_length'; +DELETE FROM t1; +DROP TABLE t1; +SET sql_mode=''; +CREATE TABLE t1 (a INT,b CHAR(20)); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + `b` char(20) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +CREATE UNIQUE INDEX bi USING HASH ON t1 (b); +SET sql_mode='pad_char_to_full_length'; +INSERT INTO t1 VALUES (0,0); +DELETE FROM t1; +DROP TABLE t1; +SET sql_mode=''; +CREATE OR REPLACE TABLE t1 (a CHAR(20),b CHAR(20)); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` char(20) DEFAULT NULL, + `b` 
char(20) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +CREATE UNIQUE INDEX bi USING HASH ON t1 (b); +INSERT INTO t1 VALUES (0,0); +SET sql_mode='pad_char_to_full_length'; +DELETE FROM t1; +DROP TABLE t1; +# +# End of 10.4 tests +# diff --git a/mysql-test/suite/innodb/t/sql_mode_pad_char_to_full_length.test b/mysql-test/suite/innodb/t/sql_mode_pad_char_to_full_length.test new file mode 100644 index 00000000000..ba286c744d9 --- /dev/null +++ b/mysql-test/suite/innodb/t/sql_mode_pad_char_to_full_length.test @@ -0,0 +1,18 @@ +--source include/have_innodb.inc + +SET default_storage_engine=InnoDB; + +--echo # +--echo # Start of 10.4 tests +--echo # + +--echo # +--echo # MDEV-28190 sql_mode makes MDEV-371 virtual column expressions nondeterministic +--echo # + +--source include/sql_mode_pad_char_to_full_length.inc + + +--echo # +--echo # End of 10.4 tests +--echo # -- cgit v1.2.1 From 4daea2f8b69417881b4f123a252954f22b499df1 Mon Sep 17 00:00:00 2001 From: lilinjie <1136268146@qq.com> Date: Fri, 31 Mar 2023 14:11:04 +0800 Subject: fix typo Signed-off-by: lilinjie <1136268146@qq.com> --- sql/log.cc | 2 +- sql/log_event.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/log.cc b/sql/log.cc index d2cbb49e280..8c75ae847b3 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -408,7 +408,7 @@ private: Rows_log_event *m_pending; /* - Bit flags for what has been writting to cache. Used to + Bit flags for what has been writing to cache. Used to discard logs without any data changes. see enum_logged_status; */ diff --git a/sql/log_event.h b/sql/log_event.h index b12ee07b0e2..5cd303e288c 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -726,7 +726,7 @@ enum Log_event_type /* - Bit flags for what has been writting to cache. Used to + Bit flags for what has been writing to cache. Used to discard logs with table map events but not row events and nothing else important. This is stored by cache. */ -- cgit v1.2.1 From f83b7ae13d9619af29d74a27dd253b67aa0ee4fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Thu, 6 Apr 2023 07:50:23 +0300 Subject: MDEV-26175 : Assertion `! thd->in_sub_stmt' failed in bool trans_rollback_stmt(THD*) If we are inside stored function or trigger we should not commit or rollback current statement transaction. 
Signed-off-by: Julius Goryavsky --- mysql-test/suite/galera/r/mdev-26175.result | 24 ++++++++++++++++++++++ mysql-test/suite/galera/t/galera_sequences.test | 1 + mysql-test/suite/galera/t/mdev-26175.test | 27 +++++++++++++++++++++++++ sql/handler.cc | 8 +++++++- 4 files changed, 59 insertions(+), 1 deletion(-) create mode 100644 mysql-test/suite/galera/r/mdev-26175.result create mode 100644 mysql-test/suite/galera/t/mdev-26175.test diff --git a/mysql-test/suite/galera/r/mdev-26175.result b/mysql-test/suite/galera/r/mdev-26175.result new file mode 100644 index 00000000000..f84244fe916 --- /dev/null +++ b/mysql-test/suite/galera/r/mdev-26175.result @@ -0,0 +1,24 @@ +connection node_2; +connection node_1; +connection node_1; +SET sql_mode="no_zero_date"; +SET GLOBAL wsrep_max_ws_rows=1; +CREATE TABLE t2 (a INT); +CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY) ENGINE=InnoDB; +CREATE TRIGGER tgr BEFORE INSERT ON t1 FOR EACH ROW INSERT INTO t2 VALUES (0); +INSERT INTO t1 VALUES (0),(1); +ERROR HY000: wsrep_max_ws_rows exceeded +SELECT * FROM t1; +a +SELECT * FROM t2; +a +connection node_2; +SELECT * FROM t1; +a +SELECT * FROM t2; +a +connection node_1; +SET sql_mode=DEFAULT; +SET GLOBAL wsrep_max_ws_rows=DEFAULT; +DROP TRIGGER tgr; +DROP TABLE t1, t2; diff --git a/mysql-test/suite/galera/t/galera_sequences.test b/mysql-test/suite/galera/t/galera_sequences.test index faa3b46d2a7..613823d83e9 100644 --- a/mysql-test/suite/galera/t/galera_sequences.test +++ b/mysql-test/suite/galera/t/galera_sequences.test @@ -1,4 +1,5 @@ --source include/galera_cluster.inc +--source include/have_innodb.inc # # MDEV-19353 : Alter Sequence do not replicate to another nodes with in Galera Cluster diff --git a/mysql-test/suite/galera/t/mdev-26175.test b/mysql-test/suite/galera/t/mdev-26175.test new file mode 100644 index 00000000000..1a3f1153e03 --- /dev/null +++ b/mysql-test/suite/galera/t/mdev-26175.test @@ -0,0 +1,27 @@ +--source include/galera_cluster.inc +--source include/have_innodb.inc + +# +# MDEV-26175 : Assertion `! thd->in_sub_stmt' failed in bool trans_rollback_stmt(THD*) +# +--connection node_1 +SET sql_mode="no_zero_date"; +SET GLOBAL wsrep_max_ws_rows=1; +CREATE TABLE t2 (a INT); +CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY) ENGINE=InnoDB; +CREATE TRIGGER tgr BEFORE INSERT ON t1 FOR EACH ROW INSERT INTO t2 VALUES (0); + +--error ER_ERROR_DURING_COMMIT +INSERT INTO t1 VALUES (0),(1); +SELECT * FROM t1; +SELECT * FROM t2; + +--connection node_2 +SELECT * FROM t1; +SELECT * FROM t2; + +--connection node_1 +SET sql_mode=DEFAULT; +SET GLOBAL wsrep_max_ws_rows=DEFAULT; +DROP TRIGGER tgr; +DROP TABLE t1, t2; diff --git a/sql/handler.cc b/sql/handler.cc index 8ed8ee64880..7eaaaf63f00 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -6618,7 +6618,13 @@ static int wsrep_after_row(THD *thd) wsrep_thd_is_local(thd) && thd->wsrep_affected_rows > wsrep_max_ws_rows) { - trans_rollback_stmt(thd) || trans_rollback(thd); + /* + If we are inside stored function or trigger we should not commit or + rollback current statement transaction. See comment in ha_commit_trans() + call for more information. 
+ */ + if (!thd->in_sub_stmt) + trans_rollback_stmt(thd) || trans_rollback(thd); my_message(ER_ERROR_DURING_COMMIT, "wsrep_max_ws_rows exceeded", MYF(0)); DBUG_RETURN(ER_ERROR_DURING_COMMIT); } -- cgit v1.2.1 From 7bcfa00a6a19ae74afe985213e09fc1f0c0540db Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Wed, 12 Apr 2023 11:40:46 +0400 Subject: MDEV-31039 mariadb-backup: remove global variables ds_data and ds_meta This is a non-functional change. simplifying the code logic: - removing global variables ds_data and ds_meta - passing these variables as parameters to functions instead - adding helper classes: Datasink_free_list and Backup_datasinks - moving some function accepting a ds_ctxt parameter as methods to ds_ctxt. --- extra/mariabackup/backup_copy.cc | 169 ++++++++++++++--------------- extra/mariabackup/backup_copy.h | 19 +--- extra/mariabackup/backup_mysql.cc | 32 +++--- extra/mariabackup/backup_mysql.h | 15 +-- extra/mariabackup/datasink.h | 29 +++++ extra/mariabackup/write_filt.cc | 14 ++- extra/mariabackup/write_filt.h | 3 +- extra/mariabackup/xtrabackup.cc | 219 ++++++++++++++++++++++---------------- extra/mariabackup/xtrabackup.h | 10 +- 9 files changed, 289 insertions(+), 221 deletions(-) diff --git a/extra/mariabackup/backup_copy.cc b/extra/mariabackup/backup_copy.cc index 608be7125bd..ffaf6dc98e4 100644 --- a/extra/mariabackup/backup_copy.cc +++ b/extra/mariabackup/backup_copy.cc @@ -73,9 +73,8 @@ bool binlog_locked; static void rocksdb_create_checkpoint(); static bool has_rocksdb_plugin(); -static void copy_or_move_dir(const char *from, const char *to, bool copy, bool allow_hardlinks); -static void rocksdb_backup_checkpoint(); -static void rocksdb_copy_back(); +static void rocksdb_backup_checkpoint(ds_ctxt *ds_data); +static void rocksdb_copy_back(ds_ctxt *ds_data); static bool is_abs_path(const char *path) { @@ -131,7 +130,7 @@ struct datadir_thread_ctxt_t { bool ret; }; -static bool backup_files_from_datadir(const char *dir_path); +static bool backup_files_from_datadir(ds_ctxt *ds_data, const char *dir_path); /************************************************************************ Retirn true if character if file separator */ @@ -804,7 +803,7 @@ if passes the rules for partial backup. @return true if file backed up or skipped successfully. 
*/ static bool -datafile_copy_backup(const char *filepath, uint thread_n) +datafile_copy_backup(ds_ctxt *ds_data, const char *filepath, uint thread_n) { const char *ext_list[] = {"frm", "isl", "MYD", "MYI", "MAD", "MAI", "MRG", "TRG", "TRN", "ARM", "ARZ", "CSM", "CSV", "opt", "par", @@ -825,7 +824,7 @@ datafile_copy_backup(const char *filepath, uint thread_n) } if (filename_matches(filepath, ext_list)) { - return copy_file(ds_data, filepath, filepath, thread_n); + return ds_data->copy_file(filepath, filepath, thread_n); } return(true); @@ -866,7 +865,8 @@ datafile_rsync_backup(const char *filepath, bool save_to_list, FILE *f) return(true); } -bool backup_file_print_buf(const char *filename, const char *buf, int buf_len) +bool ds_ctxt_t::backup_file_print_buf(const char *filename, + const char *buf, int buf_len) { ds_file_t *dstfile = NULL; MY_STAT stat; /* unused for now */ @@ -877,7 +877,7 @@ bool backup_file_print_buf(const char *filename, const char *buf, int buf_len) stat.st_size = buf_len; stat.st_mtime = my_time(0); - dstfile = ds_open(ds_data, filename, &stat); + dstfile = ds_open(this, filename, &stat); if (dstfile == NULL) { msg("error: Can't open the destination stream for %s", filename); @@ -916,9 +916,9 @@ error_close: return true; }; -static bool -backup_file_vprintf(const char *filename, const char *fmt, va_list ap) +ds_ctxt_t::backup_file_vprintf(const char *filename, + const char *fmt, va_list ap) { char *buf = 0; int buf_len; @@ -929,7 +929,7 @@ backup_file_vprintf(const char *filename, const char *fmt, va_list ap) } bool -backup_file_printf(const char *filename, const char *fmt, ...) +ds_ctxt_t::backup_file_printf(const char *filename, const char *fmt, ...) { bool result; va_list ap; @@ -1055,16 +1055,15 @@ static int fix_win_file_permissions(const char *file) Copy file for backup/restore. @return true in case of success. */ bool -copy_file(ds_ctxt_t *datasink, - const char *src_file_path, - const char *dst_file_path, - uint thread_n) +ds_ctxt_t::copy_file(const char *src_file_path, + const char *dst_file_path, + uint thread_n) { char dst_name[FN_REFLEN]; ds_file_t *dstfile = NULL; datafile_cur_t cursor; xb_fil_cur_result_t res; - DBUG_ASSERT(datasink->datasink->remove); + DBUG_ASSERT(datasink->remove); const char *dst_path = (xtrabackup_copy_back || xtrabackup_move_back)? dst_file_path : trim_dotslash(dst_file_path); @@ -1075,7 +1074,7 @@ copy_file(ds_ctxt_t *datasink, strncpy(dst_name, cursor.rel_path, sizeof(dst_name)); - dstfile = ds_open(datasink, dst_path, &cursor.statinfo); + dstfile = ds_open(this, dst_path, &cursor.statinfo); if (dstfile == NULL) { msg(thread_n,"error: " "cannot open the destination stream for %s", dst_name); @@ -1112,7 +1111,7 @@ copy_file(ds_ctxt_t *datasink, error: datafile_close(&cursor); if (dstfile != NULL) { - datasink->datasink->remove(dstfile->path); + datasink->remove(dstfile->path); ds_close(dstfile); } @@ -1126,12 +1125,10 @@ error_close: Try to move file by renaming it. If source and destination are on different devices fall back to copy and unlink. @return true in case of success. 
*/ -static bool -move_file(ds_ctxt_t *datasink, - const char *src_file_path, - const char *dst_file_path, - const char *dst_dir, uint thread_n) +ds_ctxt_t::move_file(const char *src_file_path, + const char *dst_file_path, + const char *dst_dir, uint thread_n) { char errbuf[MYSYS_STRERROR_SIZE]; char dst_file_path_abs[FN_REFLEN]; @@ -1158,7 +1155,7 @@ move_file(ds_ctxt_t *datasink, if (my_rename(src_file_path, dst_file_path_abs, MYF(0)) != 0) { if (my_errno == EXDEV) { /* Fallback to copy/unlink */ - if(!copy_file(datasink, src_file_path, + if(!copy_file(src_file_path, dst_file_path, thread_n)) return false; msg(thread_n,"Removing %s", src_file_path); @@ -1242,13 +1239,13 @@ Copy or move file depending on current mode. @return true in case of success. */ static bool -copy_or_move_file(const char *src_file_path, +copy_or_move_file(ds_ctxt *datasink0, const char *src_file_path, const char *dst_file_path, const char *dst_dir, uint thread_n, bool copy = xtrabackup_copy_back) { - ds_ctxt_t *datasink = ds_data; /* copy to datadir by default */ + ds_ctxt_t *datasink = datasink0; /* copy to datadir by default */ char filedir[FN_REFLEN]; size_t filedir_len; bool ret; @@ -1296,13 +1293,13 @@ copy_or_move_file(const char *src_file_path, } ret = (copy ? - copy_file(datasink, src_file_path, dst_file_path, thread_n) : - move_file(datasink, src_file_path, dst_file_path, + datasink->copy_file(src_file_path, dst_file_path, thread_n) : + datasink->move_file(src_file_path, dst_file_path, dst_dir, thread_n)); cleanup: - if (datasink != ds_data) { + if (datasink != datasink0) { ds_destroy(datasink); } @@ -1314,7 +1311,7 @@ cleanup: static bool -backup_files(const char *from, bool prep_mode) +backup_files(ds_ctxt *ds_data, const char *from, bool prep_mode) { char rsync_tmpfile_name[FN_REFLEN]; FILE *rsync_tmpfile = NULL; @@ -1352,7 +1349,7 @@ backup_files(const char *from, bool prep_mode) ret = datafile_rsync_backup(node.filepath, !prep_mode, rsync_tmpfile); } else { - ret = datafile_copy_backup(node.filepath, 1); + ret = datafile_copy_backup(ds_data, node.filepath, 1); } if (!ret) { msg("Failed to copy file %s", node.filepath); @@ -1363,7 +1360,7 @@ backup_files(const char *from, bool prep_mode) char path[FN_REFLEN]; snprintf(path, sizeof(path), "%s/db.opt", node.filepath); - if (!(ret = backup_file_printf( + if (!(ret = ds_data->backup_file_printf( trim_dotslash(path), "%s", ""))) { msg("Failed to create file %s", path); goto out; @@ -1452,7 +1449,6 @@ out: return(ret); } -void backup_fix_ddl(CorruptedPages &); lsn_t get_current_lsn(MYSQL *connection) { @@ -1477,7 +1473,8 @@ lsn_t get_current_lsn(MYSQL *connection) lsn_t server_lsn_after_lock; extern void backup_wait_for_lsn(lsn_t lsn); /** Start --backup */ -bool backup_start(CorruptedPages &corrupted_pages) +bool backup_start(ds_ctxt *ds_data, ds_ctxt *ds_meta, + CorruptedPages &corrupted_pages) { if (!opt_no_lock) { if (opt_safe_slave_backup) { @@ -1486,7 +1483,7 @@ bool backup_start(CorruptedPages &corrupted_pages) } } - if (!backup_files(fil_path_to_mysql_datadir, true)) { + if (!backup_files(ds_data, fil_path_to_mysql_datadir, true)) { return(false); } @@ -1498,11 +1495,11 @@ bool backup_start(CorruptedPages &corrupted_pages) server_lsn_after_lock = get_current_lsn(mysql_connection); } - if (!backup_files(fil_path_to_mysql_datadir, false)) { + if (!backup_files(ds_data, fil_path_to_mysql_datadir, false)) { return(false); } - if (!backup_files_from_datadir(fil_path_to_mysql_datadir)) { + if (!backup_files_from_datadir(ds_data, fil_path_to_mysql_datadir)) 
{ return false; } @@ -1512,7 +1509,7 @@ bool backup_start(CorruptedPages &corrupted_pages) msg("Waiting for log copy thread to read lsn %llu", (ulonglong)server_lsn_after_lock); backup_wait_for_lsn(server_lsn_after_lock); - backup_fix_ddl(corrupted_pages); + corrupted_pages.backup_fix_ddl(ds_data, ds_meta); // There is no need to stop slave thread before coping non-Innodb data when // --no-lock option is used because --no-lock option requires that no DDL or @@ -1528,7 +1525,7 @@ bool backup_start(CorruptedPages &corrupted_pages) if (opt_slave_info) { lock_binlog_maybe(mysql_connection); - if (!write_slave_info(mysql_connection)) { + if (!write_slave_info(ds_data, mysql_connection)) { return(false); } } @@ -1540,7 +1537,7 @@ bool backup_start(CorruptedPages &corrupted_pages) avoid that is to have a single process, i.e. merge innobackupex and xtrabackup. */ if (opt_galera_info) { - if (!write_galera_info(mysql_connection)) { + if (!write_galera_info(ds_data, mysql_connection)) { return(false); } } @@ -1548,7 +1545,7 @@ bool backup_start(CorruptedPages &corrupted_pages) if (opt_binlog_info == BINLOG_INFO_ON) { lock_binlog_maybe(mysql_connection); - write_binlog_info(mysql_connection); + write_binlog_info(ds_data, mysql_connection); } if (have_flush_engine_logs && !opt_no_lock) { @@ -1585,20 +1582,20 @@ void backup_release() static const char *default_buffer_pool_file = "ib_buffer_pool"; /** Finish after backup_start() and backup_release() */ -bool backup_finish() +bool backup_finish(ds_ctxt *ds_data) { /* Copy buffer pool dump or LRU dump */ if (!opt_rsync && opt_galera_info) { if (buffer_pool_filename && file_exists(buffer_pool_filename)) { - copy_file(ds_data, buffer_pool_filename, default_buffer_pool_file, 0); + ds_data->copy_file(buffer_pool_filename, default_buffer_pool_file, 0); } if (file_exists("ib_lru_dump")) { - copy_file(ds_data, "ib_lru_dump", "ib_lru_dump", 0); + ds_data->copy_file("ib_lru_dump", "ib_lru_dump", 0); } } if (has_rocksdb_plugin()) { - rocksdb_backup_checkpoint(); + rocksdb_backup_checkpoint(ds_data); } msg("Backup created in directory '%s'", xtrabackup_target_dir); @@ -1610,11 +1607,11 @@ bool backup_finish() mysql_slave_position); } - if (!write_backup_config_file()) { + if (!write_backup_config_file(ds_data)) { return(false); } - if (!write_xtrabackup_info(mysql_connection, XTRABACKUP_INFO, + if (!write_xtrabackup_info(ds_data, mysql_connection, XTRABACKUP_INFO, opt_history != 0, true)) { return(false); } @@ -1681,6 +1678,7 @@ ibx_copy_incremental_over_full() bool ret = true; char path[FN_REFLEN]; int i; + ds_ctxt *ds_data= NULL; DBUG_ASSERT(!opt_galera_info); datadir_node_init(&node); @@ -1708,15 +1706,15 @@ ibx_copy_incremental_over_full() unlink(node.filepath_rel); } - if (!(ret = copy_file(ds_data, node.filepath, - node.filepath_rel, 1))) { + if (!(ret = ds_data->copy_file(node.filepath, + node.filepath_rel, 1))) { msg("Failed to copy file %s", node.filepath); goto cleanup; } } - if (!(ret = backup_files_from_datadir(xtrabackup_incremental_dir))) + if (!(ret = backup_files_from_datadir(ds_data, xtrabackup_incremental_dir))) goto cleanup; /* copy supplementary files */ @@ -1731,7 +1729,7 @@ ibx_copy_incremental_over_full() if (file_exists(sup_files[i])) { unlink(sup_files[i]); } - copy_file(ds_data, path, sup_files[i], 0); + ds_data->copy_file(path, sup_files[i], 0); } } @@ -1745,7 +1743,7 @@ ibx_copy_incremental_over_full() if (my_mkdir(ROCKSDB_BACKUP_DIR, 0777, MYF(0))) { die("my_mkdir failed for " ROCKSDB_BACKUP_DIR); } - copy_or_move_dir(path, 
ROCKSDB_BACKUP_DIR, true, true); + ds_data->copy_or_move_dir(path, ROCKSDB_BACKUP_DIR, true, true); } ibx_incremental_drop_databases(xtrabackup_target_dir, xtrabackup_incremental_dir); @@ -1894,7 +1892,7 @@ copy_back() dst_dir = dst_dir_buf.make(srv_undo_dir); - ds_data = ds_create(dst_dir, DS_TYPE_LOCAL); + ds_ctxt *ds_tmp = ds_create(dst_dir, DS_TYPE_LOCAL); for (uint i = 1; i <= TRX_SYS_MAX_UNDO_SPACES; i++) { char filename[20]; @@ -1902,14 +1900,14 @@ copy_back() if (!file_exists(filename)) { break; } - if (!(ret = copy_or_move_file(filename, filename, + if (!(ret = copy_or_move_file(ds_tmp, filename, filename, dst_dir, 1))) { goto cleanup; } } - ds_destroy(ds_data); - ds_data = NULL; + ds_destroy(ds_tmp); + ds_tmp = NULL; /* copy redo logs */ @@ -1918,7 +1916,7 @@ copy_back() /* --backup generates a single ib_logfile0, which we must copy if it exists. */ - ds_data = ds_create(dst_dir, DS_TYPE_LOCAL); + ds_tmp = ds_create(dst_dir, DS_TYPE_LOCAL); MY_STAT stat_arg; if (!my_stat("ib_logfile0", &stat_arg, MYF(0)) || !stat_arg.st_size) { /* After completed --prepare, redo log files are redundant. @@ -1932,17 +1930,17 @@ copy_back() dst_dir, i); unlink(filename); } - } else if (!(ret = copy_or_move_file("ib_logfile0", "ib_logfile0", + } else if (!(ret = copy_or_move_file(ds_tmp, "ib_logfile0", "ib_logfile0", dst_dir, 1))) { goto cleanup; } - ds_destroy(ds_data); + ds_destroy(ds_tmp); /* copy innodb system tablespace(s) */ dst_dir = dst_dir_buf.make(innobase_data_home_dir); - ds_data = ds_create(dst_dir, DS_TYPE_LOCAL); + ds_tmp = ds_create(dst_dir, DS_TYPE_LOCAL); for (Tablespace::const_iterator iter(srv_sys_space.begin()), end(srv_sys_space.end()); @@ -1950,16 +1948,16 @@ copy_back() ++iter) { const char *filename = base_name(iter->name()); - if (!(ret = copy_or_move_file(filename, iter->name(), + if (!(ret = copy_or_move_file(ds_tmp, filename, iter->name(), dst_dir, 1))) { goto cleanup; } } - ds_destroy(ds_data); + ds_destroy(ds_tmp); /* copy the rest of tablespaces */ - ds_data = ds_create(mysql_data_home, DS_TYPE_LOCAL); + ds_tmp = ds_create(mysql_data_home, DS_TYPE_LOCAL); it = datadir_iter_new(".", false); @@ -2046,7 +2044,7 @@ copy_back() continue; } - if (!(ret = copy_or_move_file(node.filepath, node.filepath_rel, + if (!(ret = copy_or_move_file(ds_tmp, node.filepath, node.filepath_rel, mysql_data_home, 1))) { goto cleanup; } @@ -2056,12 +2054,12 @@ copy_back() if (file_exists(default_buffer_pool_file) && innobase_buffer_pool_filename) { - copy_or_move_file(default_buffer_pool_file, + copy_or_move_file(ds_tmp, default_buffer_pool_file, innobase_buffer_pool_filename, mysql_data_home, 0); } - rocksdb_copy_back(); + rocksdb_copy_back(ds_tmp); cleanup: if (it != NULL) { @@ -2070,11 +2068,11 @@ cleanup: datadir_node_free(&node); - if (ds_data != NULL) { - ds_destroy(ds_data); + if (ds_tmp != NULL) { + ds_destroy(ds_tmp); } - ds_data = NULL; + ds_tmp = NULL; sync_check_close(); return(ret); @@ -2182,7 +2180,7 @@ decrypt_decompress() } /* copy the rest of tablespaces */ - ds_data = ds_create(".", DS_TYPE_LOCAL); + ds_ctxt *ds_tmp = ds_create(".", DS_TYPE_LOCAL); it = datadir_iter_new(".", false); @@ -2195,11 +2193,11 @@ decrypt_decompress() datadir_iter_free(it); } - if (ds_data != NULL) { - ds_destroy(ds_data); + if (ds_tmp != NULL) { + ds_destroy(ds_tmp); } - ds_data = NULL; + ds_tmp = NULL; sync_check_close(); @@ -2211,7 +2209,7 @@ decrypt_decompress() Do not copy the Innodb files (ibdata1, redo log files), as this is done in a separate step. 
*/ -static bool backup_files_from_datadir(const char *dir_path) +static bool backup_files_from_datadir(ds_ctxt *ds_data, const char *dir_path) { os_file_dir_t dir = os_file_opendir(dir_path); if (dir == IF_WIN(INVALID_HANDLE_VALUE, nullptr)) return false; @@ -2241,7 +2239,7 @@ static bool backup_files_from_datadir(const char *dir_path) std::string full_path(dir_path); full_path.append(1, OS_PATH_SEPARATOR).append(info.name); - if (!(ret = copy_file(ds_data, full_path.c_str() , info.name, 1))) + if (!(ret = ds_data->copy_file(full_path.c_str() , info.name, 1))) break; } os_file_closedir(dir); @@ -2291,13 +2289,14 @@ static char *trim_trailing_dir_sep(char *path) Create a file hardlink. @return true on success, false on error. */ -static bool make_hardlink(const char *from_path, const char *to_path) +bool +ds_ctxt_t::make_hardlink(const char *from_path, const char *to_path) { DBUG_EXECUTE_IF("no_hardlinks", return false;); char to_path_full[FN_REFLEN]; if (!is_abs_path(to_path)) { - fn_format(to_path_full, to_path, ds_data->root, "", MYF(MY_RELATIVE_PATH)); + fn_format(to_path_full, to_path, root, "", MYF(MY_RELATIVE_PATH)); } else { @@ -2318,7 +2317,9 @@ static bool make_hardlink(const char *from_path, const char *to_path) Has optimization that allows to use hardlinks when possible (source and destination are directories on the same device) */ -static void copy_or_move_dir(const char *from, const char *to, bool do_copy, bool allow_hardlinks) +void +ds_ctxt_t::copy_or_move_dir(const char *from, const char *to, + bool do_copy, bool allow_hardlinks) { datadir_node_t node; datadir_node_init(&node); @@ -2346,8 +2347,8 @@ static void copy_or_move_dir(const char *from, const char *to, bool do_copy, boo if (!rc) { rc = (do_copy ? - copy_file(ds_data, from_path, to_path, 1) : - move_file(ds_data, from_path, node.filepath_rel, + copy_file(from_path, to_path, 1) : + move_file(from_path, node.filepath_rel, to, 1)); } if (!rc) @@ -2444,7 +2445,7 @@ static void rocksdb_create_checkpoint() remove temp.checkpoint directory (in server's datadir) and release user level lock acquired inside rocksdb_create_checkpoint(). 
*/ -static void rocksdb_backup_checkpoint() +static void rocksdb_backup_checkpoint(ds_ctxt *ds_data) { msg("Backing up rocksdb files."); char rocksdb_backup_dir[FN_REFLEN]; @@ -2456,7 +2457,7 @@ static void rocksdb_backup_checkpoint() die("Can't create rocksdb backup directory %s", rocksdb_backup_dir); } } - copy_or_move_dir(rocksdb_checkpoint_dir, ROCKSDB_BACKUP_DIR, true, backup_to_directory); + ds_data->copy_or_move_dir(rocksdb_checkpoint_dir, ROCKSDB_BACKUP_DIR, true, backup_to_directory); rocksdb_remove_checkpoint_directory(); rocksdb_unlock_checkpoint(); } @@ -2464,7 +2465,7 @@ static void rocksdb_backup_checkpoint() /* Copies #rocksdb directory to the $rockdb_data_dir, on copy-back */ -static void rocksdb_copy_back() { +static void rocksdb_copy_back(ds_ctxt *ds_data) { if (access(ROCKSDB_BACKUP_DIR, 0)) return; char rocksdb_home_dir[FN_REFLEN]; @@ -2476,5 +2477,5 @@ static void rocksdb_copy_back() { xb_rocksdb_datadir?trim_dotslash(xb_rocksdb_datadir): ROCKSDB_BACKUP_DIR); } mkdirp(rocksdb_home_dir, 0777, MYF(0)); - copy_or_move_dir(ROCKSDB_BACKUP_DIR, rocksdb_home_dir, xtrabackup_copy_back, xtrabackup_copy_back); + ds_data->copy_or_move_dir(ROCKSDB_BACKUP_DIR, rocksdb_home_dir, xtrabackup_copy_back, xtrabackup_copy_back); } diff --git a/extra/mariabackup/backup_copy.h b/extra/mariabackup/backup_copy.h index 62b2b1bc232..b4a323f2e89 100644 --- a/extra/mariabackup/backup_copy.h +++ b/extra/mariabackup/backup_copy.h @@ -14,30 +14,18 @@ extern bool binlog_locked; -bool -backup_file_printf(const char *filename, const char *fmt, ...) - ATTRIBUTE_FORMAT(printf, 2, 0); - /************************************************************************ Return true if first and second arguments are the same path. */ bool equal_paths(const char *first, const char *second); -/************************************************************************ -Copy file for backup/restore. -@return true in case of success. */ -bool -copy_file(ds_ctxt_t *datasink, - const char *src_file_path, - const char *dst_file_path, - uint thread_n); - /** Start --backup */ -bool backup_start(CorruptedPages &corrupted_pages); +bool backup_start(ds_ctxt *ds_data, ds_ctxt *ds_meta, + CorruptedPages &corrupted_pages); /** Release resources after backup_start() */ void backup_release(); /** Finish after backup_start() and backup_release() */ -bool backup_finish(); +bool backup_finish(ds_ctxt *ds_data); bool apply_log_finish(); bool @@ -51,6 +39,5 @@ directory_exists(const char *dir, bool create); lsn_t get_current_lsn(MYSQL *connection); -bool backup_file_print_buf(const char *filename, const char *buf, int buf_len); #endif diff --git a/extra/mariabackup/backup_mysql.cc b/extra/mariabackup/backup_mysql.cc index b895f8c4561..6003bfb36c4 100644 --- a/extra/mariabackup/backup_mysql.cc +++ b/extra/mariabackup/backup_mysql.cc @@ -1383,7 +1383,7 @@ variable. @returns true on success */ bool -write_slave_info(MYSQL *connection) +write_slave_info(ds_ctxt *datasink, MYSQL *connection) { String sql, comment; bool show_all_slaves_status= false; @@ -1413,7 +1413,8 @@ write_slave_info(MYSQL *connection) } mysql_slave_position= strdup(comment.c_ptr()); - return backup_file_print_buf(XTRABACKUP_SLAVE_INFO, sql.ptr(), sql.length()); + return datasink->backup_file_print_buf(XTRABACKUP_SLAVE_INFO, + sql.ptr(), sql.length()); } @@ -1421,7 +1422,7 @@ write_slave_info(MYSQL *connection) Retrieves MySQL Galera and saves it in a file. It also prints it to stdout. 
*/ bool -write_galera_info(MYSQL *connection) +write_galera_info(ds_ctxt *datasink, MYSQL *connection) { char *state_uuid = NULL, *state_uuid55 = NULL; char *last_committed = NULL, *last_committed55 = NULL; @@ -1453,12 +1454,12 @@ write_galera_info(MYSQL *connection) goto cleanup; } - result = backup_file_printf(XTRABACKUP_GALERA_INFO, + result = datasink->backup_file_printf(XTRABACKUP_GALERA_INFO, "%s:%s\n", state_uuid ? state_uuid : state_uuid55, last_committed ? last_committed : last_committed55); if (result) { - write_current_binlog_file(connection); + write_current_binlog_file(datasink, connection); } cleanup: @@ -1472,7 +1473,7 @@ cleanup: Flush and copy the current binary log file into the backup, if GTID is enabled */ bool -write_current_binlog_file(MYSQL *connection) +write_current_binlog_file(ds_ctxt *datasink, MYSQL *connection) { char *executed_gtid_set = NULL; char *gtid_binlog_state = NULL; @@ -1542,7 +1543,7 @@ write_current_binlog_file(MYSQL *connection) snprintf(filepath, sizeof(filepath), "%s%c%s", log_bin_dir, FN_LIBCHAR, log_bin_file); - result = copy_file(ds_data, filepath, log_bin_file, 0); + result = datasink->copy_file(filepath, log_bin_file, 0); } cleanup: @@ -1558,7 +1559,7 @@ cleanup: Retrieves MySQL binlog position and saves it in a file. It also prints it to stdout. */ bool -write_binlog_info(MYSQL *connection) +write_binlog_info(ds_ctxt *datasink, MYSQL *connection) { char *filename = NULL; char *position = NULL; @@ -1603,14 +1604,14 @@ write_binlog_info(MYSQL *connection) "filename '%s', position '%s', " "GTID of the last change '%s'", filename, position, gtid) != -1); - result = backup_file_printf(XTRABACKUP_BINLOG_INFO, + result = datasink->backup_file_printf(XTRABACKUP_BINLOG_INFO, "%s\t%s\t%s\n", filename, position, gtid); } else { ut_a(asprintf(&mysql_binlog_position, "filename '%s', position '%s'", filename, position) != -1); - result = backup_file_printf(XTRABACKUP_BINLOG_INFO, + result = datasink->backup_file_printf(XTRABACKUP_BINLOG_INFO, "%s\t%s\n", filename, position); } @@ -1650,8 +1651,9 @@ PERCONA_SCHEMA.xtrabackup_history and writes a new history record to the table containing all the history info particular to the just completed backup. 
*/ bool -write_xtrabackup_info(MYSQL *connection, const char * filename, bool history, - bool stream) +write_xtrabackup_info(ds_ctxt *datasink, + MYSQL *connection, const char * filename, bool history, + bool stream) { bool result = true; @@ -1727,7 +1729,7 @@ write_xtrabackup_info(MYSQL *connection, const char * filename, bool history, } if (stream) { - backup_file_printf(filename, "%s", buf); + datasink->backup_file_printf(filename, "%s", buf); } else { fp = fopen(filename, "w"); if (!fp) { @@ -1848,9 +1850,9 @@ static std::string make_local_paths(const char *data_file_path) return buf.str(); } -bool write_backup_config_file() +bool write_backup_config_file(ds_ctxt *datasink) { - int rc= backup_file_printf("backup-my.cnf", + int rc= datasink->backup_file_printf("backup-my.cnf", "# This MySQL options file was generated by innobackupex.\n\n" "# The MySQL server\n" "[mysqld]\n" diff --git a/extra/mariabackup/backup_mysql.h b/extra/mariabackup/backup_mysql.h index b61fa2362c6..d80f3bb7bc1 100644 --- a/extra/mariabackup/backup_mysql.h +++ b/extra/mariabackup/backup_mysql.h @@ -62,17 +62,18 @@ void unlock_all(MYSQL *connection); bool -write_current_binlog_file(MYSQL *connection); +write_current_binlog_file(ds_ctxt *datasink, MYSQL *connection); bool -write_binlog_info(MYSQL *connection); +write_binlog_info(ds_ctxt *datasink, MYSQL *connection); bool -write_xtrabackup_info(MYSQL *connection, const char * filename, bool history, - bool stream); +write_xtrabackup_info(ds_ctxt *datasink, + MYSQL *connection, const char * filename, bool history, + bool stream); bool -write_backup_config_file(); +write_backup_config_file(ds_ctxt *datasink); bool lock_binlog_maybe(MYSQL *connection); @@ -84,10 +85,10 @@ bool wait_for_safe_slave(MYSQL *connection); bool -write_galera_info(MYSQL *connection); +write_galera_info(ds_ctxt *datasink, MYSQL *connection); bool -write_slave_info(MYSQL *connection); +write_slave_info(ds_ctxt *datasink, MYSQL *connection); #endif diff --git a/extra/mariabackup/datasink.h b/extra/mariabackup/datasink.h index 4bede4ec9e7..57468e0c9c7 100644 --- a/extra/mariabackup/datasink.h +++ b/extra/mariabackup/datasink.h @@ -37,6 +37,35 @@ typedef struct ds_ctxt { char *root; void *ptr; struct ds_ctxt *pipe_ctxt; + /* + Copy file for backup/restore. + @return true in case of success. + */ + bool copy_file(const char *src_file_path, + const char *dst_file_path, + uint thread_n); + + bool move_file(const char *src_file_path, + const char *dst_file_path, + const char *dst_dir, + uint thread_n); + + bool make_hardlink(const char *from_path, const char *to_path); + + void copy_or_move_dir(const char *from, const char *to, + bool do_copy, bool allow_hardlinks); + + bool backup_file_vprintf(const char *filename, + const char *fmt, va_list ap); + + bool backup_file_print_buf(const char *filename, + const char *buf, + int buf_len); + + bool backup_file_printf(const char *filename, + const char *fmt, ...) + ATTRIBUTE_FORMAT(printf, 2, 0); + } ds_ctxt_t; typedef struct { diff --git a/extra/mariabackup/write_filt.cc b/extra/mariabackup/write_filt.cc index c144a60e378..dd77a9bb953 100644 --- a/extra/mariabackup/write_filt.cc +++ b/extra/mariabackup/write_filt.cc @@ -31,7 +31,8 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA /************************************************************************ Write-through page write filter. 
*/ -static my_bool wf_wt_init(xb_write_filt_ctxt_t *ctxt, char *dst_name, +static my_bool wf_wt_init(ds_ctxt *ds_meta, + xb_write_filt_ctxt_t *ctxt, char *dst_name, xb_fil_cur_t *cursor, CorruptedPages *corrupted_pages); static my_bool wf_wt_process(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile); @@ -44,7 +45,8 @@ xb_write_filt_t wf_write_through = { /************************************************************************ Incremental page write filter. */ -static my_bool wf_incremental_init(xb_write_filt_ctxt_t *ctxt, char *dst_name, +static my_bool wf_incremental_init(ds_ctxt *ds_meta, + xb_write_filt_ctxt_t *ctxt, char *dst_name, xb_fil_cur_t *cursor, CorruptedPages *corrupted_pages); static my_bool wf_incremental_process(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile); @@ -64,7 +66,8 @@ Initialize incremental page write filter. @return TRUE on success, FALSE on error. */ static my_bool -wf_incremental_init(xb_write_filt_ctxt_t *ctxt, char *dst_name, +wf_incremental_init(ds_ctxt *ds_meta, + xb_write_filt_ctxt_t *ctxt, char *dst_name, xb_fil_cur_t *cursor, CorruptedPages *corrupted_pages) { char meta_name[FN_REFLEN]; @@ -88,7 +91,7 @@ wf_incremental_init(xb_write_filt_ctxt_t *ctxt, char *dst_name, XB_DELTA_INFO_SUFFIX); const xb_delta_info_t info(cursor->page_size, cursor->zip_size, cursor->space_id); - if (!xb_write_delta_metadata(meta_name, &info)) { + if (!xb_write_delta_metadata(ds_meta, meta_name, &info)) { msg(cursor->thread_n,"Error: " "failed to write meta info for %s", cursor->rel_path); @@ -195,7 +198,8 @@ Initialize the write-through page write filter. @return TRUE on success, FALSE on error. */ static my_bool -wf_wt_init(xb_write_filt_ctxt_t *ctxt, char *dst_name __attribute__((unused)), +wf_wt_init(ds_ctxt *ds_meta __attribute__((unused)), + xb_write_filt_ctxt_t *ctxt, char *dst_name __attribute__((unused)), xb_fil_cur_t *cursor, CorruptedPages *) { ctxt->cursor = cursor; diff --git a/extra/mariabackup/write_filt.h b/extra/mariabackup/write_filt.h index 6c3ef24291f..a0ce0778a7f 100644 --- a/extra/mariabackup/write_filt.h +++ b/extra/mariabackup/write_filt.h @@ -45,7 +45,8 @@ typedef struct { typedef struct { - my_bool (*init)(xb_write_filt_ctxt_t *ctxt, char *dst_name, + my_bool (*init)(ds_ctxt *ds_meta, + xb_write_filt_ctxt_t *ctxt, char *dst_name, xb_fil_cur_t *cursor, CorruptedPages *corrupted_pages); my_bool (*process)(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile); my_bool (*finalize)(xb_write_filt_ctxt_t *, ds_file_t *dstfile); diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index ad35749d0ff..ee12034c910 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -291,10 +291,66 @@ char *xb_plugin_dir; char *xb_plugin_load; my_bool xb_close_files; -/* Datasinks */ -ds_ctxt_t *ds_data = NULL; -ds_ctxt_t *ds_meta = NULL; -ds_ctxt_t *ds_redo = NULL; + +class Datasink_free_list +{ +protected: + /* + Simple datasink creation tracking... + add datasinks in the reverse order you want them destroyed. + */ +#define XTRABACKUP_MAX_DATASINKS 10 + ds_ctxt_t *m_datasinks_to_destroy[XTRABACKUP_MAX_DATASINKS]; + uint m_actual_datasinks_to_destroy; +public: + Datasink_free_list() + :m_actual_datasinks_to_destroy(0) + { } + + void add_datasink_to_destroy(ds_ctxt_t *ds) + { + xb_ad(m_actual_datasinks_to_destroy < XTRABACKUP_MAX_DATASINKS); + m_datasinks_to_destroy[m_actual_datasinks_to_destroy] = ds; + m_actual_datasinks_to_destroy++; + } + + /* + Destroy datasinks. 
+ Destruction is done in the specific order to not violate their order in the + pipeline so that each datasink is able to flush data down the pipeline. + */ + void destroy() + { + for (uint i= m_actual_datasinks_to_destroy; i > 0; i--) + { + ds_destroy(m_datasinks_to_destroy[i - 1]); + m_datasinks_to_destroy[i - 1] = NULL; + } + } +}; + + +class Backup_datasinks: public Datasink_free_list +{ +public: + ds_ctxt_t *m_data; + ds_ctxt_t *m_meta; + ds_ctxt_t *m_redo; + + Backup_datasinks() + :m_data(NULL), + m_meta(NULL), + m_redo(NULL) + { } + void init(); + void destroy() + { + Datasink_free_list::destroy(); + *this= Backup_datasinks(); + } + bool backup_low(); +}; + static bool innobackupex_mode = false; @@ -440,7 +496,8 @@ void CorruptedPages::rename_space(ulint space_id, const std::string &new_name) ut_a(!pthread_mutex_unlock(&m_mutex)); } -bool CorruptedPages::print_to_file(const char *filename) const +bool CorruptedPages::print_to_file(ds_ctxt *ds_data, + const char *filename) const { std::ostringstream out; ut_a(!pthread_mutex_lock(&m_mutex)); @@ -468,8 +525,8 @@ bool CorruptedPages::print_to_file(const char *filename) const out << "\n"; } ut_a(!pthread_mutex_unlock(&m_mutex)); - if (xtrabackup_backup) - return backup_file_print_buf(filename, out.str().c_str(), + if (ds_data) + return ds_data->backup_file_print_buf(filename, out.str().c_str(), static_cast(out.str().size())); std::ofstream outfile; outfile.open(filename); @@ -587,19 +644,6 @@ void CorruptedPages::zero_out_free_pages() ut_free(buf); } -/* Simple datasink creation tracking...add datasinks in the reverse order you -want them destroyed. */ -#define XTRABACKUP_MAX_DATASINKS 10 -static ds_ctxt_t *datasinks[XTRABACKUP_MAX_DATASINKS]; -static uint actual_datasinks = 0; -static inline -void -xtrabackup_add_datasink(ds_ctxt_t *ds) -{ - xb_ad(actual_datasinks < XTRABACKUP_MAX_DATASINKS); - datasinks[actual_datasinks] = ds; actual_datasinks++; -} - typedef void (*process_single_tablespace_func_t)(const char *dirname, const char *filname, bool is_remote, @@ -955,6 +999,7 @@ typedef struct { pthread_mutex_t* count_mutex; os_thread_id_t id; CorruptedPages *corrupted_pages; + Backup_datasinks *datasinks; } data_thread_ctxt_t; /* ======== for option and variables ======== */ @@ -2456,7 +2501,8 @@ xb_read_delta_metadata(const char *filepath, xb_delta_info_t *info) Write meta info for an incremental delta. @return TRUE on success, FALSE on failure. */ my_bool -xb_write_delta_metadata(const char *filename, const xb_delta_info_t *info) +xb_write_delta_metadata(ds_ctxt *ds_meta, + const char *filename, const xb_delta_info_t *info) { ds_file_t *f; char buf[64]; @@ -2771,7 +2817,9 @@ xb_get_copy_action(const char *dflt) for full backup, pages filter for incremental backup, etc. @return FALSE on success and TRUE on error */ -static my_bool xtrabackup_copy_datafile(fil_node_t *node, uint thread_n, +static my_bool xtrabackup_copy_datafile(ds_ctxt *ds_data, + ds_ctxt *ds_meta, + fil_node_t *node, uint thread_n, const char *dest_name, const xb_write_filt_t &write_filter, CorruptedPages &corrupted_pages) @@ -2839,7 +2887,7 @@ static my_bool xtrabackup_copy_datafile(fil_node_t *node, uint thread_n, ut_a(write_filter.process != NULL); if (write_filter.init != NULL && - !write_filter.init(&write_filt_ctxt, dst_name, &cursor, + !write_filter.init(ds_meta, &write_filt_ctxt, dst_name, &cursor, opt_log_innodb_page_corruption ? 
&corrupted_pages : NULL)) { msg (thread_n, "mariabackup: error: failed to initialize page write filter."); goto error; @@ -3204,7 +3252,8 @@ DECLARE_THREAD(data_copy_thread_func)( DBUG_EXECUTE_FOR_KEY("wait_innodb_redo_before_copy", node->space->name, backup_wait_for_lsn(get_current_lsn(mysql_connection));); /* copy the datafile */ - if (xtrabackup_copy_datafile(node, num, NULL, + if (xtrabackup_copy_datafile(ctxt->datasinks->m_data, + ctxt->datasinks->m_meta, node, num, NULL, xtrabackup_incremental ? wf_incremental : wf_write_through, *ctxt->corrupted_pages)) die("failed to copy datafile."); @@ -3230,22 +3279,21 @@ Otherwise (i.e. when streaming in the 'tar' format) we need 2 separate datasinks for the data stream (and don't allow parallel data copying) and for metainfo files (including ib_logfile0). The second datasink writes to temporary files first, and then streams them in a serialized way when closed. */ -static void -xtrabackup_init_datasinks(void) +void Backup_datasinks::init() { /* Start building out the pipelines from the terminus back */ if (xtrabackup_stream) { /* All streaming goes to stdout */ - ds_data = ds_meta = ds_redo = ds_create(xtrabackup_target_dir, - DS_TYPE_STDOUT); + m_data = m_meta = m_redo = ds_create(xtrabackup_target_dir, + DS_TYPE_STDOUT); } else { /* Local filesystem */ - ds_data = ds_meta = ds_redo = ds_create(xtrabackup_target_dir, - DS_TYPE_LOCAL); + m_data = m_meta = m_redo = ds_create(xtrabackup_target_dir, + DS_TYPE_LOCAL); } /* Track it for destruction */ - xtrabackup_add_datasink(ds_data); + add_datasink_to_destroy(m_data); /* Stream formatting */ if (xtrabackup_stream) { @@ -3254,66 +3302,50 @@ xtrabackup_init_datasinks(void) ut_a(xtrabackup_stream_fmt == XB_STREAM_FMT_XBSTREAM); ds = ds_create(xtrabackup_target_dir, DS_TYPE_XBSTREAM); - xtrabackup_add_datasink(ds); + add_datasink_to_destroy(ds); - ds_set_pipe(ds, ds_data); - ds_data = ds; + ds_set_pipe(ds, m_data); + m_data = ds; - ds_redo = ds_meta = ds_data; + m_redo = m_meta = m_data; } - /* Compression for ds_data and ds_redo */ + /* Compression for m_data and m_redo */ if (xtrabackup_compress) { ds_ctxt_t *ds; /* Use a 1 MB buffer for compressed output stream */ ds = ds_create(xtrabackup_target_dir, DS_TYPE_BUFFER); ds_buffer_set_size(ds, 1024 * 1024); - xtrabackup_add_datasink(ds); - ds_set_pipe(ds, ds_data); - if (ds_data != ds_redo) { - ds_data = ds; + add_datasink_to_destroy(ds); + ds_set_pipe(ds, m_data); + if (m_data != m_redo) { + m_data = ds; ds = ds_create(xtrabackup_target_dir, DS_TYPE_BUFFER); ds_buffer_set_size(ds, 1024 * 1024); - xtrabackup_add_datasink(ds); - ds_set_pipe(ds, ds_redo); - ds_redo = ds; + add_datasink_to_destroy(ds); + ds_set_pipe(ds, m_redo); + m_redo = ds; } else { - ds_redo = ds_data = ds; + m_redo = m_data = ds; } ds = ds_create(xtrabackup_target_dir, DS_TYPE_COMPRESS); - xtrabackup_add_datasink(ds); - ds_set_pipe(ds, ds_data); - if (ds_data != ds_redo) { - ds_data = ds; + add_datasink_to_destroy(ds); + ds_set_pipe(ds, m_data); + if (m_data != m_redo) { + m_data = ds; ds = ds_create(xtrabackup_target_dir, DS_TYPE_COMPRESS); - xtrabackup_add_datasink(ds); - ds_set_pipe(ds, ds_redo); - ds_redo = ds; + add_datasink_to_destroy(ds); + ds_set_pipe(ds, m_redo); + m_redo = ds; } else { - ds_redo = ds_data = ds; + m_redo = m_data = ds; } } } -/************************************************************************ -Destroy datasinks. 
- -Destruction is done in the specific order to not violate their order in the -pipeline so that each datasink is able to flush data down the pipeline. */ -static void xtrabackup_destroy_datasinks(void) -{ - for (uint i = actual_datasinks; i > 0; i--) { - ds_destroy(datasinks[i-1]); - datasinks[i-1] = NULL; - } - ds_data = NULL; - ds_meta = NULL; - ds_redo = NULL; -} - #define SRV_MAX_N_PENDING_SYNC_IOS 100 /** Initialize the tablespace cache subsystem. */ @@ -4447,7 +4479,7 @@ static void stop_backup_threads() /** Implement the core of --backup @return whether the operation succeeded */ -static bool xtrabackup_backup_low() +bool Backup_datasinks::backup_low() { ut_ad(!metadata_to_lsn); @@ -4499,7 +4531,7 @@ static bool xtrabackup_backup_low() } metadata_last_lsn = log_copy_scanned_lsn; - if (!xtrabackup_stream_metadata(ds_meta)) { + if (!xtrabackup_stream_metadata(m_meta)) { msg("Error: failed to stream metadata."); return false; } @@ -4515,7 +4547,8 @@ static bool xtrabackup_backup_low() } sprintf(filename, "%s/%s", xtrabackup_extra_lsndir, XTRABACKUP_INFO); - if (!write_xtrabackup_info(mysql_connection, filename, false, false)) { + if (!write_xtrabackup_info(m_data, + mysql_connection, filename, false, false)) { msg("Error: failed to write info " "to '%s'.", filename); return false; @@ -4535,6 +4568,7 @@ static bool xtrabackup_backup_func() pthread_mutex_t count_mutex; CorruptedPages corrupted_pages; data_thread_ctxt_t *data_threads; + Backup_datasinks backup_datasinks; pthread_mutex_init(&backup_mutex, NULL); pthread_cond_init(&scanned_lsn_cond, NULL); @@ -4686,7 +4720,7 @@ reread_log_header: log_mutex_exit(); - xtrabackup_init_datasinks(); + backup_datasinks.init(); if (!select_history()) { goto fail; @@ -4694,7 +4728,7 @@ reread_log_header: /* open the log file */ memset(&stat_info, 0, sizeof(MY_STAT)); - dst_log_file = ds_open(ds_redo, "ib_logfile0", &stat_info); + dst_log_file = ds_open(backup_datasinks.m_redo, "ib_logfile0", &stat_info); if (dst_log_file == NULL) { msg("Error: failed to open the target stream for " "'ib_logfile0'."); @@ -4812,6 +4846,7 @@ fail_before_log_copying_thread_start: data_threads[i].count = &count; data_threads[i].count_mutex = &count_mutex; data_threads[i].corrupted_pages = &corrupted_pages; + data_threads[i].datasinks= &backup_datasinks; os_thread_create(data_copy_thread_func, data_threads + i, &data_threads[i].id); } @@ -4832,10 +4867,13 @@ fail_before_log_copying_thread_start: datafiles_iter_free(it); } - bool ok = backup_start(corrupted_pages); + DBUG_ASSERT(backup_datasinks.m_data); + DBUG_ASSERT(backup_datasinks.m_meta); + bool ok = backup_start(backup_datasinks.m_data, + backup_datasinks.m_meta, corrupted_pages); if (ok) { - ok = xtrabackup_backup_low(); + ok = backup_datasinks.backup_low(); backup_release(); @@ -4845,12 +4883,13 @@ fail_before_log_copying_thread_start: ); if (ok) { - backup_finish(); + backup_finish(backup_datasinks.m_data); } } if (opt_log_innodb_page_corruption) - ok = corrupted_pages.print_to_file(MB_CORRUPTED_PAGES_FILE); + ok = corrupted_pages.print_to_file(backup_datasinks.m_data, + MB_CORRUPTED_PAGES_FILE); if (!ok) { goto fail; @@ -4859,7 +4898,7 @@ fail_before_log_copying_thread_start: if (changed_page_bitmap) { xb_page_bitmap_deinit(changed_page_bitmap); } - xtrabackup_destroy_datasinks(); + backup_datasinks.destroy(); msg("Redo log (from LSN " LSN_PF " to " LSN_PF ") was copied.", checkpoint_lsn_start, log_copy_scanned_lsn); @@ -4906,7 +4945,7 @@ FTWRL. This ensures consistent backup in presence of DDL. 
It is the responsibility of the prepare phase to deal with .new, .ren, and .del files. */ -void backup_fix_ddl(CorruptedPages &corrupted_pages) +void CorruptedPages::backup_fix_ddl(ds_ctxt *ds_data, ds_ctxt *ds_meta) { std::set new_tables; std::set dropped_tables; @@ -4929,7 +4968,7 @@ void backup_fix_ddl(CorruptedPages &corrupted_pages) if (ddl_tracker.drops.find(id) != ddl_tracker.drops.end()) { dropped_tables.insert(name); - corrupted_pages.drop_space(id); + drop_space(id); continue; } @@ -4951,20 +4990,20 @@ void backup_fix_ddl(CorruptedPages &corrupted_pages) of it because of optimized DDL. We emulate a drop/create.*/ dropped_tables.insert(name); if (opt_log_innodb_page_corruption) - corrupted_pages.drop_space(id); + drop_space(id); new_tables.insert(new_name); } else { /* Renamed, and no optimized DDL*/ renamed_tables[name] = new_name; if (opt_log_innodb_page_corruption) - corrupted_pages.rename_space(id, new_name); + rename_space(id, new_name); } } else if (has_optimized_ddl) { /* Table was recreated, or optimized DDL ran. In both cases we need a full copy in the backup.*/ new_tables.insert(name); if (opt_log_innodb_page_corruption) - corrupted_pages.drop_space(id); + drop_space(id); } } @@ -4985,7 +5024,7 @@ void backup_fix_ddl(CorruptedPages &corrupted_pages) dropped_tables.erase(name); new_tables.insert(name); if (opt_log_innodb_page_corruption) - corrupted_pages.drop_space(id); + drop_space(id); } } @@ -4994,7 +5033,8 @@ void backup_fix_ddl(CorruptedPages &corrupted_pages) iter != renamed_tables.end(); ++iter) { const std::string old_name = iter->first; std::string new_name = iter->second; - backup_file_printf((old_name + ".ren").c_str(), "%s", new_name.c_str()); + DBUG_ASSERT(ds_data); + ds_data->backup_file_printf((old_name + ".ren").c_str(), "%s", new_name.c_str()); } // Mark tablespaces for drop @@ -5002,7 +5042,7 @@ void backup_fix_ddl(CorruptedPages &corrupted_pages) iter != dropped_tables.end(); iter++) { const std::string name(*iter); - backup_file_printf((name + ".del").c_str(), "%s", ""); + ds_data->backup_file_printf((name + ".del").c_str(), "%s", ""); } // Load and copy new tables. 
@@ -5046,8 +5086,9 @@ void backup_fix_ddl(CorruptedPages &corrupted_pages) continue; std::string dest_name(node->space->name); dest_name.append(".new"); - xtrabackup_copy_datafile(node, 0, dest_name.c_str(), wf_write_through, - corrupted_pages); + xtrabackup_copy_datafile(ds_data, ds_meta, + node, 0, dest_name.c_str(), + wf_write_through, *this); } datafiles_iter_free(it); @@ -6163,7 +6204,7 @@ static bool xtrabackup_prepare_func(char** argv) } } else - corrupted_pages.print_to_file(MB_CORRUPTED_PAGES_FILE); + corrupted_pages.print_to_file(NULL, MB_CORRUPTED_PAGES_FILE); if (xtrabackup_rollback_xa) { diff --git a/extra/mariabackup/xtrabackup.h b/extra/mariabackup/xtrabackup.h index 854456c3afd..de3a96443a3 100644 --- a/extra/mariabackup/xtrabackup.h +++ b/extra/mariabackup/xtrabackup.h @@ -46,11 +46,13 @@ public: bool contains(ulint space_id, ulint page_no) const; void drop_space(ulint space_id); void rename_space(ulint space_id, const std::string &new_name); - bool print_to_file(const char *file_name) const; + bool print_to_file(ds_ctxt *ds_data, const char *file_name) const; void read_from_file(const char *file_name); bool empty() const; void zero_out_free_pages(); + void backup_fix_ddl(ds_ctxt *ds_data, ds_ctxt *ds_meta); + private: void add_page_no_lock(const char *space_name, ulint space_id, ulint page_no, bool convert_space_name); @@ -63,6 +65,7 @@ private: container_t m_spaces; }; + /* value of the --incremental option */ extern lsn_t incremental_lsn; @@ -76,8 +79,6 @@ extern char *xb_rocksdb_datadir; extern my_bool xb_backup_rocksdb; extern uint opt_protocol; -extern ds_ctxt_t *ds_meta; -extern ds_ctxt_t *ds_data; /* The last checkpoint LSN at the backup startup time */ extern lsn_t checkpoint_lsn_start; @@ -177,7 +178,8 @@ extern ulong opt_binlog_info; extern ulong xtrabackup_innodb_force_recovery; void xtrabackup_io_throttling(void); -my_bool xb_write_delta_metadata(const char *filename, +my_bool xb_write_delta_metadata(ds_ctxt *ds_meta, + const char *filename, const xb_delta_info_t *info); /************************************************************************ -- cgit v1.2.1 From ef4d09948d5ff38f5dff8974005ba222a4b18462 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Tue, 11 Apr 2023 21:21:45 -0700 Subject: MDEV-20773 Error from UPDATE when estimating selectivity of a range This bug could affect multi-update statements as well as single-table update statements processed as multi-updates when the where condition contained a range condition over a non-indexed varchar column. The optimizer calculates selectivity of such range conditions using histograms. For each range the buckets containing endpoints of the the range are determined with a procedure that stores the values of the endpoints in the space of the record buffer where values of the columns are usually stored. For a range over a varchar column the value of a endpoint may exceed the size of the buffer and in such case the value is stored with truncation. This truncations cannot affect the result of the calculation of the range selectivity as the calculation employes only the beginning of the value string. However it can trigger generation of an unexpected error on this truncation if an update statement is processed. This patch prohibits truncation messages when selectivity of a range condition is calculated for a non-indexed column. 
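For illustration, the kind of statement affected (taken from the regression
test added below) is an UPDATE whose WHERE clause contains a LIKE range over
a non-indexed VARCHAR column, evaluated with engine-independent statistics:

  create table t1 (a1 varchar(30), a2 varchar(30) collate utf8_bin);
  analyze table t1 persistent for all;
  update t1 set a1 = 'u'
    where a2 like 'xx%' and exists(select 1 from t1 where t1.a1 < 'c');
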
Approved by Oleksandr Byelkin --- mysql-test/main/update.result | 29 +++++++++++++++++++++++++++++ mysql-test/main/update.test | 23 +++++++++++++++++++++++ sql/opt_range.cc | 3 +++ 3 files changed, 55 insertions(+) diff --git a/mysql-test/main/update.result b/mysql-test/main/update.result index f5edf1c6be3..7b6426d2ec5 100644 --- a/mysql-test/main/update.result +++ b/mysql-test/main/update.result @@ -734,3 +734,32 @@ UPDATE t1,t2 SET t1.i1 = -39 WHERE t2.d1 <> t1.i1 AND t2.d1 = t1.d2; ERROR 22007: Incorrect datetime value: '19' for column `test`.`t1`.`i1` at row 1 DROP TABLE t1,t2; # End of MariaDB 10.2 tests +# +# MDEV-20773: UPDATE with LIKE predicate over non-indexed column +# of VARCHAR type +# +create table t1 (a1 varchar(30), a2 varchar(30) collate utf8_bin); +insert into t1 values +('aa','zzz'), ('b','xxaa'), ('ccc','yyy'), ('ddd','xxb'); +analyze table t1 persistent for all; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +explain extended +update t1 set a1 = 'u' + where a2 like 'xx%' and exists(select 1 from t1 where t1.a1 < 'c'); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 4 49.22 Using where +2 SUBQUERY t1 ALL NULL NULL NULL NULL 4 50.00 Using where +Warnings: +Note 1003 /* select#1 */ update `test`.`t1` set `test`.`t1`.`a1` = 'u' where `test`.`t1`.`a2` like 'xx%' +update t1 set a1 = 'u' + where a2 like 'xx%' and exists(select 1 from t1 where t1.a1 < 'c'); +select * from t1; +a1 a2 +aa zzz +u xxaa +ccc yyy +u xxb +drop table t1; +# End of MariaDB 10.4 tests diff --git a/mysql-test/main/update.test b/mysql-test/main/update.test index 8a6949447ee..147d69d50c9 100644 --- a/mysql-test/main/update.test +++ b/mysql-test/main/update.test @@ -676,3 +676,26 @@ UPDATE t1,t2 SET t1.i1 = -39 WHERE t2.d1 <> t1.i1 AND t2.d1 = t1.d2; DROP TABLE t1,t2; --echo # End of MariaDB 10.2 tests + +--echo # +--echo # MDEV-20773: UPDATE with LIKE predicate over non-indexed column +--echo # of VARCHAR type +--echo # + +create table t1 (a1 varchar(30), a2 varchar(30) collate utf8_bin); +insert into t1 values + ('aa','zzz'), ('b','xxaa'), ('ccc','yyy'), ('ddd','xxb'); +analyze table t1 persistent for all; + +explain extended +update t1 set a1 = 'u' + where a2 like 'xx%' and exists(select 1 from t1 where t1.a1 < 'c'); + +update t1 set a1 = 'u' + where a2 like 'xx%' and exists(select 1 from t1 where t1.a1 < 'c'); + +select * from t1; + +drop table t1; + +--echo # End of MariaDB 10.4 tests diff --git a/sql/opt_range.cc b/sql/opt_range.cc index c34420181a2..69a95f8da44 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -3518,7 +3518,10 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond) } else { + enum_check_fields save_count_cuted_fields= thd->count_cuted_fields; + thd->count_cuted_fields= CHECK_FIELD_IGNORE; rows= records_in_column_ranges(¶m, idx, key); + thd->count_cuted_fields= save_count_cuted_fields; if (rows != DBL_MAX) { key->field->cond_selectivity= rows/table_records; -- cgit v1.2.1 From d1a4315f4cb096c2fd81c96bc4afc6bb618bae49 Mon Sep 17 00:00:00 2001 From: Julius Goryavsky Date: Thu, 13 Apr 2023 07:49:35 +0200 Subject: MDEV-30402: Encrypted mariabackup SST breaks on distributions with newer socat This commit adds a new 'no-sni' option to socat which is required to properly authenticate with newer socat versions (after version 1.7.4+). 
This option is needed to disable the automatic use of the SNI feature (Server Name Indication) since the SST script directly specifies the commonname if necessary and automatic activation of the SNI feature is unnecessary in such scenarios. --- scripts/wsrep_sst_mariabackup.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/wsrep_sst_mariabackup.sh b/scripts/wsrep_sst_mariabackup.sh index 7e26af83701..b46e64a9e8b 100644 --- a/scripts/wsrep_sst_mariabackup.sh +++ b/scripts/wsrep_sst_mariabackup.sh @@ -340,6 +340,9 @@ get_transfer() "Use workaround for socat $SOCAT_VERSION bug" fi fi + if check_for_version "$SOCAT_VERSION" '1.7.4'; then + tcmd="$tcmd,no-sni=1" + fi fi if [ "${sockopt#*,dhparam=}" = "$sockopt" ]; then -- cgit v1.2.1 From 2e1c532bd2d9f9a35559e54f66d33c81e33009b1 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Fri, 24 Mar 2023 13:04:05 +1100 Subject: alloca() fix Corrections from 1e58b8afc086da755cf9209ed17fc36351da5563. * Re-add #pragma alloca for AIX - now in my_alloca.h --- include/my_alloca.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/include/my_alloca.h b/include/my_alloca.h index 25fd8867e69..de5f32bb886 100644 --- a/include/my_alloca.h +++ b/include/my_alloca.h @@ -34,7 +34,10 @@ #endif #endif -#if defined(HAVE_ALLOCA) +#if defined(_AIX) && !defined(__GNUC__) && !defined(_AIX43) +#pragma alloca +#endif /* _AIX */ + /* If the GCC/LLVM compiler from the MinGW is used, alloca may not be defined when using the MSVC CRT: @@ -42,6 +45,5 @@ #if defined(__GNUC__) && !defined(HAVE_ALLOCA_H) && !defined(alloca) #define alloca __builtin_alloca #endif /* GNUC */ -#endif #endif /* MY_ALLOCA_INCLUDED */ -- cgit v1.2.1 From f575de39afacb24cd43c40bf43c27bfcf97a670b Mon Sep 17 00:00:00 2001 From: Florian Weimer Date: Tue, 11 Apr 2023 09:39:40 +0200 Subject: rocksdb: Define _GNU_SOURCE during fallocate CMake probe The glibc headers declare fallocate only if _GNU_SOURCE is defined. Without this change, the probe fails with C compilers which do not support implicit function declarations even if the system does in fact support the fallocate function. Upstream rocksdb does not need this because the probe is run with the C++ compiler, and current g++ versions define _GNU_SOURCE automatically. --- storage/rocksdb/build_rocksdb.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/rocksdb/build_rocksdb.cmake b/storage/rocksdb/build_rocksdb.cmake index 647e51e2f90..ba894d83d75 100644 --- a/storage/rocksdb/build_rocksdb.cmake +++ b/storage/rocksdb/build_rocksdb.cmake @@ -134,8 +134,8 @@ option(WITH_FALLOCATE "build with fallocate" ON) if(WITH_FALLOCATE AND UNIX) include(CheckCSourceCompiles) CHECK_C_SOURCE_COMPILES(" +#define _GNU_SOURCE #include -#include int main() { int fd = open(\"/dev/null\", 0); fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0, 1024); -- cgit v1.2.1 From bc3bfcf943b817b19a41e4f599b4f2e9a259b263 Mon Sep 17 00:00:00 2001 From: Daniele Sciascia Date: Mon, 20 Mar 2023 15:20:32 +0100 Subject: MDEV-30862 Assertion `mode_ == m_high_priority' failed CREATE TABLE AS SELECT is not supported in combination with streaming replication. 
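For illustration, the statement pattern that is now rejected (condensed from
the test case added below) is:

  SET SESSION wsrep_trx_fragment_size=1;  # non-zero fragment size = streaming replication
  CREATE TABLE t2 SELECT * FROM t1;       # now fails with ER_NOT_ALLOWED_COMMAND
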
--- mysql-test/suite/galera_sr/r/MDEV-30862.result | 11 +++++++++++ mysql-test/suite/galera_sr/t/MDEV-30862.test | 24 ++++++++++++++++++++++++ sql/sql_table.cc | 13 +++++++++++++ 3 files changed, 48 insertions(+) create mode 100644 mysql-test/suite/galera_sr/r/MDEV-30862.result create mode 100644 mysql-test/suite/galera_sr/t/MDEV-30862.test diff --git a/mysql-test/suite/galera_sr/r/MDEV-30862.result b/mysql-test/suite/galera_sr/r/MDEV-30862.result new file mode 100644 index 00000000000..43da77f24df --- /dev/null +++ b/mysql-test/suite/galera_sr/r/MDEV-30862.result @@ -0,0 +1,11 @@ +connection node_2; +connection node_1; +SET autocommit=0; +SET SESSION wsrep_trx_fragment_size=1; +CREATE TABLE t2 SELECT seq FROM seq_1_to_50; +ERROR 42000: CREATE TABLE AS SELECT is not supported with streaming replication +CREATE TABLE t1 (f1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY); +INSERT INTO t1 VALUES(DEFAULT); +CREATE TABLE t2 SELECT * FROM t1; +ERROR 42000: CREATE TABLE AS SELECT is not supported with streaming replication +DROP TABLE t1; diff --git a/mysql-test/suite/galera_sr/t/MDEV-30862.test b/mysql-test/suite/galera_sr/t/MDEV-30862.test new file mode 100644 index 00000000000..6be77b4d71b --- /dev/null +++ b/mysql-test/suite/galera_sr/t/MDEV-30862.test @@ -0,0 +1,24 @@ +# +# MDEV-30862 Assertion `mode_ == m_high_priority' failed in +# void wsrep::client_state::after_applying() +# + +--source include/galera_cluster.inc +--source include/have_sequence.inc + +SET autocommit=0; +SET SESSION wsrep_trx_fragment_size=1; +--error ER_NOT_ALLOWED_COMMAND +CREATE TABLE t2 SELECT seq FROM seq_1_to_50; + + +# +# Same test without using seq +# +CREATE TABLE t1 (f1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY); +INSERT INTO t1 VALUES(DEFAULT); +--error ER_NOT_ALLOWED_COMMAND +CREATE TABLE t2 SELECT * FROM t1; + + +DROP TABLE t1; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 9f13dcde40f..1e88e7722e3 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -11569,6 +11569,19 @@ bool Sql_cmd_create_table_like::execute(THD *thd) } #endif +#ifdef WITH_WSREP + if (select_lex->item_list.elements && // With SELECT + WSREP(thd) && thd->variables.wsrep_trx_fragment_size > 0) + { + my_message( + ER_NOT_ALLOWED_COMMAND, + "CREATE TABLE AS SELECT is not supported with streaming replication", + MYF(0)); + res= 1; + goto end_with_restore_list; + } +#endif /* WITH_WSREP */ + if (select_lex->item_list.elements || select_lex->tvc) // With select or TVC { select_result *result; -- cgit v1.2.1 From feeeacc4d747868c234425dc12c157c6e5fa8fbb Mon Sep 17 00:00:00 2001 From: Daniele Sciascia Date: Wed, 29 Mar 2023 13:55:30 +0200 Subject: MDEV-30955 Explicit locks released too early in rollback path Assertion `thd->mdl_context.is_lock_owner()` fires when a client is disconnected, while transaction and and a table is opened through `HANDLER` interface. Reason for the assertion is that when a connection closes, its ongoing transaction is eventually rolled back in `Wsrep_client_state::bf_rollback()`. This method also releases explicit which are expected to survive beyond the transaction lifetime. This patch also removes calls to `mysql_ull_cleanup()`. User level locks are not supported in combination with Galera, making these calls unnecessary. 
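For illustration, the simplest scenario fixed here (test 1 in the test case
added below) is a client disconnect while a HANDLER is open inside a
transaction:

  START TRANSACTION;
  HANDLER t OPEN;
  # disconnecting now triggers a BF rollback; the explicit HANDLER/MDL lock
  # must remain owned until the table is closed during THD::cleanup()
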
--- mysql-test/suite/galera/r/MDEV-30955.result | 26 +++++++++++ mysql-test/suite/galera/t/MDEV-30955.test | 70 +++++++++++++++++++++++++++++ sql/sql_parse.cc | 2 +- sql/wsrep_client_service.cc | 2 - sql/wsrep_high_priority_service.cc | 2 - 5 files changed, 97 insertions(+), 5 deletions(-) create mode 100644 mysql-test/suite/galera/r/MDEV-30955.result create mode 100644 mysql-test/suite/galera/t/MDEV-30955.test diff --git a/mysql-test/suite/galera/r/MDEV-30955.result b/mysql-test/suite/galera/r/MDEV-30955.result new file mode 100644 index 00000000000..2a090cb58bc --- /dev/null +++ b/mysql-test/suite/galera/r/MDEV-30955.result @@ -0,0 +1,26 @@ +connection node_2; +connection node_1; +CREATE TABLE t (a CHAR(1) KEY); +START TRANSACTION; +HANDLER t OPEN; +disconnect node_1; +connect node_1, 127.0.0.1, root, , test, $NODE_MYPORT_1; +DROP TABLE t; +BACKUP STAGE START; +START TRANSACTION; +disconnect node_1; +connect node_1, 127.0.0.1, root, , test, $NODE_MYPORT_1; +connection node_1; +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY); +CREATE TABLE t2 (f1 INTEGER PRIMARY KEY); +START TRANSACTION; +INSERT INTO t1 VALUES(1); +HANDLER t2 OPEN; +connection node_2; +INSERT INTO t1 VALUES(1); +connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1; +connection node_1a; +connection node_1; +COMMIT; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +DROP TABLE t1,t2; diff --git a/mysql-test/suite/galera/t/MDEV-30955.test b/mysql-test/suite/galera/t/MDEV-30955.test new file mode 100644 index 00000000000..18577120e83 --- /dev/null +++ b/mysql-test/suite/galera/t/MDEV-30955.test @@ -0,0 +1,70 @@ +# +# MDEV-30955 +# Assertion `thd->mdl_context.is_lock_owner(MDL_key::TABLE, +# table->s->db.str, table->s->table_name.str, MDL_SHARED)' +# failed in close_thread_table() +# + +--source include/galera_cluster.inc + +# +# Test 1: Assertion thd->mdl_context.is_lock_owner() +# failed in close_thread_table() +# +CREATE TABLE t (a CHAR(1) KEY); +START TRANSACTION; +HANDLER t OPEN; + +# +# If bug is present the transaction will be aborted +# through Wsrep_client_service::bf_rollback() and +# release explicit locks too early. Later, during +# THD::cleanup(), table t will be closed and the +# THD is expected to be owner of the MDL lock that +# was just released. +# +--disconnect node_1 + +--connect node_1, 127.0.0.1, root, , test, $NODE_MYPORT_1 +DROP TABLE t; + + +# +# Test 2: Similar issue reproduces also with BACKUP STAGE locks. 
+# See comments in MDEV-25037 +# + +BACKUP STAGE START; +START TRANSACTION; +--disconnect node_1 +--connect node_1, 127.0.0.1, root, , test, $NODE_MYPORT_1 + + +# +# Test 3: Assertion `!thd->mdl_context.has_locks()' failed +# in do_command() +# + +--connection node_1 +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY); +CREATE TABLE t2 (f1 INTEGER PRIMARY KEY); + +--let $bf_count = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.global_status WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'` + +START TRANSACTION; +INSERT INTO t1 VALUES(1); +HANDLER t2 OPEN; + +--connection node_2 +INSERT INTO t1 VALUES(1); + +--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1 +--connection node_1a +--let $wait_condition = SELECT VARIABLE_VALUE = $bf_count + 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts' +--source include/wait_condition.inc + +--connection node_1 +--error ER_LOCK_DEADLOCK +COMMIT; + +DROP TABLE t1,t2; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index c495ae2d6c4..c95993e1604 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1309,7 +1309,7 @@ bool do_command(THD *thd) in wsrep_before_command(). */ WSREP_LOG_THD(thd, "enter found BF aborted"); - DBUG_ASSERT(!thd->mdl_context.has_locks()); + DBUG_ASSERT(!thd->mdl_context.has_transactional_locks()); DBUG_ASSERT(!thd->get_stmt_da()->is_set()); /* We let COM_QUIT and COM_STMT_CLOSE to execute even if wsrep aborted. */ if (command == COM_STMT_EXECUTE) diff --git a/sql/wsrep_client_service.cc b/sql/wsrep_client_service.cc index f00dfccf274..0399cf4f442 100644 --- a/sql/wsrep_client_service.cc +++ b/sql/wsrep_client_service.cc @@ -362,8 +362,6 @@ int Wsrep_client_service::bf_rollback() m_thd->global_read_lock.unlock_global_read_lock(m_thd); } m_thd->release_transactional_locks(); - mysql_ull_cleanup(m_thd); - m_thd->mdl_context.release_explicit_locks(); DBUG_RETURN(ret); } diff --git a/sql/wsrep_high_priority_service.cc b/sql/wsrep_high_priority_service.cc index 96269481559..3c6524b7ddf 100644 --- a/sql/wsrep_high_priority_service.cc +++ b/sql/wsrep_high_priority_service.cc @@ -357,8 +357,6 @@ int Wsrep_high_priority_service::rollback(const wsrep::ws_handle& ws_handle, m_thd->wsrep_cs().prepare_for_ordering(ws_handle, ws_meta, false); int ret= (trans_rollback_stmt(m_thd) || trans_rollback(m_thd)); m_thd->release_transactional_locks(); - mysql_ull_cleanup(m_thd); - m_thd->mdl_context.release_explicit_locks(); free_root(m_thd->mem_root, MYF(MY_KEEP_PREALLOC)); -- cgit v1.2.1 From d6651864773afc2b7534285ee0d50d4fd6b47e9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 19 Apr 2023 14:08:53 +0300 Subject: MDEV-28976: mtr must wait for server to actually die do_shutdown_server(): Call wait_until_dead() also when we are forcibly killing the process (timeout=0). We have evidence that killing the process may take some time and cause mystery failures in crash recovery tests. For InnoDB, several failures were observed between commit da094188f60bf67e3d90227304a4ea256fe2630f and commit 0ee1082bd2e7e7049c4f0e686bad53cf7ba053ab when no advisory file locking was being used by default. 
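For illustration only, a hypothetical test snippet (not part of this patch;
the restart helper used may differ per test) showing the forced-kill path
this fix is about:

  # timeout 0 = kill the server without attempting a graceful shutdown
  --shutdown_server 0
  --source include/start_mysqld.inc

With this change mysqltest calls wait_until_dead() even in the timeout=0 case
before resorting to SIGKILL, instead of killing and moving on immediately.
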
--- client/mysqltest.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/mysqltest.cc b/client/mysqltest.cc index 6330d4f881d..9e9b122c5fe 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -5191,7 +5191,7 @@ void do_shutdown_server(struct st_command *command) if (timeout) (void) my_kill(pid, SIGABRT); /* Give server a few seconds to die in all cases */ - if (!timeout || wait_until_dead(pid, timeout < 5 ? 5 : timeout)) + if (wait_until_dead(pid, timeout < 5 ? 5 : timeout)) { (void) my_kill(pid, SIGKILL); } -- cgit v1.2.1 From b2bbc66a41a7b3b622fbcd477e777c45e3248886 Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Mon, 10 Apr 2023 11:57:39 +0530 Subject: MDEV-24011 InnoDB: Failing assertion: index_cache->words == NULL in fts0fts.cc line 551 This issue happens when race condition happens when DDL and fts optimize thread. DDL adds the new index to fts cache. At the same time, fts optimize thread clears the cache and reinitialize it. Take cache init lock before reinitializing the cache. fts_sync_commit() should take dict_sys mutex to avoid the deadlock with create index. --- storage/innobase/fts/fts0fts.cc | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc index 224fc9593b7..8ce6fee0b76 100644 --- a/storage/innobase/fts/fts0fts.cc +++ b/storage/innobase/fts/fts0fts.cc @@ -851,7 +851,7 @@ fts_drop_index( dberr_t err = DB_SUCCESS; ut_a(indexes); - + ut_d(dict_sys.assert_locked()); if ((ib_vector_size(indexes) == 1 && (index == static_cast( ib_vector_getp(table->fts->indexes, 0))) @@ -873,7 +873,9 @@ fts_drop_index( current_doc_id = table->fts->cache->next_doc_id; first_doc_id = table->fts->cache->first_doc_id; + rw_lock_x_lock(&table->fts->cache->init_lock); fts_cache_clear(table->fts->cache); + rw_lock_x_unlock(&table->fts->cache->init_lock); fts_cache_destroy(table->fts->cache); table->fts->cache = fts_cache_create(table); table->fts->cache->next_doc_id = current_doc_id; @@ -4192,9 +4194,15 @@ fts_sync_commit( /* We need to do this within the deleted lock since fts_delete() can attempt to add a deleted doc id to the cache deleted id array. */ + mutex_enter(&dict_sys.mutex); + sync->table->fts->dict_locked = true; + rw_lock_x_lock(&cache->init_lock); fts_cache_clear(cache); DEBUG_SYNC_C("fts_deleted_doc_ids_clear"); fts_cache_init(cache); + rw_lock_x_unlock(&cache->init_lock); + sync->table->fts->dict_locked = false; + mutex_exit(&dict_sys.mutex); rw_lock_x_unlock(&cache->lock); if (UNIV_LIKELY(error == DB_SUCCESS)) { -- cgit v1.2.1 From 2bfd04e3145b238df5f31143b98b1df501f43d1e Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Tue, 11 Apr 2023 18:36:55 +0530 Subject: MDEV-31025 Redundant table alter fails when fixed column stored externally row_merge_buf_add(): Has strict assert that fixed length mismatch shouldn't happen while rebuilding the redundant row format table btr_index_rec_validate(): Fixed size column can be stored externally. 
So sum of inline stored length and external stored length of the column should be equal to total column length --- .../suite/innodb/r/default_row_format_alter.result | 20 ++++++++++++++++++++ .../suite/innodb/t/default_row_format_alter.test | 17 +++++++++++++++++ storage/innobase/btr/btr0btr.cc | 2 +- storage/innobase/row/row0merge.cc | 5 ----- 4 files changed, 38 insertions(+), 6 deletions(-) diff --git a/mysql-test/suite/innodb/r/default_row_format_alter.result b/mysql-test/suite/innodb/r/default_row_format_alter.result index 42cbab8a5f2..33936b59003 100644 --- a/mysql-test/suite/innodb/r/default_row_format_alter.result +++ b/mysql-test/suite/innodb/r/default_row_format_alter.result @@ -129,5 +129,25 @@ SELECT ROW_FORMAT FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1'; ROW_FORMAT Dynamic DROP TABLE t1; +# +# MDEV-31025 Redundant table alter fails when fixed column +# stored externally +# +set @old_sql_mode = @@sql_mode; +SET @@sql_mode=''; +CREATE TABLE t1(pk INT,c CHAR(255),c2 CHAR(255),c3 CHAR(255), +c4 char(255), c5 char(255), c6 char(255), +c7 char(255), c8 char(255), primary key(pk) +)Engine=InnoDB character set utf32 ROW_FORMAT=REDUNDANT; +INSERT INTO t1(pk, c) VALUES (1, repeat('a', 255)); +ALTER TABLE t1 FORCE; +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +SELECT LENGTH(c) FROM t1; +LENGTH(c) +1020 +DROP TABLE t1; +set @@sql_mode = @old_sql_mode; # End of 10.4 tests SET GLOBAL innodb_default_row_format = @row_format; diff --git a/mysql-test/suite/innodb/t/default_row_format_alter.test b/mysql-test/suite/innodb/t/default_row_format_alter.test index f5dd246efb5..5f2170454f3 100644 --- a/mysql-test/suite/innodb/t/default_row_format_alter.test +++ b/mysql-test/suite/innodb/t/default_row_format_alter.test @@ -150,6 +150,23 @@ ALTER TABLE t1 DROP b; SELECT ROW_FORMAT FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1'; DROP TABLE t1; +--echo # +--echo # MDEV-31025 Redundant table alter fails when fixed column +--echo # stored externally +--echo # +set @old_sql_mode = @@sql_mode; +SET @@sql_mode=''; +CREATE TABLE t1(pk INT,c CHAR(255),c2 CHAR(255),c3 CHAR(255), + c4 char(255), c5 char(255), c6 char(255), + c7 char(255), c8 char(255), primary key(pk) + )Engine=InnoDB character set utf32 ROW_FORMAT=REDUNDANT; +INSERT INTO t1(pk, c) VALUES (1, repeat('a', 255)); +ALTER TABLE t1 FORCE; +CHECK TABLE t1; +SELECT LENGTH(c) FROM t1; +DROP TABLE t1; +set @@sql_mode = @old_sql_mode; + --echo # End of 10.4 tests SET GLOBAL innodb_default_row_format = @row_format; diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc index f54a8e1125a..2df94715750 100644 --- a/storage/innobase/btr/btr0btr.cc +++ b/storage/innobase/btr/btr0btr.cc @@ -4713,7 +4713,7 @@ n_field_mismatch: len -= BTR_EXTERN_FIELD_REF_SIZE; ulint extern_len = mach_read_from_4( data + len + BTR_EXTERN_LEN + 4); - if (fixed_size == extern_len) { + if (fixed_size == extern_len + len) { goto next_field; } } diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc index 9d755ce6f1e..f0aed489f22 100644 --- a/storage/innobase/row/row0merge.cc +++ b/storage/innobase/row/row0merge.cc @@ -681,11 +681,6 @@ error: row_field, field, col->len, old_table->space->zip_size(), conv_heap); - } else { - /* Field length mismatch should not - happen when rebuilding redundant row - format table. 
*/ - ut_ad(index->table->not_redundant()); } } } -- cgit v1.2.1 From 660afb1e9c11d4fe1ba806557c60cda3f62b1be1 Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Thu, 13 Apr 2023 16:26:03 +0530 Subject: MDEV-30076 ibuf_insert tries to insert the entry for uncommitted index - Change buffer should not buffer the changes for uncommitted index --- storage/innobase/ibuf/ibuf0ibuf.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc index 3fe74c3a270..d611c7793f7 100644 --- a/storage/innobase/ibuf/ibuf0ibuf.cc +++ b/storage/innobase/ibuf/ibuf0ibuf.cc @@ -3577,6 +3577,10 @@ ibuf_insert( ulint zip_size, que_thr_t* thr) { + if (!index->is_committed()) { + return false; + } + dberr_t err; ulint entry_size; ibool no_counter; -- cgit v1.2.1 From 854e8b189e422e2d2e61e66380a18b9f8e33646d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 19 Apr 2023 15:53:26 +0300 Subject: MDEV-28976 fixup: A better fix do_shutdown_server(): After sending SIGKILL, invoke wait_until_dead(). Thanks to Sergei Golubchik for pointing out that the previous fix does not actually work. --- client/mysqltest.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/client/mysqltest.cc b/client/mysqltest.cc index 9e9b122c5fe..e745f8a3d2f 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -5191,9 +5191,10 @@ void do_shutdown_server(struct st_command *command) if (timeout) (void) my_kill(pid, SIGABRT); /* Give server a few seconds to die in all cases */ - if (wait_until_dead(pid, timeout < 5 ? 5 : timeout)) + if (!timeout || wait_until_dead(pid, timeout < 5 ? 5 : timeout)) { (void) my_kill(pid, SIGKILL); + wait_until_dead(pid, 5); } } DBUG_VOID_RETURN; -- cgit v1.2.1 From fc6e8a3d3264078bed28632a289130b1dc24daea Mon Sep 17 00:00:00 2001 From: Mikhail Chalov Date: Tue, 31 Jan 2023 14:14:55 -0800 Subject: Minimize unsafe C functions usage - replace strcat() and strcpy() Similar to 567b6812 continue to replace use of strcat() and strcpy() with safer options strncat() and strncpy(). All new code of the whole pull request, including one or several files that are either new files or modified ones, are contributed under the BSD-new license. I am contributing on behalf of my employer Amazon Web Services --- storage/connect/reldef.cpp | 26 ++++++++++--------- storage/connect/tabbson.cpp | 47 +++++++++++++++++----------------- storage/connect/tabdos.cpp | 61 ++++++++++++++++++++++++--------------------- storage/connect/tabext.cpp | 55 ++++++++++++++++++++++++---------------- storage/connect/tabfmt.cpp | 38 ++++++++++++++++------------ storage/connect/tabjdbc.cpp | 45 +++++++++++++++++---------------- storage/connect/tabjson.cpp | 54 +++++++++++++++++++-------------------- 7 files changed, 178 insertions(+), 148 deletions(-) diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp index 144d31735d6..786a53db5a2 100644 --- a/storage/connect/reldef.cpp +++ b/storage/connect/reldef.cpp @@ -91,11 +91,11 @@ PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char* tab, char* db, bool info) /* directories are used (to make this even remotely secure). 
*/ /*********************************************************************/ if (check_valid_path(module, strlen(module))) { - strcpy(g->Message, "Module cannot contain a path"); + safe_strcpy(g->Message, sizeof(g->Message), "Module cannot contain a path"); return NULL; } else if (strlen(subtype)+1+3 >= sizeof(getname)) { - strcpy(g->Message, "Subtype string too long"); + safe_strcpy(g->Message, sizeof(g->Message), "Subtype string too long"); return NULL; } else @@ -118,7 +118,8 @@ PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char* tab, char* db, bool info) FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0, (LPTSTR)buf, sizeof(buf), NULL); - strcat(strcat(g->Message, ": "), buf); + safe_strcat(g->Message, sizeof(g->Message), ": "); + safe_strcat(g->Message, sizeof(g->Message), buf); return NULL; } // endif hDll @@ -281,7 +282,7 @@ char *RELDEF::GetStringCatInfo(PGLOBAL g, PCSZ what, PCSZ sdef) if (IsFileType(GetTypeID(ftype))) { name= Hc->GetPartName(); sval= (char*)PlugSubAlloc(g, NULL, strlen(name) + 12); - strcat(strcpy(sval, name), "."); + snprintf(sval, strlen(name) + 12, "%s.", name); n= strlen(sval); // Fold ftype to lower case @@ -622,12 +623,11 @@ PTABDEF OEMDEF::GetXdef(PGLOBAL g) /* directories are used (to make this even remotely secure). */ /*********************************************************************/ if (check_valid_path(Module, strlen(Module))) { - strcpy(g->Message, "Module cannot contain a path"); + safe_strcpy(g->Message, sizeof(g->Message), "Module cannot contain a path"); return NULL; } else // PlugSetPath(soname, Module, GetPluginDir()); // Crashes on Fedora - strncat(strcpy(soname, GetPluginDir()), Module, - sizeof(soname) - strlen(soname) - 1); + snprintf(soname, sizeof(soname), "%s%s", GetPluginDir(), Module); #if defined(_WIN32) // Is the DLL already loaded? @@ -641,7 +641,8 @@ PTABDEF OEMDEF::GetXdef(PGLOBAL g) FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0, (LPTSTR)buf, sizeof(buf), NULL); - strcat(strcat(g->Message, ": "), buf); + safe_strcat(g->Message, sizeof(g->Message), ": "); + safe_strcat(g->Message, sizeof(g->Message), buf); return NULL; } // endif hDll @@ -661,7 +662,8 @@ PTABDEF OEMDEF::GetXdef(PGLOBAL g) FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, rc, 0, (LPTSTR)buf, sizeof(buf), NULL); - strcat(strcat(g->Message, ": "), buf); + safe_strcat(g->Message, sizeof(g->Message), ": "); + safe_strcat(g->Message, sizeof(g->Message), buf); FreeLibrary((HMODULE)Hdll); return NULL; } // endif getdef @@ -810,7 +812,7 @@ PTDB OEMDEF::GetTable(PGLOBAL g, MODE mode) else txfp = new(g) ZLBFAM(defp); #else // !GZ_SUPPORT - strcpy(g->Message, "Compress not supported"); + safe_strcpy(g->Message, sizeof(g->Message), "Compress not supported"); return NULL; #endif // !GZ_SUPPORT } else if (rfm == RECFM_VAR) { @@ -833,7 +835,7 @@ PTDB OEMDEF::GetTable(PGLOBAL g, MODE mode) else txfp = new(g) VCTFAM((PVCTDEF)defp); #else // !VCT_SUPPORT - strcpy(g->Message, "VCT no more supported"); + safe_strcpy(g->Message, sizeof(g->Message), "VCT no more supported"); return NULL; #endif // !VCT_SUPPORT } // endif's @@ -924,7 +926,7 @@ int COLDEF::Define(PGLOBAL g, void *, PCOLINFO cfp, int poff) return -1; } // endswitch - strcpy(F.Type, GetFormatType(Buf_Type)); + safe_strcpy(F.Type, sizeof(F.Type), GetFormatType(Buf_Type)); F.Length = cfp->Length; F.Prec = cfp->Scale; Offset = (cfp->Offset < 0) ? 
poff : cfp->Offset; diff --git a/storage/connect/tabbson.cpp b/storage/connect/tabbson.cpp index 22d8648d7c0..2ea74da94b0 100644 --- a/storage/connect/tabbson.cpp +++ b/storage/connect/tabbson.cpp @@ -39,6 +39,7 @@ #include "checklvl.h" #include "resource.h" #include "mycat.h" // for FNC_COL +#include "m_string.h" /***********************************************************************/ /* This should be an option. */ @@ -80,7 +81,7 @@ PQRYRES BSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info) } // endif info if (GetIntegerTableOption(g, topt, "Multiple", 0)) { - strcpy(g->Message, "Cannot find column definition for multiple table"); + safe_strcpy(g->Message, sizeof(g->Message), "Cannot find column definition for multiple table"); return NULL; } // endif Multiple @@ -206,7 +207,7 @@ int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) tdp->Uri = (dsn && *dsn ? dsn : NULL); if (!tdp->Fn && !tdp->Uri) { - strcpy(g->Message, MSG(MISSING_FNAME)); + safe_strcpy(g->Message, sizeof(g->Message), MSG(MISSING_FNAME)); return 0; } else topt->subtype = NULL; @@ -318,7 +319,7 @@ int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) switch (tjnp->ReadDB(g)) { case RC_EF: - strcpy(g->Message, "Void json table"); + safe_strcpy(g->Message, sizeof(g->Message), "Void json table"); case RC_FX: goto err; default: @@ -328,7 +329,7 @@ int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) } // endif pretty if (!(row = (jsp) ? bp->GetObject(jsp) : NULL)) { - strcpy(g->Message, "Can only retrieve columns from object rows"); + safe_strcpy(g->Message, sizeof(g->Message), "Can only retrieve columns from object rows"); goto err; } // endif row @@ -405,7 +406,7 @@ bool BSONDISC::Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j) if (jvp && !bp->IsJson(jvp)) { if (JsonAllPath() && !fmt[bf]) - strcat(fmt, colname); + safe_strcat(fmt, sizeof(fmt), colname); jcol.Type = (JTYP)jvp->Type; @@ -439,7 +440,7 @@ bool BSONDISC::Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j) jcol.Cbn = true; } else if (j < lvl && !Stringified(strfy, colname)) { if (!fmt[bf]) - strcat(fmt, colname); + safe_strcat(fmt, sizeof(fmt), colname); p = fmt + strlen(fmt); jsp = jvp; @@ -510,11 +511,11 @@ bool BSONDISC::Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j) } else if (lvl >= 0) { if (Stringified(strfy, colname)) { if (!fmt[bf]) - strcat(fmt, colname); + safe_strcat(fmt, sizeof(fmt), colname); - strcat(fmt, ".*"); + safe_strcat(fmt, sizeof(fmt), ".*"); } else if (JsonAllPath() && !fmt[bf]) - strcat(fmt, colname); + safe_strcat(fmt, sizeof(fmt), colname); jcol.Type = TYPE_STRG; jcol.Len = sz; @@ -961,7 +962,7 @@ PVAL BCUTIL::ExpandArray(PGLOBAL g, PBVAL arp, int n) } // endif ars if (!(bvp = GetArrayValue(arp, (nodes[n].Rx = nodes[n].Nx)))) { - strcpy(g->Message, "Logical error expanding array"); + safe_strcpy(g->Message, sizeof(g->Message), "Logical error expanding array"); throw 666; } // endif jvp @@ -1146,7 +1147,7 @@ PBVAL BCUTIL::GetRow(PGLOBAL g) } else if (row->Type == TYPE_JAR) { AddArrayValue(row, (nwr = NewVal(type))); } else { - strcpy(g->Message, "Wrong type when writing new row"); + safe_strcpy(g->Message, sizeof(g->Message), "Wrong type when writing new row"); nwr = NULL; } // endif's @@ -1255,7 +1256,7 @@ PTDB BSONDEF::GetTable(PGLOBAL g, MODE m) // Allocate the parse work memory G = PlugInit(NULL, (size_t)Lrecl * (Pretty < 0 ? 
3 : 5)); } else { - strcpy(g->Message, "LRECL is not defined"); + safe_strcpy(g->Message, sizeof(g->Message), "LRECL is not defined"); return NULL; } // endif Lrecl @@ -1295,7 +1296,7 @@ PTDB BSONDEF::GetTable(PGLOBAL g, MODE m) } else if (m == MODE_INSERT) { txfp = new(g) ZIPFAM(this); } else { - strcpy(g->Message, "UPDATE/DELETE not supported for ZIP"); + safe_strcpy(g->Message, sizeof(g->Message), "UPDATE/DELETE not supported for ZIP"); return NULL; } // endif's m #else // !ZIP_SUPPORT @@ -1325,10 +1326,10 @@ PTDB BSONDEF::GetTable(PGLOBAL g, MODE m) if (m == MODE_READ || m == MODE_ANY || m == MODE_ALTER) { txfp = new(g) UNZFAM(this); } else if (m == MODE_INSERT) { - strcpy(g->Message, "INSERT supported only for zipped JSON when pretty=0"); + safe_strcpy(g->Message, sizeof(g->Message), "INSERT supported only for zipped JSON when pretty=0"); return NULL; } else { - strcpy(g->Message, "UPDATE/DELETE not supported for ZIP"); + safe_strcpy(g->Message, sizeof(g->Message), "UPDATE/DELETE not supported for ZIP"); return NULL; } // endif's m #else // !ZIP_SUPPORT @@ -1661,7 +1662,7 @@ bool TDBBSN::PrepareWriting(PGLOBAL g) strcat(s, ","); if ((signed)strlen(s) > Lrecl) { - strncpy(To_Line, s, Lrecl); + safe_strcpy(To_Line, Lrecl, s); snprintf(g->Message, sizeof(g->Message), "Line truncated (lrecl=%d)", Lrecl); return PushWarning(g, this); } else @@ -1764,7 +1765,7 @@ bool BSONCOL::CheckExpand(PGLOBAL g, int i, PSZ nm, bool b) Xpd = true; // Expandable object Nodes[i].Op = OP_EXP; } else if (b) { - strcpy(g->Message, "Cannot expand more than one branch"); + safe_strcpy(g->Message, sizeof(g->Message), "Cannot expand more than one branch"); return true; } // endif Xcol @@ -1975,7 +1976,7 @@ bool BSONCOL::ParseJpath(PGLOBAL g) if (SetArrayOptions(g, p, i, Nodes[i - 1].Key)) return true; else if (Xpd && Tbp->Mode == MODE_DELETE) { - strcpy(g->Message, "Cannot delete expanded columns"); + safe_strcpy(g->Message, sizeof(g->Message), "Cannot delete expanded columns"); return true; } // endif Xpd @@ -2096,7 +2097,7 @@ void BSONCOL::ReadColumn(PGLOBAL g) void BSONCOL::WriteColumn(PGLOBAL g) { if (Xpd && Tbp->Pretty < 2) { - strcpy(g->Message, "Cannot write expanded column when Pretty is not 2"); + safe_strcpy(g->Message, sizeof(g->Message), "Cannot write expanded column when Pretty is not 2"); throw 666; } // endif Xpd @@ -2126,7 +2127,7 @@ void BSONCOL::WriteColumn(PGLOBAL g) char *s = Value->GetCharValue(); if (!(jsp = Cp->ParseJson(g, s, strlen(s)))) { - strcpy(g->Message, s); + safe_strcpy(g->Message, sizeof(g->Message), s); throw 666; } // endif jsp @@ -2312,7 +2313,7 @@ int TDBBSON::MakeDocument(PGLOBAL g) if (!a && *p && *p != '[' && !IsNum(p)) { // obj is a key if (jsp->Type != TYPE_JOB) { - strcpy(g->Message, "Table path does not match the json file"); + safe_strcpy(g->Message, sizeof(g->Message), "Table path does not match the json file"); return RC_FX; } // endif Type @@ -2338,7 +2339,7 @@ int TDBBSON::MakeDocument(PGLOBAL g) } // endif p if (jsp->Type != TYPE_JAR) { - strcpy(g->Message, "Table path does not match the json file"); + safe_strcpy(g->Message, sizeof(g->Message), "Table path does not match the json file"); return RC_FX; } // endif Type @@ -2432,7 +2433,7 @@ void TDBBSON::ResetSize(void) int TDBBSON::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool) { if (pxdf) { - strcpy(g->Message, "JSON not indexable when pretty = 2"); + safe_strcpy(g->Message, sizeof(g->Message), "JSON not indexable when pretty = 2"); return RC_FX; } else return RC_OK; diff --git a/storage/connect/tabdos.cpp 
b/storage/connect/tabdos.cpp index 797b988b309..4b50af0954e 100644 --- a/storage/connect/tabdos.cpp +++ b/storage/connect/tabdos.cpp @@ -62,6 +62,7 @@ #include "tabmul.h" #include "array.h" #include "blkfil.h" +#include "m_string.h" /***********************************************************************/ /* DB static variables. */ @@ -258,7 +259,7 @@ bool DOSDEF::DeleteIndexFile(PGLOBAL g, PIXDEF pxdf) sep = GetBoolCatInfo("SepIndex", false); if (!sep && pxdf) { - strcpy(g->Message, MSG(NO_RECOV_SPACE)); + safe_strcpy(g->Message, sizeof(g->Message), MSG(NO_RECOV_SPACE)); return true; } // endif sep @@ -293,7 +294,8 @@ bool DOSDEF::DeleteIndexFile(PGLOBAL g, PIXDEF pxdf) for (; pxdf; pxdf = pxdf->GetNext()) { _splitpath(Ofn, drive, direc, fname, NULL); - strcat(strcat(fname, "_"), pxdf->GetName()); + safe_strcat(fname, sizeof(fname), "_"); + safe_strcat(fname, sizeof(fname), pxdf->GetName()); _makepath(filename, drive, direc, fname, ftype); PlugSetPath(filename, filename, GetPath()); #if defined(_WIN32) @@ -312,7 +314,7 @@ bool DOSDEF::DeleteIndexFile(PGLOBAL g, PIXDEF pxdf) } else { // !sep // Drop all indexes, delete the common file PlugSetPath(filename, Ofn, GetPath()); - strcat(PlugRemoveType(filename, filename), ftype); + safe_strcat(PlugRemoveType(filename, filename), sizeof(filename), ftype); #if defined(_WIN32) if (!DeleteFile(filename)) rc = (GetLastError() != ERROR_FILE_NOT_FOUND); @@ -365,7 +367,7 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode) if (mode == MODE_READ || mode == MODE_ANY || mode == MODE_ALTER) { txfp = new(g) UZDFAM(this); } else { - strcpy(g->Message, "Zipped DBF tables are read only"); + safe_strcpy(g->Message, sizeof(g->Message), "Zipped DBF tables are read only"); return NULL; } // endif's mode @@ -386,7 +388,7 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode) } else if (mode == MODE_INSERT) { txfp = new(g) ZIPFAM(this); } else { - strcpy(g->Message, "UPDATE/DELETE not supported for ZIP"); + safe_strcpy(g->Message, sizeof(g->Message), "UPDATE/DELETE not supported for ZIP"); return NULL; } // endif's mode @@ -397,7 +399,7 @@ PTDB DOSDEF::GetTable(PGLOBAL g, MODE mode) } else if (mode == MODE_INSERT) { txfp = new(g) ZPXFAM(this); } else { - strcpy(g->Message, "UPDATE/DELETE not supported for ZIP"); + safe_strcpy(g->Message, sizeof(g->Message), "UPDATE/DELETE not supported for ZIP"); return NULL; } // endif's mode @@ -654,7 +656,7 @@ int TDBDOS::MakeBlockValues(PGLOBAL g) if ((nrec = defp->GetElemt()) < 2) { if (!To_Def->Partitioned()) { // This may be wrong to do in some cases - strcpy(g->Message, MSG(TABLE_NOT_OPT)); + safe_strcpy(g->Message, sizeof(g->Message), MSG(TABLE_NOT_OPT)); return RC_INFO; // Not to be optimized } else return RC_OK; @@ -674,7 +676,7 @@ int TDBDOS::MakeBlockValues(PGLOBAL g) if ((block = (int)((MaxSize + (int)nrec - 1) / (int)nrec)) < 2) { // This may be wrong to do in some cases defp->RemoveOptValues(g); - strcpy(g->Message, MSG(TABLE_NOT_OPT)); + safe_strcpy(g->Message, sizeof(g->Message), MSG(TABLE_NOT_OPT)); return RC_INFO; // Not to be optimized } // endif block @@ -757,7 +759,7 @@ int TDBDOS::MakeBlockValues(PGLOBAL g) // No optimised columns. Still useful for blocked variable tables. 
if (!colp && defp->Recfm != RECFM_VAR) { - strcpy(g->Message, "No optimised columns"); + safe_strcpy(g->Message, sizeof(g->Message), "No optimised columns"); return RC_INFO; } // endif colp @@ -787,7 +789,8 @@ int TDBDOS::MakeBlockValues(PGLOBAL g) /*********************************************************************/ char *p = (char *)PlugSubAlloc(g, NULL, 24 + strlen(Name)); - dup->Step = strcat(strcpy(p, MSG(OPTIMIZING)), Name); + snprintf(p, 24 + strlen(Name), "%s%s", MSG(OPTIMIZING), Name); + dup->Step = p; dup->ProgMax = GetProgMax(g); dup->ProgCur = 0; #endif // SOCKET_MODE || THREAD @@ -804,7 +807,7 @@ int TDBDOS::MakeBlockValues(PGLOBAL g) } else { if (++curnum >= nrec) { if (++curblk >= block) { - strcpy(g->Message, MSG(BAD_BLK_ESTIM)); + safe_strcpy(g->Message, sizeof(g->Message), MSG(BAD_BLK_ESTIM)); goto err; } else curnum = 0; @@ -832,7 +835,7 @@ int TDBDOS::MakeBlockValues(PGLOBAL g) #if defined(PROG_INFO) if (!dup->Step) { - strcpy(g->Message, MSG(OPT_CANCELLED)); + safe_strcpy(g->Message, sizeof(g->Message), MSG(OPT_CANCELLED)); goto err; } else dup->ProgCur = GetProgCur(); @@ -912,7 +915,8 @@ bool TDBDOS::SaveBlockValues(PGLOBAL g) if (!(opfile = fopen(filename, "wb"))) { snprintf(g->Message, sizeof(g->Message), MSG(OPEN_MODE_ERROR), "wb", (int)errno, filename); - strcat(strcat(g->Message, ": "), strerror(errno)); + safe_strcat(g->Message, sizeof(g->Message), ": "); + safe_strcat(g->Message, sizeof(g->Message), strerror(errno)); if (trace(1)) htrc("%s\n", g->Message); @@ -1227,7 +1231,8 @@ bool TDBDOS::GetDistinctColumnValues(PGLOBAL g, int nrec) /* Initialize progress information */ /*********************************************************************/ p = (char *)PlugSubAlloc(g, NULL, 48 + strlen(Name)); - dup->Step = strcat(strcpy(p, MSG(GET_DIST_VALS)), Name); + snprintf(p, 48 + strlen(Name), "%s%s", MSG(GET_DIST_VALS), Name); + dup->Step = p; dup->ProgMax = GetProgMax(g); dup->ProgCur = 0; @@ -1239,12 +1244,12 @@ bool TDBDOS::GetDistinctColumnValues(PGLOBAL g, int nrec) #if defined(SOCKET_MODE) if (SendProgress(dup)) { - strcpy(g->Message, MSG(OPT_CANCELLED)); + safe_strcpy(g->Message, sizeof(g->Message), MSG(OPT_CANCELLED)); return true; } else #elif defined(THREAD) if (!dup->Step) { - strcpy(g->Message, MSG(OPT_CANCELLED)); + safe_strcpy(g->Message, sizeof(g->Message), MSG(OPT_CANCELLED)); return true; } else #endif // THREAD @@ -1525,7 +1530,7 @@ PBF TDBDOS::CheckBlockFilari(PGLOBAL g, PXOB *arg, int op, bool *cnv) } else if (n == 8 || n == 14) { if (n == 8 && ctype != TYPE_LIST) { // Should never happen - strcpy(g->Message, "Block opt: bad constant"); + safe_strcpy(g->Message, sizeof(g->Message), "Block opt: bad constant"); throw 99; } // endif Conv @@ -1683,7 +1688,7 @@ int TDBDOS::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add) // Are we are called from CreateTable or CreateIndex? 
if (pxdf) { if (!add && dfp->GetIndx()) { - strcpy(g->Message, MSG(INDX_EXIST_YET)); + safe_strcpy(g->Message, sizeof(g->Message), MSG(INDX_EXIST_YET)); return RC_FX; } // endif To_Indx @@ -1795,7 +1800,7 @@ int TDBDOS::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add) htrc("Exception %d: %s\n", n, g->Message); rc = RC_FX; } catch (const char *msg) { - strcpy(g->Message, msg); + safe_strcpy(g->Message, sizeof(g->Message), msg); rc = RC_FX; } // end catch @@ -1829,7 +1834,7 @@ bool TDBDOS::InitialyzeIndex(PGLOBAL g, volatile PIXDEF xdp, bool sorted) PKPDEF kdp; if (!xdp && !(xdp = To_Xdp)) { - strcpy(g->Message, "NULL dynamic index"); + safe_strcpy(g->Message, sizeof(g->Message), "NULL dynamic index"); return true; } else dynamic = To_Filter && xdp->IsUnique() && xdp->IsDynamic(); @@ -1918,7 +1923,7 @@ bool TDBDOS::InitialyzeIndex(PGLOBAL g, volatile PIXDEF xdp, bool sorted) htrc("Exception %d: %s\n", n, g->Message); brc = true; } catch (const char *msg) { - strcpy(g->Message, msg); + safe_strcpy(g->Message, sizeof(g->Message), msg); brc = true; } // end catch @@ -2679,38 +2684,38 @@ void DOSCOL::WriteColumn(PGLOBAL g) if (Ldz || Nod || Dcm >= 0) { switch (Buf_Type) { case TYPE_SHORT: - strcpy(fmt, (Ldz) ? "%0*hd" : "%*.hd"); + safe_strcpy(fmt, sizeof(fmt), (Ldz) ? "%0*hd" : "%*.hd"); i = 0; if (Nod) for (; i < Dcm; i++) - strcat(fmt, "0"); + safe_strcat(fmt, sizeof(fmt), "0"); len = sprintf(Buf, fmt, field - i, Value->GetShortValue()); break; case TYPE_INT: - strcpy(fmt, (Ldz) ? "%0*d" : "%*.d"); + safe_strcpy(fmt, sizeof(fmt), (Ldz) ? "%0*d" : "%*.d"); i = 0; if (Nod) for (; i < Dcm; i++) - strcat(fmt, "0"); + safe_strcat(fmt,sizeof(fmt), "0"); len = sprintf(Buf, fmt, field - i, Value->GetIntValue()); break; case TYPE_TINY: - strcpy(fmt, (Ldz) ? "%0*d" : "%*.d"); + safe_strcpy(fmt, sizeof(fmt), (Ldz) ? "%0*d" : "%*.d"); i = 0; if (Nod) for (; i < Dcm; i++) - strcat(fmt, "0"); + safe_strcat(fmt, sizeof(fmt), "0"); len = sprintf(Buf, fmt, field - i, Value->GetTinyValue()); break; case TYPE_DOUBLE: case TYPE_DECIM: - strcpy(fmt, (Ldz) ? "%0*.*lf" : "%*.*lf"); + safe_strcpy(fmt, sizeof(fmt), (Ldz) ? "%0*.*lf" : "%*.*lf"); len = field + ((Nod && Dcm) ? 
1 : 0); snprintf(Buf, len + 1, fmt, len, Dcm, Value->GetFloatValue()); len = strlen(Buf); diff --git a/storage/connect/tabext.cpp b/storage/connect/tabext.cpp index 01c55cca1cd..9d8a4aca077 100644 --- a/storage/connect/tabext.cpp +++ b/storage/connect/tabext.cpp @@ -65,7 +65,7 @@ int CONDFIL::Init(PGLOBAL g, PHC hc) while (alt) { if (!(p = strchr(alt, '='))) { - strcpy(g->Message, "Invalid alias list"); + safe_strcpy(g->Message, sizeof(g->Message), "Invalid alias list"); rc = RC_FX; break; } // endif !p @@ -126,7 +126,7 @@ EXTDEF::EXTDEF(void) bool EXTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) { if (g->Createas) { - strcpy(g->Message, + safe_strcpy(g->Message, sizeof(g->Message), "Multiple-table UPDATE/DELETE commands are not supported"); return true; } // endif multi @@ -349,7 +349,7 @@ bool TDBEXT::MakeSrcdef(PGLOBAL g) int n_placeholders = count_placeholders(Srcdef); if (n_placeholders < 0) { - strcpy(g->Message, "MakeSQL: Wrong place holders specification"); + safe_strcpy(g->Message, sizeof(g->Message), "MakeSQL: Wrong place holders specification"); return true; } @@ -372,7 +372,7 @@ bool TDBEXT::MakeSrcdef(PGLOBAL g) Query = new(g)STRING(g, strlen(Srcdef) + strlen(fil1) + strlen(fil2)); Query->SetLength(sprintf(Query->GetStr(), Srcdef, fil2, fil1)); } else { - strcpy(g->Message, "MakeSQL: Wrong place holders specification"); + safe_strcpy(g->Message, sizeof(g->Message), "MakeSQL: Wrong place holders specification"); return true; } // endif's ph @@ -513,7 +513,7 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt) len += ((Mode == MODE_READX) ? 256 : 1); if (Query->IsTruncated()) { - strcpy(g->Message, "MakeSQL: Out of memory"); + safe_strcpy(g->Message, sizeof(g->Message), "MakeSQL: Out of memory"); return true; } else Query->Resize(len); @@ -574,6 +574,7 @@ bool TDBEXT::MakeCommand(PGLOBAL g) bool qtd = Quoted > 0; char q = qtd ? 
*Quote : ' '; int i = 0, k = 0; + size_t stmt_sz = 0; // Make a lower case copy of the originale query and change // back ticks to the data source identifier quoting character @@ -585,26 +586,30 @@ bool TDBEXT::MakeCommand(PGLOBAL g) p[7] = 0; // Remove where clause Qrystr[(p - qrystr) + 7] = 0; body = To_CondFil->Body; - stmt = (char*)PlugSubAlloc(g, NULL, strlen(qrystr) - + strlen(body) + 64); + stmt_sz = strlen(qrystr) + strlen(body) + 64; } else - stmt = (char*)PlugSubAlloc(g, NULL, strlen(Qrystr) + 64); + stmt_sz = strlen(Qrystr) + 64; + stmt = (char*)PlugSubAlloc(g, NULL, stmt_sz); // Check whether the table name is equal to a keyword // If so, it must be quoted in the original query - strlwr(strcat(strcat(strcpy(name, " "), Name), " ")); + snprintf(name, sizeof(name), " %s ", Name); + strlwr(name); if (strstr(" update delete low_priority ignore quick from ", name)) { if (Quote) { - strlwr(strcat(strcat(strcpy(name, Quote), Name), Quote)); + snprintf(name, sizeof(name), "%s%s%s", Quote, Name, Quote); + strlwr(name); k += 2; } else { - strcpy(g->Message, "Quoted must be specified"); + safe_strcpy(g->Message, sizeof(g->Message), "Quoted must be specified"); return true; } // endif Quote - } else - strlwr(strcpy(name, Name)); // Not a keyword + } else { + safe_strcpy(name, sizeof(name), Name); // Not a keyword + strlwr(name); + } if ((p = strstr(qrystr, name))) { for (i = 0; i < p - qrystr; i++) @@ -618,21 +623,29 @@ bool TDBEXT::MakeCommand(PGLOBAL g) schmp = Schema; if (qtd && *(p - 1) == ' ') { - if (schmp) - strcat(strcat(stmt, schmp), "."); + if (schmp) { + safe_strcat(stmt, stmt_sz, schmp); + safe_strcat(stmt, stmt_sz, "."); + } - strcat(strcat(strcat(stmt, Quote), TableName), Quote); + safe_strcat(stmt, stmt_sz, Quote); + safe_strcat(stmt, stmt_sz, TableName); + safe_strcat(stmt, stmt_sz, Quote); } else { if (schmp) { if (qtd && *(p - 1) != ' ') { stmt[i - 1] = 0; - strcat(strcat(strcat(stmt, schmp), "."), Quote); - } else - strcat(strcat(stmt, schmp), "."); + safe_strcat(stmt, stmt_sz, schmp); + safe_strcat(stmt, stmt_sz, "."); + safe_strcat(stmt, stmt_sz, Quote); + } else { + safe_strcat(stmt, stmt_sz, schmp); + safe_strcat(stmt, stmt_sz, "."); + } } // endif schmp - strcat(stmt, TableName); + safe_strcat(stmt, stmt_sz, TableName); } // endif's i = (int)strlen(stmt); @@ -644,7 +657,7 @@ bool TDBEXT::MakeCommand(PGLOBAL g) RemoveConst(g, stmt); if (body) - strcat(stmt, body); + safe_strcat(stmt, stmt_sz, body); } else { snprintf(g->Message, sizeof(g->Message), "Cannot use this %s command", diff --git a/storage/connect/tabfmt.cpp b/storage/connect/tabfmt.cpp index f20d9afb959..b93b7d8dc8b 100644 --- a/storage/connect/tabfmt.cpp +++ b/storage/connect/tabfmt.cpp @@ -62,6 +62,7 @@ #define NO_FUNC #include "plgcnx.h" // For DB types #include "resource.h" +#include "m_string.h" /***********************************************************************/ /* This should be an option. */ @@ -137,7 +138,7 @@ PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info) ? 
strchr(tdp->Entry, '*') || strchr(tdp->Entry, '?') : GetBooleanTableOption(g, topt, "Mulentries", false); #else // !ZIP_SUPPORT - strcpy(g->Message, "ZIP not supported by this version"); + safe_strcpy(g->Message, sizeof(g->Message), "ZIP not supported by this version"); return NULL; #endif // !ZIP_SUPPORT } // endif // Zipped @@ -145,7 +146,7 @@ PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info) fn = tdp->Fn = GetStringTableOption(g, topt, "Filename", NULL); if (!tdp->Fn) { - strcpy(g->Message, MSG(MISSING_FNAME)); + safe_strcpy(g->Message, sizeof(g->Message), MSG(MISSING_FNAME)); return NULL; } // endif Fn @@ -472,7 +473,7 @@ bool CSVDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) if (Catfunc == FNC_NO) for (PCOLDEF cdp = To_Cols; cdp; cdp = cdp->GetNext()) if (cdp->GetOffset() < 1 && !cdp->IsSpecial()) { - strcpy(g->Message, MSG(BAD_OFFSET_VAL)); + safe_strcpy(g->Message, sizeof(g->Message), MSG(BAD_OFFSET_VAL)); return true; } // endif Offset @@ -528,11 +529,11 @@ PTDB CSVDEF::GetTable(PGLOBAL g, MODE mode) } else if (mode == MODE_INSERT) { txfp = new(g) ZIPFAM(this); } else { - strcpy(g->Message, "UPDATE/DELETE not supported for ZIP"); + safe_strcpy(g->Message, sizeof(g->Message), "UPDATE/DELETE not supported for ZIP"); return NULL; } // endif's mode #else // !ZIP_SUPPORT - strcpy(g->Message, "ZIP not supported"); + safe_strcpy(g->Message, sizeof(g->Message), "ZIP not supported"); return NULL; #endif // !ZIP_SUPPORT } else if (map) { @@ -546,7 +547,7 @@ PTDB CSVDEF::GetTable(PGLOBAL g, MODE mode) txfp = new(g) ZLBFAM(this); #else // !GZ_SUPPORT - strcpy(g->Message, "Compress not supported"); + safe_strcpy(g->Message, sizeof(g->Message), "Compress not supported"); return NULL; #endif // !GZ_SUPPORT } else @@ -879,7 +880,7 @@ bool TDBCSV::SkipHeader(PGLOBAL g) if (q) To_Line[strlen(To_Line)] = Qot; - strcat(To_Line, cdp->GetName()); + safe_strcat(To_Line, Lrecl, cdp->GetName()); if (q) To_Line[strlen(To_Line)] = Qot; @@ -1049,14 +1050,16 @@ bool TDBCSV::PrepareWriting(PGLOBAL g) for (i = 0; i < Fields; i++) { if (i) - strcat(To_Line, sep); + safe_strcat(To_Line, Lrecl, sep); if (Field[i]) { if (!strlen(Field[i])) { // Generally null fields are not quoted - if (Quoted > 2) + if (Quoted > 2) { // Except if explicitly required - strcat(strcat(To_Line, qot), qot); + safe_strcat(To_Line, Lrecl, qot); + safe_strcat(To_Line, Lrecl, qot); + } } else if (Qot && (strchr(Field[i], Sep) || *Field[i] == Qot || Quoted > 1 || (Quoted == 1 && !Fldtyp[i]))) { @@ -1075,12 +1078,15 @@ bool TDBCSV::PrepareWriting(PGLOBAL g) To_Line[k++] = Qot; To_Line[k] = '\0'; - } else - strcat(strcat(strcat(To_Line, qot), Field[i]), qot); + } else { + safe_strcat(To_Line, Lrecl, qot); + safe_strcat(To_Line, Lrecl, Field[i]); + safe_strcat(To_Line, Lrecl, qot); + } } else - strcat(To_Line, Field[i]); + safe_strcat(To_Line, Lrecl, Field[i]); } } // endfor i @@ -1157,7 +1163,7 @@ int TDBCSV::CheckWrite(PGLOBAL g) } // endif } if ((nlen += n) > maxlen) { - strcpy(g->Message, MSG(LINE_TOO_LONG)); + safe_strcpy(g->Message, sizeof(g->Message), MSG(LINE_TOO_LONG)); return -1; } // endif nlen @@ -1267,7 +1273,7 @@ bool TDBFMT::OpenDB(PGLOBAL g) } // endif n FldFormat[i] = (PSZ)PlugSubAlloc(g, NULL, n + 5); - strcpy(FldFormat[i], pfm); + safe_strcpy(FldFormat[i], n + 5, pfm); if (!strcmp(pfm + n, "%m")) { // This is a field that can be missing. 
Flag it so it can @@ -1277,7 +1283,7 @@ bool TDBFMT::OpenDB(PGLOBAL g) } else if (i+1 < Fields && strcmp(pfm + n, "%n")) { // There are trailing characters after the field contents // add a marker for the next field start position. - strcat(FldFormat[i], "%n"); + safe_strcat(FldFormat[i], n + 5, "%n"); FmtTest[i] = 1; } // endif's diff --git a/storage/connect/tabjdbc.cpp b/storage/connect/tabjdbc.cpp index dc1f5e2fe5b..fe7609a1cac 100644 --- a/storage/connect/tabjdbc.cpp +++ b/storage/connect/tabjdbc.cpp @@ -277,7 +277,7 @@ PTDB JDBCDEF::GetTable(PGLOBAL g, MODE m) if (Multiple == 1) tdbp = new(g)TDBMUL(tdbp); else if (Multiple == 2) - strcpy(g->Message, "NO_JDBC_MUL"); + safe_strcpy(g->Message, sizeof(g->Message), "NO_JDBC_MUL"); } // endswitch Catfunc @@ -386,7 +386,7 @@ bool TDBJDBC::MakeInsert(PGLOBAL g) for (colp = Columns; colp; colp = colp->GetNext()) if (colp->IsSpecial()) { - strcpy(g->Message, "No JDBC special columns"); + safe_strcpy(g->Message, sizeof(g->Message), "No JDBC special columns"); return true; } else { // Column name can be encoded in UTF-8 @@ -460,7 +460,7 @@ bool TDBJDBC::MakeInsert(PGLOBAL g) } // endfor colp if ((Query->Append(") VALUES ("))) { - strcpy(g->Message, "MakeInsert: Out of memory"); + safe_strcpy(g->Message, sizeof(g->Message), "MakeInsert: Out of memory"); return true; } else // in case prepared statement fails pos = Query->GetLength(); @@ -470,7 +470,7 @@ bool TDBJDBC::MakeInsert(PGLOBAL g) Query->Append("?,"); if (Query->IsTruncated()) { - strcpy(g->Message, "MakeInsert: Out of memory"); + safe_strcpy(g->Message, sizeof(g->Message), "MakeInsert: Out of memory"); return true; } else Query->RepLast(')'); @@ -532,12 +532,15 @@ int TDBJDBC::Cardinality(PGLOBAL g) // Table name can be encoded in UTF-8 Decode(TableName, tbn, sizeof(tbn)); - strcpy(qry, "SELECT COUNT(*) FROM "); + safe_strcpy(qry, sizeof(qry), "SELECT COUNT(*) FROM "); - if (Quote) - strcat(strcat(strcat(qry, Quote), tbn), Quote); + if (Quote) { + safe_strcat(qry, sizeof(qry), Quote); + safe_strcat(qry, sizeof(qry), tbn); + safe_strcat(qry, sizeof(qry), Quote); + } else - strcat(qry, tbn); + safe_strcat(qry, sizeof(qry), tbn); // Allocate a Count(*) column (must not use the default constructor) Cnp = new(g)JDBCCOL; @@ -654,7 +657,7 @@ bool TDBJDBC::OpenDB(PGLOBAL g) if ((Qrp = Jcp->AllocateResult(g, this))) Memory = 2; // Must be filled else { - strcpy(g->Message, "Result set memory allocation failed"); + safe_strcpy(g->Message, sizeof(g->Message), "Result set memory allocation failed"); return true; } // endif n @@ -681,7 +684,7 @@ bool TDBJDBC::OpenDB(PGLOBAL g) #if 0 if (!(rc = MakeInsert(g))) { if (Nparm != Jcp->PrepareSQL(Query->GetStr())) { - strcpy(g->Message, MSG(PARM_CNT_MISS)); + safe_strcpy(g->Message, sizeof(g->Message), MSG(PARM_CNT_MISS)); rc = true; } else rc = BindParameters(g); @@ -733,12 +736,12 @@ bool TDBJDBC::SetRecpos(PGLOBAL g, int recpos) CurNum = recpos; Fpos = recpos; } else { - strcpy(g->Message, "Scrolling out of row set NIY"); + safe_strcpy(g->Message, sizeof(g->Message), "Scrolling out of row set NIY"); return true; } // endif recpos } else { - strcpy(g->Message, "This action requires a scrollable cursor"); + safe_strcpy(g->Message, sizeof(g->Message), "This action requires a scrollable cursor"); return true; } // endif's @@ -784,7 +787,7 @@ bool TDBJDBC::ReadKey(PGLOBAL g, OPVAL op, const key_range *kr) if (To_CondFil) if (Query->Append(" AND ") || Query->Append(To_CondFil->Body)) { - strcpy(g->Message, "Readkey: Out of memory"); + safe_strcpy(g->Message, 
sizeof(g->Message), "Readkey: Out of memory"); return true; } // endif Append @@ -917,7 +920,7 @@ int TDBJDBC::WriteDB(PGLOBAL g) } // endfor colp if (unlikely(Query->IsTruncated())) { - strcpy(g->Message, "WriteDB: Out of memory"); + safe_strcpy(g->Message, sizeof(g->Message), "WriteDB: Out of memory"); return RC_FX; } // endif Query @@ -1110,13 +1113,13 @@ PCMD TDBXJDC::MakeCMD(PGLOBAL g) (To_CondFil->Op == OP_EQ || To_CondFil->Op == OP_IN)) { xcmd = To_CondFil->Cmds; } else - strcpy(g->Message, "Invalid command specification filter"); + safe_strcpy(g->Message, sizeof(g->Message), "Invalid command specification filter"); } else - strcpy(g->Message, "No command column in select list"); + safe_strcpy(g->Message, sizeof(g->Message), "No command column in select list"); } else if (!Srcdef) - strcpy(g->Message, "No Srcdef default command"); + safe_strcpy(g->Message, sizeof(g->Message), "No Srcdef default command"); else xcmd = new(g) CMD(g, Srcdef); @@ -1149,7 +1152,7 @@ bool TDBXJDC::OpenDB(PGLOBAL g) this, Tdb_No, Use, Mode); if (Use == USE_OPEN) { - strcpy(g->Message, "Multiple execution is not allowed"); + safe_strcpy(g->Message, sizeof(g->Message), "Multiple execution is not allowed"); return true; } // endif use @@ -1171,7 +1174,7 @@ bool TDBXJDC::OpenDB(PGLOBAL g) Use = USE_OPEN; // Do it now in case we are recursively called if (Mode != MODE_READ && Mode != MODE_READX) { - strcpy(g->Message, "No INSERT/DELETE/UPDATE of XJDBC tables"); + safe_strcpy(g->Message, sizeof(g->Message), "No INSERT/DELETE/UPDATE of XJDBC tables"); return true; } // endif Mode @@ -1224,7 +1227,7 @@ int TDBXJDC::ReadDB(PGLOBAL g) /***********************************************************************/ int TDBXJDC::WriteDB(PGLOBAL g) { - strcpy(g->Message, "Execsrc tables are read only"); + safe_strcpy(g->Message, sizeof(g->Message), "Execsrc tables are read only"); return RC_FX; } // end of DeleteDB @@ -1233,7 +1236,7 @@ int TDBXJDC::WriteDB(PGLOBAL g) /***********************************************************************/ int TDBXJDC::DeleteDB(PGLOBAL g, int irc) { - strcpy(g->Message, "NO_XJDBC_DELETE"); + safe_strcpy(g->Message, sizeof(g->Message), "NO_XJDBC_DELETE"); return RC_FX; } // end of DeleteDB diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index 67eb5bbede0..4462541d712 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -85,7 +85,7 @@ PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info) } // endif info if (GetIntegerTableOption(g, topt, "Multiple", 0)) { - strcpy(g->Message, "Cannot find column definition for multiple table"); + safe_strcpy(g->Message, sizeof(g->Message), "Cannot find column definition for multiple table"); return NULL; } // endif Multiple @@ -212,7 +212,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) tdp->Uri = (dsn && *dsn ? dsn : NULL); if (!tdp->Fn && !tdp->Uri) { - strcpy(g->Message, MSG(MISSING_FNAME)); + safe_strcpy(g->Message, sizeof(g->Message), MSG(MISSING_FNAME)); return 0; } else topt->subtype = NULL; @@ -320,7 +320,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) switch (tjnp->ReadDB(g)) { case RC_EF: - strcpy(g->Message, "Void json table"); + safe_strcpy(g->Message, sizeof(g->Message), "Void json table"); case RC_FX: goto err; default: @@ -333,7 +333,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) } // endif pretty if (!(row = (jsp) ? 
jsp->GetObject() : NULL)) { - strcpy(g->Message, "Can only retrieve columns from object rows"); + safe_strcpy(g->Message, sizeof(g->Message), "Can only retrieve columns from object rows"); goto err; } // endif row @@ -417,7 +417,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j) if (jvp && jvp->DataType != TYPE_JSON) { if (JsonAllPath() && !fmt[bf]) - strcat(fmt, colname); + safe_strcat(fmt, sizeof(fmt), colname); jcol.Type = jvp->DataType; @@ -450,7 +450,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j) jcol.Cbn = true; } else if (j < lvl && !Stringified(strfy, colname)) { if (!fmt[bf]) - strcat(fmt, colname); + safe_strcat(fmt, sizeof(fmt), colname); p = fmt + strlen(fmt); jsp = jvp->GetJson(); @@ -520,11 +520,11 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j) } else if (lvl >= 0) { if (Stringified(strfy, colname)) { if (!fmt[bf]) - strcat(fmt, colname); + safe_strcat(fmt, sizeof(fmt), colname); - strcat(fmt, ".*"); + safe_strcat(fmt, sizeof(fmt), ".*"); } else if (JsonAllPath() && !fmt[bf]) - strcat(fmt, colname); + safe_strcat(fmt, sizeof(fmt), colname); jcol.Type = TYPE_STRG; jcol.Len = sz; @@ -735,7 +735,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m) } else if (m == MODE_INSERT) { txfp = new(g) ZIPFAM(this); } else { - strcpy(g->Message, "UPDATE/DELETE not supported for ZIP"); + safe_strcpy(g->Message, sizeof(g->Message), "UPDATE/DELETE not supported for ZIP"); return NULL; } // endif's m #else // !ZIP_SUPPORT @@ -775,7 +775,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m) #endif // 0 ((TDBJSN*)tdbp)->G = PlugInit(NULL, (size_t)Lrecl * (Pretty >= 0 ? 12 : 4)); } else { - strcpy(g->Message, "LRECL is not defined"); + safe_strcpy(g->Message, sizeof(g->Message), "LRECL is not defined"); return NULL; } // endif Lrecl @@ -785,10 +785,10 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m) if (m == MODE_READ || m == MODE_ANY || m == MODE_ALTER) { txfp = new(g) UNZFAM(this); } else if (m == MODE_INSERT) { - strcpy(g->Message, "INSERT supported only for zipped JSON when pretty=0"); + safe_strcpy(g->Message, sizeof(g->Message), "INSERT supported only for zipped JSON when pretty=0"); return NULL; } else { - strcpy(g->Message, "UPDATE/DELETE not supported for ZIP"); + safe_strcpy(g->Message, sizeof(g->Message), "UPDATE/DELETE not supported for ZIP"); return NULL; } // endif's m #else // !ZIP_SUPPORT @@ -1145,7 +1145,7 @@ int TDBJSN::ReadDB(PGLOBAL g) { M = 1; rc = RC_OK; } else if (Pretty != 1 || strcmp(To_Line, "]")) { - strcpy(g->Message, G->Message); + safe_strcpy(g->Message, sizeof(g->Message), G->Message); rc = RC_FX; } else rc = RC_EF; @@ -1258,7 +1258,7 @@ bool TDBJSN::PrepareWriting(PGLOBAL g) strcat(s, ","); if ((signed)strlen(s) > Lrecl) { - strncpy(To_Line, s, Lrecl); + safe_strcpy(To_Line, Lrecl, s); snprintf(g->Message, sizeof(g->Message), "Line truncated (lrecl=%d)", Lrecl); return PushWarning(g, this); } else @@ -1360,7 +1360,7 @@ bool JSONCOL::CheckExpand(PGLOBAL g, int i, PSZ nm, bool b) Xpd = true; // Expandable object Nodes[i].Op = OP_EXP; } else if (b) { - strcpy(g->Message, "Cannot expand more than one branch"); + safe_strcpy(g->Message, sizeof(g->Message), "Cannot expand more than one branch"); return true; } // endif Xcol @@ -1571,7 +1571,7 @@ bool JSONCOL::ParseJpath(PGLOBAL g) if (SetArrayOptions(g, p, i, Nodes[i - 1].Key)) return true; else if (Xpd && Tjp->Mode == MODE_DELETE) { - strcpy(g->Message, "Cannot delete expanded columns"); + safe_strcpy(g->Message, sizeof(g->Message), "Cannot delete expanded columns"); return true; } // endif 
Xpd @@ -1675,7 +1675,7 @@ PSZ JSONCOL::GetJpath(PGLOBAL g, bool proj) PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp, int n) { if (Value->IsTypeNum()) { - strcpy(g->Message, "Cannot make Json for a numeric column"); + safe_strcpy(g->Message, sizeof(g->Message), "Cannot make Json for a numeric column"); if (!Warned) { PushWarning(g, Tjp); @@ -1690,10 +1690,10 @@ PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp, int n) ulong len = Tjp->Lrecl ? Tjp->Lrecl : 500; PBSON bsp = JbinAlloc(g, NULL, len, jsp); - strcat(bsp->Msg, " column"); + safe_strcat(bsp->Msg, sizeof(bsp->Msg), " column"); ((BINVAL*)Value)->SetBinValue(bsp, sizeof(BSON)); } else { - strcpy(g->Message, "Column size too small"); + safe_strcpy(g->Message, sizeof(g->Message), "Column size too small"); Value->SetValue_char(NULL, 0); } // endif Clen #endif // 0 @@ -1937,7 +1937,7 @@ PVAL JSONCOL::ExpandArray(PGLOBAL g, PJAR arp, int n) } // endif ars if (!(jvp = arp->GetArrayValue((Nodes[n].Rx = Nodes[n].Nx)))) { - strcpy(g->Message, "Logical error expanding array"); + safe_strcpy(g->Message, sizeof(g->Message), "Logical error expanding array"); throw 666; } // endif jvp @@ -2125,7 +2125,7 @@ PJSON JSONCOL::GetRow(PGLOBAL g) ((PJAR)row)->AddArrayValue(G, new(G) JVALUE(nwr)); ((PJAR)row)->InitArray(G); } else { - strcpy(g->Message, "Wrong type when writing new row"); + safe_strcpy(g->Message, sizeof(g->Message), "Wrong type when writing new row"); nwr = NULL; } // endif's @@ -2146,7 +2146,7 @@ PJSON JSONCOL::GetRow(PGLOBAL g) void JSONCOL::WriteColumn(PGLOBAL g) { if (Xpd && Tjp->Pretty < 2) { - strcpy(g->Message, "Cannot write expanded column when Pretty is not 2"); + safe_strcpy(g->Message, sizeof(g->Message), "Cannot write expanded column when Pretty is not 2"); throw 666; } // endif Xpd @@ -2182,7 +2182,7 @@ void JSONCOL::WriteColumn(PGLOBAL g) if (s && *s) { if (!(jsp = ParseJson(G, s, strlen(s)))) { - strcpy(g->Message, s); + safe_strcpy(g->Message, sizeof(g->Message), s); throw 666; } // endif jsp @@ -2365,7 +2365,7 @@ int TDBJSON::MakeDocument(PGLOBAL g) if (!a && *p && *p != '[' && !IsNum(p)) { // obj is a key if (jsp->GetType() != TYPE_JOB) { - strcpy(g->Message, "Table path does not match the json file"); + safe_strcpy(g->Message, sizeof(g->Message), "Table path does not match the json file"); return RC_FX; } // endif Type @@ -2391,7 +2391,7 @@ int TDBJSON::MakeDocument(PGLOBAL g) } // endif p if (jsp->GetType() != TYPE_JAR) { - strcpy(g->Message, "Table path does not match the json file"); + safe_strcpy(g->Message, sizeof(g->Message), "Table path does not match the json file"); return RC_FX; } // endif Type @@ -2486,7 +2486,7 @@ void TDBJSON::ResetSize(void) int TDBJSON::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool) { if (pxdf) { - strcpy(g->Message, "JSON not indexable when pretty = 2"); + safe_strcpy(g->Message, sizeof(g->Message), "JSON not indexable when pretty = 2"); return RC_FX; } else return RC_OK; -- cgit v1.2.1 From da1c91fb9232fbea88d3f9a27f81b39f85cfc468 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Tue, 28 Feb 2023 10:43:39 +1100 Subject: MDEV-30713 field length handling for CONNECT engine fp->field_length was unsigned and therefore the negative condition around it. Backport of cc182aca9352 fixes it, however to correct the consistent use of types pcf->Length needs to be unsigned too. At one point pcf->Precision is assigned from pcf->Length so that's also unsigned. GetTypeSize is assigned to length and has a length argument. A -1 default value seemed dangerious to case, so at least 0 should assert if every hit. 
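A small standalone sketch of the pitfall described above; it is illustrative only and not CONNECT code (the local variable field_length and the helper get_type_size_sketch are made up for the example, standing in for fp->field_length and GetTypeSize()).

    // Illustrative sketch only: a ">= 0" guard on an unsigned length can
    // never reject anything, and a -1 sentinel silently wraps around.
    #include <cstdio>

    static unsigned get_type_size_sketch(bool known_type)
    {
      return known_type ? sizeof(int) : 0;  // 0 instead of -1 for "unknown"
    }

    int main()
    {
      unsigned field_length= static_cast<unsigned>(-1); // wraps to UINT_MAX
      if (field_length >= 0)                            // always true for unsigned
        std::printf("guard accepted length %u\n", field_length);
      std::printf("unknown type size: %u\n", get_type_size_sketch(false));
      return 0;
    }

With Length and Precision unsigned and GetTypeSize() returning 0 for unknown types, an impossible negative length can no longer slip through such a check unnoticed.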
--- storage/connect/catalog.h | 4 ++-- storage/connect/ha_connect.cc | 5 +---- storage/connect/tabext.cpp | 2 +- storage/connect/value.cpp | 8 ++++---- storage/connect/value.h | 2 +- 5 files changed, 9 insertions(+), 12 deletions(-) diff --git a/storage/connect/catalog.h b/storage/connect/catalog.h index 2649a50cf76..a46615f5d6e 100644 --- a/storage/connect/catalog.h +++ b/storage/connect/catalog.h @@ -39,9 +39,9 @@ typedef struct _colinfo { PCSZ Name; int Type; int Offset; - int Length; + unsigned Length; int Key; - int Precision; + unsigned Precision; int Scale; int Opt; int Freq; diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 81960332cbe..0a0a206c9c8 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -1618,10 +1618,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf) pcf->Scale= 0; pcf->Opt= (fop) ? (int)fop->opt : 0; - if (fp->field_length >= 0) - pcf->Length= fp->field_length; - else - pcf->Length= 256; // BLOB? + pcf->Length= fp->field_length; pcf->Precision= pcf->Length; diff --git a/storage/connect/tabext.cpp b/storage/connect/tabext.cpp index 9d8a4aca077..6903e112238 100644 --- a/storage/connect/tabext.cpp +++ b/storage/connect/tabext.cpp @@ -466,7 +466,7 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt) if (Quote) { // Tabname can have both database and table identifiers, we need to parse - if (res= strstr(buf, ".")) + if ((res= strstr(buf, "."))) { // Parse schema my_len= res - buf + 1; diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp index 498ec71a87f..7265b2ed0ca 100644 --- a/storage/connect/value.cpp +++ b/storage/connect/value.cpp @@ -163,9 +163,9 @@ PCSZ GetTypeName(int type) /***********************************************************************/ /* GetTypeSize: returns the PlugDB internal type size. */ /***********************************************************************/ -int GetTypeSize(int type, int len) - { - switch (type) { +unsigned GetTypeSize(int type, unsigned len) +{ + switch (type) { case TYPE_DECIM: case TYPE_BIN: case TYPE_STRING: len = len * sizeof(char); break; @@ -176,7 +176,7 @@ int GetTypeSize(int type, int len) case TYPE_DOUBLE: len = sizeof(double); break; case TYPE_TINY: len = sizeof(char); break; case TYPE_PCHAR: len = sizeof(char*); break; - default: len = -1; + default: len = 0; } // endswitch type return len; diff --git a/storage/connect/value.h b/storage/connect/value.h index a0d947347c3..7eb0dec29f2 100644 --- a/storage/connect/value.h +++ b/storage/connect/value.h @@ -41,7 +41,7 @@ typedef struct _datpar *PDTP; // For DTVAL /***********************************************************************/ // Exported functions DllExport PCSZ GetTypeName(int); -DllExport int GetTypeSize(int, int); +DllExport unsigned GetTypeSize(int, unsigned); #ifdef ODBC_SUPPORT /* This function is exported for use in OEM table type DLLs */ DllExport int TranslateSQLType(int stp, int prec, -- cgit v1.2.1 From 9f98a2acd71dcfbdb32a08e72a4737359ed9be40 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Thu, 13 Apr 2023 15:42:53 +0400 Subject: MDEV-30968 mariadb-backup does not copy Aria logs if aria_log_dir_path is used - `mariadb-backup --backup` was fixed to fetch the value of the @@aria_log_dir_path server variable and copy aria_log* files from @@aria_log_dir_path directory to the backup directory. Absolute and relative (to --datadir) paths are supported. 
Before this change aria_log* files were copied to the backup only if they were in the default location in @@datadir. - `mariadb-backup --copy-back` now understands a new my.cnf and command line parameter --aria-log-dir-path. `mariadb-backup --copy-back` in the main loop in copy_back() (when copying back from the backup directory to --datadir) was fixed to ignore all aria_log* files. A new function copy_back_aria_logs() was added. It consists of a separate loop copying back aria_log* files from the backup directory to the directory specified in --aria-log-dir-path. Absolute and relative (to --datadir) paths are supported. If --aria-log-dir-path is not specified, aria_log* files are copied to --datadir by default. - The function is_absolute_path() was fixed to understand MTR style paths on Windows with forward slashes, e.g. --aria-log-dir-path=D:/Buildbot/amd64-windows/build/mysql-test/var/... --- extra/mariabackup/backup_copy.cc | 70 ++++++++++++-- extra/mariabackup/backup_mysql.cc | 7 ++ extra/mariabackup/xtrabackup.cc | 14 ++- extra/mariabackup/xtrabackup.h | 1 + .../suite/mariabackup/aria_log_dir_path.result | 41 ++++++++ .../suite/mariabackup/aria_log_dir_path.test | 105 +++++++++++++++++++++ .../suite/mariabackup/aria_log_dir_path_rel.result | 41 ++++++++ .../suite/mariabackup/aria_log_dir_path_rel.test | 4 + storage/innobase/include/os0file.h | 6 +- 9 files changed, 281 insertions(+), 8 deletions(-) create mode 100644 mysql-test/suite/mariabackup/aria_log_dir_path.result create mode 100644 mysql-test/suite/mariabackup/aria_log_dir_path.test create mode 100644 mysql-test/suite/mariabackup/aria_log_dir_path_rel.result create mode 100644 mysql-test/suite/mariabackup/aria_log_dir_path_rel.test diff --git a/extra/mariabackup/backup_copy.cc b/extra/mariabackup/backup_copy.cc index ffaf6dc98e4..8ab52fa983b 100644 --- a/extra/mariabackup/backup_copy.cc +++ b/extra/mariabackup/backup_copy.cc @@ -130,7 +130,9 @@ struct datadir_thread_ctxt_t { bool ret; }; -static bool backup_files_from_datadir(ds_ctxt *ds_data, const char *dir_path); +static bool backup_files_from_datadir(ds_ctxt_t *ds_data, + const char *dir_path, + const char *prefix); /************************************************************************ Retirn true if character if file separator */ @@ -1499,7 +1501,11 @@ bool backup_start(ds_ctxt *ds_data, ds_ctxt *ds_meta, return(false); } - if (!backup_files_from_datadir(ds_data, fil_path_to_mysql_datadir)) { + if (!backup_files_from_datadir(ds_data, fil_path_to_mysql_datadir, + "aws-kms-key") || + !backup_files_from_datadir(ds_data, + aria_log_dir_path, + "aria_log")) { return false; } @@ -1714,7 +1720,12 @@ ibx_copy_incremental_over_full() } } - if (!(ret = backup_files_from_datadir(ds_data, xtrabackup_incremental_dir))) + if (!(ret = backup_files_from_datadir(ds_data, + xtrabackup_incremental_dir, + "aws-kms-key")) || + !(ret = backup_files_from_datadir(ds_data, + xtrabackup_incremental_dir, + "aria_log"))) goto cleanup; /* copy supplementary files */ @@ -1829,6 +1840,41 @@ public: } }; + +static inline bool +is_aria_log_dir_file(const datadir_node_t &node) +{ + return starts_with(node.filepath_rel, "aria_log"); +} + + +bool +copy_back_aria_logs() +{ + Copy_back_dst_dir dst_dir_buf; + const char *dstdir= dst_dir_buf.make(aria_log_dir_path); + std::unique_ptr + ds_ctxt_aria_log_dir_path(ds_create(dstdir, DS_TYPE_LOCAL), ds_destroy); + + datadir_node_t node; + datadir_node_init(&node); + datadir_iter_t *it = datadir_iter_new(".", false); + + while (datadir_iter_next(it, &node)) + { + 
if (!is_aria_log_dir_file(node)) + continue; + if (!copy_or_move_file(ds_ctxt_aria_log_dir_path.get(), + node.filepath, node.filepath_rel, + dstdir, 1)) + return false; + } + datadir_node_free(&node); + datadir_iter_free(it); + return true; +} + + bool copy_back() { @@ -1861,6 +1907,10 @@ copy_back() && !directory_exists(srv_log_group_home_dir, true)) { return(false); } + if (aria_log_dir_path && *aria_log_dir_path + && !directory_exists(aria_log_dir_path, true)) { + return false; + } /* cd to backup directory */ if (my_setwd(xtrabackup_target_dir, MYF(MY_WME))) @@ -1869,6 +1919,9 @@ copy_back() return(false); } + if (!copy_back_aria_logs()) + return false; + /* parse data file path */ if (!innobase_data_file_path) { @@ -1973,6 +2026,10 @@ copy_back() int i_tmp; bool is_ibdata_file; + /* Skip aria log files */ + if (is_aria_log_dir_file(node)) + continue; + if (strstr(node.filepath,"/" ROCKSDB_BACKUP_DIR "/") #ifdef _WIN32 || strstr(node.filepath,"\\" ROCKSDB_BACKUP_DIR "\\") @@ -2209,7 +2266,9 @@ decrypt_decompress() Do not copy the Innodb files (ibdata1, redo log files), as this is done in a separate step. */ -static bool backup_files_from_datadir(ds_ctxt *ds_data, const char *dir_path) +static bool backup_files_from_datadir(ds_ctxt_t *ds_data, + const char *dir_path, + const char *prefix) { os_file_dir_t dir = os_file_opendir(dir_path); if (dir == IF_WIN(INVALID_HANDLE_VALUE, nullptr)) return false; @@ -2225,8 +2284,7 @@ static bool backup_files_from_datadir(ds_ctxt *ds_data, const char *dir_path) if (!pname) pname = info.name; - if (!starts_with(pname, "aws-kms-key") && - !starts_with(pname, "aria_log")) + if (!starts_with(pname, prefix)) /* For ES exchange the above line with the following code: (!xtrabackup_prepare || !xtrabackup_incremental_dir || !starts_with(pname, "aria_log"))) diff --git a/extra/mariabackup/backup_mysql.cc b/extra/mariabackup/backup_mysql.cc index 6003bfb36c4..1831485e957 100644 --- a/extra/mariabackup/backup_mysql.cc +++ b/extra/mariabackup/backup_mysql.cc @@ -367,6 +367,7 @@ bool get_mysql_vars(MYSQL *connection) char *innodb_undo_directory_var= NULL; char *innodb_page_size_var= NULL; char *innodb_undo_tablespaces_var= NULL; + char *aria_log_dir_path_var= NULL; char *page_zip_level_var= NULL; char *ignore_db_dirs= NULL; char *endptr; @@ -397,6 +398,7 @@ bool get_mysql_vars(MYSQL *connection) {"innodb_undo_tablespaces", &innodb_undo_tablespaces_var}, {"innodb_compression_level", &page_zip_level_var}, {"ignore_db_dirs", &ignore_db_dirs}, + {"aria_log_dir_path", &aria_log_dir_path_var}, {NULL, NULL}}; read_mysql_variables(connection, "SHOW VARIABLES", mysql_vars, true); @@ -538,6 +540,11 @@ bool get_mysql_vars(MYSQL *connection) ut_ad(*endptr == 0); } + if (aria_log_dir_path_var) + { + aria_log_dir_path= my_strdup(aria_log_dir_path_var, MYF(MY_FAE)); + } + if (page_zip_level_var != NULL) { page_zip_level= strtoul(page_zip_level_var, &endptr, 10); diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index ee12034c910..96c90b5afad 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -266,6 +266,8 @@ my_bool innobase_locks_unsafe_for_binlog; my_bool innobase_rollback_on_timeout; my_bool innobase_create_status_file; +char *aria_log_dir_path; + /* The following counter is used to convey information to InnoDB about server activity: in selects it is not sensible to call srv_active_wake_master_thread after each fetch or search, we only do @@ -1105,7 +1107,8 @@ enum options_xtrabackup OPT_XTRA_CHECK_PRIVILEGES, 
OPT_XTRA_MYSQLD_ARGS, OPT_XB_IGNORE_INNODB_PAGE_CORRUPTION, - OPT_INNODB_FORCE_RECOVERY + OPT_INNODB_FORCE_RECOVERY, + OPT_ARIA_LOG_DIR_PATH }; struct my_option xb_client_options[]= { @@ -1696,6 +1699,11 @@ struct my_option xb_server_options[] = &innodb_log_checksums, &innodb_log_checksums, 0, GET_BOOL, REQUIRED_ARG, 1, 0, 0, 0, 0, 0 }, + {"aria_log_dir_path", OPT_ARIA_LOG_DIR_PATH, + "Path to individual files and their sizes.", + &aria_log_dir_path, &aria_log_dir_path, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"open_files_limit", OPT_OPEN_FILES_LIMIT, "the maximum number of file " "descriptors to reserve with setrlimit().", (G_PTR*) &xb_open_files_limit, (G_PTR*) &xb_open_files_limit, 0, GET_ULONG, @@ -2012,6 +2020,10 @@ xb_get_one_option(int optid, } break; + case OPT_ARIA_LOG_DIR_PATH: + ADD_PRINT_PARAM_OPT(aria_log_dir_path); + break; + case OPT_XTRA_TARGET_DIR: strmake(xtrabackup_real_target_dir,argument, sizeof(xtrabackup_real_target_dir)-1); xtrabackup_target_dir= xtrabackup_real_target_dir; diff --git a/extra/mariabackup/xtrabackup.h b/extra/mariabackup/xtrabackup.h index de3a96443a3..df2f766aedb 100644 --- a/extra/mariabackup/xtrabackup.h +++ b/extra/mariabackup/xtrabackup.h @@ -74,6 +74,7 @@ extern char *xtrabackup_incremental_dir; extern char *xtrabackup_incremental_basedir; extern char *innobase_data_home_dir; extern char *innobase_buffer_pool_filename; +extern char *aria_log_dir_path; extern char *xb_plugin_dir; extern char *xb_rocksdb_datadir; extern my_bool xb_backup_rocksdb; diff --git a/mysql-test/suite/mariabackup/aria_log_dir_path.result b/mysql-test/suite/mariabackup/aria_log_dir_path.result new file mode 100644 index 00000000000..1a877321bbe --- /dev/null +++ b/mysql-test/suite/mariabackup/aria_log_dir_path.result @@ -0,0 +1,41 @@ +# +# MDEV-30968 mariadb-backup does not copy Aria logs if aria_log_dir_path is used +# +# Restart mariadbd with the test specific parameters +# restart: --aria-log-file-size=8388608 --aria-log-purge-type=external --loose-aria-log-dir-path=MYSQLTEST_VARDIR/tmp/backup_aria_log_dir_path +# Create and populate an Aria table (and Aria logs) +CREATE TABLE t1 (id INT, txt LONGTEXT) ENGINE=Aria; +BEGIN NOT ATOMIC +FOR id IN 0..9 DO +INSERT INTO test.t1 (id, txt) VALUES (id, REPEAT(id,1024*1024)); +END FOR; +END; +$$ +# Testing aria log files before --backup +SET @@global.aria_checkpoint_interval=DEFAULT /*Force checkpoint*/; +SHOW ENGINE aria logs; +Type Name Status +Aria aria_log.00000001 free +Aria aria_log.00000002 in use +# mariadb-backup --backup +# mariadb-backup --prepare +# shutdown server +# remove datadir +# remove aria-log-dir-path +# mariadb-backup --copy-back +# with parameters: --defaults-file=MYSQLTEST_VARDIR/my.cnf --copy-back --datadir=MYSQLTEST_VARDIR/mysqld.1/data/ --target-dir=MYSQLTEST_VARDIR/tmp/backup --parallel=2 --throttle=1 --aria-log-dir-path=MYSQLTEST_VARDIR/tmp/backup_aria_log_dir_path +# starting server +# restart: --aria-log-file-size=8388608 --aria-log-purge-type=external --loose-aria-log-dir-path=MYSQLTEST_VARDIR/tmp/backup_aria_log_dir_path +# Check that the table is there after --copy-back +SELECT COUNT(*) from t1; +COUNT(*) +10 +DROP TABLE t1; +# Testing aria log files after --copy-back +SET @@global.aria_checkpoint_interval=DEFAULT /*Force checkpoint*/; +SHOW ENGINE aria logs; +Type Name Status +Aria aria_log.00000001 free +Aria aria_log.00000002 in use +# Restarting mariadbd with default parameters +# restart diff --git a/mysql-test/suite/mariabackup/aria_log_dir_path.test 
b/mysql-test/suite/mariabackup/aria_log_dir_path.test new file mode 100644 index 00000000000..0178cd4eae5 --- /dev/null +++ b/mysql-test/suite/mariabackup/aria_log_dir_path.test @@ -0,0 +1,105 @@ +--source include/have_maria.inc + +--echo # +--echo # MDEV-30968 mariadb-backup does not copy Aria logs if aria_log_dir_path is used +--echo # + +--let $datadir=`SELECT @@datadir` +--let $targetdir=$MYSQLTEST_VARDIR/tmp/backup + +if ($ARIA_LOGDIR_MARIADB == '') +{ + --let $ARIA_LOGDIR_MARIADB=$MYSQLTEST_VARDIR/tmp/backup_aria_log_dir_path +} + +if ($ARIA_LOGDIR_FS == '') +{ + --let $ARIA_LOGDIR_FS=$MYSQLTEST_VARDIR/tmp/backup_aria_log_dir_path +} + +--let $server_parameters=--aria-log-file-size=8388608 --aria-log-purge-type=external --loose-aria-log-dir-path=$ARIA_LOGDIR_MARIADB + + +--echo # Restart mariadbd with the test specific parameters +--mkdir $ARIA_LOGDIR_FS +--let $restart_parameters=$server_parameters +--source include/restart_mysqld.inc + + +--echo # Create and populate an Aria table (and Aria logs) +CREATE TABLE t1 (id INT, txt LONGTEXT) ENGINE=Aria; +DELIMITER $$; +BEGIN NOT ATOMIC + FOR id IN 0..9 DO + INSERT INTO test.t1 (id, txt) VALUES (id, REPEAT(id,1024*1024)); + END FOR; +END; +$$ +DELIMITER ;$$ + + +--echo # Testing aria log files before --backup +SET @@global.aria_checkpoint_interval=DEFAULT /*Force checkpoint*/; +--file_exists $ARIA_LOGDIR_FS/aria_log_control +--file_exists $ARIA_LOGDIR_FS/aria_log.00000001 +--file_exists $ARIA_LOGDIR_FS/aria_log.00000002 +--error 1 +--file_exists $ARIA_LOGDIR_FS/aria_log.00000003 +--replace_regex /Size +[0-9]+ ; .+aria_log/aria_log/ +SHOW ENGINE aria logs; + + +--echo # mariadb-backup --backup +--disable_result_log +--mkdir $targetdir +--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir +--enable_result_log + + +--echo # mariadb-backup --prepare +--disable_result_log +--exec $XTRABACKUP --prepare --target-dir=$targetdir +--enable_result_log + + +--echo # shutdown server +--disable_result_log +--source include/shutdown_mysqld.inc +--echo # remove datadir +--rmdir $datadir +--echo # remove aria-log-dir-path +--rmdir $ARIA_LOGDIR_FS + +--echo # mariadb-backup --copy-back +--let $mariadb_backup_parameters=--defaults-file=$MYSQLTEST_VARDIR/my.cnf --copy-back --datadir=$datadir --target-dir=$targetdir --parallel=2 --throttle=1 --aria-log-dir-path=$ARIA_LOGDIR_MARIADB +--replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +--exec echo "# with parameters: $mariadb_backup_parameters" +--exec $XTRABACKUP $mariadb_backup_parameters + +--echo # starting server +--let $restart_parameters=$server_parameters +--source include/start_mysqld.inc +--enable_result_log +--rmdir $targetdir + + +--echo # Check that the table is there after --copy-back +SELECT COUNT(*) from t1; +DROP TABLE t1; + + +--echo # Testing aria log files after --copy-back +SET @@global.aria_checkpoint_interval=DEFAULT /*Force checkpoint*/; +--file_exists $ARIA_LOGDIR_FS/aria_log_control +--file_exists $ARIA_LOGDIR_FS/aria_log.00000001 +--file_exists $ARIA_LOGDIR_FS/aria_log.00000002 +--error 1 +--file_exists $ARIA_LOGDIR_FS/aria_log.00000003 +--replace_regex /Size +[0-9]+ ; .+aria_log/aria_log/ +SHOW ENGINE aria logs; + + +--echo # Restarting mariadbd with default parameters +--let $restart_parameters= +--source include/restart_mysqld.inc +--rmdir $ARIA_LOGDIR_FS diff --git a/mysql-test/suite/mariabackup/aria_log_dir_path_rel.result b/mysql-test/suite/mariabackup/aria_log_dir_path_rel.result new file mode 100644 index 
00000000000..7fef26096e0 --- /dev/null +++ b/mysql-test/suite/mariabackup/aria_log_dir_path_rel.result @@ -0,0 +1,41 @@ +# +# MDEV-30968 mariadb-backup does not copy Aria logs if aria_log_dir_path is used +# +# Restart mariadbd with the test specific parameters +# restart: --aria-log-file-size=8388608 --aria-log-purge-type=external --loose-aria-log-dir-path=../../tmp/backup_aria_log_dir_path_rel +# Create and populate an Aria table (and Aria logs) +CREATE TABLE t1 (id INT, txt LONGTEXT) ENGINE=Aria; +BEGIN NOT ATOMIC +FOR id IN 0..9 DO +INSERT INTO test.t1 (id, txt) VALUES (id, REPEAT(id,1024*1024)); +END FOR; +END; +$$ +# Testing aria log files before --backup +SET @@global.aria_checkpoint_interval=DEFAULT /*Force checkpoint*/; +SHOW ENGINE aria logs; +Type Name Status +Aria aria_log.00000001 free +Aria aria_log.00000002 in use +# mariadb-backup --backup +# mariadb-backup --prepare +# shutdown server +# remove datadir +# remove aria-log-dir-path +# mariadb-backup --copy-back +# with parameters: --defaults-file=MYSQLTEST_VARDIR/my.cnf --copy-back --datadir=MYSQLTEST_VARDIR/mysqld.1/data/ --target-dir=MYSQLTEST_VARDIR/tmp/backup --parallel=2 --throttle=1 --aria-log-dir-path=../../tmp/backup_aria_log_dir_path_rel +# starting server +# restart: --aria-log-file-size=8388608 --aria-log-purge-type=external --loose-aria-log-dir-path=../../tmp/backup_aria_log_dir_path_rel +# Check that the table is there after --copy-back +SELECT COUNT(*) from t1; +COUNT(*) +10 +DROP TABLE t1; +# Testing aria log files after --copy-back +SET @@global.aria_checkpoint_interval=DEFAULT /*Force checkpoint*/; +SHOW ENGINE aria logs; +Type Name Status +Aria aria_log.00000001 free +Aria aria_log.00000002 in use +# Restarting mariadbd with default parameters +# restart diff --git a/mysql-test/suite/mariabackup/aria_log_dir_path_rel.test b/mysql-test/suite/mariabackup/aria_log_dir_path_rel.test new file mode 100644 index 00000000000..c8169959929 --- /dev/null +++ b/mysql-test/suite/mariabackup/aria_log_dir_path_rel.test @@ -0,0 +1,4 @@ +--let $ARIA_LOGDIR_MARIADB=../../tmp/backup_aria_log_dir_path_rel +--let $ARIA_LOGDIR_FS=$MYSQLTEST_VARDIR/tmp/backup_aria_log_dir_path_rel + +--source aria_log_dir_path.test diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h index 20fcc0b64b8..76d4f465b95 100644 --- a/storage/innobase/include/os0file.h +++ b/storage/innobase/include/os0file.h @@ -1567,7 +1567,11 @@ is_absolute_path( } #ifdef _WIN32 - if (path[1] == ':' && path[2] == OS_PATH_SEPARATOR) { + // This will conflict during a 10.5->10.6 merge. + // Choose the 10.6 version as is. + if (path[1] == ':' && + (path[2] == OS_PATH_SEPARATOR || + path[2] == OS_PATH_SEPARATOR_ALT)) { return(true); } #endif /* _WIN32 */ -- cgit v1.2.1 From 6dc6c22c14fe204dbac43b6132c5bd130c69aba1 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Fri, 21 Apr 2023 18:49:52 -0700 Subject: MDEV-31085 Crash when processing multi-update using view with optimizer_trace on This bug caused server crash when processing a multi-update statement that used views if optimizer tracing was enabled. The bug was introduced in the patch for MDEV-30539 that could incorrectly detect the most top level selects of queries if views were used in them. 
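The sql_select.cc hunks below replace the old `get_master()->get_master() == 0` test with a direct comparison against the statement's main unit. The following is only a minimal sketch of why the new test is the safer one; the struct names (Unit, Select, Lex) are hypothetical stand-ins, not the server's real st_select_lex / st_select_lex_unit classes, and the "unit not linked back to an enclosing select" shape is an assumption inferred from the commit message, which says views could make a non-top-level select look top level.

// Minimal, self-contained sketch; hypothetical simplified structs only.
#include <cstdio>

struct Select;                              // stands in for st_select_lex
struct Unit   { Select *master= nullptr; }; // stands in for st_select_lex_unit
struct Select { Unit *master= nullptr; };
struct Lex    { Unit unit; };               // statement-level LEX owns the main unit

// Old test (from MDEV-30539): top level means "my unit has no parent select".
static bool top_level_old(const Select &sel)
{ return sel.master->master == nullptr; }

// New test (this patch): top level means "my unit is the statement's main unit".
static bool top_level_new(const Select &sel, const Lex &lex)
{ return sel.master == &lex.unit; }

int main()
{
  Lex lex;                          // e.g. UPDATE t, v SET ...
  Select update_select;             // select#1 of the multi-update
  update_select.master= &lex.unit;

  Unit view_unit;                   // unit contributed by view v; assumed unlinked
  Select view_select;               // one select of the view's UNION
  view_select.master= &view_unit;

  std::printf("old: view select top level?   %d  (wrongly yes)\n",
              (int) top_level_old(view_select));
  std::printf("new: view select top level?   %d\n",
              (int) top_level_new(view_select, lex));
  std::printf("new: update select top level? %d\n",
              (int) top_level_new(update_select, lex));
  return 0;
}

Compiled standalone, the old test reports the view's select as top level while the new test does not; in the server that distinction decides whether statement-level printing (and the command-type lookup seen in the hunks below) is applied to a select that is not the statement's own.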
Approved by Oleksandr Byelkin --- mysql-test/main/opt_trace.result | 356 +++++++++++++++++++++++++++++++++++++++ mysql-test/main/opt_trace.test | 19 +++ sql/sql_select.cc | 4 +- 3 files changed, 377 insertions(+), 2 deletions(-) diff --git a/mysql-test/main/opt_trace.result b/mysql-test/main/opt_trace.result index a8b391ffbe8..a7a8fb88e6d 100644 --- a/mysql-test/main/opt_trace.result +++ b/mysql-test/main/opt_trace.result @@ -8505,5 +8505,361 @@ SELECT a FROM t1 WHERE (a,b) in (SELECT @c,@d); a DROP TABLE t1; # +# MDEV-31085: multi-update using view with optimizer trace enabled +# +SET SESSION optimizer_trace = 'enabled=on'; +CREATE TABLE t (a int, b int); +CREATE VIEW v AS SELECT 1 AS c UNION SELECT 2 AS c; +INSERT INTO t VALUES (0,4),(5,6); +UPDATE t, v SET t.b = t.a, t.a = v.c WHERE v.c < t.a; +SELECT * FROM information_schema.optimizer_trace; +QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES +UPDATE t, v SET t.b = t.a, t.a = v.c WHERE v.c < t.a { + "steps": [ + { + "view": { + "table": "v", + "select_id": 2, + "algorithm": "materialized" + } + }, + { + "join_preparation": { + "select_id": 2, + "steps": [ + { + "expanded_query": "/* select#2 */ select 1 AS c" + } + ] + } + }, + { + "join_preparation": { + "select_id": 3, + "steps": [ + { + "expanded_query": "/* select#3 */ select 2 AS c" + } + ] + } + }, + { + "join_preparation": { + "select_id": 1, + "steps": [ + { + "expanded_query": "/* select#1 */ update t join v set t.b = t.a,t.a = v.c where v.c < t.a" + } + ] + } + }, + { + "join_optimization": { + "select_id": 1, + "steps": [ + { + "condition_processing": { + "condition": "WHERE", + "original_condition": "v.c < t.a", + "steps": [ + { + "transformation": "equality_propagation", + "resulting_condition": "v.c < t.a" + }, + { + "transformation": "constant_propagation", + "resulting_condition": "v.c < t.a" + }, + { + "transformation": "trivial_condition_removal", + "resulting_condition": "v.c < t.a" + } + ] + } + }, + { + "join_optimization": { + "select_id": 2, + "steps": [] + } + }, + { + "join_optimization": { + "select_id": 3, + "steps": [] + } + }, + { + "table_dependencies": [ + { + "table": "t", + "row_may_be_null": false, + "map_bit": 0, + "depends_on_map_bits": [] + }, + { + "table": "", + "row_may_be_null": false, + "map_bit": 1, + "depends_on_map_bits": [] + } + ] + }, + { + "ref_optimizer_key_uses": [] + }, + { + "rows_estimation": [ + { + "table": "t", + "table_scan": { + "rows": 2, + "cost": 2.0044 + } + }, + { + "table": "", + "table_scan": { + "rows": 2, + "cost": 2 + } + } + ] + }, + { + "considered_execution_plans": [ + { + "plan_prefix": [], + "table": "t", + "best_access_path": { + "considered_access_paths": [ + { + "access_type": "scan", + "resulting_rows": 2, + "cost": 2.0044, + "chosen": true + } + ], + "chosen_access_method": { + "type": "scan", + "records": 2, + "cost": 2.0044, + "uses_join_buffering": false + } + }, + "rows_for_plan": 2, + "cost_for_plan": 2.4044, + "rest_of_plan": [ + { + "plan_prefix": ["t"], + "table": "", + "best_access_path": { + "considered_access_paths": [ + { + "access_type": "scan", + "resulting_rows": 2, + "cost": 2, + "chosen": true + } + ], + "chosen_access_method": { + "type": "scan", + "records": 2, + "cost": 2, + "uses_join_buffering": true + } + }, + "rows_for_plan": 4, + "cost_for_plan": 5.2044, + "estimated_join_cardinality": 4 + } + ] + }, + { + "plan_prefix": [], + "table": "", + "best_access_path": { + "considered_access_paths": [ + { + "access_type": "scan", + "resulting_rows": 2, + "cost": 2, + 
"chosen": true + } + ], + "chosen_access_method": { + "type": "scan", + "records": 2, + "cost": 2, + "uses_join_buffering": false + } + }, + "rows_for_plan": 2, + "cost_for_plan": 2.4, + "rest_of_plan": [ + { + "plan_prefix": [""], + "table": "t", + "best_access_path": { + "considered_access_paths": [ + { + "access_type": "scan", + "resulting_rows": 2, + "cost": 2.0044, + "chosen": true + } + ], + "chosen_access_method": { + "type": "scan", + "records": 2, + "cost": 2.0044, + "uses_join_buffering": true + } + }, + "rows_for_plan": 4, + "cost_for_plan": 5.2044, + "pruned_by_cost": true + } + ] + } + ] + }, + { + "best_join_order": ["t", ""] + }, + { + "attaching_conditions_to_tables": { + "original_condition": "v.c < t.a", + "attached_conditions_computation": [], + "attached_conditions_summary": [ + { + "table": "t", + "attached": null + }, + { + "table": "", + "attached": "v.c < t.a" + } + ] + } + } + ] + } + }, + { + "join_execution": { + "select_id": 1, + "steps": [ + { + "join_execution": { + "select_id": 2, + "steps": [] + } + }, + { + "join_execution": { + "select_id": 3, + "steps": [] + } + }, + { + "join_preparation": { + "select_id": "fake", + "steps": [ + { + "expanded_query": "select c AS c from dual" + } + ] + } + }, + { + "join_optimization": { + "select_id": "fake", + "steps": [ + { + "table_dependencies": [ + { + "table": "union", + "row_may_be_null": false, + "map_bit": 0, + "depends_on_map_bits": [] + } + ] + }, + { + "rows_estimation": [ + { + "table": "union", + "table_scan": { + "rows": 2, + "cost": 10.1 + } + } + ] + }, + { + "considered_execution_plans": [ + { + "plan_prefix": [], + "table": "union", + "best_access_path": { + "considered_access_paths": [ + { + "access_type": "scan", + "resulting_rows": 2, + "cost": 10.1, + "chosen": true + } + ], + "chosen_access_method": { + "type": "scan", + "records": 2, + "cost": 10.1, + "uses_join_buffering": false + } + }, + "rows_for_plan": 2, + "cost_for_plan": 10.5, + "estimated_join_cardinality": 2 + } + ] + }, + { + "best_join_order": ["union"] + }, + { + "attaching_conditions_to_tables": { + "original_condition": null, + "attached_conditions_computation": [], + "attached_conditions_summary": [ + { + "table": "union", + "attached": null + } + ] + } + } + ] + } + }, + { + "join_execution": { + "select_id": "fake", + "steps": [] + } + } + ] + } + } + ] +} 0 0 +SELECT * FROM t; +a b +0 4 +1 5 +SET optimizer_trace=DEFAULT; +DROP VIEW v; +DROP TABLE t; +# # End of 10.4 tests # diff --git a/mysql-test/main/opt_trace.test b/mysql-test/main/opt_trace.test index 0785d828a07..e0be5360069 100644 --- a/mysql-test/main/opt_trace.test +++ b/mysql-test/main/opt_trace.test @@ -677,6 +677,25 @@ INSERT INTO t1 VALUES (0,0); SELECT a FROM t1 WHERE (a,b) in (SELECT @c,@d); DROP TABLE t1; +--echo # +--echo # MDEV-31085: multi-update using view with optimizer trace enabled +--echo # + +SET SESSION optimizer_trace = 'enabled=on'; + +CREATE TABLE t (a int, b int); +CREATE VIEW v AS SELECT 1 AS c UNION SELECT 2 AS c; +INSERT INTO t VALUES (0,4),(5,6); +UPDATE t, v SET t.b = t.a, t.a = v.c WHERE v.c < t.a; +SELECT * FROM information_schema.optimizer_trace; + +SELECT * FROM t; + +SET optimizer_trace=DEFAULT; + +DROP VIEW v; +DROP TABLE t; + --echo # --echo # End of 10.4 tests --echo # diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 0651c1d58bd..03a2c3d0853 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -28050,7 +28050,7 @@ void st_select_lex::print_item_list(THD *thd, String *str, outer_select() can not be used here because it is 
for name resolution and will return NULL at any end of name resolution chain (view/derived) */ - bool top_level= (get_master()->get_master() == 0); + bool top_level= (get_master() == &thd->lex->unit); List_iterator_fast it(item_list); Item *item; while ((item= it++)) @@ -28157,7 +28157,7 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) return; } - bool top_level= (get_master()->get_master() == 0); + bool top_level= (get_master() == &thd->lex->unit); enum explainable_cmd_type sel_type= SELECT_CMD; if (top_level) sel_type= get_explainable_cmd_type(thd); -- cgit v1.2.1 From d3e394b3b1ff1e2c4e160972aad1f78a13fbb62e Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Mon, 24 Apr 2023 10:27:55 +0400 Subject: A cleanup for MDEV-30968 mariadb-backup does not copy Aria logs if aria_log_dir_path is used Fixing buildbot failures on mariabackup.aria_log_dir_path_rel. The problem was that directory_exists() was called with the relative aria_log_dir_path value, while the current directory in mariadb-backup is not necessarily equal to datadir when MTR is running. Fix: - Moving building the absolute path un level upper: from the function copy_back_aria_logs() to the function copy_back(). - Passing the built absolute path to both directory_exists() and copy_back_aria_logs() as a parameter. --- extra/mariabackup/backup_copy.cc | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/extra/mariabackup/backup_copy.cc b/extra/mariabackup/backup_copy.cc index 8ab52fa983b..05acc1e6765 100644 --- a/extra/mariabackup/backup_copy.cc +++ b/extra/mariabackup/backup_copy.cc @@ -1849,10 +1849,8 @@ is_aria_log_dir_file(const datadir_node_t &node) bool -copy_back_aria_logs() +copy_back_aria_logs(const char *dstdir) { - Copy_back_dst_dir dst_dir_buf; - const char *dstdir= dst_dir_buf.make(aria_log_dir_path); std::unique_ptr ds_ctxt_aria_log_dir_path(ds_create(dstdir, DS_TYPE_LOCAL), ds_destroy); @@ -1907,8 +1905,11 @@ copy_back() && !directory_exists(srv_log_group_home_dir, true)) { return(false); } + + Copy_back_dst_dir aria_log_dir_path_dst; + const char *aria_log_dir_path_abs= aria_log_dir_path_dst.make(aria_log_dir_path); if (aria_log_dir_path && *aria_log_dir_path - && !directory_exists(aria_log_dir_path, true)) { + && !directory_exists(aria_log_dir_path_abs, true)) { return false; } @@ -1919,7 +1920,7 @@ copy_back() return(false); } - if (!copy_back_aria_logs()) + if (!copy_back_aria_logs(aria_log_dir_path_abs)) return false; /* parse data file path */ -- cgit v1.2.1 From 31f09e36c183d026a28f42ddbb9be2229613a3ed Mon Sep 17 00:00:00 2001 From: Brandon Nesterenko Date: Tue, 18 Apr 2023 13:22:43 -0600 Subject: MDEV-31038: Parallel Replication Breaks if XA PREPARE Fails Updating Slave GTID State If a replica failed to update the GTID slave state when committing an XA PREPARE, the replica would retry the transaction and get an out-of-order GTID error. This is because the commit phase of an XA PREPARE is bifurcated. That is, first, the prepare is handled by the relevant storage engines. Then second, the GTID slave state is updated as a separate autocommit transaction. If the second phase fails, and the transaction is retried, then the same transaction is attempted to be committed again, resulting in a GTID out-of-order error. This patch fixes this error by immediately stopping the slave and reporting the appropriate error. 
That is, there was logic to bypass the error when updating the GTID slave state table if the underlying error is allowed for retry on a parallel slave. This patch adds a parameter to disallow the error bypass, thereby forcing the error state to still happen. Reviewed By ============ Andrei Elkin --- .../suite/rpl/r/rpl_xa_prepare_gtid_fail.result | 51 ++++++++++ .../suite/rpl/t/rpl_xa_prepare_gtid_fail.test | 106 +++++++++++++++++++++ sql/handler.cc | 7 +- sql/log_event.h | 2 +- sql/log_event_server.cc | 64 +++++++++---- 5 files changed, 208 insertions(+), 22 deletions(-) create mode 100644 mysql-test/suite/rpl/r/rpl_xa_prepare_gtid_fail.result create mode 100644 mysql-test/suite/rpl/t/rpl_xa_prepare_gtid_fail.test diff --git a/mysql-test/suite/rpl/r/rpl_xa_prepare_gtid_fail.result b/mysql-test/suite/rpl/r/rpl_xa_prepare_gtid_fail.result new file mode 100644 index 00000000000..f3fecbda349 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_xa_prepare_gtid_fail.result @@ -0,0 +1,51 @@ +include/master-slave.inc +[connection master] +connection slave; +include/stop_slave.inc +change master to master_use_gtid=slave_pos; +set @@global.slave_parallel_threads= 4; +set @@global.slave_parallel_mode= optimistic; +set @@global.gtid_strict_mode=ON; +set sql_log_bin= 0; +alter table mysql.gtid_slave_pos engine=innodb; +call mtr.add_suppression("Deadlock found.*"); +set sql_log_bin= 1; +include/start_slave.inc +connection master; +create table t1 (a int primary key, b int) engine=innodb; +insert t1 values (1,1); +include/save_master_gtid.inc +connection slave; +include/sync_with_master_gtid.inc +include/stop_slave.inc +set @@global.innodb_lock_wait_timeout= 1; +connection master; +set @@session.gtid_seq_no=100; +xa start '1'; +update t1 set b=b+10 where a=1; +xa end '1'; +xa prepare '1'; +xa commit '1'; +include/save_master_gtid.inc +connection slave; +connection slave1; +BEGIN; +SELECT * FROM mysql.gtid_slave_pos WHERE seq_no=100 FOR UPDATE; +domain_id sub_id server_id seq_no +connection slave; +include/start_slave.inc +include/wait_for_slave_sql_error.inc [errno=1942,1213] +connection slave1; +ROLLBACK; +# Cleanup +connection master; +drop table t1; +connection slave; +include/stop_slave.inc +set @@global.gtid_slave_pos= "0-1-100"; +set @@global.slave_parallel_threads= 0; +set @@global.gtid_strict_mode= 0; +set @@global.innodb_lock_wait_timeout= 50; +include/start_slave.inc +include/rpl_end.inc +# End of rpl_xa_prepare_gtid_fail.test diff --git a/mysql-test/suite/rpl/t/rpl_xa_prepare_gtid_fail.test b/mysql-test/suite/rpl/t/rpl_xa_prepare_gtid_fail.test new file mode 100644 index 00000000000..8042b355754 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_xa_prepare_gtid_fail.test @@ -0,0 +1,106 @@ +# +# When handling the replication of an XA PREPARE, the commit phase is +# bifurcated. First, the prepare is handled by the relevant storage engines. +# Then second,the GTID slave state is updated as a separate autocommit +# transaction. If the second stage fails, i.e. we are unable to update the +# GTID slave state, then the slave should immediately quit in error, without +# retry. +# +# This tests validates the above behavior by simulating a deadlock on the +# GTID slave state table during the second part of XA PREPARE's commit, to +# ensure that the appropriate error is reported and the transaction was never +# retried. 
+# +# +# References +# MDEV-31038: Parallel Replication Breaks if XA PREPARE Fails Updating Slave +# GTID State +# +source include/master-slave.inc; +source include/have_binlog_format_row.inc; +source include/have_innodb.inc; + +--connection slave +--source include/stop_slave.inc + +--let $save_par_thds= `SELECT @@global.slave_parallel_threads` +--let $save_strict_mode= `SELECT @@global.gtid_strict_mode` +--let $save_innodb_lock_wait_timeout= `SELECT @@global.innodb_lock_wait_timeout` + +change master to master_use_gtid=slave_pos; +set @@global.slave_parallel_threads= 4; +set @@global.slave_parallel_mode= optimistic; +set @@global.gtid_strict_mode=ON; + +set sql_log_bin= 0; +alter table mysql.gtid_slave_pos engine=innodb; +call mtr.add_suppression("Deadlock found.*"); +set sql_log_bin= 1; +--source include/start_slave.inc + +--connection master +let $datadir= `select @@datadir`; +create table t1 (a int primary key, b int) engine=innodb; +insert t1 values (1,1); +--source include/save_master_gtid.inc + +--connection slave +--source include/sync_with_master_gtid.inc +--source include/stop_slave.inc +set @@global.innodb_lock_wait_timeout= 1; + +--let $retried_tx_initial= query_get_value(SHOW ALL SLAVES STATUS, Retried_transactions, 1) + +--connection master +--let $gtid_domain_id=`SELECT @@GLOBAL.gtid_domain_id` +--let $gtid_server_id=`SELECT @@GLOBAL.server_id` +--let $xap_seq_no=100 +--eval set @@session.gtid_seq_no=$xap_seq_no +xa start '1'; +update t1 set b=b+10 where a=1; +xa end '1'; +xa prepare '1'; +--let $new_gtid= `SELECT @@global.gtid_binlog_pos` +xa commit '1'; +--source include/save_master_gtid.inc + + +--connection slave + +#--eval set statement sql_log_bin=0 for insert into mysql.gtid_slave_pos values ($gtid_domain_id, 5, $gtid_server_id, $xap_seq_no) + +--connection slave1 +BEGIN; +--eval SELECT * FROM mysql.gtid_slave_pos WHERE seq_no=$xap_seq_no FOR UPDATE + +--connection slave +--source include/start_slave.inc + +--let $slave_sql_errno= 1942,1213 +--source include/wait_for_slave_sql_error.inc + +--let $retried_tx_test= query_get_value(SHOW ALL SLAVES STATUS, Retried_transactions, 1) +if ($retried_tx_initial != $retried_tx_test) +{ + --echo Transaction was retried when a failed XA PREPARE slave GTID update should lead to immediate slave stop without retry + --die Transaction was retried when a failed XA PREPARE slave GTID update should lead to immediate slave stop without retry +} + +--connection slave1 +ROLLBACK; + +--echo # Cleanup + +--connection master +drop table t1; + +--connection slave +--source include/stop_slave.inc +--eval set @@global.gtid_slave_pos= "$new_gtid" +--eval set @@global.slave_parallel_threads= $save_par_thds +--eval set @@global.gtid_strict_mode= $save_strict_mode +--eval set @@global.innodb_lock_wait_timeout= $save_innodb_lock_wait_timeout +--source include/start_slave.inc + +--source include/rpl_end.inc +--echo # End of rpl_xa_prepare_gtid_fail.test diff --git a/sql/handler.cc b/sql/handler.cc index 926ef2f4f54..eaaf4664c07 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -2057,8 +2057,11 @@ int ha_rollback_trans(THD *thd, bool all) rollback without signalling following transactions. And in release builds, we explicitly do the signalling before rolling back. 
*/ - DBUG_ASSERT(!(thd->rgi_slave && thd->rgi_slave->did_mark_start_commit) || - thd->transaction->xid_state.is_explicit_XA()); + DBUG_ASSERT( + !(thd->rgi_slave && thd->rgi_slave->did_mark_start_commit) || + (thd->transaction->xid_state.is_explicit_XA() || + (thd->rgi_slave->gtid_ev_flags2 & Gtid_log_event::FL_PREPARED_XA))); + if (thd->rgi_slave && thd->rgi_slave->did_mark_start_commit) thd->rgi_slave->unmark_start_commit(); } diff --git a/sql/log_event.h b/sql/log_event.h index 5dcf0315f68..2fcc4549bc9 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -3042,7 +3042,7 @@ private: virtual int do_commit()= 0; virtual int do_apply_event(rpl_group_info *rgi); int do_record_gtid(THD *thd, rpl_group_info *rgi, bool in_trans, - void **out_hton); + void **out_hton, bool force_err= false); enum_skip_reason do_shall_skip(rpl_group_info *rgi); virtual const char* get_query()= 0; #endif diff --git a/sql/log_event_server.cc b/sql/log_event_server.cc index c5fb637a000..a2b78bc241d 100644 --- a/sql/log_event_server.cc +++ b/sql/log_event_server.cc @@ -152,6 +152,30 @@ is_parallel_retry_error(rpl_group_info *rgi, int err) return has_temporary_error(rgi->thd); } +/** + Accumulate a Diagnostics_area's errors and warnings into an output buffer + + @param errbuf The output buffer to write error messages + @param errbuf_size The size of the output buffer + @param da The Diagnostics_area to check for errors +*/ +static void inline aggregate_da_errors(char *errbuf, size_t errbuf_size, + Diagnostics_area *da) +{ + const char *errbuf_end= errbuf + errbuf_size; + char *slider; + Diagnostics_area::Sql_condition_iterator it= da->sql_conditions(); + const Sql_condition *err; + size_t len; + for (err= it++, slider= errbuf; err && slider < errbuf_end - 1; + slider += len, err= it++) + { + len= my_snprintf(slider, errbuf_end - slider, + " %s, Error_code: %d;", err->get_message_text(), + err->get_sql_errno()); + } +} + /** Error reporting facility for Rows_log_event::do_apply_event @@ -172,13 +196,8 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error, const char *log_name, my_off_t pos) { const char *handler_error= (ha_error ? HA_ERR(ha_error) : NULL); - char buff[MAX_SLAVE_ERRMSG], *slider; - const char *buff_end= buff + sizeof(buff); - size_t len; - Diagnostics_area::Sql_condition_iterator it= - thd->get_stmt_da()->sql_conditions(); + char buff[MAX_SLAVE_ERRMSG]; Relay_log_info const *rli= rgi->rli; - const Sql_condition *err; buff[0]= 0; int errcode= thd->is_error() ? 
thd->get_stmt_da()->sql_errno() : 0; @@ -191,13 +210,7 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error, if (is_parallel_retry_error(rgi, errcode)) return; - for (err= it++, slider= buff; err && slider < buff_end - 1; - slider += len, err= it++) - { - len= my_snprintf(slider, buff_end - slider, - " %s, Error_code: %d;", err->get_message_text(), - err->get_sql_errno()); - } + aggregate_da_errors(buff, sizeof(buff), thd->get_stmt_da()); if (ha_error != 0) rli->report(level, errcode, rgi->gtid_info(), @@ -3893,7 +3906,8 @@ bool slave_execute_deferred_events(THD *thd) #if defined(HAVE_REPLICATION) int Xid_apply_log_event::do_record_gtid(THD *thd, rpl_group_info *rgi, - bool in_trans, void **out_hton) + bool in_trans, void **out_hton, + bool force_err) { int err= 0; Relay_log_info const *rli= rgi->rli; @@ -3908,14 +3922,26 @@ int Xid_apply_log_event::do_record_gtid(THD *thd, rpl_group_info *rgi, int ec= thd->get_stmt_da()->sql_errno(); /* Do not report an error if this is really a kill due to a deadlock. - In this case, the transaction will be re-tried instead. + In this case, the transaction will be re-tried instead. Unless force_err + is set, as in the case of XA PREPARE, as the GTID state is updated as a + separate transaction, and if that fails, we should not retry but exit in + error immediately. */ - if (!is_parallel_retry_error(rgi, ec)) + if (!is_parallel_retry_error(rgi, ec) || force_err) + { + char buff[MAX_SLAVE_ERRMSG]; + buff[0]= 0; + aggregate_da_errors(buff, sizeof(buff), thd->get_stmt_da()); + + if (force_err) + thd->clear_error(); + rli->report(ERROR_LEVEL, ER_CANNOT_UPDATE_GTID_STATE, rgi->gtid_info(), "Error during XID COMMIT: failed to update GTID state in " - "%s.%s: %d: %s", + "%s.%s: %d: %s the event's master log %s, end_log_pos %llu", "mysql", rpl_gtid_slave_state_table_name.str, ec, - thd->get_stmt_da()->message()); + buff, RPL_LOG_NAME, log_pos); + } thd->is_slave_error= 1; } @@ -3989,7 +4015,7 @@ int Xid_apply_log_event::do_apply_event(rpl_group_info *rgi) { DBUG_ASSERT(!thd->transaction->xid_state.is_explicit_XA()); - if ((err= do_record_gtid(thd, rgi, false, &hton))) + if ((err= do_record_gtid(thd, rgi, false, &hton, true))) return err; } -- cgit v1.2.1 From 50f3b7d1649002df3c73ec88827707096ce3135c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 25 Apr 2023 12:17:06 +0300 Subject: MDEV-31124 Innodb_data_written miscounts doublewrites When commit a5a2ef079cec378340d8b575aef05974b0b3442e implemented asynchronous doublewrite, the writes via the doublewrite buffer started to be counted incorrectly, without multiplying them by innodb_page_size. srv_export_innodb_status(): Correctly count the Innodb_data_written. buf_dblwr_t: Remove submitted(), because it is close to written() and only Innodb_data_written was interested in it. According to its name, it should count completed and not submitted writes. 
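Before the srv0srv.cc hunk below, the doublewrite page count was added directly into srv_stats.data_written, which is a byte counter, so the doublewrite contribution to Innodb_data_written was off by a factor of the page size (and it also counted submitted rather than completed writes, which the buf_dblwr_t change addresses). A minimal sketch of the accounting difference, using made-up numbers rather than server state; page_size_shift=14 assumes the default 16 KiB page size:

#include <cstdint>
#include <cstdio>

int main()
{
  const unsigned page_size_shift= 14;       // 16 KiB pages: 1 << 14 bytes
  const uint64_t data_written= 1u << 20;    // bytes already counted by other writes
  const uint64_t dblwr_pages= 128;          // completed doublewrite page writes

  // Old accounting: a page count added straight into a byte counter.
  const uint64_t before_fix= data_written + dblwr_pages;

  // New accounting: scale pages to bytes first, as done with srv_page_size_shift.
  const uint64_t after_fix= data_written + (dblwr_pages << page_size_shift);

  std::printf("before: %llu bytes, after: %llu bytes\n",
              (unsigned long long) before_fix,
              (unsigned long long) after_fix);
  return 0;
}

With these numbers the corrected value exceeds the base counter by 128 * 16384 = 2097152 bytes instead of by 128.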
Tested by: Axel Schwenke --- storage/innobase/buf/buf0dblwr.cc | 1 - storage/innobase/include/buf0dblwr.h | 5 ----- storage/innobase/srv/srv0srv.cc | 7 ++++--- 3 files changed, 4 insertions(+), 9 deletions(-) diff --git a/storage/innobase/buf/buf0dblwr.cc b/storage/innobase/buf/buf0dblwr.cc index 1d582b6cfbf..b6b6e87b6df 100644 --- a/storage/innobase/buf/buf0dblwr.cc +++ b/storage/innobase/buf/buf0dblwr.cc @@ -582,7 +582,6 @@ bool buf_dblwr_t::flush_buffered_writes(const ulint size) const bool multi_batch= block1 + static_cast(size) != block2 && old_first_free > size; flushing_buffered_writes= 1 + multi_batch; - pages_submitted+= old_first_free; /* Now safe to release the mutex. */ mysql_mutex_unlock(&mutex); #ifdef UNIV_DEBUG diff --git a/storage/innobase/include/buf0dblwr.h b/storage/innobase/include/buf0dblwr.h index fb9df55504c..f82baeec89a 100644 --- a/storage/innobase/include/buf0dblwr.h +++ b/storage/innobase/include/buf0dblwr.h @@ -66,8 +66,6 @@ class buf_dblwr_t bool batch_running; /** number of expected flush_buffered_writes_completed() calls */ unsigned flushing_buffered_writes; - /** pages submitted to flush_buffered_writes() */ - ulint pages_submitted; /** number of flush_buffered_writes_completed() calls */ ulint writes_completed; /** number of pages written by flush_buffered_writes_completed() */ @@ -92,9 +90,6 @@ public: /** Acquire the mutex */ void lock() { mysql_mutex_lock(&mutex); } - /** @return the number of submitted page writes */ - ulint submitted() const - { mysql_mutex_assert_owner(&mutex); return pages_submitted; } /** @return the number of completed batches */ ulint batches() const { mysql_mutex_assert_owner(&mutex); return writes_completed; } diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index 337460fc4d2..57aa4bef9fe 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -1013,13 +1013,14 @@ srv_export_innodb_status(void) if (buf_dblwr.is_initialised()) { buf_dblwr.lock(); - dblwr = buf_dblwr.submitted(); - export_vars.innodb_dblwr_pages_written = buf_dblwr.written(); + dblwr = buf_dblwr.written(); + export_vars.innodb_dblwr_pages_written = dblwr; export_vars.innodb_dblwr_writes = buf_dblwr.batches(); buf_dblwr.unlock(); } - export_vars.innodb_data_written = srv_stats.data_written + dblwr; + export_vars.innodb_data_written = srv_stats.data_written + + (dblwr << srv_page_size_shift); export_vars.innodb_buffer_pool_read_requests = buf_pool.stat.n_page_gets; -- cgit v1.2.1
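Not part of the patch or its test suite, but a quick manual sanity check of the corrected accounting on a running server; the names below are existing status and system variables referenced in the hunks above:

SHOW GLOBAL STATUS LIKE 'Innodb_data_written';
SHOW GLOBAL STATUS LIKE 'Innodb_dblwr_pages_written';
SHOW GLOBAL STATUS LIKE 'Innodb_dblwr_writes';
SELECT @@GLOBAL.innodb_page_size;

After the fix, the growth of Innodb_data_written attributable to the doublewrite buffer should roughly track Innodb_dblwr_pages_written multiplied by innodb_page_size.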