From a9b31b0814b02e65930209b1a90a8293b2ca6619 Mon Sep 17 00:00:00 2001 From: Dmitry Shulga Date: Fri, 9 Dec 2022 11:50:05 +0700 Subject: MDEV-29988: (revert) Major performance regression with 10.6.11 Reverted changes in server code introduced by commit bd9274faa469cc164099c7497c18a0e0a9b1184b. Tests from this commit are retained. --- sql/item_cmpfunc.cc | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index b7b0c981c2d..fe6b8feb4de 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -442,18 +442,9 @@ bool Item_func::setup_args_and_comparator(THD *thd, Arg_comparator *cmp) if (args[0]->cmp_type() == STRING_RESULT && args[1]->cmp_type() == STRING_RESULT) { - Query_arena *arena, backup; - arena= thd->activate_stmt_arena_if_needed(&backup); - DTCollation tmp; - bool ret= agg_arg_charsets_for_comparison(tmp, args, 2); - - if (arena) - thd->restore_active_arena(arena, &backup); - - if (ret) - return ret; - + if (agg_arg_charsets_for_comparison(tmp, args, 2)) + return true; cmp->m_compare_collation= tmp.collation; } // Convert constants when compared to int/year field -- cgit v1.2.1 From 37a316c01d778a62a056d5d20110ef18bb55975e Mon Sep 17 00:00:00 2001 From: Dmitry Shulga Date: Fri, 9 Dec 2022 21:10:25 +0700 Subject: MDEV-29988: Major performance regression with 10.6.11 The idea is to put Item_direct_ref_to_item as a transparent and permanent wrapper in front of a string item that requires conversion. Since Item_direct_ref_to_item is then the only place where the pointer to the string item is stored, this pointer can be changed and restored during PS execution as needed. And if any permanent (subquery) optimization needs a pointer to the item, it will use a pointer to the Item_direct_ref_to_item - which is a permanent item and won't go away.
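To illustrate the mechanism outside the server, a minimal self-contained sketch follows; Item, RefToItem and Rollback here are hypothetical stand-ins for the real Item class, THD::change_item_tree() and the statement lifecycle, not actual server code:

// Sketch of the "permanent transparent wrapper" idea: the wrapper is the
// one stable address that permanent plans may keep a pointer to; the
// pointer it holds can be swapped for a single execution and is rolled
// back afterwards, so nothing that points at the wrapper ever dangles.
#include <functional>
#include <vector>

struct Item { virtual ~Item() = default; };

struct RefToItem : Item {
  Item *inner;                          // the only stored pointer to the item
  explicit RefToItem(Item *i) : inner(i) {}
};

struct Rollback {                       // stand-in for THD::change_item_tree()
  std::vector<std::function<void()>> undo;
  void change(Item *&slot, Item *new_item) {
    Item *old= slot;
    undo.push_back([&slot, old] { slot= old; });
    slot= new_item;                     // temporary, per-execution change
  }
  ~Rollback() {                         // end of statement: undo in reverse
    for (auto it= undo.rbegin(); it != undo.rend(); ++it) (*it)();
  }
};

int main() {
  Item plain, converted;
  RefToItem ref(&plain);                // permanent wrapper; plans keep &ref
  {
    Rollback r;
    r.change(ref.inner, &converted);    // e.g. charset-converted item for one EXECUTE
  }                                     // wrapped pointer restored here
  return ref.inner == &plain ? 0 : 1;   // returns 0: the original item is back
}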
--- mysql-test/main/ps.result | 15 ++++++++ mysql-test/main/ps.test | 35 ++++++++++++++++++ sql/item.cc | 80 +++++++++++++++++++++++++++++++++------- sql/item.h | 93 +++++++++++++++++++++++++++++++++++++++++++++++ sql/sql_tvc.cc | 5 +-- sql/table.cc | 5 ++- 6 files changed, 215 insertions(+), 18 deletions(-) diff --git a/mysql-test/main/ps.result b/mysql-test/main/ps.result index 26c41526389..16700fd83d3 100644 --- a/mysql-test/main/ps.result +++ b/mysql-test/main/ps.result @@ -5700,3 +5700,18 @@ EXECUTE stmt USING 'd'; EXECUTE stmt USING 'd'; 300 DROP TABLE t1, t2, t3; +set @@max_session_mem_used=default; +create table t (a varchar(10)) character set utf8; +insert into t values (''); +prepare stmt from "select 1 from t where a = ?"; +set @@max_session_mem_used=(select memory_used*2 from information_schema.processlist where id=connection_id()); +deallocate prepare stmt; +drop table t; +set @@max_session_mem_used=default; +create table t (a varchar(10)) character set utf8; +insert into t values (''); +prepare stmt from "select 1 from t where a = 'a'"; +set @@max_session_mem_used=(select memory_used*2 from information_schema.processlist where id=connection_id()); +deallocate prepare stmt; +drop table t; +set @@max_session_mem_used=default; diff --git a/mysql-test/main/ps.test b/mysql-test/main/ps.test index 2ccfac3c119..87cae7211a4 100644 --- a/mysql-test/main/ps.test +++ b/mysql-test/main/ps.test @@ -5116,3 +5116,38 @@ EXECUTE stmt USING 'b'; EXECUTE stmt USING 'd'; EXECUTE stmt USING 'd'; DROP TABLE t1, t2, t3; + +set @@max_session_mem_used=default; +create table t (a varchar(10)) character set utf8; +insert into t values (''); +prepare stmt from "select 1 from t where a = ?"; +set @@max_session_mem_used=(select memory_used*2 from information_schema.processlist where id=connection_id()); +let $run= 1000; +disable_result_log; +disable_query_log; +while ($run) { + execute stmt using repeat('x',10000); + dec $run; +} +enable_result_log; +enable_query_log; +deallocate prepare stmt; +drop table t; +set @@max_session_mem_used=default; + +create table t (a varchar(10)) character set utf8; +insert into t values (''); +prepare stmt from "select 1 from t where a = 'a'"; +set @@max_session_mem_used=(select memory_used*2 from information_schema.processlist where id=connection_id()); +let $run= 1000; +disable_result_log; +disable_query_log; +while ($run) { + execute stmt; + dec $run; +} +enable_result_log; +enable_query_log; +deallocate prepare stmt; +drop table t; +set @@max_session_mem_used=default; diff --git a/sql/item.cc b/sql/item.cc index 757f09dec01..03e6ffb5715 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -41,6 +41,7 @@ // find_item_in_list, // RESOLVED_AGAINST_ALIAS, ... 
#include "sql_expression_cache.h" +#include "sql_lex.h" // empty_clex_str const String my_null_string("NULL", 4, default_charset_info); const String my_default_string("DEFAULT", 7, default_charset_info); @@ -1401,13 +1402,11 @@ Item *Item_cache::safe_charset_converter(THD *thd, CHARSET_INFO *tocs) Item *conv= example->safe_charset_converter(thd, tocs); if (conv == example) return this; - Item_cache *cache; - if (!conv || conv->fix_fields(thd, (Item **) NULL) || - unlikely(!(cache= new (thd->mem_root) Item_cache_str(thd, conv)))) + if (!conv || conv->fix_fields(thd, (Item **) NULL)) return NULL; // Safe conversion is not possible, or OEM - cache->setup(thd, conv); - cache->fixed= false; // Make Item::fix_fields() happy - return cache; + setup(thd, conv); + thd->change_item_tree(&example, conv); + return this; } @@ -2782,7 +2781,6 @@ bool Type_std_attributes::agg_item_set_converter(const DTCollation &coll, safe_args[1]= args[item_sep]; } - bool res= FALSE; uint i; DBUG_ASSERT(!thd->stmt_arena->is_stmt_prepare()); @@ -2802,19 +2800,31 @@ bool Type_std_attributes::agg_item_set_converter(const DTCollation &coll, args[item_sep]= safe_args[1]; } my_coll_agg_error(args, nargs, fname, item_sep); - res= TRUE; - break; // we cannot return here, we need to restore "arena". + return TRUE; } - thd->change_item_tree(arg, conv); - if (conv->fix_fields(thd, arg)) + return TRUE; + + Query_arena *arena, backup; + arena= thd->activate_stmt_arena_if_needed(&backup); + if (arena) { - res= TRUE; - break; // we cannot return here, we need to restore "arena". + Item_direct_ref_to_item *ref= + new (thd->mem_root) Item_direct_ref_to_item(thd, *arg); + if ((ref == NULL) || ref->fix_fields(thd, (Item **)&ref)) + { + thd->restore_active_arena(arena, &backup); + return TRUE; + } + *arg= ref; + thd->restore_active_arena(arena, &backup); + ref->change_item(thd, conv); } + else + thd->change_item_tree(arg, conv); } - return res; + return FALSE; } @@ -10952,3 +10962,45 @@ void Item::register_in(THD *thd) next= thd->free_list; thd->free_list= this; } + + +Item_direct_ref_to_item::Item_direct_ref_to_item(THD *thd, Item *item) +: Item_direct_ref(thd, NULL, NULL, "", &empty_clex_str, FALSE) +{ + m_item= item; + ref= (Item**)&m_item; +} + +bool Item_direct_ref_to_item::fix_fields(THD *thd, Item **) +{ + DBUG_ASSERT(m_item != NULL); + if (m_item->fix_fields_if_needed_for_scalar(thd, ref)) + return TRUE; + set_properties(); + return FALSE; +} + +void Item_direct_ref_to_item::print(String *str, enum_query_type query_type) +{ + m_item->print(str, query_type); +} + +Item *Item_direct_ref_to_item::safe_charset_converter(THD *thd, + CHARSET_INFO *tocs) +{ + Item *conv= m_item->safe_charset_converter(thd, tocs); + if (conv != m_item) + { + if (conv== NULL || conv->fix_fields(thd, &conv)) + return NULL; + change_item(thd, conv); + } + return this; +} + +void Item_direct_ref_to_item::change_item(THD *thd, Item *i) +{ + DBUG_ASSERT(i->fixed); + thd->change_item_tree(ref, i); + set_properties(); +} diff --git a/sql/item.h b/sql/item.h index ec87a3fb812..e0bcaf41eac 100644 --- a/sql/item.h +++ b/sql/item.h @@ -6890,4 +6890,97 @@ inline void Virtual_column_info::print(String* str) expr->print_for_table_def(str); } +class Item_direct_ref_to_item : public Item_direct_ref +{ + Item *m_item; +public: + Item_direct_ref_to_item(THD *thd, Item *item); + + void change_item(THD *thd, Item *); + + bool fix_fields(THD *thd, Item **it); + + void print(String *str, enum_query_type query_type); + + Item *safe_charset_converter(THD *thd, CHARSET_INFO 
*tocs); + Item *get_tmp_table_item(THD *thd) + { return m_item->get_tmp_table_item(thd); } + Item *get_copy(THD *thd) + { return m_item->get_copy(thd); } + COND *build_equal_items(THD *thd, COND_EQUAL *inherited, + bool link_item_fields, + COND_EQUAL **cond_equal_ref) + { + return m_item->build_equal_items(thd, inherited, link_item_fields, + cond_equal_ref); + } + const char *full_name() const { return m_item->full_name(); } + void make_send_field(THD *thd, Send_field *field) + { m_item->make_send_field(thd, field); } + bool eq(const Item *item, bool binary_cmp) const + { + Item *it= ((Item *) item)->real_item(); + return m_item->eq(it, binary_cmp); + } + void fix_after_pullout(st_select_lex *new_parent, Item **refptr, bool merge) + { m_item->fix_after_pullout(new_parent, &m_item, merge); } + void save_val(Field *to) + { return m_item->save_val(to); } + void save_result(Field *to) + { return m_item->save_result(to); } + int save_in_field(Field *to, bool no_conversions) + { return m_item->save_in_field(to, no_conversions); } + const Type_handler *type_handler() const { return m_item->type_handler(); } + table_map used_tables() const { return m_item->used_tables(); } + void update_used_tables() + { m_item->update_used_tables(); } + bool const_item() const { return m_item->const_item(); } + table_map not_null_tables() const { return m_item->not_null_tables(); } + bool walk(Item_processor processor, bool walk_subquery, void *arg) + { + return m_item->walk(processor, walk_subquery, arg) || + (this->*processor)(arg); + } + bool enumerate_field_refs_processor(void *arg) + { return m_item->enumerate_field_refs_processor(arg); } + Item_field *field_for_view_update() + { return m_item->field_for_view_update(); } + + /* Row emulation: forwarding of ROW-related calls to orig_item */ + uint cols() const + { return m_item->cols(); } + Item* element_index(uint i) + { return this; } + Item** addr(uint i) + { return &m_item; } + bool check_cols(uint c) + { return Item::check_cols(c); } + bool null_inside() + { return m_item->null_inside(); } + void bring_value() + {} + + Item_equal *get_item_equal() { return m_item->get_item_equal(); } + void set_item_equal(Item_equal *item_eq) { m_item->set_item_equal(item_eq); } + Item_equal *find_item_equal(COND_EQUAL *cond_equal) + { return m_item->find_item_equal(cond_equal); } + Item *propagate_equal_fields(THD *thd, const Context &ctx, COND_EQUAL *cond) + { return m_item->propagate_equal_fields(thd, ctx, cond); } + Item *replace_equal_field(THD *thd, uchar *arg) + { return m_item->replace_equal_field(thd, arg); } + + bool excl_dep_on_table(table_map tab_map) + { return m_item->excl_dep_on_table(tab_map); } + bool excl_dep_on_grouping_fields(st_select_lex *sel) + { return m_item->excl_dep_on_grouping_fields(sel); } + bool is_expensive() { return m_item->is_expensive(); } + Item* build_clone(THD *thd) { return get_copy(thd); } + + /* + This processor states that this is safe for virtual columns + (because this Item transparency) + */ + bool check_vcol_func_processor(void *arg) { return FALSE;} +}; + #endif /* SQL_ITEM_INCLUDED */ diff --git a/sql/sql_tvc.cc b/sql/sql_tvc.cc index 43c25db5097..b2e9df2d745 100644 --- a/sql/sql_tvc.cc +++ b/sql/sql_tvc.cc @@ -539,7 +539,7 @@ bool Item_func_in::create_value_list_for_tvc(THD *thd, if (is_list_of_rows) { - Item_row *row_list= (Item_row *)(args[i]->build_clone(thd)); + Item_row *row_list= (Item_row *)(args[i]); if (!row_list) return true; @@ -564,8 +564,7 @@ bool Item_func_in::create_value_list_for_tvc(THD *thd, 
sprintf(col_name, "_col_%i", 1); args[i]->set_name(thd, col_name, strlen(col_name), thd->charset()); } - Item *arg_clone= args[i]->build_clone(thd); - if (!arg_clone || tvc_value->push_back(arg_clone)) + if (tvc_value->push_back(args[i])) return true; } diff --git a/sql/table.cc b/sql/table.cc index ec49241dd7a..a1ee2f3b8b3 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -3010,7 +3010,7 @@ class Vcol_expr_context bool inited; THD *thd; TABLE *table; - Query_arena backup_arena; + Query_arena backup_arena, *stmt_arena; table_map old_map; Security_context *save_security_ctx; sql_mode_t save_sql_mode; @@ -3020,6 +3020,7 @@ public: inited(false), thd(_thd), table(_table), + stmt_arena(thd->stmt_arena), old_map(table->map), save_security_ctx(thd->security_ctx), save_sql_mode(thd->variables.sql_mode) {} @@ -3040,6 +3041,7 @@ bool Vcol_expr_context::init() thd->security_ctx= tl->security_ctx; thd->set_n_backup_active_arena(table->expr_arena, &backup_arena); + thd->stmt_arena= thd; inited= true; return false; @@ -3053,6 +3055,7 @@ Vcol_expr_context::~Vcol_expr_context() thd->security_ctx= save_security_ctx; thd->restore_active_arena(table->expr_arena, &backup_arena); thd->variables.sql_mode= save_sql_mode; + thd->stmt_arena= stmt_arena; } -- cgit v1.2.1 From 60f646e2f3fefa7909538ca8982393b74a9bf5f2 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Wed, 14 Dec 2022 14:36:27 +0100 Subject: MDEV-29988: (spider fix) Major performance regression with 10.6.11 Make Item_direct_ref_to_item transparent for Spider --- storage/spider/spd_db_conn.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc index 5a8c7492654..6e377b482a4 100644 --- a/storage/spider/spd_db_conn.cc +++ b/storage/spider/spd_db_conn.cc @@ -8853,6 +8853,12 @@ int spider_db_print_item_type( DBUG_ENTER("spider_db_print_item_type"); DBUG_PRINT("info",("spider COND type=%d", item->type())); + if (item->type() == Item::REF_ITEM && + ((Item_ref*)item)->ref_type() == Item_ref::DIRECT_REF) + { + item= item->real_item(); + DBUG_PRINT("info",("spider new COND type=%d", item->type())); + } switch (item->type()) { case Item::FUNC_ITEM: -- cgit v1.2.1 From 908c48a34dd94350c87c89a32937bf32399c3ab4 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sun, 18 Dec 2022 14:30:36 +0100 Subject: fixes for json.json_table and main.func_json in --ps --- sql/item.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sql/item.cc b/sql/item.cc index 03e6ffb5715..02220fd56bd 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -2468,7 +2468,8 @@ void Item::split_sum_func2(THD *thd, Ref_ptr_array ref_pointer_array, if (unlikely((!(used_tables() & ~PARAM_TABLE_BIT) || (type() == REF_ITEM && - ((Item_ref*)this)->ref_type() != Item_ref::VIEW_REF)))) + ((Item_ref*)this)->ref_type() != Item_ref::VIEW_REF && + ((Item_ref*)this)->ref_type() != Item_ref::DIRECT_REF)))) return; } -- cgit v1.2.1 From 21223c0461455b4175bae91aee7e3525a51c6d70 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sun, 18 Dec 2022 16:30:26 +0100 Subject: MDEV-29988 group by fix --- mysql-test/main/func_group.result | 11 ++++++++++- mysql-test/main/func_group.test | 12 ++++++++---- sql/item.h | 5 +++++ 3 files changed, 23 insertions(+), 5 deletions(-) diff --git a/mysql-test/main/func_group.result b/mysql-test/main/func_group.result index 0f80d14b603..6617d21a197 100644 --- a/mysql-test/main/func_group.result +++ b/mysql-test/main/func_group.result @@ -1,4 +1,3 @@ -drop table if exists t1,t2,t3,t4,t5,t6; set 
@sav_dpi= @@div_precision_increment; set div_precision_increment= 5; show variables like 'div_precision_increment'; @@ -2566,5 +2565,15 @@ stddev_samp(i) stddev_pop(i) stddev(i) std(i) drop view v1; drop table t1; # +# MDEV-29988: Major performance regression with 10.6.11 +# +create table t1 (a varchar(10) charset utf8mb4, b int, c int); +insert t1 values (1,2,3),(4,5,6),(1,7,8); +select concat(a,":",group_concat(b)) from t1 group by a; +concat(a,":",group_concat(b)) +1:2,7 +4:5 +drop table t1; +# # End of 10.3 tests # diff --git a/mysql-test/main/func_group.test b/mysql-test/main/func_group.test index 6b3a15fd45e..5693940dda6 100644 --- a/mysql-test/main/func_group.test +++ b/mysql-test/main/func_group.test @@ -2,10 +2,6 @@ # simple test of all group functions # ---disable_warnings -drop table if exists t1,t2,t3,t4,t5,t6; ---enable_warnings - set @sav_dpi= @@div_precision_increment; set div_precision_increment= 5; show variables like 'div_precision_increment'; @@ -1800,6 +1796,14 @@ select * from v1; drop view v1; drop table t1; +--echo # +--echo # MDEV-29988: Major performance regression with 10.6.11 +--echo # +create table t1 (a varchar(10) charset utf8mb4, b int, c int); +insert t1 values (1,2,3),(4,5,6),(1,7,8); +select concat(a,":",group_concat(b)) from t1 group by a; +drop table t1; + --echo # --echo # End of 10.3 tests --echo # diff --git a/sql/item.h b/sql/item.h index e0bcaf41eac..35428b30252 100644 --- a/sql/item.h +++ b/sql/item.h @@ -6976,6 +6976,11 @@ public: bool is_expensive() { return m_item->is_expensive(); } Item* build_clone(THD *thd) { return get_copy(thd); } + void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array, + List &fields, uint flags) + { + m_item->split_sum_func(thd, ref_pointer_array, fields, flags); + } /* This processor states that this is safe for virtual columns (because this Item transparency) -- cgit v1.2.1 From e51a1d6fc0c105d56c6f05efb3d9fd05b7e4f22f Mon Sep 17 00:00:00 2001 From: Andrew Hutchings Date: Tue, 3 Jan 2023 10:48:57 +1100 Subject: MDEV-30329: mariadb-service-convert resets systemd service to default User=root If mariadb-service-convert is run and the user variable is unset, then this sets `User=` in `[Service]`, which then tries to run mariadb as root, which in turn fails. This only happens when mysqld_safe is missing, which is now always the case. So don't set `User=` if there is no user variable. Reviewer: Sergei Golubchik (in PR #2382) --- scripts/mariadb-service-convert | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/mariadb-service-convert b/scripts/mariadb-service-convert index 38043733554..ade07e9a336 100755 --- a/scripts/mariadb-service-convert +++ b/scripts/mariadb-service-convert @@ -36,7 +36,7 @@ echo '[Service]' echo -if [[ ( "$user" != "root" && "$user" != "mysql" ) || "${SET_USER}" == 1 ]]; then +if [[ ( !
-z "$user" && "$user" != "root" && "$user" != "mysql" ) || "${SET_USER}" == 1 ]]; then echo User=$user fi -- cgit v1.2.1 From 758c24dae2c1e03f6c0837028e7e7f931497a9b5 Mon Sep 17 00:00:00 2001 From: lilinjie Date: Wed, 4 Jan 2023 18:32:54 +0800 Subject: fix typos Signed-off-by: lilinjie --- sql/field.cc | 2 +- sql/field.h | 2 +- sql/sp_head.cc | 4 ++-- sql/sql_yacc.yy | 2 +- sql/sql_yacc_ora.yy | 2 +- storage/connect/tabfmt.cpp | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/sql/field.cc b/sql/field.cc index de92d1dea94..5fc033c0280 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -11287,7 +11287,7 @@ Create_field *Create_field::clone(MEM_ROOT *mem_root) const } /** - Return true if default is an expression that must be saved explicitely + Return true if default is an expression that must be saved explicitly This is: - Not basic constants diff --git a/sql/field.h b/sql/field.h index ce15184ea1d..f1087332087 100644 --- a/sql/field.h +++ b/sql/field.h @@ -4600,7 +4600,7 @@ public: return make_field(share, mem_root, (uchar *) 0, (uchar *) "", 0, field_name_arg); } - /* Return true if default is an expression that must be saved explicitely */ + /* Return true if default is an expression that must be saved explicitly */ bool has_default_expression(); bool has_default_now_unireg_check() const diff --git a/sql/sp_head.cc b/sql/sp_head.cc index df4ef8522ba..160e66f7bcd 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -2366,7 +2366,7 @@ sp_head::execute_procedure(THD *thd, List *args) Disable slow log if: - Slow logging is enabled (no change needed) - This is a normal SP (not event log) - - If we have not explicitely disabled logging of SP + - If we have not explicitly disabled logging of SP */ if (save_enable_slow_log && ((!(m_flags & LOG_SLOW_STATEMENTS) && @@ -2380,7 +2380,7 @@ sp_head::execute_procedure(THD *thd, List *args) Disable general log if: - If general log is enabled (no change needed) - This is a normal SP (not event log) - - If we have not explicitely disabled logging of SP + - If we have not explicitly disabled logging of SP */ if (!(thd->variables.option_bits & OPTION_LOG_OFF) && (!(m_flags & LOG_GENERAL_LOG) && diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 73922ceb4f4..3025d93de0f 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -1750,7 +1750,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); TRANSACTION can be a non-empty history unit, or can be an identifier in bit_expr. - In the grammar below we use %prec to explicitely tell Bison to go + In the grammar below we use %prec to explicitly tell Bison to go through the empty branch in the optional rule only when the lookahead token does not belong to a small set of selected tokens. diff --git a/sql/sql_yacc_ora.yy b/sql/sql_yacc_ora.yy index 3a1b0c0e077..df90ba6c634 100644 --- a/sql/sql_yacc_ora.yy +++ b/sql/sql_yacc_ora.yy @@ -1144,7 +1144,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); TRANSACTION can be a non-empty history unit, or can be an identifier in bit_expr. - In the grammar below we use %prec to explicitely tell Bison to go + In the grammar below we use %prec to explicitly tell Bison to go through the empty branch in the optional rule only when the lookahead token does not belong to a small set of selected tokens. 
diff --git a/storage/connect/tabfmt.cpp b/storage/connect/tabfmt.cpp index 56d3cc05547..a57f0ef07aa 100644 --- a/storage/connect/tabfmt.cpp +++ b/storage/connect/tabfmt.cpp @@ -1055,7 +1055,7 @@ bool TDBCSV::PrepareWriting(PGLOBAL g) if (!strlen(Field[i])) { // Generally null fields are not quoted if (Quoted > 2) - // Except if explicitely required + // Except if explicitly required strcat(strcat(To_Line, qot), qot); } else if (Qot && (strchr(Field[i], Sep) || *Field[i] == Qot -- cgit v1.2.1 From 111a752b968561b34a88f33052519cb989a8a90f Mon Sep 17 00:00:00 2001 From: Weijun-H Date: Wed, 4 Jan 2023 18:44:03 +0000 Subject: MDEV-19160 JSON_DETAILED output unnecessarily verbose --- mysql-test/main/func_json.result | 126 ++++++++++++++++ mysql-test/main/func_json.test | 100 ++++++++++++ mysql-test/main/opt_trace.result | 218 ++++++--------------------- mysql-test/main/opt_trace_index_merge.result | 102 +++---------- mysql-test/main/opt_trace_ucs2.result | 9 +- mysql-test/main/range_notembedded.result | 1 - mysql-test/suite/plugins/r/multiauth.result | 3 - sql/item_jsonfunc.cc | 47 ++++-- 8 files changed, 327 insertions(+), 279 deletions(-) diff --git a/mysql-test/main/func_json.result b/mysql-test/main/func_json.result index b6605df547f..6ea8c78b2b6 100644 --- a/mysql-test/main/func_json.result +++ b/mysql-test/main/func_json.result @@ -1133,3 +1133,129 @@ DROP TABLE t1; # # End of 10.4 tests # +# +# MDEV-19160 JSON_DETAILED output unnecessarily verbose +# +create table t200 (a text); +insert into t200 values +('{ + "steps": [ + { + "join_optimization": { + "select_id": 1, + "steps": [ + { + "rows_estimation": [ + { + "table": "t1", + "range_analysis": { + "table_scan": { + "rows": 1000, + "cost": 2e308 + }, + "potential_range_indexes": [ + { + "index": "a_b", + "usable": true, + "key_parts": ["a", "b"] + } + ], + "best_covering_index_scan": { + "index": "a_b", + "cost": 52.195, + "chosen": true + }, + "setup_range_conditions": [], + "group_index_range": { + "chosen": false, + "cause": "no group by or distinct" + }, + "analyzing_range_alternatives": { + "range_scan_alternatives": [ + { + "index": "a_b", + "ranges": ["2 <= a <= 2 AND 4 <= b <= 4", "123"], + "rowid_ordered": true, + "using_mrr": false, + "index_only": true, + "rows": 1, + "cost": 1.1752, + "chosen": true + } + ], + "analyzing_roworder_intersect": { + "cause": "too few roworder scans" + }, + "analyzing_index_merge_union": [], + "test_one_line_array":["123"] + }, + "chosen_range_access_summary": { + "range_access_plan": { + "type": "range_scan", + "index": "a_b", + "rows": 1, + "ranges": ["2 <= a <= 2 AND 4 <= b <= 4"] + }, + "rows_for_plan": 1, + "cost_for_plan": 1.1752, + "chosen": true + } + } + }, + { + "selectivity_for_indexes": [ + { + "index_name": "a_b", + "selectivity_from_index": 0.001 + } + ], + "selectivity_for_columns": [], + "cond_selectivity": 0.001 + } + ] + } + ] + } + }, + { + "join_execution": { + "select_id": 1, + "steps": [] + } + } + ] +}'); +select JSON_DETAILED(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200; +JSON_DETAILED(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) +[ + { + "range_scan_alternatives": + [ + { + "index": "a_b", + "ranges": + [ + "2 <= a <= 2 AND 4 <= b <= 4", + "123" + ], + "rowid_ordered": true, + "using_mrr": false, + "index_only": true, + "rows": 1, + "cost": 1.1752, + "chosen": true + } + ], + "analyzing_roworder_intersect": + { + "cause": "too few roworder scans" + }, + "analyzing_index_merge_union": + [], + "test_one_line_array": + ["123"] + } +] +select 
JSON_LOOSE(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200; +JSON_LOOSE(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) +[{"range_scan_alternatives": [{"index": "a_b", "ranges": ["2 <= a <= 2 AND 4 <= b <= 4", "123"], "rowid_ordered": true, "using_mrr": false, "index_only": true, "rows": 1, "cost": 1.1752, "chosen": true}], "analyzing_roworder_intersect": {"cause": "too few roworder scans"}, "analyzing_index_merge_union": [], "test_one_line_array": ["123"]}] diff --git a/mysql-test/main/func_json.test b/mysql-test/main/func_json.test index 0987af80b79..6cfcefbee3d 100644 --- a/mysql-test/main/func_json.test +++ b/mysql-test/main/func_json.test @@ -718,3 +718,103 @@ DROP TABLE t1; --echo # --echo # End of 10.4 tests --echo # + + +--echo # +--echo # MDEV-19160 JSON_DETAILED output unnecessarily verbose +--echo # + +create table t200 (a text); + +insert into t200 values +('{ + "steps": [ + { + "join_optimization": { + "select_id": 1, + "steps": [ + { + "rows_estimation": [ + { + "table": "t1", + "range_analysis": { + "table_scan": { + "rows": 1000, + "cost": 2e308 + }, + "potential_range_indexes": [ + { + "index": "a_b", + "usable": true, + "key_parts": ["a", "b"] + } + ], + "best_covering_index_scan": { + "index": "a_b", + "cost": 52.195, + "chosen": true + }, + "setup_range_conditions": [], + "group_index_range": { + "chosen": false, + "cause": "no group by or distinct" + }, + "analyzing_range_alternatives": { + "range_scan_alternatives": [ + { + "index": "a_b", + "ranges": ["2 <= a <= 2 AND 4 <= b <= 4", "123"], + "rowid_ordered": true, + "using_mrr": false, + "index_only": true, + "rows": 1, + "cost": 1.1752, + "chosen": true + } + ], + "analyzing_roworder_intersect": { + "cause": "too few roworder scans" + }, + "analyzing_index_merge_union": [], + "test_one_line_array":["123"] + }, + "chosen_range_access_summary": { + "range_access_plan": { + "type": "range_scan", + "index": "a_b", + "rows": 1, + "ranges": ["2 <= a <= 2 AND 4 <= b <= 4"] + }, + "rows_for_plan": 1, + "cost_for_plan": 1.1752, + "chosen": true + } + } + }, + { + "selectivity_for_indexes": [ + { + "index_name": "a_b", + "selectivity_from_index": 0.001 + } + ], + "selectivity_for_columns": [], + "cond_selectivity": 0.001 + } + ] + } + ] + } + }, + { + "join_execution": { + "select_id": 1, + "steps": [] + } + } + ] +}'); + + +select JSON_DETAILED(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200; +select JSON_LOOSE(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200; diff --git a/mysql-test/main/opt_trace.result b/mysql-test/main/opt_trace.result index 79201d49474..2eef0da62bb 100644 --- a/mysql-test/main/opt_trace.result +++ b/mysql-test/main/opt_trace.result @@ -7459,17 +7459,13 @@ id select_type table type possible_keys key key_len ref rows Extra select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) [ - { "range_scan_alternatives": [ - { "index": "a_b", "ranges": - [ - "(2,4) <= (a,b) <= (2,4)" - ], + ["(2,4) <= (a,b) <= (2,4)"], "rowid_ordered": true, "using_mrr": false, "index_only": true, @@ -7483,8 +7479,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "cause": "too few roworder scans" }, "analyzing_index_merge_union": - [ - ] + [] } ] explain select * from t1 where a >= 900 and b between 10 and 20; @@ -7493,17 +7488,13 @@ id select_type table type possible_keys key key_len ref rows Extra select 
JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) [ - { "range_scan_alternatives": [ - { "index": "a_b", "ranges": - [ - "(900,10) <= (a,b)" - ], + ["(900,10) <= (a,b)"], "rowid_ordered": false, "using_mrr": false, "index_only": true, @@ -7517,8 +7508,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "cause": "too few roworder scans" }, "analyzing_index_merge_union": - [ - ] + [] } ] drop table t0,t1; @@ -7530,17 +7520,13 @@ id select_type table type possible_keys key key_len ref rows Extra select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) [ - { "range_scan_alternatives": [ - { "index": "start_date", "ranges": - [ - "(2019-02-10,NULL) < (start_date,end_date)" - ], + ["(2019-02-10,NULL) < (start_date,end_date)"], "rowid_ordered": false, "using_mrr": false, "index_only": false, @@ -7554,8 +7540,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "cause": "too few roworder scans" }, "analyzing_index_merge_union": - [ - ] + [] } ] drop table t1,one_k; @@ -7575,17 +7560,13 @@ id select_type table type possible_keys key key_len ref rows Extra select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) [ - { "range_scan_alternatives": [ - { "index": "a_b_c", "ranges": - [ - "(1) <= (a,b) < (4,50)" - ], + ["(1) <= (a,b) < (4,50)"], "rowid_ordered": false, "using_mrr": false, "index_only": false, @@ -7599,8 +7580,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "cause": "too few roworder scans" }, "analyzing_index_merge_union": - [ - ] + [] } ] drop table ten,t1; @@ -7614,17 +7594,13 @@ id select_type table type possible_keys key key_len ref rows Extra select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) [ - { "range_scan_alternatives": [ - { "index": "i_b", "ranges": - [ - "(\xD9[\x943j\x99F\xA3\x9C\xF5\xB5\x8C\xFEw-\x8C) <= (b) <= (\xD9[\x943j\x99F\xA3\x9C\xF5\xB5\x8C\xFEw-\x8C)" - ], + ["(\xD9[\x943j\x99F\xA3\x9C\xF5\xB5\x8C\xFEw-\x8C) <= (b) <= (\xD9[\x943j\x99F\xA3\x9C\xF5\xB5\x8C\xFEw-\x8C)"], "rowid_ordered": true, "using_mrr": false, "index_only": false, @@ -7638,8 +7614,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "cause": "too few roworder scans" }, "analyzing_index_merge_union": - [ - ] + [] } ] EXPLAIN SELECT * FROM t1 WHERE b IS NULL; @@ -7648,17 +7623,13 @@ id select_type table type possible_keys key key_len ref rows Extra select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) [ - { "range_scan_alternatives": [ - { "index": "i_b", "ranges": - [ - "(NULL) <= (b) <= (NULL)" - ], + ["(NULL) <= (b) <= (NULL)"], "rowid_ordered": true, "using_mrr": false, "index_only": false, @@ -7672,8 +7643,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "cause": "too few roworder scans" }, "analyzing_index_merge_union": - [ - ] + [] } ] drop table t1; @@ -7690,17 +7660,13 @@ id select_type table type 
possible_keys key key_len ref rows Extra select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) [ - { "range_scan_alternatives": [ - { "index": "i_b", "ranges": - [ - "(ab\x0A) <= (b) <= (ab\x0A)" - ], + ["(ab\x0A) <= (b) <= (ab\x0A)"], "rowid_ordered": true, "using_mrr": false, "index_only": false, @@ -7714,8 +7680,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "cause": "too few roworder scans" }, "analyzing_index_merge_union": - [ - ] + [] } ] ALTER TABLE t1 modify column b BINARY(10) AFTER i; @@ -7725,17 +7690,13 @@ id select_type table type possible_keys key key_len ref rows Extra select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) [ - { "range_scan_alternatives": [ - { "index": "i_b", "ranges": - [ - "(ab\x0A\x00\x00\x00\x00\x00\x00\x00) <= (b) <= (ab\x0A\x00\x00\x00\x00\x00\x00\x00)" - ], + ["(ab\x0A\x00\x00\x00\x00\x00\x00\x00) <= (b) <= (ab\x0A\x00\x00\x00\x00\x00\x00\x00)"], "rowid_ordered": true, "using_mrr": false, "index_only": false, @@ -7749,8 +7710,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "cause": "too few roworder scans" }, "analyzing_index_merge_union": - [ - ] + [] } ] ALTER TABLE t1 modify column b VARBINARY(10) AFTER i; @@ -7760,17 +7720,13 @@ id select_type table type possible_keys key key_len ref rows Extra select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) [ - { "range_scan_alternatives": [ - { "index": "i_b", "ranges": - [ - "(ab\x0A) <= (b) <= (ab\x0A)" - ], + ["(ab\x0A) <= (b) <= (ab\x0A)"], "rowid_ordered": true, "using_mrr": false, "index_only": false, @@ -7784,8 +7740,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "cause": "too few roworder scans" }, "analyzing_index_merge_union": - [ - ] + [] } ] drop table t1; @@ -7798,17 +7753,13 @@ id select_type table type possible_keys key key_len ref rows Extra select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) [ - { "range_scan_alternatives": [ - { "index": "i_b", "ranges": - [ - "(ab\n) <= (b) <= (ab\n)" - ], + ["(ab\n) <= (b) <= (ab\n)"], "rowid_ordered": true, "using_mrr": false, "index_only": false, @@ -7822,8 +7773,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "cause": "too few roworder scans" }, "analyzing_index_merge_union": - [ - ] + [] } ] drop table t1; @@ -7839,17 +7789,13 @@ id select_type table type possible_keys key key_len ref rows Extra select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) [ - { "range_scan_alternatives": [ - { "index": "i_b", "ranges": - [ - "(ab\x0A) <= (b) <= (ab\x0A)" - ], + ["(ab\x0A) <= (b) <= (ab\x0A)"], "rowid_ordered": false, "using_mrr": false, "index_only": false, @@ -7863,8 +7809,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "cause": "too few roworder scans" }, "analyzing_index_merge_union": - [ - ] + [] } ] drop table t1; @@ -7878,17 +7823,13 @@ id 
select_type table type possible_keys key key_len ref rows Extra select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) [ - { "range_scan_alternatives": [ - { "index": "i_b", "ranges": - [ - "(ab\n) <= (b) <= (ab\n)" - ], + ["(ab\n) <= (b) <= (ab\n)"], "rowid_ordered": true, "using_mrr": false, "index_only": false, @@ -7902,8 +7843,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "cause": "too few roworder scans" }, "analyzing_index_merge_union": - [ - ] + [] } ] drop table t1; @@ -7934,17 +7874,13 @@ EXPLAIN select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) [ - { "range_scan_alternatives": [ - { "index": "start_date", "ranges": - [ - "(2019-02-10,NULL) < (start_date,end_date)" - ], + ["(2019-02-10,NULL) < (start_date,end_date)"], "rowid_ordered": false, "using_mrr": false, "index_only": false, @@ -7958,8 +7894,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "cause": "too few roworder scans" }, "analyzing_index_merge_union": - [ - ] + [] } ] drop table t1, t0, one_k; @@ -7998,19 +7933,15 @@ id select_type table type possible_keys key key_len ref rows Extra select JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) [ - [ - { "plan_prefix": - [ - ], + [], "table": "A", "best_access_path": { "considered_access_paths": [ - { "access_type": "scan", "resulting_rows": 5.9375, @@ -8030,18 +7961,14 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) "cost_for_plan": 4.0171, "rest_of_plan": [ - { "plan_prefix": - [ - "A" - ], + ["A"], "table": "B", "best_access_path": { "considered_access_paths": [ - { "access_type": "scan", "resulting_rows": 804.69, @@ -8063,17 +7990,14 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) } ] }, - { "plan_prefix": - [ - ], + [], "table": "B", "best_access_path": { "considered_access_paths": [ - { "access_type": "scan", "resulting_rows": 804.69, @@ -8104,19 +8028,15 @@ id select_type table type possible_keys key key_len ref rows Extra select JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) [ - [ - { "plan_prefix": - [ - ], + [], "table": "A", "best_access_path": { "considered_access_paths": [ - { "access_type": "scan", "resulting_rows": 10, @@ -8136,18 +8056,14 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) "cost_for_plan": 4.0171, "rest_of_plan": [ - { "plan_prefix": - [ - "A" - ], + ["A"], "table": "B", "best_access_path": { "considered_access_paths": [ - { "access_type": "ref", "index": "b", @@ -8158,7 +8074,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) "cost": 20, "chosen": true }, - { "access_type": "scan", "resulting_rows": 804.69, @@ -8181,17 +8096,14 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) } ] }, - { "plan_prefix": - [ - ], + [], "table": "B", "best_access_path": { "considered_access_paths": [ - { "access_type": "scan", "resulting_rows": 804.69, @@ -8227,17 +8139,13 @@ a select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from 
INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) [ - { "range_scan_alternatives": [ - { "index": "a", "ranges": - [ - "() <= (a) <= ()" - ], + ["() <= (a) <= ()"], "rowid_ordered": false, "using_mrr": false, "index_only": false, @@ -8251,8 +8159,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "cause": "too few roworder scans" }, "analyzing_index_merge_union": - [ - ] + [] } ] DROP TABLE t1; @@ -8274,7 +8181,6 @@ id select_type table type possible_keys key key_len ref rows Extra select JSON_DETAILED(JSON_EXTRACT(trace, '$**.semijoin_table_pullout')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.semijoin_table_pullout')) [ - { "pulled_out_tables": [ @@ -8295,15 +8201,11 @@ kp1 kp2 select JSON_DETAILED(JSON_EXTRACT(trace, '$**.range_scan_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.range_scan_alternatives')) [ - [ - { "index": "kp1", "ranges": - [ - "(2,4) <= (kp1,kp2) <= (2)" - ], + ["(2,4) <= (kp1,kp2) <= (2)"], "rowid_ordered": false, "using_mrr": false, "index_only": true, @@ -8330,19 +8232,15 @@ id select_type table type possible_keys key key_len ref rows Extra select JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) [ - [ - { "plan_prefix": - [ - ], + [], "table": "t1", "best_access_path": { "considered_access_paths": [ - { "access_type": "scan", "resulting_rows": 10, @@ -8362,18 +8260,14 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) "cost_for_plan": 4.022, "rest_of_plan": [ - { "plan_prefix": - [ - "t1" - ], + ["t1"], "table": "t2", "best_access_path": { "considered_access_paths": [ - { "access_type": "ref", "index": "a", @@ -8384,7 +8278,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) "cost": 20, "chosen": true }, - { "access_type": "scan", "resulting_rows": 100, @@ -8407,17 +8300,14 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) } ] }, - { "plan_prefix": - [ - ], + [], "table": "t2", "best_access_path": { "considered_access_paths": [ - { "access_type": "scan", "resulting_rows": 100, @@ -8438,18 +8328,14 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) "cost_for_plan": 22.22, "rest_of_plan": [ - { "plan_prefix": - [ - "t2" - ], + ["t2"], "table": "t1", "best_access_path": { "considered_access_paths": [ - { "access_type": "ref", "index": "a", @@ -8460,7 +8346,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.considered_execution_plans')) "cost": 200, "chosen": true }, - { "access_type": "scan", "resulting_rows": 10, @@ -8504,24 +8389,17 @@ Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_columns')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_columns')) [ - [ - { "column_name": "a", "ranges": - [ - "1 <= a <= 5" - ], + ["1 <= a <= 5"], "selectivity_from_histogram": 0.0469 }, - { "column_name": "b", "ranges": - [ - "NULL < b <= 5" - ], + ["NULL < b <= 5"], "selectivity_from_histogram": 0.0469 } ] @@ -8534,9 +8412,7 @@ Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_columns')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, 
'$**.selectivity_for_columns')) [ - [ - { "column_name": "a", "ranges": @@ -8556,15 +8432,11 @@ Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` select JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_columns')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.selectivity_for_columns')) [ - [ - { "column_name": "b", "ranges": - [ - "10 <= b < 25" - ], + ["10 <= b < 25"], "selectivity_from_histogram": 0.1562 } ] @@ -8582,15 +8454,11 @@ UPDATE t1 SET b=10 WHERE a=1; SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.range_scan_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.range_scan_alternatives')) [ - [ - { "index": "PRIMARY", "ranges": - [ - "(1) <= (a) <= (1)" - ], + ["(1) <= (a) <= (1)"], "rowid_ordered": true, "using_mrr": false, "index_only": false, diff --git a/mysql-test/main/opt_trace_index_merge.result b/mysql-test/main/opt_trace_index_merge.result index ed5ddfd69f4..554ddde66a9 100644 --- a/mysql-test/main/opt_trace_index_merge.result +++ b/mysql-test/main/opt_trace_index_merge.result @@ -307,17 +307,13 @@ id select_type table type possible_keys key key_len ref rows Extra select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) [ - { "range_scan_alternatives": [ - { "index": "key1", "ranges": - [ - "(100) <= (key1) <= (100)" - ], + ["(100) <= (key1) <= (100)"], "rowid_ordered": true, "using_mrr": false, "index_only": false, @@ -325,13 +321,10 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "cost": 2844.1, "chosen": true }, - { "index": "key2", "ranges": - [ - "(100) <= (key2) <= (100)" - ], + ["(100) <= (key2) <= (100)"], "rowid_ordered": true, "using_mrr": false, "index_only": false, @@ -340,13 +333,10 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "chosen": false, "cause": "cost" }, - { "index": "key3", "ranges": - [ - "(100) <= (key3) <= (100)" - ], + ["(100) <= (key3) <= (100)"], "rowid_ordered": true, "using_mrr": false, "index_only": false, @@ -360,7 +350,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) { "intersecting_indexes": [ - { "index": "key1", "index_scan_cost": 58.252, @@ -372,7 +361,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "intersect_covering_with_this_index": false, "chosen": true }, - { "index": "key2", "index_scan_cost": 58.252, @@ -384,7 +372,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "intersect_covering_with_this_index": false, "chosen": true }, - { "index": "key3", "index_scan_cost": 58.252, @@ -408,14 +395,12 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "chosen": true }, "analyzing_index_merge_union": - [ - ] + [] } ] select JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) [ - { "range_access_plan": { @@ -426,35 +411,26 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) "clustered_pk_scan": false, "intersect_of": [ - { "type": "range_scan", "index": "key1", "rows": 2243, "ranges": - [ - "(100) <= (key1) <= (100)" - ] + ["(100) <= (key1) <= (100)"] }, - { "type": "range_scan", "index": "key2", "rows": 2243, "ranges": - [ - "(100) <= (key2) <= (100)" - ] + ["(100) <= (key2) <= (100)"] }, - { 
"type": "range_scan", "index": "key3", "rows": 2243, "ranges": - [ - "(100) <= (key3) <= (100)" - ] + ["(100) <= (key3) <= (100)"] } ] }, @@ -470,32 +446,25 @@ id select_type table type possible_keys key key_len ref rows Extra select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) [ - { "range_scan_alternatives": - [ - ], + [], "analyzing_roworder_intersect": { "cause": "too few roworder scans" }, "analyzing_index_merge_union": [ - { "indexes_to_merge": [ - { "range_scan_alternatives": [ - { "index": "key1", "ranges": - [ - "(100) <= (key1) <= (100)" - ], + ["(100) <= (key1) <= (100)"], "rowid_ordered": true, "using_mrr": false, "index_only": true, @@ -503,13 +472,10 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "cost": 152.53, "chosen": true }, - { "index": "key2", "ranges": - [ - "(100) <= (key2) <= (100)" - ], + ["(100) <= (key2) <= (100)"], "rowid_ordered": true, "using_mrr": false, "index_only": true, @@ -522,17 +488,13 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "index_to_merge": "key1", "cumulated_cost": 152.53 }, - { "range_scan_alternatives": [ - { "index": "key3", "ranges": - [ - "(100) <= (key3) <= (100)" - ], + ["(100) <= (key3) <= (100)"], "rowid_ordered": true, "using_mrr": false, "index_only": true, @@ -540,13 +502,10 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "cost": 152.53, "chosen": true }, - { "index": "key4", "ranges": - [ - "(100) <= (key4) <= (100)" - ], + ["(100) <= (key4) <= (100)"], "rowid_ordered": true, "using_mrr": false, "index_only": true, @@ -565,20 +524,16 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "cause": "always cheaper than non roworder retrieval", "analyzing_roworder_scans": [ - { "type": "range_scan", "index": "key1", "rows": 2243, "ranges": - [ - "(100) <= (key1) <= (100)" - ], + ["(100) <= (key1) <= (100)"], "analyzing_roworder_intersect": { "intersecting_indexes": [ - { "index": "key1", "index_scan_cost": 58.252, @@ -590,7 +545,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "intersect_covering_with_this_index": false, "chosen": true }, - { "index": "key2", "index_scan_cost": 58.252, @@ -614,20 +568,16 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "chosen": true } }, - { "type": "range_scan", "index": "key3", "rows": 2243, "ranges": - [ - "(100) <= (key3) <= (100)" - ], + ["(100) <= (key3) <= (100)"], "analyzing_roworder_intersect": { "intersecting_indexes": [ - { "index": "key3", "index_scan_cost": 58.252, @@ -639,7 +589,6 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "intersect_covering_with_this_index": false, "chosen": true }, - { "index": "key4", "index_scan_cost": 58.252, @@ -674,14 +623,12 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) select JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) [ - { "range_access_plan": { "type": "index_roworder_union", "union_of": [ - { "type": "index_roworder_intersect", "rows": 77, @@ -690,29 +637,22 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) "clustered_pk_scan": false, "intersect_of": [ - { "type": "range_scan", "index": "key1", "rows": 2243, "ranges": - [ - "(100) <= (key1) <= (100)" - ] + 
["(100) <= (key1) <= (100)"] }, - { "type": "range_scan", "index": "key2", "rows": 2243, "ranges": - [ - "(100) <= (key2) <= (100)" - ] + ["(100) <= (key2) <= (100)"] } ] }, - { "type": "index_roworder_intersect", "rows": 77, @@ -721,25 +661,19 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.chosen_range_access_summary')) "clustered_pk_scan": false, "intersect_of": [ - { "type": "range_scan", "index": "key3", "rows": 2243, "ranges": - [ - "(100) <= (key3) <= (100)" - ] + ["(100) <= (key3) <= (100)"] }, - { "type": "range_scan", "index": "key4", "rows": 2243, "ranges": - [ - "(100) <= (key4) <= (100)" - ] + ["(100) <= (key4) <= (100)"] } ] } diff --git a/mysql-test/main/opt_trace_ucs2.result b/mysql-test/main/opt_trace_ucs2.result index 306fdbf94ad..945392d0ac1 100644 --- a/mysql-test/main/opt_trace_ucs2.result +++ b/mysql-test/main/opt_trace_ucs2.result @@ -23,17 +23,13 @@ EXPLAIN select JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) from INFORMATION_SCHEMA.OPTIMIZER_TRACE; JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) [ - { "range_scan_alternatives": [ - { "index": "col1", "ranges": - [ - "(a) <= (col1)" - ], + ["(a) <= (col1)"], "rowid_ordered": false, "using_mrr": false, "index_only": false, @@ -47,8 +43,7 @@ JSON_DETAILED(JSON_EXTRACT(trace, '$**.analyzing_range_alternatives')) "cause": "too few roworder scans" }, "analyzing_index_merge_union": - [ - ] + [] } ] drop table t1; diff --git a/mysql-test/main/range_notembedded.result b/mysql-test/main/range_notembedded.result index 7084e0ca7a0..e17f6341c6e 100644 --- a/mysql-test/main/range_notembedded.result +++ b/mysql-test/main/range_notembedded.result @@ -20,7 +20,6 @@ select json_detailed(JSON_EXTRACT(trace, '$**.ranges')) from information_schema.optimizer_trace; json_detailed(JSON_EXTRACT(trace, '$**.ranges')) [ - [ "(1) <= (key1) <= (1)", "(2) <= (key1) <= (2)", diff --git a/mysql-test/suite/plugins/r/multiauth.result b/mysql-test/suite/plugins/r/multiauth.result index 75b604555a7..9651dac6740 100644 --- a/mysql-test/suite/plugins/r/multiauth.result +++ b/mysql-test/suite/plugins/r/multiauth.result @@ -110,16 +110,13 @@ json_detailed(priv) "authentication_string": "*7D8C3DF236D9163B6C274A9D47704BC496988460", "auth_or": [ - { "plugin": "ed25519", "authentication_string": "F4aF8bw7130VaRbdLCl4f/P/wkjDmgJXwWvpJ5gmsZc" }, - { "plugin": "unix_socket" }, - { } ], diff --git a/sql/item_jsonfunc.cc b/sql/item_jsonfunc.cc index 7e37fc7d096..ece35a40835 100644 --- a/sql/item_jsonfunc.cc +++ b/sql/item_jsonfunc.cc @@ -257,16 +257,22 @@ static int json_nice(json_engine_t *je, String *nice_js, Item_func_json_format::formats mode, int tab_size=4) { int depth= 0; - const char *comma, *colon; + static const char *comma= ", ", *colon= "\": "; uint comma_len, colon_len; int first_value= 1; - - DBUG_ASSERT(je->s.cs == nice_js->charset()); + int value_size = 0; + int curr_state= -1; + int64_t value_len= 0; + String curr_str{}; + + nice_js->length(0); + nice_js->set_charset(je->s.cs); + nice_js->alloc(je->s.str_end - je->s.c_str + 32); + + DBUG_ASSERT(mode != Item_func_json_format::DETAILED || (tab_size >= 0 && tab_size <= TAB_SIZE_LIMIT)); - comma= ", "; - colon= "\": "; if (mode == Item_func_json_format::LOOSE) { comma_len= 2; @@ -285,6 +291,7 @@ static int json_nice(json_engine_t *je, String *nice_js, do { + curr_state= je->state; switch (je->state) { case JST_KEY: @@ -307,7 +314,7 @@ static int json_nice(json_engine_t *je, String *nice_js, append_tab(nice_js, depth, tab_size)) goto error; - 
nice_js->append("\"", 1); + nice_js->append('"'); append_simple(nice_js, key_start, key_end - key_start); nice_js->append(colon, colon_len); } @@ -332,17 +339,23 @@ handle_value: if (append_simple(nice_js, je->value_begin, je->value_end - je->value_begin)) goto error; - + + curr_str.copy((const char *)je->value_begin, + je->value_end - je->value_begin, je->s.cs); + value_len= je->value_end - je->value_begin; first_value= 0; + if (value_size != -1) + value_size++; } else { if (mode == Item_func_json_format::DETAILED && - depth > 0 && + depth > 0 && !(curr_state != JST_KEY) && append_tab(nice_js, depth, tab_size)) goto error; nice_js->append((je->value_type == JSON_VALUE_OBJECT) ? "{" : "[", 1); first_value= 1; + value_size= (je->value_type == JSON_VALUE_OBJECT) ? -1: 0; depth++; } @@ -351,11 +364,27 @@ handle_value: case JST_OBJ_END: case JST_ARRAY_END: depth--; - if (mode == Item_func_json_format::DETAILED && + if (mode == Item_func_json_format::DETAILED && (value_size > 1 || value_size == -1) && append_tab(nice_js, depth, tab_size)) goto error; + + if (mode == Item_func_json_format::DETAILED && + value_size == 1 && je->state != JST_OBJ_END) + { + for (auto i = 0; i < value_len; i++) + { + nice_js->chop(); + } + for (auto i = 0; i < (depth + 1) * tab_size + 1; i++) + { + nice_js->chop(); + } + nice_js->append(curr_str); + } + nice_js->append((je->state == JST_OBJ_END) ? "}": "]", 1); first_value= 0; + value_size= -1; break; default: -- cgit v1.2.1 From af0ff8b4553f96f18b87f7a499846c510f17c49b Mon Sep 17 00:00:00 2001 From: Haidong Ji Date: Thu, 8 Dec 2022 19:34:00 +0000 Subject: MDEV-17093: SOURCE_REVISION in log and handle_fatal_signal MariaDB MDEV-12583 added the `SOURCE_REVISION` variable, which exposes the SHA1 of the source code commit that the currently running engine was built from. This info is useful for troubleshooting and debugging. This commit does the following: - adds the `SOURCE_REVISION` value to the engine error log. - when a crash triggers handle_fatal_signal, the `SOURCE_REVISION` will be included in the crash report. - resolves MDEV-20344: startup messages belong in stderr/error-log not stdout All new code of the whole pull request, including one or several files that are either new files or modified ones, is contributed under the BSD-new license. I am contributing on behalf of my employer Amazon Web Services, Inc.
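The logging pattern itself is easy to reproduce in isolation. The sketch below is an assumption-laden illustration, not the server code: it assumes the build injects the commit hash as a preprocessor define (for example from `git rev-parse HEAD`), and the version string and pid are made-up placeholders:

// Minimal sketch of the pattern: a revision string injected at build time
// (e.g. via -DSOURCE_REVISION="...") is printed both on the startup path
// and from the fatal-signal handler, so any log or stack trace can be
// matched to the exact commit the binary was built from.
#include <cstdio>

#ifndef SOURCE_REVISION
#define SOURCE_REVISION "unknown"   // fallback when the build does not inject it
#endif

int main() {
  // startup path: one of the first lines written to the error log (stderr)
  std::fprintf(stderr, "Starting server %s source revision %s as process %d\n",
               "10.6.x", SOURCE_REVISION, 12345);
  // crash path: the signal handler prints the same identifier
  std::fprintf(stderr, "Server version: %s source revision: %s\n",
               "10.6.x", SOURCE_REVISION);
  return 0;
}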
--- sql/mysqld.cc | 24 +++++++++--------------- sql/signal_handler.cc | 4 +++- 2 files changed, 12 insertions(+), 16 deletions(-) diff --git a/sql/mysqld.cc b/sql/mysqld.cc index b1cf5ab2796..75eb2230b32 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -119,6 +119,8 @@ #include +#include + #define mysqld_charset &my_charset_latin1 /* We have HAVE_valgrind below as this speeds up the shutdown of MySQL */ @@ -4445,21 +4447,6 @@ static int init_common_variables() mysql_real_data_home_len= uint(strlen(mysql_real_data_home)); - if (!opt_abort) - { - if (IS_SYSVAR_AUTOSIZE(&server_version_ptr)) - sql_print_information("%s (mysqld %s) starting as process %lu ...", - my_progname, server_version, (ulong) getpid()); - else - { - char real_server_version[SERVER_VERSION_LENGTH]; - set_server_version(real_server_version, sizeof(real_server_version)); - sql_print_information("%s (mysqld %s as %s) starting as process %lu ...", - my_progname, real_server_version, server_version, - (ulong) getpid()); - } - } - sf_leaking_memory= 0; // no memory leaks from now on #ifndef EMBEDDED_LIBRARY @@ -5258,6 +5245,13 @@ static int init_server_components() error_handler_hook= my_message_sql; proc_info_hook= set_thd_stage_info; + /* + Print source revision hash, as one of the first lines, if not the + first in error log, for troubleshooting and debugging purposes + */ + sql_print_information("Starting MariaDB %s source revision %s as process %lu", + server_version, SOURCE_REVISION, (ulong) getpid()); + #ifdef WITH_PERFSCHEMA_STORAGE_ENGINE /* Parsing the performance schema command line option may have reported diff --git a/sql/signal_handler.cc b/sql/signal_handler.cc index 915291185d5..a01950c99af 100644 --- a/sql/signal_handler.cc +++ b/sql/signal_handler.cc @@ -22,6 +22,7 @@ #include "mysqld.h" #include "sql_class.h" #include "my_stacktrace.h" +#include #ifdef __WIN__ #include @@ -177,7 +178,8 @@ extern "C" sig_handler handle_fatal_signal(int sig) "something is definitely wrong and this may fail.\n\n"); set_server_version(server_version, sizeof(server_version)); - my_safe_printf_stderr("Server version: %s\n", server_version); + my_safe_printf_stderr("Server version: %s source revision: %s\n", + server_version, SOURCE_REVISION); if (dflt_key_cache) my_safe_printf_stderr("key_buffer_size=%lu\n", -- cgit v1.2.1 From b21832ef1525e37f0dda4e17853bcca9e4e20f2b Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Wed, 26 Oct 2022 22:08:32 -0700 Subject: MDEV-27624 Wrong result for nested left join using not_exists optimization This bug affected queries with nested left joins having the same last inner table such that not_exists optimization could be applied to the most inner outer join when the optimizer chose to use join buffers. The bug could lead to producing a wrong result set. If the WHERE condition of a query contains a conjunctive IS NULL predicate over a non-nullable column of an inner table of a non-nested outer join, then not_exists optimization can be applied to the outer join. With this optimization, when looking for matches for a certain record from the outer table of the join, the records of the inner table can be ignored right after the first match satisfying the ON condition is found. In the case of nested outer joins having the same last inner table this optimization can still be applied, but only if all ON conditions of the embedding outer joins are satisfied. Such a check was missing in the code that tried to apply not_exists optimization when join buffers were used for outer join operations.
This problem has already been fixed by the patch for bug MDEV-7992, yet
there it was resolved only for the cases when join buffers were not used
for outer joins.

Approved by Oleksandr Byelkin

---
 mysql-test/main/join_nested.result | 52 +++++++++++++++++++++++++++++++++
 mysql-test/main/join_nested.test | 38 ++++++++++++++++++++++++
 mysql-test/main/join_nested_jcl6.result | 52 +++++++++++++++++++++++++++++++++
 sql/sql_join_cache.cc | 35 ++++++++++++++++++----
 4 files changed, 171 insertions(+), 6 deletions(-)

diff --git a/mysql-test/main/join_nested.result b/mysql-test/main/join_nested.result
index 5ab94a6b568..1efb2e64116 100644
--- a/mysql-test/main/join_nested.result
+++ b/mysql-test/main/join_nested.result
@@ -1999,3 +1999,55 @@ Note 1003 select `test`.`t3`.`pk` AS `pk`,`test`.`t3`.`c1` AS `c1`,`test`.`t3`.`
 DROP TABLE t1,t2,t3;
 set join_cache_level= @save_join_cache_level;
 set optimizer_switch=@save_optimizer_switch;
+#
+# MDEV-27624: Nested left joins with not_exists optimization
+# for most inner left join
+#
+set @save_join_cache_level= @@join_cache_level;
+CREATE TABLE t1 (a INT NOT NULL, b INT, c INT);
+INSERT INTO t1 VALUES (1,1,1), (1,2,1), (1,3,1);
+CREATE TABLE t2(a INT NOT NULL);
+INSERT INTO t2 VALUES (1), (2);
+CREATE TABLE t3(a INT not null, b INT);
+INSERT INTO t3 VALUES (1, 1), (2, 1), (3, 1);
+set join_cache_level = 0;
+EXPLAIN SELECT *
+FROM t1
+LEFT JOIN
+( t2 LEFT JOIN t3 ON t2.a = t3.b )
+ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
+WHERE t1.c = 1 AND t3.a is NULL;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
+1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where
+1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Not exists
+SELECT *
+FROM t1
+LEFT JOIN
+( t2 LEFT JOIN t3 ON t2.a = t3.b )
+ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
+WHERE t1.c = 1 AND t3.a is NULL;
+a b c a a b
+1 3 1 NULL NULL NULL
+set join_cache_level = 2;
+EXPLAIN SELECT *
+FROM t1
+LEFT JOIN
+( t2 LEFT JOIN t3 ON t2.a = t3.b )
+ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
+WHERE t1.c = 1 AND t3.a is NULL;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
+1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Not exists; Using join buffer (incremental, BNL join)
+SELECT *
+FROM t1
+LEFT JOIN
+( t2 LEFT JOIN t3 ON t2.a = t3.b )
+ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL)
+WHERE t1.c = 1 AND t3.a is NULL;
+a b c a a b
+1 3 1 NULL NULL NULL
+DROP TABLE t1, t2, t3;
+set join_cache_level= @save_join_cache_level;
+# end of 10.3 tests
diff --git a/mysql-test/main/join_nested.test b/mysql-test/main/join_nested.test
index eaf754fb0ca..6b2ae9a457f 100644
--- a/mysql-test/main/join_nested.test
+++ b/mysql-test/main/join_nested.test
@@ -1419,3 +1419,41 @@ DROP TABLE t1,t2,t3;

 set join_cache_level= @save_join_cache_level;
 set optimizer_switch=@save_optimizer_switch;
+
+--echo #
+--echo # MDEV-27624: Nested left joins with not_exists optimization
+--echo # for most inner left join
+--echo #
+
+set @save_join_cache_level= @@join_cache_level;
+
+CREATE TABLE t1 (a INT NOT NULL, b INT, c INT);
+INSERT INTO t1 VALUES (1,1,1), (1,2,1), (1,3,1);
+
+CREATE TABLE t2(a INT NOT NULL);
+INSERT INTO t2 VALUES (1), (2);
+
+CREATE TABLE t3(a INT not null, b INT);
+INSERT INTO t3 VALUES (1, 1), (2, 1), (3, 1);
+
+let $q=
+SELECT *
+FROM t1 + LEFT JOIN + ( t2 LEFT JOIN t3 ON t2.a = t3.b ) + ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL) +WHERE t1.c = 1 AND t3.a is NULL; + +set join_cache_level = 0; +eval EXPLAIN $q; +eval $q; + +set join_cache_level = 2; +eval EXPLAIN $q; +eval $q; + +DROP TABLE t1, t2, t3; + +set join_cache_level= @save_join_cache_level; + +--echo # end of 10.3 tests diff --git a/mysql-test/main/join_nested_jcl6.result b/mysql-test/main/join_nested_jcl6.result index 541cd47bf04..338705c0a29 100644 --- a/mysql-test/main/join_nested_jcl6.result +++ b/mysql-test/main/join_nested_jcl6.result @@ -2008,6 +2008,58 @@ Note 1003 select `test`.`t3`.`pk` AS `pk`,`test`.`t3`.`c1` AS `c1`,`test`.`t3`.` DROP TABLE t1,t2,t3; set join_cache_level= @save_join_cache_level; set optimizer_switch=@save_optimizer_switch; +# +# MDEV-27624: Nested left joins with not_exists optimization +# for most inner left join +# +set @save_join_cache_level= @@join_cache_level; +CREATE TABLE t1 (a INT NOT NULL, b INT, c INT); +INSERT INTO t1 VALUES (1,1,1), (1,2,1), (1,3,1); +CREATE TABLE t2(a INT NOT NULL); +INSERT INTO t2 VALUES (1), (2); +CREATE TABLE t3(a INT not null, b INT); +INSERT INTO t3 VALUES (1, 1), (2, 1), (3, 1); +set join_cache_level = 0; +EXPLAIN SELECT * +FROM t1 +LEFT JOIN +( t2 LEFT JOIN t3 ON t2.a = t3.b ) +ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL) +WHERE t1.c = 1 AND t3.a is NULL; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where +1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where +1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Not exists +SELECT * +FROM t1 +LEFT JOIN +( t2 LEFT JOIN t3 ON t2.a = t3.b ) +ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL) +WHERE t1.c = 1 AND t3.a is NULL; +a b c a a b +1 3 1 NULL NULL NULL +set join_cache_level = 2; +EXPLAIN SELECT * +FROM t1 +LEFT JOIN +( t2 LEFT JOIN t3 ON t2.a = t3.b ) +ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL) +WHERE t1.c = 1 AND t3.a is NULL; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where +1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join) +1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Not exists; Using join buffer (incremental, BNL join) +SELECT * +FROM t1 +LEFT JOIN +( t2 LEFT JOIN t3 ON t2.a = t3.b ) +ON t2.a = 1 AND (t3.b = t1.a AND t3.a > t1.b OR t3.a is NULL) +WHERE t1.c = 1 AND t3.a is NULL; +a b c a a b +1 3 1 NULL NULL NULL +DROP TABLE t1, t2, t3; +set join_cache_level= @save_join_cache_level; +# end of 10.3 tests CREATE TABLE t5 (a int, b int, c int, PRIMARY KEY(a), KEY b_i (b)); CREATE TABLE t6 (a int, b int, c int, PRIMARY KEY(a), KEY b_i (b)); CREATE TABLE t7 (a int, b int, c int, PRIMARY KEY(a), KEY b_i (b)); diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc index 86df1d32bd9..0804f4f9463 100644 --- a/sql/sql_join_cache.cc +++ b/sql/sql_join_cache.cc @@ -2287,11 +2287,7 @@ enum_nested_loop_state JOIN_CACHE::join_matching_records(bool skip_last) int error; enum_nested_loop_state rc= NESTED_LOOP_OK; join_tab->table->null_row= 0; - bool check_only_first_match= - join_tab->check_only_first_match() && - (!join_tab->first_inner || // semi-join case - join_tab->first_inner == join_tab->first_unmatched); // outer join case - bool outer_join_first_inner= join_tab->is_first_inner_for_outer_join(); + bool check_only_first_match= join_tab->check_only_first_match(); 
DBUG_ENTER("JOIN_CACHE::join_matching_records"); /* Return at once if there are no records in the join buffer */ @@ -2355,7 +2351,34 @@ enum_nested_loop_state JOIN_CACHE::join_matching_records(bool skip_last) Also those records that must be null complemented are not considered as candidates for matches. */ - if ((!check_only_first_match && !outer_join_first_inner) || + + bool not_exists_opt_is_applicable= true; + if (check_only_first_match && join_tab->first_inner) + { + /* + This is the case with not_exists optimization for nested outer join + when join_tab is the last inner table for one or more embedding outer + joins. To safely use 'not_exists' optimization in this case we have + to check that the match flags for all these embedding outer joins are + in the 'on' state. + (See also a similar check in evaluate_join_record() for the case when + join buffer are not used.) + */ + for (JOIN_TAB *tab= join_tab->first_inner; + tab && tab->first_inner && tab->last_inner == join_tab; + tab= tab->first_inner->first_upper) + { + if (get_match_flag_by_pos_from_join_buffer(rec_ptr, tab) != + MATCH_FOUND) + { + not_exists_opt_is_applicable= false; + break; + } + } + } + + if (!check_only_first_match || + (join_tab->first_inner && !not_exists_opt_is_applicable) || !skip_next_candidate_for_match(rec_ptr)) { read_next_candidate_for_match(rec_ptr); -- cgit v1.2.1 From e64e6768e00c6ef58824b79f2009aa5668ccd538 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Sun, 8 Jan 2023 09:26:50 +1100 Subject: MDEV-17093: SOURCE_REVISION in log (postfix - not in help) Don't display the source revision in the mysqld --help output. --- sql/mysqld.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 75eb2230b32..e4a814b82dd 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -5249,8 +5249,9 @@ static int init_server_components()  Print source revision hash, as one of the first lines, if not the  first in error log, for troubleshooting and debugging purposes  */ - sql_print_information("Starting MariaDB %s source revision %s as process %lu", - server_version, SOURCE_REVISION, (ulong) getpid()); + if (!opt_help) + sql_print_information("Starting MariaDB %s source revision %s as process %lu", + server_version, SOURCE_REVISION, (ulong) getpid()); #ifdef WITH_PERFSCHEMA_STORAGE_ENGINE /* -- cgit v1.2.1 From d7f447915c96681c85abf00d22de589a5332a6da Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Sun, 8 Jan 2023 09:53:09 +1100 Subject: MDEV-30342 Wrong "Truncated incorrect DECIMAL value" warning/error regression from MDEV-29540 / 8c389393695. INSERT SELECT errors needed to be unconditionally ignored. As this touches the CREATE .. SELECT functionality, show the equalivent test there. --- mysql-test/main/create.result | 11 +++++++++++ mysql-test/main/create.test | 13 +++++++++++++ mysql-test/main/insert_select.result | 13 +++++++++++++ mysql-test/main/insert_select.test | 17 +++++++++++++++++ sql/sql_class.h | 4 ++-- sql/sql_insert.cc | 12 ++++++------ 6 files changed, 62 insertions(+), 8 deletions(-) diff --git a/mysql-test/main/create.result b/mysql-test/main/create.result index b7ed4388db7..290f293325d 100644 --- a/mysql-test/main/create.result +++ b/mysql-test/main/create.result @@ -2121,5 +2121,16 @@ Warnings: Warning 1280 Name 'foo' ignored for PRIMARY key. 
DROP TABLE t1; # +# MDEV-30342 Wrong "Truncated incorrect DECIMAL value" warning/error +# +create table t1(c1 varchar(1)); +insert into t1(c1) values('#'); +select @@sql_mode like '%strict_all_tables%'; +@@sql_mode like '%strict_all_tables%' +0 +create table t2 as select if(c1 = '#', c1 = 0, c1) as c1 from t1; +ERROR 22007: Truncated incorrect DECIMAL value: '#' +drop table t1; +# # End of 10.3 tests # diff --git a/mysql-test/main/create.test b/mysql-test/main/create.test index f1dfba65b4b..83c894dd8dc 100644 --- a/mysql-test/main/create.test +++ b/mysql-test/main/create.test @@ -1978,6 +1978,19 @@ create table t1 (c int(10) unsigned) engine=memory transactional=0; CREATE TABLE t1 ( id1 INT, id2 INT, CONSTRAINT `foo` PRIMARY KEY (id1), CONSTRAINT `bar` UNIQUE KEY(id2)); DROP TABLE t1; +--echo # +--echo # MDEV-30342 Wrong "Truncated incorrect DECIMAL value" warning/error +--echo # + +create table t1(c1 varchar(1)); +insert into t1(c1) values('#'); + +select @@sql_mode like '%strict_all_tables%'; +--error ER_TRUNCATED_WRONG_VALUE +create table t2 as select if(c1 = '#', c1 = 0, c1) as c1 from t1; + +drop table t1; + --echo # --echo # End of 10.3 tests --echo # diff --git a/mysql-test/main/insert_select.result b/mysql-test/main/insert_select.result index 6d939d0ed8e..29618c6ddd4 100644 --- a/mysql-test/main/insert_select.result +++ b/mysql-test/main/insert_select.result @@ -1042,4 +1042,17 @@ select * from t1; a deallocate prepare stmt; drop table t1,t2,t3; +# +# MDEV-30342 Wrong "Truncated incorrect DECIMAL value" warning/error +# +create table t1(c1 varchar(1)); +create table t2(c1 varchar(1)); +insert into t1(c1) values('#'); +select @@sql_mode like '%strict_all_tables%'; +@@sql_mode like '%strict_all_tables%' +0 +insert into t2(c1) select if(c1 = '#', c1 = 0, c1) as c1 from t1; +drop table t1, t2; +# # End of 10.3 test +# diff --git a/mysql-test/main/insert_select.test b/mysql-test/main/insert_select.test index 6baa7e43c34..a3604e38f34 100644 --- a/mysql-test/main/insert_select.test +++ b/mysql-test/main/insert_select.test @@ -595,4 +595,21 @@ deallocate prepare stmt; drop table t1,t2,t3; + +--echo # +--echo # MDEV-30342 Wrong "Truncated incorrect DECIMAL value" warning/error +--echo # + +create table t1(c1 varchar(1)); +create table t2(c1 varchar(1)); + +insert into t1(c1) values('#'); + +select @@sql_mode like '%strict_all_tables%'; +insert into t2(c1) select if(c1 = '#', c1 = 0, c1) as c1 from t1; + +drop table t1, t2; + +--echo # --echo # End of 10.3 test +--echo # diff --git a/sql/sql_class.h b/sql/sql_class.h index 5e209f56458..49a8509b519 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -5455,7 +5455,7 @@ class select_insert :public select_result_interceptor { int prepare(List &list, SELECT_LEX_UNIT *u); virtual int prepare2(JOIN *join); virtual int send_data(List &items); - virtual bool store_values(List &values, bool ignore_errors); + virtual bool store_values(List &values); virtual bool can_rollback_data() { return 0; } bool prepare_eof(); bool send_ok_packet(); @@ -5497,7 +5497,7 @@ public: int prepare(List &list, SELECT_LEX_UNIT *u); int binlog_show_create_table(TABLE **tables, uint count); - bool store_values(List &values, bool ignore_errors); + bool store_values(List &values); bool send_eof(); virtual void abort_result_set(); virtual bool can_rollback_data() { return 1; } diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 43007b2243a..b743fc88061 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -3928,7 +3928,7 @@ int select_insert::send_data(List &values) 
DBUG_RETURN(0); thd->count_cuted_fields= CHECK_FIELD_WARN; // Calculate cuted fields - if (store_values(values, info.ignore)) + if (store_values(values)) DBUG_RETURN(1); thd->count_cuted_fields= CHECK_FIELD_ERROR_FOR_NULL; if (unlikely(thd->is_error())) @@ -3986,17 +3986,17 @@ int select_insert::send_data(List &values) } -bool select_insert::store_values(List &values, bool ignore_errors) +bool select_insert::store_values(List &values) { DBUG_ENTER("select_insert::store_values"); bool error; if (fields->elements) error= fill_record_n_invoke_before_triggers(thd, table, *fields, values, - ignore_errors, TRG_EVENT_INSERT); + true, TRG_EVENT_INSERT); else error= fill_record_n_invoke_before_triggers(thd, table, table->field_to_fill(), - values, ignore_errors, TRG_EVENT_INSERT); + values, true, TRG_EVENT_INSERT); DBUG_RETURN(error); } @@ -4669,10 +4669,10 @@ select_create::binlog_show_create_table(TABLE **tables, uint count) return result; } -bool select_create::store_values(List &values, bool ignore_errors) +bool select_create::store_values(List &values) { return fill_record_n_invoke_before_triggers(thd, table, field, values, - ignore_errors, TRG_EVENT_INSERT); + true, TRG_EVENT_INSERT); } -- cgit v1.2.1 From 3d95737e5596e3f719bbdfb68669e2870c5dca3f Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 23 Dec 2022 23:58:56 +0100 Subject: galera.galera_wsrep_new_cluster: fix bad merge --- .../suite/galera/r/galera_wsrep_new_cluster.result | 8 ------- .../suite/galera/t/galera_wsrep_new_cluster.test | 28 ---------------------- 2 files changed, 36 deletions(-) diff --git a/mysql-test/suite/galera/r/galera_wsrep_new_cluster.result b/mysql-test/suite/galera/r/galera_wsrep_new_cluster.result index 87f61e2be62..143dee3e6e4 100644 --- a/mysql-test/suite/galera/r/galera_wsrep_new_cluster.result +++ b/mysql-test/suite/galera/r/galera_wsrep_new_cluster.result @@ -9,14 +9,6 @@ connection node_2; Cleaning grastate.dat file ... Starting server ... connection node_1; -connection node_2; -connection node_2; -Shutting down server ... -connection node_1; -connection node_2; -Cleaning grastate.dat file ... -Starting server ... -connection node_1; SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status'; VARIABLE_VALUE Primary diff --git a/mysql-test/suite/galera/t/galera_wsrep_new_cluster.test b/mysql-test/suite/galera/t/galera_wsrep_new_cluster.test index 3fff51fa26d..94ea008cb16 100644 --- a/mysql-test/suite/galera/t/galera_wsrep_new_cluster.test +++ b/mysql-test/suite/galera/t/galera_wsrep_new_cluster.test @@ -34,34 +34,6 @@ --source include/start_mysqld.inc --source include/wait_until_ready.inc -# Save original auto_increment_offset values. ---let $node_1=node_1 ---let $node_2=node_2 ---source include/auto_increment_offset_save.inc - ---connection node_2 ---echo Shutting down server ... ---source include/shutdown_mysqld.inc - ---connection node_1 - ---let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size' ---source include/wait_condition.inc - ---connection node_2 - -# -# Delete grastate.dat with safe_to_bootstrap: 0 -# ---echo Cleaning grastate.dat file ... ---remove_file $MYSQLTEST_VARDIR/mysqld.2/data/grastate.dat - ---echo Starting server ... 
---let $restart_noprint=2
---let $start_mysqld_params="--wsrep-new-cluster"
---source include/start_mysqld.inc
---source include/wait_until_ready.inc
-
 --connection node_1

 --let $wait_condition = SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
-- cgit v1.2.1

From ad27e95d542dfb2ad3a31e4fdf1063cc0685d1f2 Mon Sep 17 00:00:00 2001
From: Sergei Golubchik
Date: Sat, 24 Dec 2022 13:45:06 +0100
Subject: disable hanging galera test

---
 mysql-test/suite/galera/disabled.def | 1 +
 1 file changed, 1 insertion(+)

diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def
index bff8e472539..ce0e8d986a4 100644
--- a/mysql-test/suite/galera/disabled.def
+++ b/mysql-test/suite/galera/disabled.def
@@ -16,5 +16,6 @@ galera_sst_encrypted : MDEV-29876 Galera test failure on galera_sst_encrypted
 galera.MW-284 : MDEV-29861 Galera test case hangs
 galera.galera_binlog_checksum : MDEV-29861 Galera test case hangs
 galera_var_notify_ssl_ipv6 : MDEV-29861 Galera test case hangs
+galera_var_notify_cmd: MDEV-29861 Galera test case hangs
 galera_var_node_address : MDEV-20485 Galera test failure
 MDEV-26575 : MDEV-29878 Galera test failure on MDEV-26575
-- cgit v1.2.1

From 610cea3ddae5aa7756a3ec1409466c9e0c5fd5b3 Mon Sep 17 00:00:00 2001
From: Sergei Golubchik
Date: Thu, 17 Nov 2022 17:30:59 +0100
Subject: cleanup

Helper class to switch to relaxed checks during field copy.
Temporarily.

---
 mysql-test/main/olap.result | 1 -
 mysql-test/main/olap.test | 4 ----
 mysql-test/main/type_date.result | 1 -
 mysql-test/main/type_date.test | 4 ----
 sql/item.cc | 6 +-----
 sql/sql_class.h | 13 +++++++++++++
 sql/sql_select.h | 6 +-----
 7 files changed, 15 insertions(+), 20 deletions(-)

diff --git a/mysql-test/main/olap.result b/mysql-test/main/olap.result
index 93eda747d83..cc261c92603 100644
--- a/mysql-test/main/olap.result
+++ b/mysql-test/main/olap.result
@@ -1,4 +1,3 @@
-drop table if exists t1,t2;
 set @sav_dpi= @@div_precision_increment;
 set div_precision_increment= 5;
 show variables like 'div_precision_increment';
diff --git a/mysql-test/main/olap.test b/mysql-test/main/olap.test
index 0c990300b68..6f4048bf120 100644
--- a/mysql-test/main/olap.test
+++ b/mysql-test/main/olap.test
@@ -1,7 +1,3 @@
---disable_warnings
-drop table if exists t1,t2;
---enable_warnings
-
 set @sav_dpi= @@div_precision_increment;
 set div_precision_increment= 5;
 show variables like 'div_precision_increment';
diff --git a/mysql-test/main/type_date.result b/mysql-test/main/type_date.result
index e22b15c88cf..0cf78458817 100644
--- a/mysql-test/main/type_date.result
+++ b/mysql-test/main/type_date.result
@@ -1,4 +1,3 @@
-drop table if exists t1,t2;
 create table t1 (a char(16), b date, c datetime);
 insert into t1 SET a='test 2000-01-01', b='2000-01-01', c='2000-01-01';
 select * from t1 where c = '2000-01-01';
diff --git a/mysql-test/main/type_date.test b/mysql-test/main/type_date.test
index 27d03bd888c..74997aa0ef7 100644
--- a/mysql-test/main/type_date.test
+++ b/mysql-test/main/type_date.test
@@ -1,10 +1,6 @@
 #
 # test of problem with date fields
 #
---disable_warnings
-drop table if exists t1,t2;
---enable_warnings
-
 create table t1 (a char(16), b date, c datetime);
 insert into t1 SET a='test 2000-01-01', b='2000-01-01', c='2000-01-01';
 select * from t1 where c = '2000-01-01';
diff --git a/sql/item.cc b/sql/item.cc
index 630a408b13f..8af83f3a7f1 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -1472,12 +1472,8 @@ int Item::save_in_field_no_warnings(Field *field, bool
no_conversions) { int res; TABLE *table= field->table; - THD *thd= table->in_use; - Check_level_instant_set check_level_save(thd, CHECK_FIELD_IGNORE); - Sql_mode_save sql_mode(thd); - thd->variables.sql_mode&= ~(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE); - thd->variables.sql_mode|= MODE_INVALID_DATES; MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); + Use_relaxed_field_copy urfc(table->in_use); res= save_in_field(field, no_conversions); dbug_tmp_restore_column_map(&table->write_set, old_map); return res; diff --git a/sql/sql_class.h b/sql/sql_class.h index e82c786c96f..3abd9b9308d 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -6881,6 +6881,19 @@ public: }; +class Use_relaxed_field_copy: public Sql_mode_save, + public Check_level_instant_set +{ +public: + Use_relaxed_field_copy(THD *thd) : + Sql_mode_save(thd), Check_level_instant_set(thd, CHECK_FIELD_IGNORE) + { + thd->variables.sql_mode&= ~(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE); + thd->variables.sql_mode|= MODE_INVALID_DATES; + } +}; + + /** This class resembles the SQL Standard schema qualified object name: ::= [ ] diff --git a/sql/sql_select.h b/sql/sql_select.h index b3945c499f8..d71fc034a9d 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -1914,11 +1914,7 @@ public: enum store_key_result copy() { enum store_key_result result; - THD *thd= to_field->table->in_use; - Check_level_instant_set check_level_save(thd, CHECK_FIELD_IGNORE); - Sql_mode_save sql_mode(thd); - thd->variables.sql_mode&= ~(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE); - thd->variables.sql_mode|= MODE_INVALID_DATES; + Use_relaxed_field_copy urfc(to_field->table->in_use); result= copy_inner(); return result; } -- cgit v1.2.1 From 32f09df2b86ed4d2b9b63b26b5c95f8555bc1db3 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 17 Nov 2022 17:33:34 +0100 Subject: MDEV-29890 Update with inner join false row count result when creating a temp table field from an actual table field, these two fields are supposed to be mostly identical (except for BIT field storage), in particular, temp field should have the same default as the orig field, even if the sql_mode has been changed meanwhile (e.g. 
to include NO_ZERO_DATE) --- mysql-test/main/type_date.result | 18 ++++++++++++++++++ mysql-test/main/type_date.test | 17 +++++++++++++++++ sql/sql_select.cc | 4 +++- 3 files changed, 38 insertions(+), 1 deletion(-) diff --git a/mysql-test/main/type_date.result b/mysql-test/main/type_date.result index 0cf78458817..d1ea71dae68 100644 --- a/mysql-test/main/type_date.result +++ b/mysql-test/main/type_date.result @@ -1144,5 +1144,23 @@ t2 CREATE TABLE `t2` ( DROP TABLE t2; DROP TABLE t1; # +# MDEV-29890 Update with inner join false row count result +# +set sql_mode='NO_ZERO_DATE'; +create table t1 (a1 bigint primary key, a2 date not null, a3 bigint not null); +create table t2 (b1 bigint primary key); +insert into t2 (b1) values (1); +insert into t1 (a1, a2, a3) values (1, current_date, 1),( 2, current_date, 1); +update t1 inner join t2 on t1.a3 = t2.b1 set t1.a2 = t1.a2 + interval 1 day; +select row_count(); +row_count() +2 +set sql_mode=''; +alter table t1 modify a2 date not null default '0000-00-00'; +set sql_mode='NO_ZERO_DATE'; +update t1 inner join t2 on t1.a3 = t2.b1 set t1.a2 = t1.a2 + interval 1 day; +drop table t1, t2; +set sql_mode=default; +# # End of 10.4 tests # diff --git a/mysql-test/main/type_date.test b/mysql-test/main/type_date.test index 74997aa0ef7..cbc3c67bf64 100644 --- a/mysql-test/main/type_date.test +++ b/mysql-test/main/type_date.test @@ -784,6 +784,23 @@ SHOW CREATE TABLE t2; DROP TABLE t2; DROP TABLE t1; +--echo # +--echo # MDEV-29890 Update with inner join false row count result +--echo # +set sql_mode='NO_ZERO_DATE'; +create table t1 (a1 bigint primary key, a2 date not null, a3 bigint not null); +create table t2 (b1 bigint primary key); +insert into t2 (b1) values (1); +insert into t1 (a1, a2, a3) values (1, current_date, 1),( 2, current_date, 1); +update t1 inner join t2 on t1.a3 = t2.b1 set t1.a2 = t1.a2 + interval 1 day; +select row_count(); +set sql_mode=''; +alter table t1 modify a2 date not null default '0000-00-00'; +set sql_mode='NO_ZERO_DATE'; +update t1 inner join t2 on t1.a3 = t2.b1 set t1.a2 = t1.a2 + interval 1 day; +drop table t1, t2; +set sql_mode=default; + --echo # --echo # End of 10.4 tests --echo # diff --git a/sql/sql_select.cc b/sql/sql_select.cc index f313d571b5b..d7b2891c9d4 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -19035,8 +19035,10 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List &fields, { /* Copy default value. We have to use field_conv() for copy, instead of - memcpy(), because bit_fields may be stored differently + memcpy(), because bit_fields may be stored differently. 
+ But otherwise we copy as is, in particular, ignore NO_ZERO_DATE, etc */ + Use_relaxed_field_copy urfc(thd); my_ptrdiff_t ptr_diff= (orig_field->table->s->default_values - orig_field->table->record[0]); field->set_notnull(); -- cgit v1.2.1 From 1e6ad0ce13c5d2e4125df8006316d9dfe0f8d649 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 9 Jan 2023 12:49:37 +0100 Subject: don't set default value in temp table if NO_DEFAULT_VALUE_FLAG when an internal temporary table field is created from a real field, a new temp field should only copy a default from the source field when the latter has it --- mysql-test/main/default.result | 21 +++++++++++++++++++++ mysql-test/main/default.test | 14 ++++++++++++++ mysql-test/main/olap.result | 10 +++++----- sql/sql_select.cc | 3 ++- 4 files changed, 42 insertions(+), 6 deletions(-) diff --git a/mysql-test/main/default.result b/mysql-test/main/default.result index 100a2fe6a6f..ae835940af3 100644 --- a/mysql-test/main/default.result +++ b/mysql-test/main/default.result @@ -3463,5 +3463,26 @@ SELECT 1 FROM t1 GROUP BY DEFAULT(pk); 1 DROP TABLE t1; # +# MDEV-29890 Update with inner join false row count result +# +create table t1 (a int not null); +select column_name, column_default, column_default is not null as 'has_default', is_nullable from information_schema.columns where table_schema='test' and table_name='t1'; +column_name column_default has_default is_nullable +a NULL 0 NO +create or replace view v1 as select * from t1; +select column_name, column_default, column_default is not null as 'has_default', is_nullable from information_schema.columns where table_schema='test' and table_name='v1'; +column_name column_default has_default is_nullable +a NULL 0 NO +create or replace view v1 as select * from t1 group by a; +select column_name, column_default, column_default is not null as 'has_default', is_nullable from information_schema.columns where table_schema='test' and table_name='v1'; +column_name column_default has_default is_nullable +a NULL 0 NO +create or replace view v1 as select * from t1 group by a with rollup; +select column_name, column_default, column_default is not null as 'has_default', is_nullable from information_schema.columns where table_schema='test' and table_name='v1'; +column_name column_default has_default is_nullable +a NULL 1 YES +drop view v1; +drop table t1; +# # End of 10.4 test # diff --git a/mysql-test/main/default.test b/mysql-test/main/default.test index 3064209a4a2..13f611246c9 100644 --- a/mysql-test/main/default.test +++ b/mysql-test/main/default.test @@ -2169,6 +2169,20 @@ INSERT INTO t1 VALUES (),(); SELECT 1 FROM t1 GROUP BY DEFAULT(pk); DROP TABLE t1; +--echo # +--echo # MDEV-29890 Update with inner join false row count result +--echo # +create table t1 (a int not null); +select column_name, column_default, column_default is not null as 'has_default', is_nullable from information_schema.columns where table_schema='test' and table_name='t1'; +create or replace view v1 as select * from t1; +select column_name, column_default, column_default is not null as 'has_default', is_nullable from information_schema.columns where table_schema='test' and table_name='v1'; +create or replace view v1 as select * from t1 group by a; +select column_name, column_default, column_default is not null as 'has_default', is_nullable from information_schema.columns where table_schema='test' and table_name='v1'; +create or replace view v1 as select * from t1 group by a with rollup; +select column_name, column_default, column_default is not 
null as 'has_default', is_nullable from information_schema.columns where table_schema='test' and table_name='v1'; +drop view v1; +drop table t1; + --echo # --echo # End of 10.4 test --echo # diff --git a/mysql-test/main/olap.result b/mysql-test/main/olap.result index cc261c92603..b837aeaf1db 100644 --- a/mysql-test/main/olap.result +++ b/mysql-test/main/olap.result @@ -695,7 +695,7 @@ CREATE VIEW v1 AS SELECT a, LENGTH(a), COUNT(*) FROM t1 GROUP BY a WITH ROLLUP; DESC v1; Field Type Null Key Default Extra -a int(11) YES 0 +a int(11) YES NULL LENGTH(a) int(10) YES NULL COUNT(*) bigint(21) NO 0 SELECT * FROM v1; @@ -858,7 +858,7 @@ INSERT INTO t1 VALUES (1),(2); CREATE OR REPLACE VIEW v1 AS SELECT a, LENGTH(a), COUNT(*) FROM t1 GROUP BY a WITH ROLLUP; DESCRIBE v1; Field Type Null Key Default Extra -a int(11) YES 0 +a int(11) YES NULL LENGTH(a) int(10) YES NULL COUNT(*) bigint(21) NO 0 DROP VIEW v1; @@ -868,7 +868,7 @@ INSERT INTO t1 VALUES (1),(2); CREATE OR REPLACE VIEW v1 AS SELECT a, LENGTH(a), COUNT(*) FROM t1 GROUP BY a WITH ROLLUP; DESCRIBE v1; Field Type Null Key Default Extra -a bigint(20) YES 0 +a bigint(20) YES NULL LENGTH(a) int(10) YES NULL COUNT(*) bigint(21) NO 0 DROP VIEW v1; @@ -893,8 +893,8 @@ DROP VIEW v1; CREATE VIEW v1 AS SELECT a, b FROM t1 GROUP BY a,b WITH ROLLUP; DESC v1; Field Type Null Key Default Extra -a int(11) YES 0 -b int(20) YES 0 +a int(11) YES NULL +b int(20) YES NULL DROP VIEW v1; DROP TABLE t1; # diff --git a/sql/sql_select.cc b/sql/sql_select.cc index d7b2891c9d4..d5519e9f541 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -18241,7 +18241,8 @@ Field *Item_field::create_tmp_field_ex(TABLE *table, src->set_field(field); if (!(result= create_tmp_field_from_item_field(table, NULL, param))) return NULL; - if (field->eq_def(result)) + if (!(field->flags & NO_DEFAULT_VALUE_FLAG) && + field->eq_def(result)) src->set_default_field(field); return result; } -- cgit v1.2.1 From df82d68421699f5d51df15f0e5c1a6aa78bced63 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 17 Nov 2022 20:03:33 +0100 Subject: cleanup remove casts. uchar can store 1/0 as good as bool. --- sql/item.cc | 6 +++--- sql/sql_select.cc | 16 +++++++--------- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/sql/item.cc b/sql/item.cc index 8af83f3a7f1..a543664fdc1 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -7225,7 +7225,7 @@ bool Item_null::send(Protocol *protocol, st_value *buffer) bool Item::cache_const_expr_analyzer(uchar **arg) { - bool *cache_flag= (bool*)*arg; + uchar *cache_flag= *arg; if (!*cache_flag) { Item *item= real_item(); @@ -7264,9 +7264,9 @@ bool Item::cache_const_expr_analyzer(uchar **arg) Item* Item::cache_const_expr_transformer(THD *thd, uchar *arg) { - if (*(bool*)arg) + if (*arg) { - *((bool*)arg)= FALSE; + *arg= FALSE; Item_cache *cache= get_cache(thd); if (!cache) return NULL; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index d5519e9f541..d9922fddb8f 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -28287,20 +28287,20 @@ JOIN::reoptimize(Item *added_where, table_map join_tables, void JOIN::cache_const_exprs() { - bool cache_flag= FALSE; - bool *analyzer_arg= &cache_flag; + uchar cache_flag= FALSE; + uchar *analyzer_arg= &cache_flag; /* No need in cache if all tables are constant. 
*/ if (const_tables == table_count) return; if (conds) - conds->compile(thd, &Item::cache_const_expr_analyzer, (uchar **)&analyzer_arg, - &Item::cache_const_expr_transformer, (uchar *)&cache_flag); + conds->compile(thd, &Item::cache_const_expr_analyzer, &analyzer_arg, + &Item::cache_const_expr_transformer, &cache_flag); cache_flag= FALSE; if (having) - having->compile(thd, &Item::cache_const_expr_analyzer, (uchar **)&analyzer_arg, - &Item::cache_const_expr_transformer, (uchar *)&cache_flag); + having->compile(thd, &Item::cache_const_expr_analyzer, + &analyzer_arg, &Item::cache_const_expr_transformer, &cache_flag); for (JOIN_TAB *tab= first_depth_first_tab(this); tab; tab= next_depth_first_tab(this, tab)) @@ -28309,9 +28309,7 @@ void JOIN::cache_const_exprs() { cache_flag= FALSE; (*tab->on_expr_ref)->compile(thd, &Item::cache_const_expr_analyzer, - (uchar **)&analyzer_arg, - &Item::cache_const_expr_transformer, - (uchar *)&cache_flag); + &analyzer_arg, &Item::cache_const_expr_transformer, &cache_flag); } } } -- cgit v1.2.1 From 6cb84346e1bde63ec79dd5e3a7d80f69bb106ead Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 17 Nov 2022 19:23:08 +0100 Subject: MDEV-17869 AddressSanitizer: use-after-poison in Item_change_list::rollback_item_tree_changes it's incorrect to use change_item_tree() to replace arguments of top-level AND/OR, because they (arguments) are stored in a List, so a pointer to an argument is in the list_node, and individual list_node's of top-level AND/OR can be deleted in Item_cond::build_equal_items(). In that case rollback_item_tree_changes() will modify the deleted object. Luckily, it's not needed to use change_item_tree() for top-level AND/OR, because the whole top-level item is copied and preserved in prep_where and prep_on, and restored from there. So, just don't. Additionally to the test case in the commit it fixes * ASAN failure of main.opt_tvc --ps * ASAN failure of main.having_cond_pushdown --ps --- mysql-test/main/prepare.result | 16 ++++++++++++++++ mysql-test/main/prepare.test | 17 +++++++++++++++++ sql/item.h | 10 ++++++++++ sql/item_cmpfunc.cc | 20 ++++++++++++++------ sql/item_cmpfunc.h | 22 ++++++++++++++++++++-- sql/sql_lex.cc | 5 ++--- sql/sql_select.cc | 7 +++---- sql/sql_tvc.cc | 13 ++++--------- 8 files changed, 86 insertions(+), 24 deletions(-) diff --git a/mysql-test/main/prepare.result b/mysql-test/main/prepare.result index cfe6603dbbe..7c730bff0c5 100644 --- a/mysql-test/main/prepare.result +++ b/mysql-test/main/prepare.result @@ -64,3 +64,19 @@ SQRT(?) 
is not null # # End of 10.3 tests # +# +# MDEV-17869 AddressSanitizer: use-after-poison in Item_change_list::rollback_item_tree_changes +# +create table t1 (pk int, v1 varchar(1)); +insert t1 values (1,'v'),(2,'v'),(3,'c'); +create table t2 (pk int, v1 varchar(1)); +insert t2 values (1,'x'); +create table t3 (pk int, i1 int, v1 varchar(1)); +insert t3 values (10,8,9); +execute immediate 'select straight_join 1 from (t1 join t2 on (t1.v1 = t2.v1)) +where (3, 6) in (select tc.pk, t3.i1 from (t3 join t1 as tc on (tc.v1 = t3.v1)) having tc.pk > 1 );'; +1 +drop table t1, t2, t3; +# +# End of 10.4 tests +# diff --git a/mysql-test/main/prepare.test b/mysql-test/main/prepare.test index 4d1573eb0c8..bf37f6dc8d1 100644 --- a/mysql-test/main/prepare.test +++ b/mysql-test/main/prepare.test @@ -52,3 +52,20 @@ execute p1 using 17864960750176564435; --echo # --echo # End of 10.3 tests --echo # + +--echo # +--echo # MDEV-17869 AddressSanitizer: use-after-poison in Item_change_list::rollback_item_tree_changes +--echo # +create table t1 (pk int, v1 varchar(1)); +insert t1 values (1,'v'),(2,'v'),(3,'c'); +create table t2 (pk int, v1 varchar(1)); +insert t2 values (1,'x'); +create table t3 (pk int, i1 int, v1 varchar(1)); +insert t3 values (10,8,9); +execute immediate 'select straight_join 1 from (t1 join t2 on (t1.v1 = t2.v1)) +where (3, 6) in (select tc.pk, t3.i1 from (t3 join t1 as tc on (tc.v1 = t3.v1)) having tc.pk > 1 );'; +drop table t1, t2, t3; + +--echo # +--echo # End of 10.4 tests +--echo # diff --git a/sql/item.h b/sql/item.h index 9389250d6ec..89d47f0026b 100644 --- a/sql/item.h +++ b/sql/item.h @@ -1868,6 +1868,11 @@ public: } virtual Item* transform(THD *thd, Item_transformer transformer, uchar *arg); + virtual Item* top_level_transform(THD *thd, Item_transformer transformer, + uchar *arg) + { + return transform(thd, transformer, arg); + } /* This function performs a generic "compilation" of the Item tree. @@ -1892,6 +1897,11 @@ public: return ((this->*transformer) (thd, arg_t)); return 0; } + virtual Item* top_level_compile(THD *thd, Item_analyzer analyzer, uchar **arg_p, + Item_transformer transformer, uchar *arg_t) + { + return compile(thd, analyzer, arg_p, transformer, arg_t); + } virtual void traverse_cond(Cond_traverser traverser, void *arg, traverse_order order) diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index b6509fa8b25..278e11f4002 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -5032,7 +5032,8 @@ bool Item_cond::walk(Item_processor processor, bool walk_subquery, void *arg) Item returned as the result of transformation of the root node */ -Item *Item_cond::transform(THD *thd, Item_transformer transformer, uchar *arg) +Item *Item_cond::do_transform(THD *thd, Item_transformer transformer, uchar *arg, + bool toplevel) { DBUG_ASSERT(!thd->stmt_arena->is_stmt_prepare()); @@ -5040,7 +5041,8 @@ Item *Item_cond::transform(THD *thd, Item_transformer transformer, uchar *arg) Item *item; while ((item= li++)) { - Item *new_item= item->transform(thd, transformer, arg); + Item *new_item= toplevel ? item->top_level_transform(thd, transformer, arg) + : item->transform(thd, transformer, arg); if (!new_item) return 0; @@ -5050,7 +5052,9 @@ Item *Item_cond::transform(THD *thd, Item_transformer transformer, uchar *arg) Otherwise we'll be allocating a lot of unnecessary memory for change records at each execution. 
*/ - if (new_item != item) + if (toplevel) + *li.ref()= new_item; + else if (new_item != item) thd->change_item_tree(li.ref(), new_item); } return Item_func::transform(thd, transformer, arg); @@ -5081,8 +5085,8 @@ Item *Item_cond::transform(THD *thd, Item_transformer transformer, uchar *arg) Item returned as the result of transformation of the root node */ -Item *Item_cond::compile(THD *thd, Item_analyzer analyzer, uchar **arg_p, - Item_transformer transformer, uchar *arg_t) +Item *Item_cond::do_compile(THD *thd, Item_analyzer analyzer, uchar **arg_p, + Item_transformer transformer, uchar *arg_t, bool toplevel) { if (!(this->*analyzer)(arg_p)) return 0; @@ -5097,7 +5101,11 @@ Item *Item_cond::compile(THD *thd, Item_analyzer analyzer, uchar **arg_p, */ uchar *arg_v= *arg_p; Item *new_item= item->compile(thd, analyzer, &arg_v, transformer, arg_t); - if (new_item && new_item != item) + if (!new_item || new_item == item) + continue; + if (toplevel) + *li.ref()= new_item; + else thd->change_item_tree(li.ref(), new_item); } return Item_func::transform(thd, transformer, arg_t); diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 05b1f95ff36..01834fe06d7 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -3018,12 +3018,30 @@ public: bool top_level() { return abort_on_null; } void copy_andor_arguments(THD *thd, Item_cond *item); bool walk(Item_processor processor, bool walk_subquery, void *arg); - Item *transform(THD *thd, Item_transformer transformer, uchar *arg); + Item *do_transform(THD *thd, Item_transformer transformer, uchar *arg, bool toplevel); + Item *transform(THD *thd, Item_transformer transformer, uchar *arg) + { + return do_transform(thd, transformer, arg, 0); + } + Item *top_level_transform(THD *thd, Item_transformer transformer, uchar *arg) + { + return do_transform(thd, transformer, arg, 1); + } void traverse_cond(Cond_traverser, void *arg, traverse_order order); void neg_arguments(THD *thd); Item* propagate_equal_fields(THD *, const Context &, COND_EQUAL *); + Item *do_compile(THD *thd, Item_analyzer analyzer, uchar **arg_p, + Item_transformer transformer, uchar *arg_t, bool toplevel); Item *compile(THD *thd, Item_analyzer analyzer, uchar **arg_p, - Item_transformer transformer, uchar *arg_t); + Item_transformer transformer, uchar *arg_t) + { + return do_compile(thd, analyzer, arg_p, transformer, arg_t, 0); + } + Item* top_level_compile(THD *thd, Item_analyzer analyzer, uchar **arg_p, + Item_transformer transformer, uchar *arg_t) + { + return do_compile(thd, analyzer, arg_p, transformer, arg_t, 1); + } bool eval_not_null_tables(void *opt_arg); Item *build_clone(THD *thd); bool excl_dep_on_table(table_map tab_map); diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index f31a128832b..ac570be78aa 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -10042,9 +10042,8 @@ st_select_lex::build_pushable_cond_for_having_pushdown(THD *thd, Item *cond) */ if (cond->get_extraction_flag() == FULL_EXTRACTION_FL) { - Item *result= cond->transform(thd, - &Item::multiple_equality_transformer, - (uchar *)this); + Item *result= cond->top_level_transform(thd, + &Item::multiple_equality_transformer, (uchar *)this); if (!result) return true; if (result->type() == Item::COND_ITEM && diff --git a/sql/sql_select.cc b/sql/sql_select.cc index d9922fddb8f..eb54484fa51 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -28295,11 +28295,11 @@ void JOIN::cache_const_exprs() return; if (conds) - conds->compile(thd, &Item::cache_const_expr_analyzer, &analyzer_arg, + conds->top_level_compile(thd, 
&Item::cache_const_expr_analyzer, &analyzer_arg, &Item::cache_const_expr_transformer, &cache_flag); cache_flag= FALSE; if (having) - having->compile(thd, &Item::cache_const_expr_analyzer, + having->top_level_compile(thd, &Item::cache_const_expr_analyzer, &analyzer_arg, &Item::cache_const_expr_transformer, &cache_flag); for (JOIN_TAB *tab= first_depth_first_tab(this); tab; @@ -28308,7 +28308,7 @@ void JOIN::cache_const_exprs() if (*tab->on_expr_ref) { cache_flag= FALSE; - (*tab->on_expr_ref)->compile(thd, &Item::cache_const_expr_analyzer, + (*tab->on_expr_ref)->top_level_compile(thd, &Item::cache_const_expr_analyzer, &analyzer_arg, &Item::cache_const_expr_transformer, &cache_flag); } } @@ -29365,7 +29365,6 @@ select_handler *SELECT_LEX::find_select_handler(THD *thd) } - /** @} (end of group Query_Optimizer) */ diff --git a/sql/sql_tvc.cc b/sql/sql_tvc.cc index a25f6522bd9..d3d20ef7a9a 100644 --- a/sql/sql_tvc.cc +++ b/sql/sql_tvc.cc @@ -1121,12 +1121,10 @@ bool JOIN::transform_in_predicates_into_in_subq(THD *thd) { select_lex->parsing_place= IN_WHERE; conds= - conds->transform(thd, - &Item::in_predicate_to_in_subs_transformer, - (uchar*) 0); + conds->top_level_transform(thd, + &Item::in_predicate_to_in_subs_transformer, 0); if (!conds) DBUG_RETURN(true); - select_lex->prep_where= conds ? conds->copy_andor_structure(thd) : 0; select_lex->where= conds; } @@ -1141,13 +1139,10 @@ bool JOIN::transform_in_predicates_into_in_subq(THD *thd) if (table->on_expr) { table->on_expr= - table->on_expr->transform(thd, - &Item::in_predicate_to_in_subs_transformer, - (uchar*) 0); + table->on_expr->top_level_transform(thd, + &Item::in_predicate_to_in_subs_transformer, 0); if (!table->on_expr) DBUG_RETURN(true); - table->prep_on_expr= table->on_expr ? - table->on_expr->copy_andor_structure(thd) : 0; } } } -- cgit v1.2.1 From 56948ee54c9d113f07f725ebdc560d1919fc6676 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Tue, 10 Jan 2023 10:45:03 +1100 Subject: clang15 warnings - unused vars and old prototypes clang15 finally errors on old prototype definations. Its also a lot fussier about variables that aren't used as is the case a number of time with loop counters that aren't examined. RocksDB was complaining that its get_range function was declared without the array length in ha_rocksdb.h. While a constant is used rather than trying to import the Rdb_key_def::INDEX_NUMBER_SIZE header (was causing a lot of errors on the defination of other orders). If the constant does change can be assured that the same compile warnings will tell us of the error. The ha_rocksdb::index_read_map_impl DBUG_EXECUTE_IF was similar to the existing endless functions used in replication tests. Its rather moot point as the rocksdb.force_shutdown test that uses myrocks_busy_loop_on_row_read is currently disabled. --- storage/archive/azio.c | 21 ++++++--------------- storage/connect/connect.cc | 6 +++--- storage/connect/ioapi.c | 3 +-- storage/connect/tabtbl.cpp | 4 +--- storage/connect/zip.c | 5 ----- storage/rocksdb/ha_rocksdb.cc | 5 +---- storage/rocksdb/ha_rocksdb.h | 2 +- storage/tokudb/PerconaFT/ft/loader/loader.cc | 7 +------ storage/tokudb/PerconaFT/ft/txn/txn_manager.cc | 2 -- 9 files changed, 14 insertions(+), 41 deletions(-) diff --git a/storage/archive/azio.c b/storage/archive/azio.c index 0f66b999c94..916abc90d7b 100644 --- a/storage/archive/azio.c +++ b/storage/archive/azio.c @@ -249,8 +249,7 @@ int azdopen(azio_stream *s, File fd, int Flags) for end of file. IN assertion: the stream s has been sucessfully opened for reading. 
*/ -int get_byte(s) - azio_stream *s; +int get_byte(azio_stream *s) { if (s->z_eof) return EOF; if (s->stream.avail_in == 0) @@ -427,8 +426,7 @@ void read_header(azio_stream *s, unsigned char *buffer) * Cleanup then free the given azio_stream. Return a zlib error code. Try freeing in the reverse order of allocations. */ -int destroy (s) - azio_stream *s; +int destroy (azio_stream *s) { int err = Z_OK; @@ -679,9 +677,7 @@ int do_flush (azio_stream *s, int flush) return s->z_err == Z_STREAM_END ? Z_OK : s->z_err; } -int ZEXPORT azflush (s, flush) - azio_stream *s; - int flush; +int ZEXPORT azflush (azio_stream *s, int flush) { int err; @@ -708,8 +704,7 @@ int ZEXPORT azflush (s, flush) /* =========================================================================== Rewinds input file. */ -int azrewind (s) - azio_stream *s; +int azrewind (azio_stream *s) { if (s == NULL || s->mode != 'r') return -1; @@ -733,10 +728,7 @@ int azrewind (s) SEEK_END is not implemented, returns error. In this version of the library, azseek can be extremely slow. */ -my_off_t azseek (s, offset, whence) - azio_stream *s; - my_off_t offset; - int whence; +my_off_t azseek (azio_stream *s, my_off_t offset, int whence) { if (s == NULL || whence == SEEK_END || @@ -812,8 +804,7 @@ my_off_t azseek (s, offset, whence) given compressed file. This position represents a number of bytes in the uncompressed data stream. */ -my_off_t ZEXPORT aztell (file) - azio_stream *file; +my_off_t ZEXPORT aztell (azio_stream *file) { return azseek(file, 0L, SEEK_CUR); } diff --git a/storage/connect/connect.cc b/storage/connect/connect.cc index 9b9a9d5acb8..73e852bbcba 100644 --- a/storage/connect/connect.cc +++ b/storage/connect/connect.cc @@ -237,7 +237,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, bool del, PHC) { char *p; - int i, n; + int n; bool rcop= true; PCOL colp; //PCOLUMN cp; @@ -276,7 +276,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, n = strlen(p) + 1; } // endfor p - for (i = 0, colp = tdbp->GetColumns(); colp; i++, colp = colp->GetNext()) { + for (colp = tdbp->GetColumns(); colp; colp = colp->GetNext()) { if (colp->InitValue(g)) throw 2; @@ -310,7 +310,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, n = strlen(p) + 1; } // endfor p - for (i = 0, colp = utp->GetColumns(); colp; i++, colp = colp->GetNext()) { + for (colp = utp->GetColumns(); colp; colp = colp->GetNext()) { if (colp->InitValue(g)) throw 5; diff --git a/storage/connect/ioapi.c b/storage/connect/ioapi.c index 1f339982926..b6a0ec318e6 100644 --- a/storage/connect/ioapi.c +++ b/storage/connect/ioapi.c @@ -221,8 +221,7 @@ static int ZCALLBACK ferror_file_func (voidpf opaque __attribute__((unused)), vo return ret; } -void fill_fopen_filefunc (pzlib_filefunc_def) - zlib_filefunc_def* pzlib_filefunc_def; +void fill_fopen_filefunc (zlib_filefunc_def* pzlib_filefunc_def) { pzlib_filefunc_def->zopen_file = fopen_file_func; pzlib_filefunc_def->zread_file = fread_file_func; diff --git a/storage/connect/tabtbl.cpp b/storage/connect/tabtbl.cpp index b2240bffa2c..607e7d131f3 100644 --- a/storage/connect/tabtbl.cpp +++ b/storage/connect/tabtbl.cpp @@ -230,7 +230,6 @@ PCOL TDBTBL::InsertSpecialColumn(PCOL scp) /***********************************************************************/ bool TDBTBL::InitTableList(PGLOBAL g) { - int n; uint sln; const char *scs; PTABLE tp, tabp; @@ -243,7 +242,7 @@ bool TDBTBL::InitTableList(PGLOBAL g) sln = hc->get_table()->s->connect_string.length; // 
PlugSetPath(filename, Tdbp->GetFile(g), Tdbp->GetPath()); - for (n = 0, tp = tdp->Tablep; tp; tp = tp->GetNext()) { + for (tp = tdp->Tablep; tp; tp = tp->GetNext()) { if (TestFil(g, To_CondFil, tp)) { tabp = new(g) XTAB(tp); @@ -276,7 +275,6 @@ bool TDBTBL::InitTableList(PGLOBAL g) else Tablist = tabp; - n++; } // endif filp } // endfor tp diff --git a/storage/connect/zip.c b/storage/connect/zip.c index 4bbe31ab7dd..d796fe09b87 100644 --- a/storage/connect/zip.c +++ b/storage/connect/zip.c @@ -1471,11 +1471,6 @@ extern int ZEXPORT zipWriteInFileInZip (zipFile file,const void* buf,unsigned in { uLong uTotalOutBefore = zi->ci.stream.total_out; err=deflate(&zi->ci.stream, Z_NO_FLUSH); - if(uTotalOutBefore > zi->ci.stream.total_out) - { - int bBreak = 0; - bBreak++; - } zi->ci.pos_in_buffered_data += (uInt)(zi->ci.stream.total_out - uTotalOutBefore) ; } diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index b1a64b4aec0..502dd647a27 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -8468,8 +8468,7 @@ int ha_rocksdb::index_read_map_impl(uchar *const buf, const uchar *const key, const key_range *end_key) { DBUG_ENTER_FUNC(); - DBUG_EXECUTE_IF("myrocks_busy_loop_on_row_read", int debug_i = 0; - while (1) { debug_i++; }); + DBUG_EXECUTE_IF("myrocks_busy_loop_on_row_read", my_sleep(50000);); int rc = 0; @@ -12124,7 +12123,6 @@ static int calculate_stats( } } - int num_sst = 0; for (const auto &it : props) { std::vector sst_stats; Rdb_tbl_prop_coll::read_stats_from_tbl_props(it.second, &sst_stats); @@ -12153,7 +12151,6 @@ static int calculate_stats( stats[it1.m_gl_index_id].merge( it1, true, it_index->second->max_storage_fmt_length()); } - num_sst++; } if (include_memtables) { diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index 758ca71fc95..6211c7ebc04 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -401,7 +401,7 @@ class ha_rocksdb : public my_core::handler { void free_key_buffers(); // the buffer size should be at least 2*Rdb_key_def::INDEX_NUMBER_SIZE - rocksdb::Range get_range(const int i, uchar buf[]) const; + rocksdb::Range get_range(const int i, uchar buf[2 * 4]) const; /* Perf timers for data reads diff --git a/storage/tokudb/PerconaFT/ft/loader/loader.cc b/storage/tokudb/PerconaFT/ft/loader/loader.cc index 3ff237f05c7..ce41783aa02 100644 --- a/storage/tokudb/PerconaFT/ft/loader/loader.cc +++ b/storage/tokudb/PerconaFT/ft/loader/loader.cc @@ -2423,7 +2423,6 @@ static int toku_loader_write_ft_from_q (FTLOADER bl, // The pivots file will contain all the pivot strings (in the form ) // The pivots_fname is the name of the pivots file. // Note that the pivots file will have one extra pivot in it (the last key in the dictionary) which will not appear in the tree. - int64_t n_pivots=0; // number of pivots in pivots_file FIDX pivots_file; // the file r = ft_loader_open_temp_file (bl, &pivots_file); @@ -2539,8 +2538,6 @@ static int toku_loader_write_ft_from_q (FTLOADER bl, allocate_node(&sts, lblock); - n_pivots++; - invariant(maxkey.data != NULL); if ((r = bl_write_dbt(&maxkey, pivots_stream, NULL, nullptr, bl))) { ft_loader_set_panic(bl, r, true, which_db, nullptr, nullptr); @@ -2616,8 +2613,6 @@ static int toku_loader_write_ft_from_q (FTLOADER bl, // We haven't paniced, so the sum should add up. invariant(used_estimate == total_disksize_estimate); - n_pivots++; - { DBT key = make_dbt(0,0); // must write an extra DBT into the pivots file. 
r = bl_write_dbt(&key, pivots_stream, NULL, nullptr, bl); @@ -3302,7 +3297,7 @@ static int write_nonleaves (FTLOADER bl, FIDX pivots_fidx, struct dbout *out, st int height = 1; // Watch out for the case where we saved the last pivot but didn't write any more nodes out. - // The trick is not to look at n_pivots, but to look at blocks.n_blocks + // The trick is to look at blocks.n_blocks while (sts->n_subtrees > 1) { // If there is more than one block in blocks, then we must build another level of the tree. diff --git a/storage/tokudb/PerconaFT/ft/txn/txn_manager.cc b/storage/tokudb/PerconaFT/ft/txn/txn_manager.cc index 1b55844bc7d..8af2f6aab63 100644 --- a/storage/tokudb/PerconaFT/ft/txn/txn_manager.cc +++ b/storage/tokudb/PerconaFT/ft/txn/txn_manager.cc @@ -208,12 +208,10 @@ verify_snapshot_system(TXN_MANAGER txn_manager UU()) { { //verify neither pair->begin_id nor end_id is in snapshot_xids TOKUTXN curr_txn = txn_manager->snapshot_head; - uint32_t curr_index = 0; while (curr_txn != NULL) { invariant(tuple->begin_id != curr_txn->txnid.parent_id64); invariant(tuple->end_id != curr_txn->txnid.parent_id64); curr_txn = curr_txn->snapshot_next; - curr_index++; } } { -- cgit v1.2.1 From b218dfead2c57bc412107bebefd6ac33d2270dc6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 11 Jan 2023 08:37:27 +0200 Subject: Remove an unused parameter lock_rec_has_to_wait(): Remove the unused parameter for_locking that had been originally added in commit df4dd593f29aec8e2116aec1775ad4b8833d8c93 --- storage/innobase/lock/lock0lock.cc | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 2fd2ef94365..2073380eadb 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2022, Oracle and/or its affiliates. -Copyright (c) 2014, 2022, MariaDB Corporation. +Copyright (c) 2014, 2023, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -720,8 +720,6 @@ UNIV_INLINE bool lock_rec_has_to_wait( /*=================*/ - bool for_locking, - /*!< in is called locking or releasing */ const trx_t* trx, /*!< in: trx of new lock */ ulint type_mode,/*!< in: precise mode of the new lock to set: LOCK_S or LOCK_X, possibly @@ -877,7 +875,7 @@ lock_has_to_wait( } return lock_rec_has_to_wait( - false, lock1->trx, lock1->type_mode, lock2, + lock1->trx, lock1->type_mode, lock2, lock_rec_get_nth_bit(lock1, PAGE_HEAP_NO_SUPREMUM)); } @@ -1168,7 +1166,7 @@ lock_rec_other_has_conflicting( lock != NULL; lock = lock_rec_get_next(heap_no, lock)) { - if (lock_rec_has_to_wait(true, trx, mode, lock, is_supremum)) { + if (lock_rec_has_to_wait(trx, mode, lock, is_supremum)) { #ifdef WITH_WSREP if (trx->is_wsrep()) { trx_mutex_enter(lock->trx); -- cgit v1.2.1 From b928c849d220f47de0bdfd6a78b8eb584b3e5ae6 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 4 Jan 2023 13:01:47 +0300 Subject: MDEV-28602 Wrong result with outer join, merged derived table and view (Variant 3, initial variant was by Rex Jonston) A LEFT JOIN with a constant as a column of the inner table produced wrong query result if the optimizer had to write the inner table column into a temp table. Query pattern: SELECT ... 
FROM (SELECT /*non-mergeable select*/ FROM t1 LEFT JOIN (SELECT 'Y' as Val) t2 ON ...) as tbl Fixed this by adding Item_direct_view_ref::save_in_field() which follows the pattern of Item_direct_view_ref's save_org_in_field(), save_in_result_field() and val_XXX() functions: * call check_null_ref() and handle NULL value * if we didn't get a NULL-complemented row, call Item_direct_ref's function. --- mysql-test/main/join_outer.result | 85 ++++++++++++++++++++++++++++++++++ mysql-test/main/join_outer.test | 85 ++++++++++++++++++++++++++++++++++ mysql-test/main/join_outer_jcl6.result | 85 ++++++++++++++++++++++++++++++++++ sql/item.h | 8 ++++ 4 files changed, 263 insertions(+) diff --git a/mysql-test/main/join_outer.result b/mysql-test/main/join_outer.result index 758d1eda7ae..ae0aabeabc5 100644 --- a/mysql-test/main/join_outer.result +++ b/mysql-test/main/join_outer.result @@ -1,4 +1,5 @@ drop table if exists t0,t1,t2,t3,t4,t5; +drop view if exists v0,v1,v2,v3; SET @org_optimizer_switch=@@optimizer_switch; SET optimizer_switch=ifnull(@optimizer_switch_for_join_outer_test,'outer_join_with_cache=off'); set join_cache_level=1; @@ -2822,5 +2823,89 @@ WHERE t3.pk IN (2); 1 drop view v4; drop table t1,t2,t3,t4; +# +# MDEV-28602 Wrong result with outer join, merged derived table and view +# +create table t1 ( +Election int(10) unsigned NOT NULL +); +insert into t1 (Election) values (1); +create table t2 ( +VoteID int(10), +ElectionID int(10), +UserID int(10) +); +insert into t2 (ElectionID, UserID) values (2, 30), (3, 30); +create view v1 as select * from t1 +left join ( select 'Y' AS Voted, ElectionID from t2 ) AS T +on T.ElectionID = t1.Election +limit 9; +select * from v1; +Election Voted ElectionID +1 NULL NULL +drop table t1, t2; +drop view v1; +# +# and another contrived example showing a bit of heirarchy +# +create table t10 (a int); +create table t20 (b int); +insert into t10 values (1),(2); +insert into t20 values (1),(3); +create view v10 as select *, 'U' as u from t10 left join (select 'Y' as y, t20.b from t20) dt1 on t10.a= dt1.b limit 3; +create table t30 (c int); +insert into t30 values (1),(3); +create view v20 as select * from t30 left join (select 'X' as x, v10.u, v10.y, v10.b from v10) dt2 on t30.c=dt2.b limit 6; +select * from v20 limit 9; +c x u y b +1 X U Y 1 +3 NULL NULL NULL NULL +drop view v10, v20; +drop table t10, t20, t30; +# +# More complex testcase +# +create table t2 (b int); +insert into t2 values (3),(7),(1); +create table t3 (c int); +insert into t3 values (3),(1); +create table t1 (a int); +insert into t1 values (1),(2),(7),(1); +select * from +( +select * from +(select 'Z' as z, t1.a from t1) dt1 +left join +(select 'Y' as y, t2.b from t2) dt2 +left join +(select 'X' as x, t3.c from t3) dt3 +on dt2.b=dt3.c +on dt1.a=dt2.b +limit 9 +) dt; +z a y b x c +Z 1 Y 1 X 1 +Z 2 NULL NULL NULL NULL +Z 7 Y 7 NULL NULL +Z 1 Y 1 X 1 +create view v3(x,c) as select * from (select 'X' as x, t3.c from t3) dt3; +create view v2(y,b) as select * from (select 'Y' as y, t2.b from t2) dt2; +create view v0(y,b,x,c) as select * from v2 left join v3 on v2.b=v3.c; +create view v1 as select 'Z' as z, t1.a, v0.* from t1 left join v0 on t1.a=v0.b limit 9; +select * from v1; +z a y b x c +Z 1 Y 1 X 1 +Z 2 NULL NULL NULL NULL +Z 7 Y 7 NULL NULL +Z 1 Y 1 X 1 +set statement join_cache_level=0 for +select * from v1; +z a y b x c +Z 1 Y 1 X 1 +Z 2 NULL NULL NULL NULL +Z 7 Y 7 NULL NULL +Z 1 Y 1 X 1 +drop view v0, v1, v2, v3; +drop table t1, t2, t3; # end of 10.3 tests SET 
optimizer_switch=@org_optimizer_switch; diff --git a/mysql-test/main/join_outer.test b/mysql-test/main/join_outer.test index ff74e5280e7..5e1e83e4049 100644 --- a/mysql-test/main/join_outer.test +++ b/mysql-test/main/join_outer.test @@ -6,6 +6,7 @@ --disable_warnings drop table if exists t0,t1,t2,t3,t4,t5; +drop view if exists v0,v1,v2,v3; --enable_warnings SET @org_optimizer_switch=@@optimizer_switch; @@ -2341,6 +2342,90 @@ WHERE t3.pk IN (2); drop view v4; drop table t1,t2,t3,t4; +--echo # +--echo # MDEV-28602 Wrong result with outer join, merged derived table and view +--echo # + +create table t1 ( + Election int(10) unsigned NOT NULL +); + +insert into t1 (Election) values (1); + +create table t2 ( + VoteID int(10), + ElectionID int(10), + UserID int(10) +); + +insert into t2 (ElectionID, UserID) values (2, 30), (3, 30); +create view v1 as select * from t1 + left join ( select 'Y' AS Voted, ElectionID from t2 ) AS T + on T.ElectionID = t1.Election +limit 9; +# limit X causes merge algorithm select as opposed to temp table +select * from v1; +drop table t1, t2; +drop view v1; + +--echo # +--echo # and another contrived example showing a bit of heirarchy +--echo # +create table t10 (a int); +create table t20 (b int); +insert into t10 values (1),(2); +insert into t20 values (1),(3); +create view v10 as select *, 'U' as u from t10 left join (select 'Y' as y, t20.b from t20) dt1 on t10.a= dt1.b limit 3; +create table t30 (c int); +insert into t30 values (1),(3); +create view v20 as select * from t30 left join (select 'X' as x, v10.u, v10.y, v10.b from v10) dt2 on t30.c=dt2.b limit 6; +select * from v20 limit 9; +drop view v10, v20; +drop table t10, t20, t30; + +--echo # +--echo # More complex testcase +--echo # +create table t2 (b int); +insert into t2 values (3),(7),(1); +create table t3 (c int); +insert into t3 values (3),(1); +create table t1 (a int); +insert into t1 values (1),(2),(7),(1); + +select * from +( + select * from + (select 'Z' as z, t1.a from t1) dt1 + left join + (select 'Y' as y, t2.b from t2) dt2 + left join + (select 'X' as x, t3.c from t3) dt3 + on dt2.b=dt3.c + on dt1.a=dt2.b + limit 9 +) dt; + +## Same as dt3 above +create view v3(x,c) as select * from (select 'X' as x, t3.c from t3) dt3; + +## Same as dt2 above +create view v2(y,b) as select * from (select 'Y' as y, t2.b from t2) dt2; + +## Same as (...) in the "... 
dt1 left join (...)" above +create view v0(y,b,x,c) as select * from v2 left join v3 on v2.b=v3.c; + +# Same as above select statement +create view v1 as select 'Z' as z, t1.a, v0.* from t1 left join v0 on t1.a=v0.b limit 9; + +select * from v1; + +set statement join_cache_level=0 for +select * from v1; + +drop view v0, v1, v2, v3; +drop table t1, t2, t3; + --echo # end of 10.3 tests SET optimizer_switch=@org_optimizer_switch; diff --git a/mysql-test/main/join_outer_jcl6.result b/mysql-test/main/join_outer_jcl6.result index 71579ae7b38..4e03ffb51cf 100644 --- a/mysql-test/main/join_outer_jcl6.result +++ b/mysql-test/main/join_outer_jcl6.result @@ -6,6 +6,7 @@ set @@join_cache_level=6; set @optimizer_switch_for_join_outer_test=@@optimizer_switch; set @join_cache_level_for_join_outer_test=@@join_cache_level; drop table if exists t0,t1,t2,t3,t4,t5; +drop view if exists v0,v1,v2,v3; SET @org_optimizer_switch=@@optimizer_switch; SET optimizer_switch=ifnull(@optimizer_switch_for_join_outer_test,'outer_join_with_cache=off'); set join_cache_level=@join_cache_level_for_join_outer_test; @@ -2829,5 +2830,89 @@ WHERE t3.pk IN (2); 1 drop view v4; drop table t1,t2,t3,t4; +# +# MDEV-28602 Wrong result with outer join, merged derived table and view +# +create table t1 ( +Election int(10) unsigned NOT NULL +); +insert into t1 (Election) values (1); +create table t2 ( +VoteID int(10), +ElectionID int(10), +UserID int(10) +); +insert into t2 (ElectionID, UserID) values (2, 30), (3, 30); +create view v1 as select * from t1 +left join ( select 'Y' AS Voted, ElectionID from t2 ) AS T +on T.ElectionID = t1.Election +limit 9; +select * from v1; +Election Voted ElectionID +1 NULL NULL +drop table t1, t2; +drop view v1; +# +# and another contrived example showing a bit of heirarchy +# +create table t10 (a int); +create table t20 (b int); +insert into t10 values (1),(2); +insert into t20 values (1),(3); +create view v10 as select *, 'U' as u from t10 left join (select 'Y' as y, t20.b from t20) dt1 on t10.a= dt1.b limit 3; +create table t30 (c int); +insert into t30 values (1),(3); +create view v20 as select * from t30 left join (select 'X' as x, v10.u, v10.y, v10.b from v10) dt2 on t30.c=dt2.b limit 6; +select * from v20 limit 9; +c x u y b +1 X U Y 1 +3 NULL NULL NULL NULL +drop view v10, v20; +drop table t10, t20, t30; +# +# More complex testcase +# +create table t2 (b int); +insert into t2 values (3),(7),(1); +create table t3 (c int); +insert into t3 values (3),(1); +create table t1 (a int); +insert into t1 values (1),(2),(7),(1); +select * from +( +select * from +(select 'Z' as z, t1.a from t1) dt1 +left join +(select 'Y' as y, t2.b from t2) dt2 +left join +(select 'X' as x, t3.c from t3) dt3 +on dt2.b=dt3.c +on dt1.a=dt2.b +limit 9 +) dt; +z a y b x c +Z 1 Y 1 X 1 +Z 1 Y 1 X 1 +Z 7 Y 7 NULL NULL +Z 2 NULL NULL NULL NULL +create view v3(x,c) as select * from (select 'X' as x, t3.c from t3) dt3; +create view v2(y,b) as select * from (select 'Y' as y, t2.b from t2) dt2; +create view v0(y,b,x,c) as select * from v2 left join v3 on v2.b=v3.c; +create view v1 as select 'Z' as z, t1.a, v0.* from t1 left join v0 on t1.a=v0.b limit 9; +select * from v1; +z a y b x c +Z 1 Y 1 X 1 +Z 1 Y 1 X 1 +Z 7 Y 7 NULL NULL +Z 2 NULL NULL NULL NULL +set statement join_cache_level=0 for +select * from v1; +z a y b x c +Z 1 Y 1 X 1 +Z 2 NULL NULL NULL NULL +Z 7 Y 7 NULL NULL +Z 1 Y 1 X 1 +drop view v0, v1, v2, v3; +drop table t1, t2, t3; # end of 10.3 tests SET optimizer_switch=@org_optimizer_switch; diff --git a/sql/item.h 
b/sql/item.h index 35428b30252..5ae3d7a3435 100644 --- a/sql/item.h +++ b/sql/item.h @@ -5504,6 +5504,14 @@ public: Item_direct_ref::save_in_result_field(no_conversions); } + int save_in_field(Field *field, bool no_conversions) + { + if (check_null_ref()) + return set_field_to_null_with_conversions(field, no_conversions); + + return Item_direct_ref::save_in_field(field, no_conversions); + } + void cleanup() { null_ref_table= NULL; -- cgit v1.2.1 From e4a4aad7cf5a8cdb3ae4d97527f489cc590146a1 Mon Sep 17 00:00:00 2001 From: Julius Goryavsky Date: Tue, 13 Dec 2022 10:32:21 +0100 Subject: pre-MDEV-30157 & pre-MDEV-28669: fixes before the main corrections This commit adds even more correct handling of parameters with paths when they contain leading or trailing spaces and/or slashes. Also it fixes problems that occur when the user specified explicit paths to additional directories, but these paths match the specified path of the data directory - in this case, additional subdirectories should be treated (in relation to the data directory) in the same way as if these paths were not specified or as if they are implicitly specified as "." or "./". But prior to this fix, existing code treated any values as if they were completely separate directories, whether or not they actually point to the same location to which datadir points to - and this sometimes resulted in incorrect file transfers. This fix does not contain separate tests, as tests will be part of the main commit(s). This fix has been made as a separate commit to facilitate review for major substantive fixes related to MDEV-30157 and MDEV-28669. --- scripts/wsrep_sst_common.sh | 113 ++++++++++++++++++++++++++------------- scripts/wsrep_sst_mariabackup.sh | 30 ++++++++--- scripts/wsrep_sst_rsync.sh | 26 ++++++--- 3 files changed, 118 insertions(+), 51 deletions(-) diff --git a/scripts/wsrep_sst_common.sh b/scripts/wsrep_sst_common.sh index 6a94cb0f706..407a4c9dd24 100644 --- a/scripts/wsrep_sst_common.sh +++ b/scripts/wsrep_sst_common.sh @@ -47,18 +47,51 @@ trim_string() trim_dir() { - local t=$(trim_string "$1") - if [ "$t" != '/' ]; then - if [ "${t%/}" != "$t" ]; then - t=$(trim_string "${t%/}") + if [ -n "$BASH_VERSION" ]; then + local pattern="![:space:]${2:-}" + local x="${1#*[$pattern]}" + local z=${#1} + x=${#x} + if [ $x -ne $z ]; then + local y="${1%[$pattern/]*}" + y=${#y} + x=$(( z-x-1 )) + y=$(( y-x+1 )) + x="${1:$x:$y}" + [ -z "$x" ] && x='.' + printf '%s' "$x" + else + printf '' fi else - t='.' + local pattern="[:space:]${2:-}" + local x=$(echo "$1" | sed -E "s/^[$pattern]+|[$pattern/]+\$//g") + if [ -n "$x" ]; then + echo "$x" + elif "${1#*/}" != "$1"; then + echo '.' 
+ else + echo '' + fi fi +} + +trim_right() +{ if [ -n "$BASH_VERSION" ]; then - printf '%s' "$t" + local pattern="[![:space:]${2:-}]" + local z=${#1} + local y="${1%$pattern*}" + y=${#y} + if [ $y -ne $z ]; then + y=$(( y+1 )) + printf '%s' "${1:0:$y}" + else + printf '' + fi else - echo "$t" + local pattern="[[:space:]${2:-}]" + echo "$1" | sed -E "s/$pattern+\$//g" fi } @@ -111,7 +144,7 @@ INNOEXTRA="" while [ $# -gt 0 ]; do case "$1" in '--address') - WSREP_SST_OPT_ADDR="$2" + WSREP_SST_OPT_ADDR=$(trim_string "$2") # # Break address string into host:port/path parts # @@ -119,20 +152,22 @@ case "$1" in \[*) # IPv6 # Remove the starting and ending square brackets, if present: - addr_no_bracket="${WSREP_SST_OPT_ADDR#\[}" + addr="${WSREP_SST_OPT_ADDR#\[}" + addr=$(trim_right "${addr%%\]*}") # Some utilities and subsequent code require an address # without square brackets: - readonly WSREP_SST_OPT_HOST_UNESCAPED="${addr_no_bracket%%\]*}" + readonly WSREP_SST_OPT_HOST_UNESCAPED="$addr" # Square brackets are needed in most cases: - readonly WSREP_SST_OPT_HOST="[$WSREP_SST_OPT_HOST_UNESCAPED]" + readonly WSREP_SST_OPT_HOST="[$addr]" # Mark this address as IPv6: readonly WSREP_SST_OPT_HOST_IPv6=1 # Let's remove the leading part that contains the host address: remain="${WSREP_SST_OPT_ADDR#*\]}" ;; *) - readonly WSREP_SST_OPT_HOST="${WSREP_SST_OPT_ADDR%%[:/]*}" - readonly WSREP_SST_OPT_HOST_UNESCAPED="$WSREP_SST_OPT_HOST" + addr=$(trim_right "${WSREP_SST_OPT_ADDR%%[:/]*}") + readonly WSREP_SST_OPT_HOST="$addr" + readonly WSREP_SST_OPT_HOST_UNESCAPED="$addr" readonly WSREP_SST_OPT_HOST_IPv6=0 # Let's remove the leading part that contains the host address: remain="${WSREP_SST_OPT_ADDR#*[:/]}" @@ -154,17 +189,18 @@ case "$1" in else readonly WSREP_SST_OPT_PATH="" fi + WSREP_SST_OPT_ADDR_PORT=$(trim_right "$WSREP_SST_OPT_ADDR_PORT") # Remove the module name part from the string, which ends with "/": remain="${WSREP_SST_OPT_PATH#*/}" # This operation removes the tail after the very first occurrence # of the "/" character, inclusively: - readonly WSREP_SST_OPT_MODULE="${WSREP_SST_OPT_PATH%%/*}" + readonly WSREP_SST_OPT_MODULE=$(trim_right "${WSREP_SST_OPT_PATH%%/*}") # If there is one more "/" in the string, then everything before # it will be the LSN, otherwise the LSN is empty: if [ "$remain" != "$WSREP_SST_OPT_PATH" ]; then # Extract the part that matches the LSN by removing all # characters starting from the very first "/": - readonly WSREP_SST_OPT_LSN="${remain%%/*}" + readonly WSREP_SST_OPT_LSN=$(trim_right "${remain%%/*}") # Exctract everything after the first occurrence of # the "/" character in the string: source="$remain" @@ -176,7 +212,7 @@ case "$1" in # Let's extract the version number by removing the tail # after the very first occurence of the "/" character # (inclusively): - readonly WSREP_SST_OPT_SST_VER="${remain%%/*}" + readonly WSREP_SST_OPT_SST_VER=$(trim_right "${remain%%/*}") else readonly WSREP_SST_OPT_SST_VER="" fi @@ -218,41 +254,46 @@ case "$1" in shift ;; '--defaults-file') - readonly WSREP_SST_OPT_DEFAULT="$1=$2" - readonly WSREP_SST_OPT_DEFAULTS="$1='$2'" + file=$(trim_string "$2") + readonly WSREP_SST_OPT_DEFAULT="$1=$file" + readonly WSREP_SST_OPT_DEFAULTS="$1='$file'" shift ;; '--defaults-extra-file') - readonly WSREP_SST_OPT_EXTRA_DEFAULT="$1=$2" - readonly WSREP_SST_OPT_EXTRA_DEFAULTS="$1='$2'" + file=$(trim_string "$2") + readonly WSREP_SST_OPT_EXTRA_DEFAULT="$1=$file" + readonly WSREP_SST_OPT_EXTRA_DEFAULTS="$1='$file'" shift ;; '--defaults-group-suffix') - readonly 
WSREP_SST_OPT_SUFFIX_DEFAULT="$1=$2" - readonly WSREP_SST_OPT_SUFFIX_VALUE="$2" + suffix=$(trim_string "$2") + readonly WSREP_SST_OPT_SUFFIX_DEFAULT="$1=$suffix" + readonly WSREP_SST_OPT_SUFFIX_VALUE="$suffix" shift ;; '--host') - case "$2" in + addr=$(trim_string "$2") + case "$addr" in \[*) # IPv6 # Remove the starting and ending square brackets, if present: - addr_no_bracket="${2#\[}" + addr="${addr#\[}" + addr=$(trim_right "${addr%%\]*}") # Some utilities and subsequent code require an address # without square brackets: - readonly WSREP_SST_OPT_HOST_UNESCAPED="${addr_no_bracket%%\]*}" + readonly WSREP_SST_OPT_HOST_UNESCAPED="$addr" # Square brackets are needed in most cases: - readonly WSREP_SST_OPT_HOST="[${WSREP_SST_OPT_HOST_UNESCAPED}]" + readonly WSREP_SST_OPT_HOST="[$addr]" # Mark this address as IPv6: readonly WSREP_SST_OPT_HOST_IPv6=1 ;; *) - readonly WSREP_SST_OPT_HOST="$2" - readonly WSREP_SST_OPT_HOST_UNESCAPED="$2" + readonly WSREP_SST_OPT_HOST="$addr" + readonly WSREP_SST_OPT_HOST_UNESCAPED="$addr" readonly WSREP_SST_OPT_HOST_IPv6=0 ;; esac - WSREP_SST_OPT_ADDR="$WSREP_SST_OPT_HOST" + WSREP_SST_OPT_ADDR="$addr" shift ;; '--local-port') @@ -272,11 +313,11 @@ case "$1" in shift ;; '--role') - readonly WSREP_SST_OPT_ROLE="$2" + readonly WSREP_SST_OPT_ROLE=$(trim_string "$2") shift ;; '--socket') - readonly WSREP_SST_OPT_SOCKET="$2" + readonly WSREP_SST_OPT_SOCKET=$(trim_string "$2") shift ;; '--user') @@ -284,23 +325,23 @@ case "$1" in shift ;; '--gtid') - readonly WSREP_SST_OPT_GTID="$2" + readonly WSREP_SST_OPT_GTID=$(trim_string "$2") shift ;; '--binlog'|'--log-bin') - readonly WSREP_SST_OPT_BINLOG="$2" + readonly WSREP_SST_OPT_BINLOG=$(trim_string "$2") shift ;; '--binlog-index'|'--log-bin-index') - WSREP_SST_OPT_BINLOG_INDEX="$2" + WSREP_SST_OPT_BINLOG_INDEX=$(trim_string "$2") shift ;; '--log-basename') - readonly WSREP_SST_OPT_LOG_BASENAME="$2" + readonly WSREP_SST_OPT_LOG_BASENAME=$(trim_string "$2") shift ;; '--gtid-domain-id') - readonly WSREP_SST_OPT_GTID_DOMAIN_ID="$2" + readonly WSREP_SST_OPT_GTID_DOMAIN_ID=$(trim_string "$2") shift ;; '--mysqld-args') diff --git a/scripts/wsrep_sst_mariabackup.sh b/scripts/wsrep_sst_mariabackup.sh index 63ef8be8690..7465b2a3b08 100644 --- a/scripts/wsrep_sst_mariabackup.sh +++ b/scripts/wsrep_sst_mariabackup.sh @@ -439,9 +439,10 @@ get_footprint() -regex '.*undo[0-9]+$\|.*\.ibd$\|.*\.MYI$\|.*\.MYD$\|.*ibdata1$' \ -type f -print0 | du --files0-from=- --block-size=1 -c -s | \ awk 'END { print $1 }') - local payload_undo=0 - if [ -n "$ib_undo_dir" -a -d "$ib_undo_dir" ]; then + if [ -n "$ib_undo_dir" -a "$ib_undo_dir" != '.' -a \ + "$ib_undo_dir" != "$DATA_DIR" -a -d "$ib_undo_dir" ] + then cd "$ib_undo_dir" payload_undo=$(find . -regex '.*undo[0-9]+$' -type f -print0 | \ du --files0-from=- --block-size=1 -c -s | awk 'END { print $1 }') @@ -451,7 +452,7 @@ get_footprint() wsrep_log_info \ "SST footprint estimate: data: $payload_data, undo: $payload_undo" - payload=$(( payload_data + payload_undo )) + payload=$(( payload_data+payload_undo )) if [ "$compress" != 'none' ]; then # QuickLZ has around 50% compression ratio @@ -1220,13 +1221,16 @@ else # joiner INNODB_DATA_HOME_DIR=$(trim_dir "$INNODB_DATA_HOME_DIR") fi - if [ -n "$INNODB_DATA_HOME_DIR" -a "$INNODB_DATA_HOME_DIR" != '.' ]; then + if [ -n "$INNODB_DATA_HOME_DIR" -a "$INNODB_DATA_HOME_DIR" != '.' -a \ + "$INNODB_DATA_HOME_DIR" != "$DATA_DIR" ] + then # handle both relative and absolute paths: cd "$DATA" [ ! 
-d "$INNODB_DATA_HOME_DIR" ] && mkdir -p "$INNODB_DATA_HOME_DIR" cd "$INNODB_DATA_HOME_DIR" ib_home_dir="$(pwd)" cd "$OLD_PWD" + [ "$ib_home_dir" = "$DATA_DIR" ] && ib_home_dir="" fi # if no command line argument and INNODB_LOG_GROUP_HOME is not set, @@ -1236,13 +1240,16 @@ else # joiner INNODB_LOG_GROUP_HOME=$(trim_dir "$INNODB_LOG_GROUP_HOME") fi - if [ -n "$INNODB_LOG_GROUP_HOME" -a "$INNODB_LOG_GROUP_HOME" != '.' ]; then + if [ -n "$INNODB_LOG_GROUP_HOME" -a "$INNODB_LOG_GROUP_HOME" != '.' -a \ + "$INNODB_LOG_GROUP_HOME" != "$DATA_DIR" ] + then # handle both relative and absolute paths: cd "$DATA" [ ! -d "$INNODB_LOG_GROUP_HOME" ] && mkdir -p "$INNODB_LOG_GROUP_HOME" cd "$INNODB_LOG_GROUP_HOME" ib_log_dir="$(pwd)" cd "$OLD_PWD" + [ "$ib_log_dir" = "$DATA_DIR" ] && ib_log_dir="" fi # if no command line argument and INNODB_UNDO_DIR is not set, @@ -1252,13 +1259,16 @@ else # joiner INNODB_UNDO_DIR=$(trim_dir "$INNODB_UNDO_DIR") fi - if [ -n "$INNODB_UNDO_DIR" -a "$INNODB_UNDO_DIR" != '.' ]; then + if [ -n "$INNODB_UNDO_DIR" -a "$INNODB_UNDO_DIR" != '.' -a \ + "$INNODB_UNDO_DIR" != "$DATA_DIR" ] + then # handle both relative and absolute paths: cd "$DATA" [ ! -d "$INNODB_UNDO_DIR" ] && mkdir -p "$INNODB_UNDO_DIR" cd "$INNODB_UNDO_DIR" ib_undo_dir="$(pwd)" cd "$OLD_PWD" + [ "$ib_undo_dir" = "$DATA_DIR" ] && ib_undo_dir="" fi if [ -n "$backup_threads" ]; then @@ -1500,11 +1510,15 @@ else # joiner binlogs=$(ls -d -1 "$binlog_base".[0-9]* 2>/dev/null || :) fi cd "$DATA_DIR" - if [ -n "$binlog_dir" -a "$binlog_dir" != '.' ]; then + if [ -n "$binlog_dir" -a "$binlog_dir" != '.' -a \ + "$binlog_dir" != "$DATA_DIR" ] + then [ ! -d "$binlog_dir" ] && mkdir -p "$binlog_dir" fi index_dir=$(dirname "$binlog_index"); - if [ -n "$index_dir" -a "$index_dir" != '.' ]; then + if [ -n "$index_dir" -a "$index_dir" != '.' -a \ + "$index_dir" != "$DATA_DIR" ] + then [ ! -d "$index_dir" ] && mkdir -p "$index_dir" fi if [ -n "$binlogs" ]; then diff --git a/scripts/wsrep_sst_rsync.sh b/scripts/wsrep_sst_rsync.sh index ddf41559c29..ee3e848025b 100644 --- a/scripts/wsrep_sst_rsync.sh +++ b/scripts/wsrep_sst_rsync.sh @@ -185,7 +185,9 @@ if [ -z "$INNODB_LOG_GROUP_HOME" ]; then INNODB_LOG_GROUP_HOME=$(trim_dir "$INNODB_LOG_GROUP_HOME") fi -if [ -n "$INNODB_LOG_GROUP_HOME" -a "$INNODB_LOG_GROUP_HOME" != '.' ]; then +if [ -n "$INNODB_LOG_GROUP_HOME" -a "$INNODB_LOG_GROUP_HOME" != '.' -a \ + "$INNODB_LOG_GROUP_HOME" != "$DATA_DIR" ] +then # handle both relative and absolute paths: cd "$DATA" [ ! -d "$INNODB_LOG_GROUP_HOME" ] && mkdir -p "$INNODB_LOG_GROUP_HOME" @@ -201,7 +203,9 @@ if [ -z "$INNODB_DATA_HOME_DIR" ]; then INNODB_DATA_HOME_DIR=$(trim_dir "$INNODB_DATA_HOME_DIR") fi -if [ -n "$INNODB_DATA_HOME_DIR" -a "$INNODB_DATA_HOME_DIR" != '.' ]; then +if [ -n "$INNODB_DATA_HOME_DIR" -a "$INNODB_DATA_HOME_DIR" != '.' -a \ + "$INNODB_DATA_HOME_DIR" != "$DATA_DIR" ] +then # handle both relative and absolute paths: cd "$DATA" [ ! -d "$INNODB_DATA_HOME_DIR" ] && mkdir -p "$INNODB_DATA_HOME_DIR" @@ -217,7 +221,9 @@ if [ -z "$INNODB_UNDO_DIR" ]; then INNODB_UNDO_DIR=$(trim_dir "$INNODB_UNDO_DIR") fi -if [ -n "$INNODB_UNDO_DIR" -a "$INNODB_UNDO_DIR" != '.' ]; then +if [ -n "$INNODB_UNDO_DIR" -a "$INNODB_UNDO_DIR" != '.' -a \ + "$INNODB_UNDO_DIR" != "$DATA_DIR" ] +then # handle both relative and absolute paths: cd "$DATA" [ ! -d "$INNODB_UNDO_DIR" ] && mkdir -p "$INNODB_UNDO_DIR" @@ -504,7 +510,9 @@ EOF if [ "$first" = '-' -o "$first" = '@' ]; then bin_base="./$bin_base" fi - if [ -n "$bin_dir" -a "$bin_dir" != '.' 
]; then + if [ -n "$bin_dir" -a "$bin_dir" != '.' -a \ + "$bin_dir" != "$DATA_DIR" ] + then tar $tar_options "$BINLOG_TAR_FILE" \ -C "$bin_dir" "$bin_base" >&2 else @@ -872,7 +880,7 @@ EOF binlog_cd=0 # Change the directory to binlog base (if possible): if [ -n "$binlog_dir" -a "$binlog_dir" != '.' -a \ - -d "$binlog_dir" ] + "$binlog_dir" != "$DATA_DIR" -a -d "$binlog_dir" ] then binlog_cd=1 cd "$binlog_dir" @@ -891,11 +899,15 @@ EOF tmpfile=$(TMPDIR="$tmpdir"; mktemp) fi index_dir=$(dirname "$binlog_index"); - if [ -n "$index_dir" -a "$index_dir" != '.' ]; then + if [ -n "$index_dir" -a "$index_dir" != '.' -a \ + "$index_dir" != "$DATA_DIR" ] + then [ ! -d "$index_dir" ] && mkdir -p "$index_dir" fi binlog_cd=0 - if [ -n "$binlog_dir" -a "$binlog_dir" != '.' ]; then + if [ -n "$binlog_dir" -a "$binlog_dir" != '.' -a \ + "$binlog_dir" != "$DATA_DIR" ] + then [ ! -d "$binlog_dir" ] && mkdir -p "$binlog_dir" binlog_cd=1 cd "$binlog_dir" -- cgit v1.2.1 From b84f3fa769228c4ffd367bc8f2426e6e912325a5 Mon Sep 17 00:00:00 2001 From: Julius Goryavsky Date: Tue, 13 Dec 2022 14:59:24 +0100 Subject: MDEV-30157: Galera SST doesn't properly handle undo* files from innodb This fix adds separate handling for "undo*" files that contain undo logs as part of innodb files and adds a filter for undo* to the main filter used when initially transferring files with rsync. --- scripts/wsrep_sst_rsync.sh | 35 ++++++++++++++++++++++++++++++----- 1 file changed, 30 insertions(+), 5 deletions(-) diff --git a/scripts/wsrep_sst_rsync.sh b/scripts/wsrep_sst_rsync.sh index ee3e848025b..123b0ab04dc 100644 --- a/scripts/wsrep_sst_rsync.sh +++ b/scripts/wsrep_sst_rsync.sh @@ -549,7 +549,10 @@ FILTER="-f '- /lost+found' -f '+ /wsrep_sst_binlog.tar' -f '- $ib_home_dir/ib_lru_dump' -f '- $ib_home_dir/ibdata*' - -f '+ $ib_undo_dir/undo*' + -f '- $ib_undo_dir/undo*' + -f '- $ib_log_dir/ib_logfile[0-9]*' + -f '- $ib_log_dir/aria_log_control' + -f '- $ib_log_dir/aria_log.*' -f '+ /*/' -f '- /*'" @@ -610,24 +613,44 @@ FILTER="-f '- /lost+found' wsrep_log_info "Transfer of InnoDB and Aria log files done" + # third, we transfer InnoDB undo logs + rsync ${STUNNEL:+--rsh="$STUNNEL"} \ + --owner --group --perms --links --specials \ + --ignore-times --inplace --dirs --delete --quiet \ + $WHOLE_FILE_OPT -f '+ /undo*' \ + -f '- **' "$ib_undo_dir/" \ + "rsync://$WSREP_SST_OPT_ADDR-undo_dir" >&2 || RC=$? + + if [ $RC -ne 0 ]; then + wsrep_log_error "rsync innodb_undo_dir returned code $RC:" + exit 255 # unknown error + fi + + wsrep_log_info "Transfer of InnoDB undo logs done" + # then, we parallelize the transfer of database directories, # use '.' so that path concatenation works: - cd "$DATA" - backup_threads=$(parse_cnf '--mysqld|sst' 'backup-threads') if [ -z "$backup_threads" ]; then get_proc backup_threads=$nproc fi + cd "$DATA" + find . -maxdepth 1 -mindepth 1 -type d -not -name 'lost+found' \ -not -name '.zfs' -print0 | xargs -I{} -0 -P $backup_threads \ rsync ${STUNNEL:+--rsh="$STUNNEL"} \ --owner --group --perms --links --specials --ignore-times \ --inplace --recursive --delete --quiet $WHOLE_FILE_OPT \ - --exclude '*/ib_logfile*' --exclude '*/aria_log.*' \ - --exclude '*/aria_log_control' "$WSREP_SST_OPT_DATA/{}/" \ + -f '- $ib_home_dir/ib_lru_dump' \ + -f '- $ib_home_dir/ibdata*' \ + -f '- $ib_undo_dir/undo*' \ + -f '- $ib_log_dir/ib_logfile[0-9]*' \ + -f '- $ib_log_dir/aria_log_control' \ + -f '- $ib_log_dir/aria_log.*' \ + "$WSREP_SST_OPT_DATA/{}/" \ "rsync://$WSREP_SST_OPT_ADDR/{}" >&2 || RC=$? 
cd "$OLD_PWD" @@ -715,6 +738,8 @@ $SILENT path = $ib_log_dir [$MODULE-data_dir] path = $ib_home_dir +[$MODULE-undo_dir] + path = $ib_undo_dir EOF # If the IP is local, listen only on it: -- cgit v1.2.1 From 53c4be7bc02949e178b31fd89c4b196dd7f24bd0 Mon Sep 17 00:00:00 2001 From: Julius Goryavsky Date: Tue, 13 Dec 2022 15:44:24 +0100 Subject: MDEV-30220: rsync SST completely ignores aria-log-dir-path This commit adds support for the --aria-log-dir-path option on the command line and for the aria-log-dir-path option in the configuration file to the SST scripts, since before this change these parameters were completely ignored during SST - SST scripts assumed that aria logs files are always located in the same directory as logs for innodb. Tests for this change will be added as a separate commit, along with tests for MDEV-30157 and MDEV-28669. --- scripts/wsrep_sst_common.sh | 19 +++++++++++++++ scripts/wsrep_sst_mariabackup.sh | 21 +++++++++++++++++ scripts/wsrep_sst_rsync.sh | 51 +++++++++++++++++++++++++++++++++------- 3 files changed, 83 insertions(+), 8 deletions(-) diff --git a/scripts/wsrep_sst_common.sh b/scripts/wsrep_sst_common.sh index 407a4c9dd24..4e177073872 100644 --- a/scripts/wsrep_sst_common.sh +++ b/scripts/wsrep_sst_common.sh @@ -134,6 +134,7 @@ WSREP_SST_OPT_ADDR="" WSREP_SST_OPT_ADDR_PORT="" WSREP_SST_OPT_HOST="" WSREP_SST_OPT_HOST_UNESCAPED="" +ARIA_LOG_DIR="" INNODB_DATA_HOME_DIR=$(trim_dir "${INNODB_DATA_HOME_DIR:-}") INNODB_LOG_GROUP_HOME=$(trim_dir "${INNODB_LOG_GROUP_HOME:-}") INNODB_UNDO_DIR=$(trim_dir "${INNODB_UNDO_DIR:-}") @@ -234,6 +235,11 @@ case "$1" in readonly WSREP_SST_OPT_DATA=$(trim_dir "$2") shift ;; + '--aria-log-dir-path') + # Let's remove the trailing slash: + readonly ARIA_LOG_DIR=$(trim_dir "$2") + shift + ;; '--innodb-data-home-dir') # Let's remove the trailing slash: readonly INNODB_DATA_HOME_DIR=$(trim_dir "$2") @@ -499,6 +505,12 @@ case "$1" in # from mysqld's argument list: skip_mysqld_arg=0 case "$option" in + '--aria-log-dir-path') + if [ -z "$ARIA_LOG_DIR" ]; then + MYSQLD_OPT_ARIA_LOG_DIR=$(trim_dir "$value") + fi + skip_mysqld_arg=1 + ;; '--innodb-data-home-dir') if [ -z "$INNODB_DATA_HOME_DIR" ]; then MYSQLD_OPT_INNODB_DATA_HOME_DIR=$(trim_dir "$value") @@ -592,6 +604,10 @@ readonly WSREP_SST_OPT_PROGRESS # The same argument can be present on the command line several # times, in this case we must take its last value: +if [ -n "${MYSQLD_OPT_ARIA_LOG_DIR:-}" -a \ + -z "$ARIA_LOG_DIR" ]; then + readonly ARIA_LOG_DIR="$MYSQLD_OPT_ARIA_LOG_DIR" +fi if [ -n "${MYSQLD_OPT_INNODB_DATA_HOME_DIR:-}" -a \ -z "$INNODB_DATA_HOME_DIR" ]; then readonly INNODB_DATA_HOME_DIR="$MYSQLD_OPT_INNODB_DATA_HOME_DIR" @@ -649,6 +665,9 @@ if [ -n "$WSREP_SST_OPT_LOG_BASENAME" ]; then WSREP_SST_OPT_MYSQLD="--log-basename='$WSREP_SST_OPT_LOG_BASENAME'" fi fi +if [ -n "$ARIA_LOG_DIR" ]; then + INNOEXTRA="$INNOEXTRA --aria-log-dir-path='$ARIA_LOG_DIR'" +fi if [ -n "$INNODB_DATA_HOME_DIR" ]; then INNOEXTRA="$INNOEXTRA --innodb-data-home-dir='$INNODB_DATA_HOME_DIR'" fi diff --git a/scripts/wsrep_sst_mariabackup.sh b/scripts/wsrep_sst_mariabackup.sh index 7465b2a3b08..7e26af83701 100644 --- a/scripts/wsrep_sst_mariabackup.sh +++ b/scripts/wsrep_sst_mariabackup.sh @@ -55,6 +55,7 @@ speciald=1 ib_home_dir="" ib_log_dir="" ib_undo_dir="" +ar_log_dir="" sfmt="" strmcmd="" @@ -1271,6 +1272,24 @@ else # joiner [ "$ib_undo_dir" = "$DATA_DIR" ] && ib_undo_dir="" fi + # if no command line argument then try to get it from the my.cnf: + if [ -z "$ARIA_LOG_DIR" ]; then + 
ARIA_LOG_DIR=$(parse_cnf '--mysqld' 'aria-log-dir-path') + ARIA_LOG_DIR=$(trim_dir "$ARIA_LOG_DIR") + fi + + if [ -n "$ARIA_LOG_DIR" -a "$ARIA_LOG_DIR" != '.' -a \ + "$ARIA_LOG_DIR" != "$DATA_DIR" ] + then + # handle both relative and absolute paths: + cd "$DATA" + [ ! -d "$ARIA_LOG_DIR" ] && mkdir -p "$ARIA_LOG_DIR" + cd "$ARIA_LOG_DIR" + ar_log_dir="$(pwd)" + cd "$OLD_PWD" + [ "$ar_log_dir" = "$DATA_DIR" ] && ar_log_dir="" + fi + if [ -n "$backup_threads" ]; then impts="--parallel=$backup_threads${impts:+ }$impts" fi @@ -1410,12 +1429,14 @@ else # joiner find -E ${ib_home_dir:+"$ib_home_dir"} \ ${ib_undo_dir:+"$ib_undo_dir"} \ ${ib_log_dir:+"$ib_log_dir"} \ + ${ar_log_dir:+"$ar_log_dir"} \ "$DATA" -mindepth 1 -prune -regex "$cpat" \ -o -exec rm -rf {} >&2 \+ else find ${ib_home_dir:+"$ib_home_dir"} \ ${ib_undo_dir:+"$ib_undo_dir"} \ ${ib_log_dir:+"$ib_log_dir"} \ + ${ar_log_dir:+"$ar_log_dir"} \ "$DATA" -mindepth 1 -prune -regex "$cpat" \ -o -exec rm -rf {} >&2 \+ fi diff --git a/scripts/wsrep_sst_rsync.sh b/scripts/wsrep_sst_rsync.sh index 123b0ab04dc..5279929c5b0 100644 --- a/scripts/wsrep_sst_rsync.sh +++ b/scripts/wsrep_sst_rsync.sh @@ -174,6 +174,7 @@ cd "$OLD_PWD" BINLOG_TAR_FILE="$DATA_DIR/wsrep_sst_binlog.tar" +ar_log_dir="$DATA_DIR" ib_log_dir="$DATA_DIR" ib_home_dir="$DATA_DIR" ib_undo_dir="$DATA_DIR" @@ -232,6 +233,23 @@ then cd "$OLD_PWD" fi +# if no command line argument then try to get it from the my.cnf: +if [ -z "$ARIA_LOG_DIR" ]; then + ARIA_LOG_DIR=$(parse_cnf '--mysqld' 'aria-log-dir-path') + ARIA_LOG_DIR=$(trim_dir "$ARIA_LOG_DIR") +fi + +if [ -n "$ARIA_LOG_DIR" -a "$ARIA_LOG_DIR" != '.' -a \ + "$ARIA_LOG_DIR" != "$DATA_DIR" ] +then + # handle both relative and absolute paths: + cd "$DATA" + [ ! -d "$ARIA_LOG_DIR" ] && mkdir -p "$ARIA_LOG_DIR" + cd "$ARIA_LOG_DIR" + ar_log_dir="$(pwd)" + cd "$OLD_PWD" +fi + encgroups='--mysqld|sst' check_server_ssl_config @@ -551,8 +569,8 @@ FILTER="-f '- /lost+found' -f '- $ib_home_dir/ibdata*' -f '- $ib_undo_dir/undo*' -f '- $ib_log_dir/ib_logfile[0-9]*' - -f '- $ib_log_dir/aria_log_control' - -f '- $ib_log_dir/aria_log.*' + -f '- $ar_log_dir/aria_log_control' + -f '- $ar_log_dir/aria_log.*' -f '+ /*/' -f '- /*'" @@ -598,12 +616,12 @@ FILTER="-f '- /lost+found' wsrep_log_info "Transfer of InnoDB data files done" - # second, we transfer InnoDB and Aria log files + # second, we transfer InnoDB log files rsync ${STUNNEL:+--rsh="$STUNNEL"} \ --owner --group --perms --links --specials \ --ignore-times --inplace --dirs --delete --quiet \ - $WHOLE_FILE_OPT -f '+ /ib_logfile[0-9]*' -f '+ /aria_log.*' \ - -f '+ /aria_log_control' -f '- **' "$ib_log_dir/" \ + $WHOLE_FILE_OPT -f '+ /ib_logfile[0-9]*' \ + -f '- **' "$ib_log_dir/" \ "rsync://$WSREP_SST_OPT_ADDR-log_dir" >&2 || RC=$? if [ $RC -ne 0 ]; then @@ -611,7 +629,7 @@ FILTER="-f '- /lost+found' exit 255 # unknown error fi - wsrep_log_info "Transfer of InnoDB and Aria log files done" + wsrep_log_info "Transfer of InnoDB log files done" # third, we transfer InnoDB undo logs rsync ${STUNNEL:+--rsh="$STUNNEL"} \ @@ -628,6 +646,21 @@ FILTER="-f '- /lost+found' wsrep_log_info "Transfer of InnoDB undo logs done" + # fourth, we transfer Aria logs + rsync ${STUNNEL:+--rsh="$STUNNEL"} \ + --owner --group --perms --links --specials \ + --ignore-times --inplace --dirs --delete --quiet \ + $WHOLE_FILE_OPT -f '+ /aria_log_control' -f '+ /aria_log.*' \ + -f '- **' "$ar_log_dir/" \ + "rsync://$WSREP_SST_OPT_ADDR-aria_log" >&2 || RC=$? 
+ + if [ $RC -ne 0 ]; then + wsrep_log_error "rsync aria_log_dir_path returned code $RC:" + exit 255 # unknown error + fi + + wsrep_log_info "Transfer of Aria logs done" + # then, we parallelize the transfer of database directories, # use '.' so that path concatenation works: @@ -648,8 +681,8 @@ FILTER="-f '- /lost+found' -f '- $ib_home_dir/ibdata*' \ -f '- $ib_undo_dir/undo*' \ -f '- $ib_log_dir/ib_logfile[0-9]*' \ - -f '- $ib_log_dir/aria_log_control' \ - -f '- $ib_log_dir/aria_log.*' \ + -f '- $ar_log_dir/aria_log_control' \ + -f '- $ar_log_dir/aria_log.*' \ "$WSREP_SST_OPT_DATA/{}/" \ "rsync://$WSREP_SST_OPT_ADDR/{}" >&2 || RC=$? @@ -740,6 +773,8 @@ $SILENT path = $ib_home_dir [$MODULE-undo_dir] path = $ib_undo_dir +[$MODULE-aria_log] + path = $ar_log_dir EOF # If the IP is local, listen only on it: -- cgit v1.2.1 From b194c83b7bd2f4e861fdce041a9ea3de18f4d227 Mon Sep 17 00:00:00 2001 From: Brandon Nesterenko Date: Thu, 3 Jun 2021 11:24:34 -0600 Subject: MDEV-25277: mysqlbinlog --verbose cannot read row events with compressed columns: Don't know how to handle column type: 140 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Problem: ======= Mysqlbinlog cannot show the type of a compressed column when two levels of verbosity is provided. Solution: ======== Extend the log event printing logic to handle and tag compressed types. Behavioral Changes: ================== Old: When mysqlbinlog is called in verbose mode and the database uses compressed columns, an error is returned to the user. New: The output will append “ COMPRESSED” on the type of compressed columns Reviewed By =========== Andrei Elkin --- .../r/binlog_verbose_compressed_fields.result | 15 +++++ .../binlog/t/binlog_verbose_compressed_fields.test | 70 ++++++++++++++++++++++ sql/log_event.cc | 17 ++++-- 3 files changed, 97 insertions(+), 5 deletions(-) create mode 100644 mysql-test/suite/binlog/r/binlog_verbose_compressed_fields.result create mode 100644 mysql-test/suite/binlog/t/binlog_verbose_compressed_fields.test diff --git a/mysql-test/suite/binlog/r/binlog_verbose_compressed_fields.result b/mysql-test/suite/binlog/r/binlog_verbose_compressed_fields.result new file mode 100644 index 00000000000..6ee11754935 --- /dev/null +++ b/mysql-test/suite/binlog/r/binlog_verbose_compressed_fields.result @@ -0,0 +1,15 @@ +CREATE TABLE t1 (a TEXT, ac TEXT COMPRESSED, b TINYTEXT, bc TINYTEXT COMPRESSED, c MEDIUMTEXT, cc MEDIUMTEXT COMPRESSED, d LONGTEXT, dc LONGTEXT COMPRESSED, e VARCHAR(10), ec VARCHAR(10) COMPRESSED); +# Isolate row event into its own binary log +FLUSH BINARY LOGS; +INSERT INTO t1 VALUES ('mya', 'myac', 'myb', 'mybc', 'myc', 'mycc', 'myd', 'mydc', 'mye', 'myec'); +FLUSH BINARY LOGS; +# MYSQLBINLOG --base64-output=decode-rows -vv datadir/binlog_file --result-file=result_binlog +include/assert_grep.inc [Ensure compressed TEXT fields are annotated correctly] +include/assert_grep.inc [Ensure compressed TINYTEXT fields are annotated correctly] +include/assert_grep.inc [Ensure compressed MEDIUMTEXT fields are annotated correctly] +include/assert_grep.inc [Ensure compressed LONGTEXT fields are annotated correctly] +include/assert_grep.inc [Ensure compressed VARSTRING fields are annotated correctly] +include/assert_grep.inc [Ensure COMPRESSED only shows up for corresponding fields] +include/assert_grep.inc [Ensure non-compressed TEXT fields are annotated correctly] +include/assert_grep.inc [Ensure non-compressed VARSTRING fields are annotated correctly] +DROP TABLE t1; diff --git 
a/mysql-test/suite/binlog/t/binlog_verbose_compressed_fields.test b/mysql-test/suite/binlog/t/binlog_verbose_compressed_fields.test new file mode 100644 index 00000000000..8cbcdbef601 --- /dev/null +++ b/mysql-test/suite/binlog/t/binlog_verbose_compressed_fields.test @@ -0,0 +1,70 @@ +# +# Purpose: +# This test validates that mysqlbinlog is able to annotate compressed column +# types with two levels of verbosity. +# +# Methodology: +# Validate that the output from mysqlbinlog -vv after creating and inserting +# into a table with compressed and uncompressed fields correctly annotates +# which columns are compressed +# +# References: +# MDEV-25277: mysqlbinlog --verbose cannot read row events with compressed +# columns: Don't know how to handle column type: 140 +# +--source include/have_binlog_format_row.inc + +CREATE TABLE t1 (a TEXT, ac TEXT COMPRESSED, b TINYTEXT, bc TINYTEXT COMPRESSED, c MEDIUMTEXT, cc MEDIUMTEXT COMPRESSED, d LONGTEXT, dc LONGTEXT COMPRESSED, e VARCHAR(10), ec VARCHAR(10) COMPRESSED); + +--echo # Isolate row event into its own binary log +FLUSH BINARY LOGS; +INSERT INTO t1 VALUES ('mya', 'myac', 'myb', 'mybc', 'myc', 'mycc', 'myd', 'mydc', 'mye', 'myec'); +FLUSH BINARY LOGS; + +--let $binlog_file= query_get_value(SHOW BINARY LOGS, Log_name, 2) +--let $datadir= `SELECT @@datadir` +--let $result_binlog= $MYSQLTEST_VARDIR/tmp/$binlog_file + +--echo # MYSQLBINLOG --base64-output=decode-rows -vv datadir/binlog_file --result-file=result_binlog +--exec $MYSQL_BINLOG --base64-output=decode-rows -vv $datadir/$binlog_file --result-file=$result_binlog + +--let $assert_file= $result_binlog +--let $assert_count= 1 + +--let $assert_text= Ensure compressed TEXT fields are annotated correctly +--let $assert_select=\WTEXT COMPRESSED +--source include/assert_grep.inc + +--let $assert_text= Ensure compressed TINYTEXT fields are annotated correctly +--let $assert_select=\WTINYTEXT COMPRESSED +--source include/assert_grep.inc + +--let $assert_text= Ensure compressed MEDIUMTEXT fields are annotated correctly +--let $assert_select=\WMEDIUMTEXT COMPRESSED +--source include/assert_grep.inc + +--let $assert_text= Ensure compressed LONGTEXT fields are annotated correctly +--let $assert_select=\WLONGTEXT COMPRESSED +--source include/assert_grep.inc + +--let $assert_text= Ensure compressed VARSTRING fields are annotated correctly +--let $assert_select=\WVARSTRING\(\d+\) COMPRESSED +--source include/assert_grep.inc + +--let $assert_text= Ensure COMPRESSED only shows up for corresponding fields +--let $assert_count= 5 +--let $assert_select= COMPRESSED +--source include/assert_grep.inc + +--let $assert_text= Ensure non-compressed TEXT fields are annotated correctly +--let $assert_count= 8 +--let $assert_select=/*.*TEXT +--source include/assert_grep.inc + +--let $assert_text= Ensure non-compressed VARSTRING fields are annotated correctly +--let $assert_count= 2 +--let $assert_select=/*.*VARSTRING +--source include/assert_grep.inc + +# Cleanup +DROP TABLE t1; diff --git a/sql/log_event.cc b/sql/log_event.cc index a5a31d57ed2..b2b1adc1558 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -2986,10 +2986,12 @@ log_event_print_value(IO_CACHE *file, PRINT_EVENT_INFO *print_event_info, my_b_write_bit(file, ptr , (meta & 0xFF) * 8); return meta & 0xFF; + case MYSQL_TYPE_BLOB_COMPRESSED: case MYSQL_TYPE_BLOB: switch (meta) { case 1: - strmake(typestr, "TINYBLOB/TINYTEXT", typestr_length); + my_snprintf(typestr, typestr_length, "TINYBLOB/TINYTEXT%s", + type == MYSQL_TYPE_BLOB_COMPRESSED ? 
" COMPRESSED" : ""); if (!ptr) goto return_null; @@ -2997,7 +2999,8 @@ log_event_print_value(IO_CACHE *file, PRINT_EVENT_INFO *print_event_info, my_b_write_quoted(file, ptr + 1, length); return length + 1; case 2: - strmake(typestr, "BLOB/TEXT", typestr_length); + my_snprintf(typestr, typestr_length, "BLOB/TEXT%s", + type == MYSQL_TYPE_BLOB_COMPRESSED ? " COMPRESSED" : ""); if (!ptr) goto return_null; @@ -3005,7 +3008,8 @@ log_event_print_value(IO_CACHE *file, PRINT_EVENT_INFO *print_event_info, my_b_write_quoted(file, ptr + 2, length); return length + 2; case 3: - strmake(typestr, "MEDIUMBLOB/MEDIUMTEXT", typestr_length); + my_snprintf(typestr, typestr_length, "MEDIUMBLOB/MEDIUMTEXT%s", + type == MYSQL_TYPE_BLOB_COMPRESSED ? " COMPRESSED" : ""); if (!ptr) goto return_null; @@ -3013,7 +3017,8 @@ log_event_print_value(IO_CACHE *file, PRINT_EVENT_INFO *print_event_info, my_b_write_quoted(file, ptr + 3, length); return length + 3; case 4: - strmake(typestr, "LONGBLOB/LONGTEXT", typestr_length); + my_snprintf(typestr, typestr_length, "LONGBLOB/LONGTEXT%s", + type == MYSQL_TYPE_BLOB_COMPRESSED ? " COMPRESSED" : ""); if (!ptr) goto return_null; @@ -3025,10 +3030,12 @@ log_event_print_value(IO_CACHE *file, PRINT_EVENT_INFO *print_event_info, return 0; } + case MYSQL_TYPE_VARCHAR_COMPRESSED: case MYSQL_TYPE_VARCHAR: case MYSQL_TYPE_VAR_STRING: length= meta; - my_snprintf(typestr, typestr_length, "VARSTRING(%d)", length); + my_snprintf(typestr, typestr_length, "VARSTRING(%d)%s", length, + type == MYSQL_TYPE_VARCHAR_COMPRESSED ? " COMPRESSED" : ""); if (!ptr) goto return_null; -- cgit v1.2.1 From f3d8a546e7476ddec8386eacd5c58bf1c1519377 Mon Sep 17 00:00:00 2001 From: Monty Date: Wed, 11 Jan 2023 20:15:26 +0200 Subject: MDEV-30345 DML does not find rows it is supposed to This only happens with 'timestamp_column IN (select ...) 
The reason was a missing assignment in Item_cache_timestamp::cache_value() --- mysql-test/main/cache_temporal_4265.result | 18 ++++++++++++++++++ mysql-test/main/cache_temporal_4265.test | 18 ++++++++++++++++++ sql/item.cc | 4 ++-- 3 files changed, 38 insertions(+), 2 deletions(-) diff --git a/mysql-test/main/cache_temporal_4265.result b/mysql-test/main/cache_temporal_4265.result index 1cda7004a6f..5c648c8de53 100644 --- a/mysql-test/main/cache_temporal_4265.result +++ b/mysql-test/main/cache_temporal_4265.result @@ -20,3 +20,21 @@ select (select max(m2.ut) from t1 m2 where m1.id <> 0) from t1 m1; 2001-01-01 00:00:00.200000 2001-01-01 00:00:00.200000 drop table t1; +# +# MDEV-30345 DML does not find rows it is supposed to +# +CREATE TABLE t1 (f timestamp); +INSERT INTO t1 VALUES ('2022-01-01 00:00:00'),('2022-12-12 12:12:12'); +CREATE TABLE t2 (f timestamp); +INSERT INTO t2 VALUES ('2022-01-01 00:00:00'),('2022-12-12 12:12:12'); +SELECT * FROM t2 WHERE f IN ( SELECT f FROM t1 ) ; +f +2022-01-01 00:00:00 +2022-12-12 12:12:12 +DELETE FROM t2 WHERE f IN ( SELECT f FROM t1 ) ; +SELECT * FROM t2 WHERE f IN ( SELECT f FROM t1 ) ; +f +DROP TABLE t1,t2; +# +# End of 10.4 tests +# diff --git a/mysql-test/main/cache_temporal_4265.test b/mysql-test/main/cache_temporal_4265.test index 1af683c617b..1dfd57b0c8d 100644 --- a/mysql-test/main/cache_temporal_4265.test +++ b/mysql-test/main/cache_temporal_4265.test @@ -20,3 +20,21 @@ select * from t1; select (select max(m2.ut) from t1 m2 where m1.id <> 0) from t1 m1; drop table t1; +--echo # +--echo # MDEV-30345 DML does not find rows it is supposed to +--echo # + +CREATE TABLE t1 (f timestamp); +INSERT INTO t1 VALUES ('2022-01-01 00:00:00'),('2022-12-12 12:12:12'); + +CREATE TABLE t2 (f timestamp); +INSERT INTO t2 VALUES ('2022-01-01 00:00:00'),('2022-12-12 12:12:12'); + +SELECT * FROM t2 WHERE f IN ( SELECT f FROM t1 ) ; +DELETE FROM t2 WHERE f IN ( SELECT f FROM t1 ) ; +SELECT * FROM t2 WHERE f IN ( SELECT f FROM t1 ) ; +DROP TABLE t1,t2; + +--echo # +--echo # End of 10.4 tests +--echo # diff --git a/sql/item.cc b/sql/item.cc index 3c1b0977843..10ad6e31dec 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -10225,8 +10225,8 @@ bool Item_cache_timestamp::cache_value() if (!example) return false; value_cached= true; - null_value= example->val_native_with_conversion_result(current_thd, &m_native, - type_handler()); + null_value_inside= null_value= + example->val_native_with_conversion_result(current_thd, &m_native, type_handler()); return true; } -- cgit v1.2.1 From c08dba7b2864913bbf6fc36307837498f7def44a Mon Sep 17 00:00:00 2001 From: Monty Date: Thu, 12 Jan 2023 02:48:33 +0200 Subject: Fixed failing test main.func_json --- mysql-test/main/func_json.result | 1 + mysql-test/main/func_json.test | 1 + 2 files changed, 2 insertions(+) diff --git a/mysql-test/main/func_json.result b/mysql-test/main/func_json.result index 6ea8c78b2b6..a8f97d087e8 100644 --- a/mysql-test/main/func_json.result +++ b/mysql-test/main/func_json.result @@ -1259,3 +1259,4 @@ JSON_DETAILED(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) select JSON_LOOSE(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200; JSON_LOOSE(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) [{"range_scan_alternatives": [{"index": "a_b", "ranges": ["2 <= a <= 2 AND 4 <= b <= 4", "123"], "rowid_ordered": true, "using_mrr": false, "index_only": true, "rows": 1, "cost": 1.1752, "chosen": true}], "analyzing_roworder_intersect": {"cause": "too few roworder scans"}, "analyzing_index_merge_union": [], 
"test_one_line_array": ["123"]}] +drop table t200; diff --git a/mysql-test/main/func_json.test b/mysql-test/main/func_json.test index 6cfcefbee3d..ffe7575310f 100644 --- a/mysql-test/main/func_json.test +++ b/mysql-test/main/func_json.test @@ -818,3 +818,4 @@ insert into t200 values select JSON_DETAILED(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200; select JSON_LOOSE(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200; +drop table t200; -- cgit v1.2.1 From eb145e5ad7afa29f1d298452b80fcca36a6c3bbe Mon Sep 17 00:00:00 2001 From: lilinjie Date: Thu, 12 Jan 2023 10:38:38 +0800 Subject: fix typos Signed-off-by: lilinjie --- scripts/mysqlaccess.sh | 2 +- sql/opt_range.h | 8 ++++---- sql/sql_cache.h | 2 +- sql/sys_vars.cc | 2 +- storage/perfschema/pfs_timer.cc | 4 ++-- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/scripts/mysqlaccess.sh b/scripts/mysqlaccess.sh index aaf711b0fb9..83118a8fdc6 100644 --- a/scripts/mysqlaccess.sh +++ b/scripts/mysqlaccess.sh @@ -584,7 +584,7 @@ if ($MySQLaccess::CGI) { #CGI-version # ---------------------- # brief and table-format # exclude each-other -# table-format is prefered +# table-format is preferred if (defined($Param{'table'})) { undef($Param{'brief'}); } if (defined($Param{'preview'}) or defined($Param{'copy'}) or diff --git a/sql/opt_range.h b/sql/opt_range.h index 84b1cd08ba8..fd883547cb3 100644 --- a/sql/opt_range.h +++ b/sql/opt_range.h @@ -711,7 +711,7 @@ class QUICK_RANGE :public Sql_alloc { } /** - Initalizes a key_range object for communication with storage engine. + Initializes a key_range object for communication with storage engine. This function facilitates communication with the Storage Engine API by translating the minimum endpoint of the interval represented by this @@ -732,7 +732,7 @@ class QUICK_RANGE :public Sql_alloc { } /** - Initalizes a key_range object for communication with storage engine. + Initializes a key_range object for communication with storage engine. This function facilitates communication with the Storage Engine API by translating the minimum endpoint of the interval represented by this @@ -749,7 +749,7 @@ class QUICK_RANGE :public Sql_alloc { } /** - Initalizes a key_range object for communication with storage engine. + Initializes a key_range object for communication with storage engine. This function facilitates communication with the Storage Engine API by translating the maximum endpoint of the interval represented by this @@ -770,7 +770,7 @@ class QUICK_RANGE :public Sql_alloc { } /** - Initalizes a key_range object for communication with storage engine. + Initializes a key_range object for communication with storage engine. This function facilitates communication with the Storage Engine API by translating the maximum endpoint of the interval represented by this diff --git a/sql/sql_cache.h b/sql/sql_cache.h index d59bc37b7a3..d89bcda2491 100644 --- a/sql/sql_cache.h +++ b/sql/sql_cache.h @@ -32,7 +32,7 @@ typedef struct st_changed_table_list CHANGED_TABLE_LIST; Can't create new free memory block if unused memory in block less then QUERY_CACHE_MIN_ALLOCATION_UNIT. 
if QUERY_CACHE_MIN_ALLOCATION_UNIT == 0 then - QUERY_CACHE_MIN_ALLOCATION_UNIT choosed automaticaly + QUERY_CACHE_MIN_ALLOCATION_UNIT choosed automatically */ #define QUERY_CACHE_MIN_ALLOCATION_UNIT 512 diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index d5164587900..44bea7b4581 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -5430,7 +5430,7 @@ static bool update_wsrep_auto_increment_control (sys_var *self, THD *thd, enum_v { /* The variables that control auto increment shall be calculated - automaticaly based on the size of the cluster. This usually done + automatically based on the size of the cluster. This usually done within the wsrep_view_handler_cb callback. However, if the user manually sets the value of wsrep_auto_increment_control to 'ON', then we should to re-calculate these variables again (because diff --git a/storage/perfschema/pfs_timer.cc b/storage/perfschema/pfs_timer.cc index 8533dffcb27..ea39e684110 100644 --- a/storage/perfschema/pfs_timer.cc +++ b/storage/perfschema/pfs_timer.cc @@ -166,7 +166,7 @@ void init_timers(void) /* For STAGE and STATEMENT, a timer with a fixed frequency is better. - The prefered timer is nanosecond, or lower resolutions. + The preferred timer is nanosecond, or lower resolutions. */ if (nanosec_to_pico != 0) @@ -203,7 +203,7 @@ void init_timers(void) /* For IDLE, a timer with a fixed frequency is critical, as the CPU clock may slow down a lot if the server is completely idle. - The prefered timer is microsecond, or lower resolutions. + The preferred timer is microsecond, or lower resolutions. */ if (microsec_to_pico != 0) -- cgit v1.2.1 From 17920291a661d3d2b02a9261293caebc7158d38a Mon Sep 17 00:00:00 2001 From: Rucha Deodhar Date: Thu, 12 Jan 2023 20:11:19 +0530 Subject: fixup for MDEV-19160 --- mysql-test/main/func_json.result | 6 +++--- mysql-test/main/func_json.test | 9 ++++----- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/mysql-test/main/func_json.result b/mysql-test/main/func_json.result index a8f97d087e8..400ecdbfc36 100644 --- a/mysql-test/main/func_json.result +++ b/mysql-test/main/func_json.result @@ -1131,9 +1131,6 @@ SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a=2) FROM t1))='{" a DROP TABLE t1; # -# End of 10.4 tests -# -# # MDEV-19160 JSON_DETAILED output unnecessarily verbose # create table t200 (a text); @@ -1260,3 +1257,6 @@ select JSON_LOOSE(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200 JSON_LOOSE(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) [{"range_scan_alternatives": [{"index": "a_b", "ranges": ["2 <= a <= 2 AND 4 <= b <= 4", "123"], "rowid_ordered": true, "using_mrr": false, "index_only": true, "rows": 1, "cost": 1.1752, "chosen": true}], "analyzing_roworder_intersect": {"cause": "too few roworder scans"}, "analyzing_index_merge_union": [], "test_one_line_array": ["123"]}] drop table t200; +# +# End of 10.4 tests +# diff --git a/mysql-test/main/func_json.test b/mysql-test/main/func_json.test index ffe7575310f..743f97fedc1 100644 --- a/mysql-test/main/func_json.test +++ b/mysql-test/main/func_json.test @@ -715,11 +715,6 @@ SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a=2) FROM t1))='{" DROP TABLE t1; ---echo # ---echo # End of 10.4 tests ---echo # - - --echo # --echo # MDEV-19160 JSON_DETAILED output unnecessarily verbose --echo # @@ -819,3 +814,7 @@ insert into t200 values select JSON_DETAILED(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200; select JSON_LOOSE(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200; 
drop table t200; + +--echo # +--echo # End of 10.4 tests +--echo # -- cgit v1.2.1 From 12618cfb28cb2843dc74bb3a176dae76acdb698a Mon Sep 17 00:00:00 2001 From: Weijun-H Date: Sun, 8 Jan 2023 08:43:02 +0000 Subject: MDEV-19160 json_pretty() alias for json_detailed() --- mysql-test/main/func_json.result | 31 +++++++++++++++++++++++++++++++ mysql-test/main/func_json.test | 1 + sql/item_create.cc | 1 + 3 files changed, 33 insertions(+) diff --git a/mysql-test/main/func_json.result b/mysql-test/main/func_json.result index 400ecdbfc36..1e59256b517 100644 --- a/mysql-test/main/func_json.result +++ b/mysql-test/main/func_json.result @@ -1253,6 +1253,37 @@ JSON_DETAILED(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) ["123"] } ] +select JSON_PRETTY(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200; +JSON_PRETTY(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) +[ + { + "range_scan_alternatives": + [ + { + "index": "a_b", + "ranges": + [ + "2 <= a <= 2 AND 4 <= b <= 4", + "123" + ], + "rowid_ordered": true, + "using_mrr": false, + "index_only": true, + "rows": 1, + "cost": 1.1752, + "chosen": true + } + ], + "analyzing_roworder_intersect": + { + "cause": "too few roworder scans" + }, + "analyzing_index_merge_union": + [], + "test_one_line_array": + ["123"] + } +] select JSON_LOOSE(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200; JSON_LOOSE(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) [{"range_scan_alternatives": [{"index": "a_b", "ranges": ["2 <= a <= 2 AND 4 <= b <= 4", "123"], "rowid_ordered": true, "using_mrr": false, "index_only": true, "rows": 1, "cost": 1.1752, "chosen": true}], "analyzing_roworder_intersect": {"cause": "too few roworder scans"}, "analyzing_index_merge_union": [], "test_one_line_array": ["123"]}] diff --git a/mysql-test/main/func_json.test b/mysql-test/main/func_json.test index 743f97fedc1..b3e19f76972 100644 --- a/mysql-test/main/func_json.test +++ b/mysql-test/main/func_json.test @@ -812,6 +812,7 @@ insert into t200 values select JSON_DETAILED(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200; +select JSON_PRETTY(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200; select JSON_LOOSE(JSON_EXTRACT(a, '$**.analyzing_range_alternatives')) from t200; drop table t200; diff --git a/sql/item_create.cc b/sql/item_create.cc index a5b882e2b91..2dfbc1d2021 100644 --- a/sql/item_create.cc +++ b/sql/item_create.cc @@ -7351,6 +7351,7 @@ Native_func_registry func_array[] = { { STRING_WITH_LEN("JSON_CONTAINS_PATH") }, BUILDER(Create_func_json_contains_path)}, { { STRING_WITH_LEN("JSON_DEPTH") }, BUILDER(Create_func_json_depth)}, { { STRING_WITH_LEN("JSON_DETAILED") }, BUILDER(Create_func_json_detailed)}, + { { STRING_WITH_LEN("JSON_PRETTY") }, BUILDER(Create_func_json_detailed)}, { { STRING_WITH_LEN("JSON_EXISTS") }, BUILDER(Create_func_json_exists)}, { { STRING_WITH_LEN("JSON_EXTRACT") }, BUILDER(Create_func_json_extract)}, { { STRING_WITH_LEN("JSON_INSERT") }, BUILDER(Create_func_json_insert)}, -- cgit v1.2.1 From 7a98d232e42b66efc759d584b05214e91681c346 Mon Sep 17 00:00:00 2001 From: Nikita Malyavin Date: Wed, 11 Jan 2023 18:57:44 +0300 Subject: MDEV-30378 Versioned REPLACE succeeds with ON DELETE RESTRICT constraint node->is_delete was incorrectly set to NO_DELETE for a set of operations. In general we shouldn't rely on sql_command and look for more abstract ways to control the behavior. trg_event_map seems to be a suitable way. 
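For illustration, statements from the test added below that delete rows even
though the top-level command is not SQLCOM_DELETE, so checking
thd_sql_command() alone missed them:

  REPLACE t0 VALUES (1);
  LOAD DATA INFILE 'load_t0' REPLACE INTO TABLE t0;
  DELETE t0, t2 FROM t0 JOIN t2;

Each of these must be recognized as a versioned delete and hit the
ON DELETE RESTRICT constraint, which is why the handler now consults
trg_event_map.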
To mind replica nodes, it is ORed with slave_fk_event_map, which stores trg_event_map when replica has triggers disabled. --- mysql-test/suite/versioning/r/foreign.result | 39 +++++++++++++++++++++++ mysql-test/suite/versioning/t/foreign.test | 46 ++++++++++++++++++++++++++++ sql/sql_insert.cc | 1 + storage/innobase/handler/ha_innodb.cc | 4 ++- 4 files changed, 89 insertions(+), 1 deletion(-) diff --git a/mysql-test/suite/versioning/r/foreign.result b/mysql-test/suite/versioning/r/foreign.result index 3eb968f1a33..9b5946e5262 100644 --- a/mysql-test/suite/versioning/r/foreign.result +++ b/mysql-test/suite/versioning/r/foreign.result @@ -493,3 +493,42 @@ set foreign_key_checks= on; delete history from t1; delete from t1; drop table t1; +# +# MDEV-30378 Versioned REPLACE succeeds with ON DELETE RESTRICT +# constraint +# +create table t0 (pk integer primary key) with system versioning engine=innodb; +create table t1 (pk integer primary key, +foreign key(pk) references t0(pk) +on delete restrict on update cascade) engine=innodb; +create table t2 (pk integer); +insert into t0 (pk) values (1); +insert into t1 (pk) values (1); +insert into t2 (pk) values (1); +delete from t0; +ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t1`, CONSTRAINT `t1_ibfk_1` FOREIGN KEY (`pk`) REFERENCES `t0` (`pk`) ON UPDATE CASCADE) +replace t0 values (1); +ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t1`, CONSTRAINT `t1_ibfk_1` FOREIGN KEY (`pk`) REFERENCES `t0` (`pk`) ON UPDATE CASCADE) +select * into outfile 'load_t0' from t0 ; +load data infile 'load_t0' replace into table t0; +ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t1`, CONSTRAINT `t1_ibfk_1` FOREIGN KEY (`pk`) REFERENCES `t0` (`pk`) ON UPDATE CASCADE) +delete t0, t2 from t0 join t2; +ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t1`, CONSTRAINT `t1_ibfk_1` FOREIGN KEY (`pk`) REFERENCES `t0` (`pk`) ON UPDATE CASCADE) +select pk from t0; +pk +1 +# Cleanup +drop table t1, t0, t2; +# create_select for a temporary table didn't set up pos_in_locked_tables. 
+create table t (a int unique) engine=innodb +replace select 1 as a, 2 as b union select 1 as a, 3 as c; +select * from t; +a b +1 3 +drop table t; +create temporary table t (a int unique) engine=innodb +replace select 1 as a, 2 as b union select 1 as a, 3 as c; +select * from t; +a b +1 3 +drop table t; diff --git a/mysql-test/suite/versioning/t/foreign.test b/mysql-test/suite/versioning/t/foreign.test index f4e4fa7a354..85b0c3e92e4 100644 --- a/mysql-test/suite/versioning/t/foreign.test +++ b/mysql-test/suite/versioning/t/foreign.test @@ -527,4 +527,50 @@ delete from t1; # cleanup drop table t1; +--echo # +--echo # MDEV-30378 Versioned REPLACE succeeds with ON DELETE RESTRICT +--echo # constraint +--echo # +create table t0 (pk integer primary key) with system versioning engine=innodb; +create table t1 (pk integer primary key, + foreign key(pk) references t0(pk) + on delete restrict on update cascade) engine=innodb; +create table t2 (pk integer); + +insert into t0 (pk) values (1); +insert into t1 (pk) values (1); +insert into t2 (pk) values (1); + +--error ER_ROW_IS_REFERENCED_2 +delete from t0; + +--error ER_ROW_IS_REFERENCED_2 +replace t0 values (1); + +select * into outfile 'load_t0' from t0 ; +--error ER_ROW_IS_REFERENCED_2 +load data infile 'load_t0' replace into table t0; + +--error ER_ROW_IS_REFERENCED_2 +delete t0, t2 from t0 join t2; + +select pk from t0; + +--echo # Cleanup +drop table t1, t0, t2; +--let $datadir= `select @@datadir` +--remove_file $datadir/test/load_t0 + + +--echo # create_select for a temporary table didn't set up pos_in_locked_tables. +create table t (a int unique) engine=innodb + replace select 1 as a, 2 as b union select 1 as a, 3 as c; +select * from t; +drop table t; + +create temporary table t (a int unique) engine=innodb + replace select 1 as a, 2 as b union select 1 as a, 3 as c; +select * from t; +drop table t; + --source suite/versioning/common_finish.inc diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index b743fc88061..0fd18c3e597 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -4396,6 +4396,7 @@ TABLE *select_create::create_table_from_items(THD *thd, */ DBUG_ASSERT(0); } + create_table->table->pos_in_table_list= create_table; } } else diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 58b2068b56f..440c6cf3f64 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -8957,10 +8957,12 @@ ha_innobase::update_row( const bool vers_ins_row = vers_set_fields && thd_sql_command(m_user_thd) != SQLCOM_ALTER_TABLE; + TABLE_LIST *tl= table->pos_in_table_list; + uint8 op_map= tl->trg_event_map | tl->slave_fk_event_map; /* This is not a delete */ m_prebuilt->upd_node->is_delete = (vers_set_fields && !vers_ins_row) || - (thd_sql_command(m_user_thd) == SQLCOM_DELETE && + (op_map & trg2bit(TRG_EVENT_DELETE) && table->versioned(VERS_TIMESTAMP)) ? VERSIONED_DELETE : NO_DELETE; -- cgit v1.2.1