Diffstat (limited to 'sql')
40 files changed, 894 insertions, 368 deletions
diff --git a/sql/filesort.cc b/sql/filesort.cc index 4e5aeccb78e..262ffecb882 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -223,7 +223,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, MYSQL_FILESORT_START(table->s->db.str, table->s->table_name.str); DEBUG_SYNC(thd, "filesort_start"); - if (!(sort= new SORT_INFO)) + if (!(sort= new SORT_INFO)) // Note that this is not automatically freed! return 0; if (subselect && subselect->filesort_buffer.is_allocated()) @@ -434,6 +434,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, error= 0; err: + param.tmp_buffer.free(); if (!subselect || !subselect->is_uncacheable()) { if (!param.using_addon_fields()) @@ -1105,7 +1106,7 @@ void store_length(uchar *to, uint length, uint pack_length) void Type_handler_string_result::make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp_buffer) const { CHARSET_INFO *cs= item->collation.collation; bool maybe_null= item->maybe_null; @@ -1113,7 +1114,7 @@ Type_handler_string_result::make_sort_key_part(uchar *to, Item *item, if (maybe_null) *to++= 1; - String *res= item->str_result(¶m->tmp_buffer); + Binary_string *res= item->str_result(tmp_buffer); if (!res) { if (maybe_null) @@ -1175,7 +1176,7 @@ Type_handler_string_result::make_sort_key_part(uchar *to, Item *item, void Type_handler_int_result::make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp_buffer) const { longlong value= item->val_int_result(); make_sort_key_longlong(to, item->maybe_null, item->null_value, @@ -1186,7 +1187,7 @@ Type_handler_int_result::make_sort_key_part(uchar *to, Item *item, void Type_handler_temporal_result::make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp_buffer) const { MYSQL_TIME buf; // This is a temporal type. No nanoseconds. Rounding mode is not important. 
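Reviewer note: the filesort.cc hunks above narrow the interface of the `make_sort_key_part()` family from `Sort_param *param` to a plain `String *tmp_buffer`, and the new `param.tmp_buffer.free()` at the `err:` label releases that scratch buffer on every exit path (the added comment also flags that the `SORT_INFO` object is not freed automatically). A minimal sketch of the same pattern, using hypothetical names (`ScratchBuffer`, `make_key_part`, `build_sort_keys`) rather than the real MariaDB classes:

```cpp
#include <cstdint>
#include <string>
#include <vector>

// Hypothetical stand-in for the reusable conversion buffer that the patch
// now passes explicitly instead of the whole sort-parameter object.
struct ScratchBuffer {
  std::string data;
  void free_buffer() { std::string().swap(data); }  // release memory eagerly
};

// The key builder only needs the scratch buffer, not every sort parameter,
// so the narrower interface makes the real dependency explicit.
static void make_key_part(std::vector<uint8_t> &key, const char *value,
                          ScratchBuffer *tmp) {
  tmp->data.assign(value);                      // reuse the same allocation
  key.insert(key.end(), tmp->data.begin(), tmp->data.end());
}

static int build_sort_keys(const std::vector<const char *> &rows) {
  ScratchBuffer tmp;
  std::vector<uint8_t> key;
  int error = 0;
  for (const char *row : rows) {
    if (!row) { error = 1; goto err; }          // bail out on bad input
    key.clear();
    make_key_part(key, row, &tmp);
  }
err:
  tmp.free_buffer();  // freed on both the success and the error path,
                      // mirroring the new param.tmp_buffer.free() at err:
  return error;
}

int main() { return build_sort_keys({"beta", "alpha", "gamma"}); }
```

Passing only the buffer keeps the key builders independent of the rest of the sort state, which is what lets the duplicate-removal code in sql_select.cc (further down in this diff) reuse them with its own temporary String.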
@@ -1208,7 +1209,7 @@ Type_handler_temporal_result::make_sort_key_part(uchar *to, Item *item, void Type_handler_timestamp_common::make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp_buffer) const { THD *thd= current_thd; uint binlen= my_timestamp_binary_length(item->decimals); @@ -1301,7 +1302,7 @@ Type_handler::make_packed_sort_key_longlong(uchar *to, bool maybe_null, void Type_handler_decimal_result::make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp_buffer) const { my_decimal dec_buf, *dec_val= item->val_decimal_result(&dec_buf); if (item->maybe_null) @@ -1321,7 +1322,7 @@ Type_handler_decimal_result::make_sort_key_part(uchar *to, Item *item, void Type_handler_real_result::make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp_buffer) const { double value= item->val_result(); if (item->maybe_null) @@ -2566,7 +2567,7 @@ void Sort_param::try_to_pack_sortkeys() uint Type_handler_string_result::make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp) const { CHARSET_INFO *cs= item->collation.collation; bool maybe_null= item->maybe_null; @@ -2574,7 +2575,7 @@ Type_handler_string_result::make_packed_sort_key_part(uchar *to, Item *item, if (maybe_null) *to++= 1; - Binary_string *res= item->str_result(¶m->tmp_buffer); + Binary_string *res= item->str_result(tmp); if (!res) { if (maybe_null) @@ -2605,7 +2606,7 @@ Type_handler_string_result::make_packed_sort_key_part(uchar *to, Item *item, uint Type_handler_int_result::make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp) const { longlong value= item->val_int_result(); return make_packed_sort_key_longlong(to, item->maybe_null, @@ -2617,7 +2618,7 @@ Type_handler_int_result::make_packed_sort_key_part(uchar *to, Item *item, uint Type_handler_decimal_result::make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp) const { my_decimal dec_buf, *dec_val= item->val_decimal_result(&dec_buf); if (item->maybe_null) @@ -2639,7 +2640,7 @@ Type_handler_decimal_result::make_packed_sort_key_part(uchar *to, Item *item, uint Type_handler_real_result::make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp) const { double value= item->val_result(); if (item->maybe_null) @@ -2660,7 +2661,7 @@ Type_handler_real_result::make_packed_sort_key_part(uchar *to, Item *item, uint Type_handler_temporal_result::make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp) const { MYSQL_TIME buf; // This is a temporal type. No nanoseconds. Rounding mode is not important. 
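The `make_sort_key_part()` / `make_packed_sort_key_part()` implementations being re-signatured here write memcmp-comparable key images, prefixed with a NULL-indicator byte when the item is nullable (`if (maybe_null) *to++= 1;` in the hunks above). The sketch below illustrates that encoding idea for a nullable 64-bit integer only; the helper name and exact byte layout are illustrative assumptions, not MariaDB's actual on-disk format:

```cpp
#include <array>
#include <cstdint>
#include <cstring>
#include <optional>

// Hypothetical sketch: encode an optional signed 64-bit value so that
// memcmp() on the 9-byte image orders keys the same way as the values,
// with NULL sorting before every non-NULL value.
static std::array<uint8_t, 9> make_longlong_key_part(std::optional<int64_t> v) {
  std::array<uint8_t, 9> key{};
  if (!v) {
    key[0] = 0;                       // NULL indicator: NULL sorts first
    return key;                       // rest of the key stays zero-filled
  }
  key[0] = 1;                         // non-NULL indicator
  // Flip the sign bit so negative values compare below positive ones,
  // then store big-endian so byte-wise comparison matches numeric order.
  uint64_t u = static_cast<uint64_t>(*v) ^ (1ULL << 63);
  for (int i = 0; i < 8; ++i)
    key[1 + i] = static_cast<uint8_t>(u >> (8 * (7 - i)));
  return key;
}

int main() {
  auto a = make_longlong_key_part(-5);
  auto b = make_longlong_key_part(3);
  auto n = make_longlong_key_part(std::nullopt);
  // NULL < -5 < 3 under memcmp, matching the intended sort order.
  return !(std::memcmp(n.data(), a.data(), 9) < 0 &&
           std::memcmp(a.data(), b.data(), 9) < 0);
}
```

Making a plain byte comparison agree with the value order is the property the filesort merge code relies on, which is why each type handler gets its own key-part writer.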
@@ -2682,7 +2683,7 @@ Type_handler_temporal_result::make_packed_sort_key_part(uchar *to, Item *item, uint Type_handler_timestamp_common::make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const + String *tmp) const { THD *thd= current_thd; uint binlen= my_timestamp_binary_length(item->decimals); @@ -3026,7 +3027,8 @@ static uint make_sortkey(Sort_param *param, uchar *to) { // Item sort_field->item->type_handler()->make_sort_key_part(to, sort_field->item, - sort_field, param); + sort_field, + ¶m->tmp_buffer); if ((maybe_null= sort_field->item->maybe_null)) to++; } @@ -3079,7 +3081,7 @@ static uint make_packed_sortkey(Sort_param *param, uchar *to) Item *item= sort_field->item; length= item->type_handler()->make_packed_sort_key_part(to, item, sort_field, - param); + ¶m->tmp_buffer); if ((maybe_null= sort_field->item->maybe_null)) to++; } diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 1faee3216c8..b4f6982101e 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -1303,7 +1303,18 @@ public: The following code is not safe if you are using different storage engines or different index types per partition. */ - return m_file[0]->index_flags(inx, part, all_parts); + ulong part_flags= m_file[0]->index_flags(inx, part, all_parts); + + /* + The underlying storage engine might support Rowid Filtering. But + ha_partition does not forward the needed SE API calls, so the feature + will not be used. + + Note: It's the same with IndexConditionPushdown, except for its variant + of IndexConditionPushdown+BatchedKeyAccess (that one works). Because of + that, we do not clear HA_DO_INDEX_COND_PUSHDOWN here. + */ + return part_flags & ~HA_DO_RANGE_FILTER_PUSHDOWN; } /** diff --git a/sql/handler.cc b/sql/handler.cc index 7c42b3bbb6f..4afd30021ee 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -4321,7 +4321,7 @@ int handler::check_collation_compatibility() { ulong mysql_version= table->s->mysql_version; - if (mysql_version < 50124) + if (mysql_version < Charset::latest_mariadb_version_with_collation_change()) { KEY *key= table->key_info; KEY *key_end= key + table->s->keys; @@ -4335,18 +4335,7 @@ int handler::check_collation_compatibility() continue; Field *field= table->field[key_part->fieldnr - 1]; uint cs_number= field->charset()->number; - if ((mysql_version < 50048 && - (cs_number == 11 || /* ascii_general_ci - bug #29499, bug #27562 */ - cs_number == 41 || /* latin7_general_ci - bug #29461 */ - cs_number == 42 || /* latin7_general_cs - bug #29461 */ - cs_number == 20 || /* latin7_estonian_cs - bug #29461 */ - cs_number == 21 || /* latin2_hungarian_ci - bug #29461 */ - cs_number == 22 || /* koi8u_general_ci - bug #29461 */ - cs_number == 23 || /* cp1251_ukrainian_ci - bug #29461 */ - cs_number == 26)) || /* cp1250_general_ci - bug #29461 */ - (mysql_version < 50124 && - (cs_number == 33 || /* utf8mb3_general_ci - bug #27877 */ - cs_number == 35))) /* ucs2_general_ci - bug #27877 */ + if (Charset::collation_changed_order(mysql_version, cs_number)) return HA_ADMIN_NEEDS_UPGRADE; } } @@ -7644,11 +7633,13 @@ static int del_global_index_stats_for_table(THD *thd, uchar* cache_key, size_t cache_key_length) { int res = 0; + uint to_delete_counter= 0; + INDEX_STATS *index_stats_to_delete[MAX_INDEXES]; DBUG_ENTER("del_global_index_stats_for_table"); mysql_mutex_lock(&LOCK_global_index_stats); - for (uint i= 0; i < global_index_stats.records;) + for (uint i= 0; i < global_index_stats.records; i++) { INDEX_STATS *index_stats = (INDEX_STATS*) 
my_hash_element(&global_index_stats, i); @@ -7658,19 +7649,13 @@ int del_global_index_stats_for_table(THD *thd, uchar* cache_key, size_t cache_ke index_stats->index_name_length >= cache_key_length && !memcmp(index_stats->index, cache_key, cache_key_length)) { - res= my_hash_delete(&global_index_stats, (uchar*)index_stats); - /* - In our HASH implementation on deletion one elements - is moved into a place where a deleted element was, - and the last element is moved into the empty space. - Thus we need to re-examine the current element, but - we don't have to restart the search from the beginning. - */ + index_stats_to_delete[to_delete_counter++]= index_stats; } - else - i++; } + for (uint i= 0; i < to_delete_counter; i++) + res= my_hash_delete(&global_index_stats, (uchar*)index_stats_to_delete[i]); + mysql_mutex_unlock(&LOCK_global_index_stats); DBUG_RETURN(res); } diff --git a/sql/item.cc b/sql/item.cc index 19c0b341982..85bb1f42632 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -5717,7 +5717,8 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) max_arg_level for the function if it's needed. */ if (thd->lex->in_sum_func && - thd->lex == context->select_lex->parent_lex && + last_checked_context->select_lex->parent_lex == + context->select_lex->parent_lex && thd->lex->in_sum_func->nest_level >= select->nest_level) { Item::Type ref_type= (*reference)->type(); @@ -5743,7 +5744,8 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) (Item_ident*) (*reference) : 0), false); if (thd->lex->in_sum_func && - thd->lex == context->select_lex->parent_lex && + last_checked_context->select_lex->parent_lex == + context->select_lex->parent_lex && thd->lex->in_sum_func->nest_level >= select->nest_level) { set_if_bigger(thd->lex->in_sum_func->max_arg_level, @@ -6081,7 +6083,6 @@ bool Item_field::fix_fields(THD *thd, Item **reference) if (!thd->lex->current_select->no_wrap_view_item && thd->lex->in_sum_func && - thd->lex == select->parent_lex && thd->lex->in_sum_func->nest_level == select->nest_level) set_if_bigger(thd->lex->in_sum_func->max_arg_level, @@ -8116,7 +8117,8 @@ bool Item_ref::fix_fields(THD *thd, Item **reference) max_arg_level for the function if it's needed. */ if (thd->lex->in_sum_func && - thd->lex == context->select_lex->parent_lex && + last_checked_context->select_lex->parent_lex == + context->select_lex->parent_lex && thd->lex->in_sum_func->nest_level >= last_checked_context->select_lex->nest_level) set_if_bigger(thd->lex->in_sum_func->max_arg_level, @@ -8140,7 +8142,8 @@ bool Item_ref::fix_fields(THD *thd, Item **reference) max_arg_level for the function if it's needed. */ if (thd->lex->in_sum_func && - thd->lex == context->select_lex->parent_lex && + last_checked_context->select_lex->parent_lex == + context->select_lex->parent_lex && thd->lex->in_sum_func->nest_level >= last_checked_context->select_lex->nest_level) set_if_bigger(thd->lex->in_sum_func->max_arg_level, @@ -8155,7 +8158,8 @@ bool Item_ref::fix_fields(THD *thd, Item **reference) 1. outer reference (will be fixed later by the fix_inner_refs function); 2. an unnamed reference inside an aggregate function. 
*/ - if (!((*ref)->type() == REF_ITEM && + if (!set_properties_only && + !((*ref)->type() == REF_ITEM && ((Item_ref *)(*ref))->ref_type() == OUTER_REF) && (((*ref)->with_sum_func() && name.str && !(current_sel->get_linkage() != GLOBAL_OPTIONS_TYPE && diff --git a/sql/item_jsonfunc.h b/sql/item_jsonfunc.h index 9472e184124..4aaf66ec695 100644 --- a/sql/item_jsonfunc.h +++ b/sql/item_jsonfunc.h @@ -376,6 +376,11 @@ class Item_func_json_length: public Item_long_func { bool check_arguments() const { + if (arg_count == 0 || arg_count > 2) + { + my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), func_name()); + return true; + } return args[0]->check_type_can_return_text(func_name()) || (arg_count > 1 && args[1]->check_type_general_purpose_string(func_name())); diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 000d198eb0c..0bea808077e 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -3711,6 +3711,7 @@ String *Item_func_weight_string::val_str(String *str) flags); DBUG_ASSERT(frm_length <= tmp_length); + str->set_charset(&my_charset_bin); str->length(frm_length); null_value= 0; return str; @@ -3790,6 +3791,7 @@ String *Item_func_unhex::val_str(String *str) from= res->ptr(); null_value= 0; + str->set_charset(&my_charset_bin); str->length(length); to= (char*) str->ptr(); if (res->length() % 2) diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 9baf945644e..46942c0c785 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -409,7 +409,8 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref) sl= sl->master_unit()->outer_select() ) sl->master_unit()->item->get_with_sum_func_cache()->set_with_sum_func(); } - thd->lex->current_select->mark_as_dependent(thd, aggr_sel, NULL); + if (aggr_sel) + thd->lex->current_select->mark_as_dependent(thd, aggr_sel, NULL); if ((thd->lex->describe & DESCRIBE_EXTENDED) && aggr_sel) { diff --git a/sql/mysqld.h b/sql/mysqld.h index d31f0159eb6..fe34a438405 100644 --- a/sql/mysqld.h +++ b/sql/mysqld.h @@ -885,6 +885,8 @@ enum enum_query_type // it evaluates to. Should be used for error messages, so that they // don't reveal values. QT_NO_DATA_EXPANSION= (1 << 9), + // Remove wrappers added for TVC when creating or showing view + QT_NO_WRAPPERS_FOR_TVC_IN_VIEW= (1 << 12), }; diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc index 6332e02453d..3df6c2d9f66 100644 --- a/sql/rpl_parallel.cc +++ b/sql/rpl_parallel.cc @@ -261,6 +261,12 @@ finish_event_group(rpl_parallel_thread *rpt, uint64 sub_id, STRING_WITH_LEN("now WAIT_FOR proceed_by_1000")); } }); + DBUG_EXECUTE_IF("hold_worker2_favor_worker3", { + if (rgi->current_gtid.seq_no == 2001) { + DBUG_ASSERT(!rgi->worker_error || entry->stop_on_error_sub_id == sub_id); + debug_sync_set_action(thd, STRING_WITH_LEN("now SIGNAL cont_worker3")); + } + }); #endif if (rgi->killed_for_retry == rpl_group_info::RETRY_KILL_PENDING) @@ -284,6 +290,11 @@ signal_error_to_sql_driver_thread(THD *thd, rpl_group_info *rgi, int err) In case we get an error during commit, inform following transactions that we aborted our commit. 
*/ + DBUG_EXECUTE_IF("hold_worker2_favor_worker3", { + if (rgi->current_gtid.seq_no == 2002) { + debug_sync_set_action(thd, STRING_WITH_LEN("now WAIT_FOR cont_worker2")); + }}); + rgi->unmark_start_commit(); rgi->cleanup_context(thd, true); rgi->rli->abort_slave= true; @@ -790,7 +801,14 @@ do_retry: thd->reset_killed(); thd->clear_error(); rgi->killed_for_retry = rpl_group_info::RETRY_KILL_NONE; - +#ifdef ENABLED_DEBUG_SYNC + DBUG_EXECUTE_IF("hold_worker2_favor_worker3", { + if (rgi->current_gtid.seq_no == 2003) { + debug_sync_set_action(thd, + STRING_WITH_LEN("now WAIT_FOR cont_worker3")); + } + }); +#endif /* If we retry due to a deadlock kill that occurred during the commit step, we might have already updated (but not committed) an update of table @@ -808,15 +826,12 @@ do_retry: for (;;) { mysql_mutex_lock(&entry->LOCK_parallel_entry); - if (entry->stop_on_error_sub_id == (uint64) ULONGLONG_MAX || + register_wait_for_prior_event_group_commit(rgi, entry); + if (!(entry->stop_on_error_sub_id == (uint64) ULONGLONG_MAX || #ifndef DBUG_OFF - (DBUG_EVALUATE_IF("simulate_mdev_12746", 1, 0)) || + (DBUG_EVALUATE_IF("simulate_mdev_12746", 1, 0)) || #endif - rgi->gtid_sub_id < entry->stop_on_error_sub_id) - { - register_wait_for_prior_event_group_commit(rgi, entry); - } - else + rgi->gtid_sub_id < entry->stop_on_error_sub_id)) { /* A failure of a preceding "parent" transaction may not be @@ -1993,6 +2008,9 @@ rpl_parallel_thread::get_gco(uint64 wait_count, group_commit_orderer *prev, gco->prior_sub_id= prior_sub_id; gco->installed= false; gco->flags= 0; +#ifndef DBUG_OFF + gco->gc_done= false; +#endif return gco; } @@ -2000,6 +2018,10 @@ rpl_parallel_thread::get_gco(uint64 wait_count, group_commit_orderer *prev, void rpl_parallel_thread::loc_free_gco(group_commit_orderer *gco) { +#ifndef DBUG_OFF + DBUG_ASSERT(!gco->gc_done); + gco->gc_done= true; +#endif if (!loc_gco_list) loc_gco_last_ptr_ptr= &gco->next_gco; else diff --git a/sql/rpl_parallel.h b/sql/rpl_parallel.h index b88e77d5427..9da0c70d12e 100644 --- a/sql/rpl_parallel.h +++ b/sql/rpl_parallel.h @@ -90,6 +90,9 @@ struct group_commit_orderer { FORCE_SWITCH= 2 }; uint8 flags; +#ifndef DBUG_OFF + bool gc_done; +#endif }; diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index dbb54c7d0f0..de5fcb83a7d 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -2425,8 +2425,13 @@ mark_start_commit_inner(rpl_parallel_entry *e, group_commit_orderer *gco, uint64 count= ++e->count_committing_event_groups; /* Signal any following GCO whose wait_count has been reached now. */ tmp= gco; + + DBUG_ASSERT(!tmp->gc_done); + while ((tmp= tmp->next_gco)) { + DBUG_ASSERT(!tmp->gc_done); + uint64 wait_count= tmp->wait_count; if (wait_count > count) break; diff --git a/sql/semisync_master.cc b/sql/semisync_master.cc index b57fc199826..17adeed86e7 100644 --- a/sql/semisync_master.cc +++ b/sql/semisync_master.cc @@ -317,8 +317,8 @@ void Active_tranx::clear_active_tranx_nodes(const char *log_file_name, /******************************************************************************* * - * <Repl_semi_sync_master> class: the basic code layer for syncsync master. - * <Repl_semi_sync_slave> class: the basic code layer for syncsync slave. + * <Repl_semi_sync_master> class: the basic code layer for semisync master. + * <Repl_semi_sync_slave> class: the basic code layer for semisync slave. 
* * The most important functions during semi-syn replication listed: * @@ -809,8 +809,6 @@ void Repl_semi_sync_master::dump_end(THD* thd) remove_slave(); ack_receiver.remove_slave(thd); - - return; } int Repl_semi_sync_master::commit_trx(const char* trx_wait_binlog_name, diff --git a/sql/signal_handler.cc b/sql/signal_handler.cc index 44e31125d49..b739634d8e4 100644 --- a/sql/signal_handler.cc +++ b/sql/signal_handler.cc @@ -27,6 +27,7 @@ #ifdef __WIN__ #include <crtdbg.h> +#include <direct.h> #define SIGNAL_FMT "exception 0x%x" #else #define SIGNAL_FMT "signal %d" @@ -66,30 +67,30 @@ static inline void output_core_info() (int) len, buff); } #ifdef __FreeBSD__ - if ((fd= my_open("/proc/curproc/rlimit", O_RDONLY, MYF(0))) >= 0) + if ((fd= open("/proc/curproc/rlimit", O_RDONLY, MYF(0))) >= 0) #else - if ((fd= my_open("/proc/self/limits", O_RDONLY, MYF(0))) >= 0) + if ((fd= open("/proc/self/limits", O_RDONLY, MYF(0))) >= 0) #endif { my_safe_printf_stderr("Resource Limits:\n"); - while ((len= my_read(fd, (uchar*)buff, sizeof(buff), MYF(0))) > 0) + while ((len= read(fd, (uchar*)buff, sizeof(buff))) > 0) { my_write_stderr(buff, len); } - my_close(fd, MYF(0)); + close(fd); } #ifdef __linux__ - if ((fd= my_open("/proc/sys/kernel/core_pattern", O_RDONLY, MYF(0))) >= 0) + if ((fd= open("/proc/sys/kernel/core_pattern", O_RDONLY, MYF(0))) >= 0) { - len= my_read(fd, (uchar*)buff, sizeof(buff), MYF(0)); + len= read(fd, (uchar*)buff, sizeof(buff)); my_safe_printf_stderr("Core pattern: %.*s\n", (int) len, buff); - my_close(fd, MYF(0)); + close(fd); } - if ((fd= my_open("/proc/version", O_RDONLY, MYF(0))) >= 0) + if ((fd= open("/proc/version", O_RDONLY)) >= 0) { - len= my_read(fd, (uchar*)buff, sizeof(buff), MYF(0)); + len= read(fd, (uchar*)buff, sizeof(buff)); my_safe_printf_stderr("Kernel version: %.*s\n", (int) len, buff); - my_close(fd, MYF(0)); + close(fd); } #endif #elif defined(__APPLE__) || defined(__FreeBSD__) @@ -103,11 +104,14 @@ static inline void output_core_info() { my_safe_printf_stderr("Kernel version: %.*s\n", (int) len, buff); } -#else +#elif defined(HAVE_GETCWD) char buff[80]; - my_getwd(buff, sizeof(buff), 0); - my_safe_printf_stderr("Writing a core file at %s\n", buff); - fflush(stderr); + + if (getcwd(buff, sizeof(buff))) + { + my_safe_printf_stderr("Writing a core file at %.*s\n", (int) sizeof(buff), buff); + fflush(stderr); + } #endif } diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index ce9f13ca01d..3cfb68b69b4 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -2565,6 +2565,8 @@ static bool acl_load(THD *thd, const Grant_tables& tables) "possible to remove this privilege using REVOKE.", host.host.hostname, host.db); } + else if (!host.db) + host.db= const_cast<char*>(host_not_specified.str); host.access= host_table.get_access(); host.access= fix_rights_for_db(host.access); host.sort= get_magic_sort("hd", host.host.hostname, host.db); @@ -2573,8 +2575,7 @@ static bool acl_load(THD *thd, const Grant_tables& tables) { sql_print_warning("'host' entry '%s|%s' " "ignored in --skip-name-resolve mode.", - safe_str(host.host.hostname), - safe_str(host.db)); + host.host.hostname, host.db); continue; } #ifndef TO_BE_REMOVED @@ -3666,7 +3667,7 @@ privilege_t acl_get(const char *host, const char *ip, ACL_HOST *acl_host=dynamic_element(&acl_hosts,i,ACL_HOST*); if (compare_hostname(&acl_host->host,host,ip)) { - if (!acl_host->db || !wild_compare(db,acl_host->db,db_is_pattern)) + if (!wild_compare(db, acl_host->db, db_is_pattern)) { host_access=acl_host->access; // Fully specified. 
Take it break; @@ -6691,6 +6692,7 @@ static int update_role_columns(GRANT_TABLE *merged, } } +restart: for (uint i=0 ; i < mh->records ; i++) { GRANT_COLUMN *col = (GRANT_COLUMN *)my_hash_element(mh, i); @@ -6699,6 +6701,7 @@ static int update_role_columns(GRANT_TABLE *merged, { changed= 1; my_hash_delete(mh, (uchar*)col); + goto restart; } } DBUG_ASSERT(rights == merged->cols); diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 6ccc9be9901..3364e8ad639 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -5960,7 +5960,10 @@ find_field_in_table(THD *thd, TABLE *table, const char *name, size_t length, if (cached_field_index < table->s->fields && !my_strcasecmp(system_charset_info, table->field[cached_field_index]->field_name.str, name)) + { field= table->field[cached_field_index]; + DEBUG_SYNC(thd, "table_field_cached"); + } else { LEX_CSTRING fname= {name, length}; @@ -6411,6 +6414,13 @@ find_field_in_tables(THD *thd, Item_ident *item, if (last_table) last_table= last_table->next_name_resolution_table; + uint fake_index_for_duplicate_search= NO_CACHED_FIELD_INDEX; + /* + For the field search it will point to field cache, but for duplicate + search it will point to fake_index_for_duplicate_search (no cache + present). + */ + uint *current_cache= &(item->cached_field_index); for (; cur_table != last_table ; cur_table= cur_table->next_name_resolution_table) { @@ -6420,7 +6430,7 @@ find_field_in_tables(THD *thd, Item_ident *item, SQLCOM_SHOW_FIELDS) ? false : check_privileges, allow_rowid, - &(item->cached_field_index), + current_cache, register_tree_change, &actual_table); if (cur_field) @@ -6435,7 +6445,7 @@ find_field_in_tables(THD *thd, Item_ident *item, item->name.str, db, table_name, ref, false, allow_rowid, - &(item->cached_field_index), + current_cache, register_tree_change, &actual_table); if (cur_field) @@ -6452,8 +6462,19 @@ find_field_in_tables(THD *thd, Item_ident *item, Store the original table of the field, which may be different from cur_table in the case of NATURAL/USING join. */ - item->cached_table= (!actual_table->cacheable_table || found) ? - 0 : actual_table; + if (actual_table->cacheable_table /*(1)*/ && !found /*(2)*/) + { + /* + We have just found a field allowed to cache (1) and + it is not dublicate search (2). 
+ */ + item->cached_table= actual_table; + } + else + { + item->cached_table= NULL; + item->cached_field_index= NO_CACHED_FIELD_INDEX; + } DBUG_ASSERT(thd->where); /* @@ -6472,6 +6493,7 @@ find_field_in_tables(THD *thd, Item_ident *item, return (Field*) 0; } found= cur_field; + current_cache= &fake_index_for_duplicate_search; } } @@ -7926,9 +7948,8 @@ bool setup_tables(THD *thd, Name_resolution_context *context, table_list; table_list= table_list->next_local) { - if (table_list->merge_underlying_list) + if (table_list->is_merged_derived() && table_list->merge_underlying_list) { - DBUG_ASSERT(table_list->is_merged_derived()); Query_arena *arena, backup; arena= thd->activate_stmt_arena_if_needed(&backup); bool res; diff --git a/sql/sql_class.h b/sql/sql_class.h index a5a5f3df44d..68a69762354 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1841,7 +1841,7 @@ show_system_thread(enum_thread_type thread) RETURN_NAME_AS_STRING(SYSTEM_THREAD_SLAVE_BACKGROUND); RETURN_NAME_AS_STRING(SYSTEM_THREAD_SEMISYNC_MASTER_BACKGROUND); default: - sprintf(buf, "<UNKNOWN SYSTEM THREAD: %d>", thread); + snprintf(buf, sizeof(buf), "<UNKNOWN SYSTEM THREAD: %d>", thread); return buf; } #undef RETURN_NAME_AS_STRING @@ -7429,7 +7429,7 @@ public: if (unlikely(!(dst->str= tmp= (char*) alloc_root(mem_root, dst->length + 1)))) return true; - sprintf(tmp, "%.*s%.*s%.*s", + snprintf(tmp, dst->length + 1, "%.*s%.*s%.*s", (int) m_db.length, (m_db.length ? m_db.str : ""), dot, ".", (int) m_name.length, m_name.str); diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index e628ce60d2d..81c3141c252 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -1070,6 +1070,8 @@ int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds, DBUG_RETURN(TRUE); select_lex->fix_prepare_information(thd, conds, &fake_conds); + if (!thd->lex->upd_del_where) + thd->lex->upd_del_where= *conds; DBUG_RETURN(FALSE); } diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index fa060afde8d..30a464d06e9 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -351,24 +351,6 @@ bool mysql_derived_merge(THD *thd, LEX *lex, TABLE_LIST *derived) DBUG_RETURN(FALSE); } - if (dt_select->uncacheable & UNCACHEABLE_RAND) - { - /* There is random function => fall back to materialization. */ - cause= "Random function in the select"; - if (unlikely(thd->trace_started())) - { - OPT_TRACE_VIEWS_TRANSFORM(thd, trace_wrapper, trace_derived, - derived->is_derived() ? "derived" : "view", - derived->alias.str ? derived->alias.str : "<NULL>", - derived->get_unit()->first_select()->select_number, - "materialized"); - trace_derived.add("cause", cause); - } - derived->change_refs_to_fields(); - derived->set_materialized_derived(); - DBUG_RETURN(FALSE); - } - if (derived->dt_handler) { derived->change_refs_to_fields(); diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc index 1b59dce10b9..e8e8a55540b 100644 --- a/sql/sql_explain.cc +++ b/sql/sql_explain.cc @@ -161,7 +161,7 @@ void Explain_query::query_plan_ready() Send EXPLAIN output to the client. 
*/ -int Explain_query::send_explain(THD *thd) +int Explain_query::send_explain(THD *thd, bool extended) { select_result *result; LEX *lex= thd->lex; @@ -174,8 +174,22 @@ int Explain_query::send_explain(THD *thd) if (thd->lex->explain_json) print_explain_json(result, thd->lex->analyze_stmt); else + { res= print_explain(result, lex->describe, thd->lex->analyze_stmt); - + if (extended) + { + char buff[1024]; + String str(buff,(uint32) sizeof(buff), system_charset_info); + str.length(0); + /* + The warnings system requires input in utf8, @see + mysqld_show_warnings(). + */ + lex->unit.print(&str, QT_EXPLAIN_EXTENDED); + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, + ER_YES, str.c_ptr_safe()); + } + } if (res) result->abort_result_set(); else @@ -185,6 +199,7 @@ int Explain_query::send_explain(THD *thd) } + /* The main entry point to print EXPLAIN of the entire query */ diff --git a/sql/sql_explain.h b/sql/sql_explain.h index df0a165860d..780945acbdc 100644 --- a/sql/sql_explain.h +++ b/sql/sql_explain.h @@ -474,7 +474,7 @@ public: bool is_analyze); /* Send tabular EXPLAIN to the client */ - int send_explain(THD *thd); + int send_explain(THD *thd, bool extended); /* Return tabular EXPLAIN output as a text string */ bool print_explain_str(THD *thd, String *out_str, bool is_analyze); diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 132934773f9..c17c93c5a9c 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -853,7 +853,8 @@ bool mysql_insert(THD *thd, TABLE_LIST *table_list, save_insert_query_plan(thd, table_list); if (thd->lex->describe) { - retval= thd->lex->explain->send_explain(thd); + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + retval= thd->lex->explain->send_explain(thd, extended); goto abort; } diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index fad9aabdc90..91837ffb5c3 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -1314,6 +1314,8 @@ void LEX::start(THD *thd_arg) frame_bottom_bound= NULL; win_spec= NULL; + upd_del_where= NULL; + vers_conditions.empty(); period_conditions.empty(); @@ -3024,6 +3026,7 @@ void st_select_lex::init_select() curr_tvc_name= 0; in_tvc= false; versioned_tables= 0; + is_tvc_wrapper= false; nest_flags= 0; } @@ -3922,40 +3925,45 @@ LEX::LEX() } +bool LEX::can_be_merged() +{ + return unit.can_be_merged(); +} + + /* - Check whether the merging algorithm can be used on this VIEW + Check whether the merging algorithm can be used for this unit SYNOPSIS - LEX::can_be_merged() + st_select_lex_unit::can_be_merged() DESCRIPTION - We can apply merge algorithm if it is single SELECT view with - subqueries only in WHERE clause (we do not count SELECTs of underlying - views, and second level subqueries) and we have not grpouping, ordering, - HAVING clause, aggregate functions, DISTINCT clause, LIMIT clause and - several underlying tables. + We can apply merge algorithm for a unit if it is single SELECT with + subqueries only in WHERE clauses or in ON conditions or in select list + (we do not count SELECTs of underlying views/derived tables/CTEs and + second level subqueries) and we have no grouping, ordering, HAVING + clause, aggregate functions, DISTINCT clause, LIMIT clause. 
RETURN FALSE - only temporary table algorithm can be used TRUE - merge algorithm can be used */ -bool LEX::can_be_merged() +bool st_select_lex_unit::can_be_merged() { // TODO: do not forget implement case when select_lex.table_list.elements==0 /* find non VIEW subqueries/unions */ - bool selects_allow_merge= (first_select_lex()->next_select() == 0 && - !(first_select_lex()->uncacheable & + bool selects_allow_merge= (first_select()->next_select() == 0 && + !(first_select()->uncacheable & UNCACHEABLE_RAND)); if (selects_allow_merge) { - for (SELECT_LEX_UNIT *tmp_unit= first_select_lex()->first_inner_unit(); + for (SELECT_LEX_UNIT *tmp_unit= first_select()->first_inner_unit(); tmp_unit; tmp_unit= tmp_unit->next_unit()) { - if (tmp_unit->first_select()->parent_lex == this && - (tmp_unit->item != 0 && + if ((tmp_unit->item != 0 && (tmp_unit->item->place() != IN_WHERE && tmp_unit->item->place() != IN_ON && tmp_unit->item->place() != SELECT_LIST))) @@ -3967,12 +3975,12 @@ bool LEX::can_be_merged() } return (selects_allow_merge && - first_select_lex()->group_list.elements == 0 && - first_select_lex()->having == 0 && - first_select_lex()->with_sum_func == 0 && - first_select_lex()->table_list.elements >= 1 && - !(first_select_lex()->options & SELECT_DISTINCT) && - first_select_lex()->select_limit == 0); + first_select()->group_list.elements == 0 && + first_select()->having == 0 && + first_select()->with_sum_func == 0 && + first_select()->table_list.elements >= 1 && + !(first_select()->options & SELECT_DISTINCT) && + first_select()->select_limit == 0); } diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 5d3aed56740..f7db8c6942c 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -1034,6 +1034,8 @@ public: bool set_lock_to_the_last_select(Lex_select_lock l); + bool can_be_merged(); + friend class st_select_lex; }; @@ -1163,7 +1165,8 @@ public: st_select_lex. 
*/ uint curr_tvc_name; - + /* true <=> select has been created a TVC wrapper */ + bool is_tvc_wrapper; /* Needed to correctly generate 'PRIMARY' or 'SIMPLE' for select_type column of EXPLAIN @@ -1443,6 +1446,10 @@ public: } bool setup_ref_array(THD *thd, uint order_group_num); void print(THD *thd, String *str, enum_query_type query_type); + void print_item_list(THD *thd, String *str, enum_query_type query_type); + void print_set_clause(THD *thd, String *str, enum_query_type query_type); + void print_on_duplicate_key_clause(THD *thd, String *str, + enum_query_type query_type); static void print_order(String *str, ORDER *order, enum_query_type query_type); @@ -3541,6 +3548,8 @@ public: Window_frame_bound *frame_bottom_bound; Window_spec *win_spec; + Item *upd_del_where; + /* System Versioning */ vers_select_conds_t vers_conditions; vers_select_conds_t period_conditions; diff --git a/sql/sql_locale.cc b/sql/sql_locale.cc index dd19807dd6d..60e7abc3fa2 100644 --- a/sql/sql_locale.cc +++ b/sql/sql_locale.cc @@ -29,7 +29,7 @@ enum err_msgs_index { - en_US= 0, cs_CZ, da_DK, nl_NL, et_EE, fr_FR, de_DE, el_GR, hu_HU, it_IT, + en_US= 0, zh_CN, cs_CZ, da_DK, nl_NL, et_EE, fr_FR, de_DE, el_GR, hu_HU, it_IT, ja_JP, ko_KR, no_NO, nn_NO, pl_PL, pt_PT, ro_RO, ru_RU, sr_RS, sk_SK, es_ES, sv_SE, uk_UA, hi_IN } ERR_MSGS_INDEX; @@ -38,6 +38,7 @@ enum err_msgs_index MY_LOCALE_ERRMSGS global_errmsgs[]= { {"english", NULL}, + {"chinese", NULL}, {"czech", NULL}, {"danish", NULL}, {"dutch", NULL}, @@ -2095,7 +2096,7 @@ MY_LOCALE my_locale_zh_CN '.', /* decimal point zh_CN */ ',', /* thousands_sep zh_CN */ "\x03", /* grouping zh_CN */ - &global_errmsgs[en_US] + &global_errmsgs[zh_CN] ); /***** LOCALE END zh_CN *****/ diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 7176eebd3f3..62d33d2a007 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -4635,7 +4635,10 @@ mysql_execute_command(THD *thd) thd->protocol= save_protocol; } if (!res && thd->lex->analyze_stmt) - res= thd->lex->explain->send_explain(thd); + { + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + res= thd->lex->explain->send_explain(thd, extended); + } delete sel_result; MYSQL_INSERT_DONE(res, (ulong) thd->get_row_count_func()); /* @@ -4815,7 +4818,10 @@ mysql_execute_command(THD *thd) thd->protocol= save_protocol; } if (!res && (explain || lex->analyze_stmt)) - res= thd->lex->explain->send_explain(thd); + { + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + res= thd->lex->explain->send_explain(thd, extended); + } /* revert changes for SP */ MYSQL_INSERT_SELECT_DONE(res, (ulong) thd->get_row_count_func()); @@ -4882,7 +4888,10 @@ mysql_execute_command(THD *thd) if (thd->lex->analyze_stmt || thd->lex->describe) { if (!res) - res= thd->lex->explain->send_explain(thd); + { + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + res= thd->lex->explain->send_explain(thd, extended); + } } delete sel_result; @@ -4943,7 +4952,10 @@ mysql_execute_command(THD *thd) else { if (lex->describe || lex->analyze_stmt) - res= thd->lex->explain->send_explain(thd); + { + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + res= thd->lex->explain->send_explain(thd, extended); + } } multi_delete_error: delete result; @@ -6328,7 +6340,10 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables) thd->protocol= save_protocol; } if (!res) - res= thd->lex->explain->send_explain(thd); + { + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + res= thd->lex->explain->send_explain(thd, extended); + } } } } @@ -9363,7 +9378,9 @@ static 
my_bool kill_threads_callback(THD *thd, kill_threads_callback_arg *arg) if (!(arg->thd->security_ctx->master_access & PRIV_KILL_OTHER_USER_PROCESS) && !arg->thd->security_ctx->user_matches(thd->security_ctx)) - return 1; + { + return MY_TEST(arg->thd->security_ctx->master_access & PROCESS_ACL); + } if (!arg->threads_to_kill.push_back(thd, arg->thd->mem_root)) { mysql_mutex_lock(&thd->LOCK_thd_kill); // Lock from delete @@ -9485,7 +9502,10 @@ sql_kill_user(THD *thd, LEX_USER *user, killed_state state) my_ok(thd, rows); break; case ER_KILL_DENIED_ERROR: - my_error(error, MYF(0), (long long) thd->thread_id); + char buf[DEFINER_LENGTH+1]; + strxnmov(buf, sizeof(buf), user->user.str, "@", user->host.str, NULL); + my_printf_error(ER_KILL_DENIED_ERROR, ER_THD(thd, ER_CANNOT_USER), MYF(0), + "KILL USER", buf); break; case ER_OUT_OF_RESOURCES: default: diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index decddba6c4f..36dc0853908 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -432,7 +432,7 @@ static int send_file(THD *thd) /** Internal to mysql_binlog_send() routine that recalculates checksum for - 1. FD event (asserted) that needs additional arranment prior sending to slave. + 1. FD event (asserted) that needs additional arrangement prior sending to slave. 2. Start_encryption_log_event whose Ignored flag is set TODO DBUG_ASSERT can be removed if this function is used for more general cases */ diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 8441ec685dc..774898d8c26 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -244,10 +244,12 @@ static bool find_field_in_item_list (Field *field, void *data); static bool find_field_in_order_list (Field *field, void *data); int create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort); static int remove_dup_with_compare(THD *thd, TABLE *entry, Field **field, - Item *having); + SORT_FIELD *sortorder, ulong keylength, + Item *having); static int remove_dup_with_hash_index(THD *thd,TABLE *table, - uint field_count, Field **first_field, - ulong key_length,Item *having); + uint field_count, Field **first_field, + SORT_FIELD *sortorder, + ulong key_length,Item *having); static bool cmp_buffer_with_ref(THD *thd, TABLE *table, TABLE_REF *tab_ref); static bool setup_new_fields(THD *thd, List<Item> &fields, List<Item> &all_fields, ORDER *new_order); @@ -303,6 +305,9 @@ static Item **get_sargable_cond(JOIN *join, TABLE *table); bool is_eq_cond_injected_for_split_opt(Item_func_eq *eq_item); +void print_list_item(String *str, List_item *list, + enum_query_type query_type); + static bool build_notnull_conds_for_range_scans(JOIN *join, COND *cond, table_map allowed); @@ -7659,7 +7664,6 @@ best_access_path(JOIN *join, rec= MATCHING_ROWS_IN_OTHER_TABLE; // Fix for small tables Json_writer_object trace_access_idx(thd); - double eq_ref_rows= 0.0, eq_ref_cost= 0.0; /* full text keys require special treatment */ @@ -7700,14 +7704,13 @@ best_access_path(JOIN *join, type= JT_EQ_REF; trace_access_idx.add("access_type", join_type_str[type]) .add("index", keyinfo->name); + if (!found_ref && table->opt_range_keys.is_set(key)) tmp= adjust_quick_cost(table->opt_range[key].cost, 1); else tmp= table->file->avg_io_cost(); - eq_ref_rows= prev_record_reads(join_positions, idx, + tmp*= prev_record_reads(join_positions, idx, found_ref); - tmp*= eq_ref_rows; - eq_ref_cost= tmp; records=1.0; } else @@ -8009,28 +8012,7 @@ best_access_path(JOIN *join, (table->file->index_flags(start_key->key,0,1) & HA_DO_RANGE_FILTER_PUSHDOWN)) { - double rows; - if (type == 
JT_EQ_REF) - { - /* - Treat EQ_REF access in a special way: - 1. We have no cost for index-only read. Assume its cost is 50% of - the cost of the full read. - - 2. A regular ref access will do #record_count lookups, but eq_ref - has "lookup cache" which reduces the number of lookups made. - The estimation code uses prev_record_reads() call to estimate: - - tmp = prev_record_reads(join_positions, idx, found_ref); - - Set the effective number of rows from "tmp" here. - */ - keyread_tmp= COST_ADD(eq_ref_cost / 2, s->startup_cost); - rows= eq_ref_rows; - } - else - rows= record_count * records; - + double rows= record_count * records; /* If we use filter F with selectivity s the the cost of fetching data by key using this filter will be @@ -8052,46 +8034,63 @@ best_access_path(JOIN *join, cost_of_fetching_1_row = tmp/rows cost_of_fetching_1_key_tuple = keyread_tmp/rows - access_cost_factor is the gain we expect for using rowid filter. - An access_cost_factor of 1.0 means that keyread_tmp is 0 - (using key read is infinitely fast) and the gain for each row when - using filter is great. - An access_cost_factor if 0.0 means that using keyread has the - same cost as reading rows, so there is no gain to get with - filter. - access_cost_factor should never be bigger than 1.0 (if all - calculations are correct) as the cost of keyread should always be - smaller than the cost of fetching the same number of keys + rows. - access_cost_factor should also never be smaller than 0.0. - The one exception is if number of records is 1 (eq_ref), then - because we are comparing rows to cost of keyread_tmp, keyread_tmp - is higher by 1.0. This is a big that will be fixed in a later - version. - - If we have limited the cost (=tmp) of reading rows with 'worst_seek' - we cannot use filters as the cost calculation below would cause - tmp to become negative. The future resultion is to not limit - cost with worst_seek. + Here's a more detailed explanation that uses the formulas behind + the function the call filter->get_adjusted_gain(). The function + takes as a parameter the number of probes/look-ups into the filter + that is equal to the number of fetched key entries that is equal to + the number of row fetches when no filter is used (assuming no + index condition pushdown is employed for the used key access). + Let this number be N. Then the total gain from using the filter is + N*a_adj - b where b is the cost of building the filter and + a_adj is calcilated as follows: + a - (1-access_cost_factor)*(1-s) = + (1+1_cond_eval_cost)*(1-s)-1_probe_cost - (1-access_cost_factor)*(1-s) + = (1-s)*(1_cond_eval_cost+access_cost_factor) - 1_probe_cost. + Here ((1-s)*(1_cond_eval_cost) * N is the gain from checking less + conditions pushed into the table, 1_probe_cost*N is the cost of the + probes and (1*s) * access_cost_factor * N must be the gain from + accessing less rows. + It does not matter how we calculate the cost of N full row fetches + cost_of_fetching_N_rows or + how we calculate the cost of fetching N key entries + cost_of_fetching_N_key_entries + the gain from less row fetches will be + (cost_of_fetching_N_rows - cost_of_fetching_N_key_entries) * (1-s) + and this should be equal to (1*s) * access_cost_factor * N. + Thus access_cost_factor must be calculated as + (cost_of_fetching_N_rows - cost_of_fetching_N_key_entries) / N. + + For safety we clip cost_of_fetching_N_key_entries by the value + of cost_of_fetching_N_row though formally it's not necessary. 
*/ - double access_cost_factor= MY_MIN((rows - keyread_tmp) / rows, 1.0); + /* + For eq_ref access we assume that the cost of fetching N key entries + is equal to the half of fetching N rows + */ + double key_access_cost= + type == JT_EQ_REF ? 0.5 * tmp : MY_MIN(tmp, keyread_tmp); + double access_cost_factor= MY_MIN((tmp - key_access_cost) / rows, 1.0); + if (!(records < s->worst_seeks && records <= thd->variables.max_seeks_for_key)) + { + // Don't use rowid filter trace_access_idx.add("rowid_filter_skipped", "worst/max seeks clipping"); - else if (access_cost_factor <= 0.0) - trace_access_idx.add("rowid_filter_skipped", "cost_factor <= 0"); + filter= NULL; + } else { filter= table->best_range_rowid_filter_for_partial_join(start_key->key, rows, access_cost_factor); - if (filter) - { - tmp-= filter->get_adjusted_gain(rows) - filter->get_cmp_gain(rows); - DBUG_ASSERT(tmp >= 0); - trace_access_idx.add("rowid_filter_key", - table->key_info[filter->key_no].name); - } + } + if (filter) + { + tmp-= filter->get_adjusted_gain(rows) - filter->get_cmp_gain(rows); + DBUG_ASSERT(tmp >= 0); + trace_access_idx.add("rowid_filter_key", + table->key_info[filter->key_no].name); } } trace_access_idx.add("rows", records).add("cost", tmp); @@ -8240,27 +8239,23 @@ best_access_path(JOIN *join, if ( s->quick->get_type() == QUICK_SELECT_I::QS_TYPE_RANGE) { double rows= record_count * s->found_records; - double access_cost_factor= MY_MIN(tmp / rows, 1.0); uint key_no= s->quick->index; /* See the comment concerning using rowid filter for with ref access */ - keyread_tmp= s->table->opt_range[key_no].index_only_cost * - record_count; - access_cost_factor= MY_MIN((rows - keyread_tmp) / rows, 1.0); - if (access_cost_factor > 0.0) + double row_access_cost= s->quick->read_time * record_count; + double key_access_cost= + MY_MIN(row_access_cost, + s->table->opt_range[key_no].index_only_cost * record_count); + double access_cost_factor= MY_MIN((row_access_cost - key_access_cost) / + rows, 1.0); + filter= + s->table->best_range_rowid_filter_for_partial_join(key_no, rows, + access_cost_factor); + if (filter) { - filter= - s->table-> - best_range_rowid_filter_for_partial_join(key_no, rows, - access_cost_factor); - if (filter) - { - tmp-= filter->get_adjusted_gain(rows); - DBUG_ASSERT(tmp >= 0); - } + tmp-= filter->get_adjusted_gain(rows); + DBUG_ASSERT(tmp >= 0); } - else - trace_access_scan.add("rowid_filter_skipped", "cost_factor <= 0"); type= JT_RANGE; } @@ -24522,39 +24517,71 @@ JOIN_TAB::remove_duplicates() { bool error; - ulong keylength= 0; - uint field_count; + ulong keylength= 0, sort_field_keylength= 0; + uint field_count, item_count; List<Item> *fields= (this-1)->fields; + Item *item; THD *thd= join->thd; - + SORT_FIELD *sortorder, *sorder; DBUG_ENTER("remove_duplicates"); DBUG_ASSERT(join->aggr_tables > 0 && table->s->tmp_table != NO_TMP_TABLE); THD_STAGE_INFO(join->thd, stage_removing_duplicates); - //join->explain->ops_tracker.report_duplicate_removal(); - - table->reginfo.lock_type=TL_WRITE; + if (!(sortorder= (SORT_FIELD*) my_malloc(PSI_INSTRUMENT_ME, + (fields->elements+1) * + sizeof(SORT_FIELD), + MYF(MY_WME)))) + DBUG_RETURN(TRUE); /* Calculate how many saved fields there is in list */ - field_count=0; + field_count= item_count= 0; + List_iterator<Item> it(*fields); - Item *item; - while ((item=it++)) + for (sorder= sortorder ; (item=it++) ;) { - if (item->get_tmp_table_field() && ! 
item->const_item()) - field_count++; + if (!item->const_item()) + { + if (item->get_tmp_table_field()) + { + /* Field is stored in temporary table, skipp */ + field_count++; + } + else + { + /* Item is not stored in temporary table, remember it */ + sorder->field= 0; // Safety, not used + sorder->item= item; + /* Calculate sorder->length */ + item->type_handler()->sort_length(thd, item, sorder); + sorder++; + item_count++; + } + } } + sorder->item= 0; // End marker - if (!field_count && !(join->select_options & OPTION_FOUND_ROWS) && !having) - { // only const items with no OPTION_FOUND_ROWS + if ((field_count + item_count == 0) && ! having && + !(join->select_options & OPTION_FOUND_ROWS)) + { + // only const items with no OPTION_FOUND_ROWS join->unit->lim.set_single_row(); // Only send first row + my_free(sortorder); DBUG_RETURN(false); } + /* + The table contains first fields that will be in the output, then + temporary results pointed to by the fields list. + Example: SELECT DISTINCT sum(a), sum(d) > 2 FROM ... + In this case the temporary table contains sum(a), sum(d). + */ + Field **first_field=table->field+table->s->fields - field_count; for (Field **ptr=first_field; *ptr; ptr++) keylength+= (*ptr)->sort_length() + (*ptr)->maybe_null(); + for (SORT_FIELD *ptr= sortorder ; ptr->item ; ptr++) + sort_field_keylength+= ptr->length + (ptr->item->maybe_null ? 1 : 0); /* Disable LIMIT ROWS EXAMINED in order to avoid interrupting prematurely @@ -24565,30 +24592,80 @@ JOIN_TAB::remove_duplicates() thd->reset_killed(); table->file->info(HA_STATUS_VARIABLE); + table->reginfo.lock_type=TL_WRITE; + if (table->s->db_type() == heap_hton || (!table->s->blob_fields && ((ALIGN_SIZE(keylength) + HASH_OVERHEAD) * table->file->stats.records < thd->variables.sortbuff_size))) - error=remove_dup_with_hash_index(join->thd, table, field_count, first_field, - keylength, having); + error= remove_dup_with_hash_index(join->thd, table, field_count, + first_field, sortorder, + keylength + sort_field_keylength, having); else - error=remove_dup_with_compare(join->thd, table, first_field, having); + error=remove_dup_with_compare(join->thd, table, first_field, sortorder, + sort_field_keylength, having); if (join->select_lex != join->select_lex->master_unit()->fake_select_lex) thd->lex->set_limit_rows_examined(); free_blobs(first_field); + my_free(sortorder); DBUG_RETURN(error); } +/* + Create a sort/compare key from items + + Key is of fixed length and binary comparable +*/ + +static uchar *make_sort_key(SORT_FIELD *sortorder, uchar *key_buffer, + String *tmp_value) +{ + for (SORT_FIELD *ptr= sortorder ; ptr->item ; ptr++) + { + ptr->item->type_handler()->make_sort_key_part(key_buffer, + ptr->item, + ptr, tmp_value); + key_buffer+= (ptr->item->maybe_null ? 1 : 0) + ptr->length; + } + return key_buffer; +} + + +/* + Remove duplicates by comparing all rows with all other rows + + @param thd THD + @param table Temporary table + @param first_field Pointer to fields in temporary table that are part of + distinct, ends with null pointer + @param sortorder An array of Items part of distsinct. Terminated with an + element N with sortorder[N]->item=NULL. 
+ @param keylength Length of key produced by sortorder + @param having Having expression (NULL if no having) +*/ + static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, + SORT_FIELD *sortorder, ulong keylength, Item *having) { handler *file=table->file; - uchar *record=table->record[0]; + uchar *record=table->record[0], *key_buffer, *key_buffer2; + char *tmp_buffer; int error; + String tmp_value; DBUG_ENTER("remove_dup_with_compare"); + if (unlikely(!my_multi_malloc(PSI_INSTRUMENT_ME, + MYF(MY_WME), + &key_buffer, keylength, + &key_buffer2, keylength, + &tmp_buffer, keylength+1, + NullS))) + DBUG_RETURN(1); + tmp_value.set(tmp_buffer, keylength, &my_charset_bin); + if (unlikely(file->ha_rnd_init_with_error(1))) DBUG_RETURN(1); @@ -24597,8 +24674,8 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, { if (unlikely(thd->check_killed())) { - error=0; - goto err; + error= 1; + goto end; } if (unlikely(error)) { @@ -24617,9 +24694,10 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, { my_message(ER_OUTOFMEMORY, ER_THD(thd,ER_OUTOFMEMORY), MYF(ME_FATAL)); - error=0; - goto err; + error= 1; + goto end; } + make_sort_key(sortorder, key_buffer, &tmp_value); store_record(table,record[1]); /* Read through rest of file and mark duplicated rows deleted */ @@ -24632,7 +24710,10 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, break; goto err; } - if (compare_record(table, first_field) == 0) + make_sort_key(sortorder, key_buffer2, &tmp_value); + if (compare_record(table, first_field) == 0 && + (!keylength || + memcmp(key_buffer, key_buffer2, keylength) == 0)) { if (unlikely((error= file->ha_delete_row(record)))) goto err; @@ -24651,38 +24732,52 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, goto err; } + error= 0; +end: + my_free(key_buffer); file->extra(HA_EXTRA_NO_CACHE); (void) file->ha_rnd_end(); - DBUG_RETURN(0); + DBUG_RETURN(error); + err: - file->extra(HA_EXTRA_NO_CACHE); - (void) file->ha_rnd_end(); - if (error) - file->print_error(error,MYF(0)); - DBUG_RETURN(1); + DBUG_ASSERT(error); + file->print_error(error,MYF(0)); + goto end; } /** - Generate a hash index for each row to quickly find duplicate rows. + Generate a hash index for each row to quickly find duplicate rows. + + @param thd THD + @param table Temporary table + @param field_count Number of fields part of distinct + @param first_field Pointer to fields in temporary table that are part of + distinct, ends with null pointer + @param sortorder An array of Items part of distsinct. Terminated with an + element N with sortorder[N]->item=NULL. + @param keylength Length of hash key + @param having Having expression (NULL if no having) - @note - Note that this will not work on tables with blobs! + @note + Note that this will not work on tables with blobs! 
*/ static int remove_dup_with_hash_index(THD *thd, TABLE *table, uint field_count, Field **first_field, + SORT_FIELD *sortorder, ulong key_length, Item *having) { uchar *key_buffer, *key_pos, *record=table->record[0]; + char *tmp_buffer; int error; handler *file= table->file; ulong extra_length= ALIGN_SIZE(key_length)-key_length; uint *field_lengths, *field_length; HASH hash; - Field **ptr; + String tmp_value; DBUG_ENTER("remove_dup_with_hash_index"); if (!my_multi_malloc(key_memory_hash_index_key_buffer, MYF(MY_WME), @@ -24691,10 +24786,13 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, (long) file->stats.records), &field_lengths, (uint) (field_count*sizeof(*field_lengths)), + &tmp_buffer, key_length+1, NullS)) DBUG_RETURN(1); - for (ptr= first_field, field_length=field_lengths ; *ptr ; ptr++) + tmp_value.set(tmp_buffer, key_length, &my_charset_bin); + field_length= field_lengths; + for (Field **ptr= first_field ; *ptr ; ptr++) (*field_length++)= (*ptr)->sort_length(); if (my_hash_init(key_memory_hash_index_key_buffer, &hash, &my_charset_bin, @@ -24708,7 +24806,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, if (unlikely((error= file->ha_rnd_init(1)))) goto err; - key_pos=key_buffer; + key_pos= key_buffer; for (;;) { uchar *org_key_pos; @@ -24733,11 +24831,14 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, /* copy fields to key buffer */ org_key_pos= key_pos; field_length=field_lengths; - for (ptr= first_field ; *ptr ; ptr++) + for (Field **ptr= first_field ; *ptr ; ptr++) { (*ptr)->make_sort_key_part(key_pos, *field_length); key_pos+= (*ptr)->maybe_null() + *field_length++; } + /* Copy result fields not stored in table to key buffer */ + key_pos= make_sort_key(sortorder, key_pos, &tmp_value); + /* Check if it exists before */ if (my_hash_search(&hash, org_key_pos, key_length)) { @@ -28156,6 +28257,162 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str, } } +enum explainable_cmd_type +{ + SELECT_CMD, INSERT_CMD, REPLACE_CMD, UPDATE_CMD, DELETE_CMD, NO_CMD +}; + +static +const char * const explainable_cmd_name []= +{ + "select ", + "insert ", + "replace ", + "update ", + "delete ", +}; + +static +char const *get_explainable_cmd_name(enum explainable_cmd_type cmd) +{ + return explainable_cmd_name[cmd]; +} + +static +enum explainable_cmd_type get_explainable_cmd_type(THD *thd) +{ + switch (thd->lex->sql_command) { + case SQLCOM_SELECT: + return SELECT_CMD; + case SQLCOM_INSERT: + case SQLCOM_INSERT_SELECT: + return INSERT_CMD; + case SQLCOM_REPLACE: + case SQLCOM_REPLACE_SELECT: + return REPLACE_CMD; + case SQLCOM_UPDATE: + case SQLCOM_UPDATE_MULTI: + return UPDATE_CMD; + case SQLCOM_DELETE: + case SQLCOM_DELETE_MULTI: + return DELETE_CMD; + default: + return SELECT_CMD; + } +} + + +void TABLE_LIST::print_leaf_tables(THD *thd, String *str, + enum_query_type query_type) +{ + if (merge_underlying_list) + { + for (TABLE_LIST *tbl= merge_underlying_list; tbl; tbl= tbl->next_local) + tbl->print_leaf_tables(thd, str, query_type); + } + else + print(thd, 0, str, query_type); +} + + +void st_select_lex::print_item_list(THD *thd, String *str, + enum_query_type query_type) +{ + bool first= 1; + /* + outer_select() can not be used here because it is for name resolution + and will return NULL at any end of name resolution chain (view/derived) + */ + bool top_level= (get_master()->get_master() == 0); + List_iterator_fast<Item> it(item_list); + Item *item; + while ((item= it++)) + { + if (first) + first= 0; + else + 
str->append(','); + + if ((is_subquery_function() && item->is_autogenerated_name()) || + !item->name.str) + { + /* + Do not print auto-generated aliases in subqueries. It has no purpose + in a view definition or other contexts where the query is printed. + */ + item->print(str, query_type); + } + else + { + /* + Do not print illegal names (if it is not top level SELECT). + Top level view checked (and correct name are assigned), + other cases of top level SELECT are not important, because + it is not "table field". + */ + if (top_level || + !item->is_autogenerated_name() || + !check_column_name(item->name.str)) + item->print_item_w_name(str, query_type); + else + item->print(str, query_type); + } + } +} + + +void st_select_lex::print_set_clause(THD *thd, String *str, + enum_query_type query_type) +{ + bool first= 1; + /* + outer_select() can not be used here because it is for name resolution + and will return NULL at any end of name resolution chain (view/derived) + */ + List_iterator_fast<Item> it(item_list); + List_iterator_fast<Item> vt(thd->lex->value_list); + Item *item; + Item *val; + while ((item= it++, val= vt++ )) + { + if (first) + { + str->append(STRING_WITH_LEN(" set ")); + first= 0; + } + else + str->append(','); + + item->print(str, query_type); + str->append(STRING_WITH_LEN(" = ")); + val->print(str, query_type); + } +} + + +void st_select_lex::print_on_duplicate_key_clause(THD *thd, String *str, + enum_query_type query_type) +{ + bool first= 1; + List_iterator_fast<Item> it(thd->lex->update_list); + List_iterator_fast<Item> vt(thd->lex->value_list); + Item *item; + Item *val; + while ((item= it++, val= vt++ )) + { + if (first) + { + str->append(STRING_WITH_LEN(" on duplicate key update ")); + first= 0; + } + else + str->append(','); + + item->print(str, query_type); + str->append(STRING_WITH_LEN(" = ")); + val->print(str, query_type); + } +} void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) { @@ -28167,6 +28424,67 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) return; } + if (is_tvc_wrapper && (query_type & QT_NO_WRAPPERS_FOR_TVC_IN_VIEW)) + { + first_inner_unit()->first_select()->print(thd, str, query_type); + return; + } + + bool top_level= (get_master()->get_master() == 0); + enum explainable_cmd_type sel_type= SELECT_CMD; + if (top_level) + sel_type= get_explainable_cmd_type(thd); + + if (sel_type == INSERT_CMD || sel_type == REPLACE_CMD) + { + str->append(get_explainable_cmd_name(sel_type)); + str->append(STRING_WITH_LEN("into ")); + TABLE_LIST *tbl= thd->lex->query_tables; + while (tbl->merge_underlying_list) + tbl= tbl->merge_underlying_list; + tbl->print(thd, 0, str, query_type); + if (thd->lex->field_list.elements) + { + str->append ('('); + List_iterator_fast<Item> it(thd->lex->field_list); + Item *item; + bool first= true; + while ((item= it++)) + { + if (first) + first= false; + else + str->append(','); + str->append(item->name); + } + str->append(')'); + } + + str->append(' '); + + if (thd->lex->sql_command == SQLCOM_INSERT || + thd->lex->sql_command == SQLCOM_REPLACE) + { + str->append(STRING_WITH_LEN("values ")); + bool is_first_elem= true; + List_iterator_fast<List_item> li(thd->lex->many_values); + List_item *list; + + while ((list= li++)) + { + if (is_first_elem) + is_first_elem= false; + else + str->append(','); + + print_list_item(str, list, query_type); + } + if (thd->lex->update_list.elements) + print_on_duplicate_key_clause(thd, str, query_type); + return; + } + } + if ((query_type & 
QT_SHOW_SELECT_NUMBER) && thd->lex->all_selects_list && thd->lex->all_selects_list->link_next && @@ -28190,7 +28508,10 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) str->append(" */ "); } - str->append(STRING_WITH_LEN("select ")); + if (sel_type == SELECT_CMD || + sel_type == INSERT_CMD || + sel_type == REPLACE_CMD) + str->append(STRING_WITH_LEN("select ")); if (join && join->cleaned) { @@ -28236,57 +28557,66 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) } //Item List - bool first= 1; + if (sel_type == SELECT_CMD || + sel_type == INSERT_CMD || + sel_type == REPLACE_CMD) + print_item_list(thd, str, query_type); /* - outer_select() can not be used here because it is for name resolution - and will return NULL at any end of name resolution chain (view/derived) + from clause + TODO: support USING/FORCE/IGNORE index */ - bool top_level= (get_master()->get_master() == 0); - List_iterator_fast<Item> it(item_list); - Item *item; - while ((item= it++)) + if (table_list.elements) { - if (first) - first= 0; - else - str->append(','); - - if ((is_subquery_function() && item->is_autogenerated_name()) || - !item->name.str) + if (sel_type == SELECT_CMD || + sel_type == INSERT_CMD || + sel_type == REPLACE_CMD) { - /* - Do not print auto-generated aliases in subqueries. It has no purpose - in a view definition or other contexts where the query is printed. - */ - item->print(str, query_type); + str->append(STRING_WITH_LEN(" from ")); + /* go through join tree */ + print_join(thd, join? join->eliminated_tables: 0, str, &top_join_list, + query_type); } - else + if (sel_type == UPDATE_CMD || sel_type == DELETE_CMD) + str->append(STRING_WITH_LEN(get_explainable_cmd_name(sel_type))); + if (sel_type == DELETE_CMD) { - /* - Do not print illegal names (if it is not top level SELECT). - Top level view checked (and correct name are assigned), - other cases of top level SELECT are not important, because - it is not "table field". - */ - if (top_level || - !item->is_autogenerated_name() || - !check_column_name(item->name.str)) - item->print_item_w_name(str, query_type); + str->append(STRING_WITH_LEN(" from ")); + bool first= true; + for (TABLE_LIST *target_tbl= thd->lex->auxiliary_table_list.first; + target_tbl; + target_tbl= target_tbl->next_local) + { + if (first) + first= false; + else + str->append(','); + target_tbl->correspondent_table->print_leaf_tables(thd, str, + query_type); + } + + if (!first) + str->append(STRING_WITH_LEN(" using ")); + } + if (sel_type == UPDATE_CMD || sel_type == DELETE_CMD) + { + if (join) + print_join(thd, 0, str, &top_join_list, query_type); else - item->print(str, query_type); + { + bool first= true; + List_iterator_fast<TABLE_LIST> li(leaf_tables); + TABLE_LIST *tbl; + while ((tbl= li++)) + { + if (first) + first= false; + else + str->append(','); + tbl->print(thd, 0, str, query_type); + } + } } } - - /* - from clause - TODO: support USING/FORCE/IGNORE index - */ - if (table_list.elements) - { - str->append(STRING_WITH_LEN(" from ")); - /* go through join tree */ - print_join(thd, join? 
join->eliminated_tables: 0, str, &top_join_list, query_type); - } else if (where) { /* @@ -28296,10 +28626,15 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) str->append(STRING_WITH_LEN(" from DUAL ")); } + if (sel_type == UPDATE_CMD) + print_set_clause(thd, str, query_type); + // Where Item *cur_where= where; if (join) cur_where= join->conds; + else if (sel_type == UPDATE_CMD || sel_type == DELETE_CMD) + cur_where= thd->lex->upd_del_where; if (cur_where || cond_value != Item::COND_UNDEF) { str->append(STRING_WITH_LEN(" where ")); @@ -28356,6 +28691,15 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) else if (lock_type == TL_WRITE) str->append(" for update"); + if ((sel_type == INSERT_CMD || sel_type == REPLACE_CMD) && + thd->lex->update_list.elements) + print_on_duplicate_key_clause(thd, str, query_type); + + // returning clause + if (sel_type == DELETE_CMD && !item_list.elements) + { + print_item_list(thd, str, query_type); + } // PROCEDURE unsupported here } diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 6405a919698..1f1e7d67a2a 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -2629,7 +2629,8 @@ static int show_create_view(THD *thd, TABLE_LIST *table, String *buff) a different syntax, like when ANSI_QUOTES is defined. */ table->view->unit.print(buff, enum_query_type(QT_VIEW_INTERNAL | - QT_ITEM_ORIGINAL_FUNC_NULLIF)); + QT_ITEM_ORIGINAL_FUNC_NULLIF | + QT_NO_WRAPPERS_FOR_TVC_IN_VIEW)); if (table->with_check != VIEW_CHECK_NONE) { diff --git a/sql/sql_sort.h b/sql/sql_sort.h index a474d7c25e9..7b9512404ff 100644 --- a/sql/sql_sort.h +++ b/sql/sql_sort.h @@ -20,6 +20,7 @@ #include "my_base.h" /* ha_rows */ #include <my_sys.h> /* qsort2_cmp */ #include "queues.h" +#include "sql_string.h" #include "sql_class.h" class Field; diff --git a/sql/sql_string.cc b/sql/sql_string.cc index f4fa880eeb3..ed48591db4e 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -553,7 +553,7 @@ bool String::append(const char *s,size_t size) } /* - For an ASCII compatinble string we can just append. + For an ASCII compatible string we can just append. */ return Binary_string::append(s, arg_length); } diff --git a/sql/sql_string.h b/sql/sql_string.h index 32df8b668f2..6f8dd2773e1 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -197,6 +197,83 @@ public: { return m_charset != &my_charset_bin; } + + /* + The MariaDB version when the last collation change happened, + e.g. due to a bug fix. See functions below. + */ + static ulong latest_mariadb_version_with_collation_change() + { + return 110002; + } + + /* + Check if the collation with the given ID changed its order + since the given MariaDB version. 
+ */ + static bool collation_changed_order(ulong mysql_version, uint cs_number) + { + if ((mysql_version < 50048 && + (cs_number == 11 || /* ascii_general_ci - bug #29499, bug #27562 */ + cs_number == 41 || /* latin7_general_ci - bug #29461 */ + cs_number == 42 || /* latin7_general_cs - bug #29461 */ + cs_number == 20 || /* latin7_estonian_cs - bug #29461 */ + cs_number == 21 || /* latin2_hungarian_ci - bug #29461 */ + cs_number == 22 || /* koi8u_general_ci - bug #29461 */ + cs_number == 23 || /* cp1251_ukrainian_ci - bug #29461 */ + cs_number == 26)) || /* cp1250_general_ci - bug #29461 */ + (mysql_version < 50124 && + (cs_number == 33 || /* utf8mb3_general_ci - bug #27877 */ + cs_number == 35))) /* ucs2_general_ci - bug #27877 */ + return true; + + if (cs_number == 159 && /* ucs2_general_mysql500_ci - MDEV-30746 */ + ((mysql_version >= 100400 && mysql_version < 100429) || + (mysql_version >= 100500 && mysql_version < 100520) || + (mysql_version >= 100600 && mysql_version < 100613) || + (mysql_version >= 100700 && mysql_version < 100708) || + (mysql_version >= 100800 && mysql_version < 100808) || + (mysql_version >= 100900 && mysql_version < 100906) || + (mysql_version >= 101000 && mysql_version < 101004) || + (mysql_version >= 101100 && mysql_version < 101103) || + (mysql_version >= 110000 && mysql_version < 110002))) + return true; + return false; + } + + /** + Check if a collation has changed ID since the given version. + Return the new ID. + + @param mysql_version + @param cs_number - collation ID + + @retval the new collation ID (or cs_number, if no change) + */ + + static uint upgrade_collation_id(ulong mysql_version, uint cs_number) + { + if (mysql_version >= 50300 && mysql_version <= 50399) + { + switch (cs_number) { + case 149: return MY_PAGE2_COLLATION_ID_UCS2; // ucs2_crotian_ci + case 213: return MY_PAGE2_COLLATION_ID_UTF8; // utf8_crotian_ci + } + } + if ((mysql_version >= 50500 && mysql_version <= 50599) || + (mysql_version >= 100000 && mysql_version <= 100005)) + { + switch (cs_number) { + case 149: return MY_PAGE2_COLLATION_ID_UCS2; // ucs2_crotian_ci + case 213: return MY_PAGE2_COLLATION_ID_UTF8; // utf8_crotian_ci + case 214: return MY_PAGE2_COLLATION_ID_UTF32; // utf32_croatian_ci + case 215: return MY_PAGE2_COLLATION_ID_UTF16; // utf16_croatian_ci + case 245: return MY_PAGE2_COLLATION_ID_UTF8MB4;// utf8mb4_croatian_ci + } + } + return cs_number; + } + }; diff --git a/sql/sql_tvc.cc b/sql/sql_tvc.cc index b9219515b48..63dc5749b1d 100644 --- a/sql/sql_tvc.cc +++ b/sql/sql_tvc.cc @@ -705,6 +705,7 @@ st_select_lex *wrap_tvc(THD *thd, st_select_lex *tvc_sl, wrapper_sl->parent_lex= lex; /* Used in init_query. 
*/ wrapper_sl->init_query(); wrapper_sl->init_select(); + wrapper_sl->is_tvc_wrapper= true; wrapper_sl->nest_level= tvc_sl->nest_level; wrapper_sl->parsing_place= tvc_sl->parsing_place; diff --git a/sql/sql_type.cc b/sql/sql_type.cc index d2939f5e6e9..5a411053a6d 100644 --- a/sql/sql_type.cc +++ b/sql/sql_type.cc @@ -9034,13 +9034,13 @@ Type_handler_timestamp_common::Item_val_native_with_conversion(THD *thd, Item *item, Native *to) const { - MYSQL_TIME ltime; if (item->type_handler()->type_handler_for_native_format() == &type_handler_timestamp2) return item->val_native(thd, to); + Datetime dt(thd, item, Datetime::Options(TIME_NO_ZERO_IN_DATE, thd)); return - item->get_date(thd, <ime, Datetime::Options(TIME_NO_ZERO_IN_DATE, thd)) || - TIME_to_native(thd, <ime, to, item->datetime_precision(thd)); + !dt.is_valid_datetime() || + TIME_to_native(thd, dt.get_mysql_time(), to, item->datetime_precision(thd)); } bool Type_handler_null::union_element_finalize(Item_type_holder *item) const diff --git a/sql/sql_type.h b/sql/sql_type.h index db4f59d4d9f..f3256ee01d3 100644 --- a/sql/sql_type.h +++ b/sql/sql_type.h @@ -4070,14 +4070,14 @@ public: */ virtual void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const= 0; + String *tmp) const= 0; /* create a compact size key part for a sort key */ virtual uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const=0; + String *tmp) const=0; virtual void sort_length(THD *thd, const Type_std_attributes *item, @@ -4484,12 +4484,12 @@ public: uint32 flags) const override; void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override + String *tmp) const override { MY_ASSERT_UNREACHABLE(); } uint make_packed_sort_key_part(uchar *, Item *, const SORT_FIELD_ATTR *, - Sort_param *) const override + String *) const override { MY_ASSERT_UNREACHABLE(); return 0; @@ -4830,10 +4830,10 @@ public: const override; void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; void sort_length(THD *thd, const Type_std_attributes *item, SORT_FIELD_ATTR *attr) const override; @@ -4942,10 +4942,10 @@ public: const override; void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; void Column_definition_attributes_frm_pack(const Column_definition_attributes *at, uchar *buff) const override; @@ -5198,10 +5198,10 @@ public: TABLE_SHARE *share) const override; void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; void Column_definition_attributes_frm_pack(const Column_definition_attributes *at, uchar *buff) const override; @@ -5309,10 +5309,10 @@ public: uchar *buff) const override; void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const 
override; + String *tmp) const override; uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; void sort_length(THD *thd, const Type_std_attributes *item, SORT_FIELD_ATTR *attr) const override; @@ -5410,10 +5410,10 @@ public: CHARSET_INFO *cs) const override; void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; void sort_length(THD *thd, const Type_std_attributes *item, SORT_FIELD_ATTR *attr) const override; @@ -6635,10 +6635,10 @@ public: const override; void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; uint make_packed_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field, - Sort_param *param) const override; + String *tmp) const override; void sort_length(THD *thd, const Type_std_attributes *item, SORT_FIELD_ATTR *attr) const override; diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 244bd319205..9405eda1f7b 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -1373,7 +1373,8 @@ produce_explain_and_leave: goto err; emit_explain_and_leave: - int err2= thd->lex->explain->send_explain(thd); + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + int err2= thd->lex->explain->send_explain(thd, extended); delete select; free_underlaid_joins(thd, select_lex); @@ -1447,6 +1448,8 @@ bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list, select_lex->fix_prepare_information(thd, conds, &fake_conds); + if (!thd->lex->upd_del_where) + thd->lex->upd_del_where= *conds; DBUG_RETURN(FALSE); } @@ -1974,7 +1977,10 @@ bool mysql_multi_update(THD *thd, TABLE_LIST *table_list, List<Item> *fields, else { if (thd->lex->describe || thd->lex->analyze_stmt) - res= thd->lex->explain->send_explain(thd); + { + bool extended= thd->lex->describe & DESCRIBE_EXTENDED; + res= thd->lex->explain->send_explain(thd, extended); + } } thd->abort_on_warning= 0; DBUG_RETURN(res); diff --git a/sql/sql_view.cc b/sql/sql_view.cc index 0c1d0e13382..026ddd6ea0d 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -962,10 +962,12 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view, Sql_mode_instant_remove sms(thd, MODE_ANSI_QUOTES); lex->unit.print(&view_query, enum_query_type(QT_VIEW_INTERNAL | - QT_ITEM_ORIGINAL_FUNC_NULLIF)); + QT_ITEM_ORIGINAL_FUNC_NULLIF | + QT_NO_WRAPPERS_FOR_TVC_IN_VIEW)); lex->unit.print(&is_query, enum_query_type(QT_TO_SYSTEM_CHARSET | QT_WITHOUT_INTRODUCERS | - QT_ITEM_ORIGINAL_FUNC_NULLIF)); + QT_ITEM_ORIGINAL_FUNC_NULLIF | + QT_NO_WRAPPERS_FOR_TVC_IN_VIEW)); } DBUG_PRINT("info", ("View: %.*s", view_query.length(), view_query.ptr())); diff --git a/sql/table.cc b/sql/table.cc index ff650eef257..1c13a244afd 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -938,39 +938,6 @@ static uint enum_value_with_check(THD *thd, TABLE_SHARE *share, } -/** - Check if a collation has changed number - - @param mysql_version - @param current collation number - - @retval new collation number (same as current collation number of no change) -*/ - -static uint upgrade_collation(ulong mysql_version, uint cs_number) -{ - if (mysql_version >= 50300 && mysql_version <= 50399) - { - switch (cs_number) { - case 149: return 
MY_PAGE2_COLLATION_ID_UCS2; // ucs2_crotian_ci - case 213: return MY_PAGE2_COLLATION_ID_UTF8; // utf8_crotian_ci - } - } - if ((mysql_version >= 50500 && mysql_version <= 50599) || - (mysql_version >= 100000 && mysql_version <= 100005)) - { - switch (cs_number) { - case 149: return MY_PAGE2_COLLATION_ID_UCS2; // ucs2_crotian_ci - case 213: return MY_PAGE2_COLLATION_ID_UTF8; // utf8_crotian_ci - case 214: return MY_PAGE2_COLLATION_ID_UTF32; // utf32_croatian_ci - case 215: return MY_PAGE2_COLLATION_ID_UTF16; // utf16_croatian_ci - case 245: return MY_PAGE2_COLLATION_ID_UTF8MB4;// utf8mb4_croatian_ci - } - } - return cs_number; -} - - void Column_definition_attributes::frm_pack_basic(uchar *buff) const { int2store(buff + 3, length); @@ -1030,7 +997,7 @@ bool Column_definition_attributes::frm_unpack_charset(TABLE_SHARE *share, const uchar *buff) { uint cs_org= buff[14] + (((uint) buff[11]) << 8); - uint cs_new= upgrade_collation(share->mysql_version, cs_org); + uint cs_new= Charset::upgrade_collation_id(share->mysql_version, cs_org); if (cs_org != cs_new) share->incompatible_version|= HA_CREATE_USED_CHARSET; if (cs_new && !(charset= get_charset(cs_new, MYF(0)))) @@ -1857,7 +1824,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, if (!frm_image[32]) // New frm file in 3.23 { uint cs_org= (((uint) frm_image[41]) << 8) + (uint) frm_image[38]; - uint cs_new= upgrade_collation(share->mysql_version, cs_org); + uint cs_new= Charset::upgrade_collation_id(share->mysql_version, cs_org); if (cs_org != cs_new) share->incompatible_version|= HA_CREATE_USED_CHARSET; @@ -2965,6 +2932,9 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, goto err; field= key_part->field= share->field[key_part->fieldnr-1]; + if (Charset::collation_changed_order(share->mysql_version, + field->charset()->number)) + share->incompatible_version|= HA_CREATE_USED_CHARSET; key_part->type= field->key_type(); if (field->invisible > INVISIBLE_USER && !field->vers_sys_field()) @@ -6655,6 +6625,9 @@ bool TABLE_LIST::prepare_security(THD *thd) #ifndef DBUG_OFF void TABLE_LIST::set_check_merged() { + if (is_view()) + return; + DBUG_ASSERT(derived); /* It is not simple to check all, but at least this should be checked: @@ -6982,9 +6955,8 @@ void Field_iterator_table_ref::set_field_iterator() table_ref->alias.str)); } /* This is a merge view, so use field_translation. */ - else if (table_ref->field_translation) + else if (table_ref->is_merged_derived() && table_ref->field_translation) { - DBUG_ASSERT(table_ref->is_merged_derived()); field_it= &view_field_it; DBUG_PRINT("info", ("field_it for '%s' is Field_iterator_view", table_ref->alias.str)); @@ -9477,15 +9449,15 @@ bool TABLE_LIST::init_derived(THD *thd, bool init_view) set_derived(); } - if (!is_view() && + if (is_view() || !derived_table_optimization_done(this)) { /* A subquery might be forced to be materialized due to a side-effect. 
*/ - if (!is_materialized_derived() && first_select->is_mergeable() && - optimizer_flag(thd, OPTIMIZER_SWITCH_DERIVED_MERGE) && + if (!is_materialized_derived() && unit->can_be_merged() && + (optimizer_flag(thd, OPTIMIZER_SWITCH_DERIVED_MERGE) || is_view()) && !thd->lex->can_not_use_merged() && - !(thd->lex->sql_command == SQLCOM_UPDATE_MULTI || - thd->lex->sql_command == SQLCOM_DELETE_MULTI) && + !((thd->lex->sql_command == SQLCOM_UPDATE_MULTI || + thd->lex->sql_command == SQLCOM_DELETE_MULTI) && !is_view()) && !is_recursive_with_table()) set_merged_derived(); else diff --git a/sql/table.h b/sql/table.h index 3ea8347de68..d8756deb43c 100644 --- a/sql/table.h +++ b/sql/table.h @@ -2690,6 +2690,8 @@ struct TABLE_LIST } void print(THD *thd, table_map eliminated_tables, String *str, enum_query_type query_type); + void print_leaf_tables(THD *thd, String *str, + enum_query_type query_type); bool check_single_table(TABLE_LIST **table, table_map map, TABLE_LIST *view); bool set_insert_values(MEM_ROOT *mem_root); @@ -2830,8 +2832,7 @@ struct TABLE_LIST DBUG_PRINT("enter", ("Alias: '%s' Unit: %p", (alias.str ? alias.str : "<NULL>"), get_unit())); - derived_type= static_cast<uint8>((derived_type & DTYPE_MASK) | - DTYPE_TABLE | DTYPE_MERGE); + derived_type= static_cast<uint8>((derived_type & DTYPE_MASK) | DTYPE_MERGE); set_check_merged(); DBUG_VOID_RETURN; } @@ -2845,10 +2846,9 @@ struct TABLE_LIST DBUG_PRINT("enter", ("Alias: '%s' Unit: %p", (alias.str ? alias.str : "<NULL>"), get_unit())); - derived= get_unit(); derived_type= static_cast<uint8>((derived_type & (derived ? DTYPE_MASK : DTYPE_VIEW)) | - DTYPE_TABLE | DTYPE_MATERIALIZE); + DTYPE_MATERIALIZE); set_check_materialized(); DBUG_VOID_RETURN; } diff --git a/sql/wsrep_high_priority_service.cc b/sql/wsrep_high_priority_service.cc index 7d8296a75a1..d9988914c4d 100644 --- a/sql/wsrep_high_priority_service.cc +++ b/sql/wsrep_high_priority_service.cc @@ -502,7 +502,13 @@ int Wsrep_high_priority_service::log_dummy_write_set(const wsrep::ws_handle& ws_ if (!WSREP_EMULATE_BINLOG(m_thd)) { wsrep_register_for_group_commit(m_thd); - ret = ret || cs.provider().commit_order_leave(ws_handle, ws_meta, err); + /* wait_for_prior_commit() ensures that all preceding transactions + have been committed and seqno has been synced into + storage engine. We don't release commit order here yet to + avoid following transactions to sync seqno before + wsrep_set_SE_checkpoint() below returns. This effectively pauses + group commit for the checkpoint operation, but is the only way to + ensure proper ordering. 
*/ m_thd->wait_for_prior_commit(); } @@ -512,10 +518,7 @@ int Wsrep_high_priority_service::log_dummy_write_set(const wsrep::ws_handle& ws_ { wsrep_unregister_from_group_commit(m_thd); } - else - { - ret= ret || cs.provider().commit_order_leave(ws_handle, ws_meta, err); - } + ret= ret || cs.provider().commit_order_leave(ws_handle, ws_meta, err); cs.after_applying(); } DBUG_RETURN(ret); diff --git a/sql/wsrep_trans_observer.h b/sql/wsrep_trans_observer.h index 3c5cff2b741..8f998244ee6 100644 --- a/sql/wsrep_trans_observer.h +++ b/sql/wsrep_trans_observer.h @@ -1,4 +1,4 @@ -/* Copyright 2016-2022 Codership Oy <http://www.codership.com> +/* Copyright 2016-2023 Codership Oy <http://www.codership.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -217,6 +217,19 @@ static inline bool wsrep_run_commit_hook(THD* thd, bool all) } mysql_mutex_unlock(&thd->LOCK_thd_data); } + + mysql_mutex_lock(&thd->LOCK_thd_data); + /* Transaction creating sequence is TOI or RSU, + CREATE [TEMPORARY] SEQUENCE = CREATE + INSERT (initial value) + and replicated using statement based replication, thus + the commit hooks will be skipped */ + if (ret && + (thd->wsrep_cs().mode() == wsrep::client_state::m_toi || + thd->wsrep_cs().mode() == wsrep::client_state::m_rsu) && + thd->lex->sql_command == SQLCOM_CREATE_SEQUENCE) + ret= false; + mysql_mutex_unlock(&thd->LOCK_thd_data); + DBUG_PRINT("wsrep", ("return: %d", ret)); DBUG_RETURN(ret); } |
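
Aside: the remove_dup_with_hash_index() hunk earlier in this patch now also packs sort-key images for result fields that are not stored in the table (via make_sort_key() and the new tmp_value buffer shared through my_multi_malloc). A minimal, standalone sketch of the underlying duplicate-elimination idea follows; it is not the server code — std::unordered_set and the Row type stand in for the server's HASH, Field and handler machinery, and all names in it are hypothetical.

```cpp
// Conceptual sketch of hash-based duplicate removal: build one fixed-length
// key per row from the per-field sort images, remember seen keys, and drop
// rows whose key has already been seen (the server deletes them via
// ha_delete_row() instead of keeping a "kept" vector).
#include <cstdio>
#include <string>
#include <unordered_set>
#include <vector>

struct Row { int a; std::string b; };            // hypothetical stand-in for table fields

int main()
{
  std::vector<Row> rows= {{1, "x"}, {2, "y"}, {1, "x"}, {3, "x"}};
  std::unordered_set<std::string> seen;          // stands in for my_hash_search()/my_hash_insert()
  std::vector<Row> kept;

  for (const Row &r : rows)
  {
    // key_buffer/key_pos analogue: concatenate the per-field sort images.
    std::string key= std::to_string(r.a) + '\0' + r.b;
    if (seen.insert(key).second)                 // first occurrence of this key
      kept.push_back(r);
  }
  printf("%zu unique rows\n", kept.size());      // prints "3 unique rows"
  return 0;
}
```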
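
Aside: the new Charset helpers added to sql_string.h above encode, per release series, the version windows in which a collation's sort order was broken; table.cc now calls Charset::collation_changed_order() and Charset::upgrade_collation_id() when unpacking an frm image and raises HA_CREATE_USED_CHARSET in share->incompatible_version on a hit. A minimal standalone sketch of the window check, assuming only the ranges listed in the patch for collation id 159 (ucs2_general_mysql500_ci, MDEV-30746); the function name and the main() driver are hypothetical and only illustrate how a (create version, collation id) pair maps to "needs upgrade".

```cpp
// Standalone illustration of the per-series version windows from
// Charset::collation_changed_order(); each pair is
// [first affected version, first fixed version) for one release series.
#include <cstdio>

static bool ucs2_mysql500_order_changed(unsigned long mysql_version)
{
  static const struct { unsigned long lo, hi; } windows[]= {
    {100400, 100429}, {100500, 100520}, {100600, 100613},
    {100700, 100708}, {100800, 100808}, {100900, 100906},
    {101000, 101004}, {101100, 101103}, {110000, 110002},
  };
  for (const auto &w : windows)
    if (mysql_version >= w.lo && mysql_version < w.hi)
      return true;
  return false;
}

int main()
{
  // A table created with 10.6.10 falls inside a broken window and would get
  // the charset-related incompatible_version flag; 10.6.13 would not.
  printf("10.6.10: %d\n", ucs2_mysql500_order_changed(100610)); // prints 1
  printf("10.6.13: %d\n", ucs2_mysql500_order_changed(100613)); // prints 0
  return 0;
}
```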