author     Oleksandr Byelkin <sanja@mariadb.com>    2021-01-06 10:53:00 +0100
committer  Oleksandr Byelkin <sanja@mariadb.com>    2021-01-06 10:53:00 +0100
commit     02e7bff882c5169ba3e1ce7e9a93d4c28fe3afd1 (patch)
tree       4a6a8d578fedb4c2b1f437ab39c2d7430ad95163 /sql
parent     5f10870c8a8c2448bfc26d990110390d48000b84 (diff)
parent     478b83032b170b2ae030fa77fe4bed60a7910472 (diff)
Merge commit '10.4' into 10.5
Diffstat (limited to 'sql')
-rw-r--r--  sql/field.cc                        |  2
-rw-r--r--  sql/handler.cc                      |  7
-rw-r--r--  sql/item_cmpfunc.cc                 | 19
-rw-r--r--  sql/item_cmpfunc.h                  |  7
-rw-r--r--  sql/item_sum.cc                     | 14
-rw-r--r--  sql/opt_range.cc                    | 24
-rw-r--r--  sql/opt_split.cc                    |  2
-rw-r--r--  sql/sql_acl.cc                      | 36
-rw-r--r--  sql/sql_base.cc                     | 36
-rw-r--r--  sql/sql_cache.cc                    |  4
-rw-r--r--  sql/sql_cache.h                     |  2
-rw-r--r--  sql/sql_class.h                     | 15
-rw-r--r--  sql/sql_cte.cc                      | 27
-rw-r--r--  sql/sql_delete.cc                   | 10
-rw-r--r--  sql/sql_derived.cc                  | 11
-rw-r--r--  sql/sql_help.cc                     |  3
-rw-r--r--  sql/sql_parse.cc                    | 11
-rw-r--r--  sql/sql_select.cc                   |  4
-rw-r--r--  sql/sql_show.cc                     |  3
-rw-r--r--  sql/sql_table.cc                    | 38
-rw-r--r--  sql/sql_type.cc                     | 11
-rw-r--r--  sql/sql_union.cc                    | 68
-rw-r--r--  sql/sql_update.cc                   | 75
-rw-r--r--  sql/sql_view.cc                     |  2
-rw-r--r--  sql/sql_yacc.yy                     | 16
-rw-r--r--  sql/sys_vars.cc                     |  6
-rw-r--r--  sql/wsrep_binlog.cc                 |  2
-rw-r--r--  sql/wsrep_high_priority_service.cc  | 29
-rw-r--r--  sql/wsrep_mysqld.cc                 |  4
29 files changed, 335 insertions, 153 deletions
diff --git a/sql/field.cc b/sql/field.cc
index 2173670572d..45b3a3c703e 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -8549,7 +8549,7 @@ int Field_blob::store(const char *from,size_t length,CHARSET_INFO *cs)
DBUG_ASSERT(length <= max_data_length());
new_length= length;
- copy_length= (size_t)MY_MIN(UINT_MAX,table->in_use->variables.group_concat_max_len);
+ copy_length= table->in_use->variables.group_concat_max_len;
if (new_length > copy_length)
{
new_length= Well_formed_prefix(cs,
diff --git a/sql/handler.cc b/sql/handler.cc
index bdf6011c364..228c016e082 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -3526,6 +3526,13 @@ int handler::update_auto_increment()
(table->auto_increment_field_not_null &&
thd->variables.sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO))
{
+
+ /*
+ There could be an error reported because value was truncated
+ when strict mode is enabled.
+ */
+ if (thd->is_error())
+ DBUG_RETURN(HA_ERR_AUTOINC_ERANGE);
/*
Update next_insert_id if we had already generated a value in this
statement (case of INSERT VALUES(null),(3763),(null):
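
The handler.cc hunk above adds an early exit: in strict SQL mode a truncated store() leaves an error on the session, and update_auto_increment() now checks that flag before generating a value instead of continuing with a half-stored field. The standalone sketch below illustrates the pattern with invented Session and generate_auto_increment names; it is not MariaDB code.

#include <cstdio>

// Illustrative sketch only (invented names): bail out of auto-increment
// generation when an earlier store() in the same row already raised an error.
struct Session
{
  bool strict_mode;
  bool has_error;
  void store_with_truncation() { if (strict_mode) has_error= true; }
};

static const char *generate_auto_increment(const Session &s)
{
  if (s.has_error)                   // mirrors the thd->is_error() guard above
    return "HA_ERR_AUTOINC_ERANGE";
  return "value generated";
}

int main()
{
  Session s= { true, false };
  s.store_with_truncation();
  puts(generate_auto_increment(s));  // prints HA_ERR_AUTOINC_ERANGE
  return 0;
}
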
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index bfd415344ef..c93f4761fc9 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -5616,6 +5616,7 @@ void Item_func_like::print(String *str, enum_query_type query_type)
longlong Item_func_like::val_int()
{
DBUG_ASSERT(fixed == 1);
+ DBUG_ASSERT(escape != -1);
String* res= args[0]->val_str(&cmp_value1);
if (args[0]->null_value)
{
@@ -5702,15 +5703,29 @@ bool fix_escape_item(THD *thd, Item *escape_item, String *tmp_str,
bool escape_used_in_parsing, CHARSET_INFO *cmp_cs,
int *escape)
{
- if (!escape_item->const_during_execution())
+ /*
+ ESCAPE clause accepts only constant arguments and Item_param.
+
+ Subqueries during context_analysis_only might decide they're
+ const_during_execution, but not quite const yet, not evaluate-able.
+ This is fine, as most of context_analysis_only modes will never
+ reach val_int(), so we won't need the value.
+ CONTEXT_ANALYSIS_ONLY_DERIVED being a notable exception here.
+ */
+ if (!escape_item->const_during_execution() ||
+ (!escape_item->const_item() &&
+ !(thd->lex->context_analysis_only & ~CONTEXT_ANALYSIS_ONLY_DERIVED)))
{
my_error(ER_WRONG_ARGUMENTS,MYF(0),"ESCAPE");
return TRUE;
}
-
+
+ IF_DBUG(*escape= -1,);
+
if (escape_item->const_item())
{
/* If we are on execution stage */
+ /* XXX is it safe to evaluate is_expensive() items here? */
String *escape_str= escape_item->val_str(tmp_str);
if (escape_str)
{
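
The IF_DBUG(*escape= -1,) line together with the new DBUG_ASSERT(escape != -1) in val_int() forms a debug-only sentinel: the escape value is poisoned while it cannot be evaluated yet, and any code path that evaluates LIKE without first resolving it trips the assertion. A minimal standalone sketch of that pattern follows; the IF_DBG macro and LikePredicate type are simplified stand-ins, not the MariaDB definitions.

#include <cassert>

// Debug-only sentinel sketch (simplified stand-ins, not MariaDB code):
// poison the value in debug builds, assert it was overwritten before use.
#ifndef NDEBUG
#define IF_DBG(stmt) stmt
#else
#define IF_DBG(stmt)
#endif

struct LikePredicate
{
  int escape;
  void fix_escape()   { IF_DBG(escape= -1); }  // not evaluable yet: poison it
  void resolve(int c) { escape= c; }           // later, once the ESCAPE item is constant
  bool evaluate()     { assert(escape != -1); return true; }
};

int main()
{
  LikePredicate p;
  p.fix_escape();
  p.resolve('\\');        // skipping this line trips the assert in debug builds
  return p.evaluate() ? 0 : 1;
}
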
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index fa715badfc7..46412fe8ad6 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -2809,6 +2809,13 @@ public:
return this;
}
+ bool walk(Item_processor processor, bool walk_subquery, void *arg)
+ {
+ return walk_args(processor, walk_subquery, arg)
+ || escape_item->walk(processor, walk_subquery, arg)
+ || (this->*processor)(arg);
+ }
+
bool find_selective_predicates_list_processor(void *arg);
Item *get_copy(THD *thd)
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 5d0578df4ac..7106f72e237 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -3780,7 +3780,7 @@ int dump_leaf_key(void* key_arg, element_count count __attribute__((unused)),
{
Item_func_group_concat *item= (Item_func_group_concat *) item_arg;
TABLE *table= item->table;
- uint max_length= (uint)table->in_use->variables.group_concat_max_len;
+ uint max_length= table->in_use->variables.group_concat_max_len;
String tmp((char *)table->record[1], table->s->reclength,
default_charset_info);
String tmp2;
@@ -4109,7 +4109,7 @@ bool Item_func_group_concat::repack_tree(THD *thd)
DBUG_ASSERT(tree->size_of_element == st.tree.size_of_element);
st.table= table;
st.len= 0;
- st.maxlen= (size_t)thd->variables.group_concat_max_len;
+ st.maxlen= thd->variables.group_concat_max_len;
tree_walk(tree, &copy_to_tree, &st, left_root_right);
if (st.len <= st.maxlen) // Copying aborted. Must be OOM
{
@@ -4131,7 +4131,7 @@ bool Item_func_group_concat::repack_tree(THD *thd)
decreases up to N=10 (that is, factor=1024) and then starts to increase,
again, very slowly.
*/
-#define GCONCAT_REPACK_FACTOR (1 << 10)
+#define GCONCAT_REPACK_FACTOR 10
bool Item_func_group_concat::add(bool exclude_nulls)
{
@@ -4186,7 +4186,7 @@ bool Item_func_group_concat::add(bool exclude_nulls)
{
THD *thd= table->in_use;
table->field[0]->store(row_str_len, FALSE);
- if (tree_len > thd->variables.group_concat_max_len * GCONCAT_REPACK_FACTOR
+ if ((tree_len >> GCONCAT_REPACK_FACTOR) > thd->variables.group_concat_max_len
&& tree->elements_in_tree > 1)
if (repack_tree(thd))
return 1;
@@ -4240,9 +4240,9 @@ Item_func_group_concat::fix_fields(THD *thd, Item **ref)
result.set_charset(collation.collation);
result_field= 0;
null_value= 1;
- max_length= (uint32)(thd->variables.group_concat_max_len
- / collation.collation->mbminlen
- * collation.collation->mbmaxlen);
+ max_length= (uint32)MY_MIN(thd->variables.group_concat_max_len
+ / collation.collation->mbminlen
+ * collation.collation->mbmaxlen, UINT_MAX32);
uint32 offset;
if (separator->needs_conversion(separator->length(), separator->charset(),
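
Two related adjustments in item_sum.cc: group_concat_max_len becomes a 32-bit setting elsewhere in this merge, so fix_fields() clamps the derived max_length to UINT_MAX32, and the repack threshold is now tested as (tree_len >> GCONCAT_REPACK_FACTOR) > limit rather than multiplying the limit by 1024, so no product is formed that could wrap when the limit sits near its maximum. A small standalone sketch of the overflow-safe comparison, with invented names:

#include <cstdint>
#include <cstdio>

// Overflow-safe threshold sketch (invented names, not MariaDB code): shifting
// the accumulated length down can only drop low bits, whereas multiplying a
// 32-bit limit by 1024 can wrap in 32-bit arithmetic for large limits.
static const unsigned REPACK_FACTOR= 10;      // i.e. a factor of 1024

static bool needs_repack(uint64_t tree_len, uint32_t limit)
{
  return (tree_len >> REPACK_FACTOR) > limit;
}

int main()
{
  const uint32_t limit= 0xFFFFFFFFu;          // group_concat_max_len at its cap
  printf("%d\n", needs_repack(1ULL << 20, limit));  // 0: far below the threshold
  printf("%d\n", needs_repack(1ULL << 45, limit));  // 1: repacking is due
  return 0;
}
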
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index cb5a0604733..f146fc25126 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -7801,6 +7801,30 @@ SEL_TREE *Item_func_in::get_func_mm_tree(RANGE_OPT_PARAM *param,
if (array->count > NOT_IN_IGNORE_THRESHOLD || !value_item)
DBUG_RETURN(0);
+ /*
+ If this is "unique_key NOT IN (...)", do not consider it sargable (for
+ any index, not just the unique one). The logic is as follows:
+ - if there are only a few constants, this condition is not selective
+ (unless the table is also very small in which case we won't gain
+ anything)
+ - If there are a lot of constants, the overhead of building and
+ processing enormous range list is not worth it.
+ */
+ if (param->using_real_indexes)
+ {
+ key_map::Iterator it(field->key_start);
+ uint key_no;
+ while ((key_no= it++) != key_map::Iterator::BITMAP_END)
+ {
+ KEY *key_info= &field->table->key_info[key_no];
+ if (key_info->user_defined_key_parts == 1 &&
+ (key_info->flags & HA_NOSAME))
+ {
+ DBUG_RETURN(0);
+ }
+ }
+ }
+
/* Get a SEL_TREE for "(-inf|NULL) < X < c_0" interval. */
uint i=0;
do
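
The new comment in opt_range.cc is a selectivity argument: on a single-column unique key, each constant in "col NOT IN (c1, ..., ck)" can exclude at most one row, so the predicate keeps roughly (N - k) / N of the table and a range list of k+1 intervals buys almost nothing. The back-of-the-envelope sketch below (made-up row counts, not MariaDB code) puts numbers on that:

#include <cstdio>

// Selectivity sketch: fraction of rows kept by "unique_col NOT IN (k constants)".
int main()
{
  const double N= 1000000.0;                  // rows in the table (assumed)
  const double ks[]= { 10.0, 1000.0, 10000.0 };
  for (unsigned i= 0; i < sizeof(ks) / sizeof(ks[0]); i++)
    printf("k=%6.0f  kept fraction=%.4f\n", ks[i], (N - ks[i]) / N);
  return 0;
}
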
diff --git a/sql/opt_split.cc b/sql/opt_split.cc
index 3a086aef04c..6cb4a12e51f 100644
--- a/sql/opt_split.cc
+++ b/sql/opt_split.cc
@@ -1147,7 +1147,7 @@ bool JOIN_TAB::fix_splitting(SplM_plan_info *spl_plan,
bool JOIN::fix_all_splittings_in_plan()
{
table_map prev_tables= 0;
- table_map all_tables= (1 << table_count) - 1;
+ table_map all_tables= (table_map(1) << table_count) - 1;
for (uint tablenr= 0; tablenr < table_count; tablenr++)
{
POSITION *cur_pos= &best_positions[tablenr];
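
The opt_split.cc change widens the shifted operand: table_map is a 64-bit table bitmap, but the literal 1 in "1 << table_count" is a plain int, so the shift is undefined once table_count reaches 32. Casting to table_map first keeps the whole computation in 64-bit arithmetic. A standalone sketch with a local typedef (not the MariaDB one):

#include <cstdint>
#include <cstdio>

// Shift-width sketch: build a mask with one bit per joined table.
typedef uint64_t table_map;                   // stand-in for MariaDB's typedef

static table_map all_tables_mask(unsigned table_count)
{
  return (table_map(1) << table_count) - 1;   // safe up to 63 tables;
                                              // (1 << table_count) would be UB at 32+
}

int main()
{
  printf("%llx\n", (unsigned long long) all_tables_mask(8));   // ff
  printf("%llx\n", (unsigned long long) all_tables_mask(33));  // 1ffffffff
  return 0;
}
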
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 25e8aa42c7c..aed0ad02e4d 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -13201,15 +13201,6 @@ static bool find_mpvio_user(MPVIO_EXT *mpvio)
ACL_USER *user= find_user_or_anon(sctx->host, sctx->user, sctx->ip);
- if (user && user->password_errors >= max_password_errors && !ignore_max_password_errors(user))
- {
- mysql_mutex_unlock(&acl_cache->lock);
- my_error(ER_USER_IS_BLOCKED, MYF(0));
- general_log_print(mpvio->auth_info.thd, COM_CONNECT,
- ER_THD(mpvio->auth_info.thd, ER_USER_IS_BLOCKED));
- DBUG_RETURN(1);
- }
-
if (user)
mpvio->acl_user= user->copy(mpvio->auth_info.thd->mem_root);
@@ -13246,6 +13237,15 @@ static bool find_mpvio_user(MPVIO_EXT *mpvio)
mpvio->make_it_fail= true;
}
+ if (mpvio->acl_user->password_errors >= max_password_errors &&
+ !ignore_max_password_errors(mpvio->acl_user))
+ {
+ my_error(ER_USER_IS_BLOCKED, MYF(0));
+ general_log_print(mpvio->auth_info.thd, COM_CONNECT,
+ ER_THD(mpvio->auth_info.thd, ER_USER_IS_BLOCKED));
+ DBUG_RETURN(1);
+ }
+
/* user account requires non-default plugin and the client is too old */
if (mpvio->acl_user->auth->plugin.str != native_password_plugin_name.str &&
mpvio->acl_user->auth->plugin.str != old_password_plugin_name.str &&
@@ -14586,6 +14586,7 @@ static int native_password_get_salt(const char *hash, size_t hash_length,
{
DBUG_ASSERT(sizeof(invalid_password) > SCRAMBLE_LENGTH);
DBUG_ASSERT(*out_length >= SCRAMBLE_LENGTH);
+ DBUG_ASSERT(*out_length >= sizeof(invalid_password));
if (hash_length == 0)
{
*out_length= 0;
@@ -14596,14 +14597,27 @@ static int native_password_get_salt(const char *hash, size_t hash_length,
{
if (hash_length == 7 && strcmp(hash, "invalid") == 0)
{
- memcpy(out, invalid_password, SCRAMBLED_PASSWORD_CHAR_LENGTH);
- *out_length= SCRAMBLED_PASSWORD_CHAR_LENGTH;
+ memcpy(out, invalid_password, sizeof(invalid_password));
+ *out_length= sizeof(invalid_password);
return 0;
}
my_error(ER_PASSWD_LENGTH, MYF(0), SCRAMBLED_PASSWORD_CHAR_LENGTH);
return 1;
}
+ for (const char *c= hash + 1; c < (hash + hash_length); c++)
+ {
+ /* If any non-hex characters are found, mark the password as invalid. */
+ if (!(*c >= '0' && *c <= '9') &&
+ !(*c >= 'A' && *c <= 'F') &&
+ !(*c >= 'a' && *c <= 'f'))
+ {
+ memcpy(out, invalid_password, sizeof(invalid_password));
+ *out_length= sizeof(invalid_password);
+ return 0;
+ }
+ }
+
*out_length= SCRAMBLE_LENGTH;
get_salt_from_password(out, hash);
return 0;
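
native_password_get_salt() now refuses to decode a stored hash that contains any non-hex character after the leading '*', treating it like the explicit "invalid" marker instead of feeding garbage to get_salt_from_password(). Below is a standalone sketch of that shape check (invented helper names, not MariaDB code); the explicit character ranges mirror the hunk above and sidestep isxdigit()'s locale and signed-char caveats.

#include <cstddef>
#include <cstdio>

// Hash shape check sketch: '*' followed by 40 hexadecimal digits.
static bool is_hex_digit(char c)
{
  return (c >= '0' && c <= '9') ||
         (c >= 'A' && c <= 'F') ||
         (c >= 'a' && c <= 'f');
}

static bool looks_like_native_hash(const char *hash, size_t len)
{
  if (len != 41 || hash[0] != '*')
    return false;
  for (size_t i= 1; i < len; i++)
    if (!is_hex_digit(hash[i]))
      return false;
  return true;
}

int main()
{
  printf("%d\n", looks_like_native_hash("*2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19", 41)); // 1
  printf("%d\n", looks_like_native_hash("*not-a-hex-hash-but-41-characters-long!!!", 41)); // 0
  return 0;
}
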
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index d02d130c084..291cad89d79 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -6151,6 +6151,7 @@ find_field_in_table_ref(THD *thd, TABLE_LIST *table_list,
#ifndef NO_EMBEDDED_ACCESS_CHECKS
/* Check if there are sufficient access rights to the found field. */
if (check_privileges &&
+ !table_list->is_derived() &&
check_column_grant_in_table_ref(thd, *actual_table, name, length, fld))
fld= WRONG_GRANT;
else
@@ -8020,36 +8021,23 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
#ifndef NO_EMBEDDED_ACCESS_CHECKS
/*
- Ensure that we have access rights to all fields to be inserted. Under
- some circumstances, this check may be skipped.
+ Ensure that we have access rights to all fields to be inserted
+ the table 'tables'. Under some circumstances, this check may be skipped.
- - If any_privileges is true, skip the check.
+ The check is skipped in the following cases:
- - If the SELECT privilege has been found as fulfilled already for both
- the TABLE and TABLE_LIST objects (and both of these exist, of
- course), the check is skipped.
+ - any_privileges is true
- - If the SELECT privilege has been found fulfilled for the TABLE object
- and the TABLE_LIST represents a derived table other than a view (see
- below), the check is skipped.
+ - the table is a derived table
- - If the TABLE_LIST object represents a view, we may skip checking if
- the SELECT privilege has been found fulfilled for it, regardless of
- the TABLE object.
+ - the table is a view with SELECT privilege
- - If there is no TABLE object, the test is skipped if either
- * the TABLE_LIST does not represent a view, or
- * the SELECT privilege has been found fulfilled.
-
- A TABLE_LIST that is not a view may be a subquery, an
- information_schema table, or a nested table reference. See the comment
- for TABLE_LIST.
+ - the table is a base table with SELECT privilege
*/
- if (!((table && tables->is_non_derived() &&
- (table->grant.privilege & SELECT_ACL)) ||
- ((!tables->is_non_derived() &&
- (tables->grant.privilege & SELECT_ACL)))) &&
- !any_privileges)
+ if (!any_privileges &&
+ !tables->is_derived() &&
+ !(tables->is_view() && (tables->grant.privilege & SELECT_ACL)) &&
+ !(table && (table->grant.privilege & SELECT_ACL)))
{
field_iterator.set(tables);
if (check_grant_all_columns(thd, SELECT_ACL, &field_iterator))
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 35e3a7c5608..57070b038ca 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -1449,7 +1449,7 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used)
DBUG_PRINT("qcache", ("\
long %d, 4.1: %d, eof: %d, bin_proto: %d, more results %d, pkt_nr: %d, \
CS client: %u, CS result: %u, CS conn: %u, limit: %llu, TZ: %p, \
-sql mode: 0x%llx, sort len: %llu, conncat len: %llu, div_precision: %zu, \
+sql mode: 0x%llx, sort len: %llu, concat len: %u, div_precision: %zu, \
def_week_frmt: %zu, in_trans: %d, autocommit: %d",
(int)flags.client_long_flag,
(int)flags.client_protocol_41,
@@ -1949,7 +1949,7 @@ Query_cache::send_result_to_client(THD *thd, char *org_sql, uint query_length)
DBUG_PRINT("qcache", ("\
long %d, 4.1: %d, eof: %d, bin_proto: %d, more results %d, pkt_nr: %d, \
CS client: %u, CS result: %u, CS conn: %u, limit: %llu, TZ: %p, \
-sql mode: 0x%llx, sort len: %llu, conncat len: %llu, div_precision: %zu, \
+sql mode: 0x%llx, sort len: %llu, concat len: %u, div_precision: %zu, \
def_week_frmt: %zu, in_trans: %d, autocommit: %d",
(int)flags.client_long_flag,
(int)flags.client_protocol_41,
diff --git a/sql/sql_cache.h b/sql/sql_cache.h
index b92b3972512..196acf6fa2e 100644
--- a/sql/sql_cache.h
+++ b/sql/sql_cache.h
@@ -558,11 +558,11 @@ struct Query_cache_query_flags
uint character_set_client_num;
uint character_set_results_num;
uint collation_connection_num;
+ uint group_concat_max_len;
ha_rows limit;
Time_zone *time_zone;
sql_mode_t sql_mode;
ulonglong max_sort_length;
- ulonglong group_concat_max_len;
size_t default_week_format;
size_t div_precision_increment;
MY_LOCALE *lc_time_names;
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 1da82edc061..3c28d089e20 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -641,7 +641,6 @@ typedef struct system_variables
ulonglong bulk_insert_buff_size;
ulonglong join_buff_size;
ulonglong sortbuff_size;
- ulonglong group_concat_max_len;
ulonglong default_regex_flags;
ulonglong max_mem_used;
@@ -734,6 +733,8 @@ typedef struct system_variables
uint32 gtid_domain_id;
uint64 gtid_seq_no;
+ uint group_concat_max_len;
+
/**
Default transaction access mode. READ ONLY (true) or READ WRITE (false).
*/
@@ -6140,10 +6141,15 @@ class select_union_recursive :public select_unit
public:
/* The temporary table with the new records generated by one iterative step */
TABLE *incr_table;
+ /* The TMP_TABLE_PARAM structure used to create incr_table */
+ TMP_TABLE_PARAM incr_table_param;
/* One of tables from the list rec_tables (determined dynamically) */
TABLE *first_rec_table_to_update;
- /* The temporary tables used for recursive table references */
- List<TABLE> rec_tables;
+ /*
+ The list of all recursive table references to the CTE for whose
+ specification this select_union_recursive was created
+ */
+ List<TABLE_LIST> rec_table_refs;
/*
The count of how many times cleanup() was called with cleaned==false
for the unit specifying the recursive CTE for which this object was created
@@ -6153,7 +6159,8 @@ class select_union_recursive :public select_unit
select_union_recursive(THD *thd_arg):
select_unit(thd_arg),
- incr_table(0), first_rec_table_to_update(0), cleanup_count(0) {};
+ incr_table(0), first_rec_table_to_update(0), cleanup_count(0)
+ { incr_table_param.init(); };
int send_data(List<Item> &items);
bool create_result_table(THD *thd, List<Item> *column_types,
diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc
index 5bf9930ef21..c32ea8c0238 100644
--- a/sql/sql_cte.cc
+++ b/sql/sql_cte.cc
@@ -253,6 +253,8 @@ With_element *With_clause::find_table_def(TABLE_LIST *table,
!table->is_fqtn)
{
table->set_derived();
+ table->db.str= empty_c_string;
+ table->db.length= 0;
return with_elem;
}
}
@@ -890,8 +892,6 @@ st_select_lex_unit *With_element::clone_parsed_spec(THD *thd,
goto err;
spec_tables_tail= tbl;
}
- if (check_table_access(thd, SELECT_ACL, spec_tables, FALSE, UINT_MAX, FALSE))
- goto err;
if (spec_tables)
{
if (with_table->next_global)
@@ -917,6 +917,22 @@ st_select_lex_unit *With_element::clone_parsed_spec(THD *thd,
with_select));
if (check_dependencies_in_with_clauses(lex->with_clauses_list))
res= NULL;
+ /*
+ Resolve references to CTE from the spec_tables list that has not
+ been resolved yet.
+ */
+ for (TABLE_LIST *tbl= spec_tables;
+ tbl;
+ tbl= tbl->next_global)
+ {
+ if (!tbl->with)
+ tbl->with= with_select->find_table_def_in_with_clauses(tbl);
+ if (tbl == spec_tables_tail)
+ break;
+ }
+ if (check_table_access(thd, SELECT_ACL, spec_tables, FALSE, UINT_MAX, FALSE))
+ goto err;
+
lex->sphead= NULL; // in order not to delete lex->sphead
lex_end(lex);
err:
@@ -1521,10 +1537,11 @@ void With_element::print(THD *thd, String *str, enum_query_type query_type)
bool With_element::instantiate_tmp_tables()
{
- List_iterator_fast<TABLE> li(rec_result->rec_tables);
- TABLE *rec_table;
- while ((rec_table= li++))
+ List_iterator_fast<TABLE_LIST> li(rec_result->rec_table_refs);
+ TABLE_LIST *rec_tbl;
+ while ((rec_tbl= li++))
{
+ TABLE *rec_table= rec_tbl->table;
if (!rec_table->is_created() &&
instantiate_tmp_table(rec_table,
rec_table->s->key_info,
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 01f67476688..94d5ceb309d 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -281,7 +281,15 @@ int TABLE::delete_row()
store_record(this, record[1]);
vers_update_end();
- return file->ha_update_row(record[1], record[0]);
+ int err= file->ha_update_row(record[1], record[0]);
+ /*
+ MDEV-23644: we get HA_ERR_FOREIGN_DUPLICATE_KEY iff we already got history
+ row with same trx_id which is the result of foreign key action, so we
+ don't need one more history row.
+ */
+ if (err == HA_ERR_FOREIGN_DUPLICATE_KEY)
+ return file->ha_delete_row(record[0]);
+ return err;
}
diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc
index a9155f361b5..90d7ce8fc2c 100644
--- a/sql/sql_derived.cc
+++ b/sql/sql_derived.cc
@@ -710,7 +710,7 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
if (derived->is_with_table_recursive_reference())
{
/* Here 'derived" is a secondary recursive table reference */
- unit->with_element->rec_result->rec_tables.push_back(derived->table);
+ unit->with_element->rec_result->rec_table_refs.push_back(derived);
}
}
DBUG_ASSERT(derived->table || res);
@@ -808,17 +808,17 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
derived->fill_me= FALSE;
- if (!(derived->derived_result= new (thd->mem_root) select_unit(thd)))
+ if ((!derived->is_with_table_recursive_reference() ||
+ !derived->derived_result) &&
+ !(derived->derived_result= new (thd->mem_root) select_unit(thd)))
DBUG_RETURN(TRUE); // out of memory
- lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_DERIVED;
// st_select_lex_unit::prepare correctly work for single select
if ((res= unit->prepare(derived, derived->derived_result, 0)))
goto exit;
if (derived->with &&
(res= derived->with->process_columns_of_derived_unit(thd, unit)))
goto exit;
- lex->context_analysis_only&= ~CONTEXT_ANALYSIS_ONLY_DERIVED;
if ((res= check_duplicate_names(thd, unit->types, 0)))
goto exit;
@@ -827,7 +827,8 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
Depending on the result field translation will or will not
be created.
*/
- if (derived->init_derived(thd, FALSE))
+ if (!derived->is_with_table_recursive_reference() &&
+ derived->init_derived(thd, FALSE))
goto exit;
/*
diff --git a/sql/sql_help.cc b/sql/sql_help.cc
index e31e51d0316..ebda874d1fe 100644
--- a/sql/sql_help.cc
+++ b/sql/sql_help.cc
@@ -741,6 +741,9 @@ static bool mysqld_help_internal(THD *thd, const char *mask)
&name, &description, &example);
delete select;
+ if (thd->is_error())
+ goto error;
+
if (count_topics == 0)
{
int UNINIT_VAR(key_id);
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index f09d7140d3b..9211625c804 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -5847,6 +5847,14 @@ mysql_execute_command(THD *thd)
break;
}
case SQLCOM_XA_START:
+#ifdef WITH_WSREP
+ if (WSREP(thd))
+ {
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0),
+ "XA transactions with Galera replication");
+ break;
+ }
+#endif /* WITH_WSREP */
if (trans_xa_start(thd))
goto error;
my_ok(thd);
@@ -6893,6 +6901,9 @@ check_access(THD *thd, privilege_t want_access,
bool check_single_table_access(THD *thd, privilege_t privilege,
TABLE_LIST *tables, bool no_errors)
{
+ if (tables->derived)
+ return 0;
+
Switch_to_definer_security_ctx backup_sctx(thd, tables);
const char *db_name;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 808d49d4067..5422346884d 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -13399,10 +13399,6 @@ void JOIN_TAB::cleanup()
{
DBUG_ENTER("JOIN_TAB::cleanup");
- if (tab_list && tab_list->is_with_table_recursive_reference() &&
- tab_list->with->is_cleaned())
- DBUG_VOID_RETURN;
-
DBUG_PRINT("enter", ("tab: %p table %s.%s",
this,
(table ? table->s->db.str : "?"),
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index ca3e2aaa97e..87d41756684 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -9472,7 +9472,8 @@ ST_FIELD_INFO check_constraints_fields_info[]=
Column("CONSTRAINT_SCHEMA", Name(), NOT_NULL, OPEN_FULL_TABLE),
Column("TABLE_NAME", Name(), NOT_NULL, OPEN_FULL_TABLE),
Column("CONSTRAINT_NAME", Name(), NOT_NULL, OPEN_FULL_TABLE),
- Column("CHECK_CLAUSE", Name(), NOT_NULL, OPEN_FULL_TABLE),
+ Column("CHECK_CLAUSE", Longtext(MAX_FIELD_VARCHARLENGTH),
+ NOT_NULL, OPEN_FULL_TABLE),
CEnd()
};
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 4e1ff93c576..6825fbc8e24 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -2311,8 +2311,8 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
Table_type table_type;
size_t path_length= 0;
char *path_end= 0;
-
error= 0;
+
DBUG_PRINT("table", ("table_l: '%s'.'%s' table: %p s: %p",
db.str, table_name.str, table->table,
table->table ? table->table->s : NULL));
@@ -2327,9 +2327,40 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
thd->find_temporary_table(table) &&
table->mdl_request.ticket != NULL));
+ if (drop_sequence && table->table &&
+ table->table->s->table_type != TABLE_TYPE_SEQUENCE)
+ {
+ if (if_exists)
+ {
+ char buff[FN_REFLEN];
+ String tbl_name(buff, sizeof(buff), system_charset_info);
+ tbl_name.length(0);
+ tbl_name.append(&db);
+ tbl_name.append('.');
+ tbl_name.append(&table->table_name);
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_NOT_SEQUENCE2, ER_THD(thd, ER_NOT_SEQUENCE2),
+ tbl_name.c_ptr_safe());
+
+ /*
+ Our job is done here. This statement was added to avoid executing
+ unnecessary code farther below which in some strange corner cases
+ caused the server to crash (see MDEV-17896).
+ */
+ continue;
+ }
+ /* "DROP SEQUENCE" but a sequence table was not found */
+ unknown_tables.append(&db);
+ unknown_tables.append('.');
+ unknown_tables.append(&table_name);
+ unknown_tables.append(',');
+ error= ENOENT;
+ not_found_errors++;
+ continue;
+ }
+
/* First try to delete temporary tables and temporary sequences */
- if ((table->open_type != OT_BASE_ONLY && is_temporary_table(table)) &&
- (!drop_sequence || table->table->s->table_type == TABLE_TYPE_SEQUENCE))
+ if ((table->open_type != OT_BASE_ONLY && is_temporary_table(table)))
{
table_creation_was_logged= table->table->s->table_creation_was_logged;
if (thd->drop_temporary_table(table->table, &is_trans, true))
@@ -10516,6 +10547,7 @@ do_continue:;
tmp_disable_binlog(thd);
create_info->options|=HA_CREATE_TMP_ALTER;
+ create_info->alias= alter_ctx.table_name;
error= create_table_impl(thd, alter_ctx.db, alter_ctx.table_name,
alter_ctx.new_db, alter_ctx.tmp_name,
alter_ctx.get_tmp_path(),
diff --git a/sql/sql_type.cc b/sql/sql_type.cc
index 6c5f72c2ce2..5a31b39c7b6 100644
--- a/sql/sql_type.cc
+++ b/sql/sql_type.cc
@@ -9289,11 +9289,18 @@ LEX_CSTRING Charset::collation_specific_name() const
for character sets and collations, so a collation
name not necessarily starts with the character set name.
*/
+ LEX_CSTRING retval;
size_t csname_length= strlen(m_charset->csname);
if (strncmp(m_charset->name, m_charset->csname, csname_length))
- return {NULL, 0};
+ {
+ retval.str= NULL;
+ retval.length= 0;
+ return retval;
+ }
const char *ptr= m_charset->name + csname_length;
- return {ptr, strlen(ptr) };
+ retval.str= ptr;
+ retval.length= strlen(ptr);
+ return retval;
}
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 6da1175e709..b88d78c0db3 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -362,7 +362,10 @@ select_union_recursive::create_result_table(THD *thd_arg,
hidden))
return true;
- if (! (incr_table= create_tmp_table(thd_arg, &tmp_table_param, *column_types,
+ incr_table_param.init();
+ incr_table_param.field_count= column_types->elements;
+ incr_table_param.bit_fields_as_long= bit_fields_as_long;
+ if (! (incr_table= create_tmp_table(thd_arg, &incr_table_param, *column_types,
(ORDER*) 0, false, 1,
options, HA_POS_ERROR, &empty_clex_str,
true, keep_row_order)))
@@ -372,20 +375,6 @@ select_union_recursive::create_result_table(THD *thd_arg,
for (uint i=0; i < table->s->fields; i++)
incr_table->field[i]->flags &= ~(PART_KEY_FLAG | PART_INDIRECT_KEY_FLAG);
- TABLE *rec_table= 0;
- if (! (rec_table= create_tmp_table(thd_arg, &tmp_table_param, *column_types,
- (ORDER*) 0, false, 1,
- options, HA_POS_ERROR, alias,
- true, keep_row_order)))
- return true;
-
- rec_table->keys_in_use_for_query.clear_all();
- for (uint i=0; i < table->s->fields; i++)
- rec_table->field[i]->flags &= ~(PART_KEY_FLAG | PART_INDIRECT_KEY_FLAG);
-
- if (rec_tables.push_back(rec_table))
- return true;
-
return false;
}
@@ -908,23 +897,25 @@ void select_union_recursive::cleanup()
free_tmp_table(thd, incr_table);
}
- List_iterator<TABLE> it(rec_tables);
- TABLE *tab;
- while ((tab= it++))
+ List_iterator<TABLE_LIST> it(rec_table_refs);
+ TABLE_LIST *tbl;
+ while ((tbl= it++))
{
+ TABLE *tab= tbl->table;
if (tab->is_created())
{
tab->file->extra(HA_EXTRA_RESET_STATE);
tab->file->ha_delete_all_rows();
}
- /*
+ /*
The table will be closed later in close_thread_tables(),
because it might be used in the statements like
ANALYZE WITH r AS (...) SELECT * from r
- where r is defined through recursion.
+ where r is defined through recursion.
*/
tab->next= thd->rec_tables;
thd->rec_tables= tab;
+ tbl->derived_result= 0;
}
}
@@ -1630,9 +1621,33 @@ bool st_select_lex_unit::prepare(TABLE_LIST *derived_arg,
}
if (!derived_arg->table)
{
- derived_arg->table= with_element->rec_result->rec_tables.head();
- if (derived_arg->derived_result)
- derived_arg->derived_result->table= derived_arg->table;
+ bool res= false;
+
+ if ((!derived_arg->is_with_table_recursive_reference() ||
+ !derived_arg->derived_result) &&
+ !(derived_arg->derived_result=
+ new (thd->mem_root) select_unit(thd)))
+ goto err; // out of memory
+ thd->create_tmp_table_for_derived= TRUE;
+
+ res= derived_arg->derived_result->create_result_table(thd,
+ &types,
+ FALSE,
+ create_options,
+ &derived_arg->alias,
+ FALSE, FALSE,
+ FALSE, 0);
+ thd->create_tmp_table_for_derived= FALSE;
+ if (res)
+ goto err;
+ derived_arg->derived_result->set_unit(this);
+ derived_arg->table= derived_arg->derived_result->table;
+ if (derived_arg->is_with_table_recursive_reference())
+ {
+ /* Here 'derived_arg' is the primary recursive table reference */
+ derived_arg->with->rec_result->
+ rec_table_refs.push_back(derived_arg);
+ }
}
with_element->mark_as_with_prepared_anchor();
is_rec_result_table_created= true;
@@ -2428,11 +2443,11 @@ bool st_select_lex_unit::exec_recursive()
TABLE *incr_table= with_element->rec_result->incr_table;
st_select_lex *end= NULL;
bool is_unrestricted= with_element->is_unrestricted();
- List_iterator_fast<TABLE> li(with_element->rec_result->rec_tables);
+ List_iterator_fast<TABLE_LIST> li(with_element->rec_result->rec_table_refs);
TMP_TABLE_PARAM *tmp_table_param= &with_element->rec_result->tmp_table_param;
ha_rows examined_rows= 0;
bool was_executed= executed;
- TABLE *rec_table;
+ TABLE_LIST *rec_tbl;
DBUG_ENTER("st_select_lex_unit::exec_recursive");
@@ -2510,8 +2525,9 @@ bool st_select_lex_unit::exec_recursive()
else
with_element->level++;
- while ((rec_table= li++))
+ while ((rec_tbl= li++))
{
+ TABLE *rec_table= rec_tbl->table;
saved_error=
incr_table->insert_all_rows_into_tmp_table(thd, rec_table,
tmp_table_param,
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 9e6a1cd3bb4..d0956ec8bd1 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -51,8 +51,9 @@
compare_record(TABLE*).
*/
bool records_are_comparable(const TABLE *table) {
- return ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) == 0) ||
- bitmap_is_subset(table->write_set, table->read_set);
+ return !table->versioned(VERS_TRX_ID) &&
+ (((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) == 0) ||
+ bitmap_is_subset(table->write_set, table->read_set));
}
@@ -1084,20 +1085,9 @@ update_begin:
}
else if (likely(!error))
{
- if (has_vers_fields && table->versioned())
- {
- if (table->versioned(VERS_TIMESTAMP))
- {
- store_record(table, record[2]);
- table->mark_columns_per_binlog_row_image();
- error= vers_insert_history_row(table);
- restore_record(table, record[2]);
- }
- if (likely(!error))
- rows_inserted++;
- }
- if (likely(!error))
- updated++;
+ if (has_vers_fields && table->versioned(VERS_TRX_ID))
+ rows_inserted++;
+ updated++;
}
if (likely(!error) && !record_was_same && table_list->has_period())
@@ -1113,6 +1103,19 @@ update_begin:
if (unlikely(error) &&
(!ignore || table->file->is_fatal_error(error, HA_CHECK_ALL)))
{
+ goto error;
+ }
+ }
+
+ if (likely(!error) && has_vers_fields && table->versioned(VERS_TIMESTAMP))
+ {
+ store_record(table, record[2]);
+ table->mark_columns_per_binlog_row_image();
+ error= vers_insert_history_row(table);
+ restore_record(table, record[2]);
+ if (unlikely(error))
+ {
+error:
/*
If (ignore && error is ignorable) we don't have to
do anything; otherwise...
@@ -1123,10 +1126,11 @@ update_begin:
flags|= ME_FATAL; /* Other handler errors are fatal */
prepare_record_for_error_message(error, table);
- table->file->print_error(error,MYF(flags));
- error= 1;
- break;
- }
+ table->file->print_error(error,MYF(flags));
+ error= 1;
+ break;
+ }
+ rows_inserted++;
}
if (table->triggers &&
@@ -1851,7 +1855,11 @@ int mysql_multi_update_prepare(THD *thd)
During prepare phase acquire only S metadata locks instead of SW locks to
keep prepare of multi-UPDATE compatible with concurrent LOCK TABLES WRITE
and global read lock.
+
+ Don't evaluate any subqueries even if constant, because
+ tables aren't locked yet.
*/
+ lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_DERIVED;
if (thd->lex->sql_command == SQLCOM_UPDATE_MULTI)
{
if (open_tables(thd, &table_list, &table_count,
@@ -1873,6 +1881,8 @@ int mysql_multi_update_prepare(THD *thd)
lock_tables(thd, table_list, table_count, 0))
DBUG_RETURN(TRUE);
+ lex->context_analysis_only&= ~CONTEXT_ANALYSIS_ONLY_DERIVED;
+
(void) read_statistics_for_tables_if_needed(thd, table_list);
/* @todo: downgrade the metadata locks here. */
@@ -2551,6 +2561,7 @@ int multi_update::send_data(List<Item> &not_used_values)
if (!ignore ||
table->file->is_fatal_error(error, HA_CHECK_ALL))
{
+error:
/*
If (ignore && error == is ignorable) we don't have to
do anything; otherwise...
@@ -2572,19 +2583,8 @@ int multi_update::send_data(List<Item> &not_used_values)
error= 0;
updated--;
}
- else if (has_vers_fields && table->versioned())
+ else if (has_vers_fields && table->versioned(VERS_TRX_ID))
{
- if (table->versioned(VERS_TIMESTAMP))
- {
- store_record(table, record[2]);
- if (vers_insert_history_row(table))
- {
- restore_record(table, record[2]);
- error= 1;
- break;
- }
- restore_record(table, record[2]);
- }
updated_sys_ver++;
}
/* non-transactional or transactional table got modified */
@@ -2598,6 +2598,17 @@ int multi_update::send_data(List<Item> &not_used_values)
}
}
}
+ if (has_vers_fields && table->versioned(VERS_TIMESTAMP))
+ {
+ store_record(table, record[2]);
+ if (vers_insert_history_row(table))
+ {
+ restore_record(table, record[2]);
+ goto error;
+ }
+ restore_record(table, record[2]);
+ updated_sys_ver++;
+ }
if (table->triggers &&
unlikely(table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
TRG_ACTION_AFTER, TRUE)))
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 617254244e1..cfd43bd13ab 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -292,6 +292,8 @@ bool create_view_precheck(THD *thd, TABLE_LIST *tables, TABLE_LIST *view,
{
for (tbl= sl->get_table_list(); tbl; tbl= tbl->next_local)
{
+ if (!tbl->with && tbl->select_lex)
+ tbl->with= tbl->select_lex->find_table_def_in_with_clauses(tbl);
/*
Ensure that we have some privileges on this table, more strict check
will be done on column level after preparation,
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 0b6f099b1b0..4fb9419d2f8 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -341,12 +341,12 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
*/
/* Start SQL_MODE_DEFAULT_SPECIFIC */
-%expect 53
+%expect 46
/* End SQL_MODE_DEFAULT_SPECIFIC */
/* Start SQL_MODE_ORACLE_SPECIFIC
-%expect 56
+%expect 49
End SQL_MODE_ORACLE_SPECIFIC */
@@ -1445,7 +1445,7 @@ End SQL_MODE_ORACLE_SPECIFIC */
%type <item>
literal insert_ident order_ident temporal_literal
simple_ident expr sum_expr in_sum_expr
- variable variable_aux bool_pri
+ variable variable_aux
predicate bit_expr parenthesized_expr
table_wild simple_expr column_default_non_parenthesized_expr udf_expr
primary_expr string_factor_expr mysql_concatenation_expr
@@ -9364,23 +9364,19 @@ expr:
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | bool_pri
- ;
-
-bool_pri:
- bool_pri EQUAL_SYM predicate %prec EQUAL_SYM
+ | expr EQUAL_SYM predicate %prec EQUAL_SYM
{
$$= new (thd->mem_root) Item_func_equal(thd, $1, $3);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | bool_pri comp_op predicate %prec '='
+ | expr comp_op predicate %prec '='
{
$$= (*$2)(0)->create(thd, $1, $3);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | bool_pri comp_op all_or_any '(' subselect ')' %prec '='
+ | expr comp_op all_or_any '(' subselect ')' %prec '='
{
$$= all_any_subquery_creator(thd, $1, $2, $3, $5);
if (unlikely($$ == NULL))
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index d0eff0adada..44d29c5da0b 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -2465,7 +2465,7 @@ static Sys_var_ulong Sys_max_sort_length(
"the first max_sort_length bytes of each value are used; the rest "
"are ignored)",
SESSION_VAR(max_sort_length), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(8, 8192*1024L), DEFAULT(1024), BLOCK_SIZE(1));
+ VALID_RANGE(64, 8192*1024L), DEFAULT(1024), BLOCK_SIZE(1));
static Sys_var_ulong Sys_max_sp_recursion_depth(
"max_sp_recursion_depth",
@@ -4785,11 +4785,11 @@ static Sys_var_ulong Sys_default_week_format(
SESSION_VAR(default_week_format), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, 7), DEFAULT(0), BLOCK_SIZE(1));
-static Sys_var_ulonglong Sys_group_concat_max_len(
+static Sys_var_uint Sys_group_concat_max_len(
"group_concat_max_len",
"The maximum length of the result of function GROUP_CONCAT()",
SESSION_VAR(group_concat_max_len), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(4, SIZE_T_MAX), DEFAULT(1024*1024), BLOCK_SIZE(1));
+ VALID_RANGE(4, UINT_MAX32), DEFAULT(1024*1024), BLOCK_SIZE(1));
static char *glob_hostname_ptr;
static Sys_var_charptr Sys_hostname(
diff --git a/sql/wsrep_binlog.cc b/sql/wsrep_binlog.cc
index da899321ba8..5595f263fa5 100644
--- a/sql/wsrep_binlog.cc
+++ b/sql/wsrep_binlog.cc
@@ -358,7 +358,7 @@ void wsrep_register_for_group_commit(THD *thd)
DBUG_VOID_RETURN;
}
- DBUG_ASSERT(thd->wsrep_trx().state() == wsrep::transaction::s_committing);
+ DBUG_ASSERT(thd->wsrep_trx().ordered());
wait_for_commit *wfc= thd->wait_for_commit_ptr= &thd->wsrep_wfc;
diff --git a/sql/wsrep_high_priority_service.cc b/sql/wsrep_high_priority_service.cc
index 3116315b9c7..6136a459d16 100644
--- a/sql/wsrep_high_priority_service.cc
+++ b/sql/wsrep_high_priority_service.cc
@@ -159,11 +159,12 @@ Wsrep_high_priority_service::Wsrep_high_priority_service(THD* thd)
/* Disable general logging on applier threads */
thd->variables.option_bits |= OPTION_LOG_OFF;
- /* Enable binlogging if opt_log_slave_updates is set */
- if (opt_log_slave_updates)
- thd->variables.option_bits|= OPTION_BIN_LOG;
- else
- thd->variables.option_bits&= ~(OPTION_BIN_LOG);
+
+ /* enable binlogging regardless of log_slave_updates setting
+ this is for ensuring that both local and applier transaction go through
+ same commit ordering algorithm in group commit control
+ */
+ thd->variables.option_bits|= OPTION_BIN_LOG;
thd->net.vio= 0;
thd->reset_db(&db_str);
@@ -462,8 +463,24 @@ int Wsrep_high_priority_service::log_dummy_write_set(const wsrep::ws_handle& ws_
cs.before_rollback();
cs.after_rollback();
}
+
+ if (!WSREP_EMULATE_BINLOG(m_thd))
+ {
+ wsrep_register_for_group_commit(m_thd);
+ ret = ret || cs.provider().commit_order_leave(ws_handle, ws_meta, err);
+ m_thd->wait_for_prior_commit();
+ }
+
wsrep_set_SE_checkpoint(ws_meta.gtid(), wsrep_gtid_server.gtid());
- ret= ret || cs.provider().commit_order_leave(ws_handle, ws_meta, err);
+
+ if (!WSREP_EMULATE_BINLOG(m_thd))
+ {
+ wsrep_unregister_from_group_commit(m_thd);
+ }
+ else
+ {
+ ret= ret || cs.provider().commit_order_leave(ws_handle, ws_meta, err);
+ }
cs.after_applying();
}
DBUG_RETURN(ret);
diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc
index 284df936160..50650280a2a 100644
--- a/sql/wsrep_mysqld.cc
+++ b/sql/wsrep_mysqld.cc
@@ -2649,10 +2649,12 @@ static my_bool kill_remaining_threads(THD *thd, THD *caller_thd)
if (is_client_connection(thd) &&
!abort_replicated(thd) &&
!is_replaying_connection(thd) &&
+ thd_is_connection_alive(thd) &&
thd != caller_thd)
{
+
WSREP_INFO("killing local connection: %lld", (longlong) thd->thread_id);
- close_connection(thd, 0);
+ close_connection(thd);
}
#endif
return 0;