Diffstat (limited to 'sql')
-rw-r--r--  sql/field.cc                  40
-rw-r--r--  sql/field.h                    1
-rw-r--r--  sql/filesort.cc                9
-rw-r--r--  sql/item_jsonfunc.cc          35
-rw-r--r--  sql/item_jsonfunc.h            1
-rw-r--r--  sql/item_subselect.cc          2
-rw-r--r--  sql/item_windowfunc.cc         4
-rw-r--r--  sql/mysql_install_db.cc       22
-rw-r--r--  sql/opt_range.cc             129
-rw-r--r--  sql/rpl_rli.cc                 6
-rw-r--r--  sql/signal_handler.cc          2
-rw-r--r--  sql/sql_lex.h                  4
-rw-r--r--  sql/sql_parse.cc               8
-rw-r--r--  sql/sql_prepare.cc             5
-rw-r--r--  sql/sql_repl.cc                8
-rw-r--r--  sql/sql_select.cc             54
-rw-r--r--  sql/sql_show.cc               40
-rw-r--r--  sql/sql_table.cc               7
-rw-r--r--  sql/table.cc                   2
-rw-r--r--  sql/table.h                    6
-rw-r--r--  sql/winservice.c              39
-rw-r--r--  sql/wsrep_server_service.cc   14
22 files changed, 326 insertions, 112 deletions
diff --git a/sql/field.cc b/sql/field.cc
index 8e8ed7df0cb..9b6f117a82e 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -11107,6 +11107,46 @@ void Field_blob::print_key_value(String *out, uint32 length)
}
+/*
+ @brief Print value of the key part
+
+  @param
+    out      Output string
+    key      Value of the key
+    length   Length of the field in bytes,
+             excluding the NULL flag and length bytes
+*/
+
+
+void
+Field::print_key_part_value(String *out, const uchar* key, uint32 length)
+{
+ StringBuffer<128> tmp(system_charset_info);
+ uint null_byte= 0;
+ if (real_maybe_null())
+ {
+ /*
+ Byte 0 of key is the null-byte. If set, key is NULL.
+ Otherwise, print the key value starting immediately after the
+ null-byte
+ */
+ if (*key)
+ {
+ out->append(STRING_WITH_LEN("NULL"));
+ return;
+ }
+ null_byte++; // Skip null byte
+ }
+
+ set_key_image(key + null_byte, length);
+ print_key_value(&tmp, length);
+ if (charset() == &my_charset_bin)
+ out->append(tmp.ptr(), tmp.length(), tmp.charset());
+ else
+ tmp.print(out, system_charset_info);
+}
+
+
void Field::print_key_value_binary(String *out, const uchar* key, uint32 length)
{
out->append_semi_hex((const char*)key, length, charset());
diff --git a/sql/field.h b/sql/field.h
index e4c5ffcc0de..420ff6866bd 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -1611,6 +1611,7 @@ public:
bool set_warning(Sql_condition::enum_warning_level, unsigned int code,
int cuted_increment, ulong current_row=0) const;
virtual void print_key_value(String *out, uint32 length);
+ void print_key_part_value(String *out, const uchar *key, uint32 length);
void print_key_value_binary(String *out, const uchar* key, uint32 length);
protected:
bool set_warning(unsigned int code, int cuted_increment) const
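
The new Field::print_key_part_value() centralizes the key-image convention described in its comment: for a nullable field, byte 0 of the key part is a NULL indicator and the value image starts right after it. Below is a minimal standalone sketch of that layout using plain C++ types instead of the server's Field/String classes (illustrative only, not server code):

#include <cstdint>
#include <cstdio>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical decoder for a key-part image: optional NULL byte, then value bytes.
static std::string decode_key_part(const std::vector<uint8_t> &key,
                                   bool field_is_nullable)
{
  size_t pos= 0;
  if (field_is_nullable)
  {
    if (key[pos])              // NULL indicator set => the key value is SQL NULL
      return "NULL";
    pos++;                     // skip the NULL indicator byte
  }
  std::string out;             // print the value image as hex for the demo
  char buf[4];
  for (size_t i= pos; i < key.size(); i++)
  {
    std::snprintf(buf, sizeof(buf), "%02x", key[i]);
    out+= buf;
  }
  return out;
}

int main()
{
  std::vector<uint8_t> null_key=  {1, 0, 0, 0, 0};   // flag set: value is NULL
  std::vector<uint8_t> value_key= {0, 0, 0, 0, 42};  // flag clear: 4-byte value follows
  std::cout << decode_key_part(null_key, true)  << "\n";   // NULL
  std::cout << decode_key_part(value_key, true) << "\n";   // 0000002a
  return 0;
}
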
diff --git a/sql/filesort.cc b/sql/filesort.cc
index ac43c96b0e0..f5d57a36685 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -586,7 +586,14 @@ Filesort::make_sortorder(THD *thd, JOIN *join, table_map first_table_bit)
if (item->type() == Item::FIELD_ITEM)
pos->field= ((Item_field*) item)->field;
else if (item->type() == Item::SUM_FUNC_ITEM && !item->const_item())
- pos->field= ((Item_sum*) item)->get_tmp_table_field();
+ {
+ // Aggregate, or Item_aggregate_ref
+ DBUG_ASSERT(first->type() == Item::SUM_FUNC_ITEM ||
+ (first->type() == Item::REF_ITEM &&
+ static_cast<Item_ref*>(first)->ref_type() ==
+ Item_ref::AGGREGATE_REF));
+ pos->field= first->get_tmp_table_field();
+ }
else if (item->type() == Item::COPY_STR_ITEM)
{ // Blob patch
pos->item= ((Item_copy*) item)->get_item();
diff --git a/sql/item_jsonfunc.cc b/sql/item_jsonfunc.cc
index fe3fae5840c..aec3e1edcd3 100644
--- a/sql/item_jsonfunc.cc
+++ b/sql/item_jsonfunc.cc
@@ -951,6 +951,41 @@ double Item_func_json_extract::val_real()
}
+my_decimal *Item_func_json_extract::val_decimal(my_decimal *to)
+{
+ json_value_types type;
+ char *value;
+ int value_len;
+
+ if (read_json(NULL, &type, &value, &value_len) != NULL)
+ {
+ switch (type)
+ {
+ case JSON_VALUE_STRING:
+ case JSON_VALUE_NUMBER:
+ {
+ my_decimal *res= decimal_from_string_with_check(to, collation.collation,
+ value,
+ value + value_len);
+ null_value= res == NULL;
+ return res;
+ }
+ case JSON_VALUE_TRUE:
+ int2my_decimal(E_DEC_FATAL_ERROR, 1, false/*unsigned_flag*/, to);
+ return to;
+ case JSON_VALUE_OBJECT:
+ case JSON_VALUE_ARRAY:
+ case JSON_VALUE_FALSE:
+ case JSON_VALUE_NULL:
+ break;
+ };
+ }
+ int2my_decimal(E_DEC_FATAL_ERROR, 0, false/*unsigned_flag*/, to);
+ return to;
+}
+
+
+
bool Item_func_json_contains::fix_length_and_dec()
{
a2_constant= args[1]->const_item();
diff --git a/sql/item_jsonfunc.h b/sql/item_jsonfunc.h
index 69625831045..6bd2a81afc6 100644
--- a/sql/item_jsonfunc.h
+++ b/sql/item_jsonfunc.h
@@ -241,6 +241,7 @@ public:
String *val_str(String *);
longlong val_int();
double val_real();
+ my_decimal *val_decimal(my_decimal *);
uint get_n_paths() const { return arg_count - 1; }
Item *get_copy(THD *thd)
{ return get_item_copy<Item_func_json_extract>(thd, this); }
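
The new val_decimal() gives Item_func_json_extract a decimal conversion path: JSON strings and numbers are parsed as decimals, true maps to 1, and objects, arrays, false and null fall back to 0. A standalone sketch of that mapping follows (hypothetical enum, a double standing in for my_decimal, and parse-error handling simplified):

#include <iostream>
#include <string>

// Hypothetical mirror of the JSON value types the function switches on.
enum JsonType { JSON_STRING, JSON_NUMBER, JSON_TRUE, JSON_FALSE,
                JSON_NULL, JSON_OBJECT, JSON_ARRAY };

// Model of the conversion: strings/numbers are parsed, true is 1, everything else 0.
static double json_scalar_to_decimal(JsonType type, const std::string &text)
{
  switch (type)
  {
  case JSON_STRING:
  case JSON_NUMBER:
    try { return std::stod(text); } catch (...) { return 0.0; }
  case JSON_TRUE:
    return 1.0;
  default:                     // objects, arrays, false, null
    return 0.0;
  }
}

int main()
{
  std::cout << json_scalar_to_decimal(JSON_NUMBER, "12.50") << "\n";  // 12.5
  std::cout << json_scalar_to_decimal(JSON_TRUE,   "true")  << "\n";  // 1
  std::cout << json_scalar_to_decimal(JSON_ARRAY,  "[1,2]") << "\n";  // 0
  return 0;
}
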
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index 23ff906e3cf..1690ec96e65 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -2104,7 +2104,7 @@ bool Item_allany_subselect::transform_into_max_min(JOIN *join)
The swap is needed for expressions of type 'f1 < ALL ( SELECT ....)'
where we want to evaluate the sub query even if f1 would be null.
*/
- subs= func->create_swap(thd, *(optimizer->get_cache()), subs);
+ subs= func->create_swap(thd, expr, subs);
thd->change_item_tree(place, subs);
if (subs->fix_fields(thd, &subs))
DBUG_RETURN(true);
diff --git a/sql/item_windowfunc.cc b/sql/item_windowfunc.cc
index acf2ee4fdda..25e86c5d777 100644
--- a/sql/item_windowfunc.cc
+++ b/sql/item_windowfunc.cc
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2016,2017 MariaDB
+ Copyright (c) 2016, 2020, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -341,9 +341,9 @@ bool Item_sum_hybrid_simple::fix_fields(THD *thd, Item **ref)
for (uint i= 0; i < arg_count; i++)
{
- // 'item' can be changed during fix_fields
if (args[i]->fix_fields_if_needed_for_scalar(thd, &args[i]))
return TRUE;
+ with_window_func|= args[i]->with_window_func;
}
for (uint i= 0; i < arg_count && !m_with_subquery; i++)
diff --git a/sql/mysql_install_db.cc b/sql/mysql_install_db.cc
index ed3f777b1de..651eea33304 100644
--- a/sql/mysql_install_db.cc
+++ b/sql/mysql_install_db.cc
@@ -426,8 +426,8 @@ static int register_service()
static void clean_directory(const char *dir)
{
- char dir2[MAX_PATH+2];
- *(strmake_buf(dir2, dir)+1)= 0;
+ char dir2[MAX_PATH + 4]= {};
+ snprintf(dir2, MAX_PATH+2, "%s\\*", dir);
SHFILEOPSTRUCT fileop;
fileop.hwnd= NULL; /* no status display */
@@ -556,7 +556,7 @@ static int create_db_instance()
DWORD cwd_len= MAX_PATH;
char cmdline[3*MAX_PATH];
FILE *in;
- bool cleanup_datadir= true;
+ bool created_datadir= false;
DWORD last_error;
verbose("Running bootstrap");
@@ -565,7 +565,11 @@ static int create_db_instance()
/* Create datadir and datadir/mysql, if they do not already exist. */
- if (!CreateDirectory(opt_datadir, NULL) && (GetLastError() != ERROR_ALREADY_EXISTS))
+ if (CreateDirectory(opt_datadir, NULL))
+ {
+ created_datadir= true;
+ }
+ else if (GetLastError() != ERROR_ALREADY_EXISTS)
{
last_error = GetLastError();
switch(last_error)
@@ -602,9 +606,11 @@ static int create_db_instance()
}
}
- if (PathIsDirectoryEmpty(opt_datadir))
+ if (!PathIsDirectoryEmpty(opt_datadir))
{
- cleanup_datadir= false;
+ fprintf(stderr,"ERROR : Data directory %s is not empty."
+ " Only new or empty existing directories are accepted for --datadir\n",opt_datadir);
+ exit(1);
}
if (!CreateDirectory("mysql",NULL))
@@ -732,10 +738,12 @@ static int create_db_instance()
}
end:
- if (ret && cleanup_datadir)
+ if (ret)
{
SetCurrentDirectory(cwd);
clean_directory(opt_datadir);
+ if (created_datadir)
+ RemoveDirectory(opt_datadir);
}
return ret;
}
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 80d67d884bf..1993dc265a0 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -442,6 +442,13 @@ static
void print_range(String *out, const KEY_PART_INFO *key_part,
KEY_MULTI_RANGE *range, uint n_key_parts);
+static
+void print_range_for_non_indexed_field(String *out, Field *field,
+ KEY_MULTI_RANGE *range);
+
+static void print_min_range_operator(String *out, const ha_rkey_function flag);
+static void print_max_range_operator(String *out, const ha_rkey_function flag);
+
/*
SEL_IMERGE is a list of possible ways to do index merge, i.e. it is
@@ -2691,10 +2698,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
DBUG_PRINT("info",("Time to scan table: %g", read_time));
Json_writer_object table_records(thd);
- if (head->reginfo.join_tab)
- table_records.add_table_name(head->reginfo.join_tab);
- else
- table_records.add_table_name(head);
+ table_records.add_table_name(head);
+
Json_writer_object trace_range(thd, "range_analysis");
{
Json_writer_object table_rec(thd, "table_scan");
@@ -3191,6 +3196,7 @@ static
double records_in_column_ranges(PARAM *param, uint idx,
SEL_ARG *tree)
{
+ THD *thd= param->thd;
SEL_ARG_RANGE_SEQ seq;
KEY_MULTI_RANGE range;
range_seq_t seq_it;
@@ -3217,6 +3223,8 @@ double records_in_column_ranges(PARAM *param, uint idx,
seq_it= seq_if.init((void *) &seq, 0, flags);
+ Json_writer_array range_trace(thd, "ranges");
+
while (!seq_if.next(seq_it, &range))
{
key_range *min_endp, *max_endp;
@@ -3233,6 +3241,13 @@ double records_in_column_ranges(PARAM *param, uint idx,
if (range.start_key.flag == HA_READ_BEFORE_KEY)
range_flag |= NEAR_MAX;
+ if (unlikely(thd->trace_started()))
+ {
+ StringBuffer<128> range_info(system_charset_info);
+ print_range_for_non_indexed_field(&range_info, field, &range);
+ range_trace.add(range_info.c_ptr_safe(), range_info.length());
+ }
+
rows= get_column_range_cardinality(field, min_endp, max_endp, range_flag);
if (DBL_MAX == rows)
{
@@ -15820,6 +15835,37 @@ void QUICK_GROUP_MIN_MAX_SELECT::dbug_dump(int indent, bool verbose)
#endif /* !DBUG_OFF */
+
+/*
+ @brief Print the comparison operator for the min range
+*/
+
+static void print_min_range_operator(String *out, const ha_rkey_function flag)
+{
+ if (flag == HA_READ_AFTER_KEY)
+ out->append(STRING_WITH_LEN(" < "));
+ else if (flag == HA_READ_KEY_EXACT || flag == HA_READ_KEY_OR_NEXT)
+ out->append(STRING_WITH_LEN(" <= "));
+ else
+ out->append(STRING_WITH_LEN(" ? "));
+}
+
+
+/*
+ @brief Print the comparison operator for the max range
+*/
+
+static void print_max_range_operator(String *out, const ha_rkey_function flag)
+{
+ if (flag == HA_READ_BEFORE_KEY)
+ out->append(STRING_WITH_LEN(" < "));
+ else if (flag == HA_READ_AFTER_KEY)
+ out->append(STRING_WITH_LEN(" <= "));
+ else
+ out->append(STRING_WITH_LEN(" ? "));
+}
+
+
static
void print_range(String *out, const KEY_PART_INFO *key_part,
KEY_MULTI_RANGE *range, uint n_key_parts)
@@ -15848,30 +15894,55 @@ void print_range(String *out, const KEY_PART_INFO *key_part,
{
print_key_value(out, key_part, range->start_key.key,
range->start_key.length);
- if (range->start_key.flag == HA_READ_AFTER_KEY)
- out->append(STRING_WITH_LEN(" < "));
- else if (range->start_key.flag == HA_READ_KEY_EXACT ||
- range->start_key.flag == HA_READ_KEY_OR_NEXT)
- out->append(STRING_WITH_LEN(" <= "));
- else
- out->append(STRING_WITH_LEN(" ? "));
+ print_min_range_operator(out, range->start_key.flag);
}
print_keyparts_name(out, key_part, n_key_parts, keypart_map);
if (range->end_key.length)
{
- if (range->end_key.flag == HA_READ_BEFORE_KEY)
- out->append(STRING_WITH_LEN(" < "));
- else if (range->end_key.flag == HA_READ_AFTER_KEY)
- out->append(STRING_WITH_LEN(" <= "));
- else
- out->append(STRING_WITH_LEN(" ? "));
+ print_max_range_operator(out, range->end_key.flag);
print_key_value(out, key_part, range->end_key.key,
range->end_key.length);
}
}
+
+/*
+ @brief Print range created for non-indexed columns
+
+ @param
+ out output string
+ field field for which the range is printed
+ range range for the field
+*/
+
+static
+void print_range_for_non_indexed_field(String *out, Field *field,
+ KEY_MULTI_RANGE *range)
+{
+ TABLE *table= field->table;
+ my_bitmap_map *old_sets[2];
+ dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set);
+
+ if (range->start_key.length)
+ {
+ field->print_key_part_value(out, range->start_key.key, field->key_length());
+ print_min_range_operator(out, range->start_key.flag);
+ }
+
+ out->append(field->field_name);
+
+ if (range->end_key.length)
+ {
+ print_max_range_operator(out, range->end_key.flag);
+ field->print_key_part_value(out, range->end_key.key, field->key_length());
+ }
+ dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
+}
+
+
+
/*
Add ranges to the trace
@@ -15943,30 +16014,8 @@ static void print_key_value(String *out, const KEY_PART_INFO *key_part,
field= key_part->field;
store_length= key_part->store_length;
- if (field->real_maybe_null())
- {
- /*
- Byte 0 of key is the null-byte. If set, key is NULL.
- Otherwise, print the key value starting immediately after the
- null-byte
- */
- if (*key)
- {
- out->append(STRING_WITH_LEN("NULL"));
- goto next;
- }
- key++; // Skip null byte
- store_length--;
- }
-
- field->set_key_image(key, key_part->length);
- field->print_key_value(&tmp, key_part->length);
- if (field->charset() == &my_charset_bin)
- out->append(tmp.ptr(), tmp.length(), tmp.charset());
- else
- tmp.print(out, system_charset_info);
+ field->print_key_part_value(out, key, key_part->length);
- next:
if (key + store_length < key_end)
out->append(STRING_WITH_LEN(","));
}
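
print_range_for_non_indexed_field() and the two operator helpers render a column range in the optimizer trace in the form "min_value OP column OP max_value", e.g. "10 <= t1.col < 20". A standalone sketch of that rendering logic, with a hypothetical enum standing in for the server's ha_rkey_function flags:

#include <iostream>
#include <string>

enum KeyFlag { READ_KEY_EXACT, READ_KEY_OR_NEXT, READ_AFTER_KEY, READ_BEFORE_KEY };

static std::string min_op(KeyFlag f)
{
  if (f == READ_AFTER_KEY) return " < ";
  if (f == READ_KEY_EXACT || f == READ_KEY_OR_NEXT) return " <= ";
  return " ? ";
}

static std::string max_op(KeyFlag f)
{
  if (f == READ_BEFORE_KEY) return " < ";
  if (f == READ_AFTER_KEY) return " <= ";
  return " ? ";
}

// Render "min_val OP column OP max_val", omitting any missing endpoint.
static std::string render_range(const std::string &column,
                                const std::string *min_val, KeyFlag min_flag,
                                const std::string *max_val, KeyFlag max_flag)
{
  std::string out;
  if (min_val) out+= *min_val + min_op(min_flag);
  out+= column;
  if (max_val) out+= max_op(max_flag) + *max_val;
  return out;
}

int main()
{
  std::string lo= "10", hi= "20";
  // e.g. a condition like "col >= 10 AND col < 20" on a non-indexed column
  std::cout << render_range("t1.col", &lo, READ_KEY_OR_NEXT, &hi, READ_BEFORE_KEY)
            << "\n";                                  // prints: 10 <= t1.col < 20
  return 0;
}
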
diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc
index d8ef17e3494..11eccefdde9 100644
--- a/sql/rpl_rli.cc
+++ b/sql/rpl_rli.cc
@@ -1473,8 +1473,14 @@ bool Relay_log_info::stmt_done(my_off_t event_master_log_pos, THD *thd,
}
DBUG_EXECUTE_IF("inject_crash_before_flush_rli", DBUG_SUICIDE(););
if (mi->using_gtid == Master_info::USE_GTID_NO)
+ {
+ if (rgi->is_parallel_exec)
+ mysql_mutex_lock(&data_lock);
if (flush())
error= 1;
+ if (rgi->is_parallel_exec)
+ mysql_mutex_unlock(&data_lock);
+ }
DBUG_EXECUTE_IF("inject_crash_after_flush_rli", DBUG_SUICIDE(););
}
DBUG_RETURN(error);
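
The rpl_rli.cc hunk takes data_lock around flush() only when the event was applied by the parallel-replication path, where several worker threads can update the shared Relay_log_info concurrently. A minimal sketch of that conditional-locking pattern, with std::mutex standing in for mysql_mutex_t (illustrative only):

#include <mutex>

static std::mutex data_lock;   // stand-in for Relay_log_info::data_lock

// Flush relay-log state; lock only in the parallel-execution case, mirroring
// the "if (rgi->is_parallel_exec) mysql_mutex_lock(&data_lock)" pattern above.
static int flush_relay_state(bool is_parallel_exec)
{
  std::unique_lock<std::mutex> guard(data_lock, std::defer_lock);
  if (is_parallel_exec)
    guard.lock();
  // ... the actual flush of the relay-log info would happen here ...
  return 0;                    // guard releases the mutex automatically if held
}

int main()
{
  return flush_relay_state(true);
}
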
diff --git a/sql/signal_handler.cc b/sql/signal_handler.cc
index 147b2568981..5e3f32eae4e 100644
--- a/sql/signal_handler.cc
+++ b/sql/signal_handler.cc
@@ -279,7 +279,7 @@ extern "C" sig_handler handle_fatal_signal(int sig)
}
my_safe_printf_stderr("%s",
"The manual page at "
- "http://dev.mysql.com/doc/mysql/en/crashing.html contains\n"
+ "https://mariadb.com/kb/en/how-to-produce-a-full-stack-trace-for-mysqld/ contains\n"
"information that should help you find out what is causing the crash.\n");
#endif /* HAVE_STACKTRACE */
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index adfa0c08959..b6dcb49ed08 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2019, Oracle and/or its affiliates.
- Copyright (c) 2010, 2019, MariaDB Corporation.
+ Copyright (c) 2010, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1015,7 +1015,7 @@ public:
int save_union_explain_part2(Explain_query *output);
unit_common_op common_op();
- bool explainable()
+ bool explainable() const
{
/*
EXPLAIN/ANALYZE unit, when:
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index cd60472e81c..167aefee7a6 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -942,11 +942,6 @@ void execute_init_command(THD *thd, LEX_STRING *init_command,
char *buf= thd->strmake(init_command->str, len);
mysql_rwlock_unlock(var_lock);
-#if defined(ENABLED_PROFILING)
- thd->profiling.start_new_query();
- thd->profiling.set_query_source(buf, len);
-#endif
-
THD_STAGE_INFO(thd, stage_execution_of_init_command);
save_client_capabilities= thd->client_capabilities;
thd->client_capabilities|= CLIENT_MULTI_QUERIES;
@@ -961,9 +956,6 @@ void execute_init_command(THD *thd, LEX_STRING *init_command,
thd->client_capabilities= save_client_capabilities;
thd->net.vio= save_vio;
-#if defined(ENABLED_PROFILING)
- thd->profiling.finish_current_query();
-#endif
}
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 6dbdfe75767..93bef049d28 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -853,6 +853,8 @@ static bool insert_params_with_log(Prepared_statement *stmt, uchar *null_array,
if (param->convert_str_value(thd))
DBUG_RETURN(1); /* out of memory */
+
+ param->sync_clones();
}
if (acc.finalize())
DBUG_RETURN(1);
@@ -3130,7 +3132,10 @@ static void reset_stmt_params(Prepared_statement *stmt)
Item_param **item= stmt->param_array;
Item_param **end= item + stmt->param_count;
for (;item < end ; ++item)
+ {
(**item).reset();
+ (**item).sync_clones();
+ }
}
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 83ae25c962d..3ed0e288870 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -2121,9 +2121,13 @@ static int init_binlog_sender(binlog_send_info *info,
});
if (global_system_variables.log_warnings > 1)
+ {
sql_print_information(
- "Start binlog_dump to slave_server(%lu), pos(%s, %lu)",
- thd->variables.server_id, log_ident, (ulong)*pos);
+ "Start binlog_dump to slave_server(%lu), pos(%s, %lu), "
+ "using_gtid(%d), gtid('%s')", thd->variables.server_id,
+ log_ident, (ulong)*pos, info->using_gtid_state,
+ connect_gtid_state.c_ptr_quick());
+ }
#ifndef DBUG_OFF
if (opt_sporadic_binlog_dump_fail && (binlog_dump_count++ % 2))
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 52e6b905638..552363f80c0 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -982,9 +982,12 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables)
}
bool is_select= false;
+ bool use_sysvar= false;
switch (thd->lex->sql_command)
{
case SQLCOM_SELECT:
+ use_sysvar= true;
+ /* fall through */
case SQLCOM_INSERT_SELECT:
case SQLCOM_REPLACE_SELECT:
case SQLCOM_DELETE_MULTI:
@@ -1028,7 +1031,7 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables)
}
// propagate system_time from sysvar
- if (!vers_conditions.is_set() && is_select)
+ if (!vers_conditions.is_set() && use_sysvar)
{
if (vers_conditions.init_from_sysvar(thd))
DBUG_RETURN(-1);
@@ -6942,6 +6945,7 @@ void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array)
uint n_tables= my_count_bits(map);
if (n_tables == 1) // Only one table
{
+ DBUG_ASSERT(!(map & PSEUDO_TABLE_BITS)); // Must be a real table
Table_map_iterator it(map);
int tablenr= it.next_bit();
DBUG_ASSERT(tablenr != Table_map_iterator::BITMAP_END);
@@ -16610,10 +16614,15 @@ static uint build_bitmap_for_nested_joins(List<TABLE_LIST> *join_list,
/**
- Set NESTED_JOIN::counter=0 in all nested joins in passed list.
+ Set NESTED_JOIN::counter and n_tables in all nested joins in passed list.
- Recursively set NESTED_JOIN::counter=0 for all nested joins contained in
- the passed join_list.
+ For all nested joins contained in the passed join_list (including its
+ children), set:
+ - nested_join->counter=0
+ - nested_join->n_tables= {number of non-degenerate direct children}.
+
+ Non-degenerate means non-const base table or a join nest that has a
+ non-degenerate child.
@param join_list List of nested joins to process. It may also contain base
tables which will be ignored.
@@ -16636,8 +16645,11 @@ static uint reset_nj_counters(JOIN *join, List<TABLE_LIST> *join_list)
if (!nested_join->n_tables)
is_eliminated_nest= TRUE;
}
- if ((table->nested_join && !is_eliminated_nest) ||
- (!table->nested_join && (table->table->map & ~join->eliminated_tables)))
+ const table_map removed_tables= join->eliminated_tables |
+ join->const_table_map;
+
+ if ((table->nested_join && !is_eliminated_nest) ||
+ (!table->nested_join && (table->table->map & ~removed_tables)))
n++;
}
DBUG_RETURN(n);
@@ -27062,13 +27074,18 @@ int JOIN::save_explain_data_intern(Explain_query *output,
output->add_node(xpl_sel);
}
- for (SELECT_LEX_UNIT *tmp_unit= join->select_lex->first_inner_unit();
- tmp_unit;
- tmp_unit= tmp_unit->next_unit())
- {
- if (tmp_unit->explainable())
- explain->add_child(tmp_unit->first_select()->select_number);
- }
+ /*
+ Don't try to add query plans for child selects if this select was pushed
+ down into a Smart Storage Engine:
+ - the entire statement was pushed down ("PUSHED SELECT"), or
+ - this derived table was pushed down ("PUSHED DERIVED")
+ */
+ if (!select_lex->pushdown_select && select_lex->type != pushed_derived_text)
+ for (SELECT_LEX_UNIT *tmp_unit= join->select_lex->first_inner_unit();
+ tmp_unit;
+ tmp_unit= tmp_unit->next_unit())
+ if (tmp_unit->explainable())
+ explain->add_child(tmp_unit->first_select()->select_number);
if (select_lex->is_top_level_node())
output->query_plan_ready();
@@ -27099,7 +27116,16 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
THD *thd=join->thd;
select_result *result=join->result;
DBUG_ENTER("select_describe");
-
+
+ if (join->select_lex->pushdown_select)
+ {
+ /*
+ The whole statement was pushed down to a Smart Storage Engine. Do not
+ attempt to produce a query plan locally.
+ */
+ DBUG_VOID_RETURN;
+ }
+
/* Update the QPF with latest values of using_temporary, using_filesort */
for (SELECT_LEX_UNIT *unit= join->select_lex->first_inner_unit();
unit;
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index f61c5677ced..5eb2d911926 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -5275,6 +5275,29 @@ bool store_schema_schemata(THD* thd, TABLE *table, LEX_CSTRING *db_name,
}
+/*
+ Check if the specified database exists on disk.
+
+ @param dbname - the database name
+ @retval true - on error, the database directory does not exist
+ @retval false - on success, the database directory exists
+*/
+static bool verify_database_directory_exists(const LEX_CSTRING &dbname)
+{
+ DBUG_ENTER("verify_database_directory_exists");
+ char path[FN_REFLEN + 16];
+ uint path_len;
+ MY_STAT stat_info;
+ if (!dbname.str[0])
+ DBUG_RETURN(true); // Empty database name: does not exist.
+ path_len= build_table_filename(path, sizeof(path) - 1, dbname.str, "", "", 0);
+ path[path_len - 1]= 0;
+ if (!mysql_file_stat(key_file_misc, path, &stat_info, MYF(0)))
+ DBUG_RETURN(true); // The database directory was not found: does not exist.
+ DBUG_RETURN(false); // The database directory was found.
+}
+
+
int fill_schema_schemata(THD *thd, TABLE_LIST *tables, COND *cond)
{
/*
@@ -5303,19 +5326,10 @@ int fill_schema_schemata(THD *thd, TABLE_LIST *tables, COND *cond)
If we have lookup db value we should check that the database exists
*/
if(lookup_field_vals.db_value.str && !lookup_field_vals.wild_db_value &&
- db_names.at(0) != &INFORMATION_SCHEMA_NAME)
- {
- char path[FN_REFLEN+16];
- uint path_len;
- MY_STAT stat_info;
- if (!lookup_field_vals.db_value.str[0])
- DBUG_RETURN(0);
- path_len= build_table_filename(path, sizeof(path) - 1,
- lookup_field_vals.db_value.str, "", "", 0);
- path[path_len-1]= 0;
- if (!mysql_file_stat(key_file_misc, path, &stat_info, MYF(0)))
- DBUG_RETURN(0);
- }
+ (!db_names.elements() /* The database name was too long */||
+ (db_names.at(0) != &INFORMATION_SCHEMA_NAME &&
+ verify_database_directory_exists(lookup_field_vals.db_value))))
+ DBUG_RETURN(0);
for (size_t i=0; i < db_names.elements(); i++)
{
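
verify_database_directory_exists() factors the old inline check out of fill_schema_schemata(): build the path of the database directory under the data directory and stat it. A standalone sketch of the same check, using std::filesystem in place of build_table_filename()/mysql_file_stat() (illustrative only):

#include <filesystem>
#include <iostream>
#include <string>

// Returns true when the database directory is missing, mirroring the
// "true on error" convention of the new helper above.
static bool database_directory_missing(const std::string &datadir,
                                       const std::string &dbname)
{
  if (dbname.empty())
    return true;                                   // empty name: nothing to find
  std::filesystem::path dir= std::filesystem::path(datadir) / dbname;
  return !std::filesystem::is_directory(dir);
}

int main()
{
  std::cout << database_directory_missing("/var/lib/mysql", "test") << "\n";
  return 0;
}
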
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 5c491421511..f091fd29303 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -9844,11 +9844,16 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db,
(!create_info->db_type || /* unknown engine */
!(create_info->db_type->flags & HTON_SUPPORT_LOG_TABLES)))
{
+ unsupported:
my_error(ER_UNSUPORTED_LOG_ENGINE, MYF(0),
hton_name(create_info->db_type)->str);
DBUG_RETURN(true);
}
+ if (create_info->db_type == maria_hton &&
+ create_info->transactional != HA_CHOICE_NO)
+ goto unsupported;
+
#ifdef WITH_PARTITION_STORAGE_ENGINE
if (alter_info->partition_flags & ALTER_PARTITION_INFO)
{
@@ -11130,6 +11135,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
sql_mode_t save_sql_mode= thd->variables.sql_mode;
ulonglong prev_insert_id, time_to_report_progress;
Field **dfield_ptr= to->default_field;
+ uint save_to_s_default_fields= to->s->default_fields;
bool make_versioned= !from->versioned() && to->versioned();
bool make_unversioned= from->versioned() && !to->versioned();
bool keep_versioned= from->versioned() && to->versioned();
@@ -11463,6 +11469,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
*copied= found_count;
*deleted=delete_count;
to->file->ha_release_auto_increment();
+ to->s->default_fields= save_to_s_default_fields;
if (!cleanup_done)
{
diff --git a/sql/table.cc b/sql/table.cc
index 73c3bd4b3ba..7b7313d3ea1 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -5374,6 +5374,8 @@ void TABLE::init(THD *thd, TABLE_LIST *tl)
fulltext_searched= 0;
file->ft_handler= 0;
reginfo.impossible_range= 0;
+ reginfo.join_tab= NULL;
+ reginfo.not_exists_optimize= FALSE;
created= TRUE;
cond_selectivity= 1.0;
cond_selectivity_sampling_explain= NULL;
diff --git a/sql/table.h b/sql/table.h
index f2fad6c19b2..43ef03e16df 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -2984,9 +2984,11 @@ typedef struct st_nested_join
Before each use the counters are zeroed by reset_nj_counters.
*/
uint counter;
+
/*
- Number of elements in join_list that were not (or contain table(s) that
- weren't) removed by table elimination.
+ Number of elements in join_list that participate in the join plan choice:
+ - Base tables that were not removed by table elimination
+ - Join nests that were not removed by mark_join_nest_as_const
*/
uint n_tables;
nested_join_map nj_map; /* Bit used to identify this nested join*/
diff --git a/sql/winservice.c b/sql/winservice.c
index 2f30cac9206..c275e6d99c8 100644
--- a/sql/winservice.c
+++ b/sql/winservice.c
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2011, 2012, Monty Program Ab
+ Copyright (c) 2011, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -148,7 +148,7 @@ int get_mysql_service_properties(const wchar_t *bin_path,
{
/*
There are rare cases where service config does not have
- --defaults-file in the binary parth . There services were
+ --defaults-file in the binary path . There services were
registered with plain mysqld --install, the data directory is
next to "bin" in this case.
*/
@@ -209,7 +209,7 @@ int get_mysql_service_properties(const wchar_t *bin_path,
}
}
- if(!have_inifile)
+ if(!have_inifile || props->datadir[0] == 0)
{
/*
Hard, although a rare case, we're guessing datadir and defaults-file.
@@ -233,22 +233,25 @@ int get_mysql_service_properties(const wchar_t *bin_path,
*p= 0;
}
- /* Look for my.ini, my.cnf in the install root */
- sprintf_s(props->inifile, MAX_PATH, "%s\\my.ini", install_root);
- if (GetFileAttributes(props->inifile) == INVALID_FILE_ATTRIBUTES)
+ if (!have_inifile)
{
- sprintf_s(props->inifile, MAX_PATH, "%s\\my.cnf", install_root);
- }
- if (GetFileAttributes(props->inifile) != INVALID_FILE_ATTRIBUTES)
- {
- /* Ini file found, get datadir from there */
- GetPrivateProfileString("mysqld", "datadir", NULL, props->datadir,
- MAX_PATH, props->inifile);
- }
- else
- {
- /* No ini file */
- props->inifile[0]= 0;
+ /* Look for my.ini, my.cnf in the install root */
+ sprintf_s(props->inifile, MAX_PATH, "%s\\my.ini", install_root);
+ if (GetFileAttributes(props->inifile) == INVALID_FILE_ATTRIBUTES)
+ {
+ sprintf_s(props->inifile, MAX_PATH, "%s\\my.cnf", install_root);
+ }
+ if (GetFileAttributes(props->inifile) != INVALID_FILE_ATTRIBUTES)
+ {
+ /* Ini file found, get datadir from there */
+ GetPrivateProfileString("mysqld", "datadir", NULL, props->datadir,
+ MAX_PATH, props->inifile);
+ }
+ else
+ {
+ /* No ini file */
+ props->inifile[0]= 0;
+ }
}
/* Try datadir in install directory.*/
diff --git a/sql/wsrep_server_service.cc b/sql/wsrep_server_service.cc
index 50aea494255..7ba744b4d3c 100644
--- a/sql/wsrep_server_service.cc
+++ b/sql/wsrep_server_service.cc
@@ -303,9 +303,21 @@ wsrep::gtid Wsrep_server_service::get_position(wsrep::client_service&)
return wsrep_get_SE_checkpoint<wsrep::gtid>();
}
-void Wsrep_server_service::set_position(wsrep::client_service&,
+void Wsrep_server_service::set_position(wsrep::client_service& c WSREP_UNUSED,
const wsrep::gtid& gtid)
{
+ Wsrep_client_service& cs WSREP_UNUSED (static_cast<Wsrep_client_service&>(c));
+ DBUG_ASSERT(cs.m_client_state.transaction().state()
+ == wsrep::transaction::s_aborted);
+ // Wait until all prior committers have finished.
+ wsrep::gtid wait_for(gtid.id(),
+ wsrep::seqno(gtid.seqno().get() - 1));
+ if (auto err = Wsrep_server_state::instance().provider()
+ .wait_for_gtid(wait_for, std::numeric_limits<int>::max()))
+ {
+ WSREP_WARN("Wait for gtid returned error %d while waiting for "
+ "prior transactions to commit before setting position", err);
+ }
wsrep_set_SE_checkpoint(gtid, wsrep_gtid_server.gtid());
}