summaryrefslogtreecommitdiff
path: root/sql
diff options
context:
space:
mode:
authorMarko Mäkelä <marko.makela@mariadb.com>2022-07-27 18:26:21 +0300
committerMarko Mäkelä <marko.makela@mariadb.com>2022-07-27 18:26:21 +0300
commit742e1c727fc2be50b758068c2ab92abb19f3ff56 (patch)
treeb4efffa4eade915001faf0ceaf50715306416907 /sql
parent19283c67c6d196a49211da6f925ca59fceef3ea0 (diff)
parent30914389fe9ca13cf29470dd033a5cf6997a3931 (diff)
downloadmariadb-git-742e1c727fc2be50b758068c2ab92abb19f3ff56.tar.gz
Merge 10.6 into 10.7
Diffstat (limited to 'sql')
-rw-r--r--sql/field.cc6
-rw-r--r--sql/ha_partition.cc36
-rw-r--r--sql/ha_partition.h2
-rw-r--r--sql/item_jsonfunc.cc143
-rw-r--r--sql/json_table.cc39
-rw-r--r--sql/log.cc78
-rw-r--r--sql/log.h2
-rw-r--r--sql/mysqld.cc20
-rw-r--r--sql/opt_subselect.cc1
-rw-r--r--sql/rpl_gtid.cc9
-rw-r--r--sql/rpl_gtid.h3
-rw-r--r--sql/rpl_mi.cc3
-rw-r--r--sql/rpl_mi.h14
-rw-r--r--sql/slave.cc31
-rw-r--r--sql/sql_base.cc178
-rw-r--r--sql/sql_class.cc2
-rw-r--r--sql/sql_insert.cc14
-rw-r--r--sql/sql_lex.cc12
-rw-r--r--sql/sql_lex.h12
-rw-r--r--sql/sql_parse.cc7
-rw-r--r--sql/sql_prepare.cc2
-rw-r--r--sql/sql_reload.cc2
-rw-r--r--sql/sql_select.cc22
-rw-r--r--sql/sql_select.h1
-rw-r--r--sql/sql_sequence.cc10
-rw-r--r--sql/sql_type.cc5
-rw-r--r--sql/sql_update.cc4
-rw-r--r--sql/sql_yacc.yy2
-rw-r--r--sql/sys_vars.cc5
-rw-r--r--sql/temporary_tables.cc4
30 files changed, 509 insertions, 160 deletions
diff --git a/sql/field.cc b/sql/field.cc
index 2435b689e48..249269a6b1d 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -7555,7 +7555,7 @@ my_decimal *Field_string::val_decimal(my_decimal *decimal_value)
THD *thd= get_thd();
Converter_str2my_decimal_with_warn(thd,
Warn_filter_string(thd, this),
- E_DEC_FATAL_ERROR,
+ E_DEC_FATAL_ERROR & ~E_DEC_BAD_NUM,
Field_string::charset(),
(const char *) ptr,
field_length, decimal_value);
@@ -7916,7 +7916,7 @@ my_decimal *Field_varstring::val_decimal(my_decimal *decimal_value)
DBUG_ASSERT(marked_for_read());
THD *thd= get_thd();
Converter_str2my_decimal_with_warn(thd, Warn_filter(thd),
- E_DEC_FATAL_ERROR,
+ E_DEC_FATAL_ERROR & ~E_DEC_BAD_NUM,
Field_varstring::charset(),
(const char *) get_data(),
get_length(), decimal_value);
@@ -8762,7 +8762,7 @@ my_decimal *Field_blob::val_decimal(my_decimal *decimal_value)
THD *thd= get_thd();
Converter_str2my_decimal_with_warn(thd, Warn_filter(thd),
- E_DEC_FATAL_ERROR,
+ E_DEC_FATAL_ERROR & ~E_DEC_BAD_NUM,
Field_blob::charset(),
blob, length, decimal_value);
return decimal_value;
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 7487ce24412..4a9e6cd4f1a 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -4509,15 +4509,15 @@ int ha_partition::write_row(const uchar * buf)
if (have_auto_increment)
{
if (!table_share->next_number_keypart)
- update_next_auto_inc_val();
- error= update_auto_increment();
+ if (unlikely(error= update_next_auto_inc_val()))
+ goto exit;
/*
If we have failed to set the auto-increment value for this row,
it is highly likely that we will not be able to insert it into
the correct partition. We must check and fail if necessary.
*/
- if (unlikely(error))
+ if (unlikely(error= update_auto_increment()))
goto exit;
/*
@@ -8475,6 +8475,7 @@ int ha_partition::compare_number_of_records(ha_partition *me,
int ha_partition::info(uint flag)
{
+ int error;
uint no_lock_flag= flag & HA_STATUS_NO_LOCK;
uint extra_var_flag= flag & HA_STATUS_VARIABLE_EXTRA;
DBUG_ENTER("ha_partition::info");
@@ -8527,7 +8528,11 @@ int ha_partition::info(uint flag)
break;
}
file= *file_array;
- file->info(HA_STATUS_AUTO | no_lock_flag);
+ if ((error= file->info(HA_STATUS_AUTO | no_lock_flag)))
+ {
+ unlock_auto_increment();
+ DBUG_RETURN(error);
+ }
set_if_bigger(auto_increment_value,
file->stats.auto_increment_value);
} while (*(++file_array));
@@ -8584,7 +8589,8 @@ int ha_partition::info(uint flag)
i= bitmap_get_next_set(&m_part_info->read_partitions, i))
{
file= m_file[i];
- file->info(HA_STATUS_VARIABLE | no_lock_flag | extra_var_flag);
+ if ((error= file->info(HA_STATUS_VARIABLE | no_lock_flag | extra_var_flag)))
+ DBUG_RETURN(error);
stats.records+= file->stats.records;
stats.deleted+= file->stats.deleted;
stats.data_file_length+= file->stats.data_file_length;
@@ -8673,7 +8679,8 @@ int ha_partition::info(uint flag)
if (!(flag & HA_STATUS_VARIABLE) ||
!bitmap_is_set(&(m_part_info->read_partitions),
(uint) (file_array - m_file)))
- file->info(HA_STATUS_VARIABLE | no_lock_flag | extra_var_flag);
+ if ((error= file->info(HA_STATUS_VARIABLE | no_lock_flag | extra_var_flag)))
+ DBUG_RETURN(error);
if (file->stats.records > max_records || !handler_instance_set)
{
handler_instance_set= 1;
@@ -8694,7 +8701,8 @@ int ha_partition::info(uint flag)
this);
file= m_file[handler_instance];
- file->info(HA_STATUS_CONST | no_lock_flag);
+ if ((error= file->info(HA_STATUS_CONST | no_lock_flag)))
+ DBUG_RETURN(error);
stats.block_size= file->stats.block_size;
stats.create_time= file->stats.create_time;
ref_length= m_ref_length;
@@ -8710,7 +8718,8 @@ int ha_partition::info(uint flag)
Note: all engines does not support HA_STATUS_ERRKEY, so set errkey.
*/
file->errkey= errkey;
- file->info(HA_STATUS_ERRKEY | no_lock_flag);
+ if ((error= file->info(HA_STATUS_ERRKEY | no_lock_flag)))
+ DBUG_RETURN(error);
errkey= file->errkey;
}
if (flag & HA_STATUS_TIME)
@@ -8727,7 +8736,8 @@ int ha_partition::info(uint flag)
do
{
file= *file_array;
- file->info(HA_STATUS_TIME | no_lock_flag);
+ if ((error= file->info(HA_STATUS_TIME | no_lock_flag)))
+ DBUG_RETURN(error);
if (file->stats.update_time > stats.update_time)
stats.update_time= file->stats.update_time;
} while (*(++file_array));
@@ -10745,11 +10755,11 @@ int ha_partition::cmp_ref(const uchar *ref1, const uchar *ref2)
the underlying partitions require that the value should be re-calculated
*/
-void ha_partition::update_next_auto_inc_val()
+int ha_partition::update_next_auto_inc_val()
{
- if (!part_share->auto_inc_initialized ||
- need_info_for_auto_inc())
- info(HA_STATUS_AUTO);
+ if (!part_share->auto_inc_initialized || need_info_for_auto_inc())
+ return info(HA_STATUS_AUTO);
+ return 0;
}
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index c064f7662ce..4a4b899708e 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -1366,7 +1366,7 @@ public:
void release_auto_increment() override;
private:
int reset_auto_increment(ulonglong value) override;
- void update_next_auto_inc_val();
+ int update_next_auto_inc_val();
virtual void lock_auto_increment()
{
/* lock already taken */
diff --git a/sql/item_jsonfunc.cc b/sql/item_jsonfunc.cc
index 89ac59098ac..09b3856d578 100644
--- a/sql/item_jsonfunc.cc
+++ b/sql/item_jsonfunc.cc
@@ -18,7 +18,22 @@
#include "sql_priv.h"
#include "sql_class.h"
#include "item.h"
+#include "sql_parse.h" // For check_stack_overrun
+/*
+ Allocating memory and *also* using it (reading and
+ writing from it) because some build instructions cause
+ compiler to optimize out stack_used_up. Since alloca()
+ here depends on stack_used_up, it doesnt get executed
+ correctly and causes json_debug_nonembedded to fail
+ ( --error ER_STACK_OVERRUN_NEED_MORE does not occur).
+*/
+#define ALLOCATE_MEM_ON_STACK(A) do \
+ { \
+ uchar *array= (uchar*)alloca(A); \
+ bzero(array, A); \
+ my_checksum(0, array, A); \
+ } while(0)
/*
Compare ASCII string against the string with the specified
@@ -128,6 +143,113 @@ static int append_tab(String *js, int depth, int tab_size)
return 0;
}
+int json_path_parts_compare(
+ const json_path_step_t *a, const json_path_step_t *a_end,
+ const json_path_step_t *b, const json_path_step_t *b_end,
+ enum json_value_types vt)
+{
+ int res, res2;
+
+ long arbitrary_var;
+ long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+ DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+ {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+ if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL))
+ return 1;
+
+ while (a <= a_end)
+ {
+ if (b > b_end)
+ {
+ while (vt != JSON_VALUE_ARRAY &&
+ (a->type & JSON_PATH_ARRAY_WILD) == JSON_PATH_ARRAY &&
+ a->n_item == 0)
+ {
+ if (++a > a_end)
+ return 0;
+ }
+ return -2;
+ }
+
+ DBUG_ASSERT((b->type & (JSON_PATH_WILD | JSON_PATH_DOUBLE_WILD)) == 0);
+
+
+ if (a->type & JSON_PATH_ARRAY)
+ {
+ if (b->type & JSON_PATH_ARRAY)
+ {
+ if ((a->type & JSON_PATH_WILD) || a->n_item == b->n_item)
+ goto step_fits;
+ goto step_failed;
+ }
+ if ((a->type & JSON_PATH_WILD) == 0 && a->n_item == 0)
+ goto step_fits_autowrap;
+ goto step_failed;
+ }
+ else /* JSON_PATH_KEY */
+ {
+ if (!(b->type & JSON_PATH_KEY))
+ goto step_failed;
+
+ if (!(a->type & JSON_PATH_WILD) &&
+ (a->key_end - a->key != b->key_end - b->key ||
+ memcmp(a->key, b->key, a->key_end - a->key) != 0))
+ goto step_failed;
+
+ goto step_fits;
+ }
+step_failed:
+ if (!(a->type & JSON_PATH_DOUBLE_WILD))
+ return -1;
+ b++;
+ continue;
+
+step_fits:
+ b++;
+ if (!(a->type & JSON_PATH_DOUBLE_WILD))
+ {
+ a++;
+ continue;
+ }
+
+ /* Double wild handling needs recursions. */
+ res= json_path_parts_compare(a+1, a_end, b, b_end, vt);
+ if (res == 0)
+ return 0;
+
+ res2= json_path_parts_compare(a, a_end, b, b_end, vt);
+
+ return (res2 >= 0) ? res2 : res;
+
+step_fits_autowrap:
+ if (!(a->type & JSON_PATH_DOUBLE_WILD))
+ {
+ a++;
+ continue;
+ }
+
+ /* Double wild handling needs recursions. */
+ res= json_path_parts_compare(a+1, a_end, b+1, b_end, vt);
+ if (res == 0)
+ return 0;
+
+ res2= json_path_parts_compare(a, a_end, b+1, b_end, vt);
+
+ return (res2 >= 0) ? res2 : res;
+
+ }
+
+ return b <= b_end;
+}
+
+
+int json_path_compare(const json_path_t *a, const json_path_t *b,
+ enum json_value_types vt)
+{
+ return json_path_parts_compare(a->steps+1, a->last_step,
+ b->steps+1, b->last_step, vt);
+}
+
static int json_nice(json_engine_t *je, String *nice_js,
Item_func_json_format::formats mode, int tab_size=4)
@@ -1088,6 +1210,12 @@ static int check_contains(json_engine_t *js, json_engine_t *value)
{
json_engine_t loc_js;
bool set_js;
+ long arbitrary_var;
+ long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+ DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+ {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+ if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL))
+ return 1;
switch (js->value_type)
{
@@ -2051,6 +2179,14 @@ err_return:
static int do_merge(String *str, json_engine_t *je1, json_engine_t *je2)
{
+
+ long arbitrary_var;
+ long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+ DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+ {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+ if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL))
+ return 1;
+
if (json_read_value(je1) || json_read_value(je2))
return 1;
@@ -2385,6 +2521,13 @@ static int copy_value_patch(String *str, json_engine_t *je)
static int do_merge_patch(String *str, json_engine_t *je1, json_engine_t *je2,
bool *empty_result)
{
+ long arbitrary_var;
+ long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+ DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+ {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+ if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL))
+ return 1;
+
if (json_read_value(je1) || json_read_value(je2))
return 1;
diff --git a/sql/json_table.cc b/sql/json_table.cc
index 164c05eff41..4d07cf0d367 100644
--- a/sql/json_table.cc
+++ b/sql/json_table.cc
@@ -25,9 +25,26 @@
#include "sql_show.h"
#include "sql_select.h"
#include "create_tmp_table.h"
+#include "sql_parse.h"
#define HA_ERR_JSON_TABLE (HA_ERR_LAST+1)
+/*
+ Allocating memory and *also* using it (reading and
+ writing from it) because some build instructions cause
+ compiler to optimize out stack_used_up. Since alloca()
+ here depends on stack_used_up, it doesnt get executed
+ correctly and causes json_debug_nonembedded to fail
+ ( --error ER_STACK_OVERRUN_NEED_MORE does not occur).
+*/
+#define ALLOCATE_MEM_ON_STACK(A) do \
+ { \
+ uchar *array= (uchar*)alloca(A); \
+ array[0]= 1; \
+ array[0]++; \
+ array[0] ? array[0]++ : array[0]--; \
+ } while(0)
+
class table_function_handlerton
{
public:
@@ -101,6 +118,13 @@ int get_disallowed_table_deps_for_list(MEM_ROOT *mem_root,
NESTED_JOIN *nested_join;
List_iterator<TABLE_LIST> li(*join_list);
+ long arbitrary_var;
+ long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+ DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+ {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+ if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL))
+ return 1;
+
while ((table= li++))
{
if ((nested_join= table->nested_join))
@@ -1303,6 +1327,14 @@ static void add_extra_deps(List<TABLE_LIST> *join_list, table_map deps)
{
TABLE_LIST *table;
List_iterator<TABLE_LIST> li(*join_list);
+
+ long arbitrary_var;
+ long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+ DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+ {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+ if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL))
+ return;
+
while ((table= li++))
{
table->dep_tables |= deps;
@@ -1391,6 +1423,13 @@ table_map add_table_function_dependencies(List<TABLE_LIST> *join_list,
table_map res= 0;
List_iterator<TABLE_LIST> li(*join_list);
+ long arbitrary_var;
+ long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+ DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+ {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+ if ((res=check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL)))
+ return res;
+
// Recursively compute extra dependencies
while ((table= li++))
{
diff --git a/sql/log.cc b/sql/log.cc
index 468f171ae2c..af09e04cf15 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -2238,7 +2238,7 @@ static int binlog_rollback(handlerton *hton, THD *thd, bool all)
error |= binlog_commit_flush_stmt_cache(thd, all, cache_mngr);
}
- if (cache_mngr->trx_cache.empty() &&
+ if (!cache_mngr->trx_cache.has_incident() && cache_mngr->trx_cache.empty() &&
thd->transaction->xid_state.get_state_code() != XA_PREPARED)
{
/*
@@ -5928,7 +5928,6 @@ void THD::binlog_prepare_for_row_logging()
bool THD::binlog_write_annotated_row(Log_event_writer *writer)
{
- int error;
DBUG_ENTER("THD::binlog_write_annotated_row");
if (!(IF_WSREP(!wsrep_fragments_certified_for_stmt(this), true) &&
@@ -5937,13 +5936,7 @@ bool THD::binlog_write_annotated_row(Log_event_writer *writer)
DBUG_RETURN(0);
Annotate_rows_log_event anno(this, 0, false);
- if (unlikely((error= writer->write(&anno))))
- {
- if (my_errno == EFBIG)
- writer->set_incident();
- DBUG_RETURN(error);
- }
- DBUG_RETURN(0);
+ DBUG_RETURN(writer->write(&anno));
}
@@ -6016,21 +6009,22 @@ bool THD::binlog_write_table_maps()
/**
- This function writes a table map to the binary log.
- Note that in order to keep the signature uniform with related methods,
- we use a redundant parameter to indicate whether a transactional table
- was changed or not.
+ This function writes a table map to the binary log.
- @param table a pointer to the table.
- @param with_annotate If true call binlog_write_annotated_row()
+ If an error occurs while writing events and rollback is not possible, e.g.
+ due to the statement modifying a non-transactional table, an incident event
+ is logged.
+ @param table a pointer to the table.
+ @param with_annotate @c true to write an annotate event before writing
+ the table_map event, @c false otherwise.
@return
nonzero if an error pops up when writing the table map event.
*/
bool THD::binlog_write_table_map(TABLE *table, bool with_annotate)
{
- int error;
+ int error= 1;
bool is_transactional= table->file->row_logging_has_trans;
DBUG_ENTER("THD::binlog_write_table_map");
DBUG_PRINT("enter", ("table: %p (%s: #%lu)",
@@ -6056,12 +6050,34 @@ bool THD::binlog_write_table_map(TABLE *table, bool with_annotate)
if (with_annotate)
if (binlog_write_annotated_row(&writer))
- DBUG_RETURN(1);
+ goto write_err;
+
+ DBUG_EXECUTE_IF("table_map_write_error",
+ {
+ if (is_transactional)
+ {
+ my_errno= EFBIG;
+ goto write_err;
+ }
+ });
if (unlikely((error= writer.write(&the_event))))
- DBUG_RETURN(error);
+ goto write_err;
DBUG_RETURN(0);
+
+write_err:
+ mysql_bin_log.set_write_error(this, is_transactional);
+ /*
+ For non-transactional engine or multi statement transaction with mixed
+ engines, data is written to table but writing to binary log failed. In
+ these scenarios rollback is not possible. Hence report an incident.
+ */
+ if (mysql_bin_log.check_write_error(this) && cache_data &&
+ lex->stmt_accessed_table(LEX::STMT_WRITES_NON_TRANS_TABLE) &&
+ table->current_lock == F_WRLCK)
+ cache_data->set_incident();
+ DBUG_RETURN(error);
}
@@ -6462,11 +6478,13 @@ MYSQL_BIN_LOG::bump_seq_no_counter_if_needed(uint32 domain_id, uint64 seq_no)
bool
MYSQL_BIN_LOG::check_strict_gtid_sequence(uint32 domain_id,
uint32 server_id_arg,
- uint64 seq_no)
+ uint64 seq_no,
+ bool no_error)
{
return rpl_global_gtid_binlog_state.check_strict_sequence(domain_id,
server_id_arg,
- seq_no);
+ seq_no,
+ no_error);
}
@@ -7495,7 +7513,9 @@ bool MYSQL_BIN_LOG::write_incident(THD *thd)
if (likely(is_open()))
{
prev_binlog_id= current_binlog_id;
- if (likely(!(error= write_incident_already_locked(thd))) &&
+ if (likely(!(error= DBUG_IF("incident_event_write_error")
+ ? 1
+ : write_incident_already_locked(thd))) &&
likely(!(error= flush_and_sync(0))))
{
update_binlog_end_pos();
@@ -7524,6 +7544,22 @@ bool MYSQL_BIN_LOG::write_incident(THD *thd)
mysql_mutex_unlock(&LOCK_log);
}
+ /*
+ Upon writing incident event, check for thd->error() and print the
+ relevant error message in the error log.
+ */
+ if (thd->is_error())
+ {
+ sql_print_error("Write to binary log failed: "
+ "%s. An incident event is written to binary log "
+ "and slave will be stopped.\n",
+ thd->get_stmt_da()->message());
+ }
+ if (error)
+ {
+ sql_print_error("Incident event write to the binary log file failed.");
+ }
+
DBUG_RETURN(error);
}
diff --git a/sql/log.h b/sql/log.h
index 516fb36adb9..aec48263d84 100644
--- a/sql/log.h
+++ b/sql/log.h
@@ -920,7 +920,7 @@ public:
bool lookup_domain_in_binlog_state(uint32 domain_id, rpl_gtid *out_gtid);
int bump_seq_no_counter_if_needed(uint32 domain_id, uint64 seq_no);
bool check_strict_gtid_sequence(uint32 domain_id, uint32 server_id,
- uint64 seq_no);
+ uint64 seq_no, bool no_error= false);
/**
* used when opening new file, and binlog_end_pos moves backwards
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index ea1bfa1aaf3..c2eba615d51 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -1851,6 +1851,13 @@ extern "C" sig_handler print_signal_warning(int sig)
#endif
}
+#ifdef _WIN32
+typedef void (*report_svc_status_t)(DWORD current_state, DWORD win32_exit_code,
+ DWORD wait_hint);
+static void dummy_svc_status(DWORD, DWORD, DWORD) {}
+static report_svc_status_t my_report_svc_status= dummy_svc_status;
+#endif
+
#ifndef EMBEDDED_LIBRARY
extern "C" void unireg_abort(int exit_code)
{
@@ -1896,13 +1903,6 @@ extern "C" void unireg_abort(int exit_code)
mysqld_exit(exit_code);
}
-#ifdef _WIN32
-typedef void (*report_svc_status_t)(DWORD current_state, DWORD win32_exit_code,
- DWORD wait_hint);
-static void dummy_svc_status(DWORD, DWORD, DWORD) {}
-static report_svc_status_t my_report_svc_status= dummy_svc_status;
-#endif
-
static void mysqld_exit(int exit_code)
{
DBUG_ENTER("mysqld_exit");
@@ -4593,6 +4593,7 @@ void ssl_acceptor_stats_update(int sslaccept_ret)
static void init_ssl()
{
+#if !defined(EMBEDDED_LIBRARY)
/*
Not need to check require_secure_transport on the Linux,
because it always has Unix domain sockets that are secure:
@@ -4608,7 +4609,7 @@ static void init_ssl()
unireg_abort(1);
}
#endif
-#if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY)
+#if defined(HAVE_OPENSSL)
if (opt_use_ssl)
{
enum enum_ssl_init_error error= SSL_INITERR_NOERROR;
@@ -4649,7 +4650,8 @@ static void init_ssl()
}
if (des_key_file)
load_des_key_file(des_key_file);
-#endif /* HAVE_OPENSSL && ! EMBEDDED_LIBRARY */
+#endif /* HAVE_OPENSSL */
+#endif /* !EMBEDDED_LIBRARY */
}
/* Reinitialize SSL (FLUSH SSL) */
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index ace55e77485..4148065c7ee 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -3032,6 +3032,7 @@ void optimize_semi_joins(JOIN *join, table_map remaining_tables, uint idx,
void update_sj_state(JOIN *join, const JOIN_TAB *new_tab,
uint idx, table_map remaining_tables)
{
+ DBUG_ASSERT(!join->emb_sjm_nest);
if (TABLE_LIST *emb_sj_nest= new_tab->emb_sj_nest)
{
join->cur_sj_inner_tables |= emb_sj_nest->sj_inner_tables;
diff --git a/sql/rpl_gtid.cc b/sql/rpl_gtid.cc
index 306ae878060..2124980693a 100644
--- a/sql/rpl_gtid.cc
+++ b/sql/rpl_gtid.cc
@@ -1743,7 +1743,7 @@ rpl_binlog_state::alloc_element_nolock(const rpl_gtid *gtid)
*/
bool
rpl_binlog_state::check_strict_sequence(uint32 domain_id, uint32 server_id,
- uint64 seq_no)
+ uint64 seq_no, bool no_error)
{
element *elem;
bool res= 0;
@@ -1754,9 +1754,10 @@ rpl_binlog_state::check_strict_sequence(uint32 domain_id, uint32 server_id,
sizeof(domain_id))) &&
elem->last_gtid && elem->last_gtid->seq_no >= seq_no)
{
- my_error(ER_GTID_STRICT_OUT_OF_ORDER, MYF(0), domain_id, server_id, seq_no,
- elem->last_gtid->domain_id, elem->last_gtid->server_id,
- elem->last_gtid->seq_no);
+ if (!no_error)
+ my_error(ER_GTID_STRICT_OUT_OF_ORDER, MYF(0), domain_id, server_id, seq_no,
+ elem->last_gtid->domain_id, elem->last_gtid->server_id,
+ elem->last_gtid->seq_no);
res= 1;
}
mysql_mutex_unlock(&LOCK_binlog_state);
diff --git a/sql/rpl_gtid.h b/sql/rpl_gtid.h
index 531d746763b..c8decff8fe8 100644
--- a/sql/rpl_gtid.h
+++ b/sql/rpl_gtid.h
@@ -317,7 +317,8 @@ struct rpl_binlog_state
int update_with_next_gtid(uint32 domain_id, uint32 server_id,
rpl_gtid *gtid);
int alloc_element_nolock(const rpl_gtid *gtid);
- bool check_strict_sequence(uint32 domain_id, uint32 server_id, uint64 seq_no);
+ bool check_strict_sequence(uint32 domain_id, uint32 server_id, uint64 seq_no,
+ bool no_error= false);
int bump_seq_no_if_needed(uint32 domain_id, uint64 seq_no);
int write_to_iocache(IO_CACHE *dest);
int read_from_iocache(IO_CACHE *src);
diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc
index 4fd36891a4b..7b438b3efbe 100644
--- a/sql/rpl_mi.cc
+++ b/sql/rpl_mi.cc
@@ -43,7 +43,8 @@ Master_info::Master_info(LEX_CSTRING *connection_name_arg,
gtid_reconnect_event_skip_count(0), gtid_event_seen(false),
in_start_all_slaves(0), in_stop_all_slaves(0), in_flush_all_relay_logs(0),
users(0), killed(0),
- total_ddl_groups(0), total_non_trans_groups(0), total_trans_groups(0)
+ total_ddl_groups(0), total_non_trans_groups(0), total_trans_groups(0),
+ do_accept_own_server_id(false)
{
char *tmp;
host[0] = 0; user[0] = 0; password[0] = 0;
diff --git a/sql/rpl_mi.h b/sql/rpl_mi.h
index a4a06d42a5c..5b0088ca65a 100644
--- a/sql/rpl_mi.h
+++ b/sql/rpl_mi.h
@@ -352,6 +352,20 @@ class Master_info : public Slave_reporting_capability
ACK from slave, or if delay_master is enabled.
*/
int semi_ack;
+ /*
+ The flag has replicate_same_server_id semantics and is raised to accept
+ a same-server-id event group by the gtid strict mode semisync slave.
+ Own server-id events can normally appear as result of EITHER
+ A. this server semisync (failover to) slave crash-recovery:
+ the transaction was created on this server then being master,
+ got replicated elsewhere right before the crash before commit,
+ and finally at recovery the transaction gets evicted from the
+ server's binlog and its gtid (slave) state; OR
+ B. in a general circular configuration and then when a recieved (returned
+ to slave) gtid exists in the server's binlog. Then, in gtid strict mode,
+ it must be ignored similarly to the replicate-same-server-id rule.
+ */
+ bool do_accept_own_server_id;
};
int init_master_info(Master_info* mi, const char* master_info_fname,
diff --git a/sql/slave.cc b/sql/slave.cc
index b9ac71a9842..16b47c52179 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -5047,6 +5047,7 @@ err_during_init:
mi->abort_slave= 0;
mi->slave_running= MYSQL_SLAVE_NOT_RUN;
mi->io_thd= 0;
+ mi->do_accept_own_server_id= false;
/*
Note: the order of the two following calls (first broadcast, then unlock)
is important. Otherwise a killer_thread can execute between the calls and
@@ -6186,15 +6187,6 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len)
bool is_malloc = false;
bool is_rows_event= false;
/*
- The flag has replicate_same_server_id semantics and is raised to accept
- a same-server-id event group by the gtid strict mode semisync slave.
- Own server-id events can appear as result of this server crash-recovery:
- the transaction was created on this server then being master, got replicated
- elsewhere right before the crash before commit;
- finally at recovery the transaction gets evicted from the server's binlog.
- */
- bool do_accept_own_server_id;
- /*
FD_q must have been prepared for the first R_a event
inside get_master_version_and_clock()
Show-up of FD:s affects checksum_alg at once because
@@ -6783,6 +6775,19 @@ dbug_gtid_accept:
++mi->events_queued_since_last_gtid;
inc_pos= event_len;
+
+ /*
+ To compute `true` is normal for this *now* semisync slave server when
+ it has passed its crash-recovery as a former master.
+ */
+ mi->do_accept_own_server_id=
+ (s_id == global_system_variables.server_id &&
+ rpl_semi_sync_slave_enabled && opt_gtid_strict_mode &&
+ mi->using_gtid != Master_info::USE_GTID_NO &&
+ !mysql_bin_log.check_strict_gtid_sequence(event_gtid.domain_id,
+ event_gtid.server_id,
+ event_gtid.seq_no,
+ true));
// ...} eof else_likely
}
break;
@@ -6965,10 +6970,6 @@ dbug_gtid_accept:
break;
}
- do_accept_own_server_id= (s_id == global_system_variables.server_id
- && rpl_semi_sync_slave_enabled && opt_gtid_strict_mode
- && mi->using_gtid != Master_info::USE_GTID_NO);
-
/*
Integrity of Rows- event group check.
A sequence of Rows- events must end with STMT_END_F flagged one.
@@ -7059,7 +7060,7 @@ dbug_gtid_accept:
else
if ((s_id == global_system_variables.server_id &&
!(mi->rli.replicate_same_server_id ||
- do_accept_own_server_id)) ||
+ mi->do_accept_own_server_id)) ||
event_that_should_be_ignored(buf) ||
/*
the following conjunction deals with IGNORE_SERVER_IDS, if set
@@ -7119,7 +7120,7 @@ dbug_gtid_accept:
}
else
{
- if (do_accept_own_server_id)
+ if (mi->do_accept_own_server_id)
{
int2store(const_cast<uchar*>(buf + FLAGS_OFFSET),
uint2korr(buf + FLAGS_OFFSET) | LOG_EVENT_ACCEPT_OWN_F);
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index e2da2729249..4fb8070fee8 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -7744,6 +7744,39 @@ bool setup_fields(THD *thd, Ref_ptr_array ref_pointer_array,
DBUG_RETURN(MY_TEST(thd->is_error()));
}
+/*
+ make list of leaves for a single TABLE_LIST
+
+ SYNOPSIS
+ make_leaves_for_single_table()
+ thd Thread handler
+ leaves List of leaf tables to be filled
+ table TABLE_LIST object to process
+ full_table_list Whether to include tables from mergeable derived table/view
+*/
+void make_leaves_for_single_table(THD *thd, List<TABLE_LIST> &leaves,
+ TABLE_LIST *table, bool& full_table_list,
+ TABLE_LIST *boundary)
+{
+ if (table == boundary)
+ full_table_list= !full_table_list;
+ if (full_table_list && table->is_merged_derived())
+ {
+ SELECT_LEX *select_lex= table->get_single_select();
+ /*
+ It's safe to use select_lex->leaf_tables because all derived
+ tables/views were already prepared and has their leaf_tables
+ set properly.
+ */
+ make_leaves_list(thd, leaves, select_lex->get_table_list(),
+ full_table_list, boundary);
+ }
+ else
+ {
+ leaves.push_back(table, thd->mem_root);
+ }
+}
+
/*
Perform checks like all given fields exists, if exists fill struct with
@@ -7770,40 +7803,79 @@ int setup_returning_fields(THD* thd, TABLE_LIST* table_list)
SYNOPSIS
make_leaves_list()
- list pointer to pointer on list first element
- tables table list
- full_table_list whether to include tables from mergeable derived table/view.
- we need them for checks for INSERT/UPDATE statements only.
-
- RETURN pointer on pointer to next_leaf of last element
+ leaves List of leaf tables to be filled
+ tables Table list
+ full_table_list Whether to include tables from mergeable derived table/view.
+ We need them for checks for INSERT/UPDATE statements only.
*/
-void make_leaves_list(THD *thd, List<TABLE_LIST> &list, TABLE_LIST *tables,
+void make_leaves_list(THD *thd, List<TABLE_LIST> &leaves, TABLE_LIST *tables,
bool full_table_list, TABLE_LIST *boundary)
{
for (TABLE_LIST *table= tables; table; table= table->next_local)
{
- if (table == boundary)
- full_table_list= !full_table_list;
- if (full_table_list && table->is_merged_derived())
- {
- SELECT_LEX *select_lex= table->get_single_select();
- /*
- It's safe to use select_lex->leaf_tables because all derived
- tables/views were already prepared and has their leaf_tables
- set properly.
- */
- make_leaves_list(thd, list, select_lex->get_table_list(),
- full_table_list, boundary);
- }
- else
- {
- list.push_back(table, thd->mem_root);
- }
+ make_leaves_for_single_table(thd, leaves, table, full_table_list,
+ boundary);
+ }
+}
+
+
+/*
+ Setup the map and other attributes for a single TABLE_LIST object
+
+ SYNOPSIS
+ setup_table_attributes()
+ thd Thread handler
+ table_list TABLE_LIST object to process
+ first_select_table First table participating in SELECT for INSERT..SELECT
+ statements, NULL for other cases
+ tablenr Serial number of the table in the SQL statement
+
+ RETURN
+ false Success
+ true Failure
+*/
+bool setup_table_attributes(THD *thd, TABLE_LIST *table_list,
+ TABLE_LIST *first_select_table,
+ uint &tablenr)
+{
+ TABLE *table= table_list->table;
+ if (table)
+ table->pos_in_table_list= table_list;
+ if (first_select_table && table_list->top_table() == first_select_table)
+ {
+ /* new counting for SELECT of INSERT ... SELECT command */
+ first_select_table= 0;
+ thd->lex->first_select_lex()->insert_tables= tablenr;
+ tablenr= 0;
+ }
+ if (table_list->jtbm_subselect)
+ {
+ table_list->jtbm_table_no= tablenr;
+ }
+ else if (table)
+ {
+ table->pos_in_table_list= table_list;
+ setup_table_map(table, table_list, tablenr);
+
+ if (table_list->process_index_hints(table))
+ return true;
}
+ tablenr++;
+ /*
+ We test the max tables here as we setup_table_map() should not be called
+ with tablenr >= 64
+ */
+ if (tablenr > MAX_TABLES)
+ {
+ my_error(ER_TOO_MANY_TABLES, MYF(0), static_cast<int>(MAX_TABLES));
+ return true;
+ }
+ return false;
}
+
/*
prepare tables
@@ -7860,7 +7932,14 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
leaves.empty();
if (select_lex->prep_leaf_list_state != SELECT_LEX::SAVED)
{
- make_leaves_list(thd, leaves, tables, full_table_list, first_select_table);
+ /*
+ For INSERT ... SELECT statements we must not include the first table
+ (where the data is being inserted into) in the list of leaves
+ */
+ TABLE_LIST *tables_for_leaves=
+ select_insert ? first_select_table : tables;
+ make_leaves_list(thd, leaves, tables_for_leaves, full_table_list,
+ first_select_table);
select_lex->prep_leaf_list_state= SELECT_LEX::READY;
select_lex->leaf_tables_exec.empty();
}
@@ -7871,40 +7950,33 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
leaves.push_back(table_list, thd->mem_root);
}
+ List_iterator<TABLE_LIST> ti(leaves);
while ((table_list= ti++))
{
- TABLE *table= table_list->table;
- if (table)
- table->pos_in_table_list= table_list;
- if (first_select_table &&
- table_list->top_table() == first_select_table)
- {
- /* new counting for SELECT of INSERT ... SELECT command */
- first_select_table= 0;
- thd->lex->first_select_lex()->insert_tables= tablenr;
- tablenr= 0;
- }
- if(table_list->jtbm_subselect)
- {
- table_list->jtbm_table_no= tablenr;
- }
- else if (table)
- {
- table->pos_in_table_list= table_list;
- setup_table_map(table, table_list, tablenr);
+ if (setup_table_attributes(thd, table_list, first_select_table, tablenr))
+ DBUG_RETURN(1);
+ }
- if (table_list->process_index_hints(table))
- DBUG_RETURN(1);
- }
- tablenr++;
+ if (select_insert)
+ {
/*
- We test the max tables here as we setup_table_map() should not be called
- with tablenr >= 64
+ The table/view into which the data is inserted must not be included in
+ the leaf_tables list. But we still need this table/view in order to set
+ up its attributes. So build a temporary list of leaves and set up
+ attributes for the tables it includes
*/
- if (tablenr > MAX_TABLES)
+ List<TABLE_LIST> leaves;
+ TABLE_LIST *table= tables;
+
+ make_leaves_for_single_table(thd, leaves, table, full_table_list,
+ first_select_table);
+
+ List_iterator<TABLE_LIST> ti(leaves);
+ while ((table_list= ti++))
{
- my_error(ER_TOO_MANY_TABLES,MYF(0), static_cast<int>(MAX_TABLES));
- DBUG_RETURN(1);
+ if (setup_table_attributes(thd, table_list, first_select_table,
+ tablenr))
+ DBUG_RETURN(1);
}
}
}
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 23a1566e573..c07c70f55a4 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1860,7 +1860,7 @@ void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
extern std::atomic<my_thread_id> shutdown_thread_id;
void THD::awake_no_mutex(killed_state state_to_set)
{
- DBUG_ENTER("THD::awake");
+ DBUG_ENTER("THD::awake_no_mutex");
DBUG_PRINT("enter", ("this: %p current_thd: %p state: %d",
this, current_thd, (int) state_to_set));
THD_CHECK_SENTRY(this);
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 73bc914746d..9507aaf3ab7 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1367,8 +1367,12 @@ values_loop_end:
thd->lex->current_select->save_leaf_tables(thd);
thd->lex->current_select->first_cond_optimization= 0;
}
- if (readbuff)
- my_free(readbuff);
+
+ my_free(readbuff);
+#ifndef EMBEDDED_LIBRARY
+ if (lock_type == TL_WRITE_DELAYED && table->expr_arena)
+ table->expr_arena->free_items();
+#endif
DBUG_RETURN(FALSE);
abort:
@@ -1385,6 +1389,8 @@ abort:
*/
for (Field **ptr= table_list->table->field ; *ptr ; ptr++)
(*ptr)->free();
+ if (table_list->table->expr_arena)
+ table_list->table->expr_arena->free_items();
}
#endif
if (table != NULL)
@@ -1563,8 +1569,7 @@ static bool mysql_prepare_insert_check_table(THD *thd, TABLE_LIST *table_list,
if (insert_into_view && !fields.elements)
{
thd->lex->empty_field_list_on_rset= 1;
- if (!thd->lex->first_select_lex()->leaf_tables.head()->table ||
- table_list->is_multitable())
+ if (!table_list->table || table_list->is_multitable())
{
my_error(ER_VIEW_NO_INSERT_FIELD_LIST, MYF(0),
table_list->view_db.str, table_list->view_name.str);
@@ -3820,7 +3825,6 @@ int mysql_insert_select_prepare(THD *thd, select_result *sel_res)
if (sel_res)
sel_res->prepare(lex->returning()->item_list, NULL);
- DBUG_ASSERT(select_lex->leaf_tables.elements != 0);
List_iterator<TABLE_LIST> ti(select_lex->leaf_tables);
TABLE_LIST *table;
uint insert_tables;
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 987a4d950ea..8a757ab16a8 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -10492,11 +10492,13 @@ void LEX::relink_hack(st_select_lex *select_lex)
{
if (!select_stack_top) // Statements of the second type
{
- if (!select_lex->get_master()->get_master())
- ((st_select_lex *) select_lex->get_master())->
- set_master(&builtin_select);
- if (!builtin_select.get_slave())
- builtin_select.set_slave(select_lex->get_master());
+ if (!select_lex->outer_select() &&
+ !builtin_select.first_inner_unit())
+ {
+ builtin_select.register_unit(select_lex->master_unit(),
+ &builtin_select.context);
+ builtin_select.add_statistics(select_lex->master_unit());
+ }
}
}
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 3233234fd39..71014cfdeb1 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -765,7 +765,6 @@ public:
}
inline st_select_lex_node* get_master() { return master; }
- inline st_select_lex_node* get_slave() { return slave; }
void include_down(st_select_lex_node *upper);
void add_slave(st_select_lex_node *slave_arg);
void include_neighbour(st_select_lex_node *before);
@@ -1744,15 +1743,6 @@ public:
Sroutine_hash_entry **sroutines_list_own_last;
uint sroutines_list_own_elements;
- /**
- Number of tables which were open by open_tables() and to be locked
- by lock_tables().
- Note that we set this member only in some cases, when this value
- needs to be passed from open_tables() to lock_tables() which are
- separated by some amount of code.
- */
- uint table_count;
-
/*
These constructor and destructor serve for creation/destruction
of Query_tables_list instances which are used as backup storage.
@@ -3449,7 +3439,7 @@ public:
stores total number of tables. For LEX representing multi-delete
holds number of tables from which we will delete records.
*/
- uint table_count;
+ uint table_count_update;
uint8 describe;
/*
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index f8f61c66df0..dd7ca5d877a 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -3323,6 +3323,7 @@ bool run_set_statement_if_requested(THD *thd, LEX *lex)
{
switch (v->var->option.var_type & GET_TYPE_MASK)
{
+ case GET_BIT:
case GET_BOOL:
case GET_INT:
case GET_LONG:
@@ -4853,7 +4854,7 @@ mysql_execute_command(THD *thd, bool is_called_from_prepared_stmt)
if (likely(!thd->is_fatal_error))
{
result= new (thd->mem_root) multi_delete(thd, aux_tables,
- lex->table_count);
+ lex->table_count_update);
if (likely(result))
{
if (unlikely(select_lex->vers_setup_conds(thd, aux_tables)))
@@ -9748,12 +9749,12 @@ bool multi_delete_set_locks_and_link_aux_tables(LEX *lex)
TABLE_LIST *target_tbl;
DBUG_ENTER("multi_delete_set_locks_and_link_aux_tables");
- lex->table_count= 0;
+ lex->table_count_update= 0;
for (target_tbl= lex->auxiliary_table_list.first;
target_tbl; target_tbl= target_tbl->next_local)
{
- lex->table_count++;
+ lex->table_count_update++;
/* All tables in aux_tables must be found in FROM PART */
TABLE_LIST *walk= multi_delete_table_match(lex, target_tbl, tables);
if (!walk)
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index e3979867285..8606bc10dbc 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1443,7 +1443,7 @@ static int mysql_test_update(Prepared_statement *stmt,
DBUG_ASSERT(update_source_table || table_list->view != 0);
DBUG_PRINT("info", ("Switch to multi-update"));
/* pass counter value */
- thd->lex->table_count= table_count;
+ thd->lex->table_count_update= table_count;
/* convert to multiupdate */
DBUG_RETURN(2);
}
diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc
index 1dfa238de50..1490ad554d7 100644
--- a/sql/sql_reload.cc
+++ b/sql/sql_reload.cc
@@ -601,6 +601,7 @@ bool flush_tables_with_read_lock(THD *thd, TABLE_LIST *all_tables)
if (table_list->is_view_or_derived())
continue;
if (thd->lex->type & REFRESH_FOR_EXPORT &&
+ table_list->table &&
!(table_list->table->file->ha_table_flags() & HA_CAN_EXPORT))
{
my_error(ER_ILLEGAL_HA, MYF(0),table_list->table->file->table_type(),
@@ -608,6 +609,7 @@ bool flush_tables_with_read_lock(THD *thd, TABLE_LIST *all_tables)
goto error_reset_bits;
}
if (thd->lex->type & REFRESH_READ_LOCK &&
+ table_list->table &&
table_list->table->file->extra(HA_EXTRA_FLUSH))
goto error_reset_bits;
}
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index c9189e84829..728cb8f947b 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -787,7 +787,22 @@ void remove_redundant_subquery_clauses(st_select_lex *subq_select_lex)
Here SUBQ cannot be removed.
*/
if (!ord->in_field_list)
+ {
(*ord->item)->walk(&Item::eliminate_subselect_processor, FALSE, NULL);
+ /*
+ Remove from the JOIN::all_fields list any reference to the elements
+ of the eliminated GROUP BY list unless it is 'in_field_list'.
+ This is needed in order not to confuse JOIN::make_aggr_tables_info()
+ when it constructs different structure for execution phase.
+ */
+ List_iterator<Item> li(subq_select_lex->join->all_fields);
+ Item *item;
+ while ((item= li++))
+ {
+ if (item == *ord->item)
+ li.remove();
+ }
+ }
}
subq_select_lex->join->group_list= NULL;
subq_select_lex->group_list.empty();
@@ -2012,7 +2027,6 @@ JOIN::optimize_inner()
/* Merge all mergeable derived tables/views in this SELECT. */
if (select_lex->handle_derived(thd->lex, DT_MERGE))
DBUG_RETURN(TRUE);
- table_count= select_lex->leaf_tables.elements;
}
if (select_lex->first_cond_optimization &&
@@ -2060,8 +2074,6 @@ JOIN::optimize_inner()
eval_select_list_used_tables();
- table_count= select_lex->leaf_tables.elements;
-
if (select_lex->options & OPTION_SCHEMA_TABLE &&
optimize_schema_tables_memory_usage(select_lex->leaf_tables))
DBUG_RETURN(1);
@@ -9213,7 +9225,8 @@ greedy_search(JOIN *join,
picked semi-join operation is in best_pos->...picker, but we need to
update the global state in the JOIN object, too.
*/
- update_sj_state(join, best_table, idx, remaining_tables);
+ if (!join->emb_sjm_nest)
+ update_sj_state(join, best_table, idx, remaining_tables);
/* find the position of 'best_table' in 'join->best_ref' */
best_idx= idx;
@@ -14394,7 +14407,6 @@ void JOIN::cleanup(bool full)
/* Free the original optimized join created for the group_by_handler */
join_tab= original_join_tab;
original_join_tab= 0;
- table_count= original_table_count;
}
if (join_tab)
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 7a72d0efe42..d72fc82be81 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -1289,7 +1289,6 @@ public:
Pushdown_query *pushdown_query;
JOIN_TAB *original_join_tab;
- uint original_table_count;
/******* Join optimization state members start *******/
/*
diff --git a/sql/sql_sequence.cc b/sql/sql_sequence.cc
index 07571c3bbac..11b5109c349 100644
--- a/sql/sql_sequence.cc
+++ b/sql/sql_sequence.cc
@@ -706,7 +706,9 @@ longlong SEQUENCE::next_value(TABLE *table, bool second_round, int *error)
{
longlong res_value, org_reserved_until, add_to;
bool out_of_values;
+ THD *thd= table->in_use;
DBUG_ENTER("SEQUENCE::next_value");
+ DBUG_ASSERT(thd);
*error= 0;
if (!second_round)
@@ -771,7 +773,8 @@ longlong SEQUENCE::next_value(TABLE *table, bool second_round, int *error)
DBUG_RETURN(next_value(table, 1, error));
}
- if (unlikely((*error= write(table, 0))))
+ if (unlikely((*error= write(table, thd->variables.binlog_row_image !=
+ BINLOG_ROW_IMAGE_MINIMAL))))
{
reserved_until= org_reserved_until;
next_free_value= res_value;
@@ -838,7 +841,9 @@ int SEQUENCE::set_value(TABLE *table, longlong next_val, ulonglong next_round,
longlong org_reserved_until= reserved_until;
longlong org_next_free_value= next_free_value;
ulonglong org_round= round;
+ THD *thd= table->in_use;
DBUG_ENTER("SEQUENCE::set_value");
+ DBUG_ASSERT(thd);
write_lock(table);
if (is_used)
@@ -877,7 +882,8 @@ int SEQUENCE::set_value(TABLE *table, longlong next_val, ulonglong next_round,
needs_to_be_stored)
{
reserved_until= next_free_value;
- if (write(table, 0))
+ if (write(table,
+ thd->variables.binlog_row_image != BINLOG_ROW_IMAGE_MINIMAL))
{
reserved_until= org_reserved_until;
next_free_value= org_next_free_value;
diff --git a/sql/sql_type.cc b/sql/sql_type.cc
index a76843e4296..e33c8766ed0 100644
--- a/sql/sql_type.cc
+++ b/sql/sql_type.cc
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2015, 2021, MariaDB
+ Copyright (c) 2015, 2022, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1959,6 +1959,9 @@ Type_collection_std::aggregate_for_comparison(const Type_handler *ha,
return ha;
}
}
+ if ((a == INT_RESULT && b == STRING_RESULT) ||
+ (b == INT_RESULT && a == STRING_RESULT))
+ return &type_handler_newdecimal;
if ((a == INT_RESULT || a == DECIMAL_RESULT) &&
(b == INT_RESULT || b == DECIMAL_RESULT))
return &type_handler_newdecimal;
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index d5b5ac5eef4..e8f10920504 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -427,7 +427,7 @@ int mysql_update(THD *thd,
DBUG_ASSERT(update_source_table || table_list->view != 0);
DBUG_PRINT("info", ("Switch to multi-update"));
/* pass counter value */
- thd->lex->table_count= table_count;
+ thd->lex->table_count_update= table_count;
if (thd->lex->period_conditions.is_set())
{
my_error(ER_NOT_SUPPORTED_YET, MYF(0),
@@ -1861,7 +1861,7 @@ int mysql_multi_update_prepare(THD *thd)
TABLE_LIST *table_list= lex->query_tables;
TABLE_LIST *tl;
Multiupdate_prelocking_strategy prelocking_strategy;
- uint table_count= lex->table_count;
+ uint table_count= lex->table_count_update;
DBUG_ENTER("mysql_multi_update_prepare");
/*
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index eb1fee2a00c..717ebf0e5eb 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -2519,6 +2519,7 @@ create:
{
if (Lex->main_select_push())
MYSQL_YYABORT;
+ Lex->inc_select_stack_outer_barrier();
if (Lex->add_create_view(thd, $1 | $5,
DTYPE_ALGORITHM_UNDEFINED, $3, $6))
MYSQL_YYABORT;
@@ -2534,6 +2535,7 @@ create:
MYSQL_YYABORT;
if (Lex->main_select_push())
MYSQL_YYABORT;
+ Lex->inc_select_stack_outer_barrier();
}
view_list_opt AS view_select
{
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index 8a5f01cbc07..84f743d6db3 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -2047,7 +2047,10 @@ Sys_gtid_strict_mode(
"gtid_strict_mode",
"Enforce strict seq_no ordering of events in the binary log. Slave "
"stops with an error if it encounters an event that would cause it to "
- "generate an out-of-order binlog if executed.",
+ "generate an out-of-order binlog if executed. "
+ "When ON the same server-id semisync-replicated transactions that "
+ "duplicate existing ones in binlog are ignored without error "
+ "and slave interruption.",
GLOBAL_VAR(opt_gtid_strict_mode),
CMD_LINE(OPT_ARG), DEFAULT(FALSE));
diff --git a/sql/temporary_tables.cc b/sql/temporary_tables.cc
index f3264a4c809..65aa6c3c28b 100644
--- a/sql/temporary_tables.cc
+++ b/sql/temporary_tables.cc
@@ -625,6 +625,10 @@ bool THD::drop_temporary_table(TABLE *table, bool *is_trans, bool delete_table)
DBUG_PRINT("tmptable", ("Dropping table: '%s'.'%s'",
table->s->db.str, table->s->table_name.str));
+ // Close all open handlers in case this is a statement abort and some were left open
+ if (is_error())
+ table->file->ha_reset();
+
locked= lock_temporary_tables();
share= tmp_table_share(table);