author     Marko Mäkelä <marko.makela@mariadb.com>  2022-07-28 11:25:21 +0300
committer  Marko Mäkelä <marko.makela@mariadb.com>  2022-07-28 11:25:21 +0300
commit     4ce6e78059c1a91765c91afa5330737bd7568d0a (patch)
tree       94e684c003fa5e171e62bc59178b72ae107b2ba9 /sql
parent     0149abf66ff8fd7893cd24cd7381ea5e59f6b91b (diff)
parent     f53f64b7b9edaef8e413add322225dc33ebc8131 (diff)
Merge 10.9 into 10.10
Diffstat (limited to 'sql')
-rw-r--r--  sql/field.cc                    11
-rw-r--r--  sql/ha_partition.cc             39
-rw-r--r--  sql/ha_partition.h               5
-rw-r--r--  sql/item_jsonfunc.cc           172
-rw-r--r--  sql/json_table.cc               39
-rw-r--r--  sql/log.cc                      83
-rw-r--r--  sql/log.h                        2
-rw-r--r--  sql/mdl.cc                      55
-rw-r--r--  sql/mysqld.cc                   20
-rw-r--r--  sql/opt_subselect.cc             1
-rw-r--r--  sql/partition_info.cc           13
-rw-r--r--  sql/rpl_gtid.cc                  9
-rw-r--r--  sql/rpl_gtid.h                   3
-rw-r--r--  sql/rpl_mi.cc                    5
-rw-r--r--  sql/rpl_mi.h                    20
-rw-r--r--  sql/slave.cc                    31
-rw-r--r--  sql/sql_base.cc                183
-rw-r--r--  sql/sql_base.h                   5
-rw-r--r--  sql/sql_class.cc                 4
-rw-r--r--  sql/sql_insert.cc               14
-rw-r--r--  sql/sql_lex.cc                  14
-rw-r--r--  sql/sql_lex.h                   12
-rw-r--r--  sql/sql_parse.cc                 7
-rw-r--r--  sql/sql_prepare.cc               2
-rw-r--r--  sql/sql_reload.cc                2
-rw-r--r--  sql/sql_select.cc               22
-rw-r--r--  sql/sql_select.h                 2
-rw-r--r--  sql/sql_sequence.cc             10
-rw-r--r--  sql/sql_statistics.cc            4
-rw-r--r--  sql/sql_table.cc                 8
-rw-r--r--  sql/sql_type.cc                  7
-rw-r--r--  sql/sql_type_fixedbin.h          2
-rw-r--r--  sql/sql_update.cc                4
-rw-r--r--  sql/sql_view.cc                  6
-rw-r--r--  sql/sql_yacc.yy                  2
-rw-r--r--  sql/sys_vars.cc                  5
-rw-r--r--  sql/temporary_tables.cc          4
-rw-r--r--  sql/threadpool_winsockets.cc    13
38 files changed, 613 insertions, 227 deletions
diff --git a/sql/field.cc b/sql/field.cc
index 942a9c67020..c5b855363ed 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2017, Oracle and/or its affiliates.
- Copyright (c) 2008, 2021, MariaDB
+ Copyright (c) 2008, 2022, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -7573,7 +7573,7 @@ my_decimal *Field_string::val_decimal(my_decimal *decimal_value)
THD *thd= get_thd();
Converter_str2my_decimal_with_warn(thd,
Warn_filter_string(thd, this),
- E_DEC_FATAL_ERROR,
+ E_DEC_FATAL_ERROR & ~E_DEC_BAD_NUM,
Field_string::charset(),
(const char *) ptr,
field_length, decimal_value);
@@ -7934,7 +7934,7 @@ my_decimal *Field_varstring::val_decimal(my_decimal *decimal_value)
DBUG_ASSERT(marked_for_read());
THD *thd= get_thd();
Converter_str2my_decimal_with_warn(thd, Warn_filter(thd),
- E_DEC_FATAL_ERROR,
+ E_DEC_FATAL_ERROR & ~E_DEC_BAD_NUM,
Field_varstring::charset(),
(const char *) get_data(),
get_length(), decimal_value);
@@ -8780,7 +8780,7 @@ my_decimal *Field_blob::val_decimal(my_decimal *decimal_value)
THD *thd= get_thd();
Converter_str2my_decimal_with_warn(thd, Warn_filter(thd),
- E_DEC_FATAL_ERROR,
+ E_DEC_FATAL_ERROR & ~E_DEC_BAD_NUM,
Field_blob::charset(),
blob, length, decimal_value);
return decimal_value;
@@ -10011,7 +10011,7 @@ int Field_bit::cmp_prefix(const uchar *a, const uchar *b,
}
-int Field_bit::key_cmp(const uchar *str, uint length) const
+int Field_bit::key_cmp(const uchar *str, uint) const
{
if (bit_len)
{
@@ -10020,7 +10020,6 @@ int Field_bit::key_cmp(const uchar *str, uint length) const
if ((flag= (int) (bits - *str)))
return flag;
str++;
- length--;
}
return memcmp(ptr, str, bytes_in_rec);
}
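
The field.cc hunks above change the conversion mask passed to Converter_str2my_decimal_with_warn() from E_DEC_FATAL_ERROR to E_DEC_FATAL_ERROR & ~E_DEC_BAD_NUM, so a malformed number inside a CHAR/VARCHAR/BLOB value is no longer escalated to a fatal decimal-conversion error. A minimal standalone sketch of that bit-masking idea follows; the constants and names are invented for illustration and are not MariaDB's actual values.

    #include <cstdio>

    // Hypothetical error-mask constants, for illustration only.
    enum {
      DEC_OVERFLOW  = 1 << 0,
      DEC_TRUNCATED = 1 << 1,
      DEC_BAD_NUM   = 1 << 2,
      DEC_FATAL     = DEC_OVERFLOW | DEC_TRUNCATED | DEC_BAD_NUM  // everything fatal
    };

    // True when 'condition' should be treated as fatal under 'mask'.
    static bool is_fatal(unsigned mask, unsigned condition) {
      return (mask & condition) != 0;
    }

    int main() {
      unsigned strict  = DEC_FATAL;                 // old behaviour: bad numbers are fatal
      unsigned relaxed = DEC_FATAL & ~DEC_BAD_NUM;  // new behaviour: bad numbers downgraded
      std::printf("bad number fatal? strict=%d relaxed=%d\n",
                  is_fatal(strict, DEC_BAD_NUM), is_fatal(relaxed, DEC_BAD_NUM));
      return 0;
    }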
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 5224c378d19..0c7d4d79d90 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -2447,7 +2447,6 @@ uint ha_partition::del_ren_table(const char *from, const char *to)
char *name_buffer_ptr;
const char *from_path;
const char *to_path= NULL;
- uint i;
handler **file, **abort_file;
THD *thd= ha_thd();
DBUG_ENTER("ha_partition::del_ren_table");
@@ -2487,7 +2486,6 @@ uint ha_partition::del_ren_table(const char *from, const char *to)
from_path= get_canonical_filename(*file, from, from_lc_buff);
if (to != NULL)
to_path= get_canonical_filename(*file, to, to_lc_buff);
- i= 0;
do
{
if (unlikely((error= create_partition_name(from_buff, sizeof(from_buff),
@@ -2512,7 +2510,6 @@ uint ha_partition::del_ren_table(const char *from, const char *to)
name_buffer_ptr= strend(name_buffer_ptr) + 1;
if (unlikely(error))
save_error= error;
- i++;
} while (*(++file));
if (to != NULL)
{
@@ -4512,15 +4509,15 @@ int ha_partition::write_row(const uchar * buf)
if (have_auto_increment)
{
if (!table_share->next_number_keypart)
- update_next_auto_inc_val();
- error= update_auto_increment();
+ if (unlikely(error= update_next_auto_inc_val()))
+ goto exit;
/*
If we have failed to set the auto-increment value for this row,
it is highly likely that we will not be able to insert it into
the correct partition. We must check and fail if necessary.
*/
- if (unlikely(error))
+ if (unlikely(error= update_auto_increment()))
goto exit;
/*
@@ -8478,6 +8475,7 @@ int ha_partition::compare_number_of_records(ha_partition *me,
int ha_partition::info(uint flag)
{
+ int error;
uint no_lock_flag= flag & HA_STATUS_NO_LOCK;
uint extra_var_flag= flag & HA_STATUS_VARIABLE_EXTRA;
DBUG_ENTER("ha_partition::info");
@@ -8530,7 +8528,11 @@ int ha_partition::info(uint flag)
break;
}
file= *file_array;
- file->info(HA_STATUS_AUTO | no_lock_flag);
+ if ((error= file->info(HA_STATUS_AUTO | no_lock_flag)))
+ {
+ unlock_auto_increment();
+ DBUG_RETURN(error);
+ }
set_if_bigger(auto_increment_value,
file->stats.auto_increment_value);
} while (*(++file_array));
@@ -8587,7 +8589,8 @@ int ha_partition::info(uint flag)
i= bitmap_get_next_set(&m_part_info->read_partitions, i))
{
file= m_file[i];
- file->info(HA_STATUS_VARIABLE | no_lock_flag | extra_var_flag);
+ if ((error= file->info(HA_STATUS_VARIABLE | no_lock_flag | extra_var_flag)))
+ DBUG_RETURN(error);
stats.records+= file->stats.records;
stats.deleted+= file->stats.deleted;
stats.data_file_length+= file->stats.data_file_length;
@@ -8676,7 +8679,8 @@ int ha_partition::info(uint flag)
if (!(flag & HA_STATUS_VARIABLE) ||
!bitmap_is_set(&(m_part_info->read_partitions),
(uint) (file_array - m_file)))
- file->info(HA_STATUS_VARIABLE | no_lock_flag | extra_var_flag);
+ if ((error= file->info(HA_STATUS_VARIABLE | no_lock_flag | extra_var_flag)))
+ DBUG_RETURN(error);
if (file->stats.records > max_records || !handler_instance_set)
{
handler_instance_set= 1;
@@ -8697,7 +8701,8 @@ int ha_partition::info(uint flag)
this);
file= m_file[handler_instance];
- file->info(HA_STATUS_CONST | no_lock_flag);
+ if ((error= file->info(HA_STATUS_CONST | no_lock_flag)))
+ DBUG_RETURN(error);
stats.block_size= file->stats.block_size;
stats.create_time= file->stats.create_time;
ref_length= m_ref_length;
@@ -8713,7 +8718,8 @@ int ha_partition::info(uint flag)
Note: all engines does not support HA_STATUS_ERRKEY, so set errkey.
*/
file->errkey= errkey;
- file->info(HA_STATUS_ERRKEY | no_lock_flag);
+ if ((error= file->info(HA_STATUS_ERRKEY | no_lock_flag)))
+ DBUG_RETURN(error);
errkey= file->errkey;
}
if (flag & HA_STATUS_TIME)
@@ -8730,7 +8736,8 @@ int ha_partition::info(uint flag)
do
{
file= *file_array;
- file->info(HA_STATUS_TIME | no_lock_flag);
+ if ((error= file->info(HA_STATUS_TIME | no_lock_flag)))
+ DBUG_RETURN(error);
if (file->stats.update_time > stats.update_time)
stats.update_time= file->stats.update_time;
} while (*(++file_array));
@@ -10748,11 +10755,11 @@ int ha_partition::cmp_ref(const uchar *ref1, const uchar *ref2)
the underlying partitions require that the value should be re-calculated
*/
-void ha_partition::update_next_auto_inc_val()
+int ha_partition::update_next_auto_inc_val()
{
- if (!part_share->auto_inc_initialized ||
- need_info_for_auto_inc())
- info(HA_STATUS_AUTO);
+ if (!part_share->auto_inc_initialized || need_info_for_auto_inc())
+ return info(HA_STATUS_AUTO);
+ return 0;
}
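
The ha_partition.cc hunks stop ignoring the return value of the per-partition handler::info() calls: each call is now checked, the auto-increment lock is released on failure, the first error is propagated, and update_next_auto_inc_val() returns int so write_row() can abort early. Below is a standalone sketch of the same check-and-propagate pattern over a set of sub-handlers; the types are invented stand-ins, not the server's classes.

    #include <cstdio>
    #include <vector>

    // Hypothetical sub-handler: info() returns 0 on success, an errno-style code on failure.
    struct SubHandler {
      int fail_code;
      int info(unsigned /*flag*/) const { return fail_code; }
    };

    // Aggregate statistics across partitions, stopping at the first error
    // instead of silently continuing with stale numbers.
    static int collect_info(const std::vector<SubHandler> &parts, unsigned flag) {
      for (const SubHandler &p : parts) {
        if (int error = p.info(flag))   // propagate instead of ignoring
          return error;
      }
      return 0;
    }

    int main() {
      std::vector<SubHandler> parts{{0}, {0}, {5 /* simulated failure */}};
      std::printf("collect_info() = %d\n", collect_info(parts, 0));
      return 0;
    }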
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index fd149146694..45a835f734a 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -1366,7 +1366,7 @@ public:
void release_auto_increment() override;
private:
int reset_auto_increment(ulonglong value) override;
- void update_next_auto_inc_val();
+ int update_next_auto_inc_val();
virtual void lock_auto_increment()
{
/* lock already taken */
@@ -1413,7 +1413,8 @@ private:
unless we already did it.
*/
if (!part_share->auto_inc_initialized &&
- (ha_thd()->lex->sql_command == SQLCOM_INSERT ||
+ (ha_thd()->lex->sql_command == SQLCOM_INSERT ||
+ ha_thd()->lex->sql_command == SQLCOM_INSERT_SELECT ||
ha_thd()->lex->sql_command == SQLCOM_REPLACE) &&
table->found_next_number_field)
bitmap_set_all(&m_part_info->read_partitions);
diff --git a/sql/item_jsonfunc.cc b/sql/item_jsonfunc.cc
index acf5c199a02..7eea659555a 100644
--- a/sql/item_jsonfunc.cc
+++ b/sql/item_jsonfunc.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, 2021, MariaDB Corporation.
+/* Copyright (c) 2016, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -18,7 +18,22 @@
#include "sql_priv.h"
#include "sql_class.h"
#include "item.h"
+#include "sql_parse.h" // For check_stack_overrun
+/*
+ Allocating memory and *also* using it (reading and
+ writing from it) because some build instructions cause the
+ compiler to optimize out stack_used_up. Since alloca()
+ here depends on stack_used_up, it doesn't get executed
+ correctly and causes json_debug_nonembedded to fail
+ ( --error ER_STACK_OVERRUN_NEED_MORE does not occur).
+*/
+#define ALLOCATE_MEM_ON_STACK(A) do \
+ { \
+ uchar *array= (uchar*)alloca(A); \
+ bzero(array, A); \
+ my_checksum(0, array, A); \
+ } while(0)
/*
Compare ASCII string against the string with the specified
@@ -128,6 +143,131 @@ static int append_tab(String *js, int depth, int tab_size)
return 0;
}
+int json_path_parts_compare(
+ const json_path_step_t *a, const json_path_step_t *a_end,
+ const json_path_step_t *b, const json_path_step_t *b_end,
+ enum json_value_types vt, const int *array_sizes)
+{
+ int res, res2;
+ const json_path_step_t *temp_b= b;
+
+ while (a <= a_end)
+ {
+ if (b > b_end)
+ {
+ while (vt != JSON_VALUE_ARRAY &&
+ (a->type & JSON_PATH_ARRAY_WILD) == JSON_PATH_ARRAY &&
+ a->n_item == 0)
+ {
+ if (++a > a_end)
+ return 0;
+ }
+ return -2;
+ }
+
+ DBUG_ASSERT((b->type & (JSON_PATH_WILD | JSON_PATH_DOUBLE_WILD)) == 0);
+
+ if (a->type & JSON_PATH_ARRAY)
+ {
+ if (b->type & JSON_PATH_ARRAY)
+ {
+ int res= 0, corrected_n_item_a= 0;
+ if (array_sizes)
+ corrected_n_item_a= a->n_item < 0 ?
+ array_sizes[b-temp_b] + a->n_item : a->n_item;
+ if (a->type & JSON_PATH_ARRAY_RANGE)
+ {
+ int corrected_n_item_end_a= 0;
+ if (array_sizes)
+ corrected_n_item_end_a= a->n_item_end < 0 ?
+ array_sizes[b-temp_b] + a->n_item_end :
+ a->n_item_end;
+ res= b->n_item >= corrected_n_item_a &&
+ b->n_item <= corrected_n_item_end_a;
+ }
+ else
+ res= corrected_n_item_a == b->n_item;
+
+ if ((a->type & JSON_PATH_WILD) || res)
+ goto step_fits;
+ goto step_failed;
+ }
+ if ((a->type & JSON_PATH_WILD) == 0 && a->n_item == 0)
+ goto step_fits_autowrap;
+ goto step_failed;
+ }
+ else /* JSON_PATH_KEY */
+ {
+ if (!(b->type & JSON_PATH_KEY))
+ goto step_failed;
+
+ if (!(a->type & JSON_PATH_WILD) &&
+ (a->key_end - a->key != b->key_end - b->key ||
+ memcmp(a->key, b->key, a->key_end - a->key) != 0))
+ goto step_failed;
+
+ goto step_fits;
+ }
+step_failed:
+ if (!(a->type & JSON_PATH_DOUBLE_WILD))
+ return -1;
+ b++;
+ continue;
+
+step_fits:
+ b++;
+ if (!(a->type & JSON_PATH_DOUBLE_WILD))
+ {
+ a++;
+ continue;
+ }
+
+ /* Double wild handling needs recursions. */
+ res= json_path_parts_compare(a+1, a_end, b, b_end, vt,
+ array_sizes ? array_sizes + (b - temp_b) :
+ NULL);
+ if (res == 0)
+ return 0;
+
+ res2= json_path_parts_compare(a, a_end, b, b_end, vt,
+ array_sizes ? array_sizes + (b - temp_b) :
+ NULL);
+
+ return (res2 >= 0) ? res2 : res;
+
+step_fits_autowrap:
+ if (!(a->type & JSON_PATH_DOUBLE_WILD))
+ {
+ a++;
+ continue;
+ }
+
+ /* Double wild handling needs recursions. */
+ res= json_path_parts_compare(a+1, a_end, b+1, b_end, vt,
+ array_sizes ? array_sizes + (b - temp_b) :
+ NULL);
+ if (res == 0)
+ return 0;
+
+ res2= json_path_parts_compare(a, a_end, b+1, b_end, vt,
+ array_sizes ? array_sizes + (b - temp_b) :
+ NULL);
+
+ return (res2 >= 0) ? res2 : res;
+
+ }
+
+ return b <= b_end;
+}
+
+
+int json_path_compare(const json_path_t *a, const json_path_t *b,
+ enum json_value_types vt, const int *array_size)
+{
+ return json_path_parts_compare(a->steps+1, a->last_step,
+ b->steps+1, b->last_step, vt, array_size);
+}
+
static int json_nice(json_engine_t *je, String *nice_js,
Item_func_json_format::formats mode, int tab_size=4)
@@ -1103,6 +1243,12 @@ static int check_contains(json_engine_t *js, json_engine_t *value)
{
json_engine_t loc_js;
bool set_js;
+ long arbitrary_var;
+ long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+ DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+ {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+ if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL))
+ return 1;
switch (js->value_type)
{
@@ -2091,6 +2237,14 @@ err_return:
static int do_merge(String *str, json_engine_t *je1, json_engine_t *je2)
{
+
+ long arbitrary_var;
+ long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+ DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+ {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+ if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL))
+ return 1;
+
if (json_read_value(je1) || json_read_value(je2))
return 1;
@@ -2425,6 +2579,13 @@ static int copy_value_patch(String *str, json_engine_t *je)
static int do_merge_patch(String *str, json_engine_t *je1, json_engine_t *je2,
bool *empty_result)
{
+ long arbitrary_var;
+ long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+ DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+ {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+ if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL))
+ return 1;
+
if (json_read_value(je1) || json_read_value(je2))
return 1;
@@ -2840,7 +3001,7 @@ longlong Item_func_json_depth::val_int()
bool Item_func_json_type::fix_length_and_dec(THD *thd)
{
collation.set(&my_charset_utf8mb3_general_ci);
- max_length= 12;
+ max_length= 12 * collation.collation->mbmaxlen;
set_maybe_null();
return FALSE;
}
@@ -4406,6 +4567,13 @@ int json_find_overlap_with_object(json_engine_t *js, json_engine_t *value,
*/
int check_overlaps(json_engine_t *js, json_engine_t *value, bool compare_whole)
{
+ long arbitrary_var;
+ long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+ DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+ {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+ if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL))
+ return 1;
+
switch (js->value_type)
{
case JSON_VALUE_OBJECT:
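
The recursive JSON routines (check_contains(), do_merge(), do_merge_patch(), check_overlaps()) now call check_stack_overrun() on entry so deeply nested documents raise ER_STACK_OVERRUN_NEED_MORE instead of crashing the thread, and ALLOCATE_MEM_ON_STACK exists only so the debug injection point can burn almost all remaining stack before that check runs. Below is a standalone sketch of the guard idea, estimating stack use from the address distance to a recorded base; it only approximates the server's thread_stack/available_stack_size() bookkeeping, and the budget is an invented value.

    #include <cstddef>
    #include <cstdio>

    static const char *stack_base;                        // address captured near the top frame
    static const std::size_t STACK_BUDGET = 256 * 1024;   // illustrative limit, not the server's

    // Rough "stack used so far" estimate: address distance from the base frame.
    static std::size_t stack_used() {
      char probe;
      const char *p = &probe;
      return static_cast<std::size_t>(p > stack_base ? p - stack_base : stack_base - p);
    }

    // Recursive walk guarded like the patched JSON functions: bail out with a
    // result once the estimated stack usage crosses the budget, instead of
    // letting unbounded recursion overflow the stack.
    static int walk(int depth, volatile char *prev) {
      volatile char padding[512];             // keep each frame visibly large
      padding[0] = prev ? prev[0] : 0;        // touch the caller's frame so frames stay live
      if (stack_used() > STACK_BUDGET)
        return depth;                         // "stack overrun" detected at this depth
      return walk(depth + 1, padding);
    }

    int main() {
      char base;
      stack_base = &base;
      std::printf("guard triggered at depth %d\n", walk(0, nullptr));
      return 0;
    }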
diff --git a/sql/json_table.cc b/sql/json_table.cc
index 6e9f3d1db67..7642e992347 100644
--- a/sql/json_table.cc
+++ b/sql/json_table.cc
@@ -25,9 +25,26 @@
#include "sql_show.h"
#include "sql_select.h"
#include "create_tmp_table.h"
+#include "sql_parse.h"
#define HA_ERR_JSON_TABLE (HA_ERR_LAST+1)
+/*
+ Allocating memory and *also* using it (reading and
+ writing from it) because some build instructions cause the
+ compiler to optimize out stack_used_up. Since alloca()
+ here depends on stack_used_up, it doesn't get executed
+ correctly and causes json_debug_nonembedded to fail
+ ( --error ER_STACK_OVERRUN_NEED_MORE does not occur).
+*/
+#define ALLOCATE_MEM_ON_STACK(A) do \
+ { \
+ uchar *array= (uchar*)alloca(A); \
+ array[0]= 1; \
+ array[0]++; \
+ array[0] ? array[0]++ : array[0]--; \
+ } while(0)
+
class table_function_handlerton
{
public:
@@ -101,6 +118,13 @@ int get_disallowed_table_deps_for_list(MEM_ROOT *mem_root,
NESTED_JOIN *nested_join;
List_iterator<TABLE_LIST> li(*join_list);
+ long arbitrary_var;
+ long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+ DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+ {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+ if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL))
+ return 1;
+
while ((table= li++))
{
if ((nested_join= table->nested_join))
@@ -1319,6 +1343,14 @@ static void add_extra_deps(List<TABLE_LIST> *join_list, table_map deps)
{
TABLE_LIST *table;
List_iterator<TABLE_LIST> li(*join_list);
+
+ long arbitrary_var;
+ long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+ DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+ {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+ if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL))
+ return;
+
while ((table= li++))
{
table->dep_tables |= deps;
@@ -1407,6 +1439,13 @@ table_map add_table_function_dependencies(List<TABLE_LIST> *join_list,
table_map res= 0;
List_iterator<TABLE_LIST> li(*join_list);
+ long arbitrary_var;
+ long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+ DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+ {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+ if ((res=check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL)))
+ return res;
+
// Recursively compute extra dependencies
while ((table= li++))
{
diff --git a/sql/log.cc b/sql/log.cc
index 619d5ffaccd..ee6c0425192 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -2364,7 +2364,7 @@ static int binlog_rollback(handlerton *hton, THD *thd, bool all)
error |= binlog_commit_flush_stmt_cache(thd, all, cache_mngr);
}
- if (cache_mngr->trx_cache.empty() &&
+ if (!cache_mngr->trx_cache.has_incident() && cache_mngr->trx_cache.empty() &&
thd->transaction->xid_state.get_state_code() != XA_PREPARED)
{
/*
@@ -6087,7 +6087,6 @@ void THD::binlog_prepare_for_row_logging()
bool THD::binlog_write_annotated_row(Log_event_writer *writer)
{
- int error;
DBUG_ENTER("THD::binlog_write_annotated_row");
if (!(IF_WSREP(!wsrep_fragments_certified_for_stmt(this), true) &&
@@ -6096,13 +6095,7 @@ bool THD::binlog_write_annotated_row(Log_event_writer *writer)
DBUG_RETURN(0);
Annotate_rows_log_event anno(this, 0, false);
- if (unlikely((error= writer->write(&anno))))
- {
- if (my_errno == EFBIG)
- writer->set_incident();
- DBUG_RETURN(error);
- }
- DBUG_RETURN(0);
+ DBUG_RETURN(writer->write(&anno));
}
@@ -6175,21 +6168,22 @@ bool THD::binlog_write_table_maps()
/**
- This function writes a table map to the binary log.
- Note that in order to keep the signature uniform with related methods,
- we use a redundant parameter to indicate whether a transactional table
- was changed or not.
+ This function writes a table map to the binary log.
- @param table a pointer to the table.
- @param with_annotate If true call binlog_write_annotated_row()
+ If an error occurs while writing events and rollback is not possible, e.g.
+ due to the statement modifying a non-transactional table, an incident event
+ is logged.
+ @param table a pointer to the table.
+ @param with_annotate @c true to write an annotate event before writing
+ the table_map event, @c false otherwise.
@return
nonzero if an error pops up when writing the table map event.
*/
bool THD::binlog_write_table_map(TABLE *table, bool with_annotate)
{
- int error;
+ int error= 1;
bool is_transactional= table->file->row_logging_has_trans;
DBUG_ENTER("THD::binlog_write_table_map");
DBUG_PRINT("enter", ("table: %p (%s: #%lu)",
@@ -6215,12 +6209,34 @@ bool THD::binlog_write_table_map(TABLE *table, bool with_annotate)
if (with_annotate)
if (binlog_write_annotated_row(&writer))
- DBUG_RETURN(1);
+ goto write_err;
+
+ DBUG_EXECUTE_IF("table_map_write_error",
+ {
+ if (is_transactional)
+ {
+ my_errno= EFBIG;
+ goto write_err;
+ }
+ });
if (unlikely((error= writer.write(&the_event))))
- DBUG_RETURN(error);
+ goto write_err;
DBUG_RETURN(0);
+
+write_err:
+ mysql_bin_log.set_write_error(this, is_transactional);
+ /*
+ For non-transactional engine or multi statement transaction with mixed
+ engines, data is written to table but writing to binary log failed. In
+ these scenarios rollback is not possible. Hence report an incident.
+ */
+ if (mysql_bin_log.check_write_error(this) && cache_data &&
+ lex->stmt_accessed_table(LEX::STMT_WRITES_NON_TRANS_TABLE) &&
+ table->current_lock == F_WRLCK)
+ cache_data->set_incident();
+ DBUG_RETURN(error);
}
@@ -6623,11 +6639,13 @@ MYSQL_BIN_LOG::bump_seq_no_counter_if_needed(uint32 domain_id, uint64 seq_no)
bool
MYSQL_BIN_LOG::check_strict_gtid_sequence(uint32 domain_id,
uint32 server_id_arg,
- uint64 seq_no)
+ uint64 seq_no,
+ bool no_error)
{
return rpl_global_gtid_binlog_state.check_strict_sequence(domain_id,
server_id_arg,
- seq_no);
+ seq_no,
+ no_error);
}
@@ -7656,7 +7674,9 @@ bool MYSQL_BIN_LOG::write_incident(THD *thd)
if (likely(is_open()))
{
prev_binlog_id= current_binlog_id;
- if (likely(!(error= write_incident_already_locked(thd))) &&
+ if (likely(!(error= DBUG_IF("incident_event_write_error")
+ ? 1
+ : write_incident_already_locked(thd))) &&
likely(!(error= flush_and_sync(0))))
{
update_binlog_end_pos();
@@ -7685,6 +7705,22 @@ bool MYSQL_BIN_LOG::write_incident(THD *thd)
mysql_mutex_unlock(&LOCK_log);
}
+ /*
+ Upon writing incident event, check for thd->error() and print the
+ relevant error message in the error log.
+ */
+ if (thd->is_error())
+ {
+ sql_print_error("Write to binary log failed: "
+ "%s. An incident event is written to binary log "
+ "and slave will be stopped.\n",
+ thd->get_stmt_da()->message());
+ }
+ if (error)
+ {
+ sql_print_error("Incident event write to the binary log file failed.");
+ }
+
DBUG_RETURN(error);
}
@@ -9145,8 +9181,9 @@ void sql_perror(const char *message)
*/
bool reopen_fstreams(const char *filename, FILE *outstream, FILE *errstream)
{
- if ((outstream && !my_freopen(filename, "a", outstream)) ||
- (errstream && !my_freopen(filename, "a", errstream)))
+ static constexpr const char *mode= "a" IF_WIN("t", );
+ if ((outstream && !my_freopen(filename, mode, outstream)) ||
+ (errstream && !my_freopen(filename, mode, errstream)))
{
my_error(ER_CANT_CREATE_FILE, MYF(0), filename, errno);
return TRUE;
diff --git a/sql/log.h b/sql/log.h
index 433057f4b93..edb6e4617ac 100644
--- a/sql/log.h
+++ b/sql/log.h
@@ -920,7 +920,7 @@ public:
bool lookup_domain_in_binlog_state(uint32 domain_id, rpl_gtid *out_gtid);
int bump_seq_no_counter_if_needed(uint32 domain_id, uint64 seq_no);
bool check_strict_gtid_sequence(uint32 domain_id, uint32 server_id,
- uint64 seq_no);
+ uint64 seq_no, bool no_error= false);
/**
* used when opening new file, and binlog_end_pos moves backwards
diff --git a/sql/mdl.cc b/sql/mdl.cc
index bb764d04a42..13b01da36d1 100644
--- a/sql/mdl.cc
+++ b/sql/mdl.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2007, 2012, Oracle and/or its affiliates.
- Copyright (c) 2020, 2021, MariaDB
+ Copyright (c) 2020, 2022, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -247,25 +247,32 @@ private:
Print a list of all locks to DBUG trace to help with debugging
*/
+const char *dbug_print_mdl(MDL_ticket *mdl_ticket)
+{
+ thread_local char buffer[256];
+ MDL_key *mdl_key= mdl_ticket->get_key();
+ my_snprintf(buffer, sizeof(buffer) - 1, "%.*s/%.*s (%s)",
+ (int) mdl_key->db_name_length(), mdl_key->db_name(),
+ (int) mdl_key->name_length(), mdl_key->name(),
+ mdl_ticket->get_type_name()->str);
+ return buffer;
+}
+
+
static int mdl_dbug_print_lock(MDL_ticket *mdl_ticket, void *arg, bool granted)
{
String *tmp= (String*) arg;
- char buffer[128];
- MDL_key *mdl_key= mdl_ticket->get_key();
- size_t length;
- length= my_snprintf(buffer, sizeof(buffer)-1,
- "\nname: %s db: %.*s key_name: %.*s (%s)",
- mdl_ticket->get_type_name()->str,
- (int) mdl_key->db_name_length(), mdl_key->db_name(),
- (int) mdl_key->name_length(), mdl_key->name(),
- granted ? "granted" : "waiting");
+ char buffer[256];
+ size_t length= my_snprintf(buffer, sizeof(buffer) - 1,
+ "\n %s (%s)", dbug_print_mdl(mdl_ticket),
+ granted ? "granted" : "waiting");
tmp->append(buffer, length);
return 0;
}
const char *mdl_dbug_print_locks()
{
- static String tmp;
+ thread_local String tmp;
mdl_iterate(mdl_dbug_print_lock, (void*) &tmp);
return tmp.c_ptr();
}
@@ -2271,13 +2278,19 @@ MDL_context::acquire_lock(MDL_request *mdl_request, double lock_wait_timeout)
MDL_ticket *ticket;
MDL_wait::enum_wait_status wait_status;
DBUG_ENTER("MDL_context::acquire_lock");
+#ifndef DBUG_OFF
+ const char *mdl_lock_name= get_mdl_lock_name(
+ mdl_request->key.mdl_namespace(), mdl_request->type)->str;
+#endif
DBUG_PRINT("enter", ("lock_type: %s timeout: %f",
- get_mdl_lock_name(mdl_request->key.mdl_namespace(),
- mdl_request->type)->str,
+ mdl_lock_name,
lock_wait_timeout));
if (try_acquire_lock_impl(mdl_request, &ticket))
+ {
+ DBUG_PRINT("mdl", ("OOM: %s", mdl_lock_name));
DBUG_RETURN(TRUE);
+ }
if (mdl_request->ticket)
{
@@ -2287,9 +2300,14 @@ MDL_context::acquire_lock(MDL_request *mdl_request, double lock_wait_timeout)
accordingly, so we can simply return success.
*/
DBUG_PRINT("info", ("Got lock without waiting"));
+ DBUG_PRINT("mdl", ("Seized: %s", dbug_print_mdl(mdl_request->ticket)));
DBUG_RETURN(FALSE);
}
+#ifndef DBUG_OFF
+ const char *ticket_msg= dbug_print_mdl(ticket);
+#endif
+
/*
Our attempt to acquire lock without waiting has failed.
As a result of this attempt we got MDL_ticket with m_lock
@@ -2300,6 +2318,7 @@ MDL_context::acquire_lock(MDL_request *mdl_request, double lock_wait_timeout)
if (lock_wait_timeout == 0)
{
+ DBUG_PRINT("mdl", ("Nowait: %s", ticket_msg));
mysql_prlock_unlock(&lock->m_rwlock);
MDL_ticket::destroy(ticket);
my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0));
@@ -2360,6 +2379,7 @@ MDL_context::acquire_lock(MDL_request *mdl_request, double lock_wait_timeout)
locker= PSI_CALL_start_metadata_wait(&state, ticket->m_psi, __FILE__, __LINE__);
#endif
+ DBUG_PRINT("mdl", ("Waiting: %s", ticket_msg));
will_wait_for(ticket);
/* There is a shared or exclusive lock on the object. */
@@ -2417,15 +2437,16 @@ MDL_context::acquire_lock(MDL_request *mdl_request, double lock_wait_timeout)
switch (wait_status)
{
case MDL_wait::VICTIM:
- DBUG_LOCK_FILE;
- DBUG_PRINT("mdl_locks", ("%s", mdl_dbug_print_locks()));
- DBUG_UNLOCK_FILE;
+ DBUG_PRINT("mdl", ("Deadlock: %s", ticket_msg));
+ DBUG_PRINT("mdl_locks", ("Existing locks:%s", mdl_dbug_print_locks()));
my_error(ER_LOCK_DEADLOCK, MYF(0));
break;
case MDL_wait::TIMEOUT:
+ DBUG_PRINT("mdl", ("Timeout: %s", ticket_msg));
my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0));
break;
case MDL_wait::KILLED:
+ DBUG_PRINT("mdl", ("Killed: %s", ticket_msg));
get_thd()->send_kill_message();
break;
default:
@@ -2449,6 +2470,7 @@ MDL_context::acquire_lock(MDL_request *mdl_request, double lock_wait_timeout)
mysql_mdl_set_status(ticket->m_psi, MDL_ticket::GRANTED);
+ DBUG_PRINT("mdl", ("Acquired: %s", ticket_msg));
DBUG_RETURN(FALSE);
}
@@ -2870,6 +2892,7 @@ void MDL_context::release_lock(enum_mdl_duration duration, MDL_ticket *ticket)
lock->key.db_name(), lock->key.name()));
DBUG_ASSERT(this == ticket->get_ctx());
+ DBUG_PRINT("mdl", ("Released: %s", dbug_print_mdl(ticket)));
lock->remove_ticket(m_pins, &MDL_lock::m_granted, ticket);
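
dbug_print_mdl() formats a ticket into a thread_local buffer, so the returned pointer stays valid on the calling thread until its next call, and mdl_dbug_print_locks() switched its accumulator from a shared static String to a thread_local one for the same reason. A standalone sketch of the thread_local-buffer idiom; the Ticket type here is an invented placeholder.

    #include <cstdio>

    struct Ticket { const char *db, *name, *type; };  // invented placeholder

    // Format a ticket into per-thread storage; the pointer is valid on the calling
    // thread until its next call, and different threads never clobber each other.
    static const char *describe(const Ticket &t) {
      thread_local char buffer[256];
      std::snprintf(buffer, sizeof buffer, "%s/%s (%s)", t.db, t.name, t.type);
      return buffer;
    }

    int main() {
      Ticket t{"test", "t1", "MDL_SHARED_WRITE"};
      std::printf("%s\n", describe(t));
      return 0;
    }

As in the patch, the trade-off is that the result has to be consumed (here, handed to DBUG_PRINT) before the same thread formats the next ticket.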
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index cdc292f8006..7d4f2b24529 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -1856,6 +1856,13 @@ extern "C" sig_handler print_signal_warning(int sig)
#endif
}
+#ifdef _WIN32
+typedef void (*report_svc_status_t)(DWORD current_state, DWORD win32_exit_code,
+ DWORD wait_hint);
+static void dummy_svc_status(DWORD, DWORD, DWORD) {}
+static report_svc_status_t my_report_svc_status= dummy_svc_status;
+#endif
+
#ifndef EMBEDDED_LIBRARY
extern "C" void unireg_abort(int exit_code)
{
@@ -1901,13 +1908,6 @@ extern "C" void unireg_abort(int exit_code)
mysqld_exit(exit_code);
}
-#ifdef _WIN32
-typedef void (*report_svc_status_t)(DWORD current_state, DWORD win32_exit_code,
- DWORD wait_hint);
-static void dummy_svc_status(DWORD, DWORD, DWORD) {}
-static report_svc_status_t my_report_svc_status= dummy_svc_status;
-#endif
-
static void mysqld_exit(int exit_code)
{
DBUG_ENTER("mysqld_exit");
@@ -4599,6 +4599,7 @@ void ssl_acceptor_stats_update(int sslaccept_ret)
static void init_ssl()
{
+#if !defined(EMBEDDED_LIBRARY)
/*
Not need to check require_secure_transport on the Linux,
because it always has Unix domain sockets that are secure:
@@ -4614,7 +4615,7 @@ static void init_ssl()
unireg_abort(1);
}
#endif
-#if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY)
+#if defined(HAVE_OPENSSL)
if (opt_use_ssl)
{
enum enum_ssl_init_error error= SSL_INITERR_NOERROR;
@@ -4655,7 +4656,8 @@ static void init_ssl()
}
if (des_key_file)
load_des_key_file(des_key_file);
-#endif /* HAVE_OPENSSL && ! EMBEDDED_LIBRARY */
+#endif /* HAVE_OPENSSL */
+#endif /* !EMBEDDED_LIBRARY */
}
/* Reinitialize SSL (FLUSH SSL) */
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index 2440047638e..da871e099dd 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -3033,6 +3033,7 @@ void optimize_semi_joins(JOIN *join, table_map remaining_tables, uint idx,
void update_sj_state(JOIN *join, const JOIN_TAB *new_tab,
uint idx, table_map remaining_tables)
{
+ DBUG_ASSERT(!join->emb_sjm_nest);
if (TABLE_LIST *emb_sj_nest= new_tab->emb_sj_nest)
{
join->cur_sj_inner_tables |= emb_sj_nest->sj_inner_tables;
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index 3a11234c013..3c7d63098b9 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2006, 2015, Oracle and/or its affiliates.
- Copyright (c) 2010, 2020, MariaDB Corporation.
+ Copyright (c) 2010, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1688,7 +1688,6 @@ bool partition_info::set_up_charset_field_preps(THD *thd)
uchar **char_ptrs;
unsigned i;
size_t size;
- uint tot_fields= 0;
uint tot_part_fields= 0;
uint tot_subpart_fields= 0;
DBUG_ENTER("set_up_charset_field_preps");
@@ -1700,13 +1699,8 @@ bool partition_info::set_up_charset_field_preps(THD *thd)
ptr= part_field_array;
/* Set up arrays and buffers for those fields */
while ((field= *(ptr++)))
- {
if (field_is_partition_charset(field))
- {
tot_part_fields++;
- tot_fields++;
- }
- }
size= tot_part_fields * sizeof(char*);
if (!(char_ptrs= (uchar**)thd->calloc(size)))
goto error;
@@ -1740,13 +1734,8 @@ bool partition_info::set_up_charset_field_preps(THD *thd)
/* Set up arrays and buffers for those fields */
ptr= subpart_field_array;
while ((field= *(ptr++)))
- {
if (field_is_partition_charset(field))
- {
tot_subpart_fields++;
- tot_fields++;
- }
- }
size= tot_subpart_fields * sizeof(char*);
if (!(char_ptrs= (uchar**) thd->calloc(size)))
goto error;
diff --git a/sql/rpl_gtid.cc b/sql/rpl_gtid.cc
index 516c9da31d9..c4e5c75b10a 100644
--- a/sql/rpl_gtid.cc
+++ b/sql/rpl_gtid.cc
@@ -1770,7 +1770,7 @@ rpl_binlog_state::alloc_element_nolock(const rpl_gtid *gtid)
*/
bool
rpl_binlog_state::check_strict_sequence(uint32 domain_id, uint32 server_id,
- uint64 seq_no)
+ uint64 seq_no, bool no_error)
{
element *elem;
bool res= 0;
@@ -1781,9 +1781,10 @@ rpl_binlog_state::check_strict_sequence(uint32 domain_id, uint32 server_id,
sizeof(domain_id))) &&
elem->last_gtid && elem->last_gtid->seq_no >= seq_no)
{
- my_error(ER_GTID_STRICT_OUT_OF_ORDER, MYF(0), domain_id, server_id, seq_no,
- elem->last_gtid->domain_id, elem->last_gtid->server_id,
- elem->last_gtid->seq_no);
+ if (!no_error)
+ my_error(ER_GTID_STRICT_OUT_OF_ORDER, MYF(0), domain_id, server_id, seq_no,
+ elem->last_gtid->domain_id, elem->last_gtid->server_id,
+ elem->last_gtid->seq_no);
res= 1;
}
mysql_mutex_unlock(&LOCK_binlog_state);
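
check_strict_sequence() and its wrapper check_strict_gtid_sequence() gain a no_error flag so a caller can probe whether a GTID would violate strict ordering without raising ER_GTID_STRICT_OUT_OF_ORDER; the semisync slave uses exactly this probe when deciding whether to accept an own-server-id event group. A standalone sketch of the report-or-probe pattern with a defaulted parameter; the function and values are invented for illustration.

    #include <cstdio>

    // Report-or-probe pattern: the same validation can either raise a user-visible
    // error (default) or just answer "would this fail?" when no_error is passed.
    static bool seq_no_out_of_order(unsigned long long last, unsigned long long next,
                                    bool no_error = false) {
      if (next <= last) {
        if (!no_error)
          std::fprintf(stderr, "ERROR: out-of-order seq_no %llu (last %llu)\n", next, last);
        return true;
      }
      return false;
    }

    int main() {
      // Probe silently, the way the semisync slave now pre-checks its own GTIDs.
      bool would_fail = seq_no_out_of_order(10, 7, /*no_error=*/true);
      std::printf("would_fail=%d\n", would_fail);
      return 0;
    }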
diff --git a/sql/rpl_gtid.h b/sql/rpl_gtid.h
index 925e271e68a..8fed16543f3 100644
--- a/sql/rpl_gtid.h
+++ b/sql/rpl_gtid.h
@@ -330,7 +330,8 @@ struct rpl_binlog_state
int update_with_next_gtid(uint32 domain_id, uint32 server_id,
rpl_gtid *gtid);
int alloc_element_nolock(const rpl_gtid *gtid);
- bool check_strict_sequence(uint32 domain_id, uint32 server_id, uint64 seq_no);
+ bool check_strict_sequence(uint32 domain_id, uint32 server_id, uint64 seq_no,
+ bool no_error= false);
int bump_seq_no_if_needed(uint32 domain_id, uint64 seq_no);
int write_to_iocache(IO_CACHE *dest);
int read_from_iocache(IO_CACHE *src);
diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc
index b4331c9efc5..ab2523d960b 100644
--- a/sql/rpl_mi.cc
+++ b/sql/rpl_mi.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2006, 2017, Oracle and/or its affiliates.
- Copyright (c) 2010, 2017, MariaDB Corporation
+ Copyright (c) 2010, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -43,8 +43,7 @@ Master_info::Master_info(LEX_CSTRING *connection_name_arg,
gtid_reconnect_event_skip_count(0), gtid_event_seen(false),
in_start_all_slaves(0), in_stop_all_slaves(0), in_flush_all_relay_logs(0),
users(0), killed(0),
- total_ddl_groups(0), total_non_trans_groups(0), total_trans_groups(0),
- is_shutdown(false), master_supports_gtid(true)
+ total_ddl_groups(0), total_non_trans_groups(0), total_trans_groups(0)
{
char *tmp;
host[0] = 0; user[0] = 0; password[0] = 0;
diff --git a/sql/rpl_mi.h b/sql/rpl_mi.h
index e026ec60340..6058b7fb34c 100644
--- a/sql/rpl_mi.h
+++ b/sql/rpl_mi.h
@@ -352,6 +352,20 @@ class Master_info : public Slave_reporting_capability
ACK from slave, or if delay_master is enabled.
*/
int semi_ack;
+ /*
+ The flag has replicate_same_server_id semantics and is raised to accept
+ a same-server-id event group by the gtid strict mode semisync slave.
+ Own server-id events can normally appear as result of EITHER
+ A. this server semisync (failover to) slave crash-recovery:
+ the transaction was created on this server then being master,
+ got replicated elsewhere right before the crash before commit,
+ and finally at recovery the transaction gets evicted from the
+ server's binlog and its gtid (slave) state; OR
+ B. in a general circular configuration, when a received (returned
+ to slave) gtid exists in the server's binlog. Then, in gtid strict mode,
+ it must be ignored similarly to the replicate-same-server-id rule.
+ */
+ bool do_accept_own_server_id= false;
List <start_alter_info> start_alter_list;
MEM_ROOT mem_root;
/*
@@ -360,14 +374,14 @@ class Master_info : public Slave_reporting_capability
The flag is read by Start Alter event to self-mark its state accordingly
at time its alter info struct is about to be appened to the list.
*/
- bool is_shutdown;
+ bool is_shutdown= false;
/*
A replica will default to Slave_Pos for using Using_Gtid; however, we
first need to test if the master supports GTIDs. If not, fall back to 'No'.
Cache the value so future RESET SLAVE commands don't revert to Slave_Pos.
*/
- bool master_supports_gtid;
+ bool master_supports_gtid= true;
/*
When TRUE, transition this server from being an active master to a slave.
@@ -375,7 +389,7 @@ class Master_info : public Slave_reporting_capability
were committed into the binary log. In particular, it merges
gtid_binlog_pos into gtid_slave_pos.
*/
- bool is_demotion;
+ bool is_demotion= false;
};
struct start_alter_thd_args
diff --git a/sql/slave.cc b/sql/slave.cc
index 616e1248738..26743e1c781 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -5111,6 +5111,7 @@ err_during_init:
mi->abort_slave= 0;
mi->slave_running= MYSQL_SLAVE_NOT_RUN;
mi->io_thd= 0;
+ mi->do_accept_own_server_id= false;
/*
Note: the order of the two following calls (first broadcast, then unlock)
is important. Otherwise a killer_thread can execute between the calls and
@@ -6252,15 +6253,6 @@ static int queue_event(Master_info* mi, const uchar *buf, ulong event_len)
bool is_malloc = false;
bool is_rows_event= false;
/*
- The flag has replicate_same_server_id semantics and is raised to accept
- a same-server-id event group by the gtid strict mode semisync slave.
- Own server-id events can appear as result of this server crash-recovery:
- the transaction was created on this server then being master, got replicated
- elsewhere right before the crash before commit;
- finally at recovery the transaction gets evicted from the server's binlog.
- */
- bool do_accept_own_server_id;
- /*
FD_q must have been prepared for the first R_a event
inside get_master_version_and_clock()
Show-up of FD:s affects checksum_alg at once because
@@ -6867,6 +6859,19 @@ dbug_gtid_accept:
++mi->events_queued_since_last_gtid;
inc_pos= event_len;
+
+ /*
+ Computing `true` here is normal for this server (now a semisync slave)
+ when it has gone through crash recovery as a former master.
+ */
+ mi->do_accept_own_server_id=
+ (s_id == global_system_variables.server_id &&
+ rpl_semi_sync_slave_enabled && opt_gtid_strict_mode &&
+ mi->using_gtid != Master_info::USE_GTID_NO &&
+ !mysql_bin_log.check_strict_gtid_sequence(event_gtid.domain_id,
+ event_gtid.server_id,
+ event_gtid.seq_no,
+ true));
// ...} eof else_likely
}
break;
@@ -7049,10 +7054,6 @@ dbug_gtid_accept:
break;
}
- do_accept_own_server_id= (s_id == global_system_variables.server_id
- && rpl_semi_sync_slave_enabled && opt_gtid_strict_mode
- && mi->using_gtid != Master_info::USE_GTID_NO);
-
/*
Integrity of Rows- event group check.
A sequence of Rows- events must end with STMT_END_F flagged one.
@@ -7143,7 +7144,7 @@ dbug_gtid_accept:
else
if ((s_id == global_system_variables.server_id &&
!(mi->rli.replicate_same_server_id ||
- do_accept_own_server_id)) ||
+ mi->do_accept_own_server_id)) ||
event_that_should_be_ignored(buf) ||
/*
the following conjunction deals with IGNORE_SERVER_IDS, if set
@@ -7203,7 +7204,7 @@ dbug_gtid_accept:
}
else
{
- if (do_accept_own_server_id)
+ if (mi->do_accept_own_server_id)
{
int2store(const_cast<uchar*>(buf + FLAGS_OFFSET),
uint2korr(buf + FLAGS_OFFSET) | LOG_EVENT_ACCEPT_OWN_F);
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index d9e04261dc8..54296af5a0b 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -4678,10 +4678,11 @@ restart:
if (unlikely(error))
{
+ /* F.ex. deadlock happened */
if (ot_ctx.can_recover_from_failed_open())
{
- // FIXME: is this really used?
- DBUG_ASSERT(0);
+ DBUG_ASSERT(ot_ctx.get_action() !=
+ Open_table_context::OT_ADD_HISTORY_PARTITION);
close_tables_for_reopen(thd, start,
ot_ctx.start_of_statement_svp());
if (ot_ctx.recover_from_failed_open())
@@ -8009,6 +8010,39 @@ bool setup_fields(THD *thd, Ref_ptr_array ref_pointer_array,
DBUG_RETURN(MY_TEST(thd->is_error()));
}
+/*
+ make list of leaves for a single TABLE_LIST
+
+ SYNOPSIS
+ make_leaves_for_single_table()
+ thd Thread handler
+ leaves List of leaf tables to be filled
+ table TABLE_LIST object to process
+ full_table_list Whether to include tables from mergeable derived table/view
+*/
+void make_leaves_for_single_table(THD *thd, List<TABLE_LIST> &leaves,
+ TABLE_LIST *table, bool& full_table_list,
+ TABLE_LIST *boundary)
+{
+ if (table == boundary)
+ full_table_list= !full_table_list;
+ if (full_table_list && table->is_merged_derived())
+ {
+ SELECT_LEX *select_lex= table->get_single_select();
+ /*
+ It's safe to use select_lex->leaf_tables because all derived
+ tables/views were already prepared and has their leaf_tables
+ set properly.
+ */
+ make_leaves_list(thd, leaves, select_lex->get_table_list(),
+ full_table_list, boundary);
+ }
+ else
+ {
+ leaves.push_back(table, thd->mem_root);
+ }
+}
+
/*
Perform checks like all given fields exists, if exists fill struct with
@@ -8035,40 +8069,79 @@ int setup_returning_fields(THD* thd, TABLE_LIST* table_list)
SYNOPSIS
make_leaves_list()
- list pointer to pointer on list first element
- tables table list
- full_table_list whether to include tables from mergeable derived table/view.
- we need them for checks for INSERT/UPDATE statements only.
-
- RETURN pointer on pointer to next_leaf of last element
+ leaves List of leaf tables to be filled
+ tables Table list
+ full_table_list Whether to include tables from mergeable derived table/view.
+ We need them for checks for INSERT/UPDATE statements only.
*/
-void make_leaves_list(THD *thd, List<TABLE_LIST> &list, TABLE_LIST *tables,
+void make_leaves_list(THD *thd, List<TABLE_LIST> &leaves, TABLE_LIST *tables,
bool full_table_list, TABLE_LIST *boundary)
{
for (TABLE_LIST *table= tables; table; table= table->next_local)
{
- if (table == boundary)
- full_table_list= !full_table_list;
- if (full_table_list && table->is_merged_derived())
- {
- SELECT_LEX *select_lex= table->get_single_select();
- /*
- It's safe to use select_lex->leaf_tables because all derived
- tables/views were already prepared and has their leaf_tables
- set properly.
- */
- make_leaves_list(thd, list, select_lex->get_table_list(),
- full_table_list, boundary);
- }
- else
- {
- list.push_back(table, thd->mem_root);
- }
+ make_leaves_for_single_table(thd, leaves, table, full_table_list,
+ boundary);
}
}
+
+/*
+ Setup the map and other attributes for a single TABLE_LIST object
+
+ SYNOPSIS
+ setup_table_attributes()
+ thd Thread handler
+ table_list TABLE_LIST object to process
+ first_select_table First table participating in SELECT for INSERT..SELECT
+ statements, NULL for other cases
+ tablenr Serial number of the table in the SQL statement
+
+ RETURN
+ false Success
+ true Failure
+*/
+bool setup_table_attributes(THD *thd, TABLE_LIST *table_list,
+ TABLE_LIST *first_select_table,
+ uint &tablenr)
+{
+ TABLE *table= table_list->table;
+ if (table)
+ table->pos_in_table_list= table_list;
+ if (first_select_table && table_list->top_table() == first_select_table)
+ {
+ /* new counting for SELECT of INSERT ... SELECT command */
+ first_select_table= 0;
+ thd->lex->first_select_lex()->insert_tables= tablenr;
+ tablenr= 0;
+ }
+ if (table_list->jtbm_subselect)
+ {
+ table_list->jtbm_table_no= tablenr;
+ }
+ else if (table)
+ {
+ table->pos_in_table_list= table_list;
+ setup_table_map(table, table_list, tablenr);
+
+ if (table_list->process_index_hints(table))
+ return true;
+ }
+ tablenr++;
+ /*
+ We test the max tables here as we setup_table_map() should not be called
+ with tablenr >= 64
+ */
+ if (tablenr > MAX_TABLES)
+ {
+ my_error(ER_TOO_MANY_TABLES, MYF(0), static_cast<int>(MAX_TABLES));
+ return true;
+ }
+ return false;
+}
+
+
/*
prepare tables
@@ -8125,7 +8198,14 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
leaves.empty();
if (select_lex->prep_leaf_list_state != SELECT_LEX::SAVED)
{
- make_leaves_list(thd, leaves, tables, full_table_list, first_select_table);
+ /*
+ For INSERT ... SELECT statements we must not include the first table
+ (where the data is being inserted into) in the list of leaves
+ */
+ TABLE_LIST *tables_for_leaves=
+ select_insert ? first_select_table : tables;
+ make_leaves_list(thd, leaves, tables_for_leaves, full_table_list,
+ first_select_table);
select_lex->prep_leaf_list_state= SELECT_LEX::READY;
select_lex->leaf_tables_exec.empty();
}
@@ -8136,40 +8216,33 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
leaves.push_back(table_list, thd->mem_root);
}
+ List_iterator<TABLE_LIST> ti(leaves);
while ((table_list= ti++))
{
- TABLE *table= table_list->table;
- if (table)
- table->pos_in_table_list= table_list;
- if (first_select_table &&
- table_list->top_table() == first_select_table)
- {
- /* new counting for SELECT of INSERT ... SELECT command */
- first_select_table= 0;
- thd->lex->first_select_lex()->insert_tables= tablenr;
- tablenr= 0;
- }
- if(table_list->jtbm_subselect)
- {
- table_list->jtbm_table_no= tablenr;
- }
- else if (table)
- {
- table->pos_in_table_list= table_list;
- setup_table_map(table, table_list, tablenr);
+ if (setup_table_attributes(thd, table_list, first_select_table, tablenr))
+ DBUG_RETURN(1);
+ }
- if (table_list->process_index_hints(table))
- DBUG_RETURN(1);
- }
- tablenr++;
+ if (select_insert)
+ {
/*
- We test the max tables here as we setup_table_map() should not be called
- with tablenr >= 64
+ The table/view in which the data is inserted must not be included into
+ the leaf_tables list. But we need this table/view to setup attributes
+ for it. So build a temporary list of leaves and setup attributes for
+ the tables included
*/
- if (tablenr > MAX_TABLES)
+ List<TABLE_LIST> leaves;
+ TABLE_LIST *table= tables;
+
+ make_leaves_for_single_table(thd, leaves, table, full_table_list,
+ first_select_table);
+
+ List_iterator<TABLE_LIST> ti(leaves);
+ while ((table_list= ti++))
{
- my_error(ER_TOO_MANY_TABLES,MYF(0), static_cast<int>(MAX_TABLES));
- DBUG_RETURN(1);
+ if (setup_table_attributes(thd, table_list, first_select_table,
+ tablenr))
+ DBUG_RETURN(1);
}
}
}
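
The setup_tables() rework extracts make_leaves_for_single_table() and setup_table_attributes() and, for INSERT ... SELECT, keeps the insert target out of the leaf-table list used for name resolution while still giving it its table number and map via a temporary list. A much-simplified standalone model of that intent follows (not the actual call sequence; the types are invented).

    #include <cstdio>
    #include <string>
    #include <vector>

    struct TableRef { std::string name; unsigned tablenr = 0; };

    // For INSERT ... SELECT, the insert target must not appear in the leaf list
    // used for name resolution, but it still needs its per-table setup.
    static std::vector<TableRef*> collect_leaves(std::vector<TableRef> &tables,
                                                 bool select_insert) {
      std::vector<TableRef*> leaves;
      unsigned tablenr = 0;
      for (std::size_t i = 0; i < tables.size(); ++i) {
        tables[i].tablenr = tablenr++;       // attribute setup happens for every table
        if (select_insert && i == 0)
          continue;                          // ... but the target stays out of the leaves
        leaves.push_back(&tables[i]);
      }
      return leaves;
    }

    int main() {
      std::vector<TableRef> t{{"target"}, {"src1"}, {"src2"}};
      std::printf("leaves: %zu\n", collect_leaves(t, true).size());
      return 0;
    }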
diff --git a/sql/sql_base.h b/sql/sql_base.h
index 06d75596827..c86a652c33a 100644
--- a/sql/sql_base.h
+++ b/sql/sql_base.h
@@ -561,6 +561,11 @@ public:
return m_timeout;
}
+ enum_open_table_action get_action() const
+ {
+ return m_action;
+ }
+
uint get_flags() const { return m_flags; }
/**
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 982afaa0560..bd536132949 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1861,7 +1861,7 @@ void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
extern std::atomic<my_thread_id> shutdown_thread_id;
void THD::awake_no_mutex(killed_state state_to_set)
{
- DBUG_ENTER("THD::awake");
+ DBUG_ENTER("THD::awake_no_mutex");
DBUG_PRINT("enter", ("this: %p current_thd: %p state: %d",
this, current_thd, (int) state_to_set));
THD_CHECK_SENTRY(this);
@@ -7358,7 +7358,7 @@ int THD::binlog_flush_pending_rows_event(bool stmt_end, bool is_transactional)
*/
bool THD::binlog_for_noop_dml(bool transactional_table)
{
- if (log_current_statement())
+ if (mysql_bin_log.is_open() && log_current_statement())
{
reset_unsafe_warnings();
if (binlog_query(THD::STMT_QUERY_TYPE, query(), query_length(),
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 8e799cc2448..283dd463fa8 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1368,8 +1368,12 @@ values_loop_end:
thd->lex->current_select->save_leaf_tables(thd);
thd->lex->current_select->first_cond_optimization= 0;
}
- if (readbuff)
- my_free(readbuff);
+
+ my_free(readbuff);
+#ifndef EMBEDDED_LIBRARY
+ if (lock_type == TL_WRITE_DELAYED && table->expr_arena)
+ table->expr_arena->free_items();
+#endif
DBUG_RETURN(FALSE);
abort:
@@ -1386,6 +1390,8 @@ abort:
*/
for (Field **ptr= table_list->table->field ; *ptr ; ptr++)
(*ptr)->free();
+ if (table_list->table->expr_arena)
+ table_list->table->expr_arena->free_items();
}
#endif
if (table != NULL)
@@ -1564,8 +1570,7 @@ static bool mysql_prepare_insert_check_table(THD *thd, TABLE_LIST *table_list,
if (insert_into_view && !fields.elements)
{
thd->lex->empty_field_list_on_rset= 1;
- if (!thd->lex->first_select_lex()->leaf_tables.head()->table ||
- table_list->is_multitable())
+ if (!table_list->table || table_list->is_multitable())
{
my_error(ER_VIEW_NO_INSERT_FIELD_LIST, MYF(0),
table_list->view_db.str, table_list->view_name.str);
@@ -3821,7 +3826,6 @@ int mysql_insert_select_prepare(THD *thd, select_result *sel_res)
if (sel_res)
sel_res->prepare(lex->returning()->item_list, NULL);
- DBUG_ASSERT(select_lex->leaf_tables.elements != 0);
List_iterator<TABLE_LIST> ti(select_lex->leaf_tables);
TABLE_LIST *table;
uint insert_tables;
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 3db784ce61d..07b894fbc2d 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -2785,7 +2785,6 @@ int Lex_input_stream::scan_ident_delimited(THD *thd,
uchar quote_char)
{
CHARSET_INFO *const cs= thd->charset();
- uint double_quotes= 0;
uchar c;
DBUG_ASSERT(m_ptr == m_tok_start + 1);
@@ -2810,7 +2809,6 @@ int Lex_input_stream::scan_ident_delimited(THD *thd,
if (yyPeek() != quote_char)
break;
c= yyGet();
- double_quotes++;
continue;
}
}
@@ -10505,11 +10503,13 @@ void LEX::relink_hack(st_select_lex *select_lex)
{
if (!select_stack_top) // Statements of the second type
{
- if (!select_lex->get_master()->get_master())
- ((st_select_lex *) select_lex->get_master())->
- set_master(&builtin_select);
- if (!builtin_select.get_slave())
- builtin_select.set_slave(select_lex->get_master());
+ if (!select_lex->outer_select() &&
+ !builtin_select.first_inner_unit())
+ {
+ builtin_select.register_unit(select_lex->master_unit(),
+ &builtin_select.context);
+ builtin_select.add_statistics(select_lex->master_unit());
+ }
}
}
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 23015933303..e58db8fa502 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -767,7 +767,6 @@ public:
}
inline st_select_lex_node* get_master() { return master; }
- inline st_select_lex_node* get_slave() { return slave; }
void include_down(st_select_lex_node *upper);
void add_slave(st_select_lex_node *slave_arg);
void include_neighbour(st_select_lex_node *before);
@@ -1749,15 +1748,6 @@ public:
Sroutine_hash_entry **sroutines_list_own_last;
uint sroutines_list_own_elements;
- /**
- Number of tables which were open by open_tables() and to be locked
- by lock_tables().
- Note that we set this member only in some cases, when this value
- needs to be passed from open_tables() to lock_tables() which are
- separated by some amount of code.
- */
- uint table_count;
-
/*
These constructor and destructor serve for creation/destruction
of Query_tables_list instances which are used as backup storage.
@@ -3452,7 +3442,7 @@ public:
stores total number of tables. For LEX representing multi-delete
holds number of tables from which we will delete records.
*/
- uint table_count;
+ uint table_count_update;
uint8 describe;
/*
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 0597b086b7b..a2d856e55f0 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -3323,6 +3323,7 @@ bool run_set_statement_if_requested(THD *thd, LEX *lex)
{
switch (v->var->option.var_type & GET_TYPE_MASK)
{
+ case GET_BIT:
case GET_BOOL:
case GET_INT:
case GET_LONG:
@@ -4853,7 +4854,7 @@ mysql_execute_command(THD *thd, bool is_called_from_prepared_stmt)
if (likely(!thd->is_fatal_error))
{
result= new (thd->mem_root) multi_delete(thd, aux_tables,
- lex->table_count);
+ lex->table_count_update);
if (likely(result))
{
if (unlikely(select_lex->vers_setup_conds(thd, aux_tables)))
@@ -9757,12 +9758,12 @@ bool multi_delete_set_locks_and_link_aux_tables(LEX *lex)
TABLE_LIST *target_tbl;
DBUG_ENTER("multi_delete_set_locks_and_link_aux_tables");
- lex->table_count= 0;
+ lex->table_count_update= 0;
for (target_tbl= lex->auxiliary_table_list.first;
target_tbl; target_tbl= target_tbl->next_local)
{
- lex->table_count++;
+ lex->table_count_update++;
/* All tables in aux_tables must be found in FROM PART */
TABLE_LIST *walk= multi_delete_table_match(lex, target_tbl, tables);
if (!walk)
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index b180bf28ba7..cca9036bdac 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1443,7 +1443,7 @@ static int mysql_test_update(Prepared_statement *stmt,
DBUG_ASSERT(update_source_table || table_list->view != 0);
DBUG_PRINT("info", ("Switch to multi-update"));
/* pass counter value */
- thd->lex->table_count= table_count;
+ thd->lex->table_count_update= table_count;
/* convert to multiupdate */
DBUG_RETURN(2);
}
diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc
index 1dfa238de50..1490ad554d7 100644
--- a/sql/sql_reload.cc
+++ b/sql/sql_reload.cc
@@ -601,6 +601,7 @@ bool flush_tables_with_read_lock(THD *thd, TABLE_LIST *all_tables)
if (table_list->is_view_or_derived())
continue;
if (thd->lex->type & REFRESH_FOR_EXPORT &&
+ table_list->table &&
!(table_list->table->file->ha_table_flags() & HA_CAN_EXPORT))
{
my_error(ER_ILLEGAL_HA, MYF(0),table_list->table->file->table_type(),
@@ -608,6 +609,7 @@ bool flush_tables_with_read_lock(THD *thd, TABLE_LIST *all_tables)
goto error_reset_bits;
}
if (thd->lex->type & REFRESH_READ_LOCK &&
+ table_list->table &&
table_list->table->file->extra(HA_EXTRA_FLUSH))
goto error_reset_bits;
}
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 3db50da3009..e46fb6b4278 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -814,7 +814,22 @@ void remove_redundant_subquery_clauses(st_select_lex *subq_select_lex)
Here SUBQ cannot be removed.
*/
if (!ord->in_field_list)
+ {
(*ord->item)->walk(&Item::eliminate_subselect_processor, FALSE, NULL);
+ /*
+ Remove from the JOIN::all_fields list any reference to the elements
+ of the eliminated GROUP BY list unless it is 'in_field_list'.
+ This is needed in order not to confuse JOIN::make_aggr_tables_info()
+ when it constructs different structure for execution phase.
+ */
+ List_iterator<Item> li(subq_select_lex->join->all_fields);
+ Item *item;
+ while ((item= li++))
+ {
+ if (item == *ord->item)
+ li.remove();
+ }
+ }
}
subq_select_lex->join->group_list= NULL;
subq_select_lex->group_list.empty();
@@ -2039,7 +2054,6 @@ JOIN::optimize_inner()
/* Merge all mergeable derived tables/views in this SELECT. */
if (select_lex->handle_derived(thd->lex, DT_MERGE))
DBUG_RETURN(TRUE);
- table_count= select_lex->leaf_tables.elements;
}
if (select_lex->first_cond_optimization &&
@@ -2087,8 +2101,6 @@ JOIN::optimize_inner()
eval_select_list_used_tables();
- table_count= select_lex->leaf_tables.elements;
-
if (select_lex->options & OPTION_SCHEMA_TABLE &&
optimize_schema_tables_memory_usage(select_lex->leaf_tables))
DBUG_RETURN(1);
@@ -9332,7 +9344,8 @@ greedy_search(JOIN *join,
picked semi-join operation is in best_pos->...picker, but we need to
update the global state in the JOIN object, too.
*/
- update_sj_state(join, best_table, idx, remaining_tables);
+ if (!join->emb_sjm_nest)
+ update_sj_state(join, best_table, idx, remaining_tables);
/* find the position of 'best_table' in 'join->best_ref' */
best_idx= idx;
@@ -14757,7 +14770,6 @@ void JOIN::cleanup(bool full)
/* Free the original optimized join created for the group_by_handler */
join_tab= original_join_tab;
original_join_tab= 0;
- table_count= original_table_count;
}
if (join_tab)
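
remove_redundant_subquery_clauses() now also walks JOIN::all_fields and removes any entry that is the very same Item as an eliminated GROUP BY expression, using List_iterator::remove() during the scan, so JOIN::make_aggr_tables_info() never sees a dangling reference. A standalone sketch of that remove-by-identity-while-iterating pattern, with std::list standing in for the server's List<Item>.

    #include <cstdio>
    #include <list>

    struct Item { int id; };

    // Drop every element of 'fields' that is the very same object as 'dead'
    // (pointer identity, not value equality), while iterating once.
    static void purge(std::list<Item*> &fields, const Item *dead) {
      for (auto it = fields.begin(); it != fields.end(); ) {
        if (*it == dead)
          it = fields.erase(it);   // erase returns the next valid iterator
        else
          ++it;
      }
    }

    int main() {
      Item a{1}, b{2};
      std::list<Item*> fields{&a, &b, &a};
      purge(fields, &a);
      std::printf("remaining: %zu\n", fields.size());
      return 0;
    }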
diff --git a/sql/sql_select.h b/sql/sql_select.h
index e7c8b3527f6..6c9b0eca4f6 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -1302,7 +1302,7 @@ public:
Pushdown_query *pushdown_query;
JOIN_TAB *original_join_tab;
- uint original_table_count, sort_space;
+ uint sort_space;
/******* Join optimization state members start *******/
/*
diff --git a/sql/sql_sequence.cc b/sql/sql_sequence.cc
index 07571c3bbac..11b5109c349 100644
--- a/sql/sql_sequence.cc
+++ b/sql/sql_sequence.cc
@@ -706,7 +706,9 @@ longlong SEQUENCE::next_value(TABLE *table, bool second_round, int *error)
{
longlong res_value, org_reserved_until, add_to;
bool out_of_values;
+ THD *thd= table->in_use;
DBUG_ENTER("SEQUENCE::next_value");
+ DBUG_ASSERT(thd);
*error= 0;
if (!second_round)
@@ -771,7 +773,8 @@ longlong SEQUENCE::next_value(TABLE *table, bool second_round, int *error)
DBUG_RETURN(next_value(table, 1, error));
}
- if (unlikely((*error= write(table, 0))))
+ if (unlikely((*error= write(table, thd->variables.binlog_row_image !=
+ BINLOG_ROW_IMAGE_MINIMAL))))
{
reserved_until= org_reserved_until;
next_free_value= res_value;
@@ -838,7 +841,9 @@ int SEQUENCE::set_value(TABLE *table, longlong next_val, ulonglong next_round,
longlong org_reserved_until= reserved_until;
longlong org_next_free_value= next_free_value;
ulonglong org_round= round;
+ THD *thd= table->in_use;
DBUG_ENTER("SEQUENCE::set_value");
+ DBUG_ASSERT(thd);
write_lock(table);
if (is_used)
@@ -877,7 +882,8 @@ int SEQUENCE::set_value(TABLE *table, longlong next_val, ulonglong next_round,
needs_to_be_stored)
{
reserved_until= next_free_value;
- if (write(table, 0))
+ if (write(table,
+ thd->variables.binlog_row_image != BINLOG_ROW_IMAGE_MINIMAL))
{
reserved_until= org_reserved_until;
next_free_value= org_next_free_value;
diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc
index 84d0902193b..40760c1e2eb 100644
--- a/sql/sql_statistics.cc
+++ b/sql/sql_statistics.cc
@@ -1,5 +1,5 @@
/* Copyright (C) 2009 MySQL AB
- Copyright (c) 2019, 2020, MariaDB Corporation.
+ Copyright (c) 2019, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -2623,7 +2623,6 @@ int collect_statistics_for_index(THD *thd, TABLE *table, uint index)
{
int rc= 0;
KEY *key_info= &table->key_info[index];
- ha_rows rows= 0;
DBUG_ENTER("collect_statistics_for_index");
@@ -2658,7 +2657,6 @@ int collect_statistics_for_index(THD *thd, TABLE *table, uint index)
if (rc)
break;
- rows++;
index_prefix_calc.add();
rc= table->file->ha_index_next(table->record[0]);
}
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 8adec337942..aa998b02384 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -1297,7 +1297,7 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables,
StringBuffer<160> unknown_tables(system_charset_info);
DDL_LOG_STATE local_ddl_log_state;
const char *comment_start;
- uint not_found_errors= 0, table_count= 0, non_temp_tables_count= 0;
+ uint table_count= 0, non_temp_tables_count= 0;
int error= 0;
uint32 comment_len;
bool trans_tmp_table_deleted= 0, non_trans_tmp_table_deleted= 0;
@@ -1428,7 +1428,6 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables,
unknown_tables.append(&table_name);
unknown_tables.append(',');
error= ENOENT;
- not_found_errors++;
continue;
}
@@ -1511,7 +1510,6 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables,
unknown_tables.append(&table_name);
unknown_tables.append(',');
error= ENOENT;
- not_found_errors++;
continue;
}
@@ -1757,7 +1755,6 @@ report_error:
}
else
{
- not_found_errors++;
if (unknown_tables.append(tbl_name) || unknown_tables.append(','))
{
error= 1;
@@ -2962,7 +2959,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
List_iterator<Key> key_iterator(alter_info->key_list);
List_iterator<Key> key_iterator2(alter_info->key_list);
- uint key_parts=0, fk_key_count=0;
+ uint key_parts=0;
bool primary_key=0,unique_key=0;
Key *key, *key2;
uint tmp, key_number;
@@ -2978,7 +2975,6 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
"(none)" , key->type));
if (key->type == Key::FOREIGN_KEY)
{
- fk_key_count++;
Foreign_key *fk_key= (Foreign_key*) key;
if (fk_key->validate(alter_info->create_list))
DBUG_RETURN(TRUE);
diff --git a/sql/sql_type.cc b/sql/sql_type.cc
index 0ede2b3fee2..98b71246472 100644
--- a/sql/sql_type.cc
+++ b/sql/sql_type.cc
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2015, 2021, MariaDB
+ Copyright (c) 2015, 2022, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1746,7 +1746,7 @@ Type_handler_time_common::type_handler_for_native_format() const
const Type_handler *Type_handler_typelib::type_handler_for_item_field() const
{
- return &type_handler_string;
+ return &type_handler_varchar;
}
@@ -1959,6 +1959,9 @@ Type_collection_std::aggregate_for_comparison(const Type_handler *ha,
return ha;
}
}
+ if ((a == INT_RESULT && b == STRING_RESULT) ||
+ (b == INT_RESULT && a == STRING_RESULT))
+ return &type_handler_newdecimal;
if ((a == INT_RESULT || a == DECIMAL_RESULT) &&
(b == INT_RESULT || b == DECIMAL_RESULT))
return &type_handler_newdecimal;
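
Type_collection_std::aggregate_for_comparison() gains a rule that resolves an INT_RESULT/STRING_RESULT pair to DECIMAL, so such comparisons can use exact decimal arithmetic rather than falling through to the floating-point path. A minimal standalone model of the added rule; the enum and fallback are simplified stand-ins for the type-handler machinery.

    #include <cstdio>

    enum Result { INT_RES, DECIMAL_RES, STRING_RES, REAL_RES };  // simplified stand-ins

    // Decide the comparison type for a pair of operands, mirroring the added rule:
    // INT compared with STRING is now handled as DECIMAL.
    static Result aggregate_for_comparison(Result a, Result b) {
      if ((a == INT_RES && b == STRING_RES) || (b == INT_RES && a == STRING_RES))
        return DECIMAL_RES;
      if ((a == INT_RES || a == DECIMAL_RES) && (b == INT_RES || b == DECIMAL_RES))
        return DECIMAL_RES;
      return REAL_RES;  // fallback, heavily simplified
    }

    int main() {
      std::printf("%d\n", aggregate_for_comparison(INT_RES, STRING_RES) == DECIMAL_RES);
      return 0;
    }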
diff --git a/sql/sql_type_fixedbin.h b/sql/sql_type_fixedbin.h
index 6d072a9b809..077e4039643 100644
--- a/sql/sql_type_fixedbin.h
+++ b/sql/sql_type_fixedbin.h
@@ -1190,7 +1190,6 @@ public:
bool val_native(Native *to) override
{
DBUG_ASSERT(marked_for_read());
- DBUG_ASSERT(!is_null());
if (to->alloc(FbtImpl::binary_length()))
return true;
to->length(FbtImpl::binary_length());
@@ -1201,7 +1200,6 @@ public:
Fbt to_fbt() const
{
DBUG_ASSERT(marked_for_read());
- DBUG_ASSERT(!is_null());
return Fbt::record_to_memory((const char*) ptr);
}
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 903b4ead6c0..a8862113cf7 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -427,7 +427,7 @@ int mysql_update(THD *thd,
DBUG_ASSERT(update_source_table || table_list->view != 0);
DBUG_PRINT("info", ("Switch to multi-update"));
/* pass counter value */
- thd->lex->table_count= table_count;
+ thd->lex->table_count_update= table_count;
if (thd->lex->period_conditions.is_set())
{
my_error(ER_NOT_SUPPORTED_YET, MYF(0),
@@ -1868,7 +1868,7 @@ int mysql_multi_update_prepare(THD *thd)
TABLE_LIST *table_list= lex->query_tables;
TABLE_LIST *tl;
Multiupdate_prelocking_strategy prelocking_strategy;
- uint table_count= lex->table_count;
+ uint table_count= lex->table_count_update;
DBUG_ENTER("mysql_multi_update_prepare");
/*
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 9f9ef6335f3..6dedd5e85be 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -699,11 +699,9 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
if (lex->view_list.elements)
{
List_iterator_fast<LEX_CSTRING> names(lex->view_list);
- LEX_CSTRING *name;
- int i;
-
+
buff.append('(');
- for (i= 0; (name= names++); i++)
+ while (LEX_CSTRING *name= names++)
{
append_identifier(thd, &buff, name);
buff.append(", ", 2);
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 6f7bc8e4306..7e7b2cca390 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -2532,6 +2532,7 @@ create:
{
if (Lex->main_select_push())
MYSQL_YYABORT;
+ Lex->inc_select_stack_outer_barrier();
if (Lex->add_create_view(thd, $1 | $5,
DTYPE_ALGORITHM_UNDEFINED, $3, $6))
MYSQL_YYABORT;
@@ -2547,6 +2548,7 @@ create:
MYSQL_YYABORT;
if (Lex->main_select_push())
MYSQL_YYABORT;
+ Lex->inc_select_stack_outer_barrier();
}
view_list_opt AS view_select
{
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index fc9b4262394..e5b508c1719 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -2049,7 +2049,10 @@ Sys_gtid_strict_mode(
"gtid_strict_mode",
"Enforce strict seq_no ordering of events in the binary log. Slave "
"stops with an error if it encounters an event that would cause it to "
- "generate an out-of-order binlog if executed.",
+ "generate an out-of-order binlog if executed. "
+ "When ON the same server-id semisync-replicated transactions that "
+ "duplicate exising ones in binlog are ignored without error "
+ "and slave interruption.",
GLOBAL_VAR(opt_gtid_strict_mode),
CMD_LINE(OPT_ARG), DEFAULT(FALSE));
diff --git a/sql/temporary_tables.cc b/sql/temporary_tables.cc
index 73f1f9f7a99..5aacd0e6e99 100644
--- a/sql/temporary_tables.cc
+++ b/sql/temporary_tables.cc
@@ -626,6 +626,10 @@ bool THD::drop_temporary_table(TABLE *table, bool *is_trans, bool delete_table)
DBUG_PRINT("tmptable", ("Dropping table: '%s'.'%s'",
table->s->db.str, table->s->table_name.str));
+ // close all handlers in case it is statement abort and some can be left
+ if (is_error())
+ table->file->ha_reset();
+
locked= lock_temporary_tables();
share= tmp_table_share(table);
diff --git a/sql/threadpool_winsockets.cc b/sql/threadpool_winsockets.cc
index 41167781283..a214cda2a5c 100644
--- a/sql/threadpool_winsockets.cc
+++ b/sql/threadpool_winsockets.cc
@@ -114,8 +114,17 @@ void AIO_buffer_cache::clear()
if (!m_base)
return;
- /* Check that all items are returned to the cache. */
- DBUG_ASSERT(m_cache.size() == m_elements);
+ std::unique_lock<std::mutex> lk(m_mtx, std::defer_lock);
+ for(;;)
+ {
+ if (lk.try_lock())
+ {
+ if (m_cache.size() == m_elements)
+ break;
+ lk.unlock();
+ }
+ Sleep(100);
+ }
VirtualFree(m_base, 0, MEM_RELEASE);
m_cache.clear();
m_base= 0;
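
AIO_buffer_cache::clear() no longer asserts that every buffer has been returned; it polls under m_mtx (try_lock, compare m_cache.size() with m_elements, Sleep(100), retry) and frees the backing region only once the cache is complete. A standalone sketch of that wait-until-returned pattern, using std::this_thread::sleep_for in place of the Win32 Sleep() call.

    #include <chrono>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    struct BufferCache {
      std::mutex mtx;
      std::vector<int> cache;    // buffers currently returned
      std::size_t elements = 0;  // buffers handed out in total

      // Block until every outstanding buffer is back, then release storage.
      void clear() {
        std::unique_lock<std::mutex> lk(mtx, std::defer_lock);
        for (;;) {
          if (lk.try_lock()) {
            if (cache.size() == elements)
              break;                       // all buffers returned; safe to free
            lk.unlock();
          }
          std::this_thread::sleep_for(std::chrono::milliseconds(100));
        }
        cache.clear();                     // still holding the lock here
      }
    };

    int main() {
      BufferCache c;
      c.elements = 1;
      std::thread returner([&c] {
        std::this_thread::sleep_for(std::chrono::milliseconds(250));
        std::lock_guard<std::mutex> g(c.mtx);
        c.cache.push_back(0);              // late return of the last buffer
      });
      c.clear();
      returner.join();
      std::printf("cleared\n");
      return 0;
    }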