Diffstat (limited to 'sql')
-rw-r--r--  sql/field.cc          6
-rw-r--r--  sql/ha_partition.cc   36
-rw-r--r--  sql/ha_partition.h    2
-rw-r--r--  sql/item_jsonfunc.cc  143
-rw-r--r--  sql/log.cc            72
-rw-r--r--  sql/opt_subselect.cc  1
-rw-r--r--  sql/sql_base.cc       178
-rw-r--r--  sql/sql_class.cc      2
-rw-r--r--  sql/sql_insert.cc     14
-rw-r--r--  sql/sql_lex.cc        12
-rw-r--r--  sql/sql_lex.h         12
-rw-r--r--  sql/sql_parse.cc      7
-rw-r--r--  sql/sql_prepare.cc    2
-rw-r--r--  sql/sql_select.cc     22
-rw-r--r--  sql/sql_select.h      1
-rw-r--r--  sql/sql_sequence.cc   10
-rw-r--r--  sql/sql_type.cc       5
-rw-r--r--  sql/sql_update.cc     4
-rw-r--r--  sql/sql_yacc.yy       2
19 files changed, 405 insertions(+), 126 deletions(-)
diff --git a/sql/field.cc b/sql/field.cc
index d12ff37fba8..b6d1886be6b 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -7529,7 +7529,7 @@ my_decimal *Field_string::val_decimal(my_decimal *decimal_value)
THD *thd= get_thd();
Converter_str2my_decimal_with_warn(thd,
Warn_filter_string(thd, this),
- E_DEC_FATAL_ERROR,
+ E_DEC_FATAL_ERROR & ~E_DEC_BAD_NUM,
Field_string::charset(),
(const char *) ptr,
field_length, decimal_value);
@@ -7890,7 +7890,7 @@ my_decimal *Field_varstring::val_decimal(my_decimal *decimal_value)
DBUG_ASSERT(marked_for_read());
THD *thd= get_thd();
Converter_str2my_decimal_with_warn(thd, Warn_filter(thd),
- E_DEC_FATAL_ERROR,
+ E_DEC_FATAL_ERROR & ~E_DEC_BAD_NUM,
Field_varstring::charset(),
(const char *) get_data(),
get_length(), decimal_value);
@@ -8736,7 +8736,7 @@ my_decimal *Field_blob::val_decimal(my_decimal *decimal_value)
THD *thd= get_thd();
Converter_str2my_decimal_with_warn(thd, Warn_filter(thd),
- E_DEC_FATAL_ERROR,
+ E_DEC_FATAL_ERROR & ~E_DEC_BAD_NUM,
Field_blob::charset(),
blob, length, decimal_value);
return decimal_value;
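All three field.cc hunks make the same change: E_DEC_BAD_NUM is dropped from the mask passed to Converter_str2my_decimal_with_warn, so a malformed numeric string is reported through the warning filter instead of escalating to an error. A self-contained sketch of the mask arithmetic; the flag values below are assumptions modeled on include/decimal.h:

#include <cstdio>

// Assumed flag values, modeled on include/decimal.h.
enum {
  E_DEC_TRUNCATED = 1, E_DEC_OVERFLOW = 2, E_DEC_DIV_ZERO = 4,
  E_DEC_BAD_NUM = 8, E_DEC_OOM = 16,
  E_DEC_FATAL_ERROR = E_DEC_OVERFLOW | E_DEC_DIV_ZERO | E_DEC_BAD_NUM | E_DEC_OOM
};

int main()
{
  unsigned mask = E_DEC_FATAL_ERROR & ~E_DEC_BAD_NUM;
  // Before: a bad number matched the fatal mask and raised an error.
  // After: it falls through to the converter's warning path.
  printf("BAD_NUM fatal before: %d, after: %d\n",
         (E_DEC_FATAL_ERROR & E_DEC_BAD_NUM) != 0,
         (mask & E_DEC_BAD_NUM) != 0);
  return 0;
}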
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index e0a02c0c7b5..09f494268d5 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -4509,15 +4509,15 @@ int ha_partition::write_row(const uchar * buf)
if (have_auto_increment)
{
if (!table_share->next_number_keypart)
- update_next_auto_inc_val();
- error= update_auto_increment();
+ if (unlikely(error= update_next_auto_inc_val()))
+ goto exit;
/*
If we have failed to set the auto-increment value for this row,
it is highly likely that we will not be able to insert it into
the correct partition. We must check and fail if necessary.
*/
- if (unlikely(error))
+ if (unlikely(error= update_auto_increment()))
goto exit;
/*
@@ -8475,6 +8475,7 @@ int ha_partition::compare_number_of_records(ha_partition *me,
int ha_partition::info(uint flag)
{
+ int error;
uint no_lock_flag= flag & HA_STATUS_NO_LOCK;
uint extra_var_flag= flag & HA_STATUS_VARIABLE_EXTRA;
DBUG_ENTER("ha_partition::info");
@@ -8527,7 +8528,11 @@ int ha_partition::info(uint flag)
break;
}
file= *file_array;
- file->info(HA_STATUS_AUTO | no_lock_flag);
+ if ((error= file->info(HA_STATUS_AUTO | no_lock_flag)))
+ {
+ unlock_auto_increment();
+ DBUG_RETURN(error);
+ }
set_if_bigger(auto_increment_value,
file->stats.auto_increment_value);
} while (*(++file_array));
@@ -8584,7 +8589,8 @@ int ha_partition::info(uint flag)
i= bitmap_get_next_set(&m_part_info->read_partitions, i))
{
file= m_file[i];
- file->info(HA_STATUS_VARIABLE | no_lock_flag | extra_var_flag);
+ if ((error= file->info(HA_STATUS_VARIABLE | no_lock_flag | extra_var_flag)))
+ DBUG_RETURN(error);
stats.records+= file->stats.records;
stats.deleted+= file->stats.deleted;
stats.data_file_length+= file->stats.data_file_length;
@@ -8673,7 +8679,8 @@ int ha_partition::info(uint flag)
if (!(flag & HA_STATUS_VARIABLE) ||
!bitmap_is_set(&(m_part_info->read_partitions),
(uint) (file_array - m_file)))
- file->info(HA_STATUS_VARIABLE | no_lock_flag | extra_var_flag);
+ if ((error= file->info(HA_STATUS_VARIABLE | no_lock_flag | extra_var_flag)))
+ DBUG_RETURN(error);
if (file->stats.records > max_records || !handler_instance_set)
{
handler_instance_set= 1;
@@ -8694,7 +8701,8 @@ int ha_partition::info(uint flag)
this);
file= m_file[handler_instance];
- file->info(HA_STATUS_CONST | no_lock_flag);
+ if ((error= file->info(HA_STATUS_CONST | no_lock_flag)))
+ DBUG_RETURN(error);
stats.block_size= file->stats.block_size;
stats.create_time= file->stats.create_time;
ref_length= m_ref_length;
@@ -8710,7 +8718,8 @@ int ha_partition::info(uint flag)
Note: not all engines support HA_STATUS_ERRKEY, so set errkey.
*/
file->errkey= errkey;
- file->info(HA_STATUS_ERRKEY | no_lock_flag);
+ if ((error= file->info(HA_STATUS_ERRKEY | no_lock_flag)))
+ DBUG_RETURN(error);
errkey= file->errkey;
}
if (flag & HA_STATUS_TIME)
@@ -8727,7 +8736,8 @@ int ha_partition::info(uint flag)
do
{
file= *file_array;
- file->info(HA_STATUS_TIME | no_lock_flag);
+ if ((error= file->info(HA_STATUS_TIME | no_lock_flag)))
+ DBUG_RETURN(error);
if (file->stats.update_time > stats.update_time)
stats.update_time= file->stats.update_time;
} while (*(++file_array));
@@ -10745,11 +10755,11 @@ int ha_partition::cmp_ref(const uchar *ref1, const uchar *ref2)
the underlying partitions require that the value should be re-calculated
*/
-void ha_partition::update_next_auto_inc_val()
+int ha_partition::update_next_auto_inc_val()
{
- if (!part_share->auto_inc_initialized ||
- need_info_for_auto_inc())
- info(HA_STATUS_AUTO);
+ if (!part_share->auto_inc_initialized || need_info_for_auto_inc())
+ return info(HA_STATUS_AUTO);
+ return 0;
}
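The common thread in the ha_partition.cc hunks: handler::info() returns a status code that was previously discarded, and every call site now checks and propagates it; update_next_auto_inc_val() changes from void to int for the same reason. A minimal, self-contained sketch of the pattern, with hypothetical names:

#include <cstdio>

struct Part { bool crashed; };

// Stand-in for handler::info(): returns 0 on success, an error code otherwise.
static int part_info(Part *p) { return p->crashed ? 126 : 0; }

// After the change, the first failing partition aborts the scan and the
// error reaches the caller, instead of being silently ignored.
static int info_all_parts(Part *parts, int n)
{
  int error;
  for (int i = 0; i < n; i++)
    if ((error = part_info(&parts[i])))
      return error;
  return 0;
}

int main()
{
  Part parts[] = {{false}, {true}, {false}};
  printf("info_all_parts -> %d\n", info_all_parts(parts, 3));
  return 0;
}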
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index c064f7662ce..4a4b899708e 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -1366,7 +1366,7 @@ public:
void release_auto_increment() override;
private:
int reset_auto_increment(ulonglong value) override;
- void update_next_auto_inc_val();
+ int update_next_auto_inc_val();
virtual void lock_auto_increment()
{
/* lock already taken */
diff --git a/sql/item_jsonfunc.cc b/sql/item_jsonfunc.cc
index f62b683e6ee..8299192fa8d 100644
--- a/sql/item_jsonfunc.cc
+++ b/sql/item_jsonfunc.cc
@@ -18,7 +18,22 @@
#include "sql_priv.h"
#include "sql_class.h"
#include "item.h"
+#include "sql_parse.h" // For check_stack_overrun
+/*
+  Allocate memory and *also* use it (reading from and writing to it),
+  because some build configurations cause the compiler to optimize out
+  stack_used_up. Since the alloca() here depends on stack_used_up, it
+  would then not execute correctly, and json_debug_nonembedded would fail
+  (--error ER_STACK_OVERRUN_NEED_MORE would not occur).
+*/
+#define ALLOCATE_MEM_ON_STACK(A) do \
+ { \
+ uchar *array= (uchar*)alloca(A); \
+ bzero(array, A); \
+ my_checksum(0, array, A); \
+ } while(0)
/*
Compare ASCII string against the string with the specified
@@ -128,6 +143,113 @@ static int append_tab(String *js, int depth, int tab_size)
return 0;
}
+int json_path_parts_compare(
+ const json_path_step_t *a, const json_path_step_t *a_end,
+ const json_path_step_t *b, const json_path_step_t *b_end,
+ enum json_value_types vt)
+{
+ int res, res2;
+
+ long arbitrary_var;
+ long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+ DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+ {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+ if (check_stack_overrun(current_thd, STACK_MIN_SIZE, NULL))
+ return 1;
+
+ while (a <= a_end)
+ {
+ if (b > b_end)
+ {
+ while (vt != JSON_VALUE_ARRAY &&
+ (a->type & JSON_PATH_ARRAY_WILD) == JSON_PATH_ARRAY &&
+ a->n_item == 0)
+ {
+ if (++a > a_end)
+ return 0;
+ }
+ return -2;
+ }
+
+ DBUG_ASSERT((b->type & (JSON_PATH_WILD | JSON_PATH_DOUBLE_WILD)) == 0);
+
+
+ if (a->type & JSON_PATH_ARRAY)
+ {
+ if (b->type & JSON_PATH_ARRAY)
+ {
+ if ((a->type & JSON_PATH_WILD) || a->n_item == b->n_item)
+ goto step_fits;
+ goto step_failed;
+ }
+ if ((a->type & JSON_PATH_WILD) == 0 && a->n_item == 0)
+ goto step_fits_autowrap;
+ goto step_failed;
+ }
+ else /* JSON_PATH_KEY */
+ {
+ if (!(b->type & JSON_PATH_KEY))
+ goto step_failed;
+
+ if (!(a->type & JSON_PATH_WILD) &&
+ (a->key_end - a->key != b->key_end - b->key ||
+ memcmp(a->key, b->key, a->key_end - a->key) != 0))
+ goto step_failed;
+
+ goto step_fits;
+ }
+step_failed:
+ if (!(a->type & JSON_PATH_DOUBLE_WILD))
+ return -1;
+ b++;
+ continue;
+
+step_fits:
+ b++;
+ if (!(a->type & JSON_PATH_DOUBLE_WILD))
+ {
+ a++;
+ continue;
+ }
+
+ /* Double wild handling needs recursions. */
+ res= json_path_parts_compare(a+1, a_end, b, b_end, vt);
+ if (res == 0)
+ return 0;
+
+ res2= json_path_parts_compare(a, a_end, b, b_end, vt);
+
+ return (res2 >= 0) ? res2 : res;
+
+step_fits_autowrap:
+ if (!(a->type & JSON_PATH_DOUBLE_WILD))
+ {
+ a++;
+ continue;
+ }
+
+ /* Double wild handling needs recursions. */
+ res= json_path_parts_compare(a+1, a_end, b+1, b_end, vt);
+ if (res == 0)
+ return 0;
+
+ res2= json_path_parts_compare(a, a_end, b+1, b_end, vt);
+
+ return (res2 >= 0) ? res2 : res;
+
+ }
+
+ return b <= b_end;
+}
+
+
+int json_path_compare(const json_path_t *a, const json_path_t *b,
+ enum json_value_types vt)
+{
+ return json_path_parts_compare(a->steps+1, a->last_step,
+ b->steps+1, b->last_step, vt);
+}
+
static int json_nice(json_engine_t *je, String *nice_js,
Item_func_json_format::formats mode, int tab_size=4)
@@ -1028,6 +1150,12 @@ static int check_contains(json_engine_t *js, json_engine_t *value)
{
json_engine_t loc_js;
bool set_js;
+ long arbitrary_var;
+ long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+ DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+ {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+ if (check_stack_overrun(current_thd, STACK_MIN_SIZE, NULL))
+ return 1;
switch (js->value_type)
{
@@ -1991,6 +2119,14 @@ err_return:
static int do_merge(String *str, json_engine_t *je1, json_engine_t *je2)
{
+
+ long arbitrary_var;
+ long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+ DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+ {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+ if (check_stack_overrun(current_thd, STACK_MIN_SIZE, NULL))
+ return 1;
+
if (json_read_value(je1) || json_read_value(je2))
return 1;
@@ -2325,6 +2461,13 @@ static int copy_value_patch(String *str, json_engine_t *je)
static int do_merge_patch(String *str, json_engine_t *je1, json_engine_t *je2,
bool *empty_result)
{
+ long arbitrary_var;
+ long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var));
+ DBUG_EXECUTE_IF("json_check_min_stack_requirement",
+ {ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE);});
+ if (check_stack_overrun(current_thd, STACK_MIN_SIZE, NULL))
+ return 1;
+
if (json_read_value(je1) || json_read_value(je2))
return 1;
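The four insertions above guard recursive JSON routines (path comparison, JSON_CONTAINS, JSON_MERGE, JSON_MERGE_PATCH) with one prologue: measure how much stack is already used, optionally burn the remainder under the json_check_min_stack_requirement debug keyword, and bail out through check_stack_overrun() before recursing. A self-contained toy version of the guard, with hypothetical thresholds, showing how deep recursion becomes a clean error instead of a crash:

#include <cstdio>
#include <cstdlib>

static char *stack_base;                      // recorded at thread entry
static const long STACK_BUDGET = 256 * 1024;  // hypothetical thread stack size
static const long STACK_MARGIN = 16 * 1024;   // plays the role of STACK_MIN_SIZE

static bool stack_overrun()
{
  char here;
  long used = labs((long)(stack_base - &here));
  return used + STACK_MARGIN > STACK_BUDGET;
}

static int depth_of(const char *json)         // toy recursive descent
{
  if (stack_overrun())
    return -1;                                // maps to ER_STACK_OVERRUN_NEED_MORE
  if (*json != '[')
    return 0;
  int inner = depth_of(json + 1);
  return inner < 0 ? inner : inner + 1;
}

int main()
{
  char base;
  stack_base = &base;
  printf("depth: %d\n", depth_of("[[[1]]]"));
  return 0;
}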
diff --git a/sql/log.cc b/sql/log.cc
index 11dd979715c..15de3f46275 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -2238,7 +2238,7 @@ static int binlog_rollback(handlerton *hton, THD *thd, bool all)
error |= binlog_commit_flush_stmt_cache(thd, all, cache_mngr);
}
- if (cache_mngr->trx_cache.empty() &&
+ if (!cache_mngr->trx_cache.has_incident() && cache_mngr->trx_cache.empty() &&
thd->transaction->xid_state.get_state_code() != XA_PREPARED)
{
/*
@@ -5929,7 +5929,6 @@ void THD::binlog_prepare_for_row_logging()
bool THD::binlog_write_annotated_row(Log_event_writer *writer)
{
- int error;
DBUG_ENTER("THD::binlog_write_annotated_row");
if (!(IF_WSREP(!wsrep_fragments_certified_for_stmt(this), true) &&
@@ -5938,13 +5937,7 @@ bool THD::binlog_write_annotated_row(Log_event_writer *writer)
DBUG_RETURN(0);
Annotate_rows_log_event anno(this, 0, false);
- if (unlikely((error= writer->write(&anno))))
- {
- if (my_errno == EFBIG)
- writer->set_incident();
- DBUG_RETURN(error);
- }
- DBUG_RETURN(0);
+ DBUG_RETURN(writer->write(&anno));
}
@@ -6017,21 +6010,22 @@ bool THD::binlog_write_table_maps()
/**
- This function writes a table map to the binary log.
- Note that in order to keep the signature uniform with related methods,
- we use a redundant parameter to indicate whether a transactional table
- was changed or not.
+ This function writes a table map to the binary log.
- @param table a pointer to the table.
- @param with_annotate If true call binlog_write_annotated_row()
+ If an error occurs while writing events and rollback is not possible, e.g.
+ due to the statement modifying a non-transactional table, an incident event
+ is logged.
+ @param table a pointer to the table.
+ @param with_annotate @c true to write an annotate event before writing
+ the table_map event, @c false otherwise.
@return
nonzero if an error pops up when writing the table map event.
*/
bool THD::binlog_write_table_map(TABLE *table, bool with_annotate)
{
- int error;
+ int error= 1;
bool is_transactional= table->file->row_logging_has_trans;
DBUG_ENTER("THD::binlog_write_table_map");
DBUG_PRINT("enter", ("table: %p (%s: #%lu)",
@@ -6057,12 +6051,34 @@ bool THD::binlog_write_table_map(TABLE *table, bool with_annotate)
if (with_annotate)
if (binlog_write_annotated_row(&writer))
- DBUG_RETURN(1);
+ goto write_err;
+
+ DBUG_EXECUTE_IF("table_map_write_error",
+ {
+ if (is_transactional)
+ {
+ my_errno= EFBIG;
+ goto write_err;
+ }
+ });
if (unlikely((error= writer.write(&the_event))))
- DBUG_RETURN(error);
+ goto write_err;
DBUG_RETURN(0);
+
+write_err:
+ mysql_bin_log.set_write_error(this, is_transactional);
+ /*
+    For a non-transactional engine, or a multi-statement transaction with
+    mixed engines, data has been written to the table but writing to the
+    binary log failed. In these scenarios a rollback is not possible, so
+    report an incident.
+ */
+ if (mysql_bin_log.check_write_error(this) && cache_data &&
+ lex->stmt_accessed_table(LEX::STMT_WRITES_NON_TRANS_TABLE) &&
+ table->current_lock == F_WRLCK)
+ cache_data->set_incident();
+ DBUG_RETURN(error);
}
@@ -7498,7 +7514,9 @@ bool MYSQL_BIN_LOG::write_incident(THD *thd)
if (likely(is_open()))
{
prev_binlog_id= current_binlog_id;
- if (likely(!(error= write_incident_already_locked(thd))) &&
+ if (likely(
+ !(error= DBUG_EVALUATE_IF("incident_event_write_error", 1,
+ write_incident_already_locked(thd)))) &&
likely(!(error= flush_and_sync(0))))
{
update_binlog_end_pos();
@@ -7527,6 +7545,22 @@ bool MYSQL_BIN_LOG::write_incident(THD *thd)
mysql_mutex_unlock(&LOCK_log);
}
+ /*
+    After writing the incident event, check thd->is_error() and print the
+    relevant error message to the error log.
+ */
+ if (thd->is_error())
+ {
+ sql_print_error("Write to binary log failed: "
+ "%s. An incident event is written to binary log "
+ "and slave will be stopped.\n",
+ thd->get_stmt_da()->message());
+ }
+ if (error)
+ {
+ sql_print_error("Incident event write to the binary log file failed.");
+ }
+
DBUG_RETURN(error);
}
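Several of the log.cc hunks rely on fault injection: DBUG_EXECUTE_IF("table_map_write_error", ...) forces my_errno to EFBIG, and DBUG_EVALUATE_IF("incident_event_write_error", 1, ...) forces the incident write itself to fail, so the new error-reporting paths can be exercised from tests. A toy model of the evaluate-if mechanism; the real macros live in my_dbug.h, and everything below is a hypothetical stand-in:

#include <cstdio>
#include <cstring>

// Hypothetical stand-in for the session's set of enabled debug keywords.
static const char *debug_keywords = "incident_event_write_error";

static bool dbug_keyword_set(const char *kw)
{
  return strstr(debug_keywords, kw) != nullptr;
}

// Shape of the real macro: evaluate 'on' when the keyword is enabled,
// otherwise evaluate 'off' (the normal code path).
#define DBUG_EVALUATE_IF(kw, on, off) (dbug_keyword_set(kw) ? (on) : (off))

static int real_write() { return 0; }  // pretend the binlog write succeeds

int main()
{
  int error = DBUG_EVALUATE_IF("incident_event_write_error", 1, real_write());
  if (error)
    fprintf(stderr, "Incident event write to the binary log file failed.\n");
  return error;
}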
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index 3a1ebd9ec57..b971c96cda2 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -3032,6 +3032,7 @@ void optimize_semi_joins(JOIN *join, table_map remaining_tables, uint idx,
void update_sj_state(JOIN *join, const JOIN_TAB *new_tab,
uint idx, table_map remaining_tables)
{
+ DBUG_ASSERT(!join->emb_sjm_nest);
if (TABLE_LIST *emb_sj_nest= new_tab->emb_sj_nest)
{
join->cur_sj_inner_tables |= emb_sj_nest->sj_inner_tables;
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 1dd57cc319a..90fd7953124 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -7744,6 +7744,39 @@ bool setup_fields(THD *thd, Ref_ptr_array ref_pointer_array,
DBUG_RETURN(MY_TEST(thd->is_error()));
}
+/*
+ make list of leaves for a single TABLE_LIST
+
+ SYNOPSIS
+ make_leaves_for_single_table()
+ thd Thread handler
+ leaves List of leaf tables to be filled
+ table TABLE_LIST object to process
+ full_table_list Whether to include tables from mergeable derived table/view
+*/
+void make_leaves_for_single_table(THD *thd, List<TABLE_LIST> &leaves,
+ TABLE_LIST *table, bool& full_table_list,
+ TABLE_LIST *boundary)
+{
+ if (table == boundary)
+ full_table_list= !full_table_list;
+ if (full_table_list && table->is_merged_derived())
+ {
+ SELECT_LEX *select_lex= table->get_single_select();
+ /*
+ It's safe to use select_lex->leaf_tables because all derived
+ tables/views were already prepared and have their leaf_tables
+ set properly.
+ */
+ make_leaves_list(thd, leaves, select_lex->get_table_list(),
+ full_table_list, boundary);
+ }
+ else
+ {
+ leaves.push_back(table, thd->mem_root);
+ }
+}
+
/*
Perform checks like all given fields exists, if exists fill struct with
@@ -7770,40 +7803,79 @@ int setup_returning_fields(THD* thd, TABLE_LIST* table_list)
SYNOPSIS
make_leaves_list()
- list pointer to pointer on list first element
- tables table list
- full_table_list whether to include tables from mergeable derived table/view.
- we need them for checks for INSERT/UPDATE statements only.
-
- RETURN pointer on pointer to next_leaf of last element
+ leaves List of leaf tables to be filled
+ tables Table list
+ full_table_list Whether to include tables from mergeable derived table/view.
+ We need them for checks for INSERT/UPDATE statements only.
*/
-void make_leaves_list(THD *thd, List<TABLE_LIST> &list, TABLE_LIST *tables,
+void make_leaves_list(THD *thd, List<TABLE_LIST> &leaves, TABLE_LIST *tables,
bool full_table_list, TABLE_LIST *boundary)
{
for (TABLE_LIST *table= tables; table; table= table->next_local)
{
- if (table == boundary)
- full_table_list= !full_table_list;
- if (full_table_list && table->is_merged_derived())
- {
- SELECT_LEX *select_lex= table->get_single_select();
- /*
- It's safe to use select_lex->leaf_tables because all derived
- tables/views were already prepared and has their leaf_tables
- set properly.
- */
- make_leaves_list(thd, list, select_lex->get_table_list(),
- full_table_list, boundary);
- }
- else
- {
- list.push_back(table, thd->mem_root);
- }
+ make_leaves_for_single_table(thd, leaves, table, full_table_list,
+ boundary);
+ }
+}
+
+
+/*
+ Setup the map and other attributes for a single TABLE_LIST object
+
+ SYNOPSIS
+ setup_table_attributes()
+ thd Thread handler
+ table_list TABLE_LIST object to process
+ first_select_table First table participating in SELECT for INSERT..SELECT
+ statements, NULL for other cases
+ tablenr Serial number of the table in the SQL statement
+
+ RETURN
+ false Success
+ true Failure
+*/
+bool setup_table_attributes(THD *thd, TABLE_LIST *table_list,
+ TABLE_LIST *first_select_table,
+ uint &tablenr)
+{
+ TABLE *table= table_list->table;
+ if (table)
+ table->pos_in_table_list= table_list;
+ if (first_select_table && table_list->top_table() == first_select_table)
+ {
+ /* new counting for SELECT of INSERT ... SELECT command */
+ first_select_table= 0;
+ thd->lex->first_select_lex()->insert_tables= tablenr;
+ tablenr= 0;
+ }
+ if (table_list->jtbm_subselect)
+ {
+ table_list->jtbm_table_no= tablenr;
+ }
+ else if (table)
+ {
+ table->pos_in_table_list= table_list;
+ setup_table_map(table, table_list, tablenr);
+
+ if (table_list->process_index_hints(table))
+ return true;
}
+ tablenr++;
+ /*
+ We test the max tables here, as setup_table_map() should not be called
+ with tablenr >= 64
+ */
+ if (tablenr > MAX_TABLES)
+ {
+ my_error(ER_TOO_MANY_TABLES, MYF(0), static_cast<int>(MAX_TABLES));
+ return true;
+ }
+ return false;
}
+
/*
prepare tables
@@ -7860,7 +7932,14 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
leaves.empty();
if (select_lex->prep_leaf_list_state != SELECT_LEX::SAVED)
{
- make_leaves_list(thd, leaves, tables, full_table_list, first_select_table);
+ /*
+ For INSERT ... SELECT statements we must not include the first table
+ (the one the data is being inserted into) in the list of leaves
+ */
+ TABLE_LIST *tables_for_leaves=
+ select_insert ? first_select_table : tables;
+ make_leaves_list(thd, leaves, tables_for_leaves, full_table_list,
+ first_select_table);
select_lex->prep_leaf_list_state= SELECT_LEX::READY;
select_lex->leaf_tables_exec.empty();
}
@@ -7871,40 +7950,33 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
leaves.push_back(table_list, thd->mem_root);
}
+ List_iterator<TABLE_LIST> ti(leaves);
while ((table_list= ti++))
{
- TABLE *table= table_list->table;
- if (table)
- table->pos_in_table_list= table_list;
- if (first_select_table &&
- table_list->top_table() == first_select_table)
- {
- /* new counting for SELECT of INSERT ... SELECT command */
- first_select_table= 0;
- thd->lex->first_select_lex()->insert_tables= tablenr;
- tablenr= 0;
- }
- if(table_list->jtbm_subselect)
- {
- table_list->jtbm_table_no= tablenr;
- }
- else if (table)
- {
- table->pos_in_table_list= table_list;
- setup_table_map(table, table_list, tablenr);
+ if (setup_table_attributes(thd, table_list, first_select_table, tablenr))
+ DBUG_RETURN(1);
+ }
- if (table_list->process_index_hints(table))
- DBUG_RETURN(1);
- }
- tablenr++;
+ if (select_insert)
+ {
/*
- We test the max tables here as we setup_table_map() should not be called
- with tablenr >= 64
+ The table/view the data is inserted into must not be included in the
+ leaf_tables list, but we still need to set up attributes for this
+ table/view. So build a temporary list of leaves and set up attributes
+ for the tables it includes
*/
- if (tablenr > MAX_TABLES)
+ List<TABLE_LIST> leaves;
+ TABLE_LIST *table= tables;
+
+ make_leaves_for_single_table(thd, leaves, table, full_table_list,
+ first_select_table);
+
+ List_iterator<TABLE_LIST> ti(leaves);
+ while ((table_list= ti++))
{
- my_error(ER_TOO_MANY_TABLES,MYF(0), static_cast<int>(MAX_TABLES));
- DBUG_RETURN(1);
+ if (setup_table_attributes(thd, table_list, first_select_table,
+ tablenr))
+ DBUG_RETURN(1);
}
}
}
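The net effect of the setup_tables() rework: for INSERT ... SELECT, the insert target no longer enters the leaf-table list, but it still gets its table number and other attributes assigned through a throwaway local list. A toy model of that flow, all names hypothetical:

#include <cstdio>
#include <vector>

struct TableRef { const char *name; bool is_insert_target; unsigned tablenr; };

int main()
{
  std::vector<TableRef> tables = {{"t_target", true, 0}, {"t_src", false, 0}};

  // Leaf list built only from the SELECT part, as when make_leaves_list()
  // is called with tables_for_leaves = first_select_table.
  std::vector<TableRef*> leaves;
  for (auto &t : tables)
    if (!t.is_insert_target)
      leaves.push_back(&t);

  // Attributes are still assigned to every table; the target goes through
  // a temporary list, mirroring the select_insert branch of setup_tables().
  unsigned tablenr = 0;
  for (auto *t : leaves) t->tablenr = tablenr++;
  std::vector<TableRef*> tmp = {&tables[0]};
  for (auto *t : tmp) t->tablenr = tablenr++;

  for (auto &t : tables)
    printf("%s: leaf=%d tablenr=%u\n", t.name, !t.is_insert_target, t.tablenr);
  return 0;
}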
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 658909c5961..7b229082822 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1876,7 +1876,7 @@ void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
extern std::atomic<my_thread_id> shutdown_thread_id;
void THD::awake_no_mutex(killed_state state_to_set)
{
- DBUG_ENTER("THD::awake");
+ DBUG_ENTER("THD::awake_no_mutex");
DBUG_PRINT("enter", ("this: %p current_thd: %p state: %d",
this, current_thd, (int) state_to_set));
THD_CHECK_SENTRY(this);
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 10dab7db4a0..14a83442829 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1346,8 +1346,12 @@ values_loop_end:
thd->lex->current_select->save_leaf_tables(thd);
thd->lex->current_select->first_cond_optimization= 0;
}
- if (readbuff)
- my_free(readbuff);
+
+ my_free(readbuff);
+#ifndef EMBEDDED_LIBRARY
+ if (lock_type == TL_WRITE_DELAYED && table->expr_arena)
+ table->expr_arena->free_items();
+#endif
DBUG_RETURN(FALSE);
abort:
@@ -1364,6 +1368,8 @@ abort:
*/
for (Field **ptr= table_list->table->field ; *ptr ; ptr++)
(*ptr)->free();
+ if (table_list->table->expr_arena)
+ table_list->table->expr_arena->free_items();
}
#endif
if (table != NULL)
@@ -1542,8 +1548,7 @@ static bool mysql_prepare_insert_check_table(THD *thd, TABLE_LIST *table_list,
if (insert_into_view && !fields.elements)
{
thd->lex->empty_field_list_on_rset= 1;
- if (!thd->lex->first_select_lex()->leaf_tables.head()->table ||
- table_list->is_multitable())
+ if (!table_list->table || table_list->is_multitable())
{
my_error(ER_VIEW_NO_INSERT_FIELD_LIST, MYF(0),
table_list->view_db.str, table_list->view_name.str);
@@ -3789,7 +3794,6 @@ int mysql_insert_select_prepare(THD *thd, select_result *sel_res)
if (sel_res)
sel_res->prepare(lex->returning()->item_list, NULL);
- DBUG_ASSERT(select_lex->leaf_tables.elements != 0);
List_iterator<TABLE_LIST> ti(select_lex->leaf_tables);
TABLE_LIST *table;
uint insert_tables;
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index f351210a862..2176f28514c 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -10490,11 +10490,13 @@ void LEX::relink_hack(st_select_lex *select_lex)
{
if (!select_stack_top) // Statements of the second type
{
- if (!select_lex->get_master()->get_master())
- ((st_select_lex *) select_lex->get_master())->
- set_master(&builtin_select);
- if (!builtin_select.get_slave())
- builtin_select.set_slave(select_lex->get_master());
+ if (!select_lex->outer_select() &&
+ !builtin_select.first_inner_unit())
+ {
+ builtin_select.register_unit(select_lex->master_unit(),
+ &builtin_select.context);
+ builtin_select.add_statistics(select_lex->master_unit());
+ }
}
}
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index a9040f7c3d2..fc6e55944a4 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -766,7 +766,6 @@ public:
}
inline st_select_lex_node* get_master() { return master; }
- inline st_select_lex_node* get_slave() { return slave; }
void include_down(st_select_lex_node *upper);
void add_slave(st_select_lex_node *slave_arg);
void include_neighbour(st_select_lex_node *before);
@@ -1745,15 +1744,6 @@ public:
Sroutine_hash_entry **sroutines_list_own_last;
uint sroutines_list_own_elements;
- /**
- Number of tables which were open by open_tables() and to be locked
- by lock_tables().
- Note that we set this member only in some cases, when this value
- needs to be passed from open_tables() to lock_tables() which are
- separated by some amount of code.
- */
- uint table_count;
-
/*
These constructor and destructor serve for creation/destruction
of Query_tables_list instances which are used as backup storage.
@@ -3450,7 +3440,7 @@ public:
stores total number of tables. For LEX representing multi-delete
holds number of tables from which we will delete records.
*/
- uint table_count;
+ uint table_count_update;
uint8 describe;
/*
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 0aae981db27..6048b83a60a 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -3325,6 +3325,7 @@ bool run_set_statement_if_requested(THD *thd, LEX *lex)
{
switch (v->var->option.var_type & GET_TYPE_MASK)
{
+ case GET_BIT:
case GET_BOOL:
case GET_INT:
case GET_LONG:
@@ -4855,7 +4856,7 @@ mysql_execute_command(THD *thd, bool is_called_from_prepared_stmt)
if (likely(!thd->is_fatal_error))
{
result= new (thd->mem_root) multi_delete(thd, aux_tables,
- lex->table_count);
+ lex->table_count_update);
if (likely(result))
{
if (unlikely(select_lex->vers_setup_conds(thd, aux_tables)))
@@ -9750,12 +9751,12 @@ bool multi_delete_set_locks_and_link_aux_tables(LEX *lex)
TABLE_LIST *target_tbl;
DBUG_ENTER("multi_delete_set_locks_and_link_aux_tables");
- lex->table_count= 0;
+ lex->table_count_update= 0;
for (target_tbl= lex->auxiliary_table_list.first;
target_tbl; target_tbl= target_tbl->next_local)
{
- lex->table_count++;
+ lex->table_count_update++;
/* All tables in aux_tables must be found in FROM PART */
TABLE_LIST *walk= multi_delete_table_match(lex, target_tbl, tables);
if (!walk)
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 7cce643efd2..5fca48f403f 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1441,7 +1441,7 @@ static int mysql_test_update(Prepared_statement *stmt,
DBUG_ASSERT(update_source_table || table_list->view != 0);
DBUG_PRINT("info", ("Switch to multi-update"));
/* pass counter value */
- thd->lex->table_count= table_count;
+ thd->lex->table_count_update= table_count;
/* convert to multiupdate */
DBUG_RETURN(2);
}
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 447ec9c2f73..5cf7f7a95e4 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -787,7 +787,22 @@ void remove_redundant_subquery_clauses(st_select_lex *subq_select_lex)
Here SUBQ cannot be removed.
*/
if (!ord->in_field_list)
+ {
(*ord->item)->walk(&Item::eliminate_subselect_processor, FALSE, NULL);
+ /*
+ Remove from the JOIN::all_fields list any references to the elements
+ of the eliminated GROUP BY list unless they are 'in_field_list'.
+ This is needed so as not to confuse JOIN::make_aggr_tables_info()
+ when it constructs the structures for the execution phase.
+ */
+ List_iterator<Item> li(subq_select_lex->join->all_fields);
+ Item *item;
+ while ((item= li++))
+ {
+ if (item == *ord->item)
+ li.remove();
+ }
+ }
}
subq_select_lex->join->group_list= NULL;
subq_select_lex->group_list.empty();
@@ -2012,7 +2027,6 @@ JOIN::optimize_inner()
/* Merge all mergeable derived tables/views in this SELECT. */
if (select_lex->handle_derived(thd->lex, DT_MERGE))
DBUG_RETURN(TRUE);
- table_count= select_lex->leaf_tables.elements;
}
if (select_lex->first_cond_optimization &&
@@ -2060,8 +2074,6 @@ JOIN::optimize_inner()
eval_select_list_used_tables();
- table_count= select_lex->leaf_tables.elements;
-
if (select_lex->options & OPTION_SCHEMA_TABLE &&
optimize_schema_tables_memory_usage(select_lex->leaf_tables))
DBUG_RETURN(1);
@@ -9214,7 +9226,8 @@ greedy_search(JOIN *join,
picked semi-join operation is in best_pos->...picker, but we need to
update the global state in the JOIN object, too.
*/
- update_sj_state(join, best_table, idx, remaining_tables);
+ if (!join->emb_sjm_nest)
+ update_sj_state(join, best_table, idx, remaining_tables);
/* find the position of 'best_table' in 'join->best_ref' */
best_idx= idx;
@@ -14395,7 +14408,6 @@ void JOIN::cleanup(bool full)
/* Free the original optimized join created for the group_by_handler */
join_tab= original_join_tab;
original_join_tab= 0;
- table_count= original_table_count;
}
if (join_tab)
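The update_sj_state() changes form a matched pair: the callee now asserts its precondition (DBUG_ASSERT(!join->emb_sjm_nest)) and the one caller that could violate it, greedy_search(), guards the call. A compact sketch of this assert-in-callee, guard-in-caller pattern, with hypothetical names:

#include <cassert>
#include <cstdio>

struct Join { const void *emb_sjm_nest; int sj_state; };

// Callee documents and enforces its precondition.
static void update_state(Join *join)
{
  assert(join->emb_sjm_nest == nullptr);
  join->sj_state++;
}

int main()
{
  Join inside_sjm = {&inside_sjm, 0};  // optimizing inside a materialization nest
  Join top_level  = {nullptr, 0};

  for (Join *j : {&inside_sjm, &top_level})
    if (!j->emb_sjm_nest)              // caller-side guard, as in greedy_search()
      update_state(j);

  printf("inside=%d top=%d\n", inside_sjm.sj_state, top_level.sj_state);
  return 0;
}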
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 7a72d0efe42..d72fc82be81 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -1289,7 +1289,6 @@ public:
Pushdown_query *pushdown_query;
JOIN_TAB *original_join_tab;
- uint original_table_count;
/******* Join optimization state members start *******/
/*
diff --git a/sql/sql_sequence.cc b/sql/sql_sequence.cc
index 07571c3bbac..11b5109c349 100644
--- a/sql/sql_sequence.cc
+++ b/sql/sql_sequence.cc
@@ -706,7 +706,9 @@ longlong SEQUENCE::next_value(TABLE *table, bool second_round, int *error)
{
longlong res_value, org_reserved_until, add_to;
bool out_of_values;
+ THD *thd= table->in_use;
DBUG_ENTER("SEQUENCE::next_value");
+ DBUG_ASSERT(thd);
*error= 0;
if (!second_round)
@@ -771,7 +773,8 @@ longlong SEQUENCE::next_value(TABLE *table, bool second_round, int *error)
DBUG_RETURN(next_value(table, 1, error));
}
- if (unlikely((*error= write(table, 0))))
+ if (unlikely((*error= write(table, thd->variables.binlog_row_image !=
+ BINLOG_ROW_IMAGE_MINIMAL))))
{
reserved_until= org_reserved_until;
next_free_value= res_value;
@@ -838,7 +841,9 @@ int SEQUENCE::set_value(TABLE *table, longlong next_val, ulonglong next_round,
longlong org_reserved_until= reserved_until;
longlong org_next_free_value= next_free_value;
ulonglong org_round= round;
+ THD *thd= table->in_use;
DBUG_ENTER("SEQUENCE::set_value");
+ DBUG_ASSERT(thd);
write_lock(table);
if (is_used)
@@ -877,7 +882,8 @@ int SEQUENCE::set_value(TABLE *table, longlong next_val, ulonglong next_round,
needs_to_be_stored)
{
reserved_until= next_free_value;
- if (write(table, 0))
+ if (write(table,
+ thd->variables.binlog_row_image != BINLOG_ROW_IMAGE_MINIMAL))
{
reserved_until= org_reserved_until;
next_free_value= org_next_free_value;
diff --git a/sql/sql_type.cc b/sql/sql_type.cc
index 8e150de7556..e5c9b87ad74 100644
--- a/sql/sql_type.cc
+++ b/sql/sql_type.cc
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2015, 2021, MariaDB
+ Copyright (c) 2015, 2022, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1959,6 +1959,9 @@ Type_collection_std::aggregate_for_comparison(const Type_handler *ha,
return ha;
}
}
+ if ((a == INT_RESULT && b == STRING_RESULT) ||
+ (b == INT_RESULT && a == STRING_RESULT))
+ return &type_handler_newdecimal;
if ((a == INT_RESULT || a == DECIMAL_RESULT) &&
(b == INT_RESULT || b == DECIMAL_RESULT))
return &type_handler_newdecimal;
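The new branch routes INT-versus-STRING comparisons through DECIMAL. A plausible motivation, inferred here since the commit message is not part of this page: comparing through DOUBLE loses precision for 64-bit integers, so distinct values near the top of the range can compare equal. A minimal demonstration:

#include <cstdio>

int main()
{
  unsigned long long a = 18446744073709551615ULL;  // 2^64 - 1
  unsigned long long b = 18446744073709551614ULL;  // 2^64 - 2
  // Both round to 2^64 as doubles (spacing there is 2048), so they collide.
  printf("as double: %d\n", (double)a == (double)b);  // prints 1
  printf("as integers: %d\n", a == b);                // prints 0
  return 0;
}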
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 454fed52242..d341206732f 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -427,7 +427,7 @@ int mysql_update(THD *thd,
DBUG_ASSERT(update_source_table || table_list->view != 0);
DBUG_PRINT("info", ("Switch to multi-update"));
/* pass counter value */
- thd->lex->table_count= table_count;
+ thd->lex->table_count_update= table_count;
if (thd->lex->period_conditions.is_set())
{
my_error(ER_NOT_SUPPORTED_YET, MYF(0),
@@ -1857,7 +1857,7 @@ int mysql_multi_update_prepare(THD *thd)
TABLE_LIST *table_list= lex->query_tables;
TABLE_LIST *tl;
Multiupdate_prelocking_strategy prelocking_strategy;
- uint table_count= lex->table_count;
+ uint table_count= lex->table_count_update;
DBUG_ENTER("mysql_multi_update_prepare");
/*
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 172ea82b1a6..31837f113f4 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -2489,6 +2489,7 @@ create:
{
if (Lex->main_select_push())
MYSQL_YYABORT;
+ Lex->inc_select_stack_outer_barrier();
if (Lex->add_create_view(thd, $1 | $5,
DTYPE_ALGORITHM_UNDEFINED, $3, $6))
MYSQL_YYABORT;
@@ -2504,6 +2505,7 @@ create:
MYSQL_YYABORT;
if (Lex->main_select_push())
MYSQL_YYABORT;
+ Lex->inc_select_stack_outer_barrier();
}
view_list_opt AS view_select
{