author    Monty <monty@mariadb.org>  2018-01-01 19:39:59 +0200
committer Monty <monty@mariadb.org>  2018-01-01 19:39:59 +0200
commit    fbab79c9b8a58b90292e900cf46ab6d6632ebb68 (patch)
tree      49738ad2561489954fa0973d82be10fafbf42e0b /sql
parent    afbb72b3b6988f4c5242d46588754517724b2950 (diff)
parent    aed2050e40cb332d54e8d40eb7242309b962c4e1 (diff)
download  mariadb-git-fbab79c9b8a58b90292e900cf46ab6d6632ebb68.tar.gz
Merge remote-tracking branch 'origin/10.2' into bb-10.2-ext
Conflicts:
    cmake/make_dist.cmake.in
    mysql-test/r/func_json.result
    mysql-test/r/ps.result
    mysql-test/t/func_json.test
    mysql-test/t/ps.test
    sql/item_cmpfunc.h
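A merge commit like this one is normally produced with the standard git workflow; the commands below are only an illustrative sketch (branch names are taken from the commit message above, and the actual resolution of the listed conflicts is elided):

    git checkout bb-10.2-ext
    git fetch origin
    git merge origin/10.2       # stops on the conflicting files listed above
    # resolve the conflicts in those files, then:
    git add -u
    git commit                  # records a merge commit like this one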
Diffstat (limited to 'sql')
-rw-r--r--  sql/events.cc              |   2
-rw-r--r--  sql/filesort.cc            |  26
-rw-r--r--  sql/filesort_utils.cc      |   2
-rw-r--r--  sql/ha_partition.cc        |  69
-rw-r--r--  sql/item.cc                |   9
-rw-r--r--  sql/item_cmpfunc.cc        |  13
-rw-r--r--  sql/item_cmpfunc.h         |   1
-rw-r--r--  sql/item_jsonfunc.cc       |   6
-rw-r--r--  sql/item_sum.cc            |   2
-rw-r--r--  sql/log.cc                 | 110
-rw-r--r--  sql/log.h                  |   9
-rw-r--r--  sql/mysqld.cc              |  21
-rw-r--r--  sql/opt_range.cc           |   9
-rw-r--r--  sql/partition_info.cc      |  20
-rw-r--r--  sql/partition_info.h       |   1
-rw-r--r--  sql/share/errmsg-utf8.txt  |  48
-rw-r--r--  sql/signal_handler.cc      |   4
-rw-r--r--  sql/slave.h                |   2
-rw-r--r--  sql/sp.cc                  |   4
-rw-r--r--  sql/sql_acl.cc             |  61
-rw-r--r--  sql/sql_connect.cc         |  49
-rw-r--r--  sql/sql_cte.cc             |   2
-rw-r--r--  sql/sql_explain.cc         |   8
-rw-r--r--  sql/sql_explain.h          |   1
-rw-r--r--  sql/sql_lex.cc             |   4
-rw-r--r--  sql/sql_partition.cc       |  21
-rw-r--r--  sql/sql_prepare.cc         |  12
-rw-r--r--  sql/sql_select.cc          |  16
-rw-r--r--  sql/sql_table.cc           |   2
-rw-r--r--  sql/sql_view.cc            |   3
-rw-r--r--  sql/sql_window.cc          |   7
-rw-r--r--  sql/sql_yacc.yy            |   8
-rw-r--r--  sql/table.cc               |  16
-rw-r--r--  sql/wsrep_mysqld.cc        |  10
34 files changed, 375 insertions, 203 deletions
diff --git a/sql/events.cc b/sql/events.cc
index f1559aec60c..0830fa7c611 100644
--- a/sql/events.cc
+++ b/sql/events.cc
@@ -421,7 +421,7 @@ Events::create_event(THD *thd, Event_parse_data *parse_data)
DBUG_RETURN(ret);
#ifdef WITH_WSREP
error:
- DBUG_RETURN(true);
+ DBUG_RETURN(TRUE);
#endif /* WITH_WSREP */
}
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 4fffd0dca91..476b0bfb7dd 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -714,6 +714,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
handler *file;
MY_BITMAP *save_read_set, *save_write_set, *save_vcol_set;
Item *sort_cond;
+ ha_rows retval;
DBUG_ENTER("find_all_keys");
DBUG_PRINT("info",("using: %s",
(select ? select->quick ? "ranges" : "where":
@@ -771,7 +772,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
if (quick_select)
{
if (select->quick->reset())
- DBUG_RETURN(HA_POS_ERROR);
+ goto err;
}
DEBUG_SYNC(thd, "after_index_merge_phase1");
@@ -808,7 +809,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
(void) file->extra(HA_EXTRA_NO_CACHE);
file->ha_rnd_end();
}
- DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */
+ goto err; /* purecov: inspected */
}
bool write_record= false;
@@ -856,7 +857,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
if (idx == param->max_keys_per_buffer)
{
if (write_keys(param, fs_info, idx, buffpek_pointers, tempfile))
- DBUG_RETURN(HA_POS_ERROR);
+ goto err;
idx= 0;
indexpos++;
}
@@ -882,12 +883,12 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
file->ha_rnd_end();
}
- if (thd->is_error())
- DBUG_RETURN(HA_POS_ERROR);
-
/* Signal we should use original column read and write maps */
sort_form->column_bitmaps_set(save_read_set, save_write_set, save_vcol_set);
+ if (thd->is_error())
+ DBUG_RETURN(HA_POS_ERROR);
+
DBUG_PRINT("test",("error: %d indexpos: %d",error,indexpos));
if (error != HA_ERR_END_OF_FILE)
{
@@ -897,11 +898,15 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
if (indexpos && idx &&
write_keys(param, fs_info, idx, buffpek_pointers, tempfile))
DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */
- const ha_rows retval=
- my_b_inited(tempfile) ?
- (ha_rows) (my_b_tell(tempfile)/param->rec_length) : idx;
+ retval= (my_b_inited(tempfile) ?
+ (ha_rows) (my_b_tell(tempfile)/param->rec_length) :
+ idx);
DBUG_PRINT("info", ("find_all_keys return %llu", (ulonglong) retval));
DBUG_RETURN(retval);
+
+err:
+ sort_form->column_bitmaps_set(save_read_set, save_write_set, save_vcol_set);
+ DBUG_RETURN(HA_POS_ERROR);
} /* find_all_keys */
@@ -999,7 +1004,8 @@ Type_handler_string_result::make_sort_key(uchar *to, Item *item,
if (maybe_null)
*to++= 1;
char *tmp_buffer= param->tmp_buffer ? param->tmp_buffer : (char*) to;
- String tmp(tmp_buffer, param->sort_length, cs);
+ String tmp(tmp_buffer, param->tmp_buffer ? param->sort_length :
+ sort_field->length, cs);
String *res= item->str_result(&tmp);
if (!res)
{
diff --git a/sql/filesort_utils.cc b/sql/filesort_utils.cc
index a0bc5ee6aa2..cb0b2d52b6f 100644
--- a/sql/filesort_utils.cc
+++ b/sql/filesort_utils.cc
@@ -105,7 +105,7 @@ uchar **Filesort_buffer::alloc_sort_buffer(uint num_records,
DBUG_EXECUTE_IF("alloc_sort_buffer_fail",
DBUG_SET("+d,simulate_out_of_memory"););
- buff_size= num_records * (record_length + sizeof(uchar*));
+ buff_size= ((size_t)num_records) * (record_length + sizeof(uchar*));
set_if_bigger(buff_size, record_length * MERGEBUFF2);
if (!m_idx_array.is_null())
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index ed3028451c5..843b854e5f7 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -2186,38 +2186,19 @@ void ha_partition::update_create_info(HA_CREATE_INFO *create_info)
DBUG_ASSERT(sub_elem);
part= i * num_subparts + j;
DBUG_ASSERT(part < m_file_tot_parts && m_file[part]);
- if (ha_legacy_type(m_file[part]->ht) == DB_TYPE_INNODB)
- {
- dummy_info.data_file_name= dummy_info.index_file_name = NULL;
- m_file[part]->update_create_info(&dummy_info);
-
- if (dummy_info.data_file_name || sub_elem->data_file_name)
- {
- sub_elem->data_file_name = (char*) dummy_info.data_file_name;
- }
- if (dummy_info.index_file_name || sub_elem->index_file_name)
- {
- sub_elem->index_file_name = (char*) dummy_info.index_file_name;
- }
- }
+ dummy_info.data_file_name= dummy_info.index_file_name = NULL;
+ m_file[part]->update_create_info(&dummy_info);
+ sub_elem->data_file_name = (char*) dummy_info.data_file_name;
+ sub_elem->index_file_name = (char*) dummy_info.index_file_name;
}
}
else
{
DBUG_ASSERT(m_file[i]);
- if (ha_legacy_type(m_file[i]->ht) == DB_TYPE_INNODB)
- {
- dummy_info.data_file_name= dummy_info.index_file_name= NULL;
- m_file[i]->update_create_info(&dummy_info);
- if (dummy_info.data_file_name || part_elem->data_file_name)
- {
- part_elem->data_file_name = (char*) dummy_info.data_file_name;
- }
- if (dummy_info.index_file_name || part_elem->index_file_name)
- {
- part_elem->index_file_name = (char*) dummy_info.index_file_name;
- }
- }
+ dummy_info.data_file_name= dummy_info.index_file_name= NULL;
+ m_file[i]->update_create_info(&dummy_info);
+ part_elem->data_file_name = (char*) dummy_info.data_file_name;
+ part_elem->index_file_name = (char*) dummy_info.index_file_name;
}
}
DBUG_VOID_RETURN;
@@ -8165,20 +8146,36 @@ uint ha_partition::alter_table_flags(uint flags)
bool ha_partition::check_if_incompatible_data(HA_CREATE_INFO *create_info,
uint table_changes)
{
- handler **file;
- bool ret= COMPATIBLE_DATA_YES;
-
/*
The check for any partitioning related changes has already been done
in mysql_alter_table (by fix_partition_func), so it is only up to
the underlying handlers.
*/
- for (file= m_file; *file; file++)
- if ((ret= (*file)->check_if_incompatible_data(create_info,
- table_changes)) !=
- COMPATIBLE_DATA_YES)
- break;
- return ret;
+ List_iterator<partition_element> part_it(m_part_info->partitions);
+ HA_CREATE_INFO dummy_info= *create_info;
+ uint i=0;
+ while (partition_element *part_elem= part_it++)
+ {
+ if (m_is_sub_partitioned)
+ {
+ List_iterator<partition_element> subpart_it(part_elem->subpartitions);
+ while (partition_element *sub_elem= subpart_it++)
+ {
+ dummy_info.data_file_name= sub_elem->data_file_name;
+ dummy_info.index_file_name= sub_elem->index_file_name;
+ if (m_file[i++]->check_if_incompatible_data(&dummy_info, table_changes))
+ return COMPATIBLE_DATA_NO;
+ }
+ }
+ else
+ {
+ dummy_info.data_file_name= part_elem->data_file_name;
+ dummy_info.index_file_name= part_elem->index_file_name;
+ if (m_file[i++]->check_if_incompatible_data(&dummy_info, table_changes))
+ return COMPATIBLE_DATA_NO;
+ }
+ }
+ return COMPATIBLE_DATA_YES;
}
diff --git a/sql/item.cc b/sql/item.cc
index 6fdcd29af29..a753913a537 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -2180,8 +2180,10 @@ bool Item_name_const::fix_fields(THD *thd, Item **ref)
String s(buf, sizeof(buf), &my_charset_bin);
s.length(0);
- if (value_item->fix_fields(thd, &value_item) ||
- name_item->fix_fields(thd, &name_item) ||
+ if ((!value_item->fixed &&
+ value_item->fix_fields(thd, &value_item)) ||
+ (!name_item->fixed &&
+ name_item->fix_fields(thd, &name_item)) ||
!value_item->const_item() ||
!name_item->const_item() ||
!(item_name= name_item->val_str(&s))) // Can't have a NULL name
@@ -5317,7 +5319,8 @@ resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select)
if (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY &&
select->having_fix_field &&
- select_ref != not_found_item && !group_by_ref)
+ select_ref != not_found_item && !group_by_ref &&
+ !ref->alias_name_used)
{
/*
Report the error if fields was found only in the SELECT item list and
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index d7429af76f7..313bd457313 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -1839,6 +1839,19 @@ bool Item_func_opt_neg::eq(const Item *item, bool binary_cmp) const
}
+bool Item_func_interval::fix_fields(THD *thd, Item **ref)
+{
+ if (Item_long_func::fix_fields(thd, ref))
+ return true;
+ for (uint i= 0 ; i < row->cols(); i++)
+ {
+ if (row->element_index(i)->check_cols(1))
+ return true;
+ }
+ return false;
+}
+
+
void Item_func_interval::fix_length_and_dec()
{
uint rows= row->cols();
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index e18315fde87..b4a833d2290 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -976,6 +976,7 @@ public:
Item_func_interval(THD *thd, Item_row *a):
Item_long_func(thd, a), row(a), intervals(0)
{ }
+ bool fix_fields(THD *, Item **);
longlong val_int();
void fix_length_and_dec();
const char *func_name() const { return "interval"; }
diff --git a/sql/item_jsonfunc.cc b/sql/item_jsonfunc.cc
index 38d6e4feba0..069bdf2e079 100644
--- a/sql/item_jsonfunc.cc
+++ b/sql/item_jsonfunc.cc
@@ -781,10 +781,10 @@ String *Item_func_json_extract::read_json(String *str,
{
str->set_charset(js->charset());
str->length(0);
- }
- if (possible_multiple_values && str->append("[", 1))
- goto error;
+ if (possible_multiple_values && str->append("[", 1))
+ goto error;
+ }
json_get_path_start(&je, js->charset(),(const uchar *) js->ptr(),
(const uchar *) js->ptr() + js->length(), &p);
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 4fecd7de269..23e63f709cd 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -3682,7 +3682,7 @@ void Item_func_group_concat::print(String *str, enum_query_type query_type)
}
}
str->append(STRING_WITH_LEN(" separator \'"));
- str->append(*separator);
+ str->append_for_single_quote(separator->ptr(), separator->length());
str->append(STRING_WITH_LEN("\')"));
}
diff --git a/sql/log.cc b/sql/log.cc
index 36239cfa055..e632fe629f5 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -2470,7 +2470,7 @@ static int find_uniq_filename(char *name, ulong next_log_number)
char buff[FN_REFLEN], ext_buf[FN_REFLEN];
struct st_my_dir *dir_info;
reg1 struct fileinfo *file_info;
- ulong max_found, next, number;
+ ulong max_found, next, UNINIT_VAR(number);
size_t buf_length, length;
char *start, *end;
int error= 0;
@@ -3630,6 +3630,7 @@ bool MYSQL_BIN_LOG::open(const char *log_name,
new_xid_list_entry->binlog_name= name_mem;
new_xid_list_entry->binlog_name_len= len;
new_xid_list_entry->xid_count= 0;
+ new_xid_list_entry->notify_count= 0;
/*
Find the name for the Initial binlog checkpoint.
@@ -4990,7 +4991,55 @@ MYSQL_BIN_LOG::is_xidlist_idle_nolock()
return true;
}
+#ifdef WITH_WSREP
+inline bool
+is_gtid_cached_internal(IO_CACHE *file)
+{
+ uchar data[EVENT_TYPE_OFFSET+1];
+ bool result= false;
+ my_off_t write_pos= my_b_tell(file);
+ if (reinit_io_cache(file, READ_CACHE, 0, 0, 0))
+ return false;
+ /*
+    The cache contains a GTID event if the condition below is true.
+ */
+ my_b_read(file, data, sizeof(data));
+ uint event_type= (uchar)data[EVENT_TYPE_OFFSET];
+ if (event_type == GTID_LOG_EVENT)
+ result= true;
+ /*
+    Cleanup: we have not read the full buffer, and without this the next
+    reinit_io_cache() call (made in write_cache) would treat the cache
+    as empty.
+ */
+ file->read_pos= file->read_end;
+ if (reinit_io_cache(file, WRITE_CACHE, write_pos, 0, 0))
+ return false;
+ return result;
+}
+#endif
+#ifdef WITH_WSREP
+inline bool
+MYSQL_BIN_LOG::is_gtid_cached(THD *thd)
+{
+ binlog_cache_mngr *mngr= (binlog_cache_mngr *) thd_get_ha_data(
+ thd, binlog_hton);
+ if (!mngr)
+ return false;
+ binlog_cache_data *cache_trans= mngr->get_binlog_cache_data(
+ use_trans_cache(thd, true));
+ binlog_cache_data *cache_stmt= mngr->get_binlog_cache_data(
+ use_trans_cache(thd, false));
+ if (cache_trans && !cache_trans->empty() &&
+ is_gtid_cached_internal(&cache_trans->cache_log))
+ return true;
+ if (cache_stmt && !cache_stmt->empty() &&
+ is_gtid_cached_internal(&cache_stmt->cache_log))
+ return true;
+ return false;
+}
+#endif
/**
Create a new log file name.
@@ -5582,7 +5631,36 @@ THD::binlog_start_trans_and_stmt()
cache_mngr->trx_cache.get_prev_position() == MY_OFF_T_UNDEF)
{
this->binlog_set_stmt_begin();
- if (in_multi_stmt_transaction_mode())
+ bool mstmt_mode= in_multi_stmt_transaction_mode();
+#ifdef WITH_WSREP
+  /* Write the GTID.
+     Get the domain id only when gtid mode is set.
+     If this event was replicated through a master, we forward the
+     same GTID to the other nodes.
+     This has to be done only once per transaction.
+     Since this function is called multiple times, we check
+     ha_info->is_started().
+  */
+ Ha_trx_info *ha_info;
+ ha_info= this->ha_data[binlog_hton->slot].ha_info + (mstmt_mode ? 1 : 0);
+
+ if (!ha_info->is_started() && wsrep_gtid_mode
+ && this->variables.gtid_seq_no)
+ {
+ binlog_cache_mngr *const cache_mngr=
+ (binlog_cache_mngr*) thd_get_ha_data(this, binlog_hton);
+ binlog_cache_data *cache_data= cache_mngr->get_binlog_cache_data(1);
+ IO_CACHE *file= &cache_data->cache_log;
+ Log_event_writer writer(file, cache_data);
+ Gtid_log_event gtid_event(this, this->variables.gtid_seq_no,
+ this->variables.gtid_domain_id,
+ true, LOG_EVENT_SUPPRESS_USE_F,
+ true, 0);
+ gtid_event.server_id= this->variables.server_id;
+ writer.write(&gtid_event);
+ }
+#endif
+ if (mstmt_mode)
trans_register_ha(this, TRUE, binlog_hton);
trans_register_ha(this, FALSE, binlog_hton);
/*
@@ -5862,7 +5940,7 @@ MYSQL_BIN_LOG::write_gtid_event(THD *thd, bool standalone,
DBUG_PRINT("enter", ("standalone: %d", standalone));
#ifdef WITH_WSREP
- if (WSREP(thd) && thd->wsrep_trx_meta.gtid.seqno != -1 && wsrep_gtid_mode)
+ if (WSREP(thd) && thd->wsrep_trx_meta.gtid.seqno != -1 && wsrep_gtid_mode && !thd->variables.gtid_seq_no)
{
domain_id= wsrep_gtid_domain_id;
} else {
@@ -5914,6 +5992,12 @@ MYSQL_BIN_LOG::write_gtid_event(THD *thd, bool standalone,
/* Write the event to the binary log. */
DBUG_ASSERT(this == &mysql_bin_log);
+
+#ifdef WITH_WSREP
+ if (wsrep_gtid_mode && is_gtid_cached(thd))
+ DBUG_RETURN(false);
+#endif
+
if (write_event(&gtid_event))
DBUG_RETURN(true);
status_var_add(thd->status_var.binlog_bytes_written, gtid_event.data_written);
@@ -9718,9 +9802,20 @@ void
TC_LOG_BINLOG::commit_checkpoint_notify(void *cookie)
{
xid_count_per_binlog *entry= static_cast<xid_count_per_binlog *>(cookie);
+ bool found_entry= false;
mysql_mutex_lock(&LOCK_binlog_background_thread);
- entry->next_in_queue= binlog_background_thread_queue;
- binlog_background_thread_queue= entry;
+ /* count the same notification kind from different engines */
+ for (xid_count_per_binlog *link= binlog_background_thread_queue;
+ link && !found_entry; link= link->next_in_queue)
+ {
+ if ((found_entry= (entry == link)))
+ entry->notify_count++;
+ }
+ if (!found_entry)
+ {
+ entry->next_in_queue= binlog_background_thread_queue;
+ binlog_background_thread_queue= entry;
+ }
mysql_cond_signal(&COND_binlog_background_thread);
mysql_mutex_unlock(&LOCK_binlog_background_thread);
}
@@ -9815,13 +9910,16 @@ binlog_background_thread(void *arg __attribute__((unused)))
);
while (queue)
{
+ long count= queue->notify_count;
THD_STAGE_INFO(thd, stage_binlog_processing_checkpoint_notify);
DEBUG_SYNC(thd, "binlog_background_thread_before_mark_xid_done");
/* Set the thread start time */
thd->set_time();
/* Grab next pointer first, as mark_xid_done() may free the element. */
next= queue->next_in_queue;
- mysql_bin_log.mark_xid_done(queue->binlog_id, true);
+ queue->notify_count= 0;
+ for (long i= 0; i <= count; i++)
+ mysql_bin_log.mark_xid_done(queue->binlog_id, true);
queue= next;
DBUG_EXECUTE_IF("binlog_background_checkpoint_processed",
diff --git a/sql/log.h b/sql/log.h
index e2abcea9fdf..a5548ef4e60 100644
--- a/sql/log.h
+++ b/sql/log.h
@@ -562,7 +562,13 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
bool write_transaction_to_binlog_events(group_commit_entry *entry);
void trx_group_commit_leader(group_commit_entry *leader);
bool is_xidlist_idle_nolock();
-
+#ifdef WITH_WSREP
+ /*
+    Used when this MariaDB node is a slave and Galera is enabled: in that
+    case we write the gtid in wsrep_run_commit itself.
+ */
+ inline bool is_gtid_cached(THD *thd);
+#endif
public:
/*
A list of struct xid_count_per_binlog is used to keep track of how many
@@ -582,6 +588,7 @@ public:
ulong binlog_id;
/* Total prepared XIDs and pending checkpoint requests in this binlog. */
long xid_count;
+ long notify_count;
/* For linking in requests to the binlog background thread. */
xid_count_per_binlog *next_in_queue;
xid_count_per_binlog(); /* Give link error if constructor used. */
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 320771a859b..5f10d980910 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -3638,8 +3638,6 @@ void my_message_sql(uint error, const char *str, myf MyFlags)
extern "C" void *my_str_malloc_mysqld(size_t size);
-extern "C" void my_str_free_mysqld(void *ptr);
-extern "C" void *my_str_realloc_mysqld(void *ptr, size_t size);
void *my_str_malloc_mysqld(size_t size)
{
@@ -3647,17 +3645,6 @@ void *my_str_malloc_mysqld(size_t size)
}
-void my_str_free_mysqld(void *ptr)
-{
- my_free(ptr);
-}
-
-void *my_str_realloc_mysqld(void *ptr, size_t size)
-{
- return my_realloc(ptr, size, MYF(MY_FAE));
-}
-
-
#ifdef __WIN__
pthread_handler_t handle_shutdown(void *arg)
@@ -3713,14 +3700,8 @@ check_enough_stack_size(int recurse_level)
}
-/*
- Initialize my_str_malloc() and my_str_free()
-*/
static void init_libstrings()
{
- my_str_malloc= &my_str_malloc_mysqld;
- my_str_free= &my_str_free_mysqld;
- my_str_realloc= &my_str_realloc_mysqld;
#ifndef EMBEDDED_LIBRARY
my_string_stack_guard= check_enough_stack_size;
#endif
@@ -3731,7 +3712,7 @@ ulonglong my_pcre_frame_size;
static void init_pcre()
{
pcre_malloc= pcre_stack_malloc= my_str_malloc_mysqld;
- pcre_free= pcre_stack_free= my_str_free_mysqld;
+ pcre_free= pcre_stack_free= my_free;
pcre_stack_guard= check_enough_stack_size_slow;
/* See http://pcre.org/original/doc/html/pcrestack.html */
my_pcre_frame_size= -pcre_exec(NULL, NULL, NULL, -999, -999, 0, NULL, 0);
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index c2d46d62200..74b33083d02 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -1540,7 +1540,7 @@ end:
head->file= org_file;
/* Restore head->read_set (and write_set) to what they had before the call */
- head->column_bitmaps_set(save_read_set, save_write_set);
+ head->column_bitmaps_set(save_read_set, save_write_set, save_vcol_set);
if (reset())
{
@@ -6729,7 +6729,7 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
bool update_tbl_stats,
double read_time)
{
- uint idx, best_idx;
+ uint idx, UNINIT_VAR(best_idx);
SEL_ARG *key_to_read= NULL;
ha_rows UNINIT_VAR(best_records); /* protected by key_to_read */
uint UNINIT_VAR(best_mrr_flags), /* protected by key_to_read */
@@ -11386,7 +11386,10 @@ int QUICK_RANGE_SELECT::reset()
buf_size/= 2;
}
if (!mrr_buf_desc)
- DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+ {
+ error= HA_ERR_OUT_OF_MEM;
+ goto err;
+ }
/* Initialize the handler buffer. */
mrr_buf_desc->buffer= mrange_buff;
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index 1224c1bf9f3..d721453a181 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -2383,6 +2383,24 @@ end:
DBUG_RETURN(result);
}
+
+bool partition_info::error_if_requires_values() const
+{
+ switch (part_type) {
+ case NOT_A_PARTITION:
+ case HASH_PARTITION:
+ break;
+ case RANGE_PARTITION:
+ my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0), "RANGE", "LESS THAN");
+ return true;
+ case LIST_PARTITION:
+ my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0), "LIST", "IN");
+ return true;
+ }
+ return false;
+}
+
+
/**
Fix partition data from parser.
@@ -2473,6 +2491,8 @@ bool partition_info::fix_parser_data(THD *thd)
part_elem= it++;
List_iterator<part_elem_value> list_val_it(part_elem->list_val_list);
num_elements= part_elem->list_val_list.elements;
+ if (!num_elements && error_if_requires_values())
+ DBUG_RETURN(true);
DBUG_ASSERT(part_type == RANGE_PARTITION ?
num_elements == 1U : TRUE);
diff --git a/sql/partition_info.h b/sql/partition_info.h
index 38a353c8507..fc13d48e5ee 100644
--- a/sql/partition_info.h
+++ b/sql/partition_info.h
@@ -332,6 +332,7 @@ public:
size_t file_name_size, uint32 *part_id);
void report_part_expr_error(bool use_subpart_expr);
bool has_same_partitioning(partition_info *new_part_info);
+ bool error_if_requires_values() const;
private:
static int list_part_cmp(const void* a, const void* b);
bool set_up_default_partitions(THD *thd, handler *file, HA_CREATE_INFO *info,
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
index 5d457bc57a4..7812cbd951d 100644
--- a/sql/share/errmsg-utf8.txt
+++ b/sql/share/errmsg-utf8.txt
@@ -1804,30 +1804,30 @@ ER_WRONG_AUTO_KEY 42000 S1009
ER_BINLOG_CANT_DELETE_GTID_DOMAIN
eng "Could not delete gtid domain. Reason: %s."
ER_NORMAL_SHUTDOWN
- cze "%s (%s): normální ukončení\n"
- dan "%s (%s): Normal nedlukning\n"
- nla "%s (%s): Normaal afgesloten \n"
- eng "%s (%s): Normal shutdown\n"
- est "%s (%s): MariaDB lõpetas\n"
- fre "%s (%s): Arrêt normal du serveur\n"
- ger "%s (%s): Normal heruntergefahren\n"
- greek "%s (%s): Φυσιολογική διαδικασία shutdown\n"
- hindi "%s (%s): सामान्य शटडाउन\n"
- hun "%s (%s): Normal leallitas\n"
- ita "%s (%s): Shutdown normale\n"
- jpn "%s (%s): 通常シャットダウン\n"
- kor "%s (%s): 정상적인 shutdown\n"
- nor "%s (%s): Normal avslutning\n"
- norwegian-ny "%s (%s): Normal nedkopling\n"
- pol "%s (%s): Standardowe zakończenie działania\n"
- por "%s (%s): 'Shutdown' normal\n"
- rum "%s (%s): Terminare normala\n"
- rus "%s (%s): Корректная остановка\n"
- serbian "%s (%s): Normalno gašenje\n"
- slo "%s (%s): normálne ukončenie\n"
- spa "%s (%s): Apagado normal\n"
- swe "%s (%s): Normal avslutning\n"
- ukr "%s (%s): Нормальне завершення\n"
+ cze "%s (%s): normální ukončení"
+ dan "%s (%s): Normal nedlukning"
+ nla "%s (%s): Normaal afgesloten "
+ eng "%s (%s): Normal shutdown"
+ est "%s (%s): MariaDB lõpetas"
+ fre "%s (%s): Arrêt normal du serveur"
+ ger "%s (%s): Normal heruntergefahren"
+ greek "%s (%s): Φυσιολογική διαδικασία shutdown"
+ hindi "%s (%s): सामान्य शटडाउन"
+ hun "%s (%s): Normal leallitas"
+ ita "%s (%s): Shutdown normale"
+ jpn "%s (%s): 通常シャットダウン"
+ kor "%s (%s): 정상적인 shutdown"
+ nor "%s (%s): Normal avslutning"
+ norwegian-ny "%s (%s): Normal nedkopling"
+ pol "%s (%s): Standardowe zakończenie działania"
+ por "%s (%s): 'Shutdown' normal"
+ rum "%s (%s): Terminare normala"
+ rus "%s (%s): Корректная остановка"
+ serbian "%s (%s): Normalno gašenje"
+ slo "%s (%s): normálne ukončenie"
+ spa "%s (%s): Apagado normal"
+ swe "%s (%s): Normal avslutning"
+ ukr "%s (%s): Нормальне завершення"
ER_GOT_SIGNAL
cze "%s: přijat signal %d, končím\n"
dan "%s: Fangede signal %d. Afslutter!!\n"
diff --git a/sql/signal_handler.cc b/sql/signal_handler.cc
index 6e21d6249ab..68e801d5885 100644
--- a/sql/signal_handler.cc
+++ b/sql/signal_handler.cc
@@ -298,7 +298,9 @@ extern "C" sig_handler handle_fatal_signal(int sig)
#ifdef HAVE_WRITE_CORE
if (test_flags & TEST_CORE_ON_SIGNAL)
{
- my_safe_printf_stderr("%s", "Writing a core file\n");
+ char buff[80];
+ my_getwd(buff, sizeof(buff), 0);
+ my_safe_printf_stderr("Writing a core file at %s\n", buff);
fflush(stderr);
my_write_core(sig);
}
diff --git a/sql/slave.h b/sql/slave.h
index 431e6847abe..aec5f0a1d02 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -51,7 +51,7 @@
#define SLAVE_NET_TIMEOUT 60
-#define MAX_SLAVE_ERROR 2000
+#define MAX_SLAVE_ERROR ER_ERROR_LAST+1
#define MAX_REPLICATION_THREAD 64
diff --git a/sql/sp.cc b/sql/sp.cc
index a275781f74a..12d303ed85a 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -1317,8 +1317,8 @@ log:
{
thd->clear_error();
- String log_query;
- log_query.set_charset(system_charset_info);
+ StringBuffer<128> log_query(thd->variables.character_set_client);
+ DBUG_ASSERT(log_query.charset()->mbminlen == 1);
if (show_create_sp(thd, &log_query,
sp->m_explicit_name ? sp->m_db : null_clex_str,
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 9935b5a8bd6..e3bc6a2f0c1 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -2865,37 +2865,42 @@ static void acl_insert_user(const char *user, const char *host,
}
-static void acl_update_db(const char *user, const char *host, const char *db,
+static bool acl_update_db(const char *user, const char *host, const char *db,
ulong privileges)
{
mysql_mutex_assert_owner(&acl_cache->lock);
+ bool updated= false;
+
for (uint i=0 ; i < acl_dbs.elements ; i++)
{
ACL_DB *acl_db=dynamic_element(&acl_dbs,i,ACL_DB*);
if ((!acl_db->user && !user[0]) ||
- (acl_db->user &&
- !strcmp(user,acl_db->user)))
+ (acl_db->user &&
+ !strcmp(user,acl_db->user)))
{
if ((!acl_db->host.hostname && !host[0]) ||
- (acl_db->host.hostname &&
- !strcmp(host, acl_db->host.hostname)))
+ (acl_db->host.hostname &&
+ !strcmp(host, acl_db->host.hostname)))
{
- if ((!acl_db->db && !db[0]) ||
- (acl_db->db && !strcmp(db,acl_db->db)))
+ if ((!acl_db->db && !db[0]) ||
+ (acl_db->db && !strcmp(db,acl_db->db)))
- {
- if (privileges)
+ {
+ if (privileges)
{
acl_db->access= privileges;
acl_db->initial_access= acl_db->access;
}
- else
- delete_dynamic_element(&acl_dbs,i);
- }
+ else
+ delete_dynamic_element(&acl_dbs,i);
+ updated= true;
+ }
}
}
}
+
+ return updated;
}
@@ -4387,9 +4392,21 @@ static int replace_db_table(TABLE *table, const char *db,
acl_cache->clear(1); // Clear privilege cache
if (old_row_exists)
acl_update_db(combo.user.str,combo.host.str,db,rights);
- else
- if (rights)
- acl_insert_db(combo.user.str,combo.host.str,db,rights);
+ else if (rights)
+ {
+ /*
+ If we did not have an already existing row, for users, we must always
+ insert an ACL_DB entry. For roles however, it is possible that one was
+ already created when DB privileges were propagated from other granted
+ roles onto the current role. For this case, first try to update the
+ existing entry, otherwise insert a new one.
+ */
+ if (!combo.is_role() ||
+ !acl_update_db(combo.user.str, combo.host.str, db, rights))
+ {
+ acl_insert_db(combo.user.str,combo.host.str,db,rights);
+ }
+ }
DBUG_RETURN(0);
/* This could only happen if the grant tables got corrupted */
@@ -6312,9 +6329,12 @@ static int merge_role_privileges(ACL_ROLE *role __attribute__((unused)),
{
PRIVS_TO_MERGE *data= (PRIVS_TO_MERGE *)context;
+ DBUG_ASSERT(grantee->counter > 0);
if (--grantee->counter)
return 1; // don't recurse into grantee just yet
+ grantee->counter= 1; // Mark the grantee as merged.
+
/* if we'll do db/table/routine privileges, create a hash of role names */
role_hash_t role_hash(role_key);
if (data->what != PRIVS_TO_MERGE::GLOBAL)
@@ -7407,11 +7427,10 @@ end_index_init:
DBUG_RETURN(return_val);
}
-
-static my_bool role_propagate_grants_action(void *ptr,
- void *unused __attribute__((unused)))
+static my_bool propagate_role_grants_action(void *role_ptr,
+ void *ptr __attribute__((unused)))
{
- ACL_ROLE *role= (ACL_ROLE *)ptr;
+ ACL_ROLE *role= static_cast<ACL_ROLE *>(role_ptr);
if (role->counter)
return 0;
@@ -7487,7 +7506,7 @@ bool grant_reload(THD *thd)
}
mysql_mutex_lock(&acl_cache->lock);
- my_hash_iterate(&acl_roles, role_propagate_grants_action, NULL);
+ my_hash_iterate(&acl_roles, propagate_role_grants_action, NULL);
mysql_mutex_unlock(&acl_cache->lock);
mysql_rwlock_unlock(&LOCK_grant);
@@ -10221,13 +10240,13 @@ bool mysql_create_user(THD *thd, List <LEX_USER> &list, bool handle_as_role)
}
}
- binlog= true;
if (replace_user_table(thd, tables.user_table(), *user_name, 0, 0, 1, 0))
{
append_user(thd, &wrong_users, user_name);
result= TRUE;
continue;
}
+ binlog= true;
// every created role is automatically granted to its creator-admin
if (handle_as_role)
diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc
index 0e5d8d43c0c..0b6a79bae89 100644
--- a/sql/sql_connect.cc
+++ b/sql/sql_connect.cc
@@ -314,13 +314,9 @@ extern "C" void free_user(struct user_conn *uc)
void init_max_user_conn(void)
{
#ifndef NO_EMBEDDED_ACCESS_CHECKS
- if (my_hash_init(&hash_user_connections,system_charset_info,max_connections,
- 0,0, (my_hash_get_key) get_key_conn,
- (my_hash_free_key) free_user, 0))
- {
- sql_print_error("Initializing hash_user_connections failed.");
- exit(1);
- }
+ my_hash_init(&hash_user_connections, system_charset_info, max_connections,
+ 0, 0, (my_hash_get_key) get_key_conn,
+ (my_hash_free_key) free_user, 0);
#endif
}
@@ -479,24 +475,16 @@ void init_user_stats(USER_STATS *user_stats,
void init_global_user_stats(void)
{
- if (my_hash_init(&global_user_stats, system_charset_info, max_connections,
- 0, 0, (my_hash_get_key) get_key_user_stats,
- (my_hash_free_key)free_user_stats, 0))
- {
- sql_print_error("Initializing global_user_stats failed.");
- exit(1);
- }
+ my_hash_init(&global_user_stats, system_charset_info, max_connections,
+ 0, 0, (my_hash_get_key) get_key_user_stats,
+ (my_hash_free_key) free_user_stats, 0);
}
void init_global_client_stats(void)
{
- if (my_hash_init(&global_client_stats, system_charset_info, max_connections,
- 0, 0, (my_hash_get_key) get_key_user_stats,
- (my_hash_free_key)free_user_stats, 0))
- {
- sql_print_error("Initializing global_client_stats failed.");
- exit(1);
- }
+ my_hash_init(&global_client_stats, system_charset_info, max_connections,
+ 0, 0, (my_hash_get_key) get_key_user_stats,
+ (my_hash_free_key) free_user_stats, 0);
}
extern "C" uchar *get_key_table_stats(TABLE_STATS *table_stats, size_t *length,
@@ -513,12 +501,9 @@ extern "C" void free_table_stats(TABLE_STATS* table_stats)
void init_global_table_stats(void)
{
- if (my_hash_init(&global_table_stats, system_charset_info, max_connections,
- 0, 0, (my_hash_get_key) get_key_table_stats,
- (my_hash_free_key)free_table_stats, 0)) {
- sql_print_error("Initializing global_table_stats failed.");
- exit(1);
- }
+ my_hash_init(&global_table_stats, system_charset_info, max_connections,
+ 0, 0, (my_hash_get_key) get_key_table_stats,
+ (my_hash_free_key) free_table_stats, 0);
}
extern "C" uchar *get_key_index_stats(INDEX_STATS *index_stats, size_t *length,
@@ -535,13 +520,9 @@ extern "C" void free_index_stats(INDEX_STATS* index_stats)
void init_global_index_stats(void)
{
- if (my_hash_init(&global_index_stats, system_charset_info, max_connections,
- 0, 0, (my_hash_get_key) get_key_index_stats,
- (my_hash_free_key)free_index_stats, 0))
- {
- sql_print_error("Initializing global_index_stats failed.");
- exit(1);
- }
+ my_hash_init(&global_index_stats, system_charset_info, max_connections,
+ 0, 0, (my_hash_get_key) get_key_index_stats,
+ (my_hash_free_key) free_index_stats, 0);
}
diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc
index a4a2ff16c62..44fab462c08 100644
--- a/sql/sql_cte.cc
+++ b/sql/sql_cte.cc
@@ -664,7 +664,7 @@ void With_element::move_anchors_ahead()
{
st_select_lex *next_sl;
st_select_lex *new_pos= spec->first_select();
- st_select_lex *last_sl;
+ st_select_lex *UNINIT_VAR(last_sl);
new_pos->linkage= UNION_TYPE;
for (st_select_lex *sl= new_pos; sl; sl= next_sl)
{
diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc
index ca276eb87ac..c82948e52ae 100644
--- a/sql/sql_explain.cc
+++ b/sql/sql_explain.cc
@@ -974,6 +974,7 @@ Explain_aggr_filesort::Explain_aggr_filesort(MEM_ROOT *mem_root,
for (ORDER *ord= filesort->order; ord; ord= ord->next)
{
sort_items.push_back(ord->item[0], mem_root);
+ sort_directions.push_back(&ord->direction, mem_root);
}
filesort->tracker= &tracker;
}
@@ -987,10 +988,13 @@ void Explain_aggr_filesort::print_json_members(Json_writer *writer,
str.length(0);
List_iterator_fast<Item> it(sort_items);
- Item *item;
+ List_iterator_fast<ORDER::enum_order> it_dir(sort_directions);
+ Item* item;
+ ORDER::enum_order *direction;
bool first= true;
while ((item= it++))
{
+ direction= it_dir++;
if (first)
first= false;
else
@@ -998,6 +1002,8 @@ void Explain_aggr_filesort::print_json_members(Json_writer *writer,
str.append(", ");
}
append_item_to_str(&str, item);
+ if (*direction == ORDER::ORDER_DESC)
+ str.append(" desc");
}
writer->add_member("sort_key").add_str(str.c_ptr_safe());
diff --git a/sql/sql_explain.h b/sql/sql_explain.h
index 154769fe289..fb524263a8e 100644
--- a/sql/sql_explain.h
+++ b/sql/sql_explain.h
@@ -286,6 +286,7 @@ public:
class Explain_aggr_filesort : public Explain_aggr_node
{
List<Item> sort_items;
+ List<ORDER::enum_order> sort_directions;
public:
enum_explain_aggr_node_type get_type() { return AGGR_OP_FILESORT; }
Filesort_tracker tracker;
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 96d61655e90..aed04842c90 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -2408,10 +2408,8 @@ void st_select_lex_node::move_as_slave(st_select_lex_node *new_master)
prev= &curr->next;
}
else
- {
prev= &new_master->slave;
- new_master->slave= this;
- }
+ *prev= this;
next= 0;
master= new_master;
}
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index b8029cf6e7e..f951a81331b 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -4602,16 +4602,11 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
my_error(ER_PARTITION_WRONG_VALUES_ERROR, MYF(0),
"LIST", "IN");
}
- else if (tab_part_info->part_type == RANGE_PARTITION)
- {
- my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
- "RANGE", "LESS THAN");
- }
else
{
- DBUG_ASSERT(tab_part_info->part_type == LIST_PARTITION);
- my_error(ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
- "LIST", "IN");
+ DBUG_ASSERT(tab_part_info->part_type == RANGE_PARTITION ||
+ tab_part_info->part_type == LIST_PARTITION);
+ (void) tab_part_info->error_if_requires_values();
}
goto err;
}
@@ -8076,8 +8071,11 @@ int create_partition_name(char *out, size_t outlen, const char *in1,
end= strxnmov(out, outlen-1, in1, "#P#", transl_part, NullS);
else if (name_variant == TEMP_PART_NAME)
end= strxnmov(out, outlen-1, in1, "#P#", transl_part, "#TMP#", NullS);
- else if (name_variant == RENAMED_PART_NAME)
+ else
+ {
+ DBUG_ASSERT(name_variant == RENAMED_PART_NAME);
end= strxnmov(out, outlen-1, in1, "#P#", transl_part, "#REN#", NullS);
+ }
if (end - out == static_cast<ptrdiff_t>(outlen-1))
{
my_error(ER_PATH_LENGTH, MYF(0), longest_str(in1, transl_part));
@@ -8117,9 +8115,12 @@ int create_subpartition_name(char *out, size_t outlen,
else if (name_variant == TEMP_PART_NAME)
end= strxnmov(out, outlen-1, in1, "#P#", transl_part_name,
"#SP#", transl_subpart_name, "#TMP#", NullS);
- else if (name_variant == RENAMED_PART_NAME)
+ else
+ {
+ DBUG_ASSERT(name_variant == RENAMED_PART_NAME);
end= strxnmov(out, outlen-1, in1, "#P#", transl_part_name,
"#SP#", transl_subpart_name, "#REN#", NullS);
+ }
if (end - out == static_cast<ptrdiff_t>(outlen-1))
{
my_error(ER_PATH_LENGTH, MYF(0),
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 5932fe136d7..a2eb2d8d2de 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -4732,7 +4732,19 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor)
if (error == 0 && this->lex->sql_command == SQLCOM_CALL)
{
if (is_sql_prepare())
+ {
+ /*
+ Here we have the diagnostics area status already set to DA_OK.
+ sent_out_parameters() can raise errors when assigning OUT parameters:
+ DECLARE a DATETIME;
+ EXECUTE IMMEDIATE 'CALL p1(?)' USING a;
+ when the procedure p1 assigns a DATETIME-incompatible value (e.g. 10)
+      to the OUT parameter. Allow the status to be overwritten (to DA_ERROR).
+ */
+ thd->get_stmt_da()->set_overwrite_status(true);
thd->protocol_text.send_out_parameters(&this->lex->param_list);
+ thd->get_stmt_da()->set_overwrite_status(false);
+ }
else
thd->protocol->send_out_parameters(&this->lex->param_list);
}
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index f894f37caf4..ea96e971608 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -862,6 +862,22 @@ JOIN::prepare(TABLE_LIST *tables_init,
}
}
+ /*
+ After setting up window functions, we may have discovered additional
+ used tables from the PARTITION BY and ORDER BY list. Update all items
+ that contain window functions.
+ */
+ if (select_lex->have_window_funcs())
+ {
+ List_iterator_fast<Item> it(select_lex->item_list);
+ Item *item;
+ while ((item= it++))
+ {
+ if (item->with_window_func)
+ item->update_used_tables();
+ }
+ }
+
With_clause *with_clause=select_lex->get_with_clause();
if (with_clause && with_clause->prepare_unreferenced_elements(thd))
DBUG_RETURN(1);
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index c4c520490df..9abf1db76f4 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -4990,7 +4990,7 @@ err:
/* Write log if no error or if we already deleted a table */
if (!result || thd->log_current_statement)
{
- if (result && create_info->table_was_deleted)
+ if (result && create_info->table_was_deleted && pos_in_locked_tables)
{
/*
Possible locked table was dropped. We should remove meta data locks
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 775105af88b..c5a2555178d 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -643,7 +643,8 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
if (!res && mysql_bin_log.is_open())
{
- String buff;
+ StringBuffer<128> buff(thd->variables.character_set_client);
+ DBUG_ASSERT(buff.charset()->mbminlen == 1);
const LEX_STRING command[3]=
{{ C_STRING_WITH_LEN("CREATE ") },
{ C_STRING_WITH_LEN("ALTER ") },
diff --git a/sql/sql_window.cc b/sql/sql_window.cc
index 3869628dfea..3e48fe6d267 100644
--- a/sql/sql_window.cc
+++ b/sql/sql_window.cc
@@ -298,13 +298,6 @@ setup_windows(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables,
}
}
- List_iterator_fast<Item_window_func> li(win_funcs);
- Item_window_func *win_func_item;
- while ((win_func_item= li++))
- {
- win_func_item->update_used_tables();
- }
-
DBUG_RETURN(0);
}
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 05f0998683e..a695d6afd60 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -5313,12 +5313,8 @@ opt_part_values:
partition_info *part_info= lex->part_info;
if (! lex->is_partition_management())
{
- if (part_info->part_type == RANGE_PARTITION)
- my_yyabort_error((ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
- "RANGE", "LESS THAN"));
- if (part_info->part_type == LIST_PARTITION)
- my_yyabort_error((ER_PARTITION_REQUIRES_VALUES_ERROR, MYF(0),
- "LIST", "IN"));
+ if (part_info->error_if_requires_values())
+ MYSQL_YYABORT;
}
else
part_info->part_type= HASH_PARTITION;
diff --git a/sql/table.cc b/sql/table.cc
index 5173cc7250e..c65e29a44d1 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -1000,7 +1000,7 @@ bool parse_vcol_defs(THD *thd, MEM_ROOT *mem_root, TABLE *table,
Virtual_column_info **check_constraint_ptr= table->check_constraints;
sql_mode_t saved_mode= thd->variables.sql_mode;
Query_arena backup_arena;
- Virtual_column_info *vcol;
+ Virtual_column_info *vcol= 0;
StringBuffer<MAX_FIELD_WIDTH> expr_str;
bool res= 1;
DBUG_ENTER("parse_vcol_defs");
@@ -1171,7 +1171,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
uint new_frm_ver, field_pack_length, new_field_pack_flag;
uint interval_count, interval_parts, read_length, int_length;
uint db_create_options, keys, key_parts, n_length;
- uint com_length, null_bit_pos, mysql57_vcol_null_bit_pos, bitmap_count;
+ uint com_length, null_bit_pos, UNINIT_VAR(mysql57_vcol_null_bit_pos), bitmap_count;
uint i;
bool use_hash, mysql57_null_bits= 0;
char *keynames, *names, *comment_pos;
@@ -2121,6 +2121,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
}
}
+ key_first_info= keyinfo;
for (uint key=0 ; key < keys ; key++,keyinfo++)
{
uint usable_parts= 0;
@@ -2138,9 +2139,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
keyinfo->name_length+1);
}
- if (!key)
- key_first_info= keyinfo;
-
if (ext_key_parts > share->key_parts && key)
{
KEY_PART_INFO *new_key_part= (keyinfo-1)->key_part +
@@ -6784,6 +6782,14 @@ void TABLE::create_key_part_by_field(KEY_PART_INFO *key_part_info,
might be reused.
*/
key_part_info->store_length= key_part_info->length;
+ /*
+ For BIT fields null_bit is not set to 0 even if the field is defined
+ as NOT NULL, look at Field_bit::Field_bit
+ */
+ if (!field->real_maybe_null())
+ {
+ key_part_info->null_bit= 0;
+ }
/*
The total store length of the key part is the raw length of the field +
diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc
index 00f1b777f17..6c5eaf4a60c 100644
--- a/sql/wsrep_mysqld.cc
+++ b/sql/wsrep_mysqld.cc
@@ -1252,6 +1252,16 @@ int wsrep_to_buf_helper(
if (!ret && writer.write(&gtid_ev)) ret= 1;
}
#endif /* GTID_SUPPORT */
+ if (wsrep_gtid_mode && thd->variables.gtid_seq_no)
+ {
+ Gtid_log_event gtid_event(thd, thd->variables.gtid_seq_no,
+ thd->variables.gtid_domain_id,
+ true, LOG_EVENT_SUPPRESS_USE_F,
+ true, 0);
+ gtid_event.server_id= thd->variables.server_id;
+ if (!gtid_event.is_valid()) ret= 0;
+ ret= writer.write(&gtid_event);
+ }
/* if there is prepare query, add event for it */
if (!ret && thd->wsrep_TOI_pre_query)