Diffstat (limited to 'sql')
-rw-r--r--  sql/debug_sync.cc | 2
-rw-r--r--  sql/field.cc | 23
-rw-r--r--  sql/field.h | 8
-rw-r--r--  sql/filesort.cc | 36
-rw-r--r--  sql/ha_partition.h | 13
-rw-r--r--  sql/ha_sequence.cc | 2
-rw-r--r--  sql/handler.cc | 62
-rw-r--r--  sql/item.cc | 29
-rw-r--r--  sql/item.h | 27
-rw-r--r--  sql/item_cmpfunc.cc | 26
-rw-r--r--  sql/item_cmpfunc.h | 6
-rw-r--r--  sql/item_jsonfunc.h | 11
-rw-r--r--  sql/item_strfunc.cc | 2
-rw-r--r--  sql/item_subselect.cc | 16
-rw-r--r--  sql/item_sum.cc | 3
-rw-r--r--  sql/key.cc | 3
-rw-r--r--  sql/log.cc | 5
-rw-r--r--  sql/log_event.h | 4
-rw-r--r--  sql/log_event_server.cc | 200
-rw-r--r--  sql/mysqld.cc | 30
-rw-r--r--  sql/mysqld.h | 4
-rw-r--r--  sql/opt_range.cc | 49
-rw-r--r--  sql/opt_split.cc | 216
-rw-r--r--  sql/opt_subselect.cc | 19
-rw-r--r--  sql/opt_subselect.h | 1
-rw-r--r--  sql/rpl_parallel.cc | 53
-rw-r--r--  sql/rpl_parallel.h | 3
-rw-r--r--  sql/rpl_rli.cc | 5
-rw-r--r--  sql/semisync_master.cc | 6
-rw-r--r--  sql/signal_handler.cc | 29
-rw-r--r--  sql/slave.cc | 35
-rw-r--r--  sql/slave.h | 3
-rw-r--r--  sql/sql_acl.cc | 9
-rw-r--r--  sql/sql_analyze_stmt.h | 74
-rw-r--r--  sql/sql_base.cc | 33
-rw-r--r--  sql/sql_class.h | 4
-rw-r--r--  sql/sql_delete.cc | 5
-rw-r--r--  sql/sql_derived.cc | 21
-rw-r--r--  sql/sql_explain.cc | 42
-rw-r--r--  sql/sql_explain.h | 17
-rw-r--r--  sql/sql_insert.cc | 5
-rw-r--r--  sql/sql_join_cache.cc | 6
-rw-r--r--  sql/sql_lex.cc | 141
-rw-r--r--  sql/sql_lex.h | 58
-rw-r--r--  sql/sql_locale.cc | 21
-rw-r--r--  sql/sql_parse.cc | 26
-rw-r--r--  sql/sql_priv.h | 65
-rw-r--r--  sql/sql_repl.cc | 2
-rw-r--r--  sql/sql_schema.cc | 61
-rw-r--r--  sql/sql_schema.h | 11
-rw-r--r--  sql/sql_select.cc | 1069
-rw-r--r--  sql/sql_select.h | 32
-rw-r--r--  sql/sql_sequence.cc | 17
-rw-r--r--  sql/sql_show.cc | 4
-rw-r--r--  sql/sql_sort.h | 1
-rw-r--r--  sql/sql_statistics.cc | 50
-rw-r--r--  sql/sql_string.cc | 2
-rw-r--r--  sql/sql_string.h | 77
-rw-r--r--  sql/sql_table.cc | 13
-rw-r--r--  sql/sql_tvc.cc | 1
-rw-r--r--  sql/sql_type.cc | 6
-rw-r--r--  sql/sql_type.h | 32
-rw-r--r--  sql/sql_type_fixedbin.h | 4
-rw-r--r--  sql/sql_update.cc | 8
-rw-r--r--  sql/sql_view.cc | 11
-rw-r--r--  sql/sql_yacc.yy | 115
-rw-r--r--  sql/structs.h | 24
-rw-r--r--  sql/sys_vars.cc | 30
-rw-r--r--  sql/table.cc | 107
-rw-r--r--  sql/table.h | 21
-rw-r--r--  sql/tztime.cc | 4
-rw-r--r--  sql/wsrep_client_service.cc | 2
-rw-r--r--  sql/wsrep_high_priority_service.cc | 15
-rw-r--r--  sql/wsrep_schema.cc | 5
-rw-r--r--  sql/wsrep_thd.h | 9
-rw-r--r--  sql/wsrep_trans_observer.h | 15
76 files changed, 2273 insertions(+), 933 deletions(-)
diff --git a/sql/debug_sync.cc b/sql/debug_sync.cc
index eac111d32d7..f363816fe49 100644
--- a/sql/debug_sync.cc
+++ b/sql/debug_sync.cc
@@ -1451,7 +1451,7 @@ uchar *debug_sync_value_ptr(THD *thd)
if (opt_debug_sync_timeout)
{
- static char on[]= "ON - current signals: '";
+ static const char on[]= "ON - current signals: '";
// Ensure exclusive access to debug_sync_global.ds_signal
mysql_mutex_lock(&debug_sync_global.ds_mutex);
diff --git a/sql/field.cc b/sql/field.cc
index 5a618a5a2a9..2418d692c72 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -2557,6 +2557,7 @@ Field *Field::make_new_field(MEM_ROOT *root, TABLE *new_table,
tmp->key_start.init(0);
tmp->part_of_key.init(0);
tmp->part_of_sortkey.init(0);
+ tmp->read_stats= NULL;
/*
TODO: it is not clear why this method needs to reset unireg_check.
Try not to reset it, or explain why it needs to be reset.
@@ -7622,7 +7623,8 @@ int Field_string::cmp(const uchar *a_ptr, const uchar *b_ptr) const
return field_charset()->coll->strnncollsp_nchars(field_charset(),
a_ptr, field_length,
b_ptr, field_length,
- Field_string::char_length());
+ Field_string::char_length(),
+ MY_STRNNCOLLSP_NCHARS_EMULATE_TRIMMED_TRAILING_SPACES);
}
@@ -8002,10 +8004,11 @@ int Field_varstring::cmp(const uchar *a_ptr, const uchar *b_ptr) const
int Field_varstring::cmp_prefix(const uchar *a_ptr, const uchar *b_ptr,
- size_t prefix_len) const
+ size_t prefix_char_len) const
{
- /* avoid expensive well_formed_char_length if possible */
- if (prefix_len == table->field[field_index]->field_length)
+ /* avoid more expensive strnncollsp_nchars() if possible */
+ if (prefix_char_len * field_charset()->mbmaxlen ==
+ table->field[field_index]->field_length)
return Field_varstring::cmp(a_ptr, b_ptr);
size_t a_length, b_length;
@@ -8025,8 +8028,8 @@ int Field_varstring::cmp_prefix(const uchar *a_ptr, const uchar *b_ptr,
a_length,
b_ptr + length_bytes,
b_length,
- prefix_len /
- field_charset()->mbmaxlen);
+ prefix_char_len,
+ 0);
}
@@ -8813,7 +8816,7 @@ int Field_blob::cmp(const uchar *a_ptr, const uchar *b_ptr) const
int Field_blob::cmp_prefix(const uchar *a_ptr, const uchar *b_ptr,
- size_t prefix_len) const
+ size_t prefix_char_len) const
{
uchar *blob1,*blob2;
memcpy(&blob1, a_ptr+packlength, sizeof(char*));
@@ -8822,8 +8825,8 @@ int Field_blob::cmp_prefix(const uchar *a_ptr, const uchar *b_ptr,
return field_charset()->coll->strnncollsp_nchars(field_charset(),
blob1, a_len,
blob2, b_len,
- prefix_len /
- field_charset()->mbmaxlen);
+ prefix_char_len,
+ 0);
}
@@ -10008,7 +10011,7 @@ my_decimal *Field_bit::val_decimal(my_decimal *deciaml_value)
(not the table->record[0] necessarily)
*/
int Field_bit::cmp_prefix(const uchar *a, const uchar *b,
- size_t prefix_len) const
+ size_t prefix_char_len) const
{
my_ptrdiff_t a_diff= a - ptr;
my_ptrdiff_t b_diff= b - ptr;
diff --git a/sql/field.h b/sql/field.h
index d4b59a88f59..642456b9774 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -1301,7 +1301,7 @@ public:
Currently it's only used in partitioning.
*/
virtual int cmp_prefix(const uchar *a, const uchar *b,
- size_t prefix_len) const
+ size_t prefix_char_len) const
{ return cmp(a, b); }
virtual int cmp(const uchar *,const uchar *) const=0;
virtual int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0U) const
@@ -4201,7 +4201,7 @@ public:
my_decimal *val_decimal(my_decimal *) override;
bool send(Protocol *protocol) override;
int cmp(const uchar *a,const uchar *b) const override;
- int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len) const
+ int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_char_len) const
override;
void sort_string(uchar *buff,uint length) override;
uint get_key_image(uchar *buff, uint length,
@@ -4494,7 +4494,7 @@ public:
String *val_str(String *, String *) override;
my_decimal *val_decimal(my_decimal *) override;
int cmp(const uchar *a, const uchar *b) const override;
- int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len) const
+ int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_char_len) const
override;
int cmp(const uchar *a, uint32 a_length, const uchar *b, uint32 b_length)
const;
@@ -4973,7 +4973,7 @@ public:
int cmp_binary_offset(uint row_offset) override
{ return cmp_offset(row_offset); }
int cmp_prefix(const uchar *a, const uchar *b,
- size_t max_length) const override;
+ size_t prefix_char_length) const override;
int key_cmp(const uchar *a, const uchar *b) const override
{ return cmp_binary((uchar *) a, (uchar *) b); }
int key_cmp(const uchar *str, uint length) const override;
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 96eabfdab89..026c9d0d670 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -242,7 +242,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
MYSQL_FILESORT_START(table->s->db.str, table->s->table_name.str);
DEBUG_SYNC(thd, "filesort_start");
- if (!(sort= new SORT_INFO))
+ if (!(sort= new SORT_INFO)) // Note that this is not automatically freed!
return 0;
if (subselect && subselect->filesort_buffer.is_allocated())
@@ -490,6 +490,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
error= 0;
err:
+ param.tmp_buffer.free();
if (!subselect || !subselect->is_uncacheable())
{
if (!param.using_addon_fields())
@@ -1169,7 +1170,7 @@ void store_length(uchar *to, uint length, uint pack_length)
void
Type_handler_string_result::make_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const
+ String *tmp_buffer) const
{
CHARSET_INFO *cs= item->collation.collation;
bool maybe_null= item->maybe_null();
@@ -1177,7 +1178,7 @@ Type_handler_string_result::make_sort_key_part(uchar *to, Item *item,
if (maybe_null)
*to++= 1;
- String *res= item->str_result(&param->tmp_buffer);
+ Binary_string *res= item->str_result(tmp_buffer);
if (!res)
{
if (maybe_null)
@@ -1239,7 +1240,7 @@ Type_handler_string_result::make_sort_key_part(uchar *to, Item *item,
void
Type_handler_int_result::make_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const
+ String *tmp_buffer) const
{
longlong value= item->val_int_result();
make_sort_key_longlong(to, item->maybe_null(), item->null_value,
@@ -1250,7 +1251,7 @@ Type_handler_int_result::make_sort_key_part(uchar *to, Item *item,
void
Type_handler_temporal_result::make_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const
+ String *tmp_buffer) const
{
MYSQL_TIME buf;
// This is a temporal type. No nanoseconds. Rounding mode is not important.
@@ -1272,7 +1273,7 @@ Type_handler_temporal_result::make_sort_key_part(uchar *to, Item *item,
void
Type_handler_timestamp_common::make_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const
+ String *tmp_buffer) const
{
THD *thd= current_thd;
uint binlen= my_timestamp_binary_length(item->decimals);
@@ -1365,7 +1366,7 @@ Type_handler::make_packed_sort_key_longlong(uchar *to, bool maybe_null,
void
Type_handler_decimal_result::make_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const
+ String *tmp_buffer) const
{
my_decimal dec_buf, *dec_val= item->val_decimal_result(&dec_buf);
if (item->maybe_null())
@@ -1385,7 +1386,7 @@ Type_handler_decimal_result::make_sort_key_part(uchar *to, Item *item,
void
Type_handler_real_result::make_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const
+ String *tmp_buffer) const
{
double value= item->val_result();
if (item->maybe_null())
@@ -2492,7 +2493,7 @@ void Sort_param::try_to_pack_sortkeys()
uint
Type_handler_string_result::make_packed_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const
+ String *tmp) const
{
CHARSET_INFO *cs= item->collation.collation;
bool maybe_null= item->maybe_null();
@@ -2500,7 +2501,7 @@ Type_handler_string_result::make_packed_sort_key_part(uchar *to, Item *item,
if (maybe_null)
*to++= 1;
- Binary_string *res= item->str_result(&param->tmp_buffer);
+ Binary_string *res= item->str_result(tmp);
if (!res)
{
if (maybe_null)
@@ -2531,7 +2532,7 @@ Type_handler_string_result::make_packed_sort_key_part(uchar *to, Item *item,
uint
Type_handler_int_result::make_packed_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const
+ String *tmp) const
{
longlong value= item->val_int_result();
return make_packed_sort_key_longlong(to, item->maybe_null(),
@@ -2543,7 +2544,7 @@ Type_handler_int_result::make_packed_sort_key_part(uchar *to, Item *item,
uint
Type_handler_decimal_result::make_packed_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const
+ String *tmp) const
{
my_decimal dec_buf, *dec_val= item->val_decimal_result(&dec_buf);
if (item->maybe_null())
@@ -2565,7 +2566,7 @@ Type_handler_decimal_result::make_packed_sort_key_part(uchar *to, Item *item,
uint
Type_handler_real_result::make_packed_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const
+ String *tmp) const
{
double value= item->val_result();
if (item->maybe_null())
@@ -2586,7 +2587,7 @@ Type_handler_real_result::make_packed_sort_key_part(uchar *to, Item *item,
uint
Type_handler_temporal_result::make_packed_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const
+ String *tmp) const
{
MYSQL_TIME buf;
// This is a temporal type. No nanoseconds. Rounding mode is not important.
@@ -2608,7 +2609,7 @@ Type_handler_temporal_result::make_packed_sort_key_part(uchar *to, Item *item,
uint
Type_handler_timestamp_common::make_packed_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const
+ String *tmp) const
{
THD *thd= current_thd;
uint binlen= my_timestamp_binary_length(item->decimals);
@@ -2952,7 +2953,8 @@ static uint make_sortkey(Sort_param *param, uchar *to)
{ // Item
sort_field->item->type_handler()->make_sort_key_part(to,
sort_field->item,
- sort_field, param);
+ sort_field,
+ &param->tmp_buffer);
if ((maybe_null= sort_field->item->maybe_null()))
to++;
}
@@ -3005,7 +3007,7 @@ static uint make_packed_sortkey(Sort_param *param, uchar *to)
Item *item= sort_field->item;
length= item->type_handler()->make_packed_sort_key_part(to, item,
sort_field,
- param);
+ &param->tmp_buffer);
if ((maybe_null= sort_field->item->maybe_null()))
to++;
}
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index 86d8cdb7cee..bb8cd2b2db1 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -1302,7 +1302,18 @@ public:
The following code is not safe if you are using different
storage engines or different index types per partition.
*/
- return m_file[0]->index_flags(inx, part, all_parts);
+ ulong part_flags= m_file[0]->index_flags(inx, part, all_parts);
+
+ /*
+ The underlying storage engine might support Rowid Filtering. But
+ ha_partition does not forward the needed SE API calls, so the feature
+ will not be used.
+
+ Note: It's the same with IndexConditionPushdown, except for its variant
+ of IndexConditionPushdown+BatchedKeyAccess (that one works). Because of
+ that, we do not clear HA_DO_INDEX_COND_PUSHDOWN here.
+ */
+ return part_flags & ~HA_DO_RANGE_FILTER_PUSHDOWN;
}
/**
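
The comment above records why the flag is masked out. As a self-contained illustration (simplified types and assumed bit values, not the real handler API), the capability-masking idiom looks like this:

  // Sketch of the capability-masking idiom: a wrapper handler strips
  // capability bits whose SE API calls it does not forward. The flag names
  // match the patch; the bit values here are assumptions.
  #include <cstdio>

  typedef unsigned long ulong;
  static const ulong HA_DO_INDEX_COND_PUSHDOWN=   1UL << 0;
  static const ulong HA_DO_RANGE_FILTER_PUSHDOWN= 1UL << 1;

  struct Engine {               // stands in for the underlying storage engine
    ulong index_flags() const
    { return HA_DO_INDEX_COND_PUSHDOWN | HA_DO_RANGE_FILTER_PUSHDOWN; }
  };

  struct PartitionWrapper {     // stands in for ha_partition
    Engine *first_part;
    ulong index_flags() const
    {
      // Advertise the first partition's flags, minus rowid filtering,
      // whose pushdown calls the wrapper does not forward.
      return first_part->index_flags() & ~HA_DO_RANGE_FILTER_PUSHDOWN;
    }
  };

  int main()
  {
    Engine e;
    PartitionWrapper p{&e};
    std::printf("engine=%#lx wrapper=%#lx\n", e.index_flags(), p.index_flags());
  }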
diff --git a/sql/ha_sequence.cc b/sql/ha_sequence.cc
index b348e6e7025..bab0614706d 100644
--- a/sql/ha_sequence.cc
+++ b/sql/ha_sequence.cc
@@ -250,6 +250,8 @@ int ha_sequence::write_row(const uchar *buf)
on master and slaves
- Check that the new row is an accurate SEQUENCE object
*/
+ /* mark a full binlog image insert to force non-parallel slave */
+ thd->transaction->stmt.mark_trans_did_ddl();
if (table->s->tmp_table == NO_TMP_TABLE &&
thd->mdl_context.upgrade_shared_lock(table->mdl_ticket,
MDL_EXCLUSIVE,
diff --git a/sql/handler.cc b/sql/handler.cc
index 595e76c708b..a50cdeaf9dd 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -1863,7 +1863,19 @@ int ha_commit_trans(THD *thd, bool all)
ordering is normally done. Commit ordering must be done here.
*/
if (run_wsrep_hooks)
- error= wsrep_before_commit(thd, all);
+ {
+ // This commit involves more than one storage engine and requires
+ // two phases, but some engines don't support it.
+ // Issue a message to the client and roll back the transaction.
+ if (trans->no_2pc && rw_ha_count > 1)
+ {
+ my_message(ER_ERROR_DURING_COMMIT, "Transactional commit not supported "
+ "by involved engine(s)", MYF(0));
+ error= 1;
+ }
+ else
+ error= wsrep_before_commit(thd, all);
+ }
if (error)
{
ha_rollback_trans(thd, FALSE);
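
The new guard refuses the commit before the wsrep hook runs whenever more than one engine wrote data and at least one of them cannot do two-phase commit. A standalone sketch of just that decision (hypothetical helper, not the server's types):

  #include <cstdio>

  // Hypothetical, simplified: no_2pc is true when at least one participating
  // engine cannot do two-phase commit; rw_ha_count is the number of engines
  // that wrote data in this transaction.
  static bool can_commit(bool no_2pc, unsigned rw_ha_count)
  {
    // More than one engine wrote data => the commit needs two phases.
    // If any participating engine lacks 2PC support, refuse the commit.
    if (no_2pc && rw_ha_count > 1)
      return false;
    return true;
  }

  int main()
  {
    std::printf("%d\n", can_commit(true, 2));  // 0: refused, rolled back
    std::printf("%d\n", can_commit(true, 1));  // 1: single engine, no 2PC needed
    std::printf("%d\n", can_commit(false, 2)); // 1: all engines support 2PC
  }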
@@ -2201,8 +2213,11 @@ int ha_rollback_trans(THD *thd, bool all)
rollback without signalling following transactions. And in release
builds, we explicitly do the signalling before rolling back.
*/
- DBUG_ASSERT(!(thd->rgi_slave && thd->rgi_slave->did_mark_start_commit) ||
- thd->transaction->xid_state.is_explicit_XA());
+ DBUG_ASSERT(
+ !(thd->rgi_slave && thd->rgi_slave->did_mark_start_commit) ||
+ (thd->transaction->xid_state.is_explicit_XA() ||
+ (thd->rgi_slave->gtid_ev_flags2 & Gtid_log_event::FL_PREPARED_XA)));
+
if (thd->rgi_slave && thd->rgi_slave->did_mark_start_commit)
thd->rgi_slave->unmark_start_commit();
}
@@ -4823,7 +4838,7 @@ int handler::check_collation_compatibility()
{
ulong mysql_version= table->s->mysql_version;
- if (mysql_version < 50124)
+ if (mysql_version < Charset::latest_mariadb_version_with_collation_change())
{
KEY *key= table->key_info;
KEY *key_end= key + table->s->keys;
@@ -4837,18 +4852,7 @@ int handler::check_collation_compatibility()
continue;
Field *field= table->field[key_part->fieldnr - 1];
uint cs_number= field->charset()->number;
- if ((mysql_version < 50048 &&
- (cs_number == 11 || /* ascii_general_ci - bug #29499, bug #27562 */
- cs_number == 41 || /* latin7_general_ci - bug #29461 */
- cs_number == 42 || /* latin7_general_cs - bug #29461 */
- cs_number == 20 || /* latin7_estonian_cs - bug #29461 */
- cs_number == 21 || /* latin2_hungarian_ci - bug #29461 */
- cs_number == 22 || /* koi8u_general_ci - bug #29461 */
- cs_number == 23 || /* cp1251_ukrainian_ci - bug #29461 */
- cs_number == 26)) || /* cp1250_general_ci - bug #29461 */
- (mysql_version < 50124 &&
- (cs_number == 33 || /* utf8mb3_general_ci - bug #27877 */
- cs_number == 35))) /* ucs2_general_ci - bug #27877 */
+ if (Charset::collation_changed_order(mysql_version, cs_number))
return HA_ADMIN_NEEDS_UPGRADE;
}
}
@@ -7381,7 +7385,13 @@ static int wsrep_after_row(THD *thd)
thd->wsrep_affected_rows > wsrep_max_ws_rows &&
wsrep_thd_is_local(thd))
{
- trans_rollback_stmt(thd) || trans_rollback(thd);
+ /*
+ If we are inside stored function or trigger we should not commit or
+ rollback current statement transaction. See comment in ha_commit_trans()
+ call for more information.
+ */
+ if (!thd->in_sub_stmt)
+ trans_rollback_stmt(thd) || trans_rollback(thd);
my_message(ER_ERROR_DURING_COMMIT, "wsrep_max_ws_rows exceeded", MYF(0));
DBUG_RETURN(ER_ERROR_DURING_COMMIT);
}
@@ -8171,11 +8181,13 @@ static
int del_global_index_stats_for_table(THD *thd, uchar* cache_key, size_t cache_key_length)
{
int res = 0;
+ uint to_delete_counter= 0;
+ INDEX_STATS *index_stats_to_delete[MAX_INDEXES];
DBUG_ENTER("del_global_index_stats_for_table");
mysql_mutex_lock(&LOCK_global_index_stats);
- for (uint i= 0; i < global_index_stats.records;)
+ for (uint i= 0; i < global_index_stats.records; i++)
{
INDEX_STATS *index_stats =
(INDEX_STATS*) my_hash_element(&global_index_stats, i);
@@ -8185,19 +8197,13 @@ int del_global_index_stats_for_table(THD *thd, uchar* cache_key, size_t cache_ke
index_stats->index_name_length >= cache_key_length &&
!memcmp(index_stats->index, cache_key, cache_key_length))
{
- res= my_hash_delete(&global_index_stats, (uchar*)index_stats);
- /*
- In our HASH implementation on deletion one elements
- is moved into a place where a deleted element was,
- and the last element is moved into the empty space.
- Thus we need to re-examine the current element, but
- we don't have to restart the search from the beginning.
- */
+ index_stats_to_delete[to_delete_counter++]= index_stats;
}
- else
- i++;
}
+ for (uint i= 0; i < to_delete_counter; i++)
+ res= my_hash_delete(&global_index_stats, (uchar*)index_stats_to_delete[i]);
+
mysql_mutex_unlock(&LOCK_global_index_stats);
DBUG_RETURN(res);
}
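
The rewrite sidesteps the HASH behaviour described in the deleted comment (deleting during a scan relocates elements) by collecting the victims first and deleting them in a second pass; the server bounds the victim array by MAX_INDEXES. A minimal standalone sketch of the same two-phase pattern:

  #include <algorithm>
  #include <cstdio>
  #include <vector>

  // Sketch only: scan a container whose delete operation may reorder
  // elements, so deletion is deferred until the scan is over. The size
  // bound used by the server is omitted here.
  int main()
  {
    std::vector<int> stats= {1, 7, 2, 9, 4};
    std::vector<int> to_delete;

    for (int v : stats)                 // phase 1: scan without mutating
      if (v > 3)
        to_delete.push_back(v);

    for (int v : to_delete)             // phase 2: delete collected entries
      stats.erase(std::find(stats.begin(), stats.end(), v));

    for (int v : stats)
      std::printf("%d ", v);            // prints: 1 2
  }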
diff --git a/sql/item.cc b/sql/item.cc
index 499d23aa0f2..c55348bcdf3 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -5770,7 +5770,8 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
max_arg_level for the function if it's needed.
*/
if (thd->lex->in_sum_func &&
- thd->lex == context->select_lex->parent_lex &&
+ last_checked_context->select_lex->parent_lex ==
+ context->select_lex->parent_lex &&
thd->lex->in_sum_func->nest_level >= select->nest_level)
{
Item::Type ref_type= (*reference)->type();
@@ -5796,7 +5797,8 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
(Item_ident*) (*reference) :
0), false);
if (thd->lex->in_sum_func &&
- thd->lex == context->select_lex->parent_lex &&
+ last_checked_context->select_lex->parent_lex ==
+ context->select_lex->parent_lex &&
thd->lex->in_sum_func->nest_level >= select->nest_level)
{
set_if_bigger(thd->lex->in_sum_func->max_arg_level,
@@ -6145,10 +6147,8 @@ bool Item_field::fix_fields(THD *thd, Item **reference)
goto mark_non_agg_field;
}
- if (!thd->lex->current_select->no_wrap_view_item &&
+ if (select && !thd->lex->current_select->no_wrap_view_item &&
thd->lex->in_sum_func &&
- select &&
- thd->lex == select->parent_lex &&
thd->lex->in_sum_func->nest_level ==
select->nest_level)
set_if_bigger(thd->lex->in_sum_func->max_arg_level,
@@ -8215,7 +8215,8 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
max_arg_level for the function if it's needed.
*/
if (thd->lex->in_sum_func &&
- thd->lex == context->select_lex->parent_lex &&
+ last_checked_context->select_lex->parent_lex ==
+ context->select_lex->parent_lex &&
thd->lex->in_sum_func->nest_level >=
last_checked_context->select_lex->nest_level)
set_if_bigger(thd->lex->in_sum_func->max_arg_level,
@@ -8239,7 +8240,8 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
max_arg_level for the function if it's needed.
*/
if (thd->lex->in_sum_func &&
- thd->lex == context->select_lex->parent_lex &&
+ last_checked_context->select_lex->parent_lex ==
+ context->select_lex->parent_lex &&
thd->lex->in_sum_func->nest_level >=
last_checked_context->select_lex->nest_level)
set_if_bigger(thd->lex->in_sum_func->max_arg_level,
@@ -8254,7 +8256,8 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
1. outer reference (will be fixed later by the fix_inner_refs function);
2. an unnamed reference inside an aggregate function.
*/
- if (!((*ref)->type() == REF_ITEM &&
+ if (!set_properties_only &&
+ !((*ref)->type() == REF_ITEM &&
((Item_ref *)(*ref))->ref_type() == OUTER_REF) &&
(((*ref)->with_sum_func() && name.str &&
!(current_sel->get_linkage() != GLOBAL_OPTIONS_TYPE &&
@@ -10591,7 +10594,8 @@ int Item_cache_str::save_in_field(Field *field, bool no_conversions)
bool Item_cache_row::allocate(THD *thd, uint num)
{
item_count= num;
- return (!(values=
+ return (!values &&
+ !(values=
(Item_cache **) thd->calloc(sizeof(Item_cache *)*item_count)));
}
@@ -10627,11 +10631,12 @@ bool Item_cache_row::setup(THD *thd, Item *item)
return 1;
for (uint i= 0; i < item_count; i++)
{
- Item_cache *tmp;
Item *el= item->element_index(i);
- if (!(tmp= values[i]= el->get_cache(thd)))
+
+ if ((!values[i]) && !(values[i]= el->get_cache(thd)))
return 1;
- tmp->setup(thd, el);
+
+ values[i]->setup(thd, el);
}
return 0;
}
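
Both changes follow the same reuse rule: an object that already exists from a previous execution of a prepared statement is kept instead of being reallocated. Restated as a standalone predicate (simplified, not the server's types):

  #include <cassert>

  // Sketch: allocate only on first use; later executions reuse the slot.
  template <typename T>
  static bool ensure_allocated(T *&slot)
  {
    if (slot)              // already created by a previous execution
      return false;        // success, nothing to do
    slot= new T();
    return slot == nullptr; // true = allocation failure
  }

  int main()
  {
    int *cache= nullptr;
    assert(!ensure_allocated(cache));
    int *first= cache;
    assert(!ensure_allocated(cache));   // second "execution" reuses it
    assert(cache == first);
    delete cache;
  }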
diff --git a/sql/item.h b/sql/item.h
index 5b8b858c13c..ae503636116 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -7107,6 +7107,9 @@ public:
}
virtual void keep_array() {}
+#ifndef DBUG_OFF
+ bool is_array_kept() { return TRUE; }
+#endif
void print(String *str, enum_query_type query_type) override;
bool eq_def(const Field *field)
{
@@ -7595,13 +7598,14 @@ public:
bool null_inside() override;
void bring_value() override;
void keep_array() override { save_array= 1; }
+#ifndef DBUG_OFF
+ bool is_array_kept() { return save_array; }
+#endif
void cleanup() override
{
DBUG_ENTER("Item_cache_row::cleanup");
Item_cache::cleanup();
- if (save_array)
- bzero(values, item_count*sizeof(Item**));
- else
+ if (!save_array)
values= 0;
DBUG_VOID_RETURN;
}
@@ -7832,7 +7836,7 @@ public:
Item *get_tmp_table_item(THD *thd)
{ return m_item->get_tmp_table_item(thd); }
Item *get_copy(THD *thd)
- { return m_item->get_copy(thd); }
+ { return get_item_copy<Item_direct_ref_to_item>(thd, this); }
COND *build_equal_items(THD *thd, COND_EQUAL *inherited,
bool link_item_fields,
COND_EQUAL **cond_equal_ref)
@@ -7900,7 +7904,20 @@ public:
bool excl_dep_on_grouping_fields(st_select_lex *sel)
{ return m_item->excl_dep_on_grouping_fields(sel); }
bool is_expensive() { return m_item->is_expensive(); }
- Item* build_clone(THD *thd) { return get_copy(thd); }
+ void set_item(Item *item) { m_item= item; }
+ Item *build_clone(THD *thd)
+ {
+ Item *clone_item= m_item->build_clone(thd);
+ if (clone_item)
+ {
+ Item_direct_ref_to_item *copy= (Item_direct_ref_to_item *) get_copy(thd);
+ if (!copy)
+ return 0;
+ copy->set_item(clone_item);
+ return copy;
+ }
+ return 0;
+ }
void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array,
List<Item> &fields, uint flags)
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index b5f747219a5..8577bc4fb6b 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -1292,9 +1292,22 @@ bool Item_in_optimizer::fix_left(THD *thd)
ref0= args[1]->get_IN_subquery()->left_exp_ptr();
args[0]= (*ref0);
}
- if ((*ref0)->fix_fields_if_needed(thd, ref0) ||
- (!cache && !(cache= (*ref0)->get_cache(thd))))
+ if ((*ref0)->fix_fields_if_needed(thd, ref0))
DBUG_RETURN(1);
+ if (!cache)
+ {
+ Query_arena *arena, backup;
+ arena= thd->activate_stmt_arena_if_needed(&backup);
+
+ bool rc= !(cache= (*ref0)->get_cache(thd));
+
+ if (arena)
+ thd->restore_active_arena(arena, &backup);
+
+ if (rc)
+ DBUG_RETURN(1);
+ cache->keep_array();
+ }
/*
During fix_field() expression could be substituted.
So we copy changes before use
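
Allocating the cache while the statement arena is active makes it live as long as the prepared statement rather than a single execution, which is what lets the code keep the cache instead of rebuilding it on re-execution. A rough sketch of the activate/restore idiom, with hypothetical simplified types:

  #include <cstdio>

  // Hypothetical types illustrating the arena-switch idiom: allocations made
  // between activate and restore come from long-lived statement memory.
  struct Arena { const char *name; };

  struct Session {
    Arena stmt_arena{"stmt"};   // freed only when the statement is dropped
    Arena exec_arena{"exec"};   // reset after every execution
    Arena *current= &exec_arena;

    Arena *activate_stmt_arena_if_needed(Arena **backup)
    {
      if (current == &stmt_arena)
        return nullptr;         // already on the statement arena; no restore
      *backup= current;
      current= &stmt_arena;
      return current;
    }
    void restore_active_arena(Arena *backup) { current= backup; }
  };

  int main()
  {
    Session s;
    Arena *backup= nullptr;
    Arena *arena= s.activate_stmt_arena_if_needed(&backup);
    std::printf("allocating cache on '%s' arena\n", s.current->name);
    if (arena)
      s.restore_active_arena(backup);
  }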
@@ -1654,19 +1667,10 @@ longlong Item_in_optimizer::val_int()
}
-void Item_in_optimizer::keep_top_level_cache()
-{
- cache->keep_array();
- save_cache= 1;
-}
-
-
void Item_in_optimizer::cleanup()
{
DBUG_ENTER("Item_in_optimizer::cleanup");
Item_bool_func::cleanup();
- if (!save_cache)
- cache= 0;
expr_cache= 0;
DBUG_VOID_RETURN;
}
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index d34bad95a57..556374eff6a 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -375,8 +375,7 @@ class Item_in_optimizer: public Item_bool_func
protected:
Item_cache *cache;
Item *expr_cache;
- bool save_cache;
- /*
+ /*
Stores the value of "NULL IN (SELECT ...)" for uncorrelated subqueries:
UNKNOWN - "NULL in (SELECT ...)" has not yet been evaluated
FALSE - result is FALSE
@@ -386,7 +385,7 @@ protected:
public:
Item_in_optimizer(THD *thd, Item *a, Item *b):
Item_bool_func(thd, a, b), cache(0), expr_cache(0),
- save_cache(0), result_for_null_param(UNKNOWN)
+ result_for_null_param(UNKNOWN)
{
with_flags|= item_with_t::SUBQUERY;
}
@@ -403,7 +402,6 @@ public:
return name;
}
Item_cache **get_cache() { return &cache; }
- void keep_top_level_cache();
Item *transform(THD *thd, Item_transformer transformer, uchar *arg) override;
Item *expr_cache_insert_transformer(THD *thd, uchar *unused) override;
bool is_expensive_processor(void *arg) override;
diff --git a/sql/item_jsonfunc.h b/sql/item_jsonfunc.h
index b352391c083..17ffe943938 100644
--- a/sql/item_jsonfunc.h
+++ b/sql/item_jsonfunc.h
@@ -493,9 +493,14 @@ class Item_func_json_length: public Item_long_func
{
bool check_arguments() const override
{
- return args[0]->check_type_can_return_text(func_name_cstring()) ||
- (arg_count > 1 &&
- args[1]->check_type_general_purpose_string(func_name_cstring()));
+ const LEX_CSTRING name= func_name_cstring();
+ if (arg_count == 0 || arg_count > 2)
+ {
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name.str);
+ return true;
+ }
+ return args[0]->check_type_can_return_text(name) ||
+ (arg_count > 1 && args[1]->check_type_general_purpose_string(name));
}
protected:
json_path_with_flags path;
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index 4b2f1aecf45..53c5ecde103 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -3970,6 +3970,7 @@ String *Item_func_weight_string::val_str(String *str)
weigth_flags);
DBUG_ASSERT(frm_length <= tmp_length);
+ str->set_charset(&my_charset_bin);
str->length(frm_length);
null_value= 0;
return str;
@@ -4049,6 +4050,7 @@ String *Item_func_unhex::val_str(String *str)
from= res->ptr();
null_value= 0;
+ str->set_charset(&my_charset_bin);
str->length(length);
to= (char*) str->ptr();
if (res->length() % 2)
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index 9e6c205ca76..e614421030a 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -407,11 +407,11 @@ bool Item_subselect::mark_as_dependent(THD *thd, st_select_lex *select,
{
is_correlated= TRUE;
Ref_to_outside *upper;
- if (!(upper= new (thd->stmt_arena->mem_root) Ref_to_outside()))
+ if (!(upper= new (thd->mem_root) Ref_to_outside()))
return TRUE;
upper->select= select;
upper->item= item;
- if (upper_refs.push_back(upper, thd->stmt_arena->mem_root))
+ if (upper_refs.push_back(upper, thd->mem_root))
return TRUE;
}
return FALSE;
@@ -2117,7 +2117,7 @@ Item_in_subselect::single_value_transformer(JOIN *join)
thd->lex->current_select= current;
/* We will refer to upper level cache array => we have to save it for SP */
- optimizer->keep_top_level_cache();
+ DBUG_ASSERT(optimizer->get_cache()[0]->is_array_kept());
/*
As far as Item_in_optimizer does not substitute itself on fix_fields
@@ -2517,7 +2517,7 @@ Item_in_subselect::row_value_transformer(JOIN *join)
}
// we will refer to upper level cache array => we have to save it in PS
- optimizer->keep_top_level_cache();
+ DBUG_ASSERT(optimizer->get_cache()[0]->is_array_kept());
thd->lex->current_select= current;
/*
@@ -4680,6 +4680,12 @@ void subselect_uniquesubquery_engine::print(String *str,
{
str->append(STRING_WITH_LEN("<primary_index_lookup>("));
tab->ref.items[0]->print(str, query_type);
+ if (!tab->table)
+ {
+ // table is not opened so unknown
+ str->append(')');
+ return;
+ }
str->append(STRING_WITH_LEN(" in "));
if (tab->table->s->table_category == TABLE_CATEGORY_TEMPORARY)
{
@@ -5268,7 +5274,7 @@ bool subselect_hash_sj_engine::init(List<Item> *tmp_columns, uint subquery_id)
//fprintf(stderr, "Q: %s\n", current_thd->query());
DBUG_ASSERT(0);
DBUG_ASSERT(
- tmp_table->s->uniques ||
+ (tmp_table->key_info->flags & HA_UNIQUE_HASH) ||
tmp_table->key_info->key_length >= tmp_table->file->max_key_length() ||
tmp_table->key_info->user_defined_key_parts >
tmp_table->file->max_key_parts());
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 72de21ee483..ffac6dbb912 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -428,7 +428,8 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref)
sl= sl->master_unit()->outer_select() )
sl->master_unit()->item->with_flags|= item_with_t::SUM_FUNC;
}
- thd->lex->current_select->mark_as_dependent(thd, aggr_sel, NULL);
+ if (aggr_sel)
+ thd->lex->current_select->mark_as_dependent(thd, aggr_sel, NULL);
if ((thd->lex->describe & DESCRIBE_EXTENDED) && aggr_sel)
{
diff --git a/sql/key.cc b/sql/key.cc
index b65851cf7c1..4e40a3354ce 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -615,7 +615,8 @@ int key_rec_cmp(void *key_p, uchar *first_rec, uchar *second_rec)
that take the max length into account.
*/
if ((result= field->cmp_prefix(field->ptr+first_diff, field->ptr+sec_diff,
- key_part->length)))
+ key_part->length /
+ field->charset()->mbmaxlen)))
DBUG_RETURN(result * GREATER);
next_loop:
key_part++;
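
The division converts the key part's byte length into the character count that cmp_prefix() now expects. A worked example with assumed charset properties:

  #include <cstdio>

  int main()
  {
    // For a utf8mb4 column, one character can occupy up to 4 bytes.
    unsigned key_part_length= 40;  // prefix length in bytes (KEY_PART_INFO)
    unsigned mbmaxlen= 4;          // utf8mb4's maximum bytes per character
    // cmp_prefix() now takes characters, not bytes:
    std::printf("prefix chars = %u\n", key_part_length / mbmaxlen); // 10
  }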
diff --git a/sql/log.cc b/sql/log.cc
index 48e5145210c..7545d5baed5 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -418,7 +418,7 @@ private:
Rows_log_event *m_pending;
/*
- Bit flags for what has been writting to cache. Used to
+ Bit flags for what has been written to cache. Used to
discard logs without any data changes.
see enum_logged_status;
*/
@@ -11980,7 +11980,10 @@ get_gtid_list_event(IO_CACHE *cache, Gtid_list_log_event **out_gtid_list)
if (typ == START_ENCRYPTION_EVENT)
{
if (fdle->start_decryption((Start_encryption_log_event*) ev))
+ {
errormsg= "Could not set up decryption for binlog.";
+ break;
+ }
}
delete ev;
if (typ == ROTATE_EVENT || typ == STOP_EVENT ||
diff --git a/sql/log_event.h b/sql/log_event.h
index 6b8853493be..41989192b53 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -721,7 +721,7 @@ enum Log_event_type
/*
- Bit flags for what has been writting to cache. Used to
+ Bit flags for what has been written to cache. Used to
discard logs with table map events but not row events and
nothing else important. This is stored by cache.
*/
@@ -2716,7 +2716,7 @@ private:
virtual int do_commit()= 0;
virtual int do_apply_event(rpl_group_info *rgi);
int do_record_gtid(THD *thd, rpl_group_info *rgi, bool in_trans,
- void **out_hton);
+ void **out_hton, bool force_err= false);
enum_skip_reason do_shall_skip(rpl_group_info *rgi);
virtual const char* get_query()= 0;
#endif
diff --git a/sql/log_event_server.cc b/sql/log_event_server.cc
index 3910d910da1..6c71381a1fb 100644
--- a/sql/log_event_server.cc
+++ b/sql/log_event_server.cc
@@ -153,6 +153,30 @@ is_parallel_retry_error(rpl_group_info *rgi, int err)
return has_temporary_error(rgi->thd);
}
+/**
+ Accumulate a Diagnostics_area's errors and warnings into an output buffer
+
+ @param errbuf The output buffer to write error messages
+ @param errbuf_size The size of the output buffer
+ @param da The Diagnostics_area to check for errors
+*/
+static void inline aggregate_da_errors(char *errbuf, size_t errbuf_size,
+ Diagnostics_area *da)
+{
+ const char *errbuf_end= errbuf + errbuf_size;
+ char *slider;
+ Diagnostics_area::Sql_condition_iterator it= da->sql_conditions();
+ const Sql_condition *err;
+ size_t len;
+ for (err= it++, slider= errbuf; err && slider < errbuf_end - 1;
+ slider += len, err= it++)
+ {
+ len= my_snprintf(slider, errbuf_end - slider,
+ " %s, Error_code: %d;", err->get_message_text(),
+ err->get_sql_errno());
+ }
+}
+
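A standalone sketch of what the helper does with its buffer (hypothetical condition type; the message texts are made up):

  #include <cstddef>
  #include <cstdio>

  // Append " <message>, Error_code: <n>;" entries until the buffer is full.
  struct Cond { const char *msg; int errcode; };

  static void aggregate_errors(char *errbuf, size_t errbuf_size,
                               const Cond *conds, size_t n)
  {
    char *slider= errbuf;
    const char *end= errbuf + errbuf_size;
    for (size_t i= 0; i < n && slider < end - 1; i++)
      slider+= std::snprintf(slider, end - slider, " %s, Error_code: %d;",
                             conds[i].msg, conds[i].errcode);
  }

  int main()
  {
    Cond conds[]= { {"Duplicate entry '1'", 1062},
                    {"Lock wait timeout", 1205} };
    char buff[128];
    buff[0]= 0;
    aggregate_errors(buff, sizeof(buff), conds, 2);
    std::printf("%s\n", buff);
  }
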
/**
Error reporting facility for Rows_log_event::do_apply_event
@@ -173,13 +197,8 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error,
const char *log_name, my_off_t pos)
{
const char *handler_error= (ha_error ? HA_ERR(ha_error) : NULL);
- char buff[MAX_SLAVE_ERRMSG], *slider;
- const char *buff_end= buff + sizeof(buff);
- size_t len;
- Diagnostics_area::Sql_condition_iterator it=
- thd->get_stmt_da()->sql_conditions();
+ char buff[MAX_SLAVE_ERRMSG];
Relay_log_info const *rli= rgi->rli;
- const Sql_condition *err;
buff[0]= 0;
int errcode= thd->is_error() ? thd->get_stmt_da()->sql_errno() : 0;
@@ -192,13 +211,7 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error,
if (is_parallel_retry_error(rgi, errcode))
return;
- for (err= it++, slider= buff; err && slider < buff_end - 1;
- slider += len, err= it++)
- {
- len= my_snprintf(slider, buff_end - slider,
- " %s, Error_code: %d;", err->get_message_text(),
- err->get_sql_errno());
- }
+ aggregate_da_errors(buff, sizeof(buff), thd->get_stmt_da());
if (ha_error != 0 && !thd->killed)
rli->report(level, errcode, rgi->gtid_info(),
@@ -3571,7 +3584,8 @@ bool slave_execute_deferred_events(THD *thd)
#if defined(HAVE_REPLICATION)
int Xid_apply_log_event::do_record_gtid(THD *thd, rpl_group_info *rgi,
- bool in_trans, void **out_hton)
+ bool in_trans, void **out_hton,
+ bool force_err)
{
int err= 0;
Relay_log_info const *rli= rgi->rli;
@@ -3586,14 +3600,26 @@ int Xid_apply_log_event::do_record_gtid(THD *thd, rpl_group_info *rgi,
int ec= thd->get_stmt_da()->sql_errno();
/*
Do not report an error if this is really a kill due to a deadlock.
- In this case, the transaction will be re-tried instead.
+ In this case, the transaction will be re-tried instead, unless force_err
+ is set (as for XA PREPARE): there the GTID state is updated in a separate
+ transaction, and if that update fails, we should not retry but fail
+ immediately.
*/
- if (!is_parallel_retry_error(rgi, ec))
+ if (!is_parallel_retry_error(rgi, ec) || force_err)
+ {
+ char buff[MAX_SLAVE_ERRMSG];
+ buff[0]= 0;
+ aggregate_da_errors(buff, sizeof(buff), thd->get_stmt_da());
+
+ if (force_err)
+ thd->clear_error();
+
rli->report(ERROR_LEVEL, ER_CANNOT_UPDATE_GTID_STATE, rgi->gtid_info(),
"Error during XID COMMIT: failed to update GTID state in "
- "%s.%s: %d: %s",
+ "%s.%s: %d: %s the event's master log %s, end_log_pos %llu",
"mysql", rpl_gtid_slave_state_table_name.str, ec,
- thd->get_stmt_da()->message());
+ buff, RPL_LOG_NAME, log_pos);
+ }
thd->is_slave_error= 1;
}
@@ -3667,7 +3693,7 @@ int Xid_apply_log_event::do_apply_event(rpl_group_info *rgi)
{
DBUG_ASSERT(!thd->transaction->xid_state.is_explicit_XA());
- if ((err= do_record_gtid(thd, rgi, false, &hton)))
+ if ((err= do_record_gtid(thd, rgi, false, &hton, true)))
return err;
}
@@ -4988,7 +5014,8 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
to avoid query cache being polluted with stale entries,
*/
# ifdef WITH_WSREP
- if (!WSREP(thd) && !wsrep_thd_is_applying(thd))
+ /* Query cache is not invalidated on wsrep applier here */
+ if (!(WSREP(thd) && wsrep_thd_is_applying(thd)))
# endif /* WITH_WSREP */
query_cache.invalidate_locked_for_write(thd, rgi->tables_to_lock);
#endif /* HAVE_QUERY_CACHE */
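
The condition change above is a real behaviour fix, not a style edit: by De Morgan's law the old `!WSREP(thd) && !wsrep_thd_is_applying(thd)` equals `!(WSREP(thd) || wsrep_thd_is_applying(thd))`, so on a wsrep-enabled server it skipped query-cache invalidation for every thread. The new form skips only genuine wsrep appliers (1 = invalidate):

  WSREP(thd)  applying | old: !W && !A | new: !(W && A)
      0          0     |       1       |       1
      0          1     |       0       |       1
      1          0     |       0       |       1   <- the fixed case
      1          1     |       0       |       0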
@@ -6871,8 +6898,18 @@ Rows_log_event::write_row(rpl_group_info *rgi,
int Rows_log_event::update_sequence()
{
TABLE *table= m_table; // pointer to event's table
+ bool old_master= false;
+ int err= 0;
- if (!bitmap_is_set(table->rpl_write_set, MIN_VALUE_FIELD_NO))
+ if (!bitmap_is_set(table->rpl_write_set, MIN_VALUE_FIELD_NO) ||
+ (
+#if defined(WITH_WSREP)
+ ! WSREP(thd) &&
+#endif
+ !(table->in_use->rgi_slave->gtid_ev_flags2 & Gtid_log_event::FL_DDL) &&
+ !(old_master=
+ rpl_master_has_bug(thd->rgi_slave->rli,
+ 29621, FALSE, FALSE, FALSE, TRUE))))
{
/* This event comes from a setval function executed on the master.
Update the sequence next_number and round, like we do with setval()
@@ -6885,12 +6922,27 @@ int Rows_log_event::update_sequence()
return table->s->sequence->set_value(table, nextval, round, 0) > 0;
}
-
+ if (old_master && !WSREP(thd) && thd->rgi_slave->is_parallel_exec)
+ {
+ DBUG_ASSERT(thd->rgi_slave->parallel_entry);
+ /*
+ With parallel replication enabled, we can't execute alongside any other
+ transaction on which we may depend, so we force a retry to release the
+ server-layer table lock for transactions on the same table that come
+ earlier in binlog order.
+ */
+ if (thd->rgi_slave->parallel_entry->last_committed_sub_id <
+ thd->rgi_slave->wait_commit_sub_id)
+ {
+ err= ER_LOCK_DEADLOCK;
+ my_error(err, MYF(0));
+ }
+ }
/*
Update all fields in table and update the active sequence, like with
ALTER SEQUENCE
*/
- return table->file->ha_write_row(table->record[0]);
+ return err == 0 ? table->file->ha_write_row(table->record[0]) : err;
}
@@ -6906,19 +6958,21 @@ Write_rows_log_event::do_exec_row(rpl_group_info *rgi)
const char *tmp= thd->get_proc_info();
LEX_CSTRING tmp_db= thd->db;
char *message, msg[128];
- const char *table_name= m_table->s->table_name.str;
- char quote_char= get_quote_char_for_identifier(thd, STRING_WITH_LEN(table_name));
- my_snprintf(msg, sizeof(msg),"Write_rows_log_event::write_row() on table %c%s%c",
- quote_char, table_name, quote_char);
+ const LEX_CSTRING &table_name= m_table->s->table_name;
+ const char quote_char=
+ get_quote_char_for_identifier(thd, table_name.str, table_name.length);
+ my_snprintf(msg, sizeof msg,
+ "Write_rows_log_event::write_row() on table %c%.*s%c",
+ quote_char, int(table_name.length), table_name.str, quote_char);
thd->reset_db(&m_table->s->db);
message= msg;
int error;
#ifdef WSREP_PROC_INFO
my_snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1,
- "Write_rows_log_event::write_row(%lld) on table %c%s%c",
- (long long) wsrep_thd_trx_seqno(thd), quote_char, table_name,
- quote_char);
+ "Write_rows_log_event::write_row(%lld) on table %c%.*s%c",
+ (long long) wsrep_thd_trx_seqno(thd), quote_char,
+ int(table_name.length), table_name.str, quote_char);
message= thd->wsrep_info;
#endif /* WSREP_PROC_INFO */
@@ -6957,7 +7011,7 @@ uint8 Write_rows_log_event::get_trg_event_map()
Returns TRUE if different.
*/
-static bool record_compare(TABLE *table)
+static bool record_compare(TABLE *table, bool vers_from_plain= false)
{
bool result= FALSE;
/**
@@ -6990,10 +7044,19 @@ static bool record_compare(TABLE *table)
/* Compare fields */
for (Field **ptr=table->field ; *ptr ; ptr++)
{
- if (table->versioned() && (*ptr)->vers_sys_field())
- {
+ /*
+ If the table is versioned, don't compare using the version if there is a
+ primary key. If there isn't a primary key, we need the version to
+ identify the correct record if there are duplicate rows in the data set.
+ However, if the primary server is unversioned (vers_from_plain is true),
+ then we implicitly use row_end as the primary key on our side. This is
+ because the implicit row_end value will be set to the maximum value for
+ the latest row update (which is what we care about).
+ */
+ if (table->versioned() && (*ptr)->vers_sys_field() &&
+ (table->s->primary_key < MAX_KEY ||
+ (vers_from_plain && table->vers_start_field() == (*ptr))))
continue;
- }
/**
We only compare field contents that are not null.
NULL fields (i.e., their null bits) were compared
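
Restating the new skip rule as a standalone predicate (simplified booleans instead of the Field API) makes the two cases easier to see:

  #include <cstdio>

  // Skip a system-versioning field when a primary key identifies the row,
  // or, for rows replicated from an unversioned master (vers_from_plain),
  // skip only row_start: row_end then acts as an implicit key for picking
  // the latest row version.
  static bool skip_field(bool versioned, bool is_sys_field, bool has_pk,
                         bool vers_from_plain, bool is_row_start)
  {
    return versioned && is_sys_field &&
           (has_pk || (vers_from_plain && is_row_start));
  }

  int main()
  {
    // Unversioned master, no PK: row_start skipped, row_end still compared.
    std::printf("%d %d\n",
                skip_field(true, true, false, true, true),    // 1
                skip_field(true, true, false, true, false));  // 0
  }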
@@ -7387,7 +7450,7 @@ int Rows_log_event::find_row(rpl_group_info *rgi)
/* We use this to test that the correct key is used in test cases. */
DBUG_EXECUTE_IF("slave_crash_if_index_scan", abort(););
- while (record_compare(table))
+ while (record_compare(table, m_vers_from_plain))
{
while ((error= table->file->ha_index_next(table->record[0])))
{
@@ -7440,7 +7503,7 @@ int Rows_log_event::find_row(rpl_group_info *rgi)
goto end;
}
}
- while (record_compare(table));
+ while (record_compare(table, m_vers_from_plain));
/*
Note: above record_compare will take into account all record fields
@@ -7528,10 +7591,12 @@ int Delete_rows_log_event::do_exec_row(rpl_group_info *rgi)
const char *tmp= thd->get_proc_info();
LEX_CSTRING tmp_db= thd->db;
char *message, msg[128];
- const char *table_name= m_table->s->table_name.str;
- char quote_char= get_quote_char_for_identifier(thd, STRING_WITH_LEN(table_name));
- my_snprintf(msg, sizeof(msg),"Delete_rows_log_event::find_row() on table %c%s%c",
- quote_char, table_name, quote_char);
+ const LEX_CSTRING &table_name= m_table->s->table_name;
+ const char quote_char=
+ get_quote_char_for_identifier(thd, table_name.str, table_name.length);
+ my_snprintf(msg, sizeof msg,
+ "Delete_rows_log_event::find_row() on table %c%.*s%c",
+ quote_char, int(table_name.length), table_name.str, quote_char);
thd->reset_db(&m_table->s->db);
message= msg;
const bool invoke_triggers= (m_table->triggers && do_invoke_trigger());
@@ -7539,26 +7604,29 @@ int Delete_rows_log_event::do_exec_row(rpl_group_info *rgi)
#ifdef WSREP_PROC_INFO
my_snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1,
- "Delete_rows_log_event::find_row(%lld) on table %c%s%c",
- (long long) wsrep_thd_trx_seqno(thd), quote_char, table_name,
+ "Delete_rows_log_event::find_row(%lld) on table %c%.*s%c",
+ (long long) wsrep_thd_trx_seqno(thd), quote_char,
+ int(table_name.length), table_name.str,
quote_char);
message= thd->wsrep_info;
#endif /* WSREP_PROC_INFO */
thd_proc_info(thd, message);
if (likely(!(error= find_row(rgi))))
- {
+ {
/*
Delete the record found, located in record[0]
*/
- my_snprintf(msg, sizeof(msg),"Delete_rows_log_event::ha_delete_row() on table %c%s%c",
- quote_char, table_name, quote_char);
+ my_snprintf(msg, sizeof msg,
+ "Delete_rows_log_event::ha_delete_row() on table %c%.*s%c",
+ quote_char, int(table_name.length), table_name.str,
+ quote_char);
message= msg;
#ifdef WSREP_PROC_INFO
snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1,
- "Delete_rows_log_event::ha_delete_row(%lld) on table %c%s%c",
- (long long) wsrep_thd_trx_seqno(thd), quote_char, table_name,
- quote_char);
+ "Delete_rows_log_event::ha_delete_row(%lld) on table %c%.*s%c",
+ (long long) wsrep_thd_trx_seqno(thd), quote_char,
+ int(table_name.length), table_name.str, quote_char);
message= thd->wsrep_info;
#endif
thd_proc_info(thd, message);
@@ -7690,17 +7758,20 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi)
DBUG_ASSERT(m_table != NULL);
LEX_CSTRING tmp_db= thd->db;
char *message, msg[128];
- const char *table_name= m_table->s->table_name.str;
- char quote_char= get_quote_char_for_identifier(thd, STRING_WITH_LEN(table_name));
- my_snprintf(msg, sizeof(msg),"Update_rows_log_event::find_row() on table %c%s%c",
- quote_char, table_name, quote_char);
+ const LEX_CSTRING &table_name= m_table->s->table_name;
+ const char quote_char=
+ get_quote_char_for_identifier(thd, table_name.str, table_name.length);
+ my_snprintf(msg, sizeof msg,
+ "Update_rows_log_event::find_row() on table %c%.*s%c",
+ quote_char, int(table_name.length), table_name.str, quote_char);
thd->reset_db(&m_table->s->db);
message= msg;
#ifdef WSREP_PROC_INFO
my_snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1,
- "Update_rows_log_event::find_row(%lld) on table %c%s%c",
- (long long) wsrep_thd_trx_seqno(thd), quote_char, table_name,
+ "Update_rows_log_event::find_row(%lld) on table %c%.*s%c",
+ (long long) wsrep_thd_trx_seqno(thd), quote_char,
+ int(table_name.length), table_name.str,
quote_char);
message= thd->wsrep_info;
#endif /* WSREP_PROC_INFO */
@@ -7740,14 +7811,15 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi)
store_record(m_table,record[1]);
m_curr_row= m_curr_row_end;
- my_snprintf(msg, sizeof(msg),"Update_rows_log_event::unpack_current_row() on table %c%s%c",
- quote_char, table_name, quote_char);
+ my_snprintf(msg, sizeof msg,
+ "Update_rows_log_event::unpack_current_row() on table %c%.*s%c",
+ quote_char, int(table_name.length), table_name.str, quote_char);
message= msg;
#ifdef WSREP_PROC_INFO
my_snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1,
- "Update_rows_log_event::unpack_current_row(%lld) on table %c%s%c",
- (long long) wsrep_thd_trx_seqno(thd), quote_char, table_name,
- quote_char);
+ "Update_rows_log_event::unpack_current_row(%lld) on table %c%.*s%c",
+ (long long) wsrep_thd_trx_seqno(thd), quote_char,
+ int(table_name.length), table_name.str, quote_char);
message= thd->wsrep_info;
#endif /* WSREP_PROC_INFO */
@@ -7770,13 +7842,15 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi)
DBUG_DUMP("new values", m_table->record[0], m_table->s->reclength);
#endif
- my_snprintf(msg, sizeof(msg),"Update_rows_log_event::ha_update_row() on table %c%s%c",
- quote_char, table_name, quote_char);
+ my_snprintf(msg, sizeof msg,
+ "Update_rows_log_event::ha_update_row() on table %c%.*s%c",
+ quote_char, int(table_name.length), table_name.str, quote_char);
message= msg;
#ifdef WSREP_PROC_INFO
my_snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1,
- "Update_rows_log_event::ha_update_row(%lld) on table %c%s%c",
- (long long) wsrep_thd_trx_seqno(thd), quote_char, table_name, quote_char);
+ "Update_rows_log_event::ha_update_row(%lld) on table %c%.*s%c",
+ (long long) wsrep_thd_trx_seqno(thd), quote_char,
+ int(table_name.length), table_name.str, quote_char);
message= thd->wsrep_info;
#endif /* WSREP_PROC_INFO */
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 8ec9ad4ea17..8ee4b0408ef 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -332,6 +332,7 @@ static my_bool opt_debugging= 0, opt_external_locking= 0, opt_console= 0;
static my_bool opt_short_log_format= 0, opt_silent_startup= 0;
ulong max_used_connections;
+time_t max_used_connections_time;
static const char *mysqld_user, *mysqld_chroot;
static char *default_character_set_name;
static char *character_set_filesystem_name;
@@ -4704,7 +4705,10 @@ static void init_ssl()
{
sql_print_error("Failed to setup SSL");
sql_print_error("SSL error: %s", sslGetErrString(error));
- unireg_abort(1);
+ if (!opt_bootstrap)
+ unireg_abort(1);
+ opt_use_ssl = 0;
+ have_ssl= SHOW_OPTION_DISABLED;
}
else
ssl_acceptor_stats.init();
@@ -6182,7 +6186,10 @@ void create_new_thread(CONNECT *connect)
uint sum= connection_count + extra_connection_count;
if (sum > max_used_connections)
+ {
max_used_connections= sum;
+ max_used_connections_time= time(nullptr);
+ }
/*
The initialization of thread_id is done in create_embedded_thd() for
@@ -6514,8 +6521,6 @@ struct my_option my_long_options[]=
{"console", OPT_CONSOLE, "Write error output on screen; don't remove the console window on windows.",
&opt_console, &opt_console, 0, GET_BOOL, NO_ARG, 0, 0, 0,
0, 0, 0},
- {"core-file", OPT_WANT_CORE, "Write core on errors.", 0, 0, 0, GET_NO_ARG,
- NO_ARG, 0, 0, 0, 0, 0, 0},
#ifdef DBUG_OFF
{"debug", '#', "Built in DBUG debugger. Disabled in this build.",
&current_dbug_option, &current_dbug_option, 0, GET_STR, OPT_ARG,
@@ -7015,9 +7020,20 @@ static int show_heartbeat_period(THD *thd, SHOW_VAR *var, char *buff,
return 0;
}
-
#endif /* HAVE_REPLICATION */
+
+static int show_max_used_connections_time(THD *thd, SHOW_VAR *var, char *buff,
+ enum enum_var_type scope)
+{
+ var->type= SHOW_CHAR;
+ var->value= buff;
+
+ get_date(buff, GETDATE_DATE_TIME | GETDATE_FIXEDLENGTH, max_used_connections_time);
+ return 0;
+}
+
+
static int show_open_tables(THD *thd, SHOW_VAR *var, char *buff,
enum enum_var_type scope)
{
@@ -7493,6 +7509,7 @@ SHOW_VAR status_vars[]= {
{"Master_gtid_wait_timeouts", (char*) offsetof(STATUS_VAR, master_gtid_wait_timeouts), SHOW_LONG_STATUS},
{"Master_gtid_wait_time", (char*) offsetof(STATUS_VAR, master_gtid_wait_time), SHOW_LONG_STATUS},
{"Max_used_connections", (char*) &max_used_connections, SHOW_LONG},
+ {"Max_used_connections_time",(char*) &show_max_used_connections_time, SHOW_SIMPLE_FUNC},
{"Memory_used", (char*) &show_memory_used, SHOW_SIMPLE_FUNC},
{"Memory_used_initial", (char*) &start_memory_used, SHOW_LONGLONG},
{"Resultset_metadata_skipped", (char *) offsetof(STATUS_VAR, skip_metadata_count),SHOW_LONG_STATUS},
@@ -7831,6 +7848,7 @@ static int mysql_init_variables(void)
specialflag= 0;
binlog_cache_use= binlog_cache_disk_use= 0;
max_used_connections= slow_launch_threads = 0;
+ max_used_connections_time= 0;
mysqld_user= mysqld_chroot= opt_init_file= opt_bin_logname = 0;
prepared_stmt_count= 0;
mysqld_unix_port= opt_mysql_tmpdir= my_bind_addr_str= NullS;
@@ -8269,9 +8287,6 @@ mysqld_get_one_option(const struct my_option *opt, const char *argument,
case (int) OPT_SKIP_HOST_CACHE:
opt_specialflag|= SPECIAL_NO_HOST_CACHE;
break;
- case (int) OPT_WANT_CORE:
- test_flags |= TEST_CORE_ON_SIGNAL;
- break;
case OPT_CONSOLE:
if (opt_console)
opt_error_log= 0; // Force logs to stdout
@@ -9209,6 +9224,7 @@ void refresh_status(THD *thd)
connections. This is not perfect, but status data is not exact anyway.
*/
max_used_connections= connection_count + extra_connection_count;
+ max_used_connections_time= time(nullptr);
}
#ifdef HAVE_PSI_INTERFACE
diff --git a/sql/mysqld.h b/sql/mysqld.h
index 54cafdcde15..43194dec639 100644
--- a/sql/mysqld.h
+++ b/sql/mysqld.h
@@ -917,10 +917,12 @@ enum enum_query_type
// it evaluates to. Should be used for error messages, so that they
// don't reveal values.
QT_NO_DATA_EXPANSION= (1 << 9),
+ // Remove wrappers added for TVC when creating or showing view
+ QT_NO_WRAPPERS_FOR_TVC_IN_VIEW= (1 << 12),
// The temporary tables used by the query might be freed by the time
// this print() call is made.
- QT_DONT_ACCESS_TMP_TABLES= (1 << 12)
+ QT_DONT_ACCESS_TMP_TABLES= (1 << 13)
};
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 242ddb9af19..1bf9c84bae2 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -464,7 +464,7 @@ void print_range_for_non_indexed_field(String *out, Field *field,
static void print_min_range_operator(String *out, const ha_rkey_function flag);
static void print_max_range_operator(String *out, const ha_rkey_function flag);
-static bool is_field_an_unique_index(RANGE_OPT_PARAM *param, Field *field);
+static bool is_field_an_unique_index(Field *field);
/*
SEL_IMERGE is a list of possible ways to do index merge, i.e. it is
@@ -3690,7 +3690,10 @@ end_of_range_loop:
}
else
{
+ enum_check_fields save_count_cuted_fields= thd->count_cuted_fields;
+ thd->count_cuted_fields= CHECK_FIELD_IGNORE;
rows= records_in_column_ranges(&param, idx, key);
+ thd->count_cuted_fields= save_count_cuted_fields;
if (rows != DBL_MAX)
{
key->field->cond_selectivity= rows/table_records;
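
The manual save/restore keeps truncation handling from leaking out of the internal range evaluation. The same effect, sketched as an RAII guard over a simplified THD (hypothetical names; the patch itself does it by hand):

  enum enum_check_fields { CHECK_FIELD_IGNORE, CHECK_FIELD_WARN };

  struct THD_sketch { enum_check_fields count_cuted_fields= CHECK_FIELD_WARN; };

  class Count_cuted_fields_guard {
    THD_sketch *thd;
    enum_check_fields saved;
  public:
    Count_cuted_fields_guard(THD_sketch *t, enum_check_fields v)
      : thd(t), saved(t->count_cuted_fields) { t->count_cuted_fields= v; }
    ~Count_cuted_fields_guard() { thd->count_cuted_fields= saved; }
  };

  int main()
  {
    THD_sketch thd;
    {
      Count_cuted_fields_guard g(&thd, CHECK_FIELD_IGNORE);
      // ... the records_in_column_ranges() equivalent would run here,
      // free to convert constants without raising truncation warnings ...
    }
    // mode is restored on scope exit
    return thd.count_cuted_fields == CHECK_FIELD_WARN ? 0 : 1;
  }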
@@ -8039,8 +8042,13 @@ SEL_TREE *Item_func_ne::get_func_mm_tree(RANGE_OPT_PARAM *param,
If this condition is a "col1<>...", where there is a UNIQUE KEY(col1),
do not construct a SEL_TREE from it. A condition that excludes just one
row in the table is not selective (unless there are only a few rows)
+
+ Note: this logic must be in sync with code in
+ check_group_min_max_predicates(). That function walks an Item* condition
+ and checks if the range optimizer would produce an equivalent range for
+ it.
*/
- if (is_field_an_unique_index(param, field))
+ if (param->using_real_indexes && is_field_an_unique_index(field))
DBUG_RETURN(NULL);
DBUG_RETURN(get_ne_mm_tree(param, field, value, value));
}
@@ -8152,7 +8160,7 @@ SEL_TREE *Item_func_in::get_func_mm_tree(RANGE_OPT_PARAM *param,
- if there are a lot of constants, the overhead of building and
processing enormous range list is not worth it.
*/
- if (is_field_an_unique_index(param, field))
+ if (param->using_real_indexes && is_field_an_unique_index(field))
DBUG_RETURN(0);
/* Get a SEL_TREE for "(-inf|NULL) < X < c_0" interval. */
@@ -8861,24 +8869,18 @@ SEL_TREE *Item_equal::get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr)
In the future we could also add "almost unique" indexes where any value is
present only in a few rows (but not necessarily exactly one row)
*/
-static bool is_field_an_unique_index(RANGE_OPT_PARAM *param, Field *field)
+static bool is_field_an_unique_index(Field *field)
{
DBUG_ENTER("is_field_an_unique_index");
-
- // The check for using_real_indexes is there because of the heuristics
- // this function is used for.
- if (param->using_real_indexes)
+ key_map::Iterator it(field->key_start);
+ uint key_no;
+ while ((key_no= it++) != key_map::Iterator::BITMAP_END)
{
- key_map::Iterator it(field->key_start);
- uint key_no;
- while ((key_no= it++) != key_map::Iterator::BITMAP_END)
+ KEY *key_info= &field->table->key_info[key_no];
+ if (key_info->user_defined_key_parts == 1 &&
+ (key_info->flags & HA_NOSAME))
{
- KEY *key_info= &field->table->key_info[key_no];
- if (key_info->user_defined_key_parts == 1 &&
- (key_info->flags & HA_NOSAME))
- {
- DBUG_RETURN(true);
- }
+ DBUG_RETURN(true);
}
}
DBUG_RETURN(false);
@@ -13894,7 +13896,7 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
- (C between const_i and const_j)
- C IS NULL
- C IS NOT NULL
- - C != const
+ - C != const (unless C is the primary key)
SA4. If Q has a GROUP BY clause, there are no other aggregate functions
except MIN and MAX. For queries with DISTINCT, aggregate functions
are allowed.
@@ -14846,6 +14848,17 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item,
if (!simple_pred(pred, args, &inv))
DBUG_RETURN(FALSE);
+ /*
+ Follow the logic in Item_func_ne::get_func_mm_tree(): condition
+ in form "tbl.primary_key <> const" is not used to produce intervals.
+
+ If the condition doesn't have an equivalent interval, this means we
+ fail LooseScan's condition SA3. Return FALSE to indicate this.
+ */
+ if (pred_type == Item_func::NE_FUNC &&
+ is_field_an_unique_index(min_max_arg_item->field))
+ DBUG_RETURN(FALSE);
+
if (args[0] && args[1]) // this is a binary function or BETWEEN
{
DBUG_ASSERT(pred->fixed_type_handler());
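
The arithmetic behind both hunks: with UNIQUE(col) on a table of N rows, `col <> c` excludes at most one row, so the two intervals (-inf, c) and (c, +inf) together still select N - 1 rows, a selectivity of (N - 1)/N (for N = 1,000,000 that is 0.999999). Such a range prunes essentially nothing, so the range optimizer refuses to build it, and the new check above mirrors that refusal so that the loose scan SA3 assumption stays in sync with it.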
diff --git a/sql/opt_split.cc b/sql/opt_split.cc
index f2d536cd47b..fa304f816dd 100644
--- a/sql/opt_split.cc
+++ b/sql/opt_split.cc
@@ -65,7 +65,7 @@
If we have only one equi-join condition then we either push it as
for Q1R or we don't. In a general case we may have much more options.
Consider the query (Q3)
- SELECT
+ SELECT *
FROM t1,t2 (SELECT t3.a, t3.b, MIN(t3.c) as min
FROM t3 GROUP BY a,b) t
WHERE t.a = t1.a AND t.b = t2.b
@@ -102,6 +102,47 @@
If we just drop the index on t3(a,b) the chances that the splitting
will be used become much lower, but they still exist provided that
the fanout of the partial join of t1 and t2 is small enough.
+
+ The lateral derived table LT formed as a result of SM optimization applied
+ to a materialized derived table DT must be joined after all parameters
+ of splitting have been evaluated, i.e. after all expressions used in the
+ equalities pushed into DT that make the employed splitting effective
+ can be evaluated. With the chosen join order all the parameters can be
+ evaluated after the last table LPT that contains any columns referenced in
+ the parameters has been joined and the table APT following LPT in the chosen
+ join order is accessed.
+ Usually the formed lateral derived table LT is accessed right after the table
+ LPT. As in such cases table LT must be refilled for each combination of
+ splitting parameters, it has to be populated before each access to LT, and
+ the natural estimate of the expected number of refills is the number of rows
+ in the partial join ending with table LPT.
+ However in other cases the chosen join order may contain tables between LPT
+ and LT.
+ Consider the query (Q4)
+ SELECT *
+ FROM t1 JOIN t2 ON t1.b = t2.b
+ LEFT JOIN (SELECT t3.a, t3.b, MIN(t3.c) as min
+ FROM t3 GROUP BY a,b) t
+ ON t.a = t1.a AND t.c > 0
+ [WHERE P(t1,t2)];
+ Let's assume that the join order t1,t2,t was chosen for this query and
+ SP optimization was applied to t with splitting over t3.a using the index
+ on column t3.a. Here the table t1 serves as LPT, t2 as APT, while t with
+ the pushed condition t.a = t1.a serves as LT. Note that here LT is accessed
+ after t2, not right after t1. Here the number of refills of the lateral
+ derived table is not more than the number of distinct values of t1.a, which
+ might be less than the cardinality of the partial join (t1,t2). That's why
+ it makes sense to signal that LT has to be refilled just before t2 is
+ accessed.
+ However, if the cardinality of the partial join (t1,t2) happens to be less
+ than the cardinality of the partial join (t1) due to an additional selective
+ condition P(t1,t2), then the flag indicating that a new refill is needed
+ can be set either when accessing t2 or right after it has been joined.
+ The current code sets such a flag right after generating a record of the
+ partial join with minimal cardinality among all those partial joins that
+ end between APT and LT. This sometimes allows pushing extra conditions
+ into the lateral derived table without any increase in the number of
+ refills. However, this flag can be set only after the last table between
+ APT and LT that uses a join buffer has been joined.
*/
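A minimal sketch of the boundary search described above, detached from the
server's POSITION/table_map machinery (all types and names here are
illustrative, not the actual MariaDB structures):

#include <cfloat>

struct Pos
{
  double partial_join_cardinality; // rows in the partial join ending here
  bool   supplies_param;           // supplies a splitting parameter
  bool   uses_join_buffer;
};

// Walk the prefix backwards from the table joined right before LT: the
// refill estimate is the smallest partial-join cardinality seen before the
// walk hits a table that supplies a splitting parameter or is joined
// through the join buffer; the boundary is the table joined right after
// that minimal position.
double refill_estimate(const Pos *prefix, int last, int *boundary)
{
  double refills= DBL_MAX;
  int follower= last + 1;          // the table joined after prefix[i]
  *boundary= follower;             // default: LT itself
  for (int i= last; i >= 0; i--)
  {
    if (prefix[i].partial_join_cardinality < refills)
    {
      refills= prefix[i].partial_join_cardinality;
      *boundary= follower;
    }
    follower= i;
    if (prefix[i].supplies_param || prefix[i].uses_join_buffer)
      break;
  }
  return refills;
}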
/*
@@ -250,6 +291,7 @@ public:
double unsplit_oper_cost;
/* Cardinality of T when nothing is pushed */
double unsplit_card;
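+  /* Expected number of refills of T (used to cost the chosen splitting) */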
+ double last_refills;
SplM_plan_info *find_plan(TABLE *table, uint key, uint parts);
};
@@ -347,6 +389,9 @@ bool JOIN::check_for_splittable_materialized()
if (!partition_list)
return false;
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_object trace_split(thd, "check_split_materialized");
+
ORDER *ord;
Dynamic_array<SplM_field_ext_info> candidates(PSI_INSTRUMENT_MEM);
@@ -392,7 +437,10 @@ bool JOIN::check_for_splittable_materialized()
}
}
if (candidates.elements() == 0) // no candidates satisfying (8.1) && (8.2)
+ {
+ trace_split.add("not_applicable", "group list has no candidates");
return false;
+ }
/*
For each table from this join find the keys that can be used for ref access
@@ -451,7 +499,11 @@ bool JOIN::check_for_splittable_materialized()
}
if (!spl_field_cnt) // No candidate field can be accessed by ref => !(9)
+ {
+ trace_split.add("not_applicable",
+ "no candidate field can be accessed through ref");
return false;
+ }
/*
Create a structure of the type SplM_opt_info and fill it with
@@ -469,16 +521,22 @@ bool JOIN::check_for_splittable_materialized()
spl_opt_info->tables_usable_for_splitting= 0;
spl_opt_info->spl_field_cnt= spl_field_cnt;
spl_opt_info->spl_fields= spl_field;
- for (cand= cand_start; cand < cand_end; cand++)
+
{
- if (!cand->is_usable_for_ref_access)
- continue;
- spl_field->producing_item= cand->producing_item;
- spl_field->underlying_field= cand->underlying_field;
- spl_field->mat_field= cand->mat_field;
- spl_opt_info->tables_usable_for_splitting|=
- cand->underlying_field->table->map;
- spl_field++;
+ Json_writer_array trace_range(thd, "split_candidates");
+ for (cand= cand_start; cand < cand_end; cand++)
+ {
+ if (!cand->is_usable_for_ref_access)
+ continue;
+ trace_range.add(cand->producing_item);
+
+ spl_field->producing_item= cand->producing_item;
+ spl_field->underlying_field= cand->underlying_field;
+ spl_field->mat_field= cand->mat_field;
+ spl_opt_info->tables_usable_for_splitting|=
+ cand->underlying_field->table->map;
+ spl_field++;
+ }
}
/* Attach this info to the table T */
@@ -740,7 +798,7 @@ void JOIN::add_keyuses_for_splitting()
bzero((char*) &keyuse_ext_end, sizeof(keyuse_ext_end));
if (ext_keyuses_for_splitting->push(keyuse_ext_end))
goto err;
-
/*
Use the number of rows that was computed by
TABLE_LIST::fetch_number_of_rows():
@@ -844,13 +902,13 @@ SplM_plan_info *SplM_opt_info::find_plan(TABLE *table, uint key, uint parts)
static
void reset_validity_vars_for_keyuses(KEYUSE_EXT *key_keyuse_ext_start,
TABLE *table, uint key,
- table_map remaining_tables,
+ table_map excluded_tables,
bool validity_val)
{
KEYUSE_EXT *keyuse_ext= key_keyuse_ext_start;
do
{
- if (!(keyuse_ext->needed_in_prefix & remaining_tables))
+ if (!(keyuse_ext->needed_in_prefix & excluded_tables))
{
/*
The enabling/disabling flags are set just in KEYUSE_EXT structures.
@@ -870,8 +928,11 @@ void reset_validity_vars_for_keyuses(KEYUSE_EXT *key_keyuse_ext_start,
Choose the best splitting to extend the evaluated partial join
@param
- record_count estimated cardinality of the extended partial join
+    idx               index of the joined table T in the current partial join P
remaining_tables tables not joined yet
+    spl_pd_boundary   OUT bitmap of the table from P (extended by T) that
+                      starts the sub-sequence of tables S from which
+                      no conditions are allowed to be pushed into T
@details
This function is called during the search for the best execution
@@ -893,10 +954,12 @@ void reset_validity_vars_for_keyuses(KEYUSE_EXT *key_keyuse_ext_start,
@retval
Pointer to the info on the found plan that employs the pushed equalities
if the plan has been chosen, NULL - otherwise.
+    If the function returns NULL, the value of *spl_pd_boundary is set to 0.
*/
-SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
- table_map remaining_tables)
+SplM_plan_info * JOIN_TAB::choose_best_splitting(uint idx,
+ table_map remaining_tables,
+ table_map *spl_pd_boundary)
{
SplM_opt_info *spl_opt_info= table->spl_opt_info;
DBUG_ASSERT(spl_opt_info != NULL);
@@ -911,8 +974,10 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
SplM_plan_info *spl_plan= 0;
uint best_key= 0;
uint best_key_parts= 0;
+  table_map best_param_tables= 0;
bool chosen, already_printed;
-
+ Json_writer_object trace_obj(thd, "choose_best_splitting");
+ Json_writer_array trace_arr(thd, "considered_keys");
/*
Check whether there are keys that can be used to join T employing splitting
and if so, select the best out of such keys
@@ -930,6 +995,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
uint key= keyuse_ext->key;
KEYUSE_EXT *key_keyuse_ext_start= keyuse_ext;
key_part_map found_parts= 0;
+ table_map needed_in_prefix= 0;
do
{
if (keyuse_ext->needed_in_prefix & remaining_tables)
@@ -955,6 +1021,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
KEY *key_info= table->key_info + key;
double rec_per_key=
key_info->actual_rec_per_key(keyuse_ext->keypart);
+ needed_in_prefix|= keyuse_ext->needed_in_prefix;
if (rec_per_key < best_rec_per_key)
{
best_table= keyuse_ext->table;
@@ -962,6 +1029,14 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
best_key_parts= keyuse_ext->keypart + 1;
best_rec_per_key= rec_per_key;
best_key_keyuse_ext_start= key_keyuse_ext_start;
+ best_param_tables= needed_in_prefix;
+        // Trace the new best key: table, index name, rec_per_key, param_tables
+ Json_writer_object cur_index(thd);
+ cur_index.
+ add("table_name", best_table->alias.ptr()).
+ add("index", best_table->key_info[best_key].name).
+ add("rec_per_key", best_rec_per_key).
+ add("param_tables", best_param_tables);
}
keyuse_ext++;
}
@@ -969,14 +1044,41 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
}
while (keyuse_ext->table == table);
}
+ trace_arr.end();
chosen= 0;
+
+ double refills= DBL_MAX;
+ table_map excluded_tables= remaining_tables | this->join->sjm_lookup_tables;
if (best_table)
{
+ *spl_pd_boundary= this->table->map;
+ if (!best_param_tables)
+ refills= 1;
+ else
+ {
+ table_map last_found= this->table->map;
+ for (POSITION *pos= &this->join->positions[idx - 1]; ; pos--)
+ {
+ if (pos->table->table->map & excluded_tables)
+ continue;
+ if (pos->partial_join_cardinality < refills)
+ {
+ *spl_pd_boundary= last_found;
+ refills= pos->partial_join_cardinality;
+ }
+ last_found= pos->table->table->map;
+ if ((last_found & best_param_tables) || pos->use_join_buffer)
+ break;
+ }
+ }
+
+ trace_obj.add("refills", refills).
+ add("spl_pd_boundary", *spl_pd_boundary);
+
/*
The key for splitting was chosen, look for the plan for this key
in the cache
*/
- Json_writer_array spl_trace(thd, "choose_best_splitting");
spl_plan= spl_opt_info->find_plan(best_table, best_key, best_key_parts);
if (!spl_plan)
{
@@ -984,11 +1086,13 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
The plan for the chosen key has not been found in the cache.
Build a new plan and save info on it in the cache
*/
+ Json_writer_array wrapper(thd, "split_plan_search");
table_map all_table_map= (((table_map) 1) << join->table_count) - 1;
reset_validity_vars_for_keyuses(best_key_keyuse_ext_start, best_table,
- best_key, remaining_tables, true);
+ best_key, excluded_tables, true);
choose_plan(join, all_table_map & ~join->const_table_map, 0);
+ wrapper.end();
/*
Check that the chosen plan is really a splitting plan.
If not or if there is not enough memory to save the plan in the cache
@@ -1005,7 +1109,8 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
spl_opt_info->plan_cache.push_back(spl_plan))
{
reset_validity_vars_for_keyuses(best_key_keyuse_ext_start, best_table,
- best_key, remaining_tables, false);
+ best_key, excluded_tables, false);
+        trace_obj.add("split_plan_discarded", "constructed inapplicable query plan");
return 0;
}
@@ -1024,12 +1129,12 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
spl_plan->cost= (join->best_positions[join->table_count-1].read_time +
oper_cost);
- chosen= (record_count * spl_plan->cost + COST_EPS <
+ chosen= (refills * spl_plan->cost + COST_EPS <
spl_opt_info->unsplit_cost);
if (unlikely(thd->trace_started()))
{
- Json_writer_object wrapper(thd);
Json_writer_object find_trace(thd, "split_materialized");
find_trace.
add("table", best_table->alias.c_ptr()).
@@ -1040,24 +1145,26 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
add("unsplit_postjoin_cost", spl_opt_info->unsplit_oper_cost).
add("unsplit_cost", spl_opt_info->unsplit_cost).
add("rows", split_card).
- add("outer_rows", record_count).
- add("total_splitting_cost", record_count * spl_plan->cost).
+ add("refills", refills).
+ add("total_splitting_cost", refills * spl_plan->cost).
add("chosen", chosen);
}
memcpy((char *) spl_plan->best_positions,
(char *) join->best_positions,
sizeof(POSITION) * join->table_count);
reset_validity_vars_for_keyuses(best_key_keyuse_ext_start, best_table,
- best_key, remaining_tables, false);
+ best_key, excluded_tables, false);
already_printed= 1;
}
else
{
- chosen= (record_count * spl_plan->cost + COST_EPS <
+ trace_obj.add("cached_plan_found", 1);
+ chosen= (refills * spl_plan->cost + COST_EPS <
spl_opt_info->unsplit_cost);
already_printed= 0;
}
- }
+ }
+
/* Set the cost of the preferred materialization for this partial join */
if (chosen)
@@ -1066,10 +1173,8 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
The best plan that employs splitting is cheaper than
the plan without splitting
*/
-
- startup_cost= record_count * spl_plan->cost;
+ startup_cost= spl_opt_info->last_refills * spl_plan->cost;
records= (ha_rows) (spl_opt_info->unsplit_card * spl_plan->split_sel);
-
if (unlikely(thd->trace_started()) && ! already_printed)
{
Json_writer_object trace(thd, "split_materialized");
@@ -1086,6 +1191,7 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
records= (ha_rows) spl_opt_info->unsplit_card;
spl_plan= 0;
}
+
return spl_plan;
}
@@ -1095,13 +1201,13 @@ SplM_plan_info * JOIN_TAB::choose_best_splitting(double record_count,
Inject equalities for splitting used by the materialization join
@param
- excluded_tables used to filter out the equalities that cannot
- be pushed.
+ excluded_tables used to filter out the equalities that are not
+ to be pushed.
@details
This function injects equalities pushed into a derived table T for which
the split optimization has been chosen by the optimizer. The function
- is called by JOIN::inject_splitting_cond_for_all_tables_with_split_op().
+ is called by JOIN::inject_splitting_cond_for_all_tables_with_split_opt().
All equalities usable for splitting T whose right parts do not depend on
any of the 'excluded_tables' can be pushed into the where clause of the
derived table T.
@@ -1189,7 +1295,7 @@ bool is_eq_cond_injected_for_split_opt(Item_func_eq *eq_item)
@param
spl_plan info on the splitting plan chosen for the splittable table T
- remaining_tables the table T is joined just before these tables
+ excluded_tables tables that cannot be used in equalities pushed into T
is_const_table the table T is a constant table
@details
@@ -1204,7 +1310,7 @@ bool is_eq_cond_injected_for_split_opt(Item_func_eq *eq_item)
*/
bool JOIN_TAB::fix_splitting(SplM_plan_info *spl_plan,
- table_map remaining_tables,
+ table_map excluded_tables,
bool is_const_table)
{
SplM_opt_info *spl_opt_info= table->spl_opt_info;
@@ -1212,6 +1318,7 @@ bool JOIN_TAB::fix_splitting(SplM_plan_info *spl_plan,
JOIN *md_join= spl_opt_info->join;
if (spl_plan && !is_const_table)
{
+ is_split_derived= true;
memcpy((char *) md_join->best_positions,
(char *) spl_plan->best_positions,
sizeof(POSITION) * md_join->table_count);
@@ -1222,7 +1329,7 @@ bool JOIN_TAB::fix_splitting(SplM_plan_info *spl_plan,
reset_validity_vars_for_keyuses(spl_plan->keyuse_ext_start,
spl_plan->table,
spl_plan->key,
- remaining_tables,
+ excluded_tables,
true);
}
else if (md_join->save_qep)
@@ -1258,8 +1365,21 @@ bool JOIN::fix_all_splittings_in_plan()
if (tab->table->is_splittable())
{
SplM_plan_info *spl_plan= cur_pos->spl_plan;
+ table_map excluded_tables= (all_tables & ~prev_tables) |
+ sjm_lookup_tables;
+ if (spl_plan)
+ {
+ POSITION *pos= cur_pos;
+ table_map spl_pd_boundary= pos->spl_pd_boundary;
+ do
+ {
+ excluded_tables|= pos->table->table->map;
+ }
+ while (!((pos--)->table->table->map & spl_pd_boundary));
+ }
if (tab->fix_splitting(spl_plan,
- all_tables & ~prev_tables,
+ excluded_tables,
tablenr < const_tables ))
return true;
}
@@ -1298,13 +1418,21 @@ bool JOIN::inject_splitting_cond_for_all_tables_with_split_opt()
continue;
SplM_opt_info *spl_opt_info= tab->table->spl_opt_info;
JOIN *join= spl_opt_info->join;
- /*
- Currently the equalities referencing columns of SJM tables with
- look-up access cannot be pushed into materialized derived.
- */
- if (join->inject_best_splitting_cond((all_tables & ~prev_tables) |
- sjm_lookup_tables))
- return true;
+ table_map excluded_tables= (all_tables & ~prev_tables) | sjm_lookup_tables;
+ table_map spl_pd_boundary= cur_pos->spl_pd_boundary;
+ for (POSITION *pos= cur_pos; ; pos--)
+ {
+ excluded_tables|= pos->table->table->map;
+ pos->table->no_forced_join_cache= true;
+ if (pos->table->table->map & spl_pd_boundary)
+ {
+ pos->table->split_derived_to_update|= tab->table->map;
+ break;
+ }
+ }
+
+ if (join->inject_best_splitting_cond(excluded_tables))
+ return true;
}
return false;
}
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index 9c8fc6dc0b3..1bdbcf0deb9 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -4148,6 +4148,7 @@ void fix_semijoin_strategies_for_picked_join_order(JOIN *join)
*/
join->cur_sj_inner_tables= 0;
Json_writer_object semijoin_strategy(thd);
+ double inner_fanout= 1.0;
semijoin_strategy.add("semi_join_strategy","FirstMatch");
Json_writer_array semijoin_plan(thd, "join_order");
for (idx= first; idx <= tablenr; idx++)
@@ -4165,10 +4166,22 @@ void fix_semijoin_strategies_for_picked_join_order(JOIN *join)
TRUE /* no jbuf */,
record_count, join->best_positions + idx, &dummy);
}
+ /*
+ TODO: We should also compute the selectivity here, as well as adjust
+ the records_out according to the fraction of records removed by
+ the semi-join.
+ */
+ double rec_out= join->best_positions[idx].records_out;
+ if (join->best_positions[idx].table->emb_sj_nest)
+ inner_fanout *= rec_out;
+
record_count *= join->best_positions[idx].records_out;
rem_tables &= ~join->best_positions[idx].table->table->map;
}
+ if (inner_fanout > 1.0)
+ join->best_positions[tablenr].records_out /= inner_fanout;
}
+
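A worked miniature of the fanout compensation above (plain C++; the shapes
and numbers are hypothetical): with a FirstMatch range whose records_out are
10 (outer), 3 and 4 (semi-join inner), inner_fanout becomes 12, and dividing
the last records_out by it brings the prefix cardinality after the range
back to the 10 outer rows:

struct Pos { double records_out; bool sj_inner; };

double adjust_firstmatch(Pos *plan, int n)
{
  double record_count= 1.0, inner_fanout= 1.0;
  for (int i= 0; i < n; i++)
  {
    if (plan[i].sj_inner)
      inner_fanout*= plan[i].records_out;   // fanout of inner tables only
    record_count*= plan[i].records_out;
  }
  if (inner_fanout > 1.0)                   // at most one match per outer row
    plan[n - 1].records_out/= inner_fanout; // 4 / 12, prefix becomes 10
  return record_count;
}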
if (pos->sj_strategy == SJ_OPT_LOOSE_SCAN)
{
@@ -4947,11 +4960,13 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd)
{
DBUG_PRINT("info",("Creating group key in temporary table"));
share->keys=1;
- share->uniques= MY_TEST(using_unique_constraint);
table->key_info= share->key_info= keyinfo;
keyinfo->key_part=key_part_info;
- keyinfo->flags=HA_NOSAME;
+ keyinfo->flags= HA_NOSAME | (using_unique_constraint ? HA_UNIQUE_HASH : 0);
+ keyinfo->ext_key_flags= keyinfo->flags;
keyinfo->usable_key_parts= keyinfo->user_defined_key_parts= 1;
+ keyinfo->ext_key_parts= 1;
+ share->key_parts= 1;
keyinfo->key_length=0;
keyinfo->rec_per_key=0;
keyinfo->algorithm= HA_KEY_ALG_UNDEF;
diff --git a/sql/opt_subselect.h b/sql/opt_subselect.h
index c0398fc8539..8140a01974d 100644
--- a/sql/opt_subselect.h
+++ b/sql/opt_subselect.h
@@ -316,6 +316,7 @@ public:
pos->loosescan_picker.loosescan_key= best_loose_scan_key;
pos->loosescan_picker.loosescan_parts= best_max_loose_keypart + 1;
pos->use_join_buffer= FALSE;
+ pos->firstmatch_with_join_buf= FALSE;
pos->table= tab;
pos->range_rowid_filter_info= tab->range_rowid_filter_info;
pos->ref_depend_map= best_ref_depend_map;
diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc
index b3917942874..1e07ca582da 100644
--- a/sql/rpl_parallel.cc
+++ b/sql/rpl_parallel.cc
@@ -264,6 +264,12 @@ finish_event_group(rpl_parallel_thread *rpt, uint64 sub_id,
STRING_WITH_LEN("now WAIT_FOR proceed_by_1000"));
}
});
+ DBUG_EXECUTE_IF("hold_worker2_favor_worker3", {
+ if (rgi->current_gtid.seq_no == 2001) {
+ DBUG_ASSERT(!rgi->worker_error || entry->stop_on_error_sub_id == sub_id);
+ debug_sync_set_action(thd, STRING_WITH_LEN("now SIGNAL cont_worker3"));
+ }
+ });
#endif
if (rgi->killed_for_retry == rpl_group_info::RETRY_KILL_PENDING)
@@ -289,6 +295,11 @@ signal_error_to_sql_driver_thread(THD *thd, rpl_group_info *rgi, int err)
In case we get an error during commit, inform following transactions that
we aborted our commit.
*/
+ DBUG_EXECUTE_IF("hold_worker2_favor_worker3", {
+ if (rgi->current_gtid.seq_no == 2002) {
+ debug_sync_set_action(thd, STRING_WITH_LEN("now WAIT_FOR cont_worker2"));
+ }});
+
rgi->unmark_start_commit();
rgi->cleanup_context(thd, true);
rgi->rli->abort_slave= true;
@@ -823,7 +834,14 @@ do_retry:
thd->reset_killed();
thd->clear_error();
rgi->killed_for_retry = rpl_group_info::RETRY_KILL_NONE;
-
+#ifdef ENABLED_DEBUG_SYNC
+ DBUG_EXECUTE_IF("hold_worker2_favor_worker3", {
+ if (rgi->current_gtid.seq_no == 2003) {
+ debug_sync_set_action(thd,
+ STRING_WITH_LEN("now WAIT_FOR cont_worker3"));
+ }
+ });
+#endif
/*
If we retry due to a deadlock kill that occurred during the commit step, we
might have already updated (but not committed) an update of table
@@ -842,13 +860,10 @@ do_retry:
for (;;)
{
mysql_mutex_lock(&entry->LOCK_parallel_entry);
- if (entry->stop_on_error_sub_id == (uint64) ULONGLONG_MAX ||
- DBUG_IF("simulate_mdev_12746") ||
- rgi->gtid_sub_id < entry->stop_on_error_sub_id)
- {
- register_wait_for_prior_event_group_commit(rgi, entry);
- }
- else
+ register_wait_for_prior_event_group_commit(rgi, entry);
+ if (entry->stop_on_error_sub_id != (uint64) ULONGLONG_MAX &&
+ !DBUG_IF("simulate_mdev_12746") &&
+ rgi->gtid_sub_id >= entry->stop_on_error_sub_id)
{
/*
A failure of a preceding "parent" transaction may not be
@@ -2073,6 +2088,9 @@ rpl_parallel_thread::get_gco(uint64 wait_count, group_commit_orderer *prev,
gco->prior_sub_id= prior_sub_id;
gco->installed= false;
gco->flags= 0;
+#ifndef DBUG_OFF
+ gco->gc_done= false;
+#endif
return gco;
}
@@ -2080,6 +2098,10 @@ rpl_parallel_thread::get_gco(uint64 wait_count, group_commit_orderer *prev,
void
rpl_parallel_thread::loc_free_gco(group_commit_orderer *gco)
{
+#ifndef DBUG_OFF
+ DBUG_ASSERT(!gco->gc_done);
+ gco->gc_done= true;
+#endif
if (!loc_gco_list)
loc_gco_last_ptr_ptr= &gco->next_gco;
else
@@ -2561,14 +2583,16 @@ rpl_parallel::find(uint32 domain_id, Relay_log_info *rli)
e->pause_sub_id= (uint64)ULONGLONG_MAX;
e->pending_start_alters= 0;
e->rli= rli;
+ mysql_mutex_init(key_LOCK_parallel_entry, &e->LOCK_parallel_entry,
+ MY_MUTEX_INIT_FAST);
+ mysql_cond_init(key_COND_parallel_entry, &e->COND_parallel_entry, NULL);
if (my_hash_insert(&domain_hash, (uchar *)e))
{
+ mysql_cond_destroy(&e->COND_parallel_entry);
+ mysql_mutex_destroy(&e->LOCK_parallel_entry);
my_free(e);
return NULL;
}
- mysql_mutex_init(key_LOCK_parallel_entry, &e->LOCK_parallel_entry,
- MY_MUTEX_INIT_FAST);
- mysql_cond_init(key_COND_parallel_entry, &e->COND_parallel_entry, NULL);
}
else
{
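The reordering above appears to follow the init-before-publish rule: once
the entry is reachable through the shared hash, another thread may lock its
mutex, so the mutex and condition variable must be initialized first and
torn down again if the insert fails. A generic sketch of the pattern
(pthread-based; insert_fn stands in for my_hash_insert and is hypothetical):

#include <pthread.h>

struct entry
{
  pthread_mutex_t lock;
  pthread_cond_t  cond;
  /* ... payload ... */
};

bool publish(entry *e, bool (*insert_fn)(entry *))  // true on failure
{
  pthread_mutex_init(&e->lock, nullptr);   // initialize BEFORE publishing
  pthread_cond_init(&e->cond, nullptr);
  if (insert_fn(e))
  {
    pthread_cond_destroy(&e->cond);        // undo on failed insert
    pthread_mutex_destroy(&e->lock);
    return true;
  }
  return false;                            // e is now visible and usable
}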
@@ -3149,7 +3173,12 @@ rpl_parallel::do_event(rpl_group_info *serial_rgi, Log_event *ev,
if (mode <= SLAVE_PARALLEL_MINIMAL ||
!(gtid_flags & Gtid_log_event::FL_GROUP_COMMIT_ID) ||
- e->last_commit_id != gtid_ev->commit_id)
+ e->last_commit_id != gtid_ev->commit_id ||
+ /*
+             MULTI_BATCH is also set when the current gtid event, despite
+             being a member of a commit group, is flagged as DDL, which
+             disallows parallel apply.
+ */
+ (gtid_flags & Gtid_log_event::FL_DDL))
flags|= group_commit_orderer::MULTI_BATCH;
/* Make sure we do not attempt to run DDL in parallel speculatively. */
if (gtid_flags & Gtid_log_event::FL_DDL)
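A sketch of the resulting batching predicate (flag semantics taken from the
comment in the diff; the helper itself is hypothetical):

bool force_multi_batch(bool mode_at_most_minimal, bool has_commit_id,
                       unsigned long long last_commit_id,
                       unsigned long long commit_id, bool is_ddl)
{
  // A commit group is only applied speculatively in parallel when every
  // member shares the commit id and none of them is DDL.
  return mode_at_most_minimal || !has_commit_id ||
         last_commit_id != commit_id || is_ddl;
}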
diff --git a/sql/rpl_parallel.h b/sql/rpl_parallel.h
index 9a8dfc7e386..f2bf36aa4a1 100644
--- a/sql/rpl_parallel.h
+++ b/sql/rpl_parallel.h
@@ -91,6 +91,9 @@ struct group_commit_orderer {
FORCE_SWITCH= 2
};
uint8 flags;
+#ifndef DBUG_OFF
+ bool gc_done;
+#endif
};
diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc
index a8af950fa08..bd55c14e447 100644
--- a/sql/rpl_rli.cc
+++ b/sql/rpl_rli.cc
@@ -2423,8 +2423,13 @@ mark_start_commit_inner(rpl_parallel_entry *e, group_commit_orderer *gco,
uint64 count= ++e->count_committing_event_groups;
/* Signal any following GCO whose wait_count has been reached now. */
tmp= gco;
+
+ DBUG_ASSERT(!tmp->gc_done);
+
while ((tmp= tmp->next_gco))
{
+ DBUG_ASSERT(!tmp->gc_done);
+
uint64 wait_count= tmp->wait_count;
if (wait_count > count)
break;
diff --git a/sql/semisync_master.cc b/sql/semisync_master.cc
index b57fc199826..17adeed86e7 100644
--- a/sql/semisync_master.cc
+++ b/sql/semisync_master.cc
@@ -317,8 +317,8 @@ void Active_tranx::clear_active_tranx_nodes(const char *log_file_name,
/*******************************************************************************
*
- * <Repl_semi_sync_master> class: the basic code layer for syncsync master.
- * <Repl_semi_sync_slave> class: the basic code layer for syncsync slave.
+ * <Repl_semi_sync_master> class: the basic code layer for semisync master.
+ * <Repl_semi_sync_slave> class: the basic code layer for semisync slave.
*
* The most important functions during semi-syn replication listed:
*
@@ -809,8 +809,6 @@ void Repl_semi_sync_master::dump_end(THD* thd)
remove_slave();
ack_receiver.remove_slave(thd);
-
- return;
}
int Repl_semi_sync_master::commit_trx(const char* trx_wait_binlog_name,
diff --git a/sql/signal_handler.cc b/sql/signal_handler.cc
index 9991f261202..fb3f94d019e 100644
--- a/sql/signal_handler.cc
+++ b/sql/signal_handler.cc
@@ -27,6 +27,7 @@
#ifdef _WIN32
#include <crtdbg.h>
+#include <direct.h>
#define SIGNAL_FMT "exception 0x%x"
#else
#define SIGNAL_FMT "signal %d"
@@ -66,9 +67,9 @@ static inline void output_core_info()
(int) len, buff);
}
#ifdef __FreeBSD__
- if ((fd= my_open("/proc/curproc/rlimit", O_RDONLY, MYF(MY_NO_REGISTER))) >= 0)
+ if ((fd= open("/proc/curproc/rlimit", O_RDONLY)) >= 0)
#else
- if ((fd= my_open("/proc/self/limits", O_RDONLY, MYF(MY_NO_REGISTER))) >= 0)
+ if ((fd= open("/proc/self/limits", O_RDONLY)) >= 0)
#endif
{
my_safe_printf_stderr("Resource Limits:\n");
@@ -76,21 +77,20 @@ static inline void output_core_info()
{
my_write_stderr(buff, len);
}
- my_close(fd, MYF(0));
+ close(fd);
}
#ifdef __linux__
- if ((fd= my_open("/proc/sys/kernel/core_pattern", O_RDONLY,
- MYF(MY_NO_REGISTER))) >= 0)
+ if ((fd= open("/proc/sys/kernel/core_pattern", O_RDONLY)) >= 0)
{
len= read(fd, (uchar*)buff, sizeof(buff));
my_safe_printf_stderr("Core pattern: %.*s\n", (int) len, buff);
- my_close(fd, MYF(0));
+ close(fd);
}
- if ((fd= my_open("/proc/version", O_RDONLY, MYF(0))) >= 0)
+ if ((fd= open("/proc/version", O_RDONLY)) >= 0)
{
- len= my_read(fd, (uchar*)buff, sizeof(buff), MYF(0));
+ len= read(fd, (uchar*)buff, sizeof(buff));
my_safe_printf_stderr("Kernel version: %.*s\n", (int) len, buff);
- my_close(fd, MYF(0));
+ close(fd);
}
#endif
#elif defined(__APPLE__) || defined(__FreeBSD__)
@@ -104,11 +104,14 @@ static inline void output_core_info()
{
my_safe_printf_stderr("Kernel version: %.*s\n", (int) len, buff);
}
-#else
+#elif defined(HAVE_GETCWD)
char buff[80];
- my_getwd(buff, sizeof(buff), 0);
- my_safe_printf_stderr("Writing a core file at %s\n", buff);
- fflush(stderr);
+
+ if (getcwd(buff, sizeof(buff)))
+ {
+ my_safe_printf_stderr("Writing a core file at %.*s\n", (int) sizeof(buff), buff);
+ fflush(stderr);
+ }
#endif
}
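The switch from the my_open()/my_read()/my_close() wrappers to the raw
syscalls is presumably about async-signal safety: a crash handler may only
call async-signal-safe functions, and the wrappers can take locks or touch
instrumentation. A self-contained sketch of the safe pattern (POSIX only;
the helper is illustrative):

#include <fcntl.h>
#include <unistd.h>

static void dump_proc_file(const char *path)
{
  char buf[256];
  ssize_t len;
  int fd= open(path, O_RDONLY);            // async-signal-safe
  if (fd < 0)
    return;
  while ((len= read(fd, buf, sizeof(buf))) > 0)
    if (write(STDERR_FILENO, buf, (size_t) len) < 0)
      break;
  close(fd);
}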
diff --git a/sql/slave.cc b/sql/slave.cc
index d24e5ab50e4..7bc61d33b11 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -7900,14 +7900,15 @@ end:
@return TRUE if master has the bug, FALSE if it does not.
*/
bool rpl_master_has_bug(const Relay_log_info *rli, uint bug_id, bool report,
- bool (*pred)(const void *), const void *param)
+ bool (*pred)(const void *), const void *param,
+ bool maria_master)
{
struct st_version_range_for_one_bug {
uint bug_id;
Version introduced_in; // first version with bug
Version fixed_in; // first version with fix
};
- static struct st_version_range_for_one_bug versions_for_all_bugs[]=
+ static struct st_version_range_for_one_bug versions_for_their_bugs[]=
{
{24432, { 5, 0, 24 }, { 5, 0, 38 } },
{24432, { 5, 1, 12 }, { 5, 1, 17 } },
@@ -7915,11 +7916,27 @@ bool rpl_master_has_bug(const Relay_log_info *rli, uint bug_id, bool report,
{33029, { 5, 1, 0 }, { 5, 1, 12 } },
{37426, { 5, 1, 0 }, { 5, 1, 26 } },
};
+ static struct st_version_range_for_one_bug versions_for_our_bugs[]=
+ {
+ {29621, { 10, 3, 36 }, { 10, 3, 39 } },
+ {29621, { 10, 4, 26 }, { 10, 4, 29 } },
+ {29621, { 10, 5, 17 }, { 10, 5, 20 } },
+ {29621, { 10, 6, 9 }, { 10, 6, 13 } },
+ {29621, { 10, 7, 5 }, { 10, 7, 9 } },
+ {29621, { 10, 8, 4 }, { 10, 8, 8 } },
+ {29621, { 10, 9, 2 }, { 10, 9, 6 } },
+ {29621, { 10, 10,1 }, { 10, 10,4 } },
+ {29621, { 10, 11,1 }, { 10, 11,3 } },
+ };
const Version &master_ver=
rli->relay_log.description_event_for_exec->server_version_split;
+ struct st_version_range_for_one_bug* versions_for_all_bugs= maria_master ?
+ versions_for_our_bugs : versions_for_their_bugs;
+ uint all_size= maria_master ?
+ sizeof(versions_for_our_bugs)/sizeof(*versions_for_our_bugs) :
+ sizeof(versions_for_their_bugs)/sizeof(*versions_for_their_bugs);
- for (uint i= 0;
- i < sizeof(versions_for_all_bugs)/sizeof(*versions_for_all_bugs);i++)
+ for (uint i= 0; i < all_size; i++)
{
const Version &introduced_in= versions_for_all_bugs[i].introduced_in;
const Version &fixed_in= versions_for_all_bugs[i].fixed_in;
@@ -7928,18 +7945,21 @@ bool rpl_master_has_bug(const Relay_log_info *rli, uint bug_id, bool report,
fixed_in > master_ver &&
(pred == NULL || (*pred)(param)))
{
+ const char *bug_source= maria_master ?
+ "https://jira.mariadb.org/browse/MDEV-" :
+ "http://bugs.mysql.com/bug.php?id=";
if (!report)
return TRUE;
// a short message for SHOW SLAVE STATUS (message length constraints)
my_printf_error(ER_UNKNOWN_ERROR, "master may suffer from"
- " http://bugs.mysql.com/bug.php?id=%u"
+ " %s%u"
" so slave stops; check error log on slave"
- " for more info", MYF(0), bug_id);
+ " for more info", MYF(0), bug_source, bug_id);
// a verbose message for the error log
rli->report(ERROR_LEVEL, ER_UNKNOWN_ERROR, NULL,
"According to the master's version ('%s'),"
" it is probable that master suffers from this bug:"
- " http://bugs.mysql.com/bug.php?id=%u"
+ " %s%u"
" and thus replicating the current binary log event"
" may make the slave's data become different from the"
" master's data."
@@ -7953,6 +7973,7 @@ bool rpl_master_has_bug(const Relay_log_info *rli, uint bug_id, bool report,
" equal to '%d.%d.%d'. Then replication can be"
" restarted.",
rli->relay_log.description_event_for_exec->server_version,
+ bug_source,
bug_id,
fixed_in[0], fixed_in[1], fixed_in[2]);
return TRUE;
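A worked miniature of the half-open version test (the range comes from the
new MariaDB table above; the helper types are illustrative): master 10.6.10
falls in [10.6.9, 10.6.13), so bug 29621 is reported, while 10.6.13 does not
because fixed_in is exclusive.

struct Ver { int v[3]; };

static int cmp(const Ver &a, const Ver &b)
{
  for (int i= 0; i < 3; i++)
    if (a.v[i] != b.v[i])
      return a.v[i] < b.v[i] ? -1 : 1;
  return 0;
}

bool version_has_bug(const Ver &master, const Ver &introduced,
                     const Ver &fixed)
{
  return cmp(introduced, master) <= 0 && cmp(master, fixed) < 0;
}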
diff --git a/sql/slave.h b/sql/slave.h
index e2bd5cec1b9..02de9135c2a 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -231,7 +231,8 @@ bool show_all_master_info(THD* thd);
void show_binlog_info_get_fields(THD *thd, List<Item> *field_list);
bool show_binlog_info(THD* thd);
bool rpl_master_has_bug(const Relay_log_info *rli, uint bug_id, bool report,
- bool (*pred)(const void *), const void *param);
+ bool (*pred)(const void *), const void *param,
+ bool maria_master= false);
bool rpl_master_erroneous_autoinc(THD* thd);
const char *print_slave_db_safe(const char *db);
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index 6f2fa9bf672..d2ca8717f40 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -2597,6 +2597,8 @@ static bool acl_load(THD *thd, const Grant_tables& tables)
"possible to remove this privilege using REVOKE.",
host.host.hostname, host.db);
}
+      else if (!host.db)
+        host.db= const_cast<char*>(host_not_specified.str);
host.access= host_table.get_access();
host.access= fix_rights_for_db(host.access);
host.sort= get_magic_sort("hd", host.host.hostname, host.db);
@@ -2605,8 +2607,7 @@ static bool acl_load(THD *thd, const Grant_tables& tables)
{
sql_print_warning("'host' entry '%s|%s' "
"ignored in --skip-name-resolve mode.",
- safe_str(host.host.hostname),
- safe_str(host.db));
+ host.host.hostname, host.db);
continue;
}
#ifndef TO_BE_REMOVED
@@ -3728,7 +3729,7 @@ privilege_t acl_get(const char *host, const char *ip,
ACL_HOST *acl_host=dynamic_element(&acl_hosts,i,ACL_HOST*);
if (compare_hostname(&acl_host->host,host,ip))
{
- if (!acl_host->db || !wild_compare(db,acl_host->db,db_is_pattern))
+ if (!wild_compare(db, acl_host->db, db_is_pattern))
{
host_access=acl_host->access; // Fully specified. Take it
break;
@@ -6773,6 +6774,7 @@ static int update_role_columns(GRANT_TABLE *merged,
}
}
+restart:
for (uint i=0 ; i < mh->records ; i++)
{
GRANT_COLUMN *col = (GRANT_COLUMN *)my_hash_element(mh, i);
@@ -6781,6 +6783,7 @@ static int update_role_columns(GRANT_TABLE *merged,
{
changed= 1;
my_hash_delete(mh, (uchar*)col);
+ goto restart;
}
}
DBUG_ASSERT(rights == merged->cols);
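The restart is needed because my_hash_delete() may relocate elements,
invalidating a position-based scan. A hypothetical miniature of the hazard,
with a back-filling std::vector standing in for the hash:

#include <cstddef>
#include <vector>

template <class Pred>
void delete_matching(std::vector<int> &h, Pred dead)
{
restart:
  for (std::size_t i= 0; i < h.size(); i++)
  {
    if (dead(h[i]))
    {
      h[i]= h.back();   // delete by back-filling, as an open hash may do
      h.pop_back();
      goto restart;     // slot i now holds an unseen element; rescan
    }
  }
}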
diff --git a/sql/sql_analyze_stmt.h b/sql/sql_analyze_stmt.h
index 8f60d4b523a..7fad2ad9cb9 100644
--- a/sql/sql_analyze_stmt.h
+++ b/sql/sql_analyze_stmt.h
@@ -38,6 +38,16 @@ $stmt").
*/
+/* Fall back to microseconds as the "cycles" source when no cycle timer is available */
+static inline double timer_tracker_frequency()
+{
+#if (MY_TIMER_ROUTINE_CYCLES)
+ return static_cast<double>(sys_timer_info.cycles.frequency);
+#else
+ return static_cast<double>(sys_timer_info.microseconds.frequency);
+#endif
+}
+
class Gap_time_tracker;
void attach_gap_time_tracker(THD *thd, Gap_time_tracker *gap_tracker, ulonglong timeval);
void process_gap_time_tracker(THD *thd, ulonglong timeval);
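With this fallback the "cycles" counters may actually hold microseconds, but
the arithmetic stays consistent because the matching frequency is used. A
small sketch of the invariant (my_timer_* and sys_timer_info are the
server's timer API; this helper is illustrative):

static inline double ticks_to_ms(unsigned long long ticks,
                                 double ticks_per_second)
{
  // Whichever source was compiled in, ticks / frequency gives seconds.
  return 1000.0 * (double) ticks / ticks_per_second;
}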
@@ -52,9 +62,18 @@ protected:
ulonglong cycles;
ulonglong last_start;
+ ulonglong measure() const
+ {
+#if (MY_TIMER_ROUTINE_CYCLES)
+ return my_timer_cycles();
+#else
+ return my_timer_microseconds();
+#endif
+ }
+
void cycles_stop_tracking(THD *thd)
{
- ulonglong end= my_timer_cycles();
+ ulonglong end= measure();
cycles += end - last_start;
if (unlikely(end < last_start))
cycles += ULONGLONG_MAX;
@@ -63,19 +82,24 @@ protected:
if (my_gap_tracker)
attach_gap_time_tracker(thd, my_gap_tracker, end);
}
-public:
- Exec_time_tracker() : count(0), cycles(0), my_gap_tracker(NULL) {}
/*
- The time spent between stop_tracking() call on this object and any
- other time measurement will be billed to this tracker.
+    The time spent between the stop_tracking() call on this object and the
+    next time-tracking call will be billed to this tracker.
*/
Gap_time_tracker *my_gap_tracker;
+public:
+ Exec_time_tracker() : count(0), cycles(0), my_gap_tracker(NULL) {}
+
+ void set_gap_tracker(Gap_time_tracker *gap_tracker)
+ {
+ my_gap_tracker= gap_tracker;
+ }
// interface for collecting time
void start_tracking(THD *thd)
{
- last_start= my_timer_cycles();
+ last_start= measure();
process_gap_time_tracker(thd, last_start);
}
@@ -91,7 +115,7 @@ public:
{
// convert 'cycles' to milliseconds.
return 1000.0 * static_cast<double>(cycles) /
- static_cast<double>(sys_timer_info.cycles.frequency);
+ timer_tracker_frequency();
}
bool has_timed_statistics() const { return cycles > 0; }
@@ -117,13 +141,11 @@ public:
double get_time_ms() const
{
// convert 'cycles' to milliseconds.
- return 1000.0 * static_cast<double>(cycles) /
- static_cast<double>(sys_timer_info.cycles.frequency);
+ return 1000.0 * static_cast<double>(cycles) / timer_tracker_frequency();
}
};
-
/*
A class for counting certain actions (in all queries), and optionally
collecting the timings (in ANALYZE queries).
@@ -160,6 +182,25 @@ public:
if (unlikely((tracker)->timed)) \
{ (tracker)->stop_tracking(thd); }
+
+/*
+  Just a counter that increments a single value. Wrapped in a class to be
+  uniform with the other counters used by ANALYZE.
+*/
+
+class Counter_tracker
+{
+public:
+ Counter_tracker() : r_scans(0) {}
+ ha_rows r_scans;
+
+ inline void on_scan_init() { r_scans++; }
+
+ bool has_scans() const { return (r_scans != 0); }
+ ha_rows get_loops() const { return r_scans; }
+};
+
+
/*
A class for collecting read statistics.
@@ -170,20 +211,16 @@ public:
It can be used to track reading from files, buffers, etc).
*/
-class Table_access_tracker
+class Table_access_tracker
{
public:
- Table_access_tracker() :
- r_scans(0), r_rows(0), /*r_rows_after_table_cond(0),*/
- r_rows_after_where(0)
+ Table_access_tracker() : r_scans(0), r_rows(0), r_rows_after_where(0)
{}
- ha_rows r_scans; /* How many scans were ran on this join_tab */
+  ha_rows r_scans;        /* How many scans were run on this join_tab */
ha_rows r_rows; /* How many rows we've got after that */
ha_rows r_rows_after_where; /* Rows after applying attached part of WHERE */
- bool has_scans() const { return (r_scans != 0); }
- ha_rows get_loops() const { return r_scans; }
double get_avg_rows() const
{
return r_scans
@@ -202,6 +239,9 @@ public:
inline void on_scan_init() { r_scans++; }
inline void on_record_read() { r_rows++; }
inline void on_record_after_where() { r_rows_after_where++; }
+
+ bool has_scans() const { return (r_scans != 0); }
+ ha_rows get_loops() const { return r_scans; }
};
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index bae35cd3264..8acee122e07 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -6347,7 +6347,10 @@ find_field_in_table(THD *thd, TABLE *table, const char *name, size_t length,
if (cached_field_index < table->s->fields &&
!my_strcasecmp(system_charset_info,
table->field[cached_field_index]->field_name.str, name))
+ {
field= table->field[cached_field_index];
+ DEBUG_SYNC(thd, "table_field_cached");
+ }
else
{
LEX_CSTRING fname= {name, length};
@@ -6809,6 +6812,13 @@ find_field_in_tables(THD *thd, Item_ident *item,
if (last_table)
last_table= last_table->next_name_resolution_table;
+ field_index_t fake_index_for_duplicate_search= NO_CACHED_FIELD_INDEX;
+ /*
+    For the field search this points to the field cache; for the duplicate
+    search it points to fake_index_for_duplicate_search, so no caching
+    takes place.
+ */
+ field_index_t *current_cache= &(item->cached_field_index);
for (; cur_table != last_table ;
cur_table= cur_table->next_name_resolution_table)
{
@@ -6823,7 +6833,7 @@ find_field_in_tables(THD *thd, Item_ident *item,
SQLCOM_SHOW_FIELDS)
? false : check_privileges,
allow_rowid,
- &(item->cached_field_index),
+ current_cache,
register_tree_change,
&actual_table);
if (cur_field)
@@ -6838,7 +6848,7 @@ find_field_in_tables(THD *thd, Item_ident *item,
item->name.str, db, table_name,
ignored_tables, ref, false,
allow_rowid,
- &(item->cached_field_index),
+ current_cache,
register_tree_change,
&actual_table);
if (cur_field)
@@ -6855,8 +6865,19 @@ find_field_in_tables(THD *thd, Item_ident *item,
Store the original table of the field, which may be different from
cur_table in the case of NATURAL/USING join.
*/
- item->cached_table= (!actual_table->cacheable_table || found) ?
- 0 : actual_table;
+ if (actual_table->cacheable_table /*(1)*/ && !found /*(2)*/)
+ {
+ /*
+        We have just found a field that is allowed to be cached (1),
+        and this is not the duplicate search (2).
+ */
+ item->cached_table= actual_table;
+ }
+ else
+ {
+ item->cached_table= NULL;
+ item->cached_field_index= NO_CACHED_FIELD_INDEX;
+ }
DBUG_ASSERT(thd->where);
/*
@@ -6875,6 +6896,7 @@ find_field_in_tables(THD *thd, Item_ident *item,
return (Field*) 0;
}
found= cur_field;
+ current_cache= &fake_index_for_duplicate_search;
}
}
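A sketch of the pointer-redirect idiom introduced above (the shapes are
hypothetical; probe() stands in for find_field_in_table_ref()): the first
hit is written through the real cache slot, after which the pointer is
aimed at a scratch slot so the continuing duplicate search cannot clobber
the cached index.

#include <cstdint>

uint32_t lookup_all(bool (*probe)(int table, uint32_t *cache_slot),
                    uint32_t *real_cache, int n_tables)
{
  uint32_t scratch= UINT32_MAX;
  uint32_t *cache= real_cache;
  bool found= false;
  for (int i= 0; i < n_tables; i++)
  {
    if (probe(i, cache) && !found)
    {
      found= true;
      cache= &scratch;   // later hits only serve ambiguity detection
    }
  }
  return *real_cache;    // still the first hit's index
}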
@@ -8332,9 +8354,8 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
table_list;
table_list= table_list->next_local)
{
- if (table_list->merge_underlying_list)
+ if (table_list->is_merged_derived() && table_list->merge_underlying_list)
{
- DBUG_ASSERT(table_list->is_merged_derived());
Query_arena *arena, backup;
arena= thd->activate_stmt_arena_if_needed(&backup);
bool res;
diff --git a/sql/sql_class.h b/sql/sql_class.h
index bc94d583398..0661eb9014e 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -1897,7 +1897,7 @@ show_system_thread(enum_thread_type thread)
RETURN_NAME_AS_STRING(SYSTEM_THREAD_SLAVE_BACKGROUND);
RETURN_NAME_AS_STRING(SYSTEM_THREAD_SEMISYNC_MASTER_BACKGROUND);
default:
- sprintf(buf, "<UNKNOWN SYSTEM THREAD: %d>", thread);
+ snprintf(buf, sizeof(buf), "<UNKNOWN SYSTEM THREAD: %d>", thread);
return buf;
}
#undef RETURN_NAME_AS_STRING
@@ -7772,7 +7772,7 @@ public:
if (unlikely(!(dst->str= tmp= (char*) alloc_root(mem_root,
dst->length + 1))))
return true;
- sprintf(tmp, "%.*s%.*s%.*s",
+ snprintf(tmp, dst->length + 1, "%.*s%.*s%.*s",
(int) m_db.length, (m_db.length ? m_db.str : ""),
dot, ".",
(int) m_name.length, m_name.str);
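The sprintf-to-snprintf changes bound every write by the buffer size that
was computed or allocated beforehand. A sketch of the "%.*s" composition
used here (layout from the diff; the helper itself is illustrative):

#include <cstddef>
#include <cstdio>

void compose_qname(char *dst, std::size_t dst_size,
                   const char *db, int db_len,
                   const char *name, int name_len)
{
  int dot= db_len ? 1 : 0;   // print "." only when a db part exists
  snprintf(dst, dst_size, "%.*s%.*s%.*s",
           db_len, db_len ? db : "", dot, ".", name_len, name);
}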
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index dcf61e9b085..9598a1c58f1 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -1805,7 +1805,10 @@ bool Sql_cmd_delete::execute_inner(THD *thd)
else
{
if (thd->lex->describe || thd->lex->analyze_stmt)
- res= thd->lex->explain->send_explain(thd);
+ {
+ bool extended= thd->lex->describe & DESCRIBE_EXTENDED;
+ res= thd->lex->explain->send_explain(thd, extended);
+ }
}
}
diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc
index 22dd7734aea..4ceebcc1978 100644
--- a/sql/sql_derived.cc
+++ b/sql/sql_derived.cc
@@ -351,24 +351,6 @@ bool mysql_derived_merge(THD *thd, LEX *lex, TABLE_LIST *derived)
DBUG_RETURN(FALSE);
}
- if (dt_select->uncacheable & UNCACHEABLE_RAND)
- {
- /* There is random function => fall back to materialization. */
- cause= "Random function in the select";
- if (unlikely(thd->trace_started()))
- {
- OPT_TRACE_VIEWS_TRANSFORM(thd, trace_wrapper, trace_derived,
- derived->is_derived() ? "derived" : "view",
- derived->alias.str ? derived->alias.str : "<NULL>",
- derived->get_unit()->first_select()->select_number,
- "materialized");
- trace_derived.add("cause", cause);
- }
- derived->change_refs_to_fields();
- derived->set_materialized_derived();
- DBUG_RETURN(FALSE);
- }
-
if (derived->dt_handler)
{
derived->change_refs_to_fields();
@@ -821,6 +803,9 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
cursor->outer_join|= JOIN_TYPE_OUTER;
}
}
+  // Prevent a possible ORDER BY clause from resolving against the outer context
+ if (unit->fake_select_lex)
+ unit->fake_select_lex->context.outer_context= 0;
if (unlikely(thd->trace_started()))
{
diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc
index e98728eb443..639a45bf848 100644
--- a/sql/sql_explain.cc
+++ b/sql/sql_explain.cc
@@ -188,7 +188,7 @@ void Explain_query::notify_tables_are_closed()
Send EXPLAIN output to the client.
*/
-int Explain_query::send_explain(THD *thd)
+int Explain_query::send_explain(THD *thd, bool extended)
{
select_result *result;
LEX *lex= thd->lex;
@@ -201,8 +201,22 @@ int Explain_query::send_explain(THD *thd)
if (thd->lex->explain_json)
print_explain_json(result, thd->lex->analyze_stmt, false /*is_show_cmd*/);
else
+ {
res= print_explain(result, lex->describe, thd->lex->analyze_stmt);
-
+ if (extended)
+ {
+ char buff[1024];
+ String str(buff,(uint32) sizeof(buff), system_charset_info);
+ str.length(0);
+ /*
+ The warnings system requires input in utf8, @see
+ mysqld_show_warnings().
+ */
+ lex->unit.print(&str, QT_EXPLAIN_EXTENDED);
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_YES, str.c_ptr_safe());
+ }
+ }
if (res)
result->abort_result_set();
else
@@ -212,6 +226,7 @@ int Explain_query::send_explain(THD *thd)
}
+
/*
The main entry point to print EXPLAIN of the entire query
*/
@@ -2027,7 +2042,8 @@ void Explain_table_access::print_explain_json(Explain_query *query,
if (is_analyze)
{
- //writer->add_member("r_loops").add_ll(jbuf_tracker.get_loops());
+ writer->add_member("r_loops").add_ll(jbuf_loops_tracker.get_loops());
+
writer->add_member("r_filtered");
if (jbuf_tracker.has_scans())
writer->add_double(jbuf_tracker.get_filtered_after_where()*100.0);
@@ -2036,6 +2052,26 @@ void Explain_table_access::print_explain_json(Explain_query *query,
writer->add_member("r_unpack_time_ms");
writer->add_double(jbuf_unpack_tracker.get_time_ms());
+
+ writer->add_member("r_other_time_ms").
+ add_double(jbuf_extra_time_tracker.get_time_ms());
+ /*
+        effective_rows is the average number of matches we get for an
+        incoming row. The row is stored in the join buffer and then read
+        from there, possibly multiple times. We can't count this number
+        directly; infer it as:
+        total_number_of_row_combinations_considered / r_loops.
+ */
+ writer->add_member("r_effective_rows");
+ if (jbuf_loops_tracker.has_scans())
+ {
+ double loops= (double)jbuf_loops_tracker.get_loops();
+ double row_combinations= (double)jbuf_tracker.r_rows;
+ writer->add_double(row_combinations / loops);
+ }
+ else
+ writer->add_null();
+
}
}
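A worked miniature of the inference (numbers hypothetical): if the join
buffer was scanned r_loops = 4 times and r_rows = 1000 row combinations were
considered in total, each incoming row matched 1000 / 4 = 250 buffered rows
on average.

double effective_rows(double row_combinations, double loops)
{
  return row_combinations / loops;   // guarded by has_scans() above
}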
diff --git a/sql/sql_explain.h b/sql/sql_explain.h
index 03eb8821a34..2ab2775820b 100644
--- a/sql/sql_explain.h
+++ b/sql/sql_explain.h
@@ -495,7 +495,7 @@ public:
bool is_analyze);
/* Send tabular EXPLAIN to the client */
- int send_explain(THD *thd);
+ int send_explain(THD *thd, bool extended);
/* Return tabular EXPLAIN output as a text string */
bool print_explain_str(THD *thd, String *out_str, bool is_analyze);
@@ -882,9 +882,22 @@ public:
Exec_time_tracker op_tracker;
Gap_time_tracker extra_time_tracker;
+ /* When using join buffer: Track the reads from join buffer */
Table_access_tracker jbuf_tracker;
+
+ /* When using join buffer: time spent unpacking rows from the join buffer */
Time_and_counter_tracker jbuf_unpack_tracker;
-
+
+ /*
+ When using join buffer: time spent after unpacking rows from the join
+ buffer. This will capture the time spent checking the Join Condition:
+ the condition that depends on this table and preceding tables.
+ */
+ Gap_time_tracker jbuf_extra_time_tracker;
+
+ /* When using join buffer: Track the number of incoming record combinations */
+ Counter_tracker jbuf_loops_tracker;
+
Explain_rowid_filter *rowid_filter;
int print_explain(select_result_sink *output, uint8 explain_flags,
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 6e042d25805..475bca62f02 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -879,7 +879,8 @@ bool mysql_insert(THD *thd, TABLE_LIST *table_list,
save_insert_query_plan(thd, table_list);
if (thd->lex->describe)
{
- retval= thd->lex->explain->send_explain(thd);
+ bool extended= thd->lex->describe & DESCRIBE_EXTENDED;
+ retval= thd->lex->explain->send_explain(thd, extended);
goto abort;
}
@@ -2269,6 +2270,7 @@ int check_that_all_fields_are_given_values(THD *thd, TABLE *entry, TABLE_LIST *t
for (Field **field=entry->field ; *field ; field++)
{
if (!bitmap_is_set(write_set, (*field)->field_index) &&
+ !(*field)->vcol_info &&
has_no_default_value(thd, *field, table_list))
err=1;
}
@@ -4221,6 +4223,7 @@ bool select_insert::store_values(List<Item> &values)
DBUG_ENTER("select_insert::store_values");
bool error;
+ table->reset_default_fields();
if (fields->elements)
error= fill_record_n_invoke_before_triggers(thd, table, *fields, values,
true, TRG_EVENT_INSERT);
diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc
index 3c19674ff96..413849db6b2 100644
--- a/sql/sql_join_cache.cc
+++ b/sql/sql_join_cache.cc
@@ -1620,7 +1620,7 @@ bool JOIN_CACHE::get_record()
pos+= referenced_fields*size_of_fld_ofs;
if (prev_cache)
prev_cache->get_record_by_pos(prev_rec_ptr);
- }
+ }
ANALYZE_STOP_TRACKING(thd(), join_tab->jbuf_unpack_tracker);
return res;
}
@@ -2397,7 +2397,9 @@ enum_nested_loop_state JOIN_CACHE::join_matching_records(bool skip_last)
(join_tab->first_inner && !not_exists_opt_is_applicable) ||
!skip_next_candidate_for_match(rec_ptr))
{
- read_next_candidate_for_match(rec_ptr);
+ ANALYZE_START_TRACKING(join->thd, join_tab->jbuf_unpack_tracker);
+ read_next_candidate_for_match(rec_ptr);
+ ANALYZE_STOP_TRACKING(join->thd, join_tab->jbuf_unpack_tracker);
rc= generate_full_extensions(rec_ptr);
if (rc != NESTED_LOOP_OK && rc != NESTED_LOOP_NO_MORE_ROWS)
goto finish;
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 2b175823ad5..5206a43f5eb 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -40,8 +40,6 @@
#ifdef WITH_WSREP
#include "mysql/service_wsrep.h"
#endif
-#include "sql_update.h" // class Sql_cmd_update
-#include "sql_delete.h" // class Sql_cmd_delete
void LEX::parse_error(uint err_number)
{
@@ -862,7 +860,7 @@ void Lex_input_stream::body_utf8_start(THD *thd, const char *begin_ptr)
}
-size_t Lex_input_stream::get_body_utf8_maximum_length(THD *thd)
+size_t Lex_input_stream::get_body_utf8_maximum_length(THD *thd) const
{
/*
String literals can grow during escaping:
@@ -1299,6 +1297,8 @@ void LEX::start(THD *thd_arg)
frame_bottom_bound= NULL;
win_spec= NULL;
+ upd_del_where= NULL;
+
vers_conditions.empty();
period_conditions.empty();
@@ -1367,7 +1367,7 @@ Yacc_state::~Yacc_state()
}
int Lex_input_stream::find_keyword(Lex_ident_cli_st *kwd,
- uint len, bool function)
+ uint len, bool function) const
{
const char *tok= m_tok_start;
@@ -3034,6 +3034,7 @@ void st_select_lex::init_select()
in_funcs.empty();
curr_tvc_name= 0;
versioned_tables= 0;
+ is_tvc_wrapper= false;
nest_flags= 0;
item_list_usage= MARK_COLUMNS_READ;
}
@@ -3931,57 +3932,54 @@ LEX::LEX()
}
+bool LEX::can_be_merged()
+{
+ return unit.can_be_merged();
+}
+
+
/*
- Check whether the merging algorithm can be used on this VIEW
+ Check whether the merging algorithm can be used for this unit
SYNOPSIS
- LEX::can_be_merged()
+ st_select_lex_unit::can_be_merged()
DESCRIPTION
- We can apply merge algorithm if it is single SELECT view with
- subqueries only in WHERE clause (we do not count SELECTs of underlying
- views, and second level subqueries) and we have not grpouping, ordering,
- HAVING clause, aggregate functions, DISTINCT clause, LIMIT clause and
- several underlying tables.
+    We can apply the merge algorithm to a unit if it is a single SELECT with
+    subqueries only in the WHERE clause, in ON conditions or in the select
+    list (we do not count SELECTs of underlying views/derived tables/CTEs
+    and second-level subqueries) and it has no grouping, ordering, HAVING
+    clause, aggregate functions, DISTINCT clause or LIMIT clause.
RETURN
FALSE - only temporary table algorithm can be used
TRUE - merge algorithm can be used
*/
-bool LEX::can_be_merged()
+bool st_select_lex_unit::can_be_merged()
{
  // TODO: do not forget to implement the case when select_lex.table_list.elements==0
/* find non VIEW subqueries/unions */
- bool selects_allow_merge= (first_select_lex()->next_select() == 0 &&
- !(first_select_lex()->uncacheable &
- UNCACHEABLE_RAND));
- if (selects_allow_merge)
- {
- for (SELECT_LEX_UNIT *tmp_unit= first_select_lex()->first_inner_unit();
- tmp_unit;
- tmp_unit= tmp_unit->next_unit())
- {
- if (tmp_unit->first_select()->parent_lex == this &&
- (tmp_unit->item != 0 &&
- (tmp_unit->item->place() != IN_WHERE &&
- tmp_unit->item->place() != IN_ON &&
- tmp_unit->item->place() != SELECT_LIST)))
- {
- selects_allow_merge= 0;
- break;
- }
- }
- }
-
- return (selects_allow_merge &&
- first_select_lex()->group_list.elements == 0 &&
- first_select_lex()->having == 0 &&
- first_select_lex()->with_sum_func == 0 &&
- first_select_lex()->table_list.elements >= 1 &&
- !(first_select_lex()->options & SELECT_DISTINCT) &&
- first_select_lex()->limit_params.select_limit == 0);
+ st_select_lex *fs= first_select();
+
+ if (fs->next_select() ||
+ (fs->uncacheable & UNCACHEABLE_RAND) ||
+ (fs->options & SELECT_DISTINCT) ||
+ fs->group_list.elements || fs->having ||
+ fs->with_sum_func ||
+ fs->table_list.elements < 1 ||
+ fs->limit_params.select_limit)
+ return false;
+ for (SELECT_LEX_UNIT *tmp_unit= fs->first_inner_unit();
+ tmp_unit;
+ tmp_unit= tmp_unit->next_unit())
+ if ((tmp_unit->item != 0 &&
+ (tmp_unit->item->place() != IN_WHERE &&
+ tmp_unit->item->place() != IN_ON &&
+ tmp_unit->item->place() != SELECT_LIST)))
+ return false;
+ return true;
}
@@ -4028,9 +4026,6 @@ bool LEX::can_use_merged()
SYNOPSIS
LEX::can_not_use_merged()
- @param forced_no_merge_for_update_delete Set to 1 if we can't use merge with
- multiple-table updates/deletes
-
DESCRIPTION
Temporary table algorithm will be used on all SELECT levels for queries
listed here (see also LEX::can_use_merged()).
@@ -4040,7 +4035,7 @@ bool LEX::can_use_merged()
TRUE - VIEWs with MERGE algorithms can be used
*/
-bool LEX::can_not_use_merged(bool forced_no_merge_for_update_delete)
+bool LEX::can_not_use_merged()
{
switch (sql_command) {
case SQLCOM_CREATE_VIEW:
@@ -4053,30 +4048,6 @@ bool LEX::can_not_use_merged(bool forced_no_merge_for_update_delete)
case SQLCOM_SHOW_FIELDS:
return TRUE;
- case SQLCOM_UPDATE_MULTI:
- if (forced_no_merge_for_update_delete)
- return TRUE;
- /* Fall through */
-
- case SQLCOM_UPDATE:
- if (forced_no_merge_for_update_delete &&
- (((Sql_cmd_update *) m_sql_cmd)->is_multitable() ||
- query_tables->is_multitable()))
- return TRUE;
- return FALSE;
-
- case SQLCOM_DELETE_MULTI:
- if (forced_no_merge_for_update_delete)
- return TRUE;
- /* Fall through */
-
- case SQLCOM_DELETE:
- if (forced_no_merge_for_update_delete &&
- (((Sql_cmd_delete *) m_sql_cmd)->is_multitable() ||
- query_tables->is_multitable()))
- return TRUE;
- return FALSE;
-
default:
return FALSE;
}
@@ -9390,22 +9361,6 @@ bool LEX::add_grant_command(THD *thd, const List<LEX_COLUMN> &columns)
}
-Item *LEX::make_item_func_substr(THD *thd, Item *a, Item *b, Item *c)
-{
- return (thd->variables.sql_mode & MODE_ORACLE) ?
- new (thd->mem_root) Item_func_substr_oracle(thd, a, b, c) :
- new (thd->mem_root) Item_func_substr(thd, a, b, c);
-}
-
-
-Item *LEX::make_item_func_substr(THD *thd, Item *a, Item *b)
-{
- return (thd->variables.sql_mode & MODE_ORACLE) ?
- new (thd->mem_root) Item_func_substr_oracle(thd, a, b) :
- new (thd->mem_root) Item_func_substr(thd, a, b);
-}
-
-
Item *LEX::make_item_func_sysdate(THD *thd, uint fsp)
{
/*
@@ -9426,17 +9381,6 @@ Item *LEX::make_item_func_sysdate(THD *thd, uint fsp)
}
-Item *LEX::make_item_func_replace(THD *thd,
- Item *org,
- Item *find,
- Item *replace)
-{
- return (thd->variables.sql_mode & MODE_ORACLE) ?
- new (thd->mem_root) Item_func_replace_oracle(thd, org, find, replace) :
- new (thd->mem_root) Item_func_replace(thd, org, find, replace);
-}
-
-
bool SELECT_LEX::vers_push_field(THD *thd, TABLE_LIST *table,
const LEX_CSTRING field_name)
{
@@ -11918,6 +11862,13 @@ bool SELECT_LEX_UNIT::explainable() const
false;
}
+
+bool st_select_lex::is_query_topmost(THD *thd)
+{
+ return get_master() == &thd->lex->unit;
+}
+
+
/*
Determines whether the derived table was eliminated during
the call of eliminate_tables(JOIN *) made at the optimization stage
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index acf23d5ffa2..adb887be380 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -1044,6 +1044,8 @@ public:
bool set_lock_to_the_last_select(Lex_select_lock l);
+ bool can_be_merged();
+
friend class st_select_lex;
private:
@@ -1306,6 +1308,8 @@ public:
st_select_lex.
*/
uint curr_tvc_name;
+  /* true <=> this select was created as a TVC wrapper */
+ bool is_tvc_wrapper;
uint fields_in_window_functions;
uint insert_tables;
enum_parsing_place parsing_place; /* where we are parsing expression */
@@ -1390,6 +1394,7 @@ public:
return (st_select_lex_unit*) slave;
}
st_select_lex* outer_select();
+ bool is_query_topmost(THD *thd);
st_select_lex* next_select() { return (st_select_lex*) next; }
st_select_lex* next_select_in_list()
{
@@ -1476,6 +1481,10 @@ public:
}
bool setup_ref_array(THD *thd, uint order_group_num);
void print(THD *thd, String *str, enum_query_type query_type);
+ void print_item_list(THD *thd, String *str, enum_query_type query_type);
+ void print_set_clause(THD *thd, String *str, enum_query_type query_type);
+ void print_on_duplicate_key_clause(THD *thd, String *str,
+ enum_query_type query_type);
static void print_order(String *str,
ORDER *order,
enum_query_type query_type);
@@ -2548,7 +2557,7 @@ private:
Get the last character accepted.
@return the last character accepted.
*/
- unsigned char yyGetLast()
+ unsigned char yyGetLast() const
{
return m_ptr[-1];
}
@@ -2556,7 +2565,7 @@ private:
/**
Look at the next character to parse, but do not accept it.
*/
- unsigned char yyPeek()
+ unsigned char yyPeek() const
{
return m_ptr[0];
}
@@ -2565,7 +2574,7 @@ private:
Look ahead at some character to parse.
@param n offset of the character to look up
*/
- unsigned char yyPeekn(int n)
+ unsigned char yyPeekn(int n) const
{
return m_ptr[n];
}
@@ -2626,7 +2635,7 @@ private:
@param n number of characters expected
@return true if there are less than n characters to parse
*/
- bool eof(int n)
+ bool eof(int n) const
{
return ((m_ptr + n) >= m_end_of_query);
}
@@ -2657,10 +2666,10 @@ private:
Get the maximum length of the utf8-body buffer.
The utf8 body can grow because of the character set conversion and escaping.
*/
- size_t get_body_utf8_maximum_length(THD *thd);
+ size_t get_body_utf8_maximum_length(THD *thd) const;
/** Get the length of the current token, in the raw buffer. */
- uint yyLength()
+ uint yyLength() const
{
/*
The assumption is that the lexical analyser is always 1 character ahead,
@@ -2685,31 +2694,31 @@ public:
End of file indicator for the query text to parse.
@return true if there are no more characters to parse
*/
- bool eof()
+ bool eof() const
{
return (m_ptr >= m_end_of_query);
}
/** Get the raw query buffer. */
- const char *get_buf()
+ const char *get_buf() const
{
return m_buf;
}
/** Get the pre-processed query buffer. */
- const char *get_cpp_buf()
+ const char *get_cpp_buf() const
{
return m_cpp_buf;
}
/** Get the end of the raw query buffer. */
- const char *get_end_of_query()
+ const char *get_end_of_query() const
{
return m_end_of_query;
}
/** Get the token start position, in the raw buffer. */
- const char *get_tok_start()
+ const char *get_tok_start() const
{
return has_lookahead() ? m_tok_start_prev : m_tok_start;
}
@@ -2720,25 +2729,25 @@ public:
}
/** Get the token end position, in the raw buffer. */
- const char *get_tok_end()
+ const char *get_tok_end() const
{
return m_tok_end;
}
/** Get the current stream pointer, in the raw buffer. */
- const char *get_ptr()
+ const char *get_ptr() const
{
return m_ptr;
}
/** Get the token start position, in the pre-processed buffer. */
- const char *get_cpp_tok_start()
+ const char *get_cpp_tok_start() const
{
return has_lookahead() ? m_cpp_tok_start_prev : m_cpp_tok_start;
}
/** Get the token end position, in the pre-processed buffer. */
- const char *get_cpp_tok_end()
+ const char *get_cpp_tok_end() const
{
return m_cpp_tok_end;
}
@@ -2747,7 +2756,7 @@ public:
Get the token end position in the pre-processed buffer,
with trailing spaces removed.
*/
- const char *get_cpp_tok_end_rtrim()
+ const char *get_cpp_tok_end_rtrim() const
{
const char *p;
for (p= m_cpp_tok_end;
@@ -2758,7 +2767,7 @@ public:
}
/** Get the current stream pointer, in the pre-processed buffer. */
- const char *get_cpp_ptr()
+ const char *get_cpp_ptr() const
{
return m_cpp_ptr;
}
@@ -2767,7 +2776,7 @@ public:
Get the current stream pointer, in the pre-processed buffer,
    with trailing spaces removed.
*/
- const char *get_cpp_ptr_rtrim()
+ const char *get_cpp_ptr_rtrim() const
{
const char *p;
for (p= m_cpp_ptr;
@@ -2777,13 +2786,13 @@ public:
return p;
}
/** Get the utf8-body string. */
- const char *get_body_utf8_str()
+ const char *get_body_utf8_str() const
{
return m_body_utf8;
}
/** Get the utf8-body length. */
- size_t get_body_utf8_length()
+ size_t get_body_utf8_length() const
{
return (size_t) (m_body_utf8_ptr - m_body_utf8);
}
@@ -2819,7 +2828,7 @@ private:
bool consume_comment(int remaining_recursions_permitted);
int lex_one_token(union YYSTYPE *yylval, THD *thd);
- int find_keyword(Lex_ident_cli_st *str, uint len, bool function);
+ int find_keyword(Lex_ident_cli_st *str, uint len, bool function) const;
LEX_CSTRING get_token(uint skip, uint length);
int scan_ident_sysvar(THD *thd, Lex_ident_cli_st *str);
int scan_ident_start(THD *thd, Lex_ident_cli_st *str);
@@ -3610,6 +3619,8 @@ public:
Window_frame_bound *frame_bottom_bound;
Window_spec *win_spec;
+ Item *upd_del_where;
+
/* System Versioning */
vers_select_conds_t vers_conditions;
vers_select_conds_t period_conditions;
@@ -3691,7 +3702,7 @@ public:
bool can_be_merged();
bool can_use_merged();
- bool can_not_use_merged(bool no_update_or_delete);
+ bool can_not_use_merged();
bool only_view_structure();
bool need_correct_ident();
uint8 get_effective_with_check(TABLE_LIST *view);
@@ -4167,9 +4178,6 @@ public:
Item *create_item_query_expression(THD *thd, st_select_lex_unit *unit);
- Item *make_item_func_replace(THD *thd, Item *org, Item *find, Item *replace);
- Item *make_item_func_substr(THD *thd, Item *a, Item *b, Item *c);
- Item *make_item_func_substr(THD *thd, Item *a, Item *b);
Item *make_item_func_sysdate(THD *thd, uint fsp);
Item *make_item_func_call_generic(THD *thd, Lex_ident_cli_st *db,
Lex_ident_cli_st *name, List<Item> *args);
diff --git a/sql/sql_locale.cc b/sql/sql_locale.cc
index eb5e3fbbc5a..713ee1fe476 100644
--- a/sql/sql_locale.cc
+++ b/sql/sql_locale.cc
@@ -29,7 +29,7 @@
enum err_msgs_index
{
- en_US= 0, cs_CZ, da_DK, nl_NL, et_EE, fr_FR, de_DE, el_GR, hu_HU, it_IT,
+ en_US= 0, zh_CN, cs_CZ, da_DK, nl_NL, et_EE, fr_FR, de_DE, el_GR, hu_HU, it_IT,
ja_JP, ko_KR, no_NO, nn_NO, pl_PL, pt_PT, ro_RO, ru_RU, sr_RS, sk_SK,
es_ES, sv_SE, uk_UA, hi_IN, ka_GE
} ERR_MSGS_INDEX;
@@ -38,6 +38,7 @@ enum err_msgs_index
MY_LOCALE_ERRMSGS global_errmsgs[]=
{
{"english", NULL},
+ {"chinese", NULL},
{"czech", NULL},
{"danish", NULL},
{"dutch", NULL},
@@ -2096,7 +2097,7 @@ MY_LOCALE my_locale_zh_CN
'.', /* decimal point zh_CN */
',', /* thousands_sep zh_CN */
"\x03", /* grouping zh_CN */
- &global_errmsgs[en_US]
+ &global_errmsgs[zh_CN]
);
/***** LOCALE END zh_CN *****/
@@ -3320,7 +3321,21 @@ MY_LOCALE my_locale_rm_CH
/***** LOCALE BEGIN ka_GE: Georgian - Georgia *****/
static const char *my_locale_month_names_ka_GE[13] =
- {"იანვარი","თებერვალი","მარტი","აპრილი","მაისი","ივნისი","ივლისი","სექტემბერი","ოქტომბერი","ნოემბერი","დეკემბერი", NullS };
+{
+ "იანვარი", // January
+ "თებერვალი", // February
+ "მარტი", // March
+ "აპრილი", // April
+ "მაისი", // May
+ "ივნისი", // June
+ "ივლისი", // July
+ "აგვისტო", // August
+ "სექტემბერი", // September
+ "ოქტომბერი", // October
+ "ნოემბერი", // November
+ "დეკემბერი", // December
+ NullS
+};
static const char *my_locale_ab_month_names_ka_GE[13] =
{"იან","თებ","მარ","აპრ","მაი","ივნ","ივლ","აგვ","სექტ","ოქტ","ნოე","დეკ", NullS };
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 67353a1f082..5dccd3c12ae 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1334,7 +1334,7 @@ dispatch_command_return do_command(THD *thd, bool blocking)
in wsrep_before_command().
*/
WSREP_LOG_THD(thd, "enter found BF aborted");
- DBUG_ASSERT(!thd->mdl_context.has_locks());
+ DBUG_ASSERT(!thd->mdl_context.has_transactional_locks());
DBUG_ASSERT(!thd->get_stmt_da()->is_set());
/* We let COM_QUIT and COM_STMT_CLOSE to execute even if wsrep aborted. */
if (command == COM_STMT_EXECUTE)
@@ -4455,7 +4455,10 @@ mysql_execute_command(THD *thd, bool is_called_from_prepared_stmt)
thd->protocol= save_protocol;
}
if (!res && thd->lex->analyze_stmt)
- res= thd->lex->explain->send_explain(thd);
+ {
+ bool extended= thd->lex->describe & DESCRIBE_EXTENDED;
+ res= thd->lex->explain->send_explain(thd, extended);
+ }
delete sel_result;
MYSQL_INSERT_DONE(res, (ulong) thd->get_row_count_func());
/*
@@ -4635,7 +4638,10 @@ mysql_execute_command(THD *thd, bool is_called_from_prepared_stmt)
thd->protocol= save_protocol;
}
if (!res && (explain || lex->analyze_stmt))
- res= thd->lex->explain->send_explain(thd);
+ {
+ bool extended= thd->lex->describe & DESCRIBE_EXTENDED;
+ res= thd->lex->explain->send_explain(thd, extended);
+ }
/* revert changes for SP */
MYSQL_INSERT_SELECT_DONE(res, (ulong) thd->get_row_count_func());
@@ -6033,7 +6039,10 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables)
thd->protocol= save_protocol;
}
if (!res)
- res= thd->lex->explain->send_explain(thd);
+ {
+ bool extended= thd->lex->describe & DESCRIBE_EXTENDED;
+ res= thd->lex->explain->send_explain(thd, extended);
+ }
}
}
}
@@ -9039,7 +9048,9 @@ static my_bool kill_threads_callback(THD *thd, kill_threads_callback_arg *arg)
if (!(arg->thd->security_ctx->master_access &
PRIV_KILL_OTHER_USER_PROCESS) &&
!arg->thd->security_ctx->user_matches(thd->security_ctx))
- return 1;
+ {
+ return MY_TEST(arg->thd->security_ctx->master_access & PROCESS_ACL);
+ }
if (!arg->threads_to_kill.push_back(thd, arg->thd->mem_root))
{
mysql_mutex_lock(&thd->LOCK_thd_kill); // Lock from delete
@@ -9159,7 +9170,10 @@ sql_kill_user(THD *thd, LEX_USER *user, killed_state state)
my_ok(thd, rows);
break;
case ER_KILL_DENIED_ERROR:
- my_error(error, MYF(0), (long long) thd->thread_id);
+ char buf[DEFINER_LENGTH+1];
+ strxnmov(buf, sizeof(buf)-1, user->user.str, "@", user->host.str, NULL);
+ my_printf_error(ER_KILL_DENIED_ERROR, ER_THD(thd, ER_CANNOT_USER), MYF(0),
+ "KILL USER", buf);
break;
case ER_OUT_OF_RESOURCES:
default:
diff --git a/sql/sql_priv.h b/sql/sql_priv.h
index 6f4eff4880c..5f2074851e1 100644
--- a/sql/sql_priv.h
+++ b/sql/sql_priv.h
@@ -238,38 +238,39 @@
#define OPTIMIZER_SWITCH_USE_ROWID_FILTER (1ULL << 33)
#define OPTIMIZER_SWITCH_COND_PUSHDOWN_FROM_HAVING (1ULL << 34)
#define OPTIMIZER_SWITCH_NOT_NULL_RANGE_SCAN (1ULL << 35)
-
-#define OPTIMIZER_SWITCH_DEFAULT (OPTIMIZER_SWITCH_INDEX_MERGE | \
- OPTIMIZER_SWITCH_INDEX_MERGE_UNION | \
- OPTIMIZER_SWITCH_INDEX_MERGE_SORT_UNION | \
- OPTIMIZER_SWITCH_INDEX_MERGE_INTERSECT | \
- OPTIMIZER_SWITCH_INDEX_COND_PUSHDOWN | \
- OPTIMIZER_SWITCH_DERIVED_MERGE | \
- OPTIMIZER_SWITCH_DERIVED_WITH_KEYS | \
- OPTIMIZER_SWITCH_TABLE_ELIMINATION | \
- OPTIMIZER_SWITCH_EXTENDED_KEYS | \
- OPTIMIZER_SWITCH_IN_TO_EXISTS | \
- OPTIMIZER_SWITCH_MATERIALIZATION | \
- OPTIMIZER_SWITCH_PARTIAL_MATCH_ROWID_MERGE|\
- OPTIMIZER_SWITCH_PARTIAL_MATCH_TABLE_SCAN|\
- OPTIMIZER_SWITCH_OUTER_JOIN_WITH_CACHE | \
- OPTIMIZER_SWITCH_SEMIJOIN_WITH_CACHE | \
- OPTIMIZER_SWITCH_JOIN_CACHE_INCREMENTAL | \
- OPTIMIZER_SWITCH_JOIN_CACHE_HASHED | \
- OPTIMIZER_SWITCH_JOIN_CACHE_BKA | \
- OPTIMIZER_SWITCH_SUBQUERY_CACHE | \
- OPTIMIZER_SWITCH_SEMIJOIN | \
- OPTIMIZER_SWITCH_FIRSTMATCH | \
- OPTIMIZER_SWITCH_LOOSE_SCAN | \
- OPTIMIZER_SWITCH_EXISTS_TO_IN | \
- OPTIMIZER_SWITCH_ORDERBY_EQ_PROP | \
- OPTIMIZER_SWITCH_COND_PUSHDOWN_FOR_DERIVED | \
- OPTIMIZER_SWITCH_SPLIT_MATERIALIZED | \
- OPTIMIZER_SWITCH_COND_PUSHDOWN_FOR_SUBQUERY | \
- OPTIMIZER_SWITCH_USE_ROWID_FILTER | \
- OPTIMIZER_SWITCH_COND_PUSHDOWN_FROM_HAVING | \
- OPTIMIZER_SWITCH_OPTIMIZE_JOIN_BUFFER_SIZE)
-
+#define OPTIMIZER_SWITCH_HASH_JOIN_CARDINALITY (1ULL << 36)
+
+#define OPTIMIZER_SWITCH_DEFAULT (OPTIMIZER_SWITCH_INDEX_MERGE | \
+ OPTIMIZER_SWITCH_INDEX_MERGE_UNION | \
+ OPTIMIZER_SWITCH_INDEX_MERGE_SORT_UNION | \
+ OPTIMIZER_SWITCH_INDEX_MERGE_INTERSECT | \
+ OPTIMIZER_SWITCH_INDEX_COND_PUSHDOWN | \
+ OPTIMIZER_SWITCH_DERIVED_MERGE | \
+ OPTIMIZER_SWITCH_DERIVED_WITH_KEYS | \
+ OPTIMIZER_SWITCH_TABLE_ELIMINATION | \
+ OPTIMIZER_SWITCH_EXTENDED_KEYS | \
+ OPTIMIZER_SWITCH_IN_TO_EXISTS | \
+ OPTIMIZER_SWITCH_MATERIALIZATION | \
+ OPTIMIZER_SWITCH_PARTIAL_MATCH_ROWID_MERGE|\
+ OPTIMIZER_SWITCH_PARTIAL_MATCH_TABLE_SCAN|\
+ OPTIMIZER_SWITCH_OUTER_JOIN_WITH_CACHE | \
+ OPTIMIZER_SWITCH_SEMIJOIN_WITH_CACHE | \
+ OPTIMIZER_SWITCH_JOIN_CACHE_INCREMENTAL | \
+ OPTIMIZER_SWITCH_JOIN_CACHE_HASHED | \
+ OPTIMIZER_SWITCH_JOIN_CACHE_BKA | \
+ OPTIMIZER_SWITCH_SUBQUERY_CACHE | \
+ OPTIMIZER_SWITCH_SEMIJOIN | \
+ OPTIMIZER_SWITCH_FIRSTMATCH | \
+ OPTIMIZER_SWITCH_LOOSE_SCAN | \
+ OPTIMIZER_SWITCH_EXISTS_TO_IN | \
+ OPTIMIZER_SWITCH_ORDERBY_EQ_PROP | \
+ OPTIMIZER_SWITCH_COND_PUSHDOWN_FOR_DERIVED | \
+ OPTIMIZER_SWITCH_SPLIT_MATERIALIZED | \
+ OPTIMIZER_SWITCH_COND_PUSHDOWN_FOR_SUBQUERY |\
+ OPTIMIZER_SWITCH_USE_ROWID_FILTER | \
+ OPTIMIZER_SWITCH_COND_PUSHDOWN_FROM_HAVING | \
+ OPTIMIZER_SWITCH_OPTIMIZE_JOIN_BUFFER_SIZE |\
+ OPTIMIZER_SWITCH_HASH_JOIN_CARDINALITY)
/*
Replication uses 8 bytes to store SQL_MODE in the binary log. The day you
use strictly more than 64 bits by adding one more define above, you should
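
The new OPTIMIZER_SWITCH_HASH_JOIN_CARDINALITY bit is both defined and added
to the default mask above. A minimal sketch of how such a bit is consumed in
the optimizer; optimizer_flag() is the existing helper in sql_priv.h, and the
surrounding condition is illustrative only:

    // Gate the EITS-based hash join fanout estimate (see
    // hash_join_fanout() in sql_select.cc below) on the new switch bit.
    if (optimizer_flag(thd, OPTIMIZER_SWITCH_HASH_JOIN_CARDINALITY))
    {
      /* use hash_join_fanout() instead of the HASH_FANOUT constant */
    }
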
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 80a746f32b7..dc27ab9ff8b 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -432,7 +432,7 @@ static int send_file(THD *thd)
/**
Internal to mysql_binlog_send() routine that recalculates checksum for
- 1. FD event (asserted) that needs additional arranment prior sending to slave.
+ 1. FD event (asserted) that needs additional arrangement prior to sending to the slave.
2. Start_encryption_log_event whose Ignored flag is set
TODO DBUG_ASSERT can be removed if this function is used for more general cases
*/
diff --git a/sql/sql_schema.cc b/sql/sql_schema.cc
index 0bf4a63c2f8..f08204d272d 100644
--- a/sql/sql_schema.cc
+++ b/sql/sql_schema.cc
@@ -32,6 +32,14 @@ public:
return thd->type_handler_for_datetime();
return src;
}
+
+ Item *make_item_func_replace(THD *thd,
+ Item *subj,
+ Item *find,
+ Item *replace) const;
+ Item *make_item_func_substr(THD *thd,
+ const Lex_substring_spec_st &spec) const;
+ Item *make_item_func_trim(THD *thd, const Lex_trim_st &spec) const;
};
@@ -78,3 +86,56 @@ Schema *Schema::find_implied(THD *thd)
return &maxdb_schema;
return &mariadb_schema;
}
+
+
+Item *Schema::make_item_func_replace(THD *thd,
+ Item *subj,
+ Item *find,
+ Item *replace) const
+{
+ return new (thd->mem_root) Item_func_replace(thd, subj, find, replace);
+}
+
+
+Item *Schema::make_item_func_substr(THD *thd,
+ const Lex_substring_spec_st &spec) const
+{
+ return spec.m_for ?
+ new (thd->mem_root) Item_func_substr(thd, spec.m_subject, spec.m_from,
+ spec.m_for) :
+ new (thd->mem_root) Item_func_substr(thd, spec.m_subject, spec.m_from);
+}
+
+
+Item *Schema::make_item_func_trim(THD *thd, const Lex_trim_st &spec) const
+{
+ return spec.make_item_func_trim_std(thd);
+}
+
+
+Item *Schema_oracle::make_item_func_replace(THD *thd,
+ Item *subj,
+ Item *find,
+ Item *replace) const
+{
+ return new (thd->mem_root) Item_func_replace_oracle(thd, subj, find, replace);
+}
+
+
+Item *Schema_oracle::make_item_func_substr(THD *thd,
+ const Lex_substring_spec_st &spec) const
+{
+ return spec.m_for ?
+ new (thd->mem_root) Item_func_substr_oracle(thd, spec.m_subject,
+ spec.m_from,
+ spec.m_for) :
+ new (thd->mem_root) Item_func_substr_oracle(thd, spec.m_subject,
+ spec.m_from);
+}
+
+
+Item *Schema_oracle::make_item_func_trim(THD *thd,
+ const Lex_trim_st &spec) const
+{
+ return spec.make_item_func_trim_oracle(thd);
+}
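
These virtual builders move dialect selection out of the grammar: sql_yacc.yy
can construct REPLACE/SUBSTR/TRIM items without testing sql_mode itself. A
minimal usage sketch, assuming the Schema::find_implied() shown above and
argument items already built by the parser (variable names are illustrative):

    // Under sql_mode=ORACLE find_implied() returns the Oracle schema,
    // so this yields Item_func_replace_oracle; otherwise Item_func_replace.
    Item *item= Schema::find_implied(thd)->
                  make_item_func_replace(thd, subj, find, replace);
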
diff --git a/sql/sql_schema.h b/sql/sql_schema.h
index 37f8ceb7250..0258ff2dc97 100644
--- a/sql/sql_schema.h
+++ b/sql/sql_schema.h
@@ -33,6 +33,17 @@ public:
{
return src;
}
+
+ // Builders for native SQL functions with a special syntax in sql_yacc.yy
+ virtual Item *make_item_func_replace(THD *thd,
+ Item *subj,
+ Item *find,
+ Item *replace) const;
+ virtual Item *make_item_func_substr(THD *thd,
+ const Lex_substring_spec_st &spec) const;
+
+ virtual Item *make_item_func_trim(THD *thd, const Lex_trim_st &spec) const;
+
/*
For now we have *hard-coded* compatibility schemas:
schema_mariadb, schema_oracle, schema_maxdb.
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index e98121231a7..4501168ec72 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -276,10 +276,12 @@ static bool find_field_in_item_list (Field *field, void *data);
static bool find_field_in_order_list (Field *field, void *data);
int create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort);
static int remove_dup_with_compare(THD *thd, TABLE *entry, Field **field,
- Item *having);
+ SORT_FIELD *sortorder, ulong keylength,
+ Item *having);
static int remove_dup_with_hash_index(THD *thd,TABLE *table,
- uint field_count, Field **first_field,
- ulong key_length,Item *having);
+ uint field_count, Field **first_field,
+ SORT_FIELD *sortorder,
+ ulong key_length,Item *having);
static bool cmp_buffer_with_ref(THD *thd, TABLE *table, TABLE_REF *tab_ref);
static bool setup_new_fields(THD *thd, List<Item> &fields,
List<Item> &all_fields, ORDER *new_order);
@@ -339,6 +341,9 @@ static Item **get_sargable_cond(JOIN *join, TABLE *table);
bool is_eq_cond_injected_for_split_opt(Item_func_eq *eq_item);
+void print_list_item(String *str, List_item *list,
+ enum_query_type query_type);
+
static
bool build_notnull_conds_for_range_scans(JOIN *join, COND *cond,
table_map allowed);
@@ -442,6 +447,7 @@ POSITION::POSITION()
key= 0;
forced_index= 0;
use_join_buffer= 0;
+ firstmatch_with_join_buf= false;
sj_strategy= SJ_OPT_NONE;
n_sj_tables= 0;
spl_plan= 0;
@@ -5932,7 +5938,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
/*
Perform range analysis if there are keys it could use (1).
Don't do range analysis for materialized subqueries (2).
- Don't do range analysis for materialized derived tables (3)
+ Don't do range analysis for materialized derived tables/views (3)
*/
if ((!s->const_keys.is_clear_all() ||
!bitmap_is_clear_all(&s->table->cond_set)) && // (1)
@@ -7764,6 +7770,7 @@ void set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key)
join->positions[idx].records_init=1.0; /* This is a const table */
join->positions[idx].cond_selectivity= 1.0;
join->positions[idx].ref_depend_map= 0;
+ join->positions[idx].partial_join_cardinality= 1;
// join->positions[idx].loosescan_key= MAX_KEY; /* Not a LooseScan */
join->positions[idx].sj_strategy= SJ_OPT_NONE;
@@ -7781,6 +7788,7 @@ void set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key)
}
join->best_ref[idx]=table;
join->positions[idx].spl_plan= 0;
+ join->positions[idx].spl_pd_boundary= 0;
}
@@ -7795,7 +7803,6 @@ void set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key)
@return 0.0 No matching rows
@return >= 1.0 Number of expected matching rows
- @details
Estimate how many records we will get if we
- read the given table with its "independent" access method (either quick
select or full table/index scan),
@@ -7808,7 +7815,7 @@ void set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key)
*/
static double apply_selectivity_for_table(JOIN_TAB *s,
- uint use_cond_selectivity)
+ uint use_cond_selectivity)
{
double dbl_records;
@@ -7869,8 +7876,7 @@ static double apply_selectivity_for_table(JOIN_TAB *s,
This heuristic is supposed to force tables used in exprZ to be before
this table in join order.
*/
-
-static double use_found_constraint(double records)
+inline double use_found_constraint(double records)
{
records-= records/4;
return records ? MY_MAX(records, MIN_ROWS_AFTER_FILTERING) : 0.0;
@@ -8060,6 +8066,155 @@ apply_filter(THD *thd, TABLE *table, ALL_READ_COST *cost,
}
+/*
+ @brief
+ Compute the fanout of a hash join operation using EITS data
+
+ @param join JOIN structure
+ @param tab JOIN_TAB for the current table
+ @param remaining_tables Map of tables not yet accessible
+ @param rnd_records Number of accepted rows in the table, after taking
+ selectivity into account.
+ @param hj_start_key Pointer to hash key
+ @param stats_found Is set to 1 if we found any usable hash key part
+ with statistics from analyze.
+*/
+
+double hash_join_fanout(JOIN *join, JOIN_TAB *tab, table_map remaining_tables,
+ double rnd_records, KEYUSE *hj_start_key,
+ bool *stats_found)
+{
+ THD *thd= join->thd;
+ /*
+ Before doing the hash join, we will scan the table and apply the local part
+ of the WHERE condition. This will produce rnd_records.
+
+ The EITS statistics describe the entire table. Calling
+
+ table->field[N]->get_avg_frequency()
+
+ produces average #rows in the table with some value.
+
+ What happens if we filter out rows so that rnd_records rows are left?
+ Something between the two outcomes:
+ A. filtering removes a fraction of rows for each value:
+ avg_frequency=avg_frequency * condition_selectivity
+
+ B. filtering removes entire groups of rows with the same value, but
+ the remaining groups remain of the same size.
+
+ We make the pessimistic choice and assume B.
+ We also handle an edge case: if rnd_records is less than avg_frequency,
+ assume we'll get rnd_records rows with the same value, and return
+ rnd_records as the fanout estimate.
+ */
+ double min_freq= (double) tab->table->stat_records();
+ bool found_not_usable_field= 0;
+ bool found_usable_field __attribute__((unused))= 0;
+ DBUG_ENTER("hash_join_fanout");
+
+ Json_writer_object trace_obj(thd, "hash_join_cardinality");
+
+ /*
+ There can be multiple KEYUSE referring to same or different columns
+
+ KEYUSE(tbl.col1 = ...)
+ KEYUSE(tbl.col1 = ...)
+ KEYUSE(tbl.col2 = ...)
+
+ Hash join code can use multiple columns: (col1, col2) for joining.
+ We need n_distinct({col1, col2}).
+
+ EITS only has statistics on individual columns: n_distinct(col1),
+ n_distinct(col2).
+
+ Our current solution is to be very conservative and use the selectivity
+ of the one column with the lowest avg_frequency.
+
+ In the future, we should use an approach that cautiously takes multiple
+ KEYUSEs into account: either multiply by the number of equalities, or by
+ the sqrt of the second most selective equality.
+ */
+ Json_writer_array trace_arr(thd, "hash_join_columns");
+ for (KEYUSE *keyuse= hj_start_key;
+ keyuse->table == tab->table && is_hash_join_key_no(keyuse->key);
+ keyuse++)
+ {
+ if (!(remaining_tables & keyuse->used_tables) &&
+ (!keyuse->validity_ref || *keyuse->validity_ref) &&
+ tab->access_from_tables_is_allowed(keyuse->used_tables,
+ join->sjm_lookup_tables))
+ {
+ Field *field= tab->table->field[keyuse->keypart];
+ found_usable_field= 1;
+ if (is_eits_usable(field))
+ {
+ double freq= field->read_stats->get_avg_frequency();
+
+ Json_writer_object trace_field(thd);
+ trace_field.add("field",field->field_name.str).
+ add("avg_frequency", freq);
+ if (freq < min_freq)
+ min_freq= freq;
+ *stats_found= 1;
+ continue;
+ }
+ }
+ if (!keyuse->validity_ref || *keyuse->validity_ref)
+ found_not_usable_field= 1;
+ }
+ /* Ensure that some part of hash_key is usable */
+ DBUG_ASSERT(found_usable_field);
+
+ trace_arr.end();
+ if (found_not_usable_field)
+ {
+ /*
+ We did not have data for all key fields. Assume that the hash
+ will at least limit the number of matched rows to HASH_FANOUT.
+ This makes the cost the same as with 'hash_join_cardinality=off'
+ in the case when the tables have not been analyzed.
+
+ However, it may cause problems when min_freq is higher than
+ HASH_FANOUT as the optimizer will then assume it is better to
+ put the table earlier in the plan when all key parts are not
+ usable.
+ Note that min_freq can become less than 1.0. This is intentional
+ as it matches what happens if OPTIMIZER_SWITCH_HASH_JOIN_CARDINALITY
+ is not used.
+ */
+ double max_expected_records= rnd_records * HASH_FANOUT;
+ set_if_smaller(min_freq, max_expected_records);
+ trace_obj.add("using_default_hash_fanout", HASH_FANOUT);
+ }
+ else
+ {
+ /*
+ Before joining the table with the contents of join buffer, we will
+ use the quick select and/or apply the table condition.
+
+ This will reduce the number of rows joined to rnd_records.
+ How will this affect n_distinct?
+ Depending on which rows are removed, this can either leave n_distinct as
+ is (for some value X, some rows are removed but some are left, leaving the
+ number of distinct values the same), or reduce n_distinct in proportion
+ with the fraction of rows removed (for some values of X, either all or
+ none of the rows with that value are removed).
+
+ We assume the latter: n_distinct is reduced in proportion to the
+ condition's and the quick select's selectivity.
+ This is in effect the same as applying apply_selectivity_for_table()
+ to min_freq, as we have already done to rnd_records.
+ */
+ min_freq*= rnd_records / tab->table->stat_records();
+ set_if_bigger(min_freq, HASH_FANOUT);
+ }
+
+ trace_obj.add("rows", min_freq);
+ DBUG_RETURN(min_freq);
+}
+
+
/**
Find the best access path for an extension of a partial execution
plan and add this path to the plan.
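
A worked example of the estimate above, with illustrative numbers: let
stat_records()=1000, let the cheapest usable column have avg_frequency=50,
and let the local WHERE keep rnd_records=100 rows. With statistics for all
key parts, min_freq starts at 50, is scaled by rnd_records/stat_records()=0.1
down to 5, and is finally clipped from below by HASH_FANOUT. In sketch form:

    // Hedged restatement of the "all key parts usable" branch:
    double min_freq= 50.0;                             // lowest avg_frequency
    min_freq*= 100.0 / 1000.0;                         // rnd_records/stat_records
    if (min_freq < HASH_FANOUT) min_freq= HASH_FANOUT; // set_if_bigger()
    // min_freq is now the fanout returned by hash_join_fanout()
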
@@ -8138,6 +8293,7 @@ best_access_path(JOIN *join,
ha_rows rec;
MY_BITMAP *eq_join_set= &s->table->eq_join_set;
KEYUSE *hj_start_key= 0;
+ table_map spl_pd_boundary= 0;
Loose_scan_opt loose_scan_opt;
struct best_plan best;
Json_writer_object trace_wrapper(thd, "best_access_path");
@@ -8179,15 +8335,17 @@ best_access_path(JOIN *join,
loose_scan_opt.init(join, s, remaining_tables);
if (table->is_splittable())
- best.spl_plan= s->choose_best_splitting(record_count, remaining_tables);
+ best.spl_plan= s->choose_best_splitting(idx,
+ remaining_tables,
+ &spl_pd_boundary);
if (unlikely(thd->trace_started()))
{
Json_writer_object info(thd, "plan_details");
info.add("record_count", record_count);
}
- Json_writer_array trace_paths(thd, "considered_access_paths");
+ Json_writer_array trace_paths(thd, "considered_access_paths");
if (s->keyuse)
{ /* Use key if possible */
KEYUSE *keyuse, *start_key= 0;
@@ -8762,26 +8920,34 @@ best_access_path(JOIN *join,
Here we have:
cost_of_fetching_1_row = tmp/rows
cost_of_fetching_1_key_tuple = keyread_tmp/rows
- access_cost_factor is the gain we expect for using rowid filter.
- An access_cost_factor of 1.0 means that keyread_tmp is 0
- (using key read is infinitely fast) and the gain for each row when
- using filter is great.
- An access_cost_factor if 0.0 means that using keyread has the
- same cost as reading rows, so there is no gain to get with
- filter.
- access_cost_factor should never be bigger than 1.0 (if all
- calculations are correct) as the cost of keyread should always be
- smaller than the cost of fetching the same number of keys + rows.
- access_cost_factor should also never be smaller than 0.0.
- The one exception is if number of records is 1 (eq_ref), then
- because we are comparing rows to cost of keyread_tmp, keyread_tmp
- is higher by 1.0. This is a big that will be fixed in a later
- version.
-
- If we have limited the cost (=tmp) of reading rows with 'worst_seek'
- we cannot use filters as the cost calculation below would cause
- tmp to become negative. The future resultion is to not limit
- cost with worst_seek.
+ Here's a more detailed explanation that uses the formulas behind
+ the call to filter->get_adjusted_gain(). The function
+ takes as a parameter the number of probes/look-ups into the filter
+ that is equal to the number of fetched key entries that is equal to
+ the number of row fetches when no filter is used (assuming no
+ index condition pushdown is employed for the used key access).
+ Let this number be N. Then the total gain from using the filter is
+ N*a_adj - b where b is the cost of building the filter and
+ a_adj is calculated as follows:
+ a - (1-access_cost_factor)*(1-s) =
+ (1+1_cond_eval_cost)*(1-s)-1_probe_cost - (1-access_cost_factor)*(1-s)
+ = (1-s)*(1_cond_eval_cost+access_cost_factor) - 1_probe_cost.
+ Here (1-s)*1_cond_eval_cost * N is the gain from checking fewer
+ conditions pushed into the table, 1_probe_cost*N is the cost of the
+ probes and (1-s) * access_cost_factor * N must be the gain from
+ accessing fewer rows.
+ It does not matter how we calculate the cost of N full row fetches
+ cost_of_fetching_N_rows or
+ how we calculate the cost of fetching N key entries
+ cost_of_fetching_N_key_entries:
+ the gain from fewer row fetches will be
+ (cost_of_fetching_N_rows - cost_of_fetching_N_key_entries) * (1-s)
+ and this should be equal to (1-s) * access_cost_factor * N.
+ Thus access_cost_factor must be calculated as
+ (cost_of_fetching_N_rows - cost_of_fetching_N_key_entries) / N.
+
+ For safety we clip cost_of_fetching_N_key_entries by the value
+ of cost_of_fetching_N_rows, though formally it's not necessary.
We cannot use filter with JT_EQ_REF as in this case 'tmp' is
number of rows from prev_record_read() and keyread_tmp is 0. These
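
Condensed, the derivation above fixes access_cost_factor so that the row-fetch
part of the gain comes out right. A small sketch restating it (the inputs are
the costs named in the comment; the helper itself is illustrative):

    // access_cost_factor from the comment's last two formulas,
    // including the safety clipping of the key-entry cost.
    static double access_cost_factor_of(double cost_of_fetching_N_rows,
                                        double cost_of_fetching_N_key_entries,
                                        double N)
    {
      if (cost_of_fetching_N_key_entries > cost_of_fetching_N_rows)
        cost_of_fetching_N_key_entries= cost_of_fetching_N_rows;
      return (cost_of_fetching_N_rows - cost_of_fetching_N_key_entries) / N;
    }
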
@@ -8794,6 +8960,7 @@ best_access_path(JOIN *join,
prev_record_count,
&records_best_filter);
set_if_smaller(best.records_out, records_best_filter);
+
if (filter)
filter= filter->apply_filter(thd, table, &tmp,
&records_after_filter,
@@ -8912,15 +9079,49 @@ best_access_path(JOIN *join,
(!(table->map & join->outer_join) ||
join->allowed_outer_join_with_cache)) // (2)
{
- double refills, row_copy_cost, cmp_time, cur_cost, records_table_filter;
+ Json_writer_object trace_access_hash(thd);
+ double refills, row_copy_cost, copy_cost, cur_cost, where_cost;
+ double matching_combinations, fanout, join_sel;
+ trace_access_hash.add("type", "hash");
+ trace_access_hash.add("index", "hj-key");
/* Estimate the cost of the hash join access to the table */
- double rnd_records= apply_selectivity_for_table(s, use_cond_selectivity);
- records_table_filter= ((found_constraint) ?
- use_found_constraint(rnd_records) :
- rnd_records);
+ double rnd_records;
+ bool stats_found= 0;
+ rnd_records= apply_selectivity_for_table(s, use_cond_selectivity);
DBUG_ASSERT(rnd_records <= rows2double(s->found_records) + 0.5);
- set_if_smaller(best.records_out, records_table_filter);
+ DBUG_ASSERT(hj_start_key);
+
+ fanout= rnd_records;
+ if (optimizer_flag(thd, OPTIMIZER_SWITCH_HASH_JOIN_CARDINALITY))
+ {
+ /*
+ Starting from this point, rnd_records should not be used anymore.
+ Use "fanout" for an estimate of # matching records.
+ */
+ fanout= hash_join_fanout(join, s, remaining_tables, rnd_records,
+ hj_start_key, &stats_found);
+ set_if_smaller(best.records_out, fanout);
+ join_sel= 1.0;
+ }
+ if (!stats_found)
+ {
+ /*
+ No OPTIMIZER_SWITCH_HASH_JOIN_CARDINALITY or no field statistics
+ found.
+
+ Take into account whether there are non-constant constraints used
+ with earlier tables in the WHERE expression.
+ If yes, this will reduce fanout by 25% (to rnd_records*3/4).
+ We estimate that there will be HASH_FANOUT (10%)
+ hash matches per row.
+ */
+ fanout= ((found_constraint) ?
+ use_found_constraint(rnd_records) :
+ rnd_records);
+ set_if_smaller(best.records_out, fanout * HASH_FANOUT);
+ join_sel= HASH_FANOUT;
+ }
/*
The following cost calculation is identical to the cost calculation for
@@ -8943,40 +9144,39 @@ best_access_path(JOIN *join,
(double) thd->variables.join_buff_size));
cur_cost= COST_MULT(cur_cost, refills);
+
/*
Cost of doing the hash lookup and check all matching rows with the
WHERE clause.
We assume here that, thanks to the hash, we don't have to compare all
- row combinations, only a HASH_FANOUT (10%) rows in the cache.
+ row combinations, only fanout (or HASH_FANOUT, 10%) rows in the cache.
*/
row_copy_cost= (ROW_COPY_COST_THD(thd) *
JOIN_CACHE_ROW_COPY_COST_FACTOR(thd));
- cmp_time= (record_count * row_copy_cost +
- rnd_records * record_count * HASH_FANOUT *
- ((idx - join->const_tables) * row_copy_cost +
- WHERE_COST_THD(thd)));
- cur_cost= COST_ADD(cur_cost, cmp_time);
+ matching_combinations= fanout * join_sel * record_count;
+ copy_cost= (record_count * row_copy_cost +
+ matching_combinations *
+ ((idx - join->const_tables) * row_copy_cost));
+ where_cost= matching_combinations * WHERE_COST_THD(thd);
+ cur_cost= COST_ADD(cur_cost, copy_cost + where_cost);
best.cost= cur_cost;
best.records_read= best.records_after_filter= rows2double(s->records);
- best.records= rnd_records;
-#ifdef NOT_YET
- set_if_smaller(best.records_out, rnd_records * HASH_FANOUT);
-#endif
+ best.records= rnd_records; // Records after where (Legacy value)
best.key= hj_start_key;
best.ref_depends_map= 0;
best.use_join_buffer= TRUE;
best.filter= 0;
best.type= JT_HASH;
best.refills= (ulonglong) ceil(refills);
- Json_writer_object trace_access_hash(thd);
if (unlikely(trace_access_hash.trace_started()))
trace_access_hash.
- add("type", "hash").
- add("index", "hj-key").
add("rows", rnd_records).
+ add("rows_after_hash", fanout * join_sel).
add("refills", refills).
- add("cost", best.cost).
+ add("jbuf_use_cost", copy_cost).
+ add("extra_cond_check_cost", where_cost).
+ add("total_cost", best.cost).
add("chosen", true);
}
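
To see what the split traces, take record_count=1000 rows in the join buffer
and fanout=5 with join_sel=1.0 (statistics were found): then
matching_combinations=5000, and the buffer-copy work and the WHERE checks are
now reported separately as jbuf_use_cost and extra_cond_check_cost instead of
a single cmp_time term. A sketch of the arithmetic, where prefix_tables and
where_cost_per_row stand in for (idx - join->const_tables) and
WHERE_COST_THD(thd):

    // Hedged restatement of the copy/WHERE cost split above.
    double matching_combinations= fanout * join_sel * record_count; // 5000
    double copy_cost= record_count * row_copy_cost +
                      matching_combinations * prefix_tables * row_copy_cost;
    double where_cost= matching_combinations * where_cost_per_row;
    // cur_cost is then increased by copy_cost + where_cost
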
@@ -9037,7 +9237,6 @@ best_access_path(JOIN *join,
uint forced_index= MAX_KEY;
bool force_plan= 0, use_join_buffer= 0;
ulonglong refills= 1;
-
/*
Range optimizer never proposes a RANGE if it isn't better
than FULL: so if RANGE is present, it's always preferred to FULL.
@@ -9208,8 +9407,13 @@ best_access_path(JOIN *join,
s->cached_forced_index_cost= cur_cost;
s->cached_forced_index= forced_index;
}
-
- if (disable_jbuf || (table->map & join->outer_join))
+
+ /*
+ Note: the condition checked here is very out of date and incorrect.
+ Below, we use a more accurate check when assigning the value of
+ best.use_join_buffer.
+ */
+ if ((s->table->map & join->outer_join) || disable_jbuf)
{
/*
Simple scan
@@ -9320,7 +9524,10 @@ best_access_path(JOIN *join,
best.filter= filter;
/* range/index_merge/ALL/index access method are "independent", so: */
best.ref_depends_map= 0;
- best.use_join_buffer= use_join_buffer;
+ best.use_join_buffer= use_join_buffer ||
+ MY_TEST(!disable_jbuf &&
+ (join->allowed_outer_join_with_cache ||
+ !(s->table->map & join->outer_join)));
best.refills= (ulonglong) ceil(refills);
best.spl_plan= 0;
best.type= type;
@@ -9359,6 +9566,7 @@ best_access_path(JOIN *join,
pos->use_join_buffer= best.use_join_buffer;
pos->firstmatch_with_join_buf= 0;
pos->spl_plan= best.spl_plan;
+ pos->spl_pd_boundary= best.spl_plan ? spl_pd_boundary: 0;
pos->range_rowid_filter_info= best.filter;
pos->key_dependent= (best.type == JT_EQ_REF ? (table_map) 0 :
key_dependent & remaining_tables);
@@ -9922,10 +10130,12 @@ optimize_straight_join(JOIN *join, table_map remaining_tables)
}
position->cond_selectivity= pushdown_cond_selectivity;
position->records_out= records_out;
- current_record_count= COST_MULT(record_count, records_out);
+ current_record_count= COST_MULT(record_count, records_out);
}
else
position->cond_selectivity= 1.0;
+
+ position->partial_join_cardinality= current_record_count;
++idx;
record_count= current_record_count;
}
@@ -11318,6 +11528,7 @@ best_extension_by_limited_search(JOIN *join,
join->positions[idx].cond_selectivity= pushdown_cond_selectivity;
partial_join_cardinality= record_count * position->records_out;
+ join->positions[idx].partial_join_cardinality= partial_join_cardinality;
if (unlikely(thd->trace_started()) && pushdown_cond_selectivity < 1.0 &&
partial_join_cardinality < current_record_count)
@@ -11325,9 +11536,9 @@ best_extension_by_limited_search(JOIN *join,
.add("selectivity", pushdown_cond_selectivity)
.add("estimated_join_cardinality", partial_join_cardinality);
- if (search_depth > 1 &&
- ((remaining_tables & ~real_table_bit) & join->allowed_tables))
+ if ((search_depth > 1) && (remaining_tables & ~real_table_bit) &
+ allowed_tables)
{
/* Recursively expand the current partial plan */
Json_writer_array trace_rest(thd, "rest_of_plan");
@@ -14627,6 +14838,9 @@ uint check_join_cache_usage(JOIN_TAB *tab,
join->return_tab= 0;
+ if (tab->no_forced_join_cache)
+ goto no_join_cache;
+
/*
Don't use join cache if @@join_cache_level==0 or this table is the first
one join suborder (either at top level or inside a bush)
@@ -15673,7 +15887,8 @@ bool JOIN_TAB::preread_init()
DBUG_RETURN(TRUE);
if (!(derived->get_unit()->uncacheable & UNCACHEABLE_DEPENDENT) ||
- derived->is_nonrecursive_derived_with_rec_ref())
+ derived->is_nonrecursive_derived_with_rec_ref() ||
+ is_split_derived)
preread_init_done= TRUE;
if (select && select->quick)
select->quick->replace_handler(table->file);
@@ -19284,9 +19499,14 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
table_after_join_selectivity(join, i, rs,
reopt_remaining_tables &
~real_table_bit, &records_out);
+ join->positions[i].partial_join_cardinality= rec_count * pushdown_cond_selectivity;
}
+ else
+ join->positions[i].partial_join_cardinality= COST_MULT(rec_count, records_out);
+
rec_count= COST_MULT(rec_count, records_out);
*outer_rec_count= COST_MULT(*outer_rec_count, records_out);
+
if (rs->emb_sj_nest)
inner_fanout= COST_MULT(inner_fanout, records_out);
@@ -21218,12 +21438,13 @@ bool Create_tmp_table::finalize(THD *thd,
table->group= m_group; /* Table is grouped by key */
param->group_buff= m_group_buff;
share->keys=1;
- share->uniques= MY_TEST(m_using_unique_constraint);
table->key_info= table->s->key_info= keyinfo;
table->keys_in_use_for_query.set_bit(0);
share->keys_in_use.set_bit(0);
keyinfo->key_part= m_key_part_info;
keyinfo->flags=HA_NOSAME | HA_BINARY_PACK_KEY | HA_PACK_KEY;
+ if (m_using_unique_constraint)
+ keyinfo->flags|= HA_UNIQUE_HASH;
keyinfo->ext_key_flags= keyinfo->flags;
keyinfo->usable_key_parts=keyinfo->user_defined_key_parts=
param->group_parts;
@@ -21322,6 +21543,7 @@ bool Create_tmp_table::finalize(THD *thd,
*/
DBUG_PRINT("info",("hidden_field_count: %d", param->hidden_field_count));
+ keyinfo->flags= 0;
if (m_blobs_count[distinct])
{
/*
@@ -21329,10 +21551,11 @@ bool Create_tmp_table::finalize(THD *thd,
indexes on blobs with arbitrary length. Such indexes cannot be
used for lookups.
*/
- share->uniques= 1;
+ keyinfo->flags|= HA_UNIQUE_HASH;
}
keyinfo->user_defined_key_parts= m_field_count[distinct] +
- (share->uniques ? MY_TEST(null_pack_length[distinct]) : 0);
+ ((keyinfo->flags & HA_UNIQUE_HASH) ?
+ MY_TEST(null_pack_length[distinct]) : 0);
keyinfo->ext_key_parts= keyinfo->user_defined_key_parts;
keyinfo->usable_key_parts= keyinfo->user_defined_key_parts;
table->distinct= 1;
@@ -21347,7 +21570,8 @@ bool Create_tmp_table::finalize(THD *thd,
share->keys_in_use.set_bit(0);
table->key_info= table->s->key_info= keyinfo;
keyinfo->key_part= m_key_part_info;
- keyinfo->flags=HA_NOSAME | HA_NULL_ARE_EQUAL | HA_BINARY_PACK_KEY | HA_PACK_KEY;
+ keyinfo->flags|= (HA_NOSAME | HA_NULL_ARE_EQUAL | HA_BINARY_PACK_KEY |
+ HA_PACK_KEY);
keyinfo->ext_key_flags= keyinfo->flags;
keyinfo->key_length= 0; // Will compute the sum of the parts below.
keyinfo->name= distinct_key;
@@ -21376,7 +21600,7 @@ bool Create_tmp_table::finalize(THD *thd,
blobs can distinguish NULL from 0. This extra field is not needed
when we do not use UNIQUE indexes for blobs.
*/
- if (null_pack_length[distinct] && share->uniques)
+ if (null_pack_length[distinct] && (keyinfo->flags & HA_UNIQUE_HASH))
{
m_key_part_info->null_bit=0;
m_key_part_info->offset= null_pack_base[distinct];
@@ -21794,113 +22018,125 @@ bool open_tmp_table(TABLE *table)
*/
-bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
+bool create_internal_tmp_table(TABLE *table, KEY *org_keyinfo,
TMP_ENGINE_COLUMNDEF *start_recinfo,
TMP_ENGINE_COLUMNDEF **recinfo,
ulonglong options)
{
int error;
- MARIA_KEYDEF keydef;
+ MARIA_KEYDEF *keydefs= 0, *keydef;
MARIA_UNIQUEDEF uniquedef;
TABLE_SHARE *share= table->s;
MARIA_CREATE_INFO create_info;
+ bool use_unique= false;
DBUG_ENTER("create_internal_tmp_table");
if (share->keys)
{ // Get keys for ni_create
- bool using_unique_constraint=0;
- HA_KEYSEG *seg= (HA_KEYSEG*) alloc_root(&table->mem_root,
- sizeof(*seg) * keyinfo->user_defined_key_parts);
- if (!seg)
- goto err;
-
- bzero(seg, sizeof(*seg) * keyinfo->user_defined_key_parts);
- /*
- Note that a similar check is performed during
- subquery_types_allow_materialization. See MDEV-7122 for more details as
- to why. Whenever this changes, it must be updated there as well, for
- all tmp_table engines.
- */
- if (keyinfo->key_length > table->file->max_key_length() ||
- keyinfo->user_defined_key_parts > table->file->max_key_parts() ||
- share->uniques)
- {
- if (!share->uniques && !(keyinfo->flags & HA_NOSAME))
- {
- my_error(ER_INTERNAL_ERROR, MYF(0),
- "Using too big key for internal temp tables");
- DBUG_RETURN(1);
- }
+ HA_KEYSEG *seg;
+ DBUG_ASSERT(share->key_parts);
- /* Can't create a key; Make a unique constraint instead of a key */
- share->keys= 0;
- share->key_parts= share->ext_key_parts= 0;
- share->uniques= 1;
- using_unique_constraint=1;
- bzero((char*) &uniquedef,sizeof(uniquedef));
- uniquedef.keysegs=keyinfo->user_defined_key_parts;
- uniquedef.seg=seg;
- uniquedef.null_are_equal=1;
+ if (!(multi_alloc_root(&table->mem_root,
+ &seg, sizeof(*seg) * share->key_parts,
+ &keydefs, sizeof(*keydefs) * share->keys,
+ NullS)))
+ goto err;
+ keydef= keydefs;
- /* Create extra column for hash value */
- bzero((uchar*) *recinfo,sizeof(**recinfo));
- (*recinfo)->type= FIELD_CHECK;
- (*recinfo)->length= MARIA_UNIQUE_HASH_LENGTH;
- (*recinfo)++;
+ bzero(seg, sizeof(*seg) * share->key_parts);
- /* Avoid warnings from valgrind */
- bzero(table->record[0]+ share->reclength, MARIA_UNIQUE_HASH_LENGTH);
- bzero(share->default_values+ share->reclength, MARIA_UNIQUE_HASH_LENGTH);
- share->reclength+= MARIA_UNIQUE_HASH_LENGTH;
- }
- else
- {
- /* Create a key */
- bzero((char*) &keydef,sizeof(keydef));
- keydef.flag= keyinfo->flags & HA_NOSAME;
- keydef.keysegs= keyinfo->user_defined_key_parts;
- keydef.seg= seg;
- }
- for (uint i=0; i < keyinfo->user_defined_key_parts ; i++,seg++)
+ /* Note that share->keys may change in the loop ! */
+ for (KEY *keyinfo= org_keyinfo, *end_keyinfo= keyinfo + share->keys;
+ keyinfo < end_keyinfo ;
+ keyinfo++)
{
- Field *field=keyinfo->key_part[i].field;
- seg->flag= 0;
- seg->language= field->charset()->number;
- seg->length= keyinfo->key_part[i].length;
- seg->start= keyinfo->key_part[i].offset;
- if (field->flags & BLOB_FLAG)
+ /*
+ Note that a similar check is performed during
+ subquery_types_allow_materialization. See MDEV-7122 for more details as
+ to why. Whenever this changes, it must be updated there as well, for
+ all tmp_table engines.
+ */
+ if (keyinfo->key_length > table->file->max_key_length() ||
+ keyinfo->user_defined_key_parts > table->file->max_key_parts() ||
+ (keyinfo->flags & HA_UNIQUE_HASH))
{
- seg->type=
- ((keyinfo->key_part[i].key_type & FIELDFLAG_BINARY) ?
- HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2);
- seg->bit_start= (uint8)(field->pack_length() -
- portable_sizeof_char_ptr);
- seg->flag= HA_BLOB_PART;
- seg->length=0; // Whole blob in unique constraint
+ if (!(keyinfo->flags & (HA_NOSAME | HA_UNIQUE_HASH)))
+ {
+ my_error(ER_INTERNAL_ERROR, MYF(0),
+ "Using too big key for internal temp tables");
+ DBUG_RETURN(1);
+ }
+ /* Can't create a key; Make a unique constraint instead of a key */
+ share->keys--;
+ share->key_parts-= keyinfo->user_defined_key_parts;
+ share->ext_key_parts-= keyinfo->ext_key_parts;
+ use_unique= true;
+ bzero((char*) &uniquedef,sizeof(uniquedef));
+ uniquedef.keysegs= keyinfo->user_defined_key_parts;
+ uniquedef.seg=seg;
+ uniquedef.null_are_equal=1;
+ keyinfo->flags|= HA_UNIQUE_HASH;
+ keyinfo->algorithm= HA_KEY_ALG_UNIQUE_HASH;
+
+ /* Create extra column for hash value */
+ bzero((uchar*) *recinfo,sizeof(**recinfo));
+ (*recinfo)->type= FIELD_CHECK;
+ (*recinfo)->length= MARIA_UNIQUE_HASH_LENGTH;
+ (*recinfo)++;
+
+ /* Avoid warnings from valgrind */
+ bzero(table->record[0]+ share->reclength, MARIA_UNIQUE_HASH_LENGTH);
+ bzero(share->default_values+ share->reclength,
+ MARIA_UNIQUE_HASH_LENGTH);
+ share->reclength+= MARIA_UNIQUE_HASH_LENGTH;
}
else
{
- seg->type= keyinfo->key_part[i].type;
- /* Tell handler if it can do suffic space compression */
- if (field->real_type() == MYSQL_TYPE_STRING &&
- keyinfo->key_part[i].length > 32)
- seg->flag|= HA_SPACE_PACK;
- }
- if (!(field->flags & NOT_NULL_FLAG))
- {
- seg->null_bit= field->null_bit;
- seg->null_pos= (uint) (field->null_ptr - (uchar*) table->record[0]);
- /*
- We are using a GROUP BY on something that contains NULL
- In this case we have to tell Aria that two NULL should
- on INSERT be regarded at the same value
- */
- if (!using_unique_constraint)
- keydef.flag|= HA_NULL_ARE_EQUAL;
+ /* Create a key */
+ bzero((char*) keydef,sizeof(*keydef));
+ /*
+ We are using a GROUP BY on something that contains NULL.
+ In this case we have to tell Aria that two NULLs should,
+ on INSERT, be regarded as the same value.
+ */
+ keydef->flag= (keyinfo->flags & HA_NOSAME) | HA_NULL_ARE_EQUAL;
+ keydef->keysegs= keyinfo->user_defined_key_parts;
+ keydef->seg= seg;
+ keydef++;
+ }
+ for (uint i=0; i < keyinfo->user_defined_key_parts ; i++,seg++)
+ {
+ Field *field=keyinfo->key_part[i].field;
+ seg->flag= 0;
+ seg->language= field->charset()->number;
+ seg->length= keyinfo->key_part[i].length;
+ seg->start= keyinfo->key_part[i].offset;
+ if (field->flags & BLOB_FLAG)
+ {
+ seg->type=
+ ((keyinfo->key_part[i].key_type & FIELDFLAG_BINARY) ?
+ HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2);
+ seg->bit_start= (uint8)(field->pack_length() -
+ portable_sizeof_char_ptr);
+ seg->flag= HA_BLOB_PART;
+ seg->length=0; // Whole blob in unique constraint
+ }
+ else
+ {
+ seg->type= keyinfo->key_part[i].type;
+ /* Tell the handler if it can do suffix space compression */
+ if (field->real_type() == MYSQL_TYPE_STRING &&
+ keyinfo->key_part[i].length > 32)
+ seg->flag|= HA_SPACE_PACK;
+ }
+ if (!(field->flags & NOT_NULL_FLAG))
+ {
+ seg->null_bit= field->null_bit;
+ seg->null_pos= (uint) (field->null_ptr - (uchar*) table->record[0]);
+ }
}
- }
- if (share->keys)
keyinfo->index_flags= table->file->index_flags(0, 0, 1);
+ }
}
bzero((char*) &create_info,sizeof(create_info));
create_info.data_file_length= table->in_use->variables.tmp_disk_table_size;
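
The loop above applies one rule per key. Condensed, the test that demotes a
key into a Maria unique constraint is (a restatement of the hunk, with the
handler limits exactly as in the code):

    // A key becomes a unique constraint when the engine cannot hold it
    // as a real index, or when it was already marked as a hash unique.
    static bool needs_unique_constraint(const KEY *key, const handler *file)
    {
      return key->key_length > file->max_key_length() ||
             key->user_defined_key_parts > file->max_key_parts() ||
             (key->flags & HA_UNIQUE_HASH);
    }
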
@@ -21946,8 +22182,8 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
}
if (unlikely((error= maria_create(share->path.str, file_type, share->keys,
- &keydef, (uint) (*recinfo-start_recinfo),
- start_recinfo, share->uniques, &uniquedef,
+ keydefs, (uint) (*recinfo-start_recinfo),
+ start_recinfo, use_unique, &uniquedef,
&create_info, create_flags))))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
@@ -21999,7 +22235,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
/* Create internal MyISAM temporary table */
-bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
+bool create_internal_tmp_table(TABLE *table, KEY *org_keyinfo,
TMP_ENGINE_COLUMNDEF *start_recinfo,
TMP_ENGINE_COLUMNDEF **recinfo,
ulonglong options)
@@ -22014,11 +22250,12 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
{ // Get keys for ni_create
bool using_unique_constraint=0;
HA_KEYSEG *seg= (HA_KEYSEG*) alloc_root(&table->mem_root,
- sizeof(*seg) * keyinfo->user_defined_key_parts);
+ sizeof(*seg) *
+ share->user_defined_key_parts);
if (!seg)
goto err;
- bzero(seg, sizeof(*seg) * keyinfo->user_defined_key_parts);
+ bzero(seg, sizeof(*seg) * share->user_defined_key_parts);
/*
Note that a similar check is performed during
subquery_types_allow_materialization. See MDEV-7122 for more details as
@@ -22354,7 +22591,7 @@ void set_postjoin_aggr_write_func(JOIN_TAB *tab)
Note for MyISAM tmp tables: if uniques is true keys won't be
created.
*/
- if (table->s->keys && !table->s->uniques)
+ if (table->s->keys && !table->s->have_unique_constraint())
{
DBUG_PRINT("info",("Using end_update"));
aggr->set_write_func(end_update);
@@ -22664,6 +22901,8 @@ bool instantiate_tmp_table(TABLE *table, KEY *keyinfo,
TMP_ENGINE_COLUMNDEF **recinfo,
ulonglong options)
{
+ DBUG_ASSERT(table->s->keys == 0 || table->key_info == keyinfo);
+ DBUG_ASSERT(table->s->keys <= 1);
if (table->s->db_type() == TMP_ENGINE_HTON)
{
/*
@@ -22808,6 +23047,8 @@ sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
/* The user has aborted the execution of the query */
DBUG_RETURN(NESTED_LOOP_KILLED);
}
+ join_tab->jbuf_loops_tracker->on_scan_init();
+
if (!test_if_use_dynamic_range_scan(join_tab))
{
if (!cache->put_record())
@@ -22958,6 +23199,16 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
enum_nested_loop_state rc;
DBUG_ENTER("sub_select");
+ if (join_tab->split_derived_to_update && !end_of_records)
+ {
+ table_map tab_map= join_tab->split_derived_to_update;
+ for (uint i= 0; tab_map; i++, tab_map>>= 1)
+ {
+ if (tab_map & 1)
+ join->map2table[i]->preread_init_done= false;
+ }
+ }
+
if (join_tab->last_inner)
{
JOIN_TAB *last_inner_tab= join_tab->last_inner;
@@ -24574,7 +24825,6 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
DBUG_RETURN(NESTED_LOOP_ERROR); // Not a table_is_full error
if (is_duplicate)
goto end;
- table->s->uniques=0; // To ensure rows are the same
}
if (++join_tab->send_records >=
join_tab->tmp_table_param->end_write_records &&
@@ -26511,39 +26761,70 @@ JOIN_TAB::remove_duplicates()
{
bool error;
- ulong keylength= 0;
- uint field_count;
+ ulong keylength= 0, sort_field_keylength= 0;
+ uint field_count, item_count;
List<Item> *fields= (this-1)->fields;
+ Item *item;
THD *thd= join->thd;
-
+ SORT_FIELD *sortorder, *sorder;
DBUG_ENTER("remove_duplicates");
DBUG_ASSERT(join->aggr_tables > 0 && table->s->tmp_table != NO_TMP_TABLE);
THD_STAGE_INFO(join->thd, stage_removing_duplicates);
- //join->explain->ops_tracker.report_duplicate_removal();
-
- table->reginfo.lock_type=TL_WRITE;
+ if (!(sortorder= (SORT_FIELD*) my_malloc(PSI_INSTRUMENT_ME,
+ (fields->elements+1) *
+ sizeof(SORT_FIELD),
+ MYF(MY_WME | MY_ZEROFILL))))
+ DBUG_RETURN(TRUE);
/* Calculate how many saved fields there is in list */
- field_count=0;
+ field_count= item_count= 0;
+
List_iterator<Item> it(*fields);
- Item *item;
- while ((item=it++))
+ for (sorder= sortorder ; (item=it++) ;)
{
- if (item->get_tmp_table_field() && ! item->const_item())
- field_count++;
+ if (!item->const_item())
+ {
+ if (item->get_tmp_table_field())
+ {
+ /* Field is stored in the temporary table, skip it */
+ field_count++;
+ }
+ else
+ {
+ /* Item is not stored in temporary table, remember it */
+ sorder->item= item;
+ /* Calculate sorder->length */
+ item->type_handler()->sort_length(thd, item, sorder);
+ sorder++;
+ item_count++;
+ }
+ }
}
+ sorder->item= 0; // End marker
- if (!field_count && !(join->select_options & OPTION_FOUND_ROWS) && !having)
- { // only const items with no OPTION_FOUND_ROWS
+ if ((field_count + item_count == 0) && ! having &&
+ !(join->select_options & OPTION_FOUND_ROWS))
+ {
+ // only const items with no OPTION_FOUND_ROWS
join->unit->lim.set_single_row(); // Only send first row
+ my_free(sortorder);
DBUG_RETURN(false);
}
+ /*
+ The table first contains the fields that will be in the output, then
+ temporary results pointed to by the fields list.
+ Example: SELECT DISTINCT sum(a), sum(d) > 2 FROM ...
+ In this case the temporary table contains sum(a), sum(d).
+ */
+
Field **first_field=table->field+table->s->fields - field_count;
for (Field **ptr=first_field; *ptr; ptr++)
keylength+= (*ptr)->sort_length() + (*ptr)->maybe_null();
+ for (SORT_FIELD *ptr= sortorder ; ptr->item ; ptr++)
+ sort_field_keylength+= ptr->length + (ptr->item->maybe_null() ? 1 : 0);
/*
Disable LIMIT ROWS EXAMINED in order to avoid interrupting prematurely
@@ -26554,30 +26835,80 @@ JOIN_TAB::remove_duplicates()
thd->reset_killed();
table->file->info(HA_STATUS_VARIABLE);
+ table->reginfo.lock_type=TL_WRITE;
+
if (table->s->db_type() == heap_hton ||
(!table->s->blob_fields &&
((ALIGN_SIZE(keylength) + HASH_OVERHEAD) * table->file->stats.records <
thd->variables.sortbuff_size)))
- error=remove_dup_with_hash_index(join->thd, table, field_count, first_field,
- keylength, having);
+ error= remove_dup_with_hash_index(join->thd, table, field_count,
+ first_field, sortorder,
+ keylength + sort_field_keylength, having);
else
- error=remove_dup_with_compare(join->thd, table, first_field, having);
+ error=remove_dup_with_compare(join->thd, table, first_field, sortorder,
+ sort_field_keylength, having);
if (join->select_lex != join->select_lex->master_unit()->fake_select_lex)
thd->lex->set_limit_rows_examined();
free_blobs(first_field);
+ my_free(sortorder);
DBUG_RETURN(error);
}
+/*
+ Create a sort/compare key from items
+
+ Key is of fixed length and binary comparable
+*/
+
+static uchar *make_sort_key(SORT_FIELD *sortorder, uchar *key_buffer,
+ String *tmp_value)
+{
+ for (SORT_FIELD *ptr= sortorder ; ptr->item ; ptr++)
+ {
+ ptr->item->type_handler()->make_sort_key_part(key_buffer,
+ ptr->item,
+ ptr, tmp_value);
+ key_buffer+= (ptr->item->maybe_null() ? 1 : 0) + ptr->length;
+ }
+ return key_buffer;
+}
+
+
+/*
+ Remove duplicates by comparing all rows with all other rows
+
+ @param thd THD
+ @param table Temporary table
+ @param first_field Pointer to fields in the temporary table that are part
+ of DISTINCT; ends with a null pointer
+ @param sortorder An array of Items that are part of DISTINCT. Terminated
+ by an element N with sortorder[N].item == NULL.
+ @param keylength Length of key produced by sortorder
+ @param having Having expression (NULL if no having)
+*/
+
static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
+ SORT_FIELD *sortorder, ulong keylength,
Item *having)
{
handler *file=table->file;
- uchar *record=table->record[0];
+ uchar *record=table->record[0], *key_buffer, *key_buffer2;
+ char *tmp_buffer;
int error;
+ String tmp_value;
DBUG_ENTER("remove_dup_with_compare");
+ if (unlikely(!my_multi_malloc(PSI_INSTRUMENT_ME,
+ MYF(MY_WME),
+ &key_buffer, keylength,
+ &key_buffer2, keylength,
+ &tmp_buffer, keylength+1,
+ NullS)))
+ DBUG_RETURN(1);
+ tmp_value.set(tmp_buffer, keylength, &my_charset_bin);
+
if (unlikely(file->ha_rnd_init_with_error(1)))
DBUG_RETURN(1);
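
The duplicate-elimination key is now a concatenation of two parts: the sort
images of the fields stored in the temporary table (first_field, keylength
bytes) followed by the sort images of items that are not stored in the table
(sortorder, sort_field_keylength bytes); every nullable column contributes one
extra NULL-indicator byte. A sketch of the combined length, restating the two
loops in remove_duplicates():

    // Total length of the binary-comparable key built by make_sort_key().
    ulong total_keylength= 0;
    for (Field **ptr= first_field; *ptr; ptr++)
      total_keylength+= (*ptr)->sort_length() + (*ptr)->maybe_null();
    for (SORT_FIELD *ptr= sortorder; ptr->item; ptr++)
      total_keylength+= ptr->length + (ptr->item->maybe_null() ? 1 : 0);
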
@@ -26586,8 +26917,8 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
{
if (unlikely(thd->check_killed()))
{
- error=0;
- goto err;
+ error= 1;
+ goto end;
}
if (unlikely(error))
{
@@ -26606,9 +26937,10 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
{
my_message(ER_OUTOFMEMORY, ER_THD(thd,ER_OUTOFMEMORY),
MYF(ME_FATAL));
- error=0;
- goto err;
+ error= 1;
+ goto end;
}
+ make_sort_key(sortorder, key_buffer, &tmp_value);
store_record(table,record[1]);
/* Read through rest of file and mark duplicated rows deleted */
@@ -26621,7 +26953,10 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
break;
goto err;
}
- if (compare_record(table, first_field) == 0)
+ make_sort_key(sortorder, key_buffer2, &tmp_value);
+ if (compare_record(table, first_field) == 0 &&
+ (!keylength ||
+ memcmp(key_buffer, key_buffer2, keylength) == 0))
{
if (unlikely((error= file->ha_delete_row(record))))
goto err;
@@ -26640,38 +26975,52 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
goto err;
}
+ error= 0;
+end:
+ my_free(key_buffer);
file->extra(HA_EXTRA_NO_CACHE);
(void) file->ha_rnd_end();
- DBUG_RETURN(0);
+ DBUG_RETURN(error);
+
err:
- file->extra(HA_EXTRA_NO_CACHE);
- (void) file->ha_rnd_end();
- if (error)
- file->print_error(error,MYF(0));
- DBUG_RETURN(1);
+ DBUG_ASSERT(error);
+ file->print_error(error,MYF(0));
+ goto end;
}
/**
- Generate a hash index for each row to quickly find duplicate rows.
+ Generate a hash index for each row to quickly find duplicate rows.
+
+ @param thd THD
+ @param table Temporary table
+ @param field_count Number of fields part of distinct
+ @param first_field Pointer to fields in the temporary table that are part
+ of DISTINCT; ends with a null pointer
+ @param sortorder An array of Items that are part of DISTINCT. Terminated
+ by an element N with sortorder[N].item == NULL.
+ @param keylength Length of hash key
+ @param having Having expression (NULL if no having)
- @note
- Note that this will not work on tables with blobs!
+ @note
+ Note that this will not work on tables with blobs!
*/
static int remove_dup_with_hash_index(THD *thd, TABLE *table,
uint field_count,
Field **first_field,
+ SORT_FIELD *sortorder,
ulong key_length,
Item *having)
{
uchar *key_buffer, *key_pos, *record=table->record[0];
+ char *tmp_buffer;
int error;
handler *file= table->file;
ulong extra_length= ALIGN_SIZE(key_length)-key_length;
uint *field_lengths, *field_length;
HASH hash;
- Field **ptr;
+ String tmp_value;
DBUG_ENTER("remove_dup_with_hash_index");
if (!my_multi_malloc(key_memory_hash_index_key_buffer, MYF(MY_WME),
@@ -26680,10 +27029,13 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
(long) file->stats.records),
&field_lengths,
(uint) (field_count*sizeof(*field_lengths)),
+ &tmp_buffer, key_length+1,
NullS))
DBUG_RETURN(1);
- for (ptr= first_field, field_length=field_lengths ; *ptr ; ptr++)
+ tmp_value.set(tmp_buffer, key_length, &my_charset_bin);
+ field_length= field_lengths;
+ for (Field **ptr= first_field ; *ptr ; ptr++)
(*field_length++)= (*ptr)->sort_length();
if (my_hash_init(key_memory_hash_index_key_buffer, &hash, &my_charset_bin,
@@ -26697,7 +27049,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
if (unlikely((error= file->ha_rnd_init(1))))
goto err;
- key_pos=key_buffer;
+ key_pos= key_buffer;
for (;;)
{
uchar *org_key_pos;
@@ -26722,11 +27074,14 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
/* copy fields to key buffer */
org_key_pos= key_pos;
field_length=field_lengths;
- for (ptr= first_field ; *ptr ; ptr++)
+ for (Field **ptr= first_field ; *ptr ; ptr++)
{
(*ptr)->make_sort_key_part(key_pos, *field_length);
key_pos+= (*ptr)->maybe_null() + *field_length++;
}
+ /* Copy result fields not stored in table to key buffer */
+ key_pos= make_sort_key(sortorder, key_pos, &tmp_value);
+
/* Check if it exists before */
if (my_hash_search(&hash, org_key_pos, key_length))
{
@@ -29038,13 +29393,16 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
// psergey-todo: data for filtering!
tracker= &eta->tracker;
jbuf_tracker= &eta->jbuf_tracker;
+ jbuf_loops_tracker= &eta->jbuf_loops_tracker;
jbuf_unpack_tracker= &eta->jbuf_unpack_tracker;
/* Enable the table access time tracker only for "ANALYZE stmt" */
if (thd->lex->analyze_stmt)
{
table->file->set_time_tracker(&eta->op_tracker);
- eta->op_tracker.my_gap_tracker = &eta->extra_time_tracker;
+ eta->op_tracker.set_gap_tracker(&eta->extra_time_tracker);
+
+ eta->jbuf_unpack_tracker.set_gap_tracker(&eta->jbuf_extra_time_tracker);
}
/* No need to save id and select_type here, they are kept in Explain_select */
@@ -30211,6 +30569,162 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str,
}
}
+enum explainable_cmd_type
+{
+ SELECT_CMD, INSERT_CMD, REPLACE_CMD, UPDATE_CMD, DELETE_CMD, NO_CMD
+};
+
+static
+const LEX_CSTRING explainable_cmd_name []=
+{
+ {STRING_WITH_LEN("select ")},
+ {STRING_WITH_LEN("insert ")},
+ {STRING_WITH_LEN("replace ")},
+ {STRING_WITH_LEN("update ")},
+ {STRING_WITH_LEN("delete ")},
+};
+
+static
+const LEX_CSTRING* get_explainable_cmd_name(enum explainable_cmd_type cmd)
+{
+ return explainable_cmd_name + cmd;
+}
+
+static
+enum explainable_cmd_type get_explainable_cmd_type(THD *thd)
+{
+ switch (thd->lex->sql_command) {
+ case SQLCOM_SELECT:
+ return SELECT_CMD;
+ case SQLCOM_INSERT:
+ case SQLCOM_INSERT_SELECT:
+ return INSERT_CMD;
+ case SQLCOM_REPLACE:
+ case SQLCOM_REPLACE_SELECT:
+ return REPLACE_CMD;
+ case SQLCOM_UPDATE:
+ case SQLCOM_UPDATE_MULTI:
+ return UPDATE_CMD;
+ case SQLCOM_DELETE:
+ case SQLCOM_DELETE_MULTI:
+ return DELETE_CMD;
+ default:
+ return SELECT_CMD;
+ }
+}
+
+
+void TABLE_LIST::print_leaf_tables(THD *thd, String *str,
+ enum_query_type query_type)
+{
+ if (merge_underlying_list)
+ {
+ for (TABLE_LIST *tbl= merge_underlying_list; tbl; tbl= tbl->next_local)
+ tbl->print_leaf_tables(thd, str, query_type);
+ }
+ else
+ print(thd, 0, str, query_type);
+}
+
+
+void st_select_lex::print_item_list(THD *thd, String *str,
+ enum_query_type query_type)
+{
+ bool first= 1;
+ /*
+ outer_select() cannot be used here because it is for name resolution
+ and will return NULL at the end of the name resolution chain (view/derived)
+ */
+ bool top_level= is_query_topmost(thd);
+ List_iterator_fast<Item> it(item_list);
+ Item *item;
+ while ((item= it++))
+ {
+ if (first)
+ first= 0;
+ else
+ str->append(',');
+
+ if ((is_subquery_function() && !item->is_explicit_name()) ||
+ !item->name.str)
+ {
+ /*
+ Do not print auto-generated aliases in subqueries. It has no purpose
+ in a view definition or other contexts where the query is printed.
+ */
+ item->print(str, query_type);
+ }
+ else
+ {
+ /*
+ Do not print illegal names (if it is not a top-level SELECT).
+ For a top-level view the names have been checked (and correct
+ names assigned); other cases of top-level SELECT are not
+ important, because it is not a "table field".
+ */
+ if (top_level ||
+ item->is_explicit_name() ||
+ !check_column_name(item->name.str))
+ item->print_item_w_name(str, query_type);
+ else
+ item->print(str, query_type);
+ }
+ }
+}
+
+
+void st_select_lex::print_set_clause(THD *thd, String *str,
+ enum_query_type query_type)
+{
+ bool first= 1;
+ /*
+ outer_select() cannot be used here because it is for name resolution
+ and will return NULL at the end of the name resolution chain (view/derived)
+ */
+ List_iterator_fast<Item> it(item_list);
+ List_iterator_fast<Item> vt(thd->lex->value_list);
+ Item *item;
+ Item *val;
+ while ((item= it++, val= vt++ ))
+ {
+ if (first)
+ {
+ str->append(STRING_WITH_LEN(" set "));
+ first= 0;
+ }
+ else
+ str->append(',');
+
+ item->print(str, query_type);
+ str->append(STRING_WITH_LEN(" = "));
+ val->print(str, query_type);
+ }
+}
+
+
+void st_select_lex::print_on_duplicate_key_clause(THD *thd, String *str,
+ enum_query_type query_type)
+{
+ bool first= 1;
+ List_iterator_fast<Item> it(thd->lex->update_list);
+ List_iterator_fast<Item> vt(thd->lex->value_list);
+ Item *item;
+ Item *val;
+ while ((item= it++, val= vt++ ))
+ {
+ if (first)
+ {
+ str->append(STRING_WITH_LEN(" on duplicate key update "));
+ first= 0;
+ }
+ else
+ str->append(',');
+
+ item->print(str, query_type);
+ str->append(STRING_WITH_LEN(" = "));
+ val->print(str, query_type);
+ }
+}
void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
{
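
With these printers in place the whole data-change statement can be
reconstructed, which EXPLAIN/ANALYZE output relies on. A hedged usage sketch;
the query-type flag and the way the select is reached are illustrative:

    // Print the top-level statement back into SQL text.
    StringBuffer<256> out;
    thd->lex->first_select_lex()->print(thd, &out, QT_EXPLAIN);
    // For a top-level INSERT this can yield, e.g.:
    //   insert into t1(a,b) select t2.a,t2.b from t2
    //     on duplicate key update b = 2
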
@@ -30222,6 +30736,67 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
return;
}
+ if (is_tvc_wrapper && (query_type & QT_NO_WRAPPERS_FOR_TVC_IN_VIEW))
+ {
+ first_inner_unit()->first_select()->print(thd, str, query_type);
+ return;
+ }
+
+ bool top_level= is_query_topmost(thd);
+ enum explainable_cmd_type sel_type= SELECT_CMD;
+ if (top_level)
+ sel_type= get_explainable_cmd_type(thd);
+
+ if (sel_type == INSERT_CMD || sel_type == REPLACE_CMD)
+ {
+ str->append(get_explainable_cmd_name(sel_type));
+ str->append(STRING_WITH_LEN("into "));
+ TABLE_LIST *tbl= thd->lex->query_tables;
+ while (tbl->merge_underlying_list)
+ tbl= tbl->merge_underlying_list;
+ tbl->print(thd, 0, str, query_type);
+ if (thd->lex->field_list.elements)
+ {
+ str->append ('(');
+ List_iterator_fast<Item> it(thd->lex->field_list);
+ Item *item;
+ bool first= true;
+ while ((item= it++))
+ {
+ if (first)
+ first= false;
+ else
+ str->append(',');
+ str->append(item->name);
+ }
+ str->append(')');
+ }
+
+ str->append(' ');
+
+ if (thd->lex->sql_command == SQLCOM_INSERT ||
+ thd->lex->sql_command == SQLCOM_REPLACE)
+ {
+ str->append(STRING_WITH_LEN("values "));
+ bool is_first_elem= true;
+ List_iterator_fast<List_item> li(thd->lex->many_values);
+ List_item *list;
+
+ while ((list= li++))
+ {
+ if (is_first_elem)
+ is_first_elem= false;
+ else
+ str->append(',');
+
+ print_list_item(str, list, query_type);
+ }
+ if (thd->lex->update_list.elements)
+ print_on_duplicate_key_clause(thd, str, query_type);
+ return;
+ }
+ }
+
if ((query_type & QT_SHOW_SELECT_NUMBER) &&
thd->lex->all_selects_list &&
thd->lex->all_selects_list->link_next &&
@@ -30244,7 +30819,10 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
str->append(STRING_WITH_LEN(" */ "));
}
- str->append(STRING_WITH_LEN("select "));
+ if (sel_type == SELECT_CMD ||
+ sel_type == INSERT_CMD ||
+ sel_type == REPLACE_CMD)
+ str->append(STRING_WITH_LEN("select "));
if (join && join->cleaned)
{
@@ -30290,57 +30868,66 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
}
//Item List
- bool first= 1;
+ if (sel_type == SELECT_CMD ||
+ sel_type == INSERT_CMD ||
+ sel_type == REPLACE_CMD)
+ print_item_list(thd, str, query_type);
/*
- outer_select() can not be used here because it is for name resolution
- and will return NULL at any end of name resolution chain (view/derived)
+ from clause
+ TODO: support USING/FORCE/IGNORE index
*/
- bool top_level= (get_master()->get_master() == 0);
- List_iterator_fast<Item> it(item_list);
- Item *item;
- while ((item= it++))
+ if (table_list.elements)
{
- if (first)
- first= 0;
- else
- str->append(',');
-
- if ((is_subquery_function() && !item->is_explicit_name()) ||
- !item->name.str)
+ if (sel_type == SELECT_CMD ||
+ sel_type == INSERT_CMD ||
+ sel_type == REPLACE_CMD)
{
- /*
- Do not print auto-generated aliases in subqueries. It has no purpose
- in a view definition or other contexts where the query is printed.
- */
- item->print(str, query_type);
+ str->append(STRING_WITH_LEN(" from "));
+ /* go through join tree */
+ print_join(thd, join? join->eliminated_tables: 0, str, &top_join_list,
+ query_type);
}
- else
+ if (sel_type == UPDATE_CMD || sel_type == DELETE_CMD)
+ str->append(get_explainable_cmd_name(sel_type));
+ if (sel_type == DELETE_CMD)
{
- /*
- Do not print illegal names (if it is not top level SELECT).
- Top level view checked (and correct name are assigned),
- other cases of top level SELECT are not important, because
- it is not "table field".
- */
- if (top_level ||
- item->is_explicit_name() ||
- !check_column_name(item->name.str))
- item->print_item_w_name(str, query_type);
+ str->append(STRING_WITH_LEN(" from "));
+ bool first= true;
+ for (TABLE_LIST *target_tbl= thd->lex->auxiliary_table_list.first;
+ target_tbl;
+ target_tbl= target_tbl->next_local)
+ {
+ if (first)
+ first= false;
+ else
+ str->append(',');
+ target_tbl->correspondent_table->print_leaf_tables(thd, str,
+ query_type);
+ }
+
+ if (!first)
+ str->append(STRING_WITH_LEN(" using "));
+ }
+ if (sel_type == UPDATE_CMD || sel_type == DELETE_CMD)
+ {
+ if (join)
+ print_join(thd, 0, str, &top_join_list, query_type);
else
- item->print(str, query_type);
+ {
+ bool first= true;
+ List_iterator_fast<TABLE_LIST> li(leaf_tables);
+ TABLE_LIST *tbl;
+ while ((tbl= li++))
+ {
+ if (first)
+ first= false;
+ else
+ str->append(',');
+ tbl->print(thd, 0, str, query_type);
+ }
+ }
}
}
-
- /*
- from clause
- TODO: support USING/FORCE/IGNORE index
- */
- if (table_list.elements)
- {
- str->append(STRING_WITH_LEN(" from "));
- /* go through join tree */
- print_join(thd, join? join->eliminated_tables: 0, str, &top_join_list, query_type);
- }
else if (where)
{
/*
@@ -30350,10 +30937,15 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
str->append(STRING_WITH_LEN(" from DUAL "));
}
+ if (sel_type == UPDATE_CMD)
+ print_set_clause(thd, str, query_type);
+
// Where
Item *cur_where= where;
if (join)
cur_where= join->conds;
+ else if (sel_type == UPDATE_CMD || sel_type == DELETE_CMD)
+ cur_where= thd->lex->upd_del_where;
if (cur_where || cond_value != Item::COND_UNDEF)
{
str->append(STRING_WITH_LEN(" where "));
@@ -30412,6 +31004,15 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
if (unlikely(skip_locked))
str->append(STRING_WITH_LEN(" skip locked"));
+ if ((sel_type == INSERT_CMD || sel_type == REPLACE_CMD) &&
+ thd->lex->update_list.elements)
+ print_on_duplicate_key_clause(thd, str, query_type);
+
+ // returning clause
+ if (sel_type == DELETE_CMD && !item_list.elements)
+ {
+ print_item_list(thd, str, query_type);
+ }
// PROCEDURE unsupported here
}
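
For reference, a sketch of the statement text the reworked st_select_lex::print()
produces when EXPLAIN EXTENDED reprints a multi-table DELETE (tables t1, t2 and
the condition are hypothetical):

  -- original statement:
  delete t1 from t1, t2 where t1.a = t2.a;
  -- reconstructed text: the auxiliary (deleted-from) tables are printed via
  -- print_leaf_tables(), the join via print_join(), and the condition comes
  -- from thd->lex->upd_del_where:
  delete from t1 using t1,t2 where t1.a = t2.a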
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 0d53d4c9798..ed8f459fd6c 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -310,6 +310,7 @@ typedef struct st_join_table {
Table_access_tracker *jbuf_tracker;
Time_and_counter_tracker *jbuf_unpack_tracker;
+ Counter_tracker *jbuf_loops_tracker;
// READ_RECORD::Setup_func materialize_table;
READ_RECORD::Setup_func read_first_record;
@@ -440,6 +441,8 @@ typedef struct st_join_table {
*/
bool idx_cond_fact_out;
bool use_join_cache;
+ /* TRUE <=> it is prohibited to join this table using join buffer */
+ bool no_forced_join_cache;
uint used_join_cache_level;
ulong join_buffer_size_limit;
JOIN_CACHE *cache;
@@ -566,6 +569,16 @@ typedef struct st_join_table {
bool preread_init_done;
+ /* true <=> split optimization has been applied to this materialized table */
+ bool is_split_derived;
+
+ /*
+ Bitmap of split materialized derived tables that can be filled just before
+ this join table is to be joined. All parameters of the split derived tables
+ belong to tables preceding this join table.
+ */
+ table_map split_derived_to_update;
+
/*
Cost info to the range filter used when joining this join table
(Defined when the best join order has been already chosen)
@@ -729,9 +742,10 @@ typedef struct st_join_table {
void partial_cleanup();
void add_keyuses_for_splitting();
- SplM_plan_info *choose_best_splitting(double record_count,
- table_map remaining_tables);
- bool fix_splitting(SplM_plan_info *spl_plan, table_map remaining_tables,
+ SplM_plan_info *choose_best_splitting(uint idx,
+ table_map remaining_tables,
+ table_map *spl_pd_boundary);
+ bool fix_splitting(SplM_plan_info *spl_plan, table_map excluded_tables,
bool is_const_table);
} JOIN_TAB;
@@ -1039,9 +1053,21 @@ public:
*/
KEYUSE *key;
+  /* Cardinality of the current partial join ending at this position */
+ double partial_join_cardinality;
+
/* Info on splitting plan used at this position */
SplM_plan_info *spl_plan;
+ /*
+    If spl_plan is NULL the value of spl_pd_boundary is 0. Otherwise
+    spl_pd_boundary contains the bit of the table (from the current partial
+    join ending at this position) that starts the sub-sequence of tables S
+    from which no conditions may be used in the plan spl_plan for the split
+    table joined at this position.
+ */
+ table_map spl_pd_boundary;
+
/* Cost info for the range filter used at this position */
Range_rowid_filter_cost_info *range_rowid_filter_info;
diff --git a/sql/sql_sequence.cc b/sql/sql_sequence.cc
index 60da595afd0..fdb9c647727 100644
--- a/sql/sql_sequence.cc
+++ b/sql/sql_sequence.cc
@@ -1011,10 +1011,19 @@ bool Sql_cmd_alter_sequence::execute(THD *thd)
else
table->file->print_error(error, MYF(0));
seq->write_unlock(table);
- if (trans_commit_stmt(thd))
- error= 1;
- if (trans_commit_implicit(thd))
- error= 1;
+ {
+ wait_for_commit* suspended_wfc= thd->suspend_subsequent_commits();
+ if (trans_commit_stmt(thd))
+ error= 1;
+ if (trans_commit_implicit(thd))
+ error= 1;
+ thd->resume_subsequent_commits(suspended_wfc);
+ DBUG_EXECUTE_IF("hold_worker_on_schedule",
+ {
+ /* delay binlogging of a parent trx in rpl_parallel_seq */
+ my_sleep(100000);
+ });
+ }
if (likely(!error))
error= write_bin_log(thd, 1, thd->query(), thd->query_length());
if (likely(!error))
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 8879d5f61c1..fa95aa66b0f 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -2659,7 +2659,8 @@ static int show_create_view(THD *thd, TABLE_LIST *table, String *buff)
a different syntax, like when ANSI_QUOTES is defined.
*/
table->view->unit.print(buff, enum_query_type(QT_VIEW_INTERNAL |
- QT_ITEM_ORIGINAL_FUNC_NULLIF));
+ QT_ITEM_ORIGINAL_FUNC_NULLIF |
+ QT_NO_WRAPPERS_FOR_TVC_IN_VIEW));
if (table->with_check != VIEW_CHECK_NONE)
{
@@ -8840,7 +8841,6 @@ bool optimize_schema_tables_memory_usage(List<TABLE_LIST> &tables)
TMP_TABLE_PARAM *p= table_list->schema_table_param;
TMP_ENGINE_COLUMNDEF *from_recinfo, *to_recinfo;
DBUG_ASSERT(table->s->keys == 0);
- DBUG_ASSERT(table->s->uniques == 0);
uchar *cur= table->field[0]->ptr;
/* first recinfo could be a NULL bitmap, not an actual Field */
diff --git a/sql/sql_sort.h b/sql/sql_sort.h
index 88f719b3593..2b2c08b59cd 100644
--- a/sql/sql_sort.h
+++ b/sql/sql_sort.h
@@ -20,6 +20,7 @@
#include "my_base.h" /* ha_rows */
#include <my_sys.h> /* qsort2_cmp */
#include "queues.h"
+#include "sql_string.h"
#include "sql_class.h"
class Field;
diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc
index ecde847c8d4..ccf1ebb9ef9 100644
--- a/sql/sql_statistics.cc
+++ b/sql/sql_statistics.cc
@@ -4029,50 +4029,16 @@ double Histogram_binary::point_selectivity(Field *field, key_range *endpoint,
}
else
{
- /*
+ /*
The value 'pos' fits within one single histogram bucket.
- Histogram_binary buckets have the same numbers of rows, but they cover
- different ranges of values.
-
- We assume that values are uniformly distributed across the [0..1] value
- range.
- */
-
- /*
- If all buckets covered value ranges of the same size, the width of
- value range would be:
+      We also have avg_sel, the per-table average selectivity of col=const.
+      If there are popular values, avg_sel may exceed the selectivity of a
+      single bucket, so cap the returned value at one bucket's selectivity.
*/
double avg_bucket_width= 1.0 / (get_width() + 1);
-
- /*
- Let's see what is the width of value range that our bucket is covering.
- (min==max currently. they are kept in the formula just in case we
- will want to extend it to handle multi-bucket case)
- */
- double inv_prec_factor= (double) 1.0 / prec_factor();
- double current_bucket_width=
- (max + 1 == get_width() ? 1.0 : (get_value(max) * inv_prec_factor)) -
- (min == 0 ? 0.0 : (get_value(min-1) * inv_prec_factor));
-
- DBUG_ASSERT(current_bucket_width); /* We shouldn't get a one zero-width bucket */
-
- /*
- So:
- - each bucket has the same #rows
- - values are unformly distributed across the [min_value,max_value] domain.
- If a bucket has value range that's N times bigger then average, than
- each value will have to have N times fewer rows than average.
- */
- sel= avg_sel * avg_bucket_width / current_bucket_width;
-
- /*
- (Q: if we just follow this proportion we may end up in a situation
- where number of different values we expect to find in this bucket
- exceeds the number of rows that this histogram has in a bucket. Are
- we ok with this or we would want to have certain caps?)
- */
+ sel= MY_MIN(avg_bucket_width, avg_sel);
}
return sel;
}
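
A worked instance of the capped estimate, assuming a histogram where
get_width() returns 127 and a column whose avg_sel is 0.02 due to a popular
value:

  avg_bucket_width = 1.0 / (127 + 1) = 0.0078125
  sel = MY_MIN(avg_bucket_width, avg_sel) = MY_MIN(0.0078125, 0.02) = 0.0078125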
@@ -4147,7 +4113,7 @@ bool is_eits_usable(Field *field)
if (!col_stats)
return false;
- DBUG_ASSERT(field->table->stats_is_read);
+ DBUG_ASSERT(field->orig_table->stats_is_read);
/*
(1): checks if we have EITS statistics for a particular column
@@ -4160,8 +4126,8 @@ bool is_eits_usable(Field *field)
return !col_stats->no_stat_values_provided() && //(1)
field->type() != MYSQL_TYPE_GEOMETRY && //(2)
#ifdef WITH_PARTITION_STORAGE_ENGINE
- (!field->table->part_info ||
- !field->table->part_info->field_in_partition_expr(field)) && //(3)
+ (!field->orig_table->part_info ||
+ !field->orig_table->part_info->field_in_partition_expr(field)) && //(3)
#endif
true;
}
diff --git a/sql/sql_string.cc b/sql/sql_string.cc
index 14db5ad7c7d..679cbdfc2b5 100644
--- a/sql/sql_string.cc
+++ b/sql/sql_string.cc
@@ -563,7 +563,7 @@ bool String::append(const char *s,size_t size)
}
/*
- For an ASCII compatinble string we can just append.
+ For an ASCII compatible string we can just append.
*/
return Binary_string::append(s, arg_length);
}
diff --git a/sql/sql_string.h b/sql/sql_string.h
index 9729d9e85fd..dbb4760ab34 100644
--- a/sql/sql_string.h
+++ b/sql/sql_string.h
@@ -203,6 +203,83 @@ public:
{
return m_charset != &my_charset_bin;
}
+
+ /*
+ The MariaDB version when the last collation change happened,
+ e.g. due to a bug fix. See functions below.
+ */
+ static ulong latest_mariadb_version_with_collation_change()
+ {
+ return 110002;
+ }
+
+ /*
+ Check if the collation with the given ID changed its order
+ since the given MariaDB version.
+ */
+ static bool collation_changed_order(ulong mysql_version, uint cs_number)
+ {
+ if ((mysql_version < 50048 &&
+ (cs_number == 11 || /* ascii_general_ci - bug #29499, bug #27562 */
+ cs_number == 41 || /* latin7_general_ci - bug #29461 */
+ cs_number == 42 || /* latin7_general_cs - bug #29461 */
+ cs_number == 20 || /* latin7_estonian_cs - bug #29461 */
+ cs_number == 21 || /* latin2_hungarian_ci - bug #29461 */
+ cs_number == 22 || /* koi8u_general_ci - bug #29461 */
+ cs_number == 23 || /* cp1251_ukrainian_ci - bug #29461 */
+ cs_number == 26)) || /* cp1250_general_ci - bug #29461 */
+ (mysql_version < 50124 &&
+ (cs_number == 33 || /* utf8mb3_general_ci - bug #27877 */
+ cs_number == 35))) /* ucs2_general_ci - bug #27877 */
+ return true;
+
+ if (cs_number == 159 && /* ucs2_general_mysql500_ci - MDEV-30746 */
+ ((mysql_version >= 100400 && mysql_version < 100429) ||
+ (mysql_version >= 100500 && mysql_version < 100520) ||
+ (mysql_version >= 100600 && mysql_version < 100613) ||
+ (mysql_version >= 100700 && mysql_version < 100708) ||
+ (mysql_version >= 100800 && mysql_version < 100808) ||
+ (mysql_version >= 100900 && mysql_version < 100906) ||
+ (mysql_version >= 101000 && mysql_version < 101004) ||
+ (mysql_version >= 101100 && mysql_version < 101103) ||
+ (mysql_version >= 110000 && mysql_version < 110002)))
+ return true;
+ return false;
+ }
+
+ /**
+ Check if a collation has changed ID since the given version.
+ Return the new ID.
+
+    @param mysql_version - version of the server that wrote the definition
+    @param cs_number     - collation ID
+
+ @retval the new collation ID (or cs_number, if no change)
+ */
+
+ static uint upgrade_collation_id(ulong mysql_version, uint cs_number)
+ {
+ if (mysql_version >= 50300 && mysql_version <= 50399)
+ {
+ switch (cs_number) {
+      case 149: return MY_PAGE2_COLLATION_ID_UCS2; // ucs2_croatian_ci
+      case 213: return MY_PAGE2_COLLATION_ID_UTF8; // utf8_croatian_ci
+ }
+ }
+ if ((mysql_version >= 50500 && mysql_version <= 50599) ||
+ (mysql_version >= 100000 && mysql_version <= 100005))
+ {
+ switch (cs_number) {
+      case 149: return MY_PAGE2_COLLATION_ID_UCS2; // ucs2_croatian_ci
+      case 213: return MY_PAGE2_COLLATION_ID_UTF8; // utf8_croatian_ci
+ case 214: return MY_PAGE2_COLLATION_ID_UTF32; // utf32_croatian_ci
+ case 215: return MY_PAGE2_COLLATION_ID_UTF16; // utf16_croatian_ci
+ case 245: return MY_PAGE2_COLLATION_ID_UTF8MB4;// utf8mb4_croatian_ci
+ }
+ }
+ return cs_number;
+ }
+
};
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 26a58e66ffe..8e951bc48de 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -12299,6 +12299,19 @@ bool Sql_cmd_create_table_like::execute(THD *thd)
}
#endif
+#ifdef WITH_WSREP
+ if (select_lex->item_list.elements && // With SELECT
+ WSREP(thd) && thd->variables.wsrep_trx_fragment_size > 0)
+ {
+ my_message(
+ ER_NOT_ALLOWED_COMMAND,
+ "CREATE TABLE AS SELECT is not supported with streaming replication",
+ MYF(0));
+ res= 1;
+ goto end_with_restore_list;
+ }
+#endif /* WITH_WSREP */
+
if (select_lex->item_list.elements || select_lex->tvc) // With select or TVC
{
select_result *result;
diff --git a/sql/sql_tvc.cc b/sql/sql_tvc.cc
index b4a7a0d5091..cb1971a1746 100644
--- a/sql/sql_tvc.cc
+++ b/sql/sql_tvc.cc
@@ -706,6 +706,7 @@ st_select_lex *wrap_tvc(THD *thd, st_select_lex *tvc_sl,
wrapper_sl->select_number= ++thd->lex->stmt_lex->current_select_number;
wrapper_sl->parent_lex= lex; /* Used in init_query. */
wrapper_sl->make_empty_select();
+ wrapper_sl->is_tvc_wrapper= true;
wrapper_sl->nest_level= tvc_sl->nest_level;
wrapper_sl->parsing_place= tvc_sl->parsing_place;
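
The flag set here pairs with QT_NO_WRAPPERS_FOR_TVC_IN_VIEW above; a sketch of
the intended effect on a hypothetical VALUES view:

  create view v1 as values (1),(2);
  -- with the flag, st_select_lex::print() descends into the wrapped TVC and
  -- the view definition prints as:   values (1),(2)
  -- instead of the internal wrapper select around the table value constructor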
diff --git a/sql/sql_type.cc b/sql/sql_type.cc
index 213de62705f..269532e2390 100644
--- a/sql/sql_type.cc
+++ b/sql/sql_type.cc
@@ -9035,13 +9035,13 @@ Type_handler_timestamp_common::Item_val_native_with_conversion(THD *thd,
Item *item,
Native *to) const
{
- MYSQL_TIME ltime;
if (item->type_handler()->type_handler_for_native_format() ==
&type_handler_timestamp2)
return item->val_native(thd, to);
+ Datetime dt(thd, item, Datetime::Options(TIME_NO_ZERO_IN_DATE, thd));
return
- item->get_date(thd, &ltime, Datetime::Options(TIME_NO_ZERO_IN_DATE, thd)) ||
- TIME_to_native(thd, &ltime, to, item->datetime_precision(thd));
+ !dt.is_valid_datetime() ||
+ TIME_to_native(thd, dt.get_mysql_time(), to, item->datetime_precision(thd));
}
bool Type_handler_null::union_element_finalize(Item_type_holder *item) const
diff --git a/sql/sql_type.h b/sql/sql_type.h
index d931c7ffb6d..52c17d61d2e 100644
--- a/sql/sql_type.h
+++ b/sql/sql_type.h
@@ -4060,14 +4060,14 @@ public:
*/
virtual void make_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const= 0;
+ String *tmp) const= 0;
/*
create a compact size key part for a sort key
*/
virtual uint make_packed_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const=0;
+ String *tmp) const=0;
virtual void sort_length(THD *thd,
const Type_std_attributes *item,
@@ -4473,12 +4473,12 @@ public:
uint32 flags) const override;
void make_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const override
+ String *tmp) const override
{
MY_ASSERT_UNREACHABLE();
}
uint make_packed_sort_key_part(uchar *, Item *, const SORT_FIELD_ATTR *,
- Sort_param *) const override
+ String *) const override
{
MY_ASSERT_UNREACHABLE();
return 0;
@@ -4818,10 +4818,10 @@ public:
const override;
void make_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const override;
+ String *tmp) const override;
uint make_packed_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const override;
+ String *tmp) const override;
void sort_length(THD *thd,
const Type_std_attributes *item,
SORT_FIELD_ATTR *attr) const override;
@@ -4930,10 +4930,10 @@ public:
const override;
void make_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const override;
+ String *tmp) const override;
uint make_packed_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const override;
+ String *tmp) const override;
void
Column_definition_attributes_frm_pack(const Column_definition_attributes *at,
uchar *buff) const override;
@@ -5186,10 +5186,10 @@ public:
TABLE_SHARE *share) const override;
void make_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const override;
+ String *tmp) const override;
uint make_packed_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const override;
+ String *tmp) const override;
void
Column_definition_attributes_frm_pack(const Column_definition_attributes *at,
uchar *buff) const override;
@@ -5297,10 +5297,10 @@ public:
uchar *buff) const override;
void make_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const override;
+ String *tmp) const override;
uint make_packed_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const override;
+ String *tmp) const override;
void sort_length(THD *thd,
const Type_std_attributes *item,
SORT_FIELD_ATTR *attr) const override;
@@ -5397,10 +5397,10 @@ public:
CHARSET_INFO *cs) const override;
void make_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const override;
+ String *tmp) const override;
uint make_packed_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const override;
+ String *tmp) const override;
void sort_length(THD *thd,
const Type_std_attributes *item,
SORT_FIELD_ATTR *attr) const override;
@@ -6620,10 +6620,10 @@ public:
const override;
void make_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const override;
+ String *tmp) const override;
uint make_packed_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const override;
+ String *tmp) const override;
void sort_length(THD *thd,
const Type_std_attributes *item,
SORT_FIELD_ATTR *attr) const override;
diff --git a/sql/sql_type_fixedbin.h b/sql/sql_type_fixedbin.h
index c1be1c9ccba..852ec1269eb 100644
--- a/sql/sql_type_fixedbin.h
+++ b/sql/sql_type_fixedbin.h
@@ -468,7 +468,7 @@ public:
return def->frm_unpack_charset(share, buffer);
}
void make_sort_key_part(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const override
+ String *) const override
{
DBUG_ASSERT(item->type_handler() == this);
NativeBuffer<FbtImpl::binary_length()+1> tmp;
@@ -489,7 +489,7 @@ public:
}
uint make_packed_sort_key_part(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
- Sort_param *param) const override
+ String *) const override
{
DBUG_ASSERT(item->type_handler() == this);
NativeBuffer<FbtImpl::binary_length()+1> tmp;
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 2be2a85b889..3b2a690860b 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -1279,7 +1279,8 @@ produce_explain_and_leave:
goto err;
emit_explain_and_leave:
- int err2= thd->lex->explain->send_explain(thd);
+ bool extended= thd->lex->describe & DESCRIBE_EXTENDED;
+ int err2= thd->lex->explain->send_explain(thd, extended);
delete select;
free_underlaid_joins(thd, select_lex);
@@ -3070,7 +3071,10 @@ bool Sql_cmd_update::execute_inner(THD *thd)
else
{
if (thd->lex->describe || thd->lex->analyze_stmt)
- res= thd->lex->explain->send_explain(thd);
+ {
+ bool extended= thd->lex->describe & DESCRIBE_EXTENDED;
+ res= thd->lex->explain->send_explain(thd, extended);
+ }
}
}
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 89251e33f7f..9eb4bf382b6 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -1006,10 +1006,12 @@ static int mysql_register_view(THD *thd, DDL_LOG_STATE *ddl_log_state,
Sql_mode_instant_remove sms(thd, MODE_ANSI_QUOTES);
lex->unit.print(&view_query, enum_query_type(QT_VIEW_INTERNAL |
- QT_ITEM_ORIGINAL_FUNC_NULLIF));
+ QT_ITEM_ORIGINAL_FUNC_NULLIF |
+ QT_NO_WRAPPERS_FOR_TVC_IN_VIEW));
lex->unit.print(&is_query, enum_query_type(QT_TO_SYSTEM_CHARSET |
QT_WITHOUT_INTRODUCERS |
- QT_ITEM_ORIGINAL_FUNC_NULLIF));
+ QT_ITEM_ORIGINAL_FUNC_NULLIF |
+ QT_NO_WRAPPERS_FOR_TVC_IN_VIEW));
}
DBUG_PRINT("info", ("View: %.*s", view_query.length(), view_query.ptr()));
@@ -1806,7 +1808,7 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table,
if (view_is_mergeable &&
(table->select_lex->master_unit() != &old_lex->unit ||
old_lex->can_use_merged()) &&
- !old_lex->can_not_use_merged(0))
+ !old_lex->can_not_use_merged())
{
/* lex should contain at least one table */
DBUG_ASSERT(view_main_select_tables != 0);
@@ -1839,8 +1841,11 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table,
*/
if (!table->select_lex->master_unit()->is_unit_op() &&
table->select_lex->order_list.elements == 0)
+ {
table->select_lex->order_list.
push_back(&lex->first_select_lex()->order_list);
+ lex->first_select_lex()->order_list.empty();
+ }
else
{
if (old_lex->sql_command == SQLCOM_SELECT &&
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index c8a9ec0cb41..24284402e84 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -223,6 +223,7 @@ void _CONCAT_UNDERSCORED(turn_parser_debug_on,yyparse)()
Lex_for_loop_bounds_st for_loop_bounds;
Lex_trim_st trim;
Json_table_column::On_response json_on_response;
+ Lex_substring_spec_st substring_spec;
vers_history_point_t vers_history_point;
struct
{
@@ -347,9 +348,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
*/
%ifdef MARIADB
-%expect 82
+%expect 64
%else
-%expect 83
+%expect 65
%endif
/*
@@ -634,7 +635,6 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%token <kwd> RELEASE_SYM /* SQL-2003-R */
%token <kwd> RENAME
%token <kwd> REPEAT_SYM /* MYSQL-FUNC */
-%token <kwd> REPLACE /* MYSQL-FUNC */
%token <kwd> REQUIRE_SYM
%token <kwd> RESIGNAL_SYM /* SQL-2003-R */
%token <kwd> RESTRICT
@@ -671,7 +671,6 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%token <kwd> STDDEV_SAMP_SYM /* SQL-2003-N */
%token <kwd> STD_SYM
%token <kwd> STRAIGHT_JOIN
-%token <kwd> SUBSTRING /* SQL-2003-N */
%token <kwd> SUM_SYM /* SQL-2003-N */
%token <kwd> SYSDATE
%token <kwd> TABLE_REF_PRIORITY
@@ -684,7 +683,6 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%token <kwd> TO_SYM /* SQL-2003-R */
%token <kwd> TRAILING /* SQL-2003-R */
%token <kwd> TRIGGER_SYM /* SQL-2003-R */
-%token <kwd> TRIM /* SQL-2003-N */
%token <kwd> TRUE_SYM /* SQL-2003-R */
%token <kwd> UNDO_SYM /* FUTURE-USE */
%token <kwd> UNION_SYM /* SQL-2003-R */
@@ -730,6 +728,14 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%token <kwd> ROWNUM_SYM /* Oracle-R */
/*
+ SQL functions with a special syntax
+*/
+%token <kwd> REPLACE /* MYSQL-FUNC */
+%token <kwd> SUBSTRING /* SQL-2003-N */
+%token <kwd> TRIM /* SQL-2003-N */
+
+
+/*
Non-reserved keywords
*/
@@ -1180,7 +1186,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%left PREC_BELOW_NOT
-%nonassoc LOW_PRIORITY_NOT
+/* The precedence of boolean NOT is in fact here. See the comment below. */
+
%left '=' EQUAL_SYM GE '>' LE '<' NE
%nonassoc IS
%right BETWEEN_SYM
@@ -1192,6 +1199,24 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%left '*' '/' '%' DIV_SYM MOD_SYM
%left '^'
%left MYSQL_CONCAT_SYM
+/*
+ Boolean negation has a special branch in "expr" starting with NOT_SYM.
+ The precedence of logical negation is determined by the grammar itself
+  (without using Bison terminal symbol precedence) in this order:
+ - Boolean factor (i.e. logical AND)
+ - Boolean NOT
+ - Boolean test (such as '=', IS NULL, IS TRUE)
+
+ But we also need a precedence for NOT_SYM in other contexts,
+ to shift (without reduce) in these cases:
+ predicate <here> NOT IN ...
+ predicate <here> NOT BETWEEN ...
+ predicate <here> NOT LIKE ...
+ predicate <here> NOT REGEXP ...
+  If the precedence of NOT_SYM were low, the parser would reduce immediately
+  after scanning "predicate" and then produce a syntax error on "NOT".
+*/
+%nonassoc NOT_SYM
%nonassoc NEG '~' NOT2_SYM BINARY
%nonassoc COLLATE_SYM
%nonassoc SUBQUERY_AS_EXPR
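
A few SQL probes of the parses this precedence scheme is meant to give (a
sketch; the comments show the intended grouping):

  select not 1 = 2;              -- parsed as not (1 = 2)
  select 1 not between 2 and 3;  -- NOT_SYM shifts after "predicate"
  select 1 not like '2';         -- same: shift, not a reduce to "NOT expr"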
@@ -1480,6 +1505,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
literal insert_ident order_ident temporal_literal
simple_ident expr sum_expr in_sum_expr
variable variable_aux
+ boolean_test
predicate bit_expr parenthesized_expr
table_wild simple_expr column_default_non_parenthesized_expr udf_expr
primary_expr string_factor_expr mysql_concatenation_expr
@@ -1755,6 +1781,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%type <for_loop> sp_for_loop_index_and_bounds
%type <for_loop_bounds> sp_for_loop_bounds
%type <trim> trim_operands
+%type <substring_spec> substring_operands
%type <num> opt_sp_for_loop_direction
%type <spvar_mode> sp_parameter_type
%type <index_hint> index_hint_type
@@ -9139,79 +9166,83 @@ expr:
MYSQL_YYABORT;
}
}
- | NOT_SYM expr %prec LOW_PRIORITY_NOT
+ | NOT_SYM expr
{
$$= negate_expression(thd, $2);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr IS TRUE_SYM %prec IS
+ | boolean_test %prec PREC_BELOW_NOT
+ ;
+
+boolean_test:
+ boolean_test IS TRUE_SYM %prec IS
{
$$= new (thd->mem_root) Item_func_istrue(thd, $1);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr IS not TRUE_SYM %prec IS
+ | boolean_test IS not TRUE_SYM %prec IS
{
$$= new (thd->mem_root) Item_func_isnottrue(thd, $1);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr IS FALSE_SYM %prec IS
+ | boolean_test IS FALSE_SYM %prec IS
{
$$= new (thd->mem_root) Item_func_isfalse(thd, $1);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr IS not FALSE_SYM %prec IS
+ | boolean_test IS not FALSE_SYM %prec IS
{
$$= new (thd->mem_root) Item_func_isnotfalse(thd, $1);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr IS UNKNOWN_SYM %prec IS
+ | boolean_test IS UNKNOWN_SYM %prec IS
{
$$= new (thd->mem_root) Item_func_isnull(thd, $1);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr IS not UNKNOWN_SYM %prec IS
+ | boolean_test IS not UNKNOWN_SYM %prec IS
{
$$= new (thd->mem_root) Item_func_isnotnull(thd, $1);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr IS NULL_SYM %prec PREC_BELOW_NOT
+ | boolean_test IS NULL_SYM %prec IS
{
$$= new (thd->mem_root) Item_func_isnull(thd, $1);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr IS not NULL_SYM %prec IS
+ | boolean_test IS not NULL_SYM %prec IS
{
$$= new (thd->mem_root) Item_func_isnotnull(thd, $1);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr EQUAL_SYM predicate %prec EQUAL_SYM
+ | boolean_test EQUAL_SYM predicate %prec EQUAL_SYM
{
$$= new (thd->mem_root) Item_func_equal(thd, $1, $3);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr comp_op predicate %prec '='
+ | boolean_test comp_op predicate %prec '='
{
$$= (*$2)(0)->create(thd, $1, $3);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | expr comp_op all_or_any '(' subselect ')' %prec '='
+ | boolean_test comp_op all_or_any '(' subselect ')' %prec '='
{
$$= all_any_subquery_creator(thd, $1, $2, $3, $5);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | predicate
+ | predicate %prec BETWEEN_SYM
;
predicate:
@@ -9965,7 +9996,8 @@ function_call_keyword:
}
| TRIM '(' trim_operands ')'
{
- if (unlikely(!($$= $3.make_item_func_trim(thd))))
+ if (unlikely(!($$= Schema::find_implied(thd)->
+ make_item_func_trim(thd, $3))))
MYSQL_YYABORT;
}
| USER_SYM '(' ')'
@@ -9985,6 +10017,26 @@ function_call_keyword:
}
;
+substring_operands:
+ expr ',' expr ',' expr
+ {
+ $$= Lex_substring_spec_st::init($1, $3, $5);
+ }
+ | expr ',' expr
+ {
+ $$= Lex_substring_spec_st::init($1, $3);
+ }
+ | expr FROM expr FOR_SYM expr
+ {
+ $$= Lex_substring_spec_st::init($1, $3, $5);
+ }
+ | expr FROM expr
+ {
+ $$= Lex_substring_spec_st::init($1, $3);
+ }
+ ;
+
+
/*
Function calls using non reserved keywords, with special syntaxic forms.
Dedicated grammar rules are needed because of the syntax,
@@ -10117,24 +10169,10 @@ function_call_nonkeyword:
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
- | SUBSTRING '(' expr ',' expr ',' expr ')'
- {
- if (unlikely(!($$= Lex->make_item_func_substr(thd, $3, $5, $7))))
- MYSQL_YYABORT;
- }
- | SUBSTRING '(' expr ',' expr ')'
- {
- if (unlikely(!($$= Lex->make_item_func_substr(thd, $3, $5))))
- MYSQL_YYABORT;
- }
- | SUBSTRING '(' expr FROM expr FOR_SYM expr ')'
- {
- if (unlikely(!($$= Lex->make_item_func_substr(thd, $3, $5, $7))))
- MYSQL_YYABORT;
- }
- | SUBSTRING '(' expr FROM expr ')'
+ | SUBSTRING '(' substring_operands ')'
{
- if (unlikely(!($$= Lex->make_item_func_substr(thd, $3, $5))))
+ if (unlikely(!($$= Schema::find_implied(thd)->
+ make_item_func_substr(thd, $3))))
MYSQL_YYABORT;
}
%ifdef ORACLE
@@ -10347,7 +10385,8 @@ function_call_conflict:
}
| REPLACE '(' expr ',' expr ',' expr ')'
{
- if (unlikely(!($$= Lex->make_item_func_replace(thd, $3, $5, $7))))
+ if (unlikely(!($$= Schema::find_implied(thd)->
+ make_item_func_replace(thd, $3, $5, $7))))
MYSQL_YYABORT;
}
| REVERSE_SYM '(' expr ')'
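
The four invocation forms that substring_operands folds into one
Lex_substring_spec_st (see structs.h below); equivalent pairs:

  select substring('abcd', 2, 2);         -- (subject, from, for)
  select substring('abcd' from 2 for 2);  -- same spec, SQL-standard syntax
  select substring('abcd', 2);            -- two-argument form, m_for == NULL
  select substring('abcd' from 2);        -- same spec, SQL-standard syntax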
diff --git a/sql/structs.h b/sql/structs.h
index 214fcb242ff..5b43948a563 100644
--- a/sql/structs.h
+++ b/sql/structs.h
@@ -903,6 +903,11 @@ public:
}
Item *make_item_func_trim_std(THD *thd) const;
Item *make_item_func_trim_oracle(THD *thd) const;
+ /*
+ This method is still used to handle LTRIM and RTRIM,
+ while the special syntax TRIM(... BOTH|LEADING|TRAILING)
+ is now handled by Schema::make_item_func_trim().
+ */
Item *make_item_func_trim(THD *thd) const;
};
@@ -914,6 +919,25 @@ public:
};
+class Lex_substring_spec_st
+{
+public:
+ Item *m_subject;
+ Item *m_from;
+ Item *m_for;
+ static Lex_substring_spec_st init(Item *subject,
+ Item *from,
+ Item *xfor= NULL)
+ {
+ Lex_substring_spec_st res;
+ res.m_subject= subject;
+ res.m_from= from;
+ res.m_for= xfor;
+ return res;
+ }
+};
+
+
class st_select_lex;
class Lex_select_lock
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index 541a3658d99..c98a8b60746 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -604,10 +604,9 @@ bool check_has_super(sys_var *self, THD *thd, set_var *var)
return false;
}
-static Sys_var_bit Sys_core_file("core_file", "write a core-file on crashes",
- READ_ONLY GLOBAL_VAR(test_flags), NO_CMD_LINE,
- TEST_CORE_ON_SIGNAL, DEFAULT(IF_WIN(TRUE,FALSE)), NO_MUTEX_GUARD, NOT_IN_BINLOG,
- 0,0,0);
+static Sys_var_bit Sys_core_file("core_file", "Write core on crashes",
+ READ_ONLY GLOBAL_VAR(test_flags), CMD_LINE(OPT_ARG),
+ TEST_CORE_ON_SIGNAL, DEFAULT(IF_WIN(TRUE,FALSE)));
static bool binlog_format_check(sys_var *self, THD *thd, set_var *var)
{
@@ -804,12 +803,26 @@ static bool check_charset(sys_var *self, THD *thd, set_var *var)
{
int csno= (int)var->value->val_int();
CHARSET_INFO *cs;
- if (!(var->save_result.ptr= cs= get_charset(csno, MYF(0))) ||
- !(cs->state & MY_CS_PRIMARY))
+ if ((var->save_result.ptr= cs= get_charset(csno, MYF(0))))
{
- my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), llstr(csno, buff));
- return true;
+ /*
+      Backward compatibility: pre-MDEV-30824 servers
+      could write non-default collation IDs to the binary log:
+        SET character_set_client=83; -- utf8mb3_bin
+      Convert a non-default collation to the compiled default collation,
+      e.g. utf8mb3_bin to utf8mb3_general_ci, but only if the THD
+      - is a slave thread, or
+      - is processing mysqlbinlog output.
+ */
+ if ((cs->state & MY_CS_PRIMARY) ||
+ ((thd->variables.pseudo_slave_mode || thd->slave_thread) &&
+ (var->save_result.ptr=
+ Lex_exact_charset_opt_extended_collate(cs, true).
+ find_default_collation())))
+ return false;
}
+ my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), llstr(csno, buff));
+ return true;
}
return false;
}
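
A sketch of the binlog-replay scenario the relaxed check targets (collation
ID 83 is the example from the comment above):

  -- written by a pre-MDEV-30824 server into the binary log:
  SET character_set_client = 83;  -- utf8mb3_bin, a non-default collation
  -- a slave thread, or a session with pseudo_slave_mode=1 replaying
  -- mysqlbinlog output, now maps it to utf8mb3_general_ci instead of
  -- failing with ER_UNKNOWN_CHARACTER_SET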
@@ -2878,6 +2891,7 @@ export const char *optimizer_switch_names[]=
"rowid_filter",
"condition_pushdown_from_having",
"not_null_range_scan",
+ "hash_join_cardinality",
"default",
NullS
};
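
The new entry is toggled like any other optimizer_switch flag, e.g.:

  set optimizer_switch='hash_join_cardinality=on';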
diff --git a/sql/table.cc b/sql/table.cc
index a8eeeb53d72..b8ed7a99156 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -46,6 +46,10 @@
#include "sql_show.h"
#include "opt_trace.h"
#include "sql_db.h" // get_default_db_collation
+#include "sql_update.h" // class Sql_cmd_update
+#include "sql_delete.h" // class Sql_cmd_delete
+
+
/* For MySQL 5.7 virtual fields */
#define MYSQL57_GENERATED_FIELD 128
@@ -942,39 +946,6 @@ static uint enum_value_with_check(THD *thd, TABLE_SHARE *share,
}
-/**
- Check if a collation has changed number
-
- @param mysql_version
- @param current collation number
-
- @retval new collation number (same as current collation number of no change)
-*/
-
-static uint upgrade_collation(ulong mysql_version, uint cs_number)
-{
- if (mysql_version >= 50300 && mysql_version <= 50399)
- {
- switch (cs_number) {
- case 149: return MY_PAGE2_COLLATION_ID_UCS2; // ucs2_crotian_ci
- case 213: return MY_PAGE2_COLLATION_ID_UTF8; // utf8_crotian_ci
- }
- }
- if ((mysql_version >= 50500 && mysql_version <= 50599) ||
- (mysql_version >= 100000 && mysql_version <= 100005))
- {
- switch (cs_number) {
- case 149: return MY_PAGE2_COLLATION_ID_UCS2; // ucs2_crotian_ci
- case 213: return MY_PAGE2_COLLATION_ID_UTF8; // utf8_crotian_ci
- case 214: return MY_PAGE2_COLLATION_ID_UTF32; // utf32_croatian_ci
- case 215: return MY_PAGE2_COLLATION_ID_UTF16; // utf16_croatian_ci
- case 245: return MY_PAGE2_COLLATION_ID_UTF8MB4;// utf8mb4_croatian_ci
- }
- }
- return cs_number;
-}
-
-
void Column_definition_attributes::frm_pack_basic(uchar *buff) const
{
int2store(buff + 3, length);
@@ -1034,7 +1005,7 @@ bool Column_definition_attributes::frm_unpack_charset(TABLE_SHARE *share,
const uchar *buff)
{
uint cs_org= buff[14] + (((uint) buff[11]) << 8);
- uint cs_new= upgrade_collation(share->mysql_version, cs_org);
+ uint cs_new= Charset::upgrade_collation_id(share->mysql_version, cs_org);
if (cs_org != cs_new)
share->incompatible_version|= HA_CREATE_USED_CHARSET;
if (cs_new && !(charset= get_charset(cs_new, MYF(0))))
@@ -1894,7 +1865,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
if (!frm_image[32]) // New frm file in 3.23
{
uint cs_org= (((uint) frm_image[41]) << 8) + (uint) frm_image[38];
- uint cs_new= upgrade_collation(share->mysql_version, cs_org);
+ uint cs_new= Charset::upgrade_collation_id(share->mysql_version, cs_org);
if (cs_org != cs_new)
share->incompatible_version|= HA_CREATE_USED_CHARSET;
@@ -3110,6 +3081,9 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
goto err;
field= key_part->field= share->field[key_part->fieldnr-1];
+ if (Charset::collation_changed_order(share->mysql_version,
+ field->charset()->number))
+ share->incompatible_version|= HA_CREATE_USED_CHARSET;
key_part->type= field->key_type();
if (field->invisible > INVISIBLE_USER && !field->vers_sys_field())
@@ -3572,7 +3546,6 @@ int TABLE_SHARE::init_from_sql_statement_string(THD *thd, bool write,
char *sql_copy;
handler *file;
LEX *old_lex;
- Query_arena *arena, backup;
LEX tmp_lex;
KEY *unused1;
uint unused2;
@@ -3599,11 +3572,6 @@ int TABLE_SHARE::init_from_sql_statement_string(THD *thd, bool write,
old_lex= thd->lex;
thd->lex= &tmp_lex;
- arena= thd->stmt_arena;
- if (arena->is_conventional())
- arena= 0;
- else
- thd->set_n_backup_active_arena(arena, &backup);
/*
THD::reset_db() does not set THD::db_charset,
@@ -3655,8 +3623,6 @@ ret:
lex_end(thd->lex);
thd->reset_db(&db_backup);
thd->lex= old_lex;
- if (arena)
- thd->restore_active_arena(arena, &backup);
reenable_binlog(thd);
thd->variables.character_set_client= old_cs;
if (unlikely(thd->is_error() || error))
@@ -6886,6 +6852,9 @@ bool TABLE_LIST::prepare_security(THD *thd)
#ifndef DBUG_OFF
void TABLE_LIST::set_check_merged()
{
+ if (is_view())
+ return;
+
DBUG_ASSERT(derived);
/*
It is not simple to check all, but at least this should be checked:
@@ -6903,7 +6872,7 @@ void TABLE_LIST::set_check_materialized()
DBUG_ENTER("TABLE_LIST::set_check_materialized");
SELECT_LEX_UNIT *derived= this->derived;
if (view)
- derived= &view->unit;
+ derived= this->derived= &view->unit;
DBUG_ASSERT(derived);
DBUG_ASSERT(!derived->is_excluded());
if (!derived->first_select()->exclude_from_table_unique_test)
@@ -7213,10 +7182,9 @@ void Field_iterator_table_ref::set_field_iterator()
table_ref->alias.str));
}
/* This is a merge view, so use field_translation. */
- else if (table_ref->field_translation &&
- !table_ref->is_materialized_derived())
+ else if (!table_ref->is_materialized_derived() &&
+ table_ref->is_merged_derived() && table_ref->field_translation)
{
- DBUG_ASSERT(table_ref->is_merged_derived());
field_it= &view_field_it;
DBUG_PRINT("info", ("field_it for '%s' is Field_iterator_view",
table_ref->alias.str));
@@ -9303,7 +9271,7 @@ bool TABLE::check_period_overlaps(const KEY &key,
return false;
uint kp_len= key.key_part[part_nr].length;
if (f->cmp_prefix(f->ptr_in_record(lhs), f->ptr_in_record(rhs),
- kp_len) != 0)
+ kp_len / f->charset()->mbmaxlen) != 0)
return false;
}
@@ -9722,8 +9690,13 @@ void TABLE_LIST::wrap_into_nested_join(List<TABLE_LIST> &join_list)
static inline bool derived_table_optimization_done(TABLE_LIST *table)
{
- return table->derived &&
- (table->derived->is_excluded() ||
+ SELECT_LEX_UNIT *derived= (table->derived ?
+ table->derived :
+ (table->view ?
+ &table->view->unit:
+ NULL));
+ return derived &&
+ (derived->is_excluded() ||
table->is_materialized_derived());
}
@@ -9785,20 +9758,36 @@ bool TABLE_LIST::init_derived(THD *thd, bool init_view)
set_derived();
}
- if (!is_view() &&
- !derived_table_optimization_done(this))
+ if (!derived_table_optimization_done(this))
{
/* A subquery might be forced to be materialized due to a side-effect. */
- bool forced_no_merge_for_update_delete=
- belong_to_view ? belong_to_view->updating :
- !unit->outer_select()->outer_select();
- if (!is_materialized_derived() && first_select->is_mergeable() &&
- (unit->outer_select() && !unit->outer_select()->with_rownum) &&
+ if (!is_materialized_derived() && unit->can_be_merged() &&
+ /*
+ Following is special case of
+ SELECT * FROM (<limited-select>) WHERE ROWNUM() <= nnn
+ */
+ (unit->outer_select() &&
+ !(unit->outer_select()->with_rownum &&
+ unit->outer_select()->table_list.elements == 1 &&
+ (thd->lex->sql_command == SQLCOM_SELECT ||
+ !unit->outer_select()->is_query_topmost(thd)) &&
+ !is_view())) &&
+
(!thd->lex->with_rownum ||
(!first_select->group_list.elements &&
!first_select->order_list.elements)) &&
- optimizer_flag(thd, OPTIMIZER_SWITCH_DERIVED_MERGE) &&
- !thd->lex->can_not_use_merged(forced_no_merge_for_update_delete) &&
+ (is_view() ||
+ optimizer_flag(thd, OPTIMIZER_SWITCH_DERIVED_MERGE)) &&
+ !thd->lex->can_not_use_merged() &&
+ !(!is_view() &&
+ (thd->lex->sql_command == SQLCOM_UPDATE_MULTI ||
+ thd->lex->sql_command == SQLCOM_DELETE_MULTI ||
+ (thd->lex->sql_command == SQLCOM_UPDATE &&
+ (((Sql_cmd_update *) thd->lex->m_sql_cmd)->is_multitable() ||
+ thd->lex->query_tables->is_multitable())) ||
+ (thd->lex->sql_command == SQLCOM_DELETE &&
+ (((Sql_cmd_delete *) thd->lex->m_sql_cmd)->is_multitable() ||
+ thd->lex->query_tables->is_multitable())))) &&
!is_recursive_with_table())
set_merged_derived();
else
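
A sketch of the ROWNUM special case mentioned in the comment above
(hypothetical table t1): the derived table is kept materialized so the
inner "limited select" semantics survive:

  select * from (select a from t1 order by a) dt where rownum() <= 10;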
diff --git a/sql/table.h b/sql/table.h
index cad6720570d..d0d6f1e178b 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -853,7 +853,18 @@ struct TABLE_SHARE
uint keys, key_parts;
uint ext_key_parts; /* Total number of key parts in extended keys */
uint max_key_length, max_unique_length;
- uint uniques; /* Number of UNIQUE index */
+
+ /*
+    Older versions had TABLE_SHARE::uniques; it has been replaced with the
+    per-index HA_UNIQUE_HASH flag.
+ */
+ bool have_unique_constraint() const
+ {
+ for (uint i=0; i < keys; i++)
+ if (key_info[i].flags & HA_UNIQUE_HASH)
+ return true;
+ return false;
+ }
uint db_create_options; /* Create options from database */
uint db_options_in_use; /* Options in use */
uint db_record_offset; /* if HA_REC_IN_SEQ */
@@ -2817,6 +2828,8 @@ struct TABLE_LIST
}
void print(THD *thd, table_map eliminated_tables, String *str,
enum_query_type query_type);
+ void print_leaf_tables(THD *thd, String *str,
+ enum_query_type query_type);
bool check_single_table(TABLE_LIST **table, table_map map,
TABLE_LIST *view);
bool set_insert_values(MEM_ROOT *mem_root);
@@ -2957,8 +2970,7 @@ struct TABLE_LIST
DBUG_PRINT("enter", ("Alias: '%s' Unit: %p",
(alias.str ? alias.str : "<NULL>"),
get_unit()));
- derived_type= static_cast<uint8>((derived_type & DTYPE_MASK) |
- DTYPE_TABLE | DTYPE_MERGE);
+ derived_type= static_cast<uint8>((derived_type & DTYPE_MASK) | DTYPE_MERGE);
set_check_merged();
DBUG_VOID_RETURN;
}
@@ -2972,10 +2984,9 @@ struct TABLE_LIST
DBUG_PRINT("enter", ("Alias: '%s' Unit: %p",
(alias.str ? alias.str : "<NULL>"),
get_unit()));
- derived= get_unit();
derived_type= static_cast<uint8>((derived_type &
(derived ? DTYPE_MASK : DTYPE_VIEW)) |
- DTYPE_TABLE | DTYPE_MATERIALIZE);
+ DTYPE_MATERIALIZE);
set_check_materialized();
DBUG_VOID_RETURN;
}
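
A hypothetical table that satisfies have_unique_constraint(), assuming the
long-unique implementation backs the constraint with a hash key flagged
HA_UNIQUE_HASH:

  create table t1 (b blob, unique(b));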
diff --git a/sql/tztime.cc b/sql/tztime.cc
index a2add055deb..e77a7529332 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -2435,9 +2435,9 @@ print_tz_as_sql(const char* tz_name, const TIME_ZONE_INFO *sp)
#define SAVE_ENGINE(e) \
- "\"select ENGINE into @" e "_engine" \
+ "'select ENGINE into @" e "_engine" \
" from information_schema.TABLES" \
- " where TABLE_SCHEMA=DATABASE() and TABLE_NAME='" e "'\""
+ " where TABLE_SCHEMA=DATABASE() and TABLE_NAME=''" e "'''"
/*
Print info about leap seconds in time zone as SQL statements
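
At the SQL level the re-quoted macro is meant to expand, for a hypothetical
argument "time_zone", to the single-quoted literal

  'select ENGINE into @time_zone_engine from information_schema.TABLES
   where TABLE_SCHEMA=DATABASE() and TABLE_NAME=''time_zone'''

i.e. the doubled '' escapes the quote so the executed statement compares
TABLE_NAME='time_zone'.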
diff --git a/sql/wsrep_client_service.cc b/sql/wsrep_client_service.cc
index 628a408b49b..d3b4a18195b 100644
--- a/sql/wsrep_client_service.cc
+++ b/sql/wsrep_client_service.cc
@@ -374,8 +374,6 @@ int Wsrep_client_service::bf_rollback()
m_thd->global_read_lock.unlock_global_read_lock(m_thd);
}
m_thd->release_transactional_locks();
- mysql_ull_cleanup(m_thd);
- m_thd->mdl_context.release_explicit_locks();
}
DBUG_RETURN(ret);
diff --git a/sql/wsrep_high_priority_service.cc b/sql/wsrep_high_priority_service.cc
index 7d8296a75a1..53ef20f3e78 100644
--- a/sql/wsrep_high_priority_service.cc
+++ b/sql/wsrep_high_priority_service.cc
@@ -391,8 +391,6 @@ int Wsrep_high_priority_service::rollback(const wsrep::ws_handle& ws_handle,
m_thd->killed);
m_thd->release_transactional_locks();
- mysql_ull_cleanup(m_thd);
- m_thd->mdl_context.release_explicit_locks();
free_root(m_thd->mem_root, MYF(MY_KEEP_PREALLOC));
@@ -502,7 +500,13 @@ int Wsrep_high_priority_service::log_dummy_write_set(const wsrep::ws_handle& ws_
if (!WSREP_EMULATE_BINLOG(m_thd))
{
wsrep_register_for_group_commit(m_thd);
- ret = ret || cs.provider().commit_order_leave(ws_handle, ws_meta, err);
+      /* wait_for_prior_commit() ensures that all preceding transactions
+         have been committed and the seqno has been synced into the
+         storage engine. We don't release commit order here yet, to
+         prevent following transactions from syncing the seqno before
+         wsrep_set_SE_checkpoint() below returns. This effectively pauses
+         group commit for the checkpoint operation, but is the only way to
+         ensure proper ordering. */
m_thd->wait_for_prior_commit();
}
@@ -512,10 +516,7 @@ int Wsrep_high_priority_service::log_dummy_write_set(const wsrep::ws_handle& ws_
{
wsrep_unregister_from_group_commit(m_thd);
}
- else
- {
- ret= ret || cs.provider().commit_order_leave(ws_handle, ws_meta, err);
- }
+ ret= ret || cs.provider().commit_order_leave(ws_handle, ws_meta, err);
cs.after_applying();
}
DBUG_RETURN(ret);
diff --git a/sql/wsrep_schema.cc b/sql/wsrep_schema.cc
index 3634d636e7c..443f3c4fcd2 100644
--- a/sql/wsrep_schema.cc
+++ b/sql/wsrep_schema.cc
@@ -1038,10 +1038,9 @@ int Wsrep_schema::append_fragment(THD* thd,
Wsrep_schema_impl::store(frag_table, 3, flags);
Wsrep_schema_impl::store(frag_table, 4, data.data(), data.size());
- int error;
- if ((error= Wsrep_schema_impl::insert(frag_table))) {
- WSREP_ERROR("Failed to write to frag table: %d", error);
+ if (Wsrep_schema_impl::insert(frag_table)) {
trans_rollback_stmt(thd);
+ close_thread_tables(thd);
thd->lex->restore_backup_query_tables_list(&query_tables_list_backup);
DBUG_RETURN(1);
}
diff --git a/sql/wsrep_thd.h b/sql/wsrep_thd.h
index 3d1bf3733a8..0ce612d6097 100644
--- a/sql/wsrep_thd.h
+++ b/sql/wsrep_thd.h
@@ -228,7 +228,14 @@ static inline void wsrep_override_error(THD* thd,
break;
case wsrep::e_append_fragment_error:
/* TODO: Figure out better error number */
- wsrep_override_error(thd, ER_ERROR_DURING_COMMIT, 0, status);
+ if (status)
+ wsrep_override_error(thd, ER_ERROR_DURING_COMMIT,
+                         "Error while appending streaming replication fragment "
+ "(provider status: %s)",
+ wsrep::provider::to_string(status).c_str());
+ else
+ wsrep_override_error(thd, ER_ERROR_DURING_COMMIT,
+ "Error while appending streaming replication fragment");
break;
case wsrep::e_not_supported_error:
wsrep_override_error(thd, ER_NOT_SUPPORTED_YET);
diff --git a/sql/wsrep_trans_observer.h b/sql/wsrep_trans_observer.h
index 6ba14f96dd8..812452f451a 100644
--- a/sql/wsrep_trans_observer.h
+++ b/sql/wsrep_trans_observer.h
@@ -1,4 +1,4 @@
-/* Copyright 2016-2022 Codership Oy <http://www.codership.com>
+/* Copyright 2016-2023 Codership Oy <http://www.codership.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -217,6 +217,19 @@ static inline bool wsrep_run_commit_hook(THD* thd, bool all)
}
mysql_mutex_unlock(&thd->LOCK_thd_data);
}
+
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+  /* A transaction creating a sequence runs in TOI or RSU mode:
+     CREATE [TEMPORARY] SEQUENCE = CREATE + INSERT (initial value),
+     replicated using statement-based replication, thus
+     the commit hooks must be skipped */
+ if (ret &&
+ (thd->wsrep_cs().mode() == wsrep::client_state::m_toi ||
+ thd->wsrep_cs().mode() == wsrep::client_state::m_rsu) &&
+ thd->lex->sql_command == SQLCOM_CREATE_SEQUENCE)
+ ret= false;
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+
DBUG_PRINT("wsrep", ("return: %d", ret));
DBUG_RETURN(ret);
}