author     Oleksandr Byelkin <sanja@mariadb.com>    2020-03-11 17:52:49 +0100
committer  Oleksandr Byelkin <sanja@mariadb.com>    2020-03-11 17:52:49 +0100
commit     fad47df9957d754bec12d4d327c77ae04f71d903 (patch)
tree       58aaf6077adcb0bc0003228d30bed59687527650 /sql
parent     9d7ed94f6a526748eff29dae2939a3fd341f118b (diff)
parent     b7362d5fbc37dec340aeacd1fb0967c4226c022a (diff)
Merge branch '10.4' into 10.5
Diffstat (limited to 'sql')
-rw-r--r--  sql/events.cc                   |   8
-rw-r--r--  sql/ha_partition.cc             |  40
-rw-r--r--  sql/ha_partition.h              | 426
-rw-r--r--  sql/handler.cc                  |  20
-rw-r--r--  sql/handler.h                   |  14
-rw-r--r--  sql/item_strfunc.h              |   2
-rw-r--r--  sql/log_event_server.cc         |  10
-rw-r--r--  sql/mdl.cc                      |  11
-rw-r--r--  sql/multi_range_read.cc         |  46
-rw-r--r--  sql/multi_range_read.h          |  16
-rw-r--r--  sql/my_json_writer.cc           | 121
-rw-r--r--  sql/my_json_writer.h            | 208
-rw-r--r--  sql/opt_index_cond_pushdown.cc  |  15
-rw-r--r--  sql/opt_range.cc                |  21
-rw-r--r--  sql/opt_subselect.cc            |  30
-rw-r--r--  sql/opt_trace.cc                | 113
-rw-r--r--  sql/opt_trace.h                 |   2
-rw-r--r--  sql/opt_trace_context.h         |  54
-rw-r--r--  sql/slave.cc                    |  31
-rw-r--r--  sql/sql_class.cc                |   6
-rw-r--r--  sql/sql_class.h                 |   6
-rw-r--r--  sql/sql_join_cache.cc           |   2
-rw-r--r--  sql/sql_reload.cc               |   6
-rw-r--r--  sql/sql_select.cc               |  43
-rw-r--r--  sql/sql_show.cc                 |   2
-rw-r--r--  sql/sql_table.cc                | 118
-rw-r--r--  sql/sql_test.cc                 |   5
-rw-r--r--  sql/sql_yacc.yy                 |  38
-rw-r--r--  sql/table.cc                    |   2
-rw-r--r--  sql/wsrep_thd.cc                |   8
30 files changed, 838 insertions, 586 deletions
diff --git a/sql/events.cc b/sql/events.cc
index 3bed25e20c3..91adc0c23ba 100644
--- a/sql/events.cc
+++ b/sql/events.cc
@@ -1218,9 +1218,9 @@ Events::load_events_from_db(THD *thd)
#ifdef WITH_WSREP
/**
- IF SST is done from a galera node that is also acting as MASTER
- newly synced node in galera eco-system will also copy-over the event state
- enabling duplicate event in galera eco-system.
+ If SST is done from a galera node that is also acting as MASTER
+ newly synced node in galera eco-system will also copy-over the
+ event state enabling duplicate event in galera eco-system.
DISABLE such events if the current node is not event orginator.
(Also, make sure you skip disabling it if is already disabled to avoid
creation of redundant action)
@@ -1230,7 +1230,7 @@ Events::load_events_from_db(THD *thd)
Infact, based on galera use-case it seems like it recommends to have each
node with different server-id.
*/
- if (et->originator != thd->variables.server_id)
+ if (WSREP(thd) && et->originator != thd->variables.server_id)
{
if (et->status == Event_parse_data::SLAVESIDE_DISABLED)
continue;
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index f30ef6a9688..2e5355580c7 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -6317,9 +6317,10 @@ static bool partition_multi_range_key_skip_record(range_seq_t seq,
{
PARTITION_PART_KEY_MULTI_RANGE_HLD *hld=
(PARTITION_PART_KEY_MULTI_RANGE_HLD *)seq;
+ PARTITION_KEY_MULTI_RANGE *pkmr= (PARTITION_KEY_MULTI_RANGE *)range_info;
DBUG_ENTER("partition_multi_range_key_skip_record");
DBUG_RETURN(hld->partition->m_seq_if->skip_record(hld->partition->m_seq,
- range_info, rowid));
+ pkmr->ptr, rowid));
}
@@ -6328,9 +6329,10 @@ static bool partition_multi_range_key_skip_index_tuple(range_seq_t seq,
{
PARTITION_PART_KEY_MULTI_RANGE_HLD *hld=
(PARTITION_PART_KEY_MULTI_RANGE_HLD *)seq;
+ PARTITION_KEY_MULTI_RANGE *pkmr= (PARTITION_KEY_MULTI_RANGE *)range_info;
DBUG_ENTER("partition_multi_range_key_skip_index_tuple");
DBUG_RETURN(hld->partition->m_seq_if->skip_index_tuple(hld->partition->m_seq,
- range_info));
+ pkmr->ptr));
}
ha_rows ha_partition::multi_range_read_info_const(uint keyno,
@@ -11802,6 +11804,40 @@ void ha_partition::clear_top_table_fields()
DBUG_VOID_RETURN;
}
+bool
+ha_partition::can_convert_string(const Field_string* field,
+ const Column_definition& new_type) const
+{
+ for (uint index= 0; index < m_tot_parts; index++)
+ {
+ if (!m_file[index]->can_convert_string(field, new_type))
+ return false;
+ }
+ return true;
+}
+
+bool
+ha_partition::can_convert_varstring(const Field_varstring* field,
+ const Column_definition& new_type) const{
+ for (uint index= 0; index < m_tot_parts; index++)
+ {
+ if (!m_file[index]->can_convert_varstring(field, new_type))
+ return false;
+ }
+ return true;
+}
+
+bool
+ha_partition::can_convert_blob(const Field_blob* field,
+ const Column_definition& new_type) const
+{
+ for (uint index= 0; index < m_tot_parts; index++)
+ {
+ if (!m_file[index]->can_convert_blob(field, new_type))
+ return false;
+ }
+ return true;
+}
struct st_mysql_storage_engine partition_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
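
Note: the three can_convert_*() methods added above share one pattern: a
capability query against a partitioned table holds only if every underlying
partition handler agrees, so the wrapper ANDs the per-partition answers.
(The skip_record()/skip_index_tuple() hunks earlier in this file fix a
related indirection: range_info arrives as a PARTITION_KEY_MULTI_RANGE
wrapper, and the table-level callback needs the wrapped pkmr->ptr, not the
wrapper.) A minimal self-contained sketch of the fan-out pattern, with
stand-in types (part_handler mirrors the role of m_file/m_tot_parts but is
not a server type):

    #include <cstddef>

    /* Sketch of the "AND over all partitions" delegation used by
       ha_partition::can_convert_string() and friends. */
    struct part_handler
    {
      bool can_convert() const { return true; } /* per-partition capability */
    };

    static bool all_parts_can_convert(part_handler **file, size_t tot_parts)
    {
      for (size_t i= 0; i < tot_parts; i++)
      {
        if (!file[i]->can_convert())
          return false;               /* one refusing partition vetoes all */
      }
      return true;                    /* conversion is safe everywhere */
    }
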
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index ff3093098a4..e50ed2aeef7 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -3,7 +3,7 @@
/*
Copyright (c) 2005, 2012, Oracle and/or its affiliates.
- Copyright (c) 2009, 2013, Monty Program Ab & SkySQL Ab.
+ Copyright (c) 2009, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -224,7 +224,7 @@ typedef struct st_partition_key_multi_range
/*
- List of ranges to be scanned in a certain [sub]partition.
+ List of ranges to be scanned in a certain [sub]partition
The idea is that there's a list of ranges to be scanned in the table
(formed by PARTITION_KEY_MULTI_RANGE structures),
@@ -260,10 +260,10 @@ typedef struct st_partition_part_key_multi_range_hld
/* Owner object */
ha_partition *partition;
- // id of the the partition this structure is for
+ /* id of the the partition this structure is for */
uint32 part_id;
- // Current range we're iterating through.
+ /* Current range we're iterating through */
PARTITION_PART_KEY_MULTI_RANGE *partition_part_key_multi_range;
} PARTITION_PART_KEY_MULTI_RANGE_HLD;
@@ -431,18 +431,18 @@ private:
/** Stores shared auto_increment etc. */
Partition_share *part_share;
/** Fix spurious -Werror=overloaded-virtual in GCC 9 */
- virtual void restore_auto_increment(ulonglong prev_insert_id)
+ virtual void restore_auto_increment(ulonglong prev_insert_id) override
{
handler::restore_auto_increment(prev_insert_id);
}
/** Store and restore next_auto_inc_val over duplicate key errors. */
- virtual void store_auto_increment()
+ void store_auto_increment() override
{
DBUG_ASSERT(part_share);
part_share->prev_auto_inc_val= part_share->next_auto_inc_val;
handler::store_auto_increment();
}
- virtual void restore_auto_increment()
+ void restore_auto_increment() override
{
DBUG_ASSERT(part_share);
part_share->next_auto_inc_val= part_share->prev_auto_inc_val;
@@ -450,7 +450,7 @@ private:
}
void sum_copy_info(handler *file);
void sum_copy_infos();
- void reset_copy_info();
+ void reset_copy_info() override;
/** Temporary storage for new partitions Handler_shares during ALTER */
List<Parts_share_refs> m_new_partitions_share_refs;
/** Sorted array of partition ids in descending order of number of rows. */
@@ -483,16 +483,16 @@ public:
return NO_CURRENT_PART_ID;
}
Partition_share *get_part_share() { return part_share; }
- handler *clone(const char *name, MEM_ROOT *mem_root);
- virtual void set_part_info(partition_info *part_info)
+ handler *clone(const char *name, MEM_ROOT *mem_root) override;
+ virtual void set_part_info(partition_info *part_info) override
{
m_part_info= part_info;
m_is_sub_partitioned= part_info->is_sub_partitioned();
}
- virtual void return_record_by_parent();
+ void return_record_by_parent() override;
- virtual bool vers_can_native(THD *thd)
+ bool vers_can_native(THD *thd) override
{
if (thd->lex->part_info)
{
@@ -551,31 +551,30 @@ public:
object needed in opening the object in openfrm
-------------------------------------------------------------------------
*/
- virtual int delete_table(const char *from);
- virtual int rename_table(const char *from, const char *to);
- virtual int create(const char *name, TABLE *form,
- HA_CREATE_INFO *create_info);
- virtual int create_partitioning_metadata(const char *name,
- const char *old_name, int action_flag);
- virtual void update_create_info(HA_CREATE_INFO *create_info);
- virtual char *update_table_comment(const char *comment);
- virtual int change_partitions(HA_CREATE_INFO *create_info,
- const char *path,
- ulonglong * const copied,
- ulonglong * const deleted,
- const uchar *pack_frm_data,
- size_t pack_frm_len);
- virtual int drop_partitions(const char *path);
- virtual int rename_partitions(const char *path);
- bool get_no_parts(const char *name, uint *num_parts)
+ int delete_table(const char *from) override;
+ int rename_table(const char *from, const char *to) override;
+ int create(const char *name, TABLE *form,
+ HA_CREATE_INFO *create_info) override;
+ int create_partitioning_metadata(const char *name,
+ const char *old_name, int action_flag)
+ override;
+ void update_create_info(HA_CREATE_INFO *create_info) override;
+ char *update_table_comment(const char *comment) override;
+ int change_partitions(HA_CREATE_INFO *create_info, const char *path,
+ ulonglong * const copied, ulonglong * const deleted,
+ const uchar *pack_frm_data, size_t pack_frm_len)
+ override;
+ int drop_partitions(const char *path) override;
+ int rename_partitions(const char *path) override;
+ bool get_no_parts(const char *, uint *num_parts) override
{
DBUG_ENTER("ha_partition::get_no_parts");
*num_parts= m_tot_parts;
DBUG_RETURN(0);
}
- virtual void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share);
- virtual bool check_if_incompatible_data(HA_CREATE_INFO *create_info,
- uint table_changes);
+ void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share) override;
+ bool check_if_incompatible_data(HA_CREATE_INFO *create_info,
+ uint table_changes) override;
void update_part_create_info(HA_CREATE_INFO *create_info, uint part_id)
{
m_file[part_id]->update_create_info(create_info);
@@ -614,7 +613,7 @@ private:
bool is_subpart);
bool populate_partition_name_hash();
Partition_share *get_share();
- bool set_ha_share_ref(Handler_share **ha_share);
+ bool set_ha_share_ref(Handler_share **ha_share) override;
void fix_data_dir(char* path);
bool init_partition_bitmaps();
void free_partition_bitmaps();
@@ -634,8 +633,8 @@ public:
being used for normal queries (not before meta-data changes always.
If the object was opened it will also be closed before being deleted.
*/
- virtual int open(const char *name, int mode, uint test_if_locked);
- virtual int close(void);
+ int open(const char *name, int mode, uint test_if_locked) override;
+ int close() override;
/*
-------------------------------------------------------------------------
@@ -650,31 +649,31 @@ public:
and these go directly to the handlers supporting transactions
-------------------------------------------------------------------------
*/
- virtual THR_LOCK_DATA **store_lock(THD * thd, THR_LOCK_DATA ** to,
- enum thr_lock_type lock_type);
- virtual int external_lock(THD * thd, int lock_type);
- LEX_CSTRING *engine_name() { return hton_name(partition_ht()); }
+ THR_LOCK_DATA **store_lock(THD * thd, THR_LOCK_DATA ** to,
+ enum thr_lock_type lock_type) override;
+ int external_lock(THD * thd, int lock_type) override;
+ LEX_CSTRING *engine_name() override { return hton_name(partition_ht()); }
/*
When table is locked a statement is started by calling start_stmt
instead of external_lock
*/
- virtual int start_stmt(THD * thd, thr_lock_type lock_type);
+ int start_stmt(THD * thd, thr_lock_type lock_type) override;
/*
Lock count is number of locked underlying handlers (I assume)
*/
- virtual uint lock_count(void) const;
+ uint lock_count() const override;
/*
Call to unlock rows not to be updated in transaction
*/
- virtual void unlock_row();
+ void unlock_row() override;
/*
Check if semi consistent read
*/
- virtual bool was_semi_consistent_read();
+ bool was_semi_consistent_read() override;
/*
Call to hint about semi consistent read
*/
- virtual void try_semi_consistent_read(bool);
+ void try_semi_consistent_read(bool) override;
/*
NOTE: due to performance and resource issues with many partitions,
@@ -708,28 +707,28 @@ public:
start_bulk_insert and end_bulk_insert is called before and after a
number of calls to write_row.
*/
- virtual int write_row(const uchar * buf);
- virtual bool start_bulk_update();
- virtual int exec_bulk_update(ha_rows *dup_key_found);
- virtual int end_bulk_update();
- virtual int bulk_update_row(const uchar *old_data, const uchar *new_data,
- ha_rows *dup_key_found);
- virtual int update_row(const uchar * old_data, const uchar * new_data);
- virtual int direct_update_rows_init(List<Item> *update_fields);
- virtual int pre_direct_update_rows_init(List<Item> *update_fields);
- virtual int direct_update_rows(ha_rows *update_rows, ha_rows *found_rows);
- virtual int pre_direct_update_rows();
- virtual bool start_bulk_delete();
- virtual int end_bulk_delete();
- virtual int delete_row(const uchar * buf);
- virtual int direct_delete_rows_init();
- virtual int pre_direct_delete_rows_init();
- virtual int direct_delete_rows(ha_rows *delete_rows);
- virtual int pre_direct_delete_rows();
- virtual int delete_all_rows(void);
- virtual int truncate();
- virtual void start_bulk_insert(ha_rows rows, uint flags);
- virtual int end_bulk_insert();
+ int write_row(const uchar * buf) override;
+ bool start_bulk_update() override;
+ int exec_bulk_update(ha_rows *dup_key_found) override;
+ int end_bulk_update() override;
+ int bulk_update_row(const uchar *old_data, const uchar *new_data,
+ ha_rows *dup_key_found) override;
+ int update_row(const uchar * old_data, const uchar * new_data) override;
+ int direct_update_rows_init(List<Item> *update_fields) override;
+ int pre_direct_update_rows_init(List<Item> *update_fields) override;
+ int direct_update_rows(ha_rows *update_rows, ha_rows *found_rows) override;
+ int pre_direct_update_rows() override;
+ bool start_bulk_delete() override;
+ int end_bulk_delete() override;
+ int delete_row(const uchar * buf) override;
+ int direct_delete_rows_init() override;
+ int pre_direct_delete_rows_init() override;
+ int direct_delete_rows(ha_rows *delete_rows) override;
+ int pre_direct_delete_rows() override;
+ int delete_all_rows() override;
+ int truncate() override;
+ void start_bulk_insert(ha_rows rows, uint flags) override;
+ int end_bulk_insert() override;
private:
ha_rows guess_bulk_insert_rows();
void start_part_bulk_insert(THD *thd, uint part_id);
@@ -745,7 +744,7 @@ public:
*/
int truncate_partition(Alter_info *, bool *binlog_stmt);
- virtual bool is_fatal_error(int error, uint flags)
+ bool is_fatal_error(int error, uint flags) override
{
if (!handler::is_fatal_error(error, flags) ||
error == HA_ERR_NO_PARTITION_FOUND ||
@@ -780,12 +779,12 @@ public:
position it to the start of the table, no need to deallocate
and allocate it again
*/
- virtual int rnd_init(bool scan);
- virtual int rnd_end();
- virtual int rnd_next(uchar * buf);
- virtual int rnd_pos(uchar * buf, uchar * pos);
- virtual int rnd_pos_by_record(uchar *record);
- virtual void position(const uchar * record);
+ int rnd_init(bool scan) override;
+ int rnd_end() override;
+ int rnd_next(uchar * buf) override;
+ int rnd_pos(uchar * buf, uchar * pos) override;
+ int rnd_pos_by_record(uchar *record) override;
+ void position(const uchar * record) override;
/*
-------------------------------------------------------------------------
@@ -819,11 +818,11 @@ public:
index_init initializes an index before using it and index_end does
any end processing needed.
*/
- virtual int index_read_map(uchar * buf, const uchar * key,
- key_part_map keypart_map,
- enum ha_rkey_function find_flag);
- virtual int index_init(uint idx, bool sorted);
- virtual int index_end();
+ int index_read_map(uchar * buf, const uchar * key,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag) override;
+ int index_init(uint idx, bool sorted) override;
+ int index_end() override;
/**
@breif
@@ -831,36 +830,36 @@ public:
row if available. If the key value is null, begin at first key of the
index.
*/
- virtual int index_read_idx_map(uchar *buf, uint index, const uchar *key,
- key_part_map keypart_map,
- enum ha_rkey_function find_flag);
+ int index_read_idx_map(uchar *buf, uint index, const uchar *key,
+ key_part_map keypart_map,
+ enum ha_rkey_function find_flag) override;
/*
These methods are used to jump to next or previous entry in the index
scan. There are also methods to jump to first and last entry.
*/
- virtual int index_next(uchar * buf);
- virtual int index_prev(uchar * buf);
- virtual int index_first(uchar * buf);
- virtual int index_last(uchar * buf);
- virtual int index_next_same(uchar * buf, const uchar * key, uint keylen);
+ int index_next(uchar * buf) override;
+ int index_prev(uchar * buf) override;
+ int index_first(uchar * buf) override;
+ int index_last(uchar * buf) override;
+ int index_next_same(uchar * buf, const uchar * key, uint keylen) override;
int index_read_last_map(uchar *buf,
const uchar *key,
- key_part_map keypart_map);
+ key_part_map keypart_map) override;
/*
read_first_row is virtual method but is only implemented by
handler.cc, no storage engine has implemented it so neither
will the partition handler.
- virtual int read_first_row(uchar *buf, uint primary_key);
+ int read_first_row(uchar *buf, uint primary_key) override;
*/
- virtual int read_range_first(const key_range * start_key,
- const key_range * end_key,
- bool eq_range, bool sorted);
- virtual int read_range_next();
+ int read_range_first(const key_range * start_key,
+ const key_range * end_key,
+ bool eq_range, bool sorted) override;
+ int read_range_next() override;
HANDLER_BUFFER *m_mrr_buffer;
@@ -870,28 +869,31 @@ public:
uint m_mrr_new_full_buffer_size;
MY_BITMAP m_mrr_used_partitions;
uint *m_stock_range_seq;
- // not used: uint m_current_range_seq;
+ /* not used: uint m_current_range_seq; */
- // Value of mrr_mode passed to ha_partition::multi_range_read_init
+ /* Value of mrr_mode passed to ha_partition::multi_range_read_init */
uint m_mrr_mode;
- // Value of n_ranges passed to ha_partition::multi_range_read_init
+ /* Value of n_ranges passed to ha_partition::multi_range_read_init */
uint m_mrr_n_ranges;
/*
Ordered MRR mode: m_range_info[N] has the range_id of the last record that
- we've got from partition N.
+ we've got from partition N
*/
range_id_t *m_range_info;
- // TRUE <=> This ha_partition::multi_range_read_next() call is the first one
+ /*
+ TRUE <=> This ha_partition::multi_range_read_next() call is the first one
+ */
bool m_multi_range_read_first;
- // not used: uint m_mrr_range_init_flags;
+
+ /* not used: uint m_mrr_range_init_flags; */
/* Number of elements in the list pointed by m_mrr_range_first. Not used */
uint m_mrr_range_length;
- // Linked list of ranges to scan
+ /* Linked list of ranges to scan */
PARTITION_KEY_MULTI_RANGE *m_mrr_range_first;
PARTITION_KEY_MULTI_RANGE *m_mrr_range_current;
@@ -900,41 +902,39 @@ public:
*/
uint *m_part_mrr_range_length;
- /*
- For each partition: List of ranges to scan in this partition.
- */
+ /* For each partition: List of ranges to scan in this partition */
PARTITION_PART_KEY_MULTI_RANGE **m_part_mrr_range_first;
PARTITION_PART_KEY_MULTI_RANGE **m_part_mrr_range_current;
PARTITION_PART_KEY_MULTI_RANGE_HLD *m_partition_part_key_multi_range_hld;
/*
- Sequence of ranges to be scanned (TODO: why not stores this in
+ Sequence of ranges to be scanned (TODO: why not store this in
handler::mrr_{iter,funcs}?)
*/
range_seq_t m_seq;
RANGE_SEQ_IF *m_seq_if;
- // Range iterator structure to be supplied to partitions
+ /* Range iterator structure to be supplied to partitions */
RANGE_SEQ_IF m_part_seq_if;
virtual int multi_range_key_create_key(
RANGE_SEQ_IF *seq,
range_seq_t seq_it
);
- virtual ha_rows multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
- void *seq_init_param,
- uint n_ranges, uint *bufsz,
- uint *mrr_mode,
- Cost_estimate *cost);
- virtual ha_rows multi_range_read_info(uint keyno, uint n_ranges, uint keys,
- uint key_parts, uint *bufsz,
- uint *mrr_mode, Cost_estimate *cost);
- virtual int multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param,
- uint n_ranges, uint mrr_mode,
- HANDLER_BUFFER *buf);
- virtual int multi_range_read_next(range_id_t *range_info);
- virtual int multi_range_read_explain_info(uint mrr_mode, char *str,
- size_t size);
+ ha_rows multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
+ void *seq_init_param,
+ uint n_ranges, uint *bufsz,
+ uint *mrr_mode,
+ Cost_estimate *cost) override;
+ ha_rows multi_range_read_info(uint keyno, uint n_ranges, uint keys,
+ uint key_parts, uint *bufsz,
+ uint *mrr_mode, Cost_estimate *cost) override;
+ int multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param,
+ uint n_ranges, uint mrr_mode,
+ HANDLER_BUFFER *buf) override;
+ int multi_range_read_next(range_id_t *range_info) override;
+ int multi_range_read_explain_info(uint mrr_mode, char *str, size_t size)
+ override;
uint last_part() { return m_last_part; }
private:
@@ -962,21 +962,20 @@ public:
purposes.
-------------------------------------------------------------------------
*/
- virtual int info(uint);
- void get_dynamic_partition_info(PARTITION_STATS *stat_info,
- uint part_id);
- void set_partitions_to_open(List<String> *partition_names);
- int change_partitions_to_open(List<String> *partition_names);
+ int info(uint) override;
+ void get_dynamic_partition_info(PARTITION_STATS *stat_info, uint part_id)
+ override;
+ void set_partitions_to_open(List<String> *partition_names) override;
+ int change_partitions_to_open(List<String> *partition_names) override;
int open_read_partitions(char *name_buff, size_t name_buff_size);
- virtual int extra(enum ha_extra_function operation);
- virtual int extra_opt(enum ha_extra_function operation, ulong arg);
- virtual int reset(void);
- virtual uint count_query_cache_dependant_tables(uint8 *tables_type);
- virtual my_bool
- register_query_cache_dependant_tables(THD *thd,
- Query_cache *cache,
- Query_cache_block_table **block,
- uint *n);
+ int extra(enum ha_extra_function operation) override;
+ int extra_opt(enum ha_extra_function operation, ulong arg) override;
+ int reset() override;
+ uint count_query_cache_dependant_tables(uint8 *tables_type) override;
+ my_bool register_query_cache_dependant_tables(THD *thd,
+ Query_cache *cache,
+ Query_cache_block_table **block,
+ uint *n) override;
private:
typedef int handler_callback(handler *, void *);
@@ -1031,40 +1030,40 @@ public:
index-only scanning when performing an ORDER BY query.
Only called from one place in sql_select.cc
*/
- virtual const key_map *keys_to_use_for_scanning();
+ const key_map *keys_to_use_for_scanning() override;
/*
Called in test_quick_select to determine if indexes should be used.
*/
- virtual double scan_time();
+ double scan_time() override;
- virtual double key_scan_time(uint inx);
+ double key_scan_time(uint inx) override;
- virtual double keyread_time(uint inx, uint ranges, ha_rows rows);
+ double keyread_time(uint inx, uint ranges, ha_rows rows) override;
/*
The next method will never be called if you do not implement indexes.
*/
- virtual double read_time(uint index, uint ranges, ha_rows rows);
+ double read_time(uint index, uint ranges, ha_rows rows) override;
/*
For the given range how many records are estimated to be in this range.
Used by optimiser to calculate cost of using a particular index.
*/
- virtual ha_rows records_in_range(uint inx, key_range * min_key,
- key_range * max_key);
+ ha_rows records_in_range(uint inx, key_range * min_key, key_range * max_key)
+ override;
/*
Upper bound of number records returned in scan is sum of all
underlying handlers.
*/
- virtual ha_rows estimate_rows_upper_bound();
+ ha_rows estimate_rows_upper_bound() override;
/*
table_cache_type is implemented by the underlying handler but all
underlying handlers must have the same implementation for it to work.
*/
- virtual uint8 table_cache_type();
- virtual ha_rows records();
+ uint8 table_cache_type() override;
+ ha_rows records() override;
/* Calculate hash value for PARTITION BY KEY tables. */
static uint32 calculate_key_hash_value(Field **field_array);
@@ -1082,19 +1081,19 @@ public:
Here we must ensure that all handlers use the same index type
for each index created.
*/
- virtual const char *index_type(uint inx);
+ const char *index_type(uint inx) override;
/* The name of the table type that will be used for display purposes */
- virtual const char *table_type() const;
+ const char *table_type() const;
/* The name of the row type used for the underlying tables. */
- virtual enum row_type get_row_type() const;
+ enum row_type get_row_type() const override;
/*
Handler specific error messages
*/
- virtual void print_error(int error, myf errflag);
- virtual bool get_error_message(int error, String * buf);
+ void print_error(int error, myf errflag) override;
+ bool get_error_message(int error, String * buf) override;
/*
-------------------------------------------------------------------------
MODULE handler characteristics
@@ -1248,7 +1247,7 @@ public:
HA_CAN_INSERT_DELAYED, HA_PRIMARY_KEY_REQUIRED_FOR_POSITION is disabled
until further investigated.
*/
- virtual Table_flags table_flags() const;
+ Table_flags table_flags() const override;
/*
This is a bitmap of flags that says how the storage engine
@@ -1306,7 +1305,7 @@ public:
must be updated in the row.
(InnoDB, MyISAM)
*/
- virtual ulong index_flags(uint inx, uint part, bool all_parts) const
+ ulong index_flags(uint inx, uint part, bool all_parts) const override
{
/*
The following code is not safe if you are using different
@@ -1319,7 +1318,8 @@ public:
wrapper function for handlerton alter_table_flags, since
the ha_partition_hton cannot know all its capabilities
*/
- virtual alter_table_operations alter_table_flags(alter_table_operations flags);
+ alter_table_operations alter_table_flags(alter_table_operations flags)
+ override;
/*
unireg.cc will call the following to make sure that the storage engine
can handle the data it is about to send.
@@ -1327,19 +1327,18 @@ public:
The maximum supported values is the minimum of all handlers in the table
*/
uint min_of_the_max_uint(uint (handler::*operator_func)(void) const) const;
- virtual uint max_supported_record_length() const;
- virtual uint max_supported_keys() const;
- virtual uint max_supported_key_parts() const;
- virtual uint max_supported_key_length() const;
- virtual uint max_supported_key_part_length() const;
- virtual uint min_record_length(uint options) const;
+ uint max_supported_record_length() const override;
+ uint max_supported_keys() const override;
+ uint max_supported_key_parts() const override;
+ uint max_supported_key_length() const override;
+ uint max_supported_key_part_length() const override;
+ uint min_record_length(uint options) const override;
/*
Primary key is clustered can only be true if all underlying handlers have
this feature.
*/
- virtual bool primary_key_is_clustered()
- { return m_pkey_is_clustered; }
+ bool primary_key_is_clustered() override { return m_pkey_is_clustered; }
/*
-------------------------------------------------------------------------
@@ -1357,7 +1356,7 @@ public:
to check whether the rest of the reference part is also the same.
-------------------------------------------------------------------------
*/
- virtual int cmp_ref(const uchar * ref1, const uchar * ref2);
+ int cmp_ref(const uchar * ref1, const uchar * ref2) override;
/*
-------------------------------------------------------------------------
MODULE auto increment
@@ -1371,15 +1370,15 @@ public:
auto_increment_column_changed
-------------------------------------------------------------------------
*/
- virtual bool need_info_for_auto_inc();
- virtual bool can_use_for_auto_inc_init();
- virtual void get_auto_increment(ulonglong offset, ulonglong increment,
- ulonglong nb_desired_values,
- ulonglong *first_value,
- ulonglong *nb_reserved_values);
- virtual void release_auto_increment();
+ bool need_info_for_auto_inc() override;
+ bool can_use_for_auto_inc_init() override;
+ void get_auto_increment(ulonglong offset, ulonglong increment,
+ ulonglong nb_desired_values,
+ ulonglong *first_value,
+ ulonglong *nb_reserved_values) override;
+ void release_auto_increment() override;
private:
- virtual int reset_auto_increment(ulonglong value);
+ int reset_auto_increment(ulonglong value) override;
void update_next_auto_inc_val();
virtual void lock_auto_increment()
{
@@ -1441,7 +1440,7 @@ public:
This method is a special InnoDB method called before a HANDLER query.
-------------------------------------------------------------------------
*/
- virtual void init_table_handle_for_HANDLER();
+ void init_table_handle_for_HANDLER() override;
/*
The remainder of this file defines the handler methods not implemented
@@ -1469,20 +1468,20 @@ public:
List<FOREIGN_KEY_INFO> *f_key_list)
virtual uint referenced_by_foreign_key()
*/
- virtual bool can_switch_engines();
+ bool can_switch_engines() override;
/*
-------------------------------------------------------------------------
MODULE fulltext index
-------------------------------------------------------------------------
*/
void ft_close_search(FT_INFO *handler);
- virtual int ft_init();
- virtual int pre_ft_init();
- virtual void ft_end();
- virtual int pre_ft_end();
- virtual FT_INFO *ft_init_ext(uint flags, uint inx, String *key);
- virtual int ft_read(uchar *buf);
- virtual int pre_ft_read(bool use_parallel);
+ int ft_init() override;
+ int pre_ft_init() override;
+ void ft_end() override;
+ int pre_ft_end() override;
+ FT_INFO *ft_init_ext(uint flags, uint inx, String *key) override;
+ int ft_read(uchar *buf) override;
+ int pre_ft_read(bool use_parallel) override;
/*
-------------------------------------------------------------------------
@@ -1490,7 +1489,7 @@ public:
-------------------------------------------------------------------------
The following method is only used by MyISAM when used as
temporary tables in a join.
- virtual int restart_rnd_next(uchar *buf, uchar *pos);
+ int restart_rnd_next(uchar *buf, uchar *pos) override;
*/
/*
@@ -1501,16 +1500,18 @@ public:
They are used for in-place alter table:
-------------------------------------------------------------------------
*/
- virtual enum_alter_inplace_result
+ enum_alter_inplace_result
check_if_supported_inplace_alter(TABLE *altered_table,
- Alter_inplace_info *ha_alter_info);
- virtual bool prepare_inplace_alter_table(TABLE *altered_table,
- Alter_inplace_info *ha_alter_info);
- virtual bool inplace_alter_table(TABLE *altered_table,
- Alter_inplace_info *ha_alter_info);
- virtual bool commit_inplace_alter_table(TABLE *altered_table,
- Alter_inplace_info *ha_alter_info,
- bool commit);
+ Alter_inplace_info *ha_alter_info)
+ override;
+ bool prepare_inplace_alter_table(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info)
+ override;
+ bool inplace_alter_table(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info) override;
+ bool commit_inplace_alter_table(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info,
+ bool commit) override;
/*
-------------------------------------------------------------------------
MODULE tablespace support
@@ -1533,24 +1534,24 @@ public:
all partitions.
-------------------------------------------------------------------------
*/
- virtual int optimize(THD* thd, HA_CHECK_OPT *check_opt);
- virtual int analyze(THD* thd, HA_CHECK_OPT *check_opt);
- virtual int check(THD* thd, HA_CHECK_OPT *check_opt);
- virtual int repair(THD* thd, HA_CHECK_OPT *check_opt);
- virtual bool check_and_repair(THD *thd);
- virtual bool auto_repair(int error) const;
- virtual bool is_crashed() const;
- virtual int check_for_upgrade(HA_CHECK_OPT *check_opt);
+ int optimize(THD* thd, HA_CHECK_OPT *check_opt) override;
+ int analyze(THD* thd, HA_CHECK_OPT *check_opt) override;
+ int check(THD* thd, HA_CHECK_OPT *check_opt) override;
+ int repair(THD* thd, HA_CHECK_OPT *check_opt) override;
+ bool check_and_repair(THD *thd) override;
+ bool auto_repair(int error) const override;
+ bool is_crashed() const override;
+ int check_for_upgrade(HA_CHECK_OPT *check_opt) override;
/*
-------------------------------------------------------------------------
MODULE condition pushdown
-------------------------------------------------------------------------
*/
- virtual const COND *cond_push(const COND *cond);
- virtual void cond_pop();
- virtual void clear_top_table_fields();
- virtual int info_push(uint info_type, void *info);
+ const COND *cond_push(const COND *cond) override;
+ void cond_pop() override;
+ void clear_top_table_fields() override;
+ int info_push(uint info_type, void *info) override;
private:
int handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, uint flags);
@@ -1564,13 +1565,13 @@ public:
void append_row_to_str(String &str);
public:
- virtual int pre_calculate_checksum();
- virtual int calculate_checksum();
+ int pre_calculate_checksum() override;
+ int calculate_checksum() override;
/* Enabled keycache for performance reasons, WL#4571 */
- virtual int assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt);
- virtual int preload_keys(THD* thd, HA_CHECK_OPT* check_opt);
- virtual TABLE_LIST *get_next_global_for_child();
+ int assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt) override;
+ int preload_keys(THD* thd, HA_CHECK_OPT* check_opt) override;
+ TABLE_LIST *get_next_global_for_child() override;
/*
-------------------------------------------------------------------------
@@ -1579,9 +1580,9 @@ public:
Enable/Disable Indexes are only supported by HEAP and MyISAM.
-------------------------------------------------------------------------
*/
- virtual int disable_indexes(uint mode);
- virtual int enable_indexes(uint mode);
- virtual int indexes_are_disabled(void);
+ int disable_indexes(uint mode) override;
+ int enable_indexes(uint mode) override;
+ int indexes_are_disabled() override;
/*
-------------------------------------------------------------------------
@@ -1609,7 +1610,7 @@ public:
this can also be done before partition will support a mix of engines,
but preferably together with other incompatible API changes.
*/
- virtual handlerton *partition_ht() const
+ handlerton *partition_ht() const override
{
handlerton *h= m_file[0]->ht;
for (uint i=1; i < m_tot_parts; i++)
@@ -1638,5 +1639,16 @@ public:
friend int cmp_key_rowid_part_id(void *ptr, uchar *ref1, uchar *ref2);
friend int cmp_key_part_id(void *key_p, uchar *ref1, uchar *ref2);
+ bool can_convert_string(
+ const Field_string* field,
+ const Column_definition& new_field) const override;
+
+ bool can_convert_varstring(
+ const Field_varstring* field,
+ const Column_definition& new_field) const override;
+
+ bool can_convert_blob(
+ const Field_blob* field,
+ const Column_definition& new_field) const override;
};
#endif /* HA_PARTITION_INCLUDED */
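
Note: most of this header's churn replaces 'virtual' with C++11 'override'
on redeclarations of handler virtuals, which both silences GCC 9's spurious
-Werror=overloaded-virtual (mentioned in the hunk around
restore_auto_increment) and turns any signature mismatch into a hard compile
error. A minimal illustration with hypothetical types:

    struct Base
    {
      virtual int close() { return 0; }
      virtual ~Base() {}
    };

    struct Derived : Base
    {
      int close() override { return 1; }   /* OK: matches Base::close() */

      /* Uncommenting the next line would not compile: there is no
         Base::close(int) to override. Without 'override', the same line
         would silently declare a new, unrelated virtual.
      int close(int flags) override; */
    };
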
diff --git a/sql/handler.cc b/sql/handler.cc
index 4dd915d8b91..c3fee4c78c9 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -2748,11 +2748,13 @@ double handler::keyread_time(uint index, uint ranges, ha_rows rows)
size_t len= table->key_info[index].key_length + ref_length;
if (index == table->s->primary_key && table->file->primary_key_is_clustered())
len= table->s->stored_rec_length;
- uint keys_per_block= (uint) (stats.block_size/2.0/len+1);
- ulonglong blocks= !rows ? 0 : (rows-1) / keys_per_block + 1;
double cost= (double)rows*len/(stats.block_size+1)*IDX_BLOCK_COPY_COST;
if (ranges)
+ {
+ uint keys_per_block= (uint) (stats.block_size/2.0/len+1);
+ ulonglong blocks= !rows ? 0 : (rows-1) / keys_per_block + 1;
cost+= blocks;
+ }
return cost;
}
@@ -7042,6 +7044,20 @@ void handler::set_lock_type(enum thr_lock_type lock)
table->reginfo.lock_type= lock;
}
+Compare_keys handler::compare_key_parts(const Field &old_field,
+ const Column_definition &new_field,
+ const KEY_PART_INFO &old_part,
+ const KEY_PART_INFO &new_part) const
+{
+ if (!old_field.is_equal(new_field))
+ return Compare_keys::NotEqual;
+
+ if (old_part.length != new_part.length)
+ return Compare_keys::NotEqual;
+
+ return Compare_keys::Equal;
+}
+
#ifdef WITH_WSREP
/**
@details
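
Note: the keyread_time() hunk hoists the keys_per_block/blocks computation
under if (ranges), which leaves the estimate unchanged whenever ranges != 0
and skips dead work otherwise. A stand-alone restatement of the arithmetic
under assumed inputs (the 16K block size and the 0.1 copy-cost constant are
illustrative, not the server's actual values):

    #include <cstddef>
    #include <cstdio>

    static const double IDX_BLOCK_COPY_COST= 0.1;     /* assumed value */

    static double keyread_time_sketch(unsigned block_size, size_t len,
                                      unsigned ranges,
                                      unsigned long long rows)
    {
      double cost= (double) rows * len / (block_size + 1) *
                   IDX_BLOCK_COPY_COST;
      if (ranges)                      /* block count only matters here */
      {
        unsigned keys_per_block= (unsigned) (block_size / 2.0 / len + 1);
        unsigned long long blocks= !rows ? 0
                                         : (rows - 1) / keys_per_block + 1;
        cost+= blocks;
      }
      return cost;
    }

    int main()
    {
      /* 16K blocks, 20-byte entries, 1000 rows, one range:
         keys_per_block = 410, blocks = 3, cost ~= 0.122 + 3 */
      printf("%f\n", keyread_time_sketch(16384, 20, 1, 1000));
      return 0;
    }
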
diff --git a/sql/handler.h b/sql/handler.h
index f17c303571f..9ed6858874a 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -2940,6 +2940,13 @@ public:
virtual ~Handler_share() {}
};
+enum class Compare_keys : uint32_t
+{
+ Equal= 0,
+ EqualButKeyPartLength,
+ EqualButComment,
+ NotEqual
+};
/**
The handler class is the interface for dynamically loadable
@@ -4911,6 +4918,13 @@ public:
return false;
}
+ /* Used for ALTER TABLE.
+ Some engines can handle some differences in indexes by themself. */
+ virtual Compare_keys compare_key_parts(const Field &old_field,
+ const Column_definition &new_field,
+ const KEY_PART_INFO &old_part,
+ const KEY_PART_INFO &new_part) const;
+
protected:
Handler_share *get_ha_share_ptr();
void set_ha_share_ptr(Handler_share *arg_ha_share);
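
Note: compare_key_parts() gives engines a hook to relax the default
all-or-nothing key comparison during ALTER TABLE; the base implementation in
handler.cc above only ever answers Equal or NotEqual. A self-contained
analog showing how a tolerant engine might use the intermediate
EqualButKeyPartLength value (Part and both functions are hypothetical
stand-ins, not server code):

    #include <cstdint>

    enum class Compare_keys : uint32_t    /* mirrors sql/handler.h */
    {
      Equal= 0,
      EqualButKeyPartLength,
      EqualButComment,
      NotEqual
    };

    struct Part { unsigned length; };     /* stand-in for KEY_PART_INFO */

    /* Default-style check: any difference means NotEqual. */
    static Compare_keys compare_strict(bool fields_equal,
                                       const Part &oldp, const Part &newp)
    {
      if (!fields_equal || oldp.length != newp.length)
        return Compare_keys::NotEqual;
      return Compare_keys::Equal;
    }

    /* A tolerant engine might report a pure length growth as
       EqualButKeyPartLength, letting ALTER TABLE skip an index rebuild. */
    static Compare_keys compare_tolerant(bool fields_equal,
                                         const Part &oldp, const Part &newp)
    {
      if (!fields_equal)
        return Compare_keys::NotEqual;
      if (newp.length > oldp.length)
        return Compare_keys::EqualButKeyPartLength;
      return compare_strict(fields_equal, oldp, newp);
    }

    int main()
    {
      Part oldp= { 10 }, newp= { 20 };
      return compare_tolerant(true, oldp, newp) ==
             Compare_keys::EqualButKeyPartLength ? 0 : 1;
    }
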
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index d96e361d05d..186efdf7fb9 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -1203,7 +1203,7 @@ public:
bool fix_length_and_dec()
{
collation.set(default_charset());
- max_length=64;
+ fix_char_length(64);
maybe_null= 1;
return FALSE;
}
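
Note: the one-line item_strfunc.h change matters for multi-byte charsets:
max_length counts bytes, while fix_char_length() takes a count of characters
and scales it by the collation's mbmaxlen. A sketch of the difference (the
scaling mirrors what Item::fix_char_length() does internally; the numbers
are illustrative):

    #include <cstdio>

    /* mbmaxlen is the maximum bytes per character of the collation,
       e.g. 4 for utf8mb4. */
    static unsigned byte_length_for(unsigned char_length, unsigned mbmaxlen)
    {
      return char_length * mbmaxlen;
    }

    int main()
    {
      /* 64 characters in a 4-byte charset need 256 bytes; assigning
         max_length= 64 directly would under-size the result. */
      printf("%u\n", byte_length_for(64, 4));   /* prints 256 */
      return 0;
    }
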
diff --git a/sql/log_event_server.cc b/sql/log_event_server.cc
index 2267f91f0f9..ba904a254e9 100644
--- a/sql/log_event_server.cc
+++ b/sql/log_event_server.cc
@@ -3859,10 +3859,16 @@ int Xid_log_event::do_apply_event(rpl_group_info *rgi)
thd->variables.option_bits&= ~OPTION_GTID_BEGIN;
res= trans_commit(thd); /* Automatically rolls back on error. */
thd->mdl_context.release_transactional_locks();
-
+#ifdef WITH_WSREP
+ if (WSREP(thd)) mysql_mutex_lock(&thd->LOCK_thd_data);
+ if ((!res || (WSREP(thd) && thd->wsrep_trx().state() == wsrep::transaction::s_must_replay )) && sub_id)
+#else
if (likely(!res) && sub_id)
+#endif /* WITH_WSREP */
rpl_global_gtid_slave_state->update_state_hash(sub_id, &gtid, hton, rgi);
-
+#ifdef WITH_WSREP
+ if (WSREP(thd)) mysql_mutex_unlock(&thd->LOCK_thd_data);
+#endif /* WITH_WSREP */
/*
Increment the global status commit count variable
*/
diff --git a/sql/mdl.cc b/sql/mdl.cc
index 5f229f73b2f..98a9d8f0d01 100644
--- a/sql/mdl.cc
+++ b/sql/mdl.cc
@@ -3227,19 +3227,14 @@ void MDL_context::set_transaction_duration_for_all_locks()
DBUG_ASSERT(m_tickets[MDL_STATEMENT].is_empty());
- /* Don't swap locks if this thread is running backup stages */
- if (current_thd->current_backup_stage == BACKUP_FINISHED)
- m_tickets[MDL_TRANSACTION].swap(m_tickets[MDL_EXPLICIT]);
+ m_tickets[MDL_TRANSACTION].swap(m_tickets[MDL_EXPLICIT]);
Ticket_iterator it_ticket(m_tickets[MDL_EXPLICIT]);
while ((ticket= it_ticket++))
{
- if (ticket->get_key()->mdl_namespace() != MDL_key::BACKUP)
- {
- m_tickets[MDL_EXPLICIT].remove(ticket);
- m_tickets[MDL_TRANSACTION].push_front(ticket);
- }
+ m_tickets[MDL_EXPLICIT].remove(ticket);
+ m_tickets[MDL_TRANSACTION].push_front(ticket);
}
#ifndef DBUG_OFF
diff --git a/sql/multi_range_read.cc b/sql/multi_range_read.cc
index 7e4c2ed1f53..c000187626f 100644
--- a/sql/multi_range_read.cc
+++ b/sql/multi_range_read.cc
@@ -702,7 +702,8 @@ static int rowid_cmp_reverse(void *file, uchar *a, uchar *b)
int Mrr_ordered_rndpos_reader::init(handler *h_arg,
Mrr_index_reader *index_reader_arg,
uint mode,
- Lifo_buffer *buf)
+ Lifo_buffer *buf,
+ Rowid_filter *filter)
{
file= h_arg;
index_reader= index_reader_arg;
@@ -710,19 +711,7 @@ int Mrr_ordered_rndpos_reader::init(handler *h_arg,
is_mrr_assoc= !MY_TEST(mode & HA_MRR_NO_ASSOCIATION);
index_reader_exhausted= FALSE;
index_reader_needs_refill= TRUE;
-
- /*
- Currently usage of a rowid filter within InnoDB engine is not supported
- if the table is accessed by the primary key.
- With optimizer switches ''mrr' and 'mrr_sort_keys' are both enabled
- any access by a secondary index is converted to the rndpos access. In
- InnoDB the rndpos access is always uses the primary key.
- Do not use pushed rowid filter if the table is accessed actually by the
- primary key. Use the rowid filter outside the engine code (see
- Mrr_ordered_rndpos_reader::refill_from_index_reader).
- */
- if (file->pushed_rowid_filter && file->primary_key_is_clustered())
- file->cancel_pushed_rowid_filter();
+ rowid_filter= filter;
return 0;
}
@@ -817,10 +806,8 @@ int Mrr_ordered_rndpos_reader::refill_from_index_reader()
index_reader->position();
/*
- If the built rowid filter cannot be used at the engine level use it here.
+ If the built rowid filter cannot be used at the engine level, use it here.
*/
- Rowid_filter *rowid_filter=
- file->get_table()->reginfo.join_tab->rowid_filter;
if (rowid_filter && !file->pushed_rowid_filter &&
!rowid_filter->check((char *)index_rowid))
continue;
@@ -960,7 +947,8 @@ int DsMrr_impl::dsmrr_init(handler *h_arg, RANGE_SEQ_IF *seq_funcs,
void *seq_init_param, uint n_ranges, uint mode,
HANDLER_BUFFER *buf)
{
- THD *thd= h_arg->get_table()->in_use;
+ TABLE *table= h_arg->get_table();
+ THD *thd= table->in_use;
int res;
Key_parameters keypar;
uint UNINIT_VAR(key_buff_elem_size); /* set/used when do_sort_keys==TRUE */
@@ -1015,6 +1003,21 @@ int DsMrr_impl::dsmrr_init(handler *h_arg, RANGE_SEQ_IF *seq_funcs,
if (!(keyno == table->s->primary_key && h_idx->primary_key_is_clustered()))
{
strategy= disk_strategy= &reader_factory.ordered_rndpos_reader;
+ if (h_arg->pushed_rowid_filter)
+ {
+ /*
+ Currently usage of a rowid filter within InnoDB engine is not supported
+ if the table is accessed by the primary key.
+ With optimizer switches ''mrr' and 'mrr_sort_keys' are both enabled
+ any access by a secondary index is converted to the rndpos access. In
+ InnoDB the rndpos access is always uses the primary key.
+ Do not use pushed rowid filter if the table is accessed actually by the
+ primary key. Use the rowid filter outside the engine code (see
+ Mrr_ordered_rndpos_reader::refill_from_index_reader).
+ */
+ rowid_filter= h_arg->pushed_rowid_filter;
+ h_arg->cancel_pushed_rowid_filter();
+ }
}
full_buf= buf->buffer;
@@ -1094,14 +1097,18 @@ int DsMrr_impl::dsmrr_init(handler *h_arg, RANGE_SEQ_IF *seq_funcs,
goto use_default_impl;
}
+ // setup_two_handlers() will call dsmrr_close() will clears the filter.
+ // Save its value and restore afterwards.
+ Rowid_filter *tmp = rowid_filter;
if ((res= setup_two_handlers()))
goto error;
+ rowid_filter= tmp;
if ((res= index_strategy->init(secondary_file, seq_funcs, seq_init_param,
n_ranges, mode, &keypar, key_buffer,
&buf_manager)) ||
(res= disk_strategy->init(primary_file, index_strategy, mode,
- &rowid_buffer)))
+ &rowid_buffer, rowid_filter)))
{
goto error;
}
@@ -1283,6 +1290,7 @@ void DsMrr_impl::close_second_handler()
void DsMrr_impl::dsmrr_close()
{
DBUG_ENTER("DsMrr_impl::dsmrr_close");
+ rowid_filter= NULL;
close_second_handler();
strategy= NULL;
DBUG_VOID_RETURN;
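
Note: taken together, this file's hunks move the "un-push" of the rowid
filter from Mrr_ordered_rndpos_reader::init() into DsMrr_impl::dsmrr_init(),
and thread the saved filter through init() explicitly instead of fetching it
from reginfo.join_tab mid-scan. A toy model of the resulting ownership flow
(RowidFilter, Reader and DsMrr are stand-ins, not server types):

    struct RowidFilter
    {
      bool check(const char *rowid) const { return rowid != nullptr; }
    };

    struct Reader                          /* ~ Mrr_ordered_rndpos_reader */
    {
      RowidFilter *rowid_filter= nullptr;

      /* After the patch the filter arrives as an explicit parameter... */
      void init(RowidFilter *filter) { rowid_filter= filter; }

      /* ...and is applied while refilling from the index reader. */
      bool keep(const char *rowid) const
      {
        return !rowid_filter || rowid_filter->check(rowid);
      }
    };

    struct DsMrr                           /* ~ DsMrr_impl */
    {
      RowidFilter *rowid_filter= nullptr;  /* the "unpushed" filter */
      Reader disk_reader;

      void dsmrr_init(RowidFilter *pushed)
      {
        /* rnd_pos access goes through the primary key, where InnoDB
           cannot apply the pushed filter; take it over, check it here. */
        rowid_filter= pushed;
        disk_reader.init(rowid_filter);
      }

      void dsmrr_close() { rowid_filter= nullptr; }
    };

    int main()
    {
      DsMrr m;
      RowidFilter f;
      m.dsmrr_init(&f);
      return m.disk_reader.keep("r1") ? 0 : 1;
    }
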
diff --git a/sql/multi_range_read.h b/sql/multi_range_read.h
index 0473fef04ae..37a00e3086f 100644
--- a/sql/multi_range_read.h
+++ b/sql/multi_range_read.h
@@ -364,7 +364,7 @@ class Mrr_ordered_rndpos_reader : public Mrr_reader
{
public:
int init(handler *file, Mrr_index_reader *index_reader, uint mode,
- Lifo_buffer *buf);
+ Lifo_buffer *buf, Rowid_filter *filter);
int get_next(range_id_t *range_info);
int refill_buffer(bool initial);
private:
@@ -399,6 +399,9 @@ private:
/* Buffer to store (rowid, range_id) pairs */
Lifo_buffer *rowid_buffer;
+ /* Rowid filter to be checked against (if any) */
+ Rowid_filter *rowid_filter;
+
int refill_from_index_reader();
};
@@ -554,7 +557,8 @@ public:
typedef void (handler::*range_check_toggle_func_t)(bool on);
DsMrr_impl()
- : secondary_file(NULL) {};
+ : secondary_file(NULL),
+ rowid_filter(NULL) {};
void init(handler *h_arg, TABLE *table_arg)
{
@@ -591,7 +595,13 @@ private:
to run both index scan and rnd_pos() scan at the same time)
*/
handler *secondary_file;
-
+
+ /*
+ The rowid filter that DS-MRR has "unpushed" from the storage engine.
+ If it's present, DS-MRR will use it.
+ */
+ Rowid_filter *rowid_filter;
+
uint keyno; /* index we're running the scan on */
/* TRUE <=> need range association, buffers hold {rowid, range_id} pairs */
bool is_mrr_assoc;
diff --git a/sql/my_json_writer.cc b/sql/my_json_writer.cc
index 4200769b1dc..f4cf8204d61 100644
--- a/sql/my_json_writer.cc
+++ b/sql/my_json_writer.cc
@@ -79,7 +79,8 @@ void Json_writer::end_array()
Json_writer& Json_writer::add_member(const char *name)
{
- if (fmt_helper.on_add_member(name))
+ size_t len= strlen(name);
+ if (fmt_helper.on_add_member(name, len))
return *this; // handled
// assert that we are in an object
@@ -87,7 +88,22 @@ Json_writer& Json_writer::add_member(const char *name)
start_element();
output.append('"');
- output.append(name);
+ output.append(name, len);
+ output.append("\": ", 3);
+ return *this;
+}
+
+Json_writer& Json_writer::add_member(const char *name, size_t len)
+{
+ if (fmt_helper.on_add_member(name, len))
+ return *this; // handled
+
+ // assert that we are in an object
+ DBUG_ASSERT(!element_started);
+ start_element();
+
+ output.append('"');
+ output.append(name, len);
output.append("\": ");
return *this;
}
@@ -141,28 +157,31 @@ void Json_writer::add_ull(ulonglong val)
void Json_writer::add_size(longlong val)
{
char buf[64];
+ size_t len;
if (val < 1024)
- my_snprintf(buf, sizeof(buf), "%lld", val);
+ len= my_snprintf(buf, sizeof(buf), "%lld", val);
else if (val < 1024*1024*16)
{
/* Values less than 16MB are specified in KB for precision */
- size_t len= my_snprintf(buf, sizeof(buf), "%lld", val/1024);
+ len= my_snprintf(buf, sizeof(buf), "%lld", val/1024);
strcpy(buf + len, "Kb");
+ len+= 2;
}
else
{
- size_t len= my_snprintf(buf, sizeof(buf), "%lld", val/(1024*1024));
+ len= my_snprintf(buf, sizeof(buf), "%lld", val/(1024*1024));
strcpy(buf + len, "Mb");
+ len+= 2;
}
- add_str(buf);
+ add_str(buf, len);
}
void Json_writer::add_double(double val)
{
char buf[64];
- my_snprintf(buf, sizeof(buf), "%lg", val);
- add_unquoted_str(buf);
+ size_t len= my_snprintf(buf, sizeof(buf), "%lg", val);
+ add_unquoted_str(buf, len);
}
@@ -174,32 +193,46 @@ void Json_writer::add_bool(bool val)
void Json_writer::add_null()
{
- add_unquoted_str("null");
+ add_unquoted_str("null", (size_t) 4);
}
void Json_writer::add_unquoted_str(const char* str)
{
- if (fmt_helper.on_add_str(str, 0))
+ size_t len= strlen(str);
+ if (fmt_helper.on_add_str(str, len))
return;
if (!element_started)
start_element();
- output.append(str);
+ output.append(str, len);
+ element_started= false;
+}
+
+void Json_writer::add_unquoted_str(const char* str, size_t len)
+{
+ if (fmt_helper.on_add_str(str, len))
+ return;
+
+ if (!element_started)
+ start_element();
+
+ output.append(str, len);
element_started= false;
}
void Json_writer::add_str(const char *str)
{
- if (fmt_helper.on_add_str(str, 0))
+ size_t len= strlen(str);
+ if (fmt_helper.on_add_str(str, len))
return;
if (!element_started)
start_element();
output.append('"');
- output.append(str);
+ output.append(str, len);
output.append('"');
element_started= false;
}
@@ -227,50 +260,6 @@ void Json_writer::add_str(const String &str)
add_str(str.ptr(), str.length());
}
-Json_writer_object::Json_writer_object(THD *thd) :
- Json_writer_struct(thd)
-{
- if (my_writer)
- my_writer->start_object();
-}
-
-Json_writer_object::Json_writer_object(THD* thd, const char *str) :
- Json_writer_struct(thd)
-{
- if (my_writer)
- my_writer->add_member(str).start_object();
-}
-
-Json_writer_object::~Json_writer_object()
-{
- if (!closed && my_writer)
- my_writer->end_object();
- closed= TRUE;
-}
-
-Json_writer_array::Json_writer_array(THD *thd) :
- Json_writer_struct(thd)
-{
- if (my_writer)
- my_writer->start_array();
-}
-
-Json_writer_array::Json_writer_array(THD *thd, const char *str) :
- Json_writer_struct(thd)
-{
- if (my_writer)
- my_writer->add_member(str).start_array();
-
-}
-Json_writer_array::~Json_writer_array()
-{
- if (!closed && my_writer)
- {
- my_writer->end_array();
- closed= TRUE;
- }
-}
-
Json_writer_temp_disable::Json_writer_temp_disable(THD *thd_arg)
{
thd= thd_arg;
@@ -281,7 +270,8 @@ Json_writer_temp_disable::~Json_writer_temp_disable()
thd->opt_trace.enable_tracing_if_required();
}
-bool Single_line_formatting_helper::on_add_member(const char *name)
+bool Single_line_formatting_helper::on_add_member(const char *name,
+ size_t len)
{
DBUG_ASSERT(state== INACTIVE || state == DISABLED);
if (state != DISABLED)
@@ -290,7 +280,6 @@ bool Single_line_formatting_helper::on_add_member(const char *name)
buf_ptr= buffer;
//append member name to the array
- size_t len= strlen(name);
if (len < MAX_LINE_LEN)
{
memcpy(buf_ptr, name, len);
@@ -344,12 +333,10 @@ void Single_line_formatting_helper::on_start_object()
bool Single_line_formatting_helper::on_add_str(const char *str,
- size_t num_bytes)
+ size_t len)
{
if (state == IN_ARRAY)
{
- size_t len= num_bytes ? num_bytes : strlen(str);
-
// New length will be:
// "$string",
// quote + quote + comma + space = 4
@@ -425,9 +412,11 @@ void Single_line_formatting_helper::disable_and_flush()
while (ptr < buf_ptr)
{
char *str= ptr;
+ size_t len= strlen(str);
+
if (nr == 0)
{
- owner->add_member(str);
+ owner->add_member(str, len);
if (start_array)
owner->start_array();
}
@@ -435,13 +424,11 @@ void Single_line_formatting_helper::disable_and_flush()
{
//if (nr == 1)
// owner->start_array();
- owner->add_str(str);
+ owner->add_str(str, len);
}
nr++;
- while (*ptr!=0)
- ptr++;
- ptr++;
+ ptr+= len+1;
}
buf_ptr= buffer;
state= INACTIVE;
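
Note: the recurring change in my_json_writer.cc is measuring each string
once at the public entry point and passing the length down, instead of
letting every helper re-run strlen() (or appending literals without a known
length). A minimal sketch of the pattern (Appender stands in for the
writer's output String):

    #include <cstring>
    #include <string>

    struct Appender
    {
      std::string out;
      void append(const char *s, size_t len) { out.append(s, len); }
      void append(char c) { out.push_back(c); }
    };

    static void add_member(Appender &out, const char *name, size_t len)
    {
      out.append('"');
      out.append(name, len);        /* no second strlen() inside */
      out.append("\": ", 3);        /* literal: length known up front */
    }

    static void add_member(Appender &out, const char *name)
    {
      add_member(out, name, strlen(name));   /* measured exactly once */
    }

    int main()
    {
      Appender out;
      add_member(out, "select_id");
      return out.out == "\"select_id\": " ? 0 : 1;
    }
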
diff --git a/sql/my_json_writer.h b/sql/my_json_writer.h
index f1f1be70bb0..bc8002de529 100644
--- a/sql/my_json_writer.h
+++ b/sql/my_json_writer.h
@@ -86,7 +86,7 @@ public:
void init(Json_writer *owner_arg) { owner= owner_arg; }
- bool on_add_member(const char *name);
+ bool on_add_member(const char *name, size_t len);
bool on_start_array();
bool on_end_array();
@@ -184,6 +184,7 @@ class Json_writer
public:
/* Add a member. We must be in an object. */
Json_writer& add_member(const char *name);
+ Json_writer& add_member(const char *name, size_t len);
/* Add atomic values */
void add_str(const char* val);
@@ -202,6 +203,7 @@ public:
private:
void add_unquoted_str(const char* val);
+ void add_unquoted_str(const char* val, size_t len);
public:
/* Start a child object */
void start_object();
@@ -254,63 +256,51 @@ public:
void init(Json_writer *my_writer) { writer= my_writer; }
void add_str(const char* val)
{
- if (writer)
writer->add_str(val);
}
void add_str(const char* val, size_t length)
{
- if (writer)
writer->add_str(val, length);
}
void add_str(const String &str)
{
- if (writer)
- writer->add_str(str);
+ writer->add_str(str.ptr(), str.length());
}
- void add_str(LEX_CSTRING str)
+ void add_str(const LEX_CSTRING &str)
{
- if (writer)
- writer->add_str(str.str);
+ writer->add_str(str.str, str.length);
}
void add_str(Item *item)
{
- if (writer)
writer->add_str(item);
}
void add_ll(longlong val)
{
- if (writer)
writer->add_ll(val);
}
void add_size(longlong val)
{
- if (writer)
writer->add_size(val);
}
void add_double(double val)
{
- if (writer)
writer->add_double(val);
}
void add_bool(bool val)
{
- if (writer)
writer->add_bool(val);
}
void add_null()
{
- if (writer)
writer->add_null();
}
void add_table_name(const JOIN_TAB *tab)
{
- if (writer)
writer->add_table_name(tab);
}
void add_table_name(const TABLE* table)
{
- if (writer)
writer->add_table_name(table);
}
};
@@ -333,6 +323,10 @@ public:
context.init(my_writer);
closed= false;
}
+ bool trace_started()
+ {
+ return my_writer != 0;
+ }
};
@@ -349,55 +343,90 @@ class Json_writer_object : public Json_writer_struct
private:
void add_member(const char *name)
{
- if (my_writer)
- my_writer->add_member(name);
+ my_writer->add_member(name);
}
public:
- explicit Json_writer_object(THD *thd);
- explicit Json_writer_object(THD *thd, const char *str);
+ explicit Json_writer_object(THD *thd)
+ : Json_writer_struct(thd)
+ {
+ if (unlikely(my_writer))
+ my_writer->start_object();
+ }
+
+ explicit Json_writer_object(THD* thd, const char *str)
+ : Json_writer_struct(thd)
+ {
+ if (unlikely(my_writer))
+ my_writer->add_member(str).start_object();
+ }
+
+ ~Json_writer_object()
+ {
+ if (my_writer && !closed)
+ my_writer->end_object();
+ closed= TRUE;
+ }
Json_writer_object& add(const char *name, bool value)
{
DBUG_ASSERT(!closed);
- add_member(name);
- context.add_bool(value);
+ if (my_writer)
+ {
+ add_member(name);
+ context.add_bool(value);
+ }
return *this;
}
Json_writer_object& add(const char *name, ulonglong value)
{
DBUG_ASSERT(!closed);
- add_member(name);
- context.add_ll(static_cast<longlong>(value));
+ if (my_writer)
+ {
+ add_member(name);
+ context.add_ll(static_cast<longlong>(value));
+ }
return *this;
}
Json_writer_object& add(const char *name, longlong value)
{
DBUG_ASSERT(!closed);
- add_member(name);
- context.add_ll(value);
+ if (my_writer)
+ {
+ add_member(name);
+ context.add_ll(value);
+ }
return *this;
}
Json_writer_object& add(const char *name, double value)
{
DBUG_ASSERT(!closed);
- add_member(name);
- context.add_double(value);
+ if (my_writer)
+ {
+ add_member(name);
+ context.add_double(value);
+ }
return *this;
}
#ifndef _WIN64
Json_writer_object& add(const char *name, size_t value)
{
DBUG_ASSERT(!closed);
- add_member(name);
- context.add_ll(static_cast<longlong>(value));
+ if (my_writer)
+ {
+ add_member(name);
+ context.add_ll(static_cast<longlong>(value));
+ }
return *this;
}
#endif
Json_writer_object& add(const char *name, const char *value)
{
DBUG_ASSERT(!closed);
- add_member(name);
- context.add_str(value);
+ if (my_writer)
+ {
+ add_member(name);
+ context.add_str(value);
+ }
return *this;
}
Json_writer_object& add(const char *name, const char *value, size_t num_bytes)
@@ -406,59 +435,76 @@ public:
context.add_str(value, num_bytes);
return *this;
}
- Json_writer_object& add(const char *name, LEX_CSTRING value)
+ Json_writer_object& add(const char *name, const LEX_CSTRING &value)
{
DBUG_ASSERT(!closed);
- add_member(name);
- context.add_str(value.str);
+ if (my_writer)
+ {
+ add_member(name);
+ context.add_str(value.str, value.length);
+ }
return *this;
}
Json_writer_object& add(const char *name, Item *value)
{
DBUG_ASSERT(!closed);
- add_member(name);
- context.add_str(value);
+ if (my_writer)
+ {
+ add_member(name);
+ context.add_str(value);
+ }
return *this;
}
Json_writer_object& add_null(const char*name)
{
DBUG_ASSERT(!closed);
- add_member(name);
- context.add_null();
+ if (my_writer)
+ {
+ add_member(name);
+ context.add_null();
+ }
return *this;
}
Json_writer_object& add_table_name(const JOIN_TAB *tab)
{
DBUG_ASSERT(!closed);
- add_member("table");
- context.add_table_name(tab);
+ if (my_writer)
+ {
+ add_member("table");
+ context.add_table_name(tab);
+ }
return *this;
}
Json_writer_object& add_table_name(const TABLE *table)
{
DBUG_ASSERT(!closed);
- add_member("table");
- context.add_table_name(table);
+ if (my_writer)
+ {
+ add_member("table");
+ context.add_table_name(table);
+ }
return *this;
}
Json_writer_object& add_select_number(uint select_number)
{
DBUG_ASSERT(!closed);
- add_member("select_id");
- if (unlikely(select_number >= INT_MAX))
- context.add_str("fake");
- else
- context.add_ll(static_cast<longlong>(select_number));
+ if (my_writer)
+ {
+ add_member("select_id");
+ if (unlikely(select_number >= INT_MAX))
+ context.add_str("fake");
+ else
+ context.add_ll(static_cast<longlong>(select_number));
+ }
return *this;
}
void end()
{
DBUG_ASSERT(!closed);
- if (my_writer)
+ if (unlikely(my_writer))
my_writer->end_object();
closed= TRUE;
}
- ~Json_writer_object();
};
@@ -473,12 +519,29 @@ public:
class Json_writer_array : public Json_writer_struct
{
public:
- Json_writer_array(THD *thd);
- Json_writer_array(THD *thd, const char *str);
+ Json_writer_array(THD *thd): Json_writer_struct(thd)
+ {
+ if (unlikely(my_writer))
+ my_writer->start_array();
+ }
+
+ Json_writer_array(THD *thd, const char *str) : Json_writer_struct(thd)
+ {
+ if (unlikely(my_writer))
+ my_writer->add_member(str).start_array();
+ }
+ ~Json_writer_array()
+ {
+ if (unlikely(my_writer && !closed))
+ {
+ my_writer->end_array();
+ closed= TRUE;
+ }
+ }
void end()
{
DBUG_ASSERT(!closed);
- if (my_writer)
+ if (unlikely(my_writer))
my_writer->end_array();
closed= TRUE;
}
@@ -486,78 +549,89 @@ public:
Json_writer_array& add(bool value)
{
DBUG_ASSERT(!closed);
- context.add_bool(value);
+ if (my_writer)
+ context.add_bool(value);
return *this;
}
Json_writer_array& add(ulonglong value)
{
DBUG_ASSERT(!closed);
- context.add_ll(static_cast<longlong>(value));
+ if (my_writer)
+ context.add_ll(static_cast<longlong>(value));
return *this;
}
Json_writer_array& add(longlong value)
{
DBUG_ASSERT(!closed);
- context.add_ll(value);
+ if (my_writer)
+ context.add_ll(value);
return *this;
}
Json_writer_array& add(double value)
{
DBUG_ASSERT(!closed);
- context.add_double(value);
+ if (my_writer)
+ context.add_double(value);
return *this;
}
#ifndef _WIN64
Json_writer_array& add(size_t value)
{
DBUG_ASSERT(!closed);
- context.add_ll(static_cast<longlong>(value));
+ if (my_writer)
+ context.add_ll(static_cast<longlong>(value));
return *this;
}
#endif
Json_writer_array& add(const char *value)
{
DBUG_ASSERT(!closed);
- context.add_str(value);
+ if (my_writer)
+ context.add_str(value);
return *this;
}
Json_writer_array& add(const char *value, size_t num_bytes)
{
DBUG_ASSERT(!closed);
- context.add_str(value, num_bytes);
+ if (my_writer)
+ context.add_str(value, num_bytes);
return *this;
}
- Json_writer_array& add(LEX_CSTRING value)
+ Json_writer_array& add(const LEX_CSTRING &value)
{
DBUG_ASSERT(!closed);
- context.add_str(value.str);
+ if (my_writer)
+ context.add_str(value.str, value.length);
return *this;
}
Json_writer_array& add(Item *value)
{
DBUG_ASSERT(!closed);
- context.add_str(value);
+ if (my_writer)
+ context.add_str(value);
return *this;
}
Json_writer_array& add_null()
{
DBUG_ASSERT(!closed);
- context.add_null();
+ if (my_writer)
+ context.add_null();
return *this;
}
Json_writer_array& add_table_name(const JOIN_TAB *tab)
{
DBUG_ASSERT(!closed);
- context.add_table_name(tab);
+ if (my_writer)
+ context.add_table_name(tab);
return *this;
}
Json_writer_array& add_table_name(const TABLE *table)
{
DBUG_ASSERT(!closed);
- context.add_table_name(table);
+ if (my_writer)
+ context.add_table_name(table);
return *this;
}
- ~Json_writer_array();
};
/*
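
The rewritten add() family above follows a single pattern: every member-plus-value emission is wrapped in `if (my_writer)`, so with the optimizer trace disabled each call degrades to one pointer test instead of string formatting. A minimal standalone sketch of that guard; Writer and Guarded_object below are illustrative stand-ins, not the MariaDB classes:

    #include <iostream>
    #include <string>

    // Illustrative stand-ins for Json_writer/Json_writer_object; the real
    // classes in sql/my_json_writer.h are much richer than this.
    struct Writer
    {
      void add_member(const std::string &name) { std::cout << '"' << name << "\": "; }
      void add_str(const std::string &val)     { std::cout << '"' << val << "\"\n"; }
    };

    class Guarded_object
    {
      Writer *my_writer;                  // NULL when the optimizer trace is off
    public:
      explicit Guarded_object(Writer *w) : my_writer(w) {}
      Guarded_object &add(const char *name, const char *value)
      {
        if (my_writer)                    // whole call collapses to this test
        {
          my_writer->add_member(name);
          my_writer->add_str(value);
        }
        return *this;
      }
    };

    int main()
    {
      Writer w;
      Guarded_object enabled(&w), disabled(nullptr);
      enabled.add("access_type", "range");   // prints "access_type": "range"
      disabled.add("access_type", "range");  // no-op: no writer attached
      return 0;
    }
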
diff --git a/sql/opt_index_cond_pushdown.cc b/sql/opt_index_cond_pushdown.cc
index a87d5664e29..f8d11da1d5e 100644
--- a/sql/opt_index_cond_pushdown.cc
+++ b/sql/opt_index_cond_pushdown.cc
@@ -390,8 +390,23 @@ void push_index_cond(JOIN_TAB *tab, uint keyno)
~(tab->table->map | tab->join->const_table_map)))
tab->cache_idx_cond= idx_cond;
else
+ {
idx_remainder_cond= tab->table->file->idx_cond_push(keyno, idx_cond);
+ /*
+ If (1) there is an index condition that we couldn't push using ICP,
+ (2) we are using join buffering, and
+ (3) we are using BKA,
+ then use BKA's Index Condition Pushdown mechanism to check it.
+ */
+ if (idx_remainder_cond && tab->use_join_cache && // (1) && (2)
+ tab->icp_other_tables_ok) // (3)
+ {
+ tab->cache_idx_cond= idx_remainder_cond;
+ idx_remainder_cond= NULL;
+ }
+ }
+
/*
Disable eq_ref's "lookup cache" if we've pushed down an index
condition.
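
The new branch gives an index condition a second chance when the engine declines to evaluate it via ICP: under join buffering with BKA, the unpushed remainder is handed to the join cache as cache_idx_cond rather than being re-checked per row by the server. A standalone sketch of just this hand-off; engine_push() is an assumed stand-in for handler::idx_cond_push(), and the flags are plain bools here:

    #include <cstdio>

    struct Cond { const char *text; };

    // Stand-in for handler::idx_cond_push(): an engine may hand back the
    // part of the condition it cannot evaluate (here: all or nothing).
    static Cond *engine_push(Cond *idx_cond, bool engine_supports_icp)
    {
      return engine_supports_icp ? nullptr : idx_cond;
    }

    int main()
    {
      Cond cond{"t1.b < t2.b"};
      bool use_join_cache= true;          // (2)
      bool icp_other_tables_ok= true;     // (3)

      Cond *cache_idx_cond= nullptr;
      Cond *idx_remainder_cond= engine_push(&cond, false);  // (1): not pushed

      if (idx_remainder_cond && use_join_cache &&  // (1) && (2)
          icp_other_tables_ok)                     // (3)
      {
        cache_idx_cond= idx_remainder_cond;        // BKA will check it instead
        idx_remainder_cond= nullptr;
      }
      std::printf("checked by join cache: %s\n",
                  cache_idx_cond ? cache_idx_cond->text : "(none)");
      return 0;
    }
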
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 5822aaa8250..d086257a446 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -2254,6 +2254,7 @@ public:
void TRP_RANGE::trace_basic_info(PARAM *param,
Json_writer_object *trace_object) const
{
+ DBUG_ASSERT(trace_object->trace_started());
DBUG_ASSERT(param->using_real_indexes);
const uint keynr_in_table= param->real_keynr[key_idx];
@@ -2318,6 +2319,7 @@ void TRP_ROR_UNION::trace_basic_info(PARAM *param,
Json_writer_object *trace_object) const
{
THD *thd= param->thd;
+ DBUG_ASSERT(trace_object->trace_started());
trace_object->add("type", "index_roworder_union");
Json_writer_array smth_trace(thd, "union_of");
for (TABLE_READ_PLAN **current= first_ror; current != last_ror; current++)
@@ -2353,6 +2355,7 @@ void TRP_INDEX_INTERSECT::trace_basic_info(PARAM *param,
Json_writer_object *trace_object) const
{
THD *thd= param->thd;
+ DBUG_ASSERT(trace_object->trace_started());
trace_object->add("type", "index_sort_intersect");
Json_writer_array smth_trace(thd, "index_sort_intersect_of");
for (TRP_RANGE **current= range_scans; current != range_scans_end;
@@ -2386,6 +2389,7 @@ void TRP_INDEX_MERGE::trace_basic_info(PARAM *param,
Json_writer_object *trace_object) const
{
THD *thd= param->thd;
+ DBUG_ASSERT(trace_object->trace_started());
trace_object->add("type", "index_merge");
Json_writer_array smth_trace(thd, "index_merge_of");
for (TRP_RANGE **current= range_scans; current != range_scans_end; current++)
@@ -2454,6 +2458,8 @@ void TRP_GROUP_MIN_MAX::trace_basic_info(PARAM *param,
Json_writer_object *trace_object) const
{
THD *thd= param->thd;
+ DBUG_ASSERT(trace_object->trace_started());
+
trace_object->add("type", "index_group").add("index", index_info->name);
if (min_max_arg_part)
@@ -2837,7 +2843,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
{
{
Json_writer_array trace_range_summary(thd,
- "setup_range_conditions");
+ "setup_range_conditions");
if (cond)
tree= cond->get_mm_tree(&param, &cond);
if (notnull_cond_tree)
@@ -2909,7 +2915,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
remove_nonrange_trees(&param, tree);
/* Get best 'range' plan and prepare data for making other plans */
- if ((range_trp= get_key_scans_params(&param, tree, FALSE, TRUE,
+ if ((range_trp= get_key_scans_params(&param, tree,
+ only_single_index_range_scan, TRUE,
best_read_time)))
{
best_trp= range_trp;
@@ -4895,7 +4902,8 @@ double get_sweep_read_cost(const PARAM *param, ha_rows records)
{
double result;
DBUG_ENTER("get_sweep_read_cost");
- if (param->table->file->primary_key_is_clustered())
+ if (param->table->file->primary_key_is_clustered() ||
+ param->table->file->stats.block_size == 0 /* HEAP */)
{
/*
We are using the primary key to find the rows.
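
The added `stats.block_size == 0` test routes HEAP tables into the cheap branch: an in-memory table has no disk blocks, so charging one random page fetch per row would overprice rowid lookups. A toy version of the split, with made-up constants; the real function uses the handler's cost model, not these numbers:

    #include <cstdio>

    // Toy sweep-read cost: formulas and constants are placeholders chosen
    // only to show the branch, not MariaDB's actual cost model.
    static double sweep_read_cost(double records, bool clustered_pk,
                                  unsigned block_size)
    {
      if (clustered_pk || block_size == 0 /* HEAP: purely in-memory */)
        return records / 10.0 + 1.0;   // cheap ordered/in-memory lookups
      return records;                  // ~one random page fetch per row
    }

    int main()
    {
      std::printf("HEAP:   %.1f\n", sweep_read_cost(1000, false, 0));
      std::printf("MyISAM: %.1f\n", sweep_read_cost(1000, false, 1024));
      return 0;
    }
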
@@ -5600,6 +5608,8 @@ ha_rows get_table_cardinality_for_index_intersect(TABLE *table)
static
void print_keyparts(THD *thd, KEY *key, uint key_parts)
{
+ DBUG_ASSERT(thd->trace_started());
+
KEY_PART_INFO *part= key->key_part;
Json_writer_array keyparts= Json_writer_array(thd, "keyparts");
for(uint i= 0; i < key_parts; i++, part++)
@@ -6389,6 +6399,8 @@ void TRP_ROR_INTERSECT::trace_basic_info(PARAM *param,
Json_writer_object *trace_object) const
{
THD *thd= param->thd;
+ DBUG_ASSERT(trace_object->trace_started());
+
trace_object->add("type", "index_roworder_intersect");
trace_object->add("rows", records);
trace_object->add("cost", read_cost);
@@ -7428,10 +7440,12 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
{
trace_idx.add("chosen", false);
if (found_records == HA_POS_ERROR)
+ {
if (key->type == SEL_ARG::Type::MAYBE_KEY)
trace_idx.add("cause", "depends on unread values");
else
trace_idx.add("cause", "unknown");
+ }
else
trace_idx.add("cause", "cost");
}
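
The braces introduced here do not change which `else` binds where (C++ attaches an `else` to the nearest unbraced `if`, so the old code already parsed as intended), but they make the two-level nesting explicit and avoid dangling-else compiler warnings. A compilable demonstration of the binding rule:

    #include <cstdio>

    int main()
    {
      bool found_error= true, maybe_key= false;

      // Braceless form: the first `else` binds to the inner `if`, the
      // second to the outer one. Legal, but easy to misread and flagged
      // by -Wdangling-else / -Wparentheses.
      if (found_error)
        if (maybe_key)
          std::puts("cause: depends on unread values");
        else
          std::puts("cause: unknown");
      else
        std::puts("cause: cost");

      // Braced form, as in the patch: the nesting is now self-evident.
      if (found_error)
      {
        if (maybe_key)
          std::puts("cause: depends on unread values");
        else
          std::puts("cause: unknown");
      }
      else
        std::puts("cause: cost");
      return 0;
    }
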
@@ -15818,6 +15832,7 @@ static void trace_ranges(Json_writer_array *range_trace,
sel_arg_range_seq_next, 0, 0};
KEY *keyinfo= param->table->key_info + param->real_keynr[idx];
uint n_key_parts= param->table->actual_n_key_parts(keyinfo);
+ DBUG_ASSERT(range_trace->trace_started());
seq.keyno= idx;
seq.real_keyno= param->real_keynr[idx];
seq.param= param;
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index 6c491300d17..e855f56f832 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -3050,12 +3050,13 @@ bool Sj_materialization_picker::check_qep(JOIN *join,
}
else
{
- Json_writer_object trace(join->thd);
- trace.add("strategy", "SJ-Materialization");
/* This is SJ-Materialization with lookups */
Cost_estimate prefix_cost;
signed int first_tab= (int)idx - mat_info->tables;
double prefix_rec_count;
+ Json_writer_object trace(join->thd);
+ trace.add("strategy", "SJ-Materialization");
+
if (first_tab < (int)join->const_tables)
{
prefix_cost.reset();
@@ -3084,7 +3085,7 @@ bool Sj_materialization_picker::check_qep(JOIN *join,
*record_count= prefix_rec_count;
*handled_fanout= new_join_tab->emb_sj_nest->sj_inner_tables;
*strategy= SJ_OPT_MATERIALIZE;
- if (unlikely(join->thd->trace_started()))
+ if (unlikely(trace.trace_started()))
{
trace.add("records", *record_count);
trace.add("read_time", *read_time);
@@ -3166,7 +3167,7 @@ bool Sj_materialization_picker::check_qep(JOIN *join,
*/
*record_count= prefix_rec_count;
*handled_fanout= mat_nest->sj_inner_tables;
- if (unlikely(join->thd->trace_started()))
+ if (unlikely(trace.trace_started()))
{
trace.add("records", *record_count);
trace.add("read_time", *read_time);
@@ -3266,7 +3267,7 @@ bool LooseScan_picker::check_qep(JOIN *join,
*/
*strategy= SJ_OPT_LOOSE_SCAN;
*handled_fanout= first->table->emb_sj_nest->sj_inner_tables;
- if (unlikely(join->thd->trace_started()))
+ if (unlikely(trace.trace_started()))
{
trace.add("records", *record_count);
trace.add("read_time", *read_time);
@@ -3384,7 +3385,7 @@ bool Firstmatch_picker::check_qep(JOIN *join,
*handled_fanout= firstmatch_need_tables;
/* *record_count and *read_time were set by the above call */
*strategy= SJ_OPT_FIRST_MATCH;
- if (unlikely(join->thd->trace_started()))
+ if (unlikely(trace.trace_started()))
{
trace.add("records", *record_count);
trace.add("read_time", *read_time);
@@ -3469,6 +3470,7 @@ bool Duplicate_weedout_picker::check_qep(JOIN *join,
uint temptable_rec_size;
Json_writer_object trace(join->thd);
trace.add("strategy", "DuplicateWeedout");
+
if (first_tab == join->const_tables)
{
prefix_rec_count= 1.0;
@@ -3529,7 +3531,7 @@ bool Duplicate_weedout_picker::check_qep(JOIN *join,
*record_count= prefix_rec_count * sj_outer_fanout;
*handled_fanout= dups_removed_fanout;
*strategy= SJ_OPT_DUPS_WEEDOUT;
- if (unlikely(join->thd->trace_started()))
+ if (unlikely(trace.trace_started()))
{
trace.add("records", *record_count);
trace.add("read_time", *read_time);
@@ -3727,18 +3729,20 @@ static void recalculate_prefix_record_count(JOIN *join, uint start, uint end)
void fix_semijoin_strategies_for_picked_join_order(JOIN *join)
{
- uint table_count=join->table_count;
- uint tablenr;
- table_map remaining_tables= 0;
- table_map handled_tabs= 0;
join->sjm_lookup_tables= 0;
join->sjm_scan_tables= 0;
- THD *thd= join->thd;
if (!join->select_lex->sj_nests.elements)
return;
+
+ THD *thd= join->thd;
+ uint table_count=join->table_count;
+ uint tablenr;
+ table_map remaining_tables= 0;
+ table_map handled_tabs= 0;
Json_writer_object trace_wrapper(thd);
Json_writer_array trace_semijoin_strategies(thd,
- "fix_semijoin_strategies_for_picked_join_order");
+ "fix_semijoin_strategies_for_picked_join_order");
+
for (tablenr= table_count - 1 ; tablenr != join->const_tables - 1; tablenr--)
{
POSITION *pos= join->best_positions + tablenr;
diff --git a/sql/opt_trace.cc b/sql/opt_trace.cc
index 057c6f3cc0a..a8676eec411 100644
--- a/sql/opt_trace.cc
+++ b/sql/opt_trace.cc
@@ -108,8 +108,8 @@ void opt_trace_print_expanded_query(THD *thd, SELECT_LEX *select_lex,
Json_writer_object *writer)
{
- if (!thd->trace_started())
- return;
+ DBUG_ASSERT(thd->trace_started());
+
StringBuffer<1024> str(system_charset_info);
ulonglong save_option_bits= thd->variables.option_bits;
thd->variables.option_bits &= ~OPTION_QUOTE_SHOW_CREATE;
@@ -198,12 +198,11 @@ void opt_trace_disable_if_no_stored_proc_func_access(THD *thd, sp_head *sp)
{
if (likely(!(thd->variables.optimizer_trace &
Opt_trace_context::FLAG_ENABLED)) ||
- thd->system_thread)
+ thd->system_thread ||
+ !thd->trace_started())
return;
Opt_trace_context *const trace= &thd->opt_trace;
- if (!thd->trace_started())
- return;
bool full_access;
Security_context *const backup_thd_sctx= thd->security_context();
thd->set_security_context(&thd->main_security_ctx);
@@ -232,13 +231,12 @@ void opt_trace_disable_if_no_stored_proc_func_access(THD *thd, sp_head *sp)
void opt_trace_disable_if_no_tables_access(THD *thd, TABLE_LIST *tbl)
{
if (likely(!(thd->variables.optimizer_trace &
- Opt_trace_context::FLAG_ENABLED)) || thd->system_thread)
- return;
- Opt_trace_context *const trace= &thd->opt_trace;
-
- if (!thd->trace_started())
+ Opt_trace_context::FLAG_ENABLED)) ||
+ thd->system_thread ||
+ !thd->trace_started())
return;
+ Opt_trace_context *const trace= &thd->opt_trace;
Security_context *const backup_thd_sctx= thd->security_context();
thd->set_security_context(&thd->main_security_ctx);
const TABLE_LIST *const first_not_own_table= thd->lex->first_not_own_table();
@@ -293,12 +291,11 @@ void opt_trace_disable_if_no_view_access(THD *thd, TABLE_LIST *view,
if (likely(!(thd->variables.optimizer_trace &
Opt_trace_context::FLAG_ENABLED)) ||
- thd->system_thread)
- return;
- Opt_trace_context *const trace= &thd->opt_trace;
- if (!thd->trace_started())
+ thd->system_thread ||
+ !thd->trace_started())
return;
+ Opt_trace_context *const trace= &thd->opt_trace;
Security_context *const backup_table_sctx= view->security_ctx;
Security_context *const backup_thd_sctx= thd->security_context();
const GRANT_INFO backup_grant_info= view->grant;
@@ -334,64 +331,33 @@ void opt_trace_disable_if_no_view_access(THD *thd, TABLE_LIST *view,
The trace of one statement.
*/
-class Opt_trace_stmt {
- public:
- /**
- Constructor, starts a trace for information_schema and dbug.
- @param ctx_arg context
- */
- Opt_trace_stmt(Opt_trace_context *ctx_arg)
- {
- ctx= ctx_arg;
- current_json= new Json_writer();
- missing_priv= false;
- I_S_disabled= 0;
- }
- ~Opt_trace_stmt()
- {
- delete current_json;
- }
- void set_query(const char *query_ptr, size_t length, const CHARSET_INFO *charset);
- void open_struct(const char *key, char opening_bracket);
- void close_struct(const char *saved_key, char closing_bracket);
- void fill_info(Opt_trace_info* info);
- void add(const char *key, char *opening_bracket, size_t val_length);
- Json_writer* get_current_json() {return current_json;}
- void missing_privilege();
- void disable_tracing_for_children();
- void enable_tracing_for_children();
- bool is_enabled();
-
- void set_allowed_mem_size(size_t mem_size);
- size_t get_length() { return current_json->output.length(); }
- size_t get_truncated_bytes() { return current_json->get_truncated_bytes(); }
- bool get_missing_priv() { return missing_priv; }
-
-private:
- Opt_trace_context *ctx;
- String query; // store the query sent by the user
- Json_writer *current_json; // stores the trace
- bool missing_priv; ///< whether user lacks privilege to see this trace
- /*
- 0 <=> this trace should be in information_schema.
- !=0 tracing is disabled, this currently happens when we want to trace a
- sub-statement. For now traces are only collect for the top statement
- not for the sub-statments.
- */
- uint I_S_disabled;
-};
+Opt_trace_stmt::Opt_trace_stmt(Opt_trace_context *ctx_arg)
+{
+ ctx= ctx_arg;
+ current_json= new Json_writer();
+ missing_priv= false;
+ I_S_disabled= 0;
+}
-void Opt_trace_stmt::set_query(const char *query_ptr, size_t length,
- const CHARSET_INFO *charset)
+Opt_trace_stmt::~Opt_trace_stmt()
{
- query.append(query_ptr, length, charset);
+ delete current_json;
}
-Json_writer* Opt_trace_context::get_current_json()
+size_t Opt_trace_stmt::get_length()
{
- if (!is_started())
- return NULL;
- return current_trace->get_current_json();
+ return current_json->output.length();
+}
+
+size_t Opt_trace_stmt::get_truncated_bytes()
+{
+ return current_json->get_truncated_bytes();
+}
+
+void Opt_trace_stmt::set_query(const char *query_ptr, size_t length,
+ const CHARSET_INFO *charset)
+{
+ query.append(query_ptr, length, charset);
}
void Opt_trace_context::missing_privilege()
@@ -579,11 +545,6 @@ void Opt_trace_stmt::enable_tracing_for_children()
--I_S_disabled;
}
-bool Opt_trace_stmt::is_enabled()
-{
- return I_S_disabled == 0;
-}
-
void Opt_trace_stmt::set_allowed_mem_size(size_t mem_size)
{
current_json->set_size_limit(mem_size);
@@ -595,6 +556,7 @@ void Opt_trace_stmt::set_allowed_mem_size(size_t mem_size)
void Json_writer::add_table_name(const JOIN_TAB *tab)
{
+ DBUG_ASSERT(tab->join->thd->trace_started());
if (tab != NULL)
{
char table_name_buffer[SAFE_NAME_LEN];
@@ -633,6 +595,7 @@ void Json_writer::add_table_name(const TABLE *table)
void add_table_scan_values_to_trace(THD *thd, JOIN_TAB *tab)
{
+ DBUG_ASSERT(thd->trace_started());
Json_writer_object table_records(thd);
table_records.add_table_name(tab);
Json_writer_object table_rec(thd, "table_scan");
@@ -658,6 +621,8 @@ void add_table_scan_values_to_trace(THD *thd, JOIN_TAB *tab)
void trace_plan_prefix(JOIN *join, uint idx, table_map join_tables)
{
THD *const thd= join->thd;
+ DBUG_ASSERT(thd->trace_started());
+
Json_writer_array plan_prefix(thd, "plan_prefix");
for (uint i= 0; i < idx; i++)
{
@@ -682,6 +647,8 @@ void trace_plan_prefix(JOIN *join, uint idx, table_map join_tables)
void print_final_join_order(JOIN *join)
{
+ DBUG_ASSERT(join->thd->trace_started());
+
Json_writer_object join_order(join->thd);
Json_writer_array best_order(join->thd, "best_join_order");
JOIN_TAB *j;
@@ -695,6 +662,8 @@ void print_final_join_order(JOIN *join)
void print_best_access_for_table(THD *thd, POSITION *pos,
enum join_type type)
{
+ DBUG_ASSERT(thd->trace_started());
+
Json_writer_object trace_best_access(thd, "chosen_access_method");
trace_best_access.add("type", type == JT_ALL ? "scan" :
join_type_str[type]);
diff --git a/sql/opt_trace.h b/sql/opt_trace.h
index 46adbec2c3c..550f18c0797 100644
--- a/sql/opt_trace.h
+++ b/sql/opt_trace.h
@@ -21,8 +21,6 @@ class Item;
class THD;
struct TABLE_LIST;
-class Opt_trace_stmt;
-
/*
User-visible information about a trace.
*/
diff --git a/sql/opt_trace_context.h b/sql/opt_trace_context.h
index e5df16b1e3b..f578a0c67ec 100644
--- a/sql/opt_trace_context.h
+++ b/sql/opt_trace_context.h
@@ -3,7 +3,50 @@
#include "sql_array.h"
-class Opt_trace_stmt;
+class Opt_trace_context;
+struct Opt_trace_info;
+class Json_writer;
+
+class Opt_trace_stmt {
+ public:
+ /**
+ Constructor, starts a trace for information_schema and dbug.
+ @param ctx_arg context
+ */
+ Opt_trace_stmt(Opt_trace_context *ctx_arg);
+ ~Opt_trace_stmt();
+ void set_query(const char *query_ptr, size_t length, const CHARSET_INFO *charset);
+ void open_struct(const char *key, char opening_bracket);
+ void close_struct(const char *saved_key, char closing_bracket);
+ void fill_info(Opt_trace_info* info);
+ void add(const char *key, char *opening_bracket, size_t val_length);
+ Json_writer* get_current_json() {return current_json;}
+ void missing_privilege();
+ void disable_tracing_for_children();
+ void enable_tracing_for_children();
+ bool is_enabled()
+ {
+ return I_S_disabled == 0;
+ }
+ void set_allowed_mem_size(size_t mem_size);
+ size_t get_length();
+ size_t get_truncated_bytes();
+ bool get_missing_priv() { return missing_priv; }
+
+private:
+ Opt_trace_context *ctx;
+ String query; // store the query sent by the user
+ Json_writer *current_json; // stores the trace
+ bool missing_priv; ///< whether user lacks privilege to see this trace
+ /*
+ 0 <=> this trace should be in information_schema.
+ !=0 tracing is disabled, this currently happens when we want to trace a
+ sub-statement. For now traces are only collected for the top statement,
+ not for sub-statements.
+ */
+ uint I_S_disabled;
+};
+
class Opt_trace_context
{
@@ -48,7 +91,12 @@ public:
This returns the current trace, to which we are still writing and has not been finished
*/
- Json_writer* get_current_json();
+ Json_writer* get_current_json()
+ {
+ if (!is_started())
+ return NULL;
+ return current_trace->get_current_json();
+ }
bool empty()
{
@@ -57,7 +105,7 @@ public:
bool is_started()
{
- return current_trace && is_enabled();
+ return current_trace && current_trace->is_enabled();
}
bool disable_tracing_if_required();
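
With the class definition now visible in the header, is_started() and get_current_json() can be inlined at every trace check instead of costing an out-of-line call per query. A reduced model of the arrangement, MariaDB-specific members stripped and everything in one translation unit to mimic header visibility:

    // Illustrative shapes only; not the real Opt_trace_stmt/Opt_trace_context.
    class Stmt
    {
      unsigned I_S_disabled= 0;        // 0 <=> tracing enabled for this stmt
    public:
      bool is_enabled() const { return I_S_disabled == 0; }
    };

    class Context
    {
      Stmt *current_trace= nullptr;
    public:
      void start(Stmt *s) { current_trace= s; }
      // Inlinable now that Stmt's definition is visible to callers:
      bool is_started() const
      { return current_trace && current_trace->is_enabled(); }
    };

    int main()
    {
      Context ctx;
      Stmt stmt;
      bool before= ctx.is_started();   // false: no trace in progress
      ctx.start(&stmt);
      bool after= ctx.is_started();    // true
      return (before == false && after == true) ? 0 : 1;
    }
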
diff --git a/sql/slave.cc b/sql/slave.cc
index aba10b8bd6e..436d5e0b5c5 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -3997,19 +3997,26 @@ apply_event_and_update_pos_apply(Log_event* ev, THD* thd, rpl_group_info *rgi,
exec_res= ev->apply_event(rgi);
#ifdef WITH_WSREP
- if (WSREP_ON)
- {
- mysql_mutex_lock(&thd->LOCK_thd_data);
- if (exec_res &&
- thd->wsrep_trx().state() != wsrep::transaction::s_executing)
- {
- WSREP_DEBUG("SQL apply failed, res %d conflict state: %s",
- exec_res, wsrep_thd_transaction_state_str(thd));
- rli->abort_slave= 1;
- rli->report(ERROR_LEVEL, ER_UNKNOWN_COM_ERROR, rgi->gtid_info(),
- "Node has dropped from cluster");
+ if (WSREP_ON) {
+ if (exec_res) {
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+ switch(thd->wsrep_trx().state()) {
+ case wsrep::transaction::s_must_replay:
+ /* This transaction will be replayed,
+ so do not raise a slave error here. */
+ WSREP_DEBUG("SQL apply failed for MUST_REPLAY, res %d", exec_res);
+ exec_res = 0;
+ break;
+ default:
+ WSREP_DEBUG("SQL apply failed, res %d conflict state: %s",
+ exec_res, wsrep_thd_transaction_state_str(thd));
+ rli->abort_slave= 1;
+ rli->report(ERROR_LEVEL, ER_UNKNOWN_COM_ERROR, rgi->gtid_info(),
+ "Node has dropped from cluster");
+ break;
+ }
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
}
- mysql_mutex_unlock(&thd->LOCK_thd_data);
}
#endif
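
The rewritten error handling turns the transaction state into a decision table: a failed apply whose transaction is in s_must_replay is downgraded to success, since the replay will redo the work, while any other failing state still aborts the SQL thread. A sketch of that table as a pure function; the names are illustrative, not the wsrep-lib API:

    #include <cstdio>

    enum class trx_state { executing, must_replay, aborted };

    // Only non-replayable failures stop the slave.
    static int resolve_apply_result(int exec_res, trx_state state,
                                    bool *abort_slave)
    {
      if (!exec_res)
        return 0;                       // apply succeeded
      switch (state)
      {
      case trx_state::must_replay:
        return 0;                       // replay pending: not a slave error
      default:
        *abort_slave= true;             // genuine conflict or node drop
        return exec_res;
      }
    }

    int main()
    {
      bool abort_slave= false;
      std::printf("replay: %d\n",
                  resolve_apply_result(1, trx_state::must_replay, &abort_slave));
      std::printf("abort : %d (abort_slave=%d)\n",
                  resolve_apply_result(1, trx_state::aborted, &abort_slave),
                  (int) abort_slave);
      return 0;
    }
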
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 2143a9d4008..06b00e8799d 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -2231,11 +2231,6 @@ void THD::reset_globals()
net.thd= 0;
}
-bool THD::trace_started()
-{
- return opt_trace.is_started();
-}
-
/*
Cleanup after query.
@@ -5690,6 +5685,7 @@ void THD::leave_locked_tables_mode()
{
if (locked_tables_mode == LTM_LOCK_TABLES)
{
+ DBUG_ASSERT(current_backup_stage == BACKUP_FINISHED);
/*
When leaving LOCK TABLES mode we have to change the duration of most
of the metadata locks being held, except for HANDLER and GRL locks,
diff --git a/sql/sql_class.h b/sql/sql_class.h
index acd304e24ea..19bdde022b0 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -3390,7 +3390,10 @@ public:
void reset_for_reuse();
bool store_globals();
void reset_globals();
- bool trace_started();
+ bool trace_started()
+ {
+ return opt_trace.is_started();
+ }
#ifdef SIGNAL_WITH_VIO_CLOSE
inline void set_active_vio(Vio* vio)
{
@@ -5137,6 +5140,7 @@ public:
Item *sp_fix_func_item(Item **it_addr);
Item *sp_prepare_func_item(Item **it_addr, uint cols= 1);
bool sp_eval_expr(Field *result_field, Item **expr_item_ptr);
+
};
/** A short cut for thd->get_stmt_da()->set_ok_status(). */
diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc
index b3d0d985582..5a243cd0a6d 100644
--- a/sql/sql_join_cache.cc
+++ b/sql/sql_join_cache.cc
@@ -2254,6 +2254,8 @@ enum_nested_loop_state JOIN_CACHE::join_matching_records(bool skip_last)
if ((rc= join_tab_execution_startup(join_tab)) < 0)
goto finish2;
+ join_tab->build_range_rowid_filter_if_needed();
+
/* Prepare to retrieve all records of the joined table */
if (unlikely((error= join_tab_scan->open())))
{
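
The added call ensures the range rowid filter exists before the join cache starts matching buffered records against the scanned table, since every scanned row is probed against it. A toy filter showing the build-then-probe order; the real implementation hangs off JOIN_TAB and the handler, not a std::vector:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Toy rowid filter: built once from a cheap range scan, then probed
    // for every row of the joined table's scan.
    struct Rowid_filter
    {
      std::vector<long> rowids;
      void build(std::vector<long> ids)
      {
        rowids= std::move(ids);
        std::sort(rowids.begin(), rowids.end());
      }
      bool check(long rowid) const
      { return std::binary_search(rowids.begin(), rowids.end(), rowid); }
    };

    int main()
    {
      Rowid_filter filter;
      filter.build({11, 3, 7});            // "build_range_rowid_filter..."
      for (long rowid= 1; rowid <= 12; rowid++)
        if (filter.check(rowid))           // probe during the table scan
          std::printf("row %ld passes the filter\n", rowid);
      return 0;
    }
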
diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc
index 8eedffdf41a..63b233faea6 100644
--- a/sql/sql_reload.cc
+++ b/sql/sql_reload.cc
@@ -525,6 +525,12 @@ bool flush_tables_with_read_lock(THD *thd, TABLE_LIST *all_tables)
goto error;
}
+ if (thd->current_backup_stage != BACKUP_FINISHED)
+ {
+ my_error(ER_BACKUP_LOCK_IS_ACTIVE, MYF(0));
+ return true;
+ }
+
if (thd->lex->type & REFRESH_READ_LOCK)
{
/*
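
The new check makes FLUSH TABLES WITH READ LOCK and an open BACKUP STAGE mutually exclusive within one connection. Reduced to the guard itself (true == error, following the server convention; the enum values here are illustrative):

    #include <cstdio>

    enum backup_stage { BACKUP_FINISHED, BACKUP_START, BACKUP_BLOCK_COMMIT };

    // A connection with an unfinished BACKUP STAGE may not also take FTWRL.
    static bool flush_tables_with_read_lock(backup_stage current_backup_stage)
    {
      if (current_backup_stage != BACKUP_FINISHED)
      {
        std::puts("ER_BACKUP_LOCK_IS_ACTIVE");
        return true;
      }
      std::puts("FTWRL acquired");
      return false;
    }

    int main()
    {
      flush_tables_with_read_lock(BACKUP_START);     // rejected
      flush_tables_with_read_lock(BACKUP_FINISHED);  // allowed
      return 0;
    }
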
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index e8fd275c55b..40f6d5d9fa8 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -360,8 +360,10 @@ bool dbug_user_var_equals_int(THD *thd, const char *name, int value)
static void trace_table_dependencies(THD *thd,
JOIN_TAB *join_tabs, uint table_count)
{
+ DBUG_ASSERT(thd->trace_started());
Json_writer_object trace_wrapper(thd);
Json_writer_array trace_dep(thd, "table_dependencies");
+
for (uint i= 0; i < table_count; i++)
{
TABLE_LIST *table_ref= join_tabs[i].tab_list;
@@ -1474,6 +1476,7 @@ JOIN::prepare(TABLE_LIST *tables_init, COND *conds_init, uint og_num,
}
}
+ if (thd->trace_started())
{
Json_writer_object trace_wrapper(thd);
opt_trace_print_expanded_query(thd, select_lex, &trace_wrapper);
@@ -5350,6 +5353,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
{
Json_writer_object rows_estimation_wrapper(thd);
Json_writer_array rows_estimation(thd, "rows_estimation");
+
for (s=stat ; s < stat_end ; s++)
{
s->startup_cost= 0;
@@ -5494,10 +5498,16 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
if (select)
delete select;
else
- add_table_scan_values_to_trace(thd, s);
+ {
+ if (thd->trace_started())
+ add_table_scan_values_to_trace(thd, s);
+ }
}
else
- add_table_scan_values_to_trace(thd, s);
+ {
+ if (thd->trace_started())
+ add_table_scan_values_to_trace(thd, s);
+ }
}
}
@@ -7404,7 +7414,7 @@ best_access_path(JOIN *join,
Json_writer_object trace_access_idx(thd);
/*
- ft-keys require special treatment
+ full text keys require special treatment
*/
if (ft_key)
{
@@ -7416,7 +7426,7 @@ best_access_path(JOIN *join,
records= 1.0;
type= JT_FT;
trace_access_idx.add("access_type", join_type_str[type])
- .add("index", keyinfo->name);
+ .add("full-text index", keyinfo->name);
}
else
{
@@ -11870,18 +11880,21 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
i++;
}
- trace_attached_comp.end();
- Json_writer_array trace_attached_summary(thd,
- "attached_conditions_summary");
- for (tab= first_depth_first_tab(join); tab;
- tab= next_depth_first_tab(join, tab))
+ if (unlikely(thd->trace_started()))
{
- if (!tab->table)
- continue;
- Item *const cond = tab->select_cond;
- Json_writer_object trace_one_table(thd);
- trace_one_table.add_table_name(tab);
- trace_one_table.add("attached", cond);
+ trace_attached_comp.end();
+ Json_writer_array trace_attached_summary(thd,
+ "attached_conditions_summary");
+ for (tab= first_depth_first_tab(join); tab;
+ tab= next_depth_first_tab(join, tab))
+ {
+ if (!tab->table)
+ continue;
+ Item *const cond = tab->select_cond;
+ Json_writer_object trace_one_table(thd);
+ trace_one_table.add_table_name(tab);
+ trace_one_table.add("attached", cond);
+ }
}
}
DBUG_RETURN(0);
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 02f6278140c..2b1be46b807 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -465,7 +465,7 @@ static struct show_privileges_st sys_privileges[]=
{"Create view", "Tables", "To create new views"},
{"Create user", "Server Admin", "To create new users"},
{"Delete", "Tables", "To delete existing rows"},
- {"Delete versioning rows", "Tables", "To delete versioning table historical rows"},
+ {"Delete history", "Tables", "To delete versioning table historical rows"},
{"Drop", "Databases,Tables", "To drop databases, tables, and views"},
#ifdef HAVE_EVENT_SCHEDULER
{"Event","Server Admin","To create, alter, drop and execute events"},
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index a499f91397a..2d4ff71d0c1 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -6599,38 +6599,68 @@ static int compare_uint(const uint *s, const uint *t)
return (*s < *t) ? -1 : ((*s > *t) ? 1 : 0);
}
-enum class Compare_keys : uint32_t
-{
- Equal= 0,
- EqualButKeyPartLength,
- EqualButComment,
- NotEqual
-};
+static Compare_keys merge(Compare_keys current, Compare_keys add)
+{
+  if (current == Compare_keys::Equal)
+    return add;
+
+  if (add == Compare_keys::Equal)
+    return current;
+
+  if (current == add)
+    return current;
+
+  if (current == Compare_keys::EqualButComment)
+    return Compare_keys::NotEqual;
+
+  if (current == Compare_keys::EqualButKeyPartLength)
+  {
+    if (add == Compare_keys::EqualButComment)
+      return Compare_keys::NotEqual;
+    DBUG_ASSERT(add == Compare_keys::NotEqual);
+    return Compare_keys::NotEqual;
+  }
+
+  DBUG_ASSERT(current == Compare_keys::NotEqual);
+  return current;
+}
Compare_keys compare_keys_but_name(const KEY *table_key, const KEY *new_key,
Alter_info *alter_info, const TABLE *table,
const KEY *const new_pk,
const KEY *const old_pk)
{
- Compare_keys result= Compare_keys::Equal;
+ if (table_key->algorithm != new_key->algorithm)
+ return Compare_keys::NotEqual;
+
+ if ((table_key->flags & HA_KEYFLAG_MASK) !=
+ (new_key->flags & HA_KEYFLAG_MASK))
+ return Compare_keys::NotEqual;
- if ((table_key->algorithm != new_key->algorithm) ||
- ((table_key->flags & HA_KEYFLAG_MASK) !=
- (new_key->flags & HA_KEYFLAG_MASK)) ||
- (table_key->user_defined_key_parts != new_key->user_defined_key_parts))
+ if (table_key->user_defined_key_parts != new_key->user_defined_key_parts)
return Compare_keys::NotEqual;
if (table_key->block_size != new_key->block_size)
return Compare_keys::NotEqual;
+ /*
+ Rebuild the index if either of the following conditions is satisfied:
+
+ (i) The old table doesn't have a primary key but the new one does, or vice versa
+ (ii) The primary key changed to another existing index
+ */
+ if ((new_key == new_pk) != (table_key == old_pk))
+ return Compare_keys::NotEqual;
+
if (engine_options_differ(table_key->option_struct, new_key->option_struct,
table->file->ht->index_options))
return Compare_keys::NotEqual;
- const KEY_PART_INFO *end=
- table_key->key_part + table_key->user_defined_key_parts;
- for (const KEY_PART_INFO *key_part= table_key->key_part,
- *new_part= new_key->key_part;
+ Compare_keys result= Compare_keys::Equal;
+
+ for (const KEY_PART_INFO *
+ key_part= table_key->key_part,
+ *new_part= new_key->key_part,
+ *end= table_key->key_part + table_key->user_defined_key_parts;
key_part < end; key_part++, new_part++)
{
/*
@@ -6638,61 +6668,23 @@ Compare_keys compare_keys_but_name(const KEY *table_key, const KEY *new_key,
object with adjusted length. So below we have to check field
indexes instead of simply comparing pointers to Field objects.
*/
- Create_field *new_field= alter_info->create_list.elem(new_part->fieldnr);
- if (!new_field->field ||
- new_field->field->field_index != key_part->fieldnr - 1)
- return Compare_keys::NotEqual;
-
- /*
- If there is a change in index length due to column expansion
- like varchar(X) changed to varchar(X + N) and has a compatible
- packed data representation, we mark it for fast/INPLACE change
- in index definition. InnoDB supports INPLACE for this cases
-
- Key definition has changed if we are using a different field or
- if the user key part length is different.
- */
- const Field *old_field= table->field[key_part->fieldnr - 1];
+ const Create_field &new_field=
+ *alter_info->create_list.elem(new_part->fieldnr);
- bool is_equal= key_part->field->is_equal(*new_field);
- /* TODO: below is an InnoDB specific code which should be moved to InnoDB */
- if (!is_equal)
+ if (!new_field.field ||
+ new_field.field->field_index != key_part->fieldnr - 1)
{
- if (!key_part->field->can_be_converted_by_engine(*new_field))
- return Compare_keys::NotEqual;
-
- if (!Charset(old_field->charset())
- .eq_collation_specific_names(new_field->charset))
- return Compare_keys::NotEqual;
+ return Compare_keys::NotEqual;
}
- if (key_part->length != new_part->length)
- {
- if (key_part->length != old_field->field_length ||
- key_part->length >= new_part->length || is_equal)
- {
- return Compare_keys::NotEqual;
- }
- result= Compare_keys::EqualButKeyPartLength;
- }
+ auto compare= table->file->compare_key_parts(
+ *table->field[key_part->fieldnr - 1], new_field, *key_part, *new_part);
+ result= merge(result, compare);
}
- /*
- Rebuild the index if following condition get satisfied:
-
- (i) Old table doesn't have primary key, new table has it and vice-versa
- (ii) Primary key changed to another existing index
-*/
- if ((new_key == new_pk) != (table_key == old_pk))
- return Compare_keys::NotEqual;
-
/* Check that key comment is not changed. */
if (cmp(table_key->comment, new_key->comment) != 0)
- {
- if (result != Compare_keys::Equal)
- return Compare_keys::NotEqual;
- result= Compare_keys::EqualButComment;
- }
+ result= merge(result, Compare_keys::EqualButComment);
return result;
}
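
The restructuring applies all whole-key rejections first (algorithm, flags, key part count, block size, the primary-key role test, engine options) and then folds one verdict per key part through merge(). The compact program below reproduces the fold and the primary-key test; compare_one_part() is an assumed stand-in for handler::compare_key_parts(), whose real semantics are engine-specific, and this merge() is a behavior-equivalent condensation of the one above:

    #include <cstdio>
    #include <utility>
    #include <vector>

    enum class Compare_keys
    { Equal, EqualButKeyPartLength, EqualButComment, NotEqual };

    // Same folding rule as merge() above: Equal is the identity, NotEqual
    // absorbs everything, and two different degradations conflict.
    static Compare_keys merge(Compare_keys current, Compare_keys add)
    {
      if (current == Compare_keys::Equal) return add;
      if (add == Compare_keys::Equal)     return current;
      if (current == add)                 return current;
      return Compare_keys::NotEqual;
    }

    // Assumed per-part comparison: growing a key part stays INPLACE-able
    // here; anything else forces a rebuild.
    static Compare_keys compare_one_part(unsigned old_len, unsigned new_len)
    {
      if (old_len == new_len) return Compare_keys::Equal;
      if (old_len <  new_len) return Compare_keys::EqualButKeyPartLength;
      return Compare_keys::NotEqual;
    }

    int main()
    {
      // The primary-key role test: rebuild when exactly one side is the PK.
      bool is_new_pk= true, was_old_pk= false;
      if (is_new_pk != was_old_pk)
        std::puts("PK gained/lost/reassigned -> NotEqual");

      std::vector<std::pair<unsigned, unsigned>> parts= {{10, 10}, {20, 30}};
      Compare_keys result= Compare_keys::Equal;
      for (const auto &p : parts)
        result= merge(result, compare_one_part(p.first, p.second));
      std::printf("folded verdict: %d\n", (int) result);  // 1 == EqualButKeyPartLength
      return 0;
    }
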
diff --git a/sql/sql_test.cc b/sql/sql_test.cc
index ffd42599527..e353c2a6b46 100644
--- a/sql/sql_test.cc
+++ b/sql/sql_test.cc
@@ -656,9 +656,12 @@ Memory allocated by threads: %s\n",
void print_keyuse_array_for_trace(THD *thd, DYNAMIC_ARRAY *keyuse_array)
{
+ DBUG_ASSERT(thd->trace_started());
+
Json_writer_object wrapper(thd);
Json_writer_array trace_key_uses(thd, "ref_optimizer_key_uses");
- for(uint i=0; i < keyuse_array->elements; i++)
+
+ for (uint i=0; i < keyuse_array->elements; i++)
{
KEYUSE *keyuse= (KEYUSE*)dynamic_array_ptr(keyuse_array, i);
Json_writer_object keyuse_elem(thd);
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 3f0f7251d5c..e49a82dab80 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -7262,10 +7262,10 @@ alter:
lex->server_options.reset($3);
} OPTIONS_SYM '(' server_options_list ')' { }
/* ALTER USER foo is allowed for MySQL compatibility. */
- | ALTER opt_if_exists USER_SYM clear_privileges grant_list
+ | ALTER USER_SYM opt_if_exists clear_privileges grant_list
opt_require_clause opt_resource_options opt_account_locking opt_password_expiration
{
- Lex->create_info.set($2);
+ Lex->create_info.set($3);
Lex->sql_command= SQLCOM_ALTER_USER;
}
| ALTER SEQUENCE_SYM opt_if_exists
@@ -8300,9 +8300,13 @@ rename:
RENAME table_or_tables
{
Lex->sql_command= SQLCOM_RENAME_TABLE;
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
}
table_to_table_list
- {}
+ {
+ Lex->pop_select(); //main select
+ }
| RENAME USER_SYM clear_privileges rename_list
{
Lex->sql_command = SQLCOM_RENAME_USER;
@@ -8400,9 +8404,13 @@ preload:
LEX *lex=Lex;
lex->sql_command=SQLCOM_PRELOAD_KEYS;
lex->alter_info.reset();
+ if (lex->main_select_push())
+ MYSQL_YYABORT;
}
preload_list_or_parts
- {}
+ {
+ Lex->pop_select(); //main select
+ }
;
preload_list_or_parts:
@@ -12585,11 +12593,16 @@ drop:
}
table_list opt_lock_wait_timeout opt_restrict
{}
- | DROP INDEX_SYM opt_if_exists_table_element ident ON table_ident opt_lock_wait_timeout
+ | DROP INDEX_SYM
+ {
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ }
+ opt_if_exists_table_element ident ON table_ident opt_lock_wait_timeout
{
LEX *lex=Lex;
Alter_drop *ad= (new (thd->mem_root)
- Alter_drop(Alter_drop::KEY, $4.str, $3));
+ Alter_drop(Alter_drop::KEY, $5.str, $4));
if (unlikely(ad == NULL))
MYSQL_YYABORT;
lex->sql_command= SQLCOM_DROP_INDEX;
@@ -12597,10 +12610,11 @@ drop:
lex->alter_info.flags= ALTER_DROP_INDEX;
lex->alter_info.drop_list.push_back(ad, thd->mem_root);
if (unlikely(!lex->current_select->
- add_table_to_list(thd, $6, NULL, TL_OPTION_UPDATING,
+ add_table_to_list(thd, $7, NULL, TL_OPTION_UPDATING,
TL_READ_NO_INSERT,
MDL_SHARED_UPGRADABLE)))
MYSQL_YYABORT;
+ Lex->pop_select(); //main select
}
| DROP DATABASE opt_if_exists ident
{
@@ -14106,12 +14120,18 @@ backup_statements:
Lex->backup_stage= (backup_stages) (type-1);
break;
}
- | LOCK_SYM table_ident
+ | LOCK_SYM
{
- if (unlikely(!Select->add_table_to_list(thd, $2, NULL, 0,
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ }
+ table_ident
+ {
+ if (unlikely(!Select->add_table_to_list(thd, $3, NULL, 0,
TL_READ, MDL_SHARED_HIGH_PRIO)))
MYSQL_YYABORT;
Lex->sql_command= SQLCOM_BACKUP_LOCK;
+ Lex->pop_select(); //main select
}
| UNLOCK_SYM
{
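
Each grammar change in this file follows the same discipline: push a select context before the embedded action that registers tables (which is why the $-positions in the DROP INDEX rule shift by one), and pop it once the statement is complete. A toy model of that stack contract, with assumed names:

    #include <cstdio>
    #include <vector>

    // Toy model of the Lex select stack the actions above manipulate.
    struct Lex_model
    {
      std::vector<const char *> selects;
      bool main_select_push()            // false == success, mirroring the
      {                                  // `if (push) MYSQL_YYABORT` pattern
        selects.push_back("main select");
        return false;
      }
      void pop_select() { selects.pop_back(); }
      void add_table_to_list(const char *name)
      { std::printf("table %s registered in %s\n", name, selects.back()); }
    };

    int main()
    {
      Lex_model lex;
      if (lex.main_select_push())        // before parsing the table list
        return 1;                        // MYSQL_YYABORT analogue
      lex.add_table_to_list("t1");       // RENAME / DROP INDEX / BACKUP LOCK
      lex.pop_select();                  // //main select
      return 0;
    }
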
diff --git a/sql/table.cc b/sql/table.cc
index 10c44013538..2b31cbef083 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -3904,7 +3904,7 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
*/
field= key_part->field=field->make_new_field(&outparam->mem_root,
outparam, 0);
- field->field_length= key_part->length;
+ const_cast<uint32_t&>(field->field_length)= key_part->length;
}
}
if (!share->use_ext_keys)
diff --git a/sql/wsrep_thd.cc b/sql/wsrep_thd.cc
index 5ae583212f3..b605ff0496d 100644
--- a/sql/wsrep_thd.cc
+++ b/sql/wsrep_thd.cc
@@ -54,8 +54,9 @@ static void wsrep_replication_process(THD *thd,
Wsrep_applier_service applier_service(thd);
/* thd->system_thread_info.rpl_sql_info isn't initialized. */
- thd->system_thread_info.rpl_sql_info=
- new rpl_sql_thread_info(thd->wsrep_rgi->rli->mi->rpl_filter);
+ if (!thd->slave_thread)
+ thd->system_thread_info.rpl_sql_info=
+ new rpl_sql_thread_info(thd->wsrep_rgi->rli->mi->rpl_filter);
WSREP_INFO("Starting applier thread %llu", thd->thread_id);
enum wsrep::provider::status
@@ -67,7 +68,8 @@ static void wsrep_replication_process(THD *thd,
mysql_cond_broadcast(&COND_wsrep_slave_threads);
mysql_mutex_unlock(&LOCK_wsrep_slave_threads);
- delete thd->system_thread_info.rpl_sql_info;
+ if (!thd->slave_thread)
+ delete thd->system_thread_info.rpl_sql_info;
delete thd->wsrep_rgi->rli->mi;
delete thd->wsrep_rgi->rli;