Diffstat (limited to 'sql')
-rw-r--r--  sql/field.cc                    24
-rw-r--r--  sql/handler.cc                   4
-rw-r--r--  sql/handler.h                    8
-rw-r--r--  sql/my_json_writer.cc           20
-rw-r--r--  sql/my_json_writer.h            20
-rw-r--r--  sql/opt_range.cc               105
-rw-r--r--  sql/opt_subselect.cc            77
-rw-r--r--  sql/opt_table_elimination.cc    23
-rw-r--r--  sql/opt_trace.cc               160
-rw-r--r--  sql/opt_trace.h                 15
-rw-r--r--  sql/opt_trace_context.h         15
-rw-r--r--  sql/rowid_filter.cc              2
-rw-r--r--  sql/sql_class.cc                 5
-rw-r--r--  sql/sql_derived.cc              43
-rw-r--r--  sql/sql_parse.cc                12
-rw-r--r--  sql/sql_priv.h                   5
-rw-r--r--  sql/sql_select.cc               44
-rw-r--r--  sql/sql_table.cc                 3
-rw-r--r--  sql/sql_test.cc                 10
-rw-r--r--  sql/sys_vars.cc                  6
-rw-r--r--  sql/wsrep_client_service.cc      8
-rw-r--r--  sql/wsrep_server_service.cc     20
-rw-r--r--  sql/wsrep_sst.cc                 2
-rw-r--r--  sql/wsrep_trans_observer.h       8
-rw-r--r--  sql/wsrep_var.cc                 3
25 files changed, 339 insertions, 303 deletions
diff --git a/sql/field.cc b/sql/field.cc
index 6cef622bf1e..bb4a0f06fc4 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -7084,9 +7084,6 @@ uint Field_str::is_equal(Create_field *new_field)
return new_field->charset == field_charset
? IS_EQUAL_YES : IS_EQUAL_PACK_LENGTH;
- if (table->file->ha_table_flags() & HA_EXTENDED_TYPES_CONVERSION)
- return IS_EQUAL_PACK_LENGTH_EXT;
-
return IS_EQUAL_NO;
}
@@ -7947,11 +7944,6 @@ uint Field_varstring::is_equal(Create_field *new_field)
(table->file->ha_table_flags() & HA_EXTENDED_TYPES_CONVERSION))
return IS_EQUAL_PACK_LENGTH; // VARCHAR, longer length
}
- else if (new_type_handler == &type_handler_string) // converting to CHAR
- {
- if (table->file->ha_table_flags() & HA_EXTENDED_TYPES_CONVERSION)
- return IS_EQUAL_PACK_LENGTH_EXT;
- }
return IS_EQUAL_NO;
}
@@ -9568,22 +9560,6 @@ uint Field_num::is_equal(Create_field *new_field)
format for integers, we can only return IS_EQUAL_YES for the TINYINT
conversion. */
- if (table->file->ha_table_flags() & HA_EXTENDED_TYPES_CONVERSION)
- {
- /* For now, prohibit instant conversion between BIT and integers.
- Note: pack_length(), which is compared below, is measured in
- bytes, and for BIT the last byte may be partially occupied. We
- must not allow instant conversion to BIT such that the last byte
- is partially occupied.
- We could allow converting TINYINT UNSIGNED to BIT(8) or wider. */
- if (th != new_th &&
- (th == &type_handler_bit || new_th == &type_handler_bit))
- return IS_EQUAL_NO;
- if (th->result_type() == new_th->result_type() &&
- new_field->pack_length >= pack_length())
- return IS_EQUAL_PACK_LENGTH_EXT;
- }
-
return IS_EQUAL_NO;
}
diff --git a/sql/handler.cc b/sql/handler.cc
index 9f656806b5d..bc183837903 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -6256,7 +6256,9 @@ static int write_locked_table_maps(THD *thd)
MYSQL_LOCK *locks[2];
locks[0]= thd->extra_lock;
locks[1]= thd->lock;
- my_bool with_annotate= thd->variables.binlog_annotate_row_events &&
+ my_bool with_annotate= IF_WSREP(!wsrep_fragments_certified_for_stmt(thd),
+ true) &&
+ thd->variables.binlog_annotate_row_events &&
thd->query() && thd->query_length();
for (uint i= 0 ; i < sizeof(locks)/sizeof(*locks) ; ++i )
diff --git a/sql/handler.h b/sql/handler.h
index afe45f2b8d6..09c9c9a6849 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -751,14 +751,6 @@ typedef ulonglong alter_table_operations;
*/
#define ALTER_COLUMN_INDEX_LENGTH (1ULL << 60)
-/**
- Change the column length or type such that no rebuild is needed.
- Only set if ALTER_COLUMN_EQUAL_PACK_LENGTH does not apply, and
- if HA_EXTENDED_TYPES_CONVERSION holds.
- @see IS_EQUAL_PACK_LENGTH_EXT
-*/
-#define ALTER_COLUMN_EQUAL_PACK_LENGTH_EXT (1ULL << 61)
-
/*
Flags set in partition_flags when altering partitions
*/
diff --git a/sql/my_json_writer.cc b/sql/my_json_writer.cc
index 7ae0c58bd7d..3755f8d4bcb 100644
--- a/sql/my_json_writer.cc
+++ b/sql/my_json_writer.cc
@@ -219,15 +219,15 @@ void Json_writer::add_str(const String &str)
add_str(str.ptr(), str.length());
}
-Json_writer_object::Json_writer_object(THD *thd) :
+Json_writer_object::Json_writer_object(THD *thd) :
Json_writer_struct(thd)
{
if (my_writer)
my_writer->start_object();
}
-Json_writer_object::Json_writer_object(THD* thd, const char *str)
- : Json_writer_struct(thd)
+Json_writer_object::Json_writer_object(THD* thd, const char *str) :
+ Json_writer_struct(thd)
{
if (my_writer)
my_writer->add_member(str).start_object();
@@ -247,8 +247,8 @@ Json_writer_array::Json_writer_array(THD *thd) :
my_writer->start_array();
}
-Json_writer_array::Json_writer_array(THD *thd, const char *str)
- :Json_writer_struct(thd)
+Json_writer_array::Json_writer_array(THD *thd, const char *str) :
+ Json_writer_struct(thd)
{
if (my_writer)
my_writer->add_member(str).start_array();
@@ -263,6 +263,16 @@ Json_writer_array::~Json_writer_array()
}
}
+Json_writer_temp_disable::Json_writer_temp_disable(THD *thd_arg)
+{
+ thd= thd_arg;
+ thd->opt_trace.disable_tracing_if_required();
+}
+Json_writer_temp_disable::~Json_writer_temp_disable()
+{
+ thd->opt_trace.enable_tracing_if_required();
+}
+
bool Single_line_formatting_helper::on_add_member(const char *name)
{
DBUG_ASSERT(state== INACTIVE || state == DISABLED);
diff --git a/sql/my_json_writer.h b/sql/my_json_writer.h
index 3234c748f5d..dbd7cd133e9 100644
--- a/sql/my_json_writer.h
+++ b/sql/my_json_writer.h
@@ -215,12 +215,11 @@ public:
*/
void set_size_limit(size_t mem_size) { output.set_size_limit(mem_size); }
- // psergey: return how many bytes would be required to store everything
size_t get_truncated_bytes() { return output.get_truncated_bytes(); }
Json_writer() :
indent_level(0), document_start(true), element_started(false),
- first_child(true), allowed_mem_size(0)
+ first_child(true)
{
fmt_helper.init(this);
}
@@ -235,12 +234,6 @@ private:
bool element_started;
bool first_child;
- /*
- True when we are using the optimizer trace
- FALSE otherwise
- */
- size_t allowed_mem_size;
-
Single_line_formatting_helper fmt_helper;
void append_indent();
@@ -566,6 +559,17 @@ public:
~Json_writer_array();
};
+/*
+ RAII-based class to disable writing into the JSON document
+*/
+
+class Json_writer_temp_disable
+{
+public:
+ Json_writer_temp_disable(THD *thd_arg);
+ ~Json_writer_temp_disable();
+ THD *thd;
+};
/*
RAII-based helper class to detect incorrect use of Json_writer.
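
The Json_writer_temp_disable class declared above (its constructor and destructor are added in my_json_writer.cc earlier in this diff) is a scope guard: constructing it calls thd->opt_trace.disable_tracing_if_required(), and the destructor calls enable_tracing_if_required() when the object leaves scope. A minimal usage sketch with a hypothetical caller (the guard and the two Opt_trace_context methods come from this patch; everything else is illustrative):

    // Suppress optimizer-trace output for the duration of a scope.
    void recost_without_tracing(JOIN *join)
    {
      THD *thd= join->thd;
      Json_writer_temp_disable dont_trace(thd); // ctor: disable_tracing_if_required()
      /* re-run costing here; nothing is written to the trace */
    }                                           // dtor: enable_tracing_if_required()

The patch uses exactly this pattern in optimize_wo_join_buffering() (sql_select.cc) and in Sj_materialization_picker::check_qep() (opt_subselect.cc).
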
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 468d16c095e..fcf0d2228a5 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -675,7 +675,7 @@ int SEL_IMERGE::or_sel_tree_with_checks(RANGE_OPT_PARAM *param,
{
bool was_ored= FALSE;
*is_last_check_pass= is_first_check_pass;
- SEL_TREE** or_tree = trees;
+ SEL_TREE** or_tree= trees;
for (uint i= 0; i < n_trees; i++, or_tree++)
{
SEL_TREE *result= 0;
@@ -872,7 +872,7 @@ SEL_IMERGE::SEL_IMERGE(SEL_IMERGE *arg, uint cnt,
trees_next= trees + (cnt ? cnt : arg->trees_next-arg->trees);
trees_end= trees + elements;
- for (SEL_TREE **tree = trees, **arg_tree= arg->trees; tree < trees_next;
+ for (SEL_TREE **tree= trees, **arg_tree= arg->trees; tree < trees_next;
tree++, arg_tree++)
{
if (!(*tree= new SEL_TREE(*arg_tree, TRUE, param)))
@@ -2211,7 +2211,7 @@ public:
@param trace_object The optimizer trace object the info is appended to
*/
virtual void trace_basic_info(const PARAM *param,
- Json_writer_object *trace_object) const = 0;
+ Json_writer_object *trace_object) const= 0;
};
@@ -2261,10 +2261,10 @@ void TRP_RANGE::trace_basic_info(const PARAM *param,
Json_writer_object *trace_object) const
{
DBUG_ASSERT(param->using_real_indexes);
- const uint keynr_in_table = param->real_keynr[key_idx];
+ const uint keynr_in_table= param->real_keynr[key_idx];
- const KEY &cur_key = param->table->key_info[keynr_in_table];
- const KEY_PART_INFO *key_part = cur_key.key_part;
+ const KEY &cur_key= param->table->key_info[keynr_in_table];
+ const KEY_PART_INFO *key_part= cur_key.key_part;
trace_object->add("type", "range_scan")
.add("index", cur_key.name)
@@ -2329,7 +2329,7 @@ void TRP_ROR_UNION::trace_basic_info(const PARAM *param,
THD *thd= param->thd;
trace_object->add("type", "index_roworder_union");
Json_writer_array smth_trace(thd, "union_of");
- for (TABLE_READ_PLAN **current = first_ror; current != last_ror; current++)
+ for (TABLE_READ_PLAN **current= first_ror; current != last_ror; current++)
{
Json_writer_object trp_info(thd);
(*current)->trace_basic_info(param, &trp_info);
@@ -2364,7 +2364,7 @@ void TRP_INDEX_INTERSECT::trace_basic_info(const PARAM *param,
THD *thd= param->thd;
trace_object->add("type", "index_sort_intersect");
Json_writer_array smth_trace(thd, "index_sort_intersect_of");
- for (TRP_RANGE **current = range_scans; current != range_scans_end;
+ for (TRP_RANGE **current= range_scans; current != range_scans_end;
current++)
{
Json_writer_object trp_info(thd);
@@ -2466,9 +2466,9 @@ void TRP_GROUP_MIN_MAX::trace_basic_info(const PARAM *param,
trace_object->add("type", "index_group").add("index", index_info->name);
if (min_max_arg_part)
- trace_object->add("group_attribute", min_max_arg_part->field->field_name);
+ trace_object->add("min_max_arg", min_max_arg_part->field->field_name);
else
- trace_object->add_null("group_attribute");
+ trace_object->add_null("min_max_arg");
trace_object->add("min_aggregate", have_min)
.add("max_aggregate", have_max)
@@ -2476,12 +2476,12 @@ void TRP_GROUP_MIN_MAX::trace_basic_info(const PARAM *param,
.add("rows", records)
.add("cost", read_cost);
- const KEY_PART_INFO *key_part = index_info->key_part;
+ const KEY_PART_INFO *key_part= index_info->key_part;
{
Json_writer_array trace_keyparts(thd, "key_parts_used_for_access");
- for (uint partno = 0; partno < used_key_parts; partno++)
+ for (uint partno= 0; partno < used_key_parts; partno++)
{
- const KEY_PART_INFO *cur_key_part = key_part + partno;
+ const KEY_PART_INFO *cur_key_part= key_part + partno;
trace_keyparts.add(cur_key_part->field->field_name);
}
}
@@ -3438,7 +3438,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
{
rows= 0;
table->reginfo.impossible_range= 1;
- selectivity_for_column.add("selectivity_from_histograms", rows);
+ selectivity_for_column.add("selectivity_from_histogram", rows);
selectivity_for_column.add("cause", "impossible range");
goto free_alloc;
}
@@ -3448,7 +3448,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
if (rows != DBL_MAX)
{
key->field->cond_selectivity= rows/table_records;
- selectivity_for_column.add("selectivity_from_histograms",
+ selectivity_for_column.add("selectivity_from_histogram",
key->field->cond_selectivity);
}
}
@@ -3472,6 +3472,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
free_root(&alloc, MYF(0));
}
+ selectivity_for_columns.end();
if (quick && (quick->get_type() == QUICK_SELECT_I::QS_TYPE_ROR_UNION ||
quick->get_type() == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE))
@@ -3546,7 +3547,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
table->cond_selectivity_sampling_explain= &dt->list;
}
}
-
+ trace_wrapper.add("cond_selectivity", table->cond_selectivity);
DBUG_RETURN(FALSE);
}
@@ -5073,7 +5074,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
trace_idx.add("chosen", false).add("cause", "cost");
continue;
}
- const uint keynr_in_table = param->real_keynr[(*cur_child)->key_idx];
+ const uint keynr_in_table= param->real_keynr[(*cur_child)->key_idx];
imerge_cost += (*cur_child)->read_cost;
all_scans_ror_able &= ((*ptree)->n_ror_scans > 0);
all_scans_rors &= (*cur_child)->is_ror;
@@ -5134,7 +5135,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
*/
double rid_comp_cost= static_cast<double>(non_cpk_scan_records) /
TIME_FOR_COMPARE_ROWID;
- imerge_cost += rid_comp_cost;
+ imerge_cost+= rid_comp_cost;
trace_best_disjunct.add("cost_of_mapping_rowid_in_non_clustered_pk_scan",
rid_comp_cost);
}
@@ -5142,7 +5143,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
/* Calculate cost(rowid_to_row_scan) */
{
double sweep_cost= get_sweep_read_cost(param, non_cpk_scan_records);
- imerge_cost += sweep_cost;
+ imerge_cost+= sweep_cost;
trace_best_disjunct.add("cost_sort_rowid_and_read_disk", sweep_cost);
}
DBUG_PRINT("info",("index_merge cost with rowid-to-row scan: %g",
@@ -5169,7 +5170,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
}
{
- const double dup_removal_cost = Unique::get_use_cost(
+ const double dup_removal_cost= Unique::get_use_cost(
param->imerge_cost_buff, (uint)non_cpk_scan_records,
param->table->file->ref_length,
(size_t)param->thd->variables.sortbuff_size,
@@ -6371,11 +6372,11 @@ void TRP_ROR_INTERSECT::trace_basic_info(const PARAM *param,
trace_object->add("clustered_pk_scan", cpk_scan != NULL);
Json_writer_array smth_trace(thd, "intersect_of");
- for (ROR_SCAN_INFO **cur_scan = first_scan; cur_scan != last_scan;
+ for (ROR_SCAN_INFO **cur_scan= first_scan; cur_scan != last_scan;
cur_scan++)
{
- const KEY &cur_key = param->table->key_info[(*cur_scan)->keynr];
- const KEY_PART_INFO *key_part = cur_key.key_part;
+ const KEY &cur_key= param->table->key_info[(*cur_scan)->keynr];
+ const KEY_PART_INFO *key_part= cur_key.key_part;
Json_writer_object trace_isect_idx(thd);
trace_isect_idx.add("type", "range_scan");
@@ -6383,15 +6384,15 @@ void TRP_ROR_INTERSECT::trace_basic_info(const PARAM *param,
trace_isect_idx.add("rows", (*cur_scan)->records);
Json_writer_array trace_range(thd, "ranges");
- for (const SEL_ARG *current = (*cur_scan)->sel_arg->first(); current;
- current = current->next)
+ for (const SEL_ARG *current= (*cur_scan)->sel_arg->first(); current;
+ current= current->next)
{
String range_info;
range_info.set_charset(system_charset_info);
- for (const SEL_ARG *part = current; part;
- part = part->next_key_part ? part->next_key_part : nullptr)
+ for (const SEL_ARG *part= current; part;
+ part= part->next_key_part ? part->next_key_part : nullptr)
{
- const KEY_PART_INFO *cur_key_part = key_part + part->part;
+ const KEY_PART_INFO *cur_key_part= key_part + part->part;
append_range(&range_info, cur_key_part, part->min_value,
part->max_value, part->min_flag | part->max_flag);
}
@@ -6816,7 +6817,7 @@ static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
*/
const double idx_cost= rows2double(info->index_records) /
TIME_FOR_COMPARE_ROWID;
- info->index_scan_costs += idx_cost;
+ info->index_scan_costs+= idx_cost;
trace_costs->add("index_scan_cost", idx_cost);
}
else
@@ -6840,7 +6841,7 @@ static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
{
double sweep_cost= get_sweep_read_cost(info->param,
double2rows(info->out_rows));
- info->total_cost += sweep_cost;
+ info->total_cost+= sweep_cost;
trace_costs->add("disk_sweep_cost", sweep_cost);
DBUG_PRINT("info", ("info->total_cost= %g", info->total_cost));
}
@@ -7371,8 +7372,8 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
{
Json_writer_array trace_range(thd, "ranges");
- const KEY &cur_key = param->table->key_info[keynr];
- const KEY_PART_INFO *key_part = cur_key.key_part;
+ const KEY &cur_key= param->table->key_info[keynr];
+ const KEY_PART_INFO *key_part= cur_key.key_part;
String range_info;
range_info.set_charset(system_charset_info);
@@ -13384,7 +13385,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
key_part_nr= get_field_keypart(cur_index_info, min_max_arg_item->field);
if (key_part_nr <= cur_group_key_parts)
{
- cause = "aggregate column not suffix in idx";
+ cause= "aggregate column not suffix in idx";
goto next_index;
}
min_max_arg_part= cur_index_info->key_part + key_part_nr - 1;
@@ -13438,7 +13439,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
&cur_key_infix_len,
&first_non_infix_part))
{
- cause = "nonconst equality gap attribute";
+ cause= "nonconst equality gap attribute";
goto next_index;
}
}
@@ -13449,7 +13450,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
There is a gap but no range tree, thus no predicates at all for the
non-group keyparts.
*/
- cause = "no nongroup keypart predicate";
+ cause= "no nongroup keypart predicate";
goto next_index;
}
else if (first_non_group_part && join->conds)
@@ -13474,7 +13475,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
if (join->conds->walk(&Item::find_item_in_field_list_processor, 0,
key_part_range))
{
- cause = "keypart reference from where clause";
+ cause= "keypart reference from where clause";
goto next_index;
}
}
@@ -13492,7 +13493,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
{
if (bitmap_is_set(table->read_set, cur_part->field->field_index))
{
- cause = "keypart after infix in query";
+ cause= "keypart after infix in query";
goto next_index;
}
}
@@ -13511,7 +13512,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
index_range_tree, &cur_range) ||
(cur_range && cur_range->type != SEL_ARG::KEY_RANGE))
{
- cause = "minmax keypart in disjunctive query";
+ cause= "minmax keypart in disjunctive query";
goto next_index;
}
}
@@ -13538,7 +13539,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
{
Json_writer_array trace_range(thd, "ranges");
- const KEY_PART_INFO *key_part = cur_index_info->key_part;
+ const KEY_PART_INFO *key_part= cur_index_info->key_part;
String range_info;
range_info.set_charset(system_charset_info);
@@ -13578,7 +13579,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
if (cause)
{
trace_idx.add("usable", false).add("cause", cause);
- cause = NULL;
+ cause= NULL;
}
}
@@ -15776,9 +15777,9 @@ static void append_range_all_keyparts(Json_writer_array *range_trace,
DBUG_ASSERT(keypart && keypart != &null_element);
// Navigate to first interval in red-black tree
- const KEY_PART_INFO *cur_key_part = key_parts + keypart->part;
- const SEL_ARG *keypart_range = keypart->first();
- const size_t save_range_so_far_length = range_so_far->length();
+ const KEY_PART_INFO *cur_key_part= key_parts + keypart->part;
+ const SEL_ARG *keypart_range= keypart->first();
+ const size_t save_range_so_far_length= range_so_far->length();
while (keypart_range)
@@ -15827,9 +15828,10 @@ static void append_range_all_keyparts(Json_writer_array *range_trace,
static void print_key_value(String *out, const KEY_PART_INFO *key_part,
const uchar *key)
{
- Field *field = key_part->field;
+ Field *field= key_part->field;
- if (field->flags & BLOB_FLAG) {
+ if (field->flags & BLOB_FLAG)
+ {
// Byte 0 of a nullable key is the null-byte. If set, key is NULL.
if (field->real_maybe_null() && *key)
out->append(STRING_WITH_LEN("NULL"));
@@ -15840,7 +15842,7 @@ static void print_key_value(String *out, const KEY_PART_INFO *key_part,
return;
}
- uint store_length = key_part->store_length;
+ uint store_length= key_part->store_length;
if (field->real_maybe_null())
{
@@ -15849,7 +15851,8 @@ static void print_key_value(String *out, const KEY_PART_INFO *key_part,
Otherwise, print the key value starting immediately after the
null-byte
*/
- if (*key) {
+ if (*key)
+ {
out->append(STRING_WITH_LEN("NULL"));
return;
}
@@ -15862,9 +15865,11 @@ static void print_key_value(String *out, const KEY_PART_INFO *key_part,
optimizer trace expects. If the column is binary, the hex
representation is printed to the trace instead.
*/
- if (field->flags & BINARY_FLAG) {
+ if (field->flags & BINARY_FLAG)
+ {
out->append("0x");
- for (uint i = 0; i < store_length; i++) {
+ for (uint i = 0; i < store_length; i++)
+ {
out->append(_dig_vec_lower[*(key + i) >> 4]);
out->append(_dig_vec_lower[*(key + i) & 0x0F]);
}
@@ -15872,7 +15877,7 @@ static void print_key_value(String *out, const KEY_PART_INFO *key_part,
}
StringBuffer<128> tmp(system_charset_info);
- TABLE *table = field->table;
+ TABLE *table= field->table;
my_bitmap_map *old_sets[2];
dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set);
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index 4f45d77a717..4b38d58eb6d 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -456,6 +456,7 @@ void best_access_path(JOIN *join, JOIN_TAB *s,
table_map remaining_tables, uint idx,
bool disable_jbuf, double record_count,
POSITION *pos, POSITION *loose_scan_pos);
+void trace_plan_prefix(JOIN *join, uint idx, table_map remaining_tables);
static Item *create_subq_in_equalities(THD *thd, SJ_MATERIALIZATION_INFO *sjm,
Item_in_subselect *subq_pred);
@@ -697,9 +698,10 @@ int check_and_do_in_subquery_rewrites(JOIN *join)
if (arena)
thd->restore_active_arena(arena, &backup);
in_subs->is_registered_semijoin= TRUE;
- OPT_TRACE_TRANSFORM(thd, oto0, oto1, select_lex->select_number,
+ OPT_TRACE_TRANSFORM(thd, trace_wrapper, trace_transform,
+ select_lex->select_number,
"IN (SELECT)", "semijoin");
- oto1.add("chosen", true);
+ trace_transform.add("chosen", true);
}
}
else
@@ -840,7 +842,7 @@ bool subquery_types_allow_materialization(THD* thd, Item_in_subselect *in_subs)
in_subs->types_allow_materialization= FALSE; // Assign default values
in_subs->sjm_scan_allowed= FALSE;
- OPT_TRACE_TRANSFORM(thd, oto0, oto1,
+ OPT_TRACE_TRANSFORM(thd, trace_wrapper, trace_transform,
in_subs->get_select_lex()->select_number,
"IN (SELECT)", "materialization");
@@ -856,8 +858,8 @@ bool subquery_types_allow_materialization(THD* thd, Item_in_subselect *in_subs)
if (!inner->type_handler()->subquery_type_allows_materialization(inner,
outer))
{
- oto1.add("possible", false);
- oto1.add("cause", "types mismatch");
+ trace_transform.add("possible", false);
+ trace_transform.add("cause", "types mismatch");
DBUG_RETURN(FALSE);
}
}
@@ -879,12 +881,12 @@ bool subquery_types_allow_materialization(THD* thd, Item_in_subselect *in_subs)
{
in_subs->types_allow_materialization= TRUE;
in_subs->sjm_scan_allowed= all_are_fields;
- oto1.add("sjm_scan_allowed", all_are_fields)
- .add("possible", true);
+ trace_transform.add("sjm_scan_allowed", all_are_fields)
+ .add("possible", true);
DBUG_PRINT("info",("subquery_types_allow_materialization: ok, allowed"));
DBUG_RETURN(TRUE);
}
- oto1.add("possible", false).add("cause", cause);
+ trace_transform.add("possible", false).add("cause", cause);
DBUG_RETURN(FALSE);
}
@@ -1236,29 +1238,30 @@ bool convert_join_subqueries_to_semijoins(JOIN *join)
/* Stop processing if we've reached a subquery that's attached to the ON clause */
if (in_subq->do_not_convert_to_sj)
{
- OPT_TRACE_TRANSFORM(thd, oto0, oto1,
+ OPT_TRACE_TRANSFORM(thd, trace_wrapper, trace_transform,
in_subq->get_select_lex()->select_number,
"IN (SELECT)", "semijoin");
- oto1.add("converted_to_semi_join", false)
- .add("cause", "subquery attached to the ON clause");
+ trace_transform.add("converted_to_semi_join", false)
+ .add("cause", "subquery attached to the ON clause");
break;
}
if (in_subq->is_flattenable_semijoin)
{
- OPT_TRACE_TRANSFORM(thd, oto0, oto1,
+ OPT_TRACE_TRANSFORM(thd, trace_wrapper, trace_transform,
in_subq->get_select_lex()->select_number,
"IN (SELECT)", "semijoin");
if (join->table_count +
in_subq->unit->first_select()->join->table_count >= MAX_TABLES)
{
- oto1.add("converted_to_semi_join", false);
- oto1.add("cause", "table in parent join now exceeds MAX_TABLES");
+ trace_transform.add("converted_to_semi_join", false);
+ trace_transform.add("cause",
+ "table in parent join now exceeds MAX_TABLES");
break;
}
if (convert_subq_to_sj(join, in_subq))
goto restore_arena_and_fail;
- oto1.add("converted_to_semi_join", true);
+ trace_transform.add("converted_to_semi_join", true);
}
else
{
@@ -2380,6 +2383,8 @@ bool optimize_semijoin_nests(JOIN *join, table_map all_table_map)
THD *thd= join->thd;
List_iterator<TABLE_LIST> sj_list_it(join->select_lex->sj_nests);
TABLE_LIST *sj_nest;
+ if (!join->select_lex->sj_nests.elements)
+ DBUG_RETURN(FALSE);
Json_writer_object wrapper(thd);
Json_writer_object trace_semijoin_nest(thd,
"execution_plan_for_potential_materialization");
@@ -2939,6 +2944,7 @@ bool Sj_materialization_picker::check_qep(JOIN *join,
{
bool sjm_scan;
SJ_MATERIALIZATION_INFO *mat_info;
+ THD *thd= join->thd;
if ((mat_info= at_sjmat_pos(join, remaining_tables,
new_join_tab, idx, &sjm_scan)))
{
@@ -3040,6 +3046,7 @@ bool Sj_materialization_picker::check_qep(JOIN *join,
POSITION curpos, dummy;
/* Need to re-run best-access-path as we prefix_rec_count has changed */
bool disable_jbuf= (join->thd->variables.join_cache_level == 0);
+ Json_writer_temp_disable trace_semijoin_mat_scan(thd);
for (i= first_tab + mat_info->tables; i <= idx; i++)
{
best_access_path(join, join->positions[i].table, rem_tables, i,
@@ -3590,6 +3597,12 @@ void fix_semijoin_strategies_for_picked_join_order(JOIN *join)
table_map handled_tabs= 0;
join->sjm_lookup_tables= 0;
join->sjm_scan_tables= 0;
+ THD *thd= join->thd;
+ if (!join->select_lex->sj_nests.elements)
+ return;
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_array trace_semijoin_strategies(thd,
+ "fix_semijoin_strategies_for_picked_join_order");
for (tablenr= table_count - 1 ; tablenr != join->const_tables - 1; tablenr--)
{
POSITION *pos= join->best_positions + tablenr;
@@ -3614,8 +3627,18 @@ void fix_semijoin_strategies_for_picked_join_order(JOIN *join)
first= tablenr - sjm->tables + 1;
join->best_positions[first].n_sj_tables= sjm->tables;
join->best_positions[first].sj_strategy= SJ_OPT_MATERIALIZE;
+ Json_writer_object semijoin_strategy(thd);
+ semijoin_strategy.add("semi_join_strategy","sj_materialize");
+ Json_writer_array semijoin_plan(thd, "join_order");
for (uint i= first; i < first+ sjm->tables; i++)
+ {
+ if (unlikely(thd->trace_started()))
+ {
+ Json_writer_object trace_one_table(thd);
+ trace_one_table.add_table_name(join->best_positions[i].table);
+ }
join->sjm_lookup_tables |= join->best_positions[i].table->table->map;
+ }
}
else if (pos->sj_strategy == SJ_OPT_MATERIALIZE_SCAN)
{
@@ -3653,8 +3676,16 @@ void fix_semijoin_strategies_for_picked_join_order(JOIN *join)
POSITION dummy;
join->cur_sj_inner_tables= 0;
+ Json_writer_object semijoin_strategy(thd);
+ semijoin_strategy.add("semi_join_strategy","sj_materialize_scan");
+ Json_writer_array semijoin_plan(thd, "join_order");
for (i= first + sjm->tables; i <= tablenr; i++)
{
+ if (unlikely(thd->trace_started()))
+ {
+ Json_writer_object trace_one_table(thd);
+ trace_one_table.add_table_name(join->best_positions[i].table);
+ }
best_access_path(join, join->best_positions[i].table, rem_tables, i,
FALSE, prefix_rec_count,
join->best_positions + i, &dummy);
@@ -3683,8 +3714,16 @@ void fix_semijoin_strategies_for_picked_join_order(JOIN *join)
join buffering
*/
join->cur_sj_inner_tables= 0;
+ Json_writer_object semijoin_strategy(thd);
+ semijoin_strategy.add("semi_join_strategy","firstmatch");
+ Json_writer_array semijoin_plan(thd, "join_order");
for (idx= first; idx <= tablenr; idx++)
{
+ if (unlikely(thd->trace_started()))
+ {
+ Json_writer_object trace_one_table(thd);
+ trace_one_table.add_table_name(join->best_positions[idx].table);
+ }
if (join->best_positions[idx].use_join_buffer)
{
best_access_path(join, join->best_positions[idx].table,
@@ -3713,8 +3752,16 @@ void fix_semijoin_strategies_for_picked_join_order(JOIN *join)
join buffering
*/
join->cur_sj_inner_tables= 0;
+ Json_writer_object semijoin_strategy(thd);
+ semijoin_strategy.add("semi_join_strategy","sj_materialize");
+ Json_writer_array semijoin_plan(thd, "join_order");
for (idx= first; idx <= tablenr; idx++)
{
+ if (unlikely(thd->trace_started()))
+ {
+ Json_writer_object trace_one_table(thd);
+ trace_one_table.add_table_name(join->best_positions[idx].table);
+ }
if (join->best_positions[idx].use_join_buffer || (idx == first))
{
best_access_path(join, join->best_positions[idx].table,
diff --git a/sql/opt_table_elimination.cc b/sql/opt_table_elimination.cc
index 03516146de2..422b21cb541 100644
--- a/sql/opt_table_elimination.cc
+++ b/sql/opt_table_elimination.cc
@@ -525,7 +525,7 @@ eliminate_tables_for_list(JOIN *join,
table_map tables_in_list,
Item *on_expr,
table_map tables_used_elsewhere,
- Json_writer_array* eliminate_tables);
+ Json_writer_array* trace_eliminate_tables);
static
bool check_func_dependency(JOIN *join,
table_map dep_tables,
@@ -545,7 +545,7 @@ Dep_module_expr *merge_eq_mods(Dep_module_expr *start,
Dep_module_expr *new_fields,
Dep_module_expr *end, uint and_level);
static void mark_as_eliminated(JOIN *join, TABLE_LIST *tbl,
- Json_writer_array* eliminate_tables);
+ Json_writer_array* trace_eliminate_tables);
static
void add_module_expr(Dep_analysis_context *dac, Dep_module_expr **eq_mod,
uint and_level, Dep_value_field *field_val, Item *right,
@@ -671,12 +671,12 @@ void eliminate_tables(JOIN *join)
}
table_map all_tables= join->all_tables_map();
- Json_writer_array eliminated_tables(thd,"eliminated_tables");
+ Json_writer_array trace_eliminated_tables(thd,"eliminated_tables");
if (all_tables & ~used_tables)
{
/* There are some tables that we probably could eliminate. Try it. */
eliminate_tables_for_list(join, join->join_list, all_tables, NULL,
- used_tables, &eliminated_tables);
+ used_tables, &trace_eliminated_tables);
}
DBUG_VOID_RETURN;
}
@@ -720,7 +720,7 @@ static bool
eliminate_tables_for_list(JOIN *join, List<TABLE_LIST> *join_list,
table_map list_tables, Item *on_expr,
table_map tables_used_elsewhere,
- Json_writer_array *eliminate_tables)
+ Json_writer_array *trace_eliminate_tables)
{
TABLE_LIST *tbl;
List_iterator<TABLE_LIST> it(*join_list);
@@ -742,9 +742,10 @@ eliminate_tables_for_list(JOIN *join, List<TABLE_LIST> *join_list,
&tbl->nested_join->join_list,
tbl->nested_join->used_tables,
tbl->on_expr,
- outside_used_tables, eliminate_tables))
+ outside_used_tables,
+ trace_eliminate_tables))
{
- mark_as_eliminated(join, tbl, eliminate_tables);
+ mark_as_eliminated(join, tbl, trace_eliminate_tables);
}
else
all_eliminated= FALSE;
@@ -756,7 +757,7 @@ eliminate_tables_for_list(JOIN *join, List<TABLE_LIST> *join_list,
check_func_dependency(join, tbl->table->map, NULL, tbl,
tbl->on_expr))
{
- mark_as_eliminated(join, tbl, eliminate_tables);
+ mark_as_eliminated(join, tbl, trace_eliminate_tables);
}
else
all_eliminated= FALSE;
@@ -1797,7 +1798,7 @@ Dep_module* Dep_value_field::get_next_unbound_module(Dep_analysis_context *dac,
*/
static void mark_as_eliminated(JOIN *join, TABLE_LIST *tbl,
- Json_writer_array* eliminate_tables)
+ Json_writer_array* trace_eliminate_tables)
{
TABLE *table;
/*
@@ -1810,7 +1811,7 @@ static void mark_as_eliminated(JOIN *join, TABLE_LIST *tbl,
TABLE_LIST *child;
List_iterator<TABLE_LIST> it(tbl->nested_join->join_list);
while ((child= it++))
- mark_as_eliminated(join, child, eliminate_tables);
+ mark_as_eliminated(join, child, trace_eliminate_tables);
}
else if ((table= tbl->table))
{
@@ -1821,7 +1822,7 @@ static void mark_as_eliminated(JOIN *join, TABLE_LIST *tbl,
tab->type= JT_CONST;
tab->table->const_table= 1;
join->eliminated_tables |= table->map;
- eliminate_tables->add(table->alias.c_ptr_safe());
+ trace_eliminate_tables->add(table->alias.c_ptr_safe());
join->const_table_map|= table->map;
set_position(join, join->const_tables++, tab, (KEYUSE*)0);
}
diff --git a/sql/opt_trace.cc b/sql/opt_trace.cc
index ca05f36579a..befc7934a3a 100644
--- a/sql/opt_trace.cc
+++ b/sql/opt_trace.cc
@@ -24,7 +24,7 @@
#include "my_json_writer.h"
#include "sp_head.h"
-const char I_S_table_name[] = "OPTIMIZER_TRACE";
+const char I_S_table_name[]= "OPTIMIZER_TRACE";
/**
Whether a list of tables contains information_schema.OPTIMIZER_TRACE.
@@ -38,7 +38,7 @@ const char I_S_table_name[] = "OPTIMIZER_TRACE";
*/
bool list_has_optimizer_trace_table(const TABLE_LIST *tbl)
{
- for (; tbl; tbl = tbl->next_global)
+ for (; tbl; tbl= tbl->next_global)
{
if (tbl->schema_table &&
0 == strcmp(tbl->schema_table->table_name, I_S_table_name))
@@ -59,14 +59,15 @@ bool sets_var_optimizer_trace(enum enum_sql_command sql_command,
{
List_iterator_fast<set_var_base> it(*set_vars);
const set_var_base *var;
- while ((var = it++))
+ while ((var= it++))
if (var->is_var_optimizer_trace()) return true;
}
return false;
}
-ST_FIELD_INFO optimizer_trace_info[] = {
+ST_FIELD_INFO optimizer_trace_info[]=
+{
/* name, length, type, value, maybe_null, old_name, open_method */
{"QUERY", 65535, MYSQL_TYPE_STRING, 0, false, NULL, SKIP_OPEN_TABLE},
{"TRACE", 65535, MYSQL_TYPE_STRING, 0, false, NULL, SKIP_OPEN_TABLE},
@@ -74,12 +75,13 @@ ST_FIELD_INFO optimizer_trace_info[] = {
SKIP_OPEN_TABLE},
{"INSUFFICIENT_PRIVILEGES", 1, MYSQL_TYPE_TINY, 0, false, NULL,
SKIP_OPEN_TABLE},
- {NULL, 0, MYSQL_TYPE_STRING, 0, true, NULL, 0}};
+ {NULL, 0, MYSQL_TYPE_STRING, 0, true, NULL, 0}
+};
/*
TODO: one-line needs to be implemented separately
*/
-const char *Opt_trace_context::flag_names[] = {"enabled", "default",
+const char *Opt_trace_context::flag_names[]= {"enabled", "default",
NullS};
/*
@@ -105,15 +107,15 @@ void opt_trace_print_expanded_query(THD *thd, SELECT_LEX *select_lex,
{
if (!thd->trace_started())
return;
- char buff[1024];
- String str(buff, sizeof(buff), system_charset_info);
- str.length(0);
+ StringBuffer<1024> str(system_charset_info);
+ ulonglong save_option_bits= thd->variables.option_bits;
+ thd->variables.option_bits &= ~OPTION_QUOTE_SHOW_CREATE;
select_lex->print(thd, &str,
enum_query_type(QT_TO_SYSTEM_CHARSET |
QT_SHOW_SELECT_NUMBER |
QT_ITEM_IDENT_SKIP_DB_NAMES |
- QT_VIEW_INTERNAL
- ));
+ QT_VIEW_INTERNAL));
+ thd->variables.option_bits= save_option_bits;
/*
The output is not very pretty: lots of back-ticks. The output
is like the one from EXPLAIN EXTENDED; let's try to improve it here.
@@ -141,7 +143,7 @@ void opt_trace_disable_if_no_security_context_access(THD *thd)
*/
return;
}
- Opt_trace_context *const trace = &thd->opt_trace;
+ Opt_trace_context *const trace= &thd->opt_trace;
if (!thd->trace_started())
{
/*
@@ -187,7 +189,6 @@ void opt_trace_disable_if_no_security_context_access(THD *thd)
thd->main_security_ctx.priv_host,
thd->security_context()->priv_host)))
trace->missing_privilege();
- return;
}
void opt_trace_disable_if_no_stored_proc_func_access(THD *thd, sp_head *sp)
@@ -197,17 +198,16 @@ void opt_trace_disable_if_no_stored_proc_func_access(THD *thd, sp_head *sp)
thd->system_thread)
return;
- Opt_trace_context *const trace = &thd->opt_trace;
+ Opt_trace_context *const trace= &thd->opt_trace;
if (!thd->trace_started())
return;
bool full_access;
- Security_context *const backup_thd_sctx = thd->security_context();
+ Security_context *const backup_thd_sctx= thd->security_context();
thd->set_security_context(&thd->main_security_ctx);
- const bool rc = check_show_routine_access(thd, sp, &full_access) || !full_access;
+ const bool rc= check_show_routine_access(thd, sp, &full_access) || !full_access;
thd->set_security_context(backup_thd_sctx);
if (rc)
trace->missing_privilege();
- return;
}
/**
@@ -231,16 +231,16 @@ void opt_trace_disable_if_no_tables_access(THD *thd, TABLE_LIST *tbl)
if (likely(!(thd->variables.optimizer_trace &
Opt_trace_context::FLAG_ENABLED)) || thd->system_thread)
return;
- Opt_trace_context *const trace = &thd->opt_trace;
+ Opt_trace_context *const trace= &thd->opt_trace;
if (!thd->trace_started())
return;
- Security_context *const backup_thd_sctx = thd->security_context();
+ Security_context *const backup_thd_sctx= thd->security_context();
thd->set_security_context(&thd->main_security_ctx);
- const TABLE_LIST *const first_not_own_table = thd->lex->first_not_own_table();
- for (TABLE_LIST *t = tbl; t != NULL && t != first_not_own_table;
- t = t->next_global)
+ const TABLE_LIST *const first_not_own_table= thd->lex->first_not_own_table();
+ for (TABLE_LIST *t= tbl; t != NULL && t != first_not_own_table;
+ t= t->next_global)
{
/*
Anonymous derived tables (as in
@@ -248,9 +248,9 @@ void opt_trace_disable_if_no_tables_access(THD *thd, TABLE_LIST *tbl)
*/
if (!t->is_anonymous_derived_table())
{
- const GRANT_INFO backup_grant_info = t->grant;
- Security_context *const backup_table_sctx = t->security_ctx;
- t->security_ctx = NULL;
+ const GRANT_INFO backup_grant_info= t->grant;
+ Security_context *const backup_table_sctx= t->security_ctx;
+ t->security_ctx= NULL;
/*
(1) check_table_access() fills t->grant.privilege.
(2) Because SELECT privileges can be column-based,
@@ -271,8 +271,8 @@ void opt_trace_disable_if_no_tables_access(THD *thd, TABLE_LIST *tbl)
*/
rc |= check_table_access(thd, SHOW_VIEW_ACL, t, false, 1, true);
}
- t->security_ctx = backup_table_sctx;
- t->grant = backup_grant_info;
+ t->security_ctx= backup_table_sctx;
+ t->grant= backup_grant_info;
if (rc)
{
trace->missing_privilege();
@@ -292,22 +292,22 @@ void opt_trace_disable_if_no_view_access(THD *thd, TABLE_LIST *view,
Opt_trace_context::FLAG_ENABLED)) ||
thd->system_thread)
return;
- Opt_trace_context *const trace = &thd->opt_trace;
+ Opt_trace_context *const trace= &thd->opt_trace;
if (!thd->trace_started())
return;
- Security_context *const backup_table_sctx = view->security_ctx;
- Security_context *const backup_thd_sctx = thd->security_context();
- const GRANT_INFO backup_grant_info = view->grant;
+ Security_context *const backup_table_sctx= view->security_ctx;
+ Security_context *const backup_thd_sctx= thd->security_context();
+ const GRANT_INFO backup_grant_info= view->grant;
- view->security_ctx = NULL; // no SUID context for view
+ view->security_ctx= NULL; // no SUID context for view
// no SUID context for THD
thd->set_security_context(&thd->main_security_ctx);
- const int rc = check_table_access(thd, SHOW_VIEW_ACL, view, false, 1, true);
+ const int rc= check_table_access(thd, SHOW_VIEW_ACL, view, false, 1, true);
- view->security_ctx = backup_table_sctx;
+ view->security_ctx= backup_table_sctx;
thd->set_security_context(backup_thd_sctx);
- view->grant = backup_grant_info;
+ view->grant= backup_grant_info;
if (rc)
{
@@ -347,16 +347,13 @@ class Opt_trace_stmt {
~Opt_trace_stmt()
{
delete current_json;
- missing_priv= false;
- ctx= NULL;
- I_S_disabled= 0;
}
void set_query(const char *query_ptr, size_t length, const CHARSET_INFO *charset);
void open_struct(const char *key, char opening_bracket);
void close_struct(const char *saved_key, char closing_bracket);
void fill_info(Opt_trace_info* info);
void add(const char *key, char *opening_bracket, size_t val_length);
- Json_writer* get_current_json(){return current_json;}
+ Json_writer* get_current_json() {return current_json;}
void missing_privilege();
void disable_tracing_for_children();
void enable_tracing_for_children();
@@ -372,6 +369,12 @@ private:
String query; // store the query sent by the user
Json_writer *current_json; // stores the trace
bool missing_priv; ///< whether user lacks privilege to see this trace
+ /*
+ 0 <=> this trace should be in information_schema.
+ !=0 tracing is disabled, this currently happens when we want to trace a
+    sub-statement. For now traces are only collected for the top statement,
+    not for the sub-statements.
+ */
uint I_S_disabled;
};
@@ -440,28 +443,11 @@ bool Opt_trace_context::is_enabled()
Opt_trace_context::Opt_trace_context()
{
current_trace= NULL;
- inited= FALSE;
- traces= NULL;
max_mem_size= 0;
}
Opt_trace_context::~Opt_trace_context()
{
- inited= FALSE;
- /*
- would be nice to move this to a function
- */
- if (traces)
- {
- while (traces->elements())
- {
- Opt_trace_stmt *prev= traces->at(0);
- delete prev;
- traces->del(0);
- }
- delete traces;
- traces= NULL;
- }
- max_mem_size= 0;
+ delete_traces();
}
void Opt_trace_context::set_query(const char *query, size_t length, const CHARSET_INFO *charset)
@@ -487,26 +473,21 @@ void Opt_trace_context::start(THD *thd, TABLE_LIST *tbl,
DBUG_ASSERT(!current_trace);
current_trace= new Opt_trace_stmt(this);
max_mem_size= max_mem_size_arg;
- if (!inited)
- {
- traces= new Dynamic_array<Opt_trace_stmt*>();
- inited= TRUE;
- }
set_allowed_mem_size(remaining_mem_size());
}
void Opt_trace_context::end()
{
if (current_trace)
- traces->push(current_trace);
+ traces.push(current_trace);
- if (!traces->elements())
+ if (!traces.elements())
return;
- if (traces->elements() > 1)
+ if (traces.elements() > 1)
{
- Opt_trace_stmt *prev= traces->at(0);
+ Opt_trace_stmt *prev= traces.at(0);
delete prev;
- traces->del(0);
+ traces.del(0);
}
current_trace= NULL;
}
@@ -522,7 +503,7 @@ Opt_trace_start::Opt_trace_start(THD *thd, TABLE_LIST *tbl,
if optimizer trace is enabled and the statement we have is traceable,
then we start the context.
*/
- const ulonglong var = thd->variables.optimizer_trace;
+ const ulonglong var= thd->variables.optimizer_trace;
traceable= FALSE;
if (unlikely(var & Opt_trace_context::FLAG_ENABLED) &&
sql_command_can_be_traced(sql_command) &&
@@ -554,21 +535,21 @@ Opt_trace_start::~Opt_trace_start()
void Opt_trace_stmt::fill_info(Opt_trace_info* info)
{
- if (unlikely(info->missing_priv = get_missing_priv()))
+ if (unlikely(info->missing_priv= get_missing_priv()))
{
- info->trace_ptr = info->query_ptr = "";
- info->trace_length = info->query_length = 0;
- info->query_charset = &my_charset_bin;
- info->missing_bytes = 0;
+ info->trace_ptr= info->query_ptr= "";
+ info->trace_length= info->query_length= 0;
+ info->query_charset= &my_charset_bin;
+ info->missing_bytes= 0;
}
else
{
- info->trace_ptr = current_json->output.get_string()->ptr();
- info->trace_length = get_length();
- info->query_ptr = query.ptr();
- info->query_length = query.length();
- info->query_charset = query.charset();
- info->missing_bytes = get_truncated_bytes();
+ info->trace_ptr= current_json->output.get_string()->ptr();
+ info->trace_length= get_length();
+ info->query_ptr= query.ptr();
+ info->query_length= query.length();
+ info->query_charset= query.charset();
+ info->missing_bytes= get_truncated_bytes();
info->missing_priv= get_missing_priv();
}
}
@@ -659,9 +640,7 @@ void Json_writer::add_str(Item *item)
if (item)
{
THD *thd= current_thd;
- char buff[256];
- String str(buff, sizeof(buff), system_charset_info);
- str.length(0);
+ StringBuffer<256> str(system_charset_info);
ulonglong save_option_bits= thd->variables.option_bits;
thd->variables.option_bits &= ~OPTION_QUOTE_SHOW_CREATE;
@@ -675,26 +654,23 @@ void Json_writer::add_str(Item *item)
add_null();
}
-void Opt_trace_context::flush_optimizer_trace()
+void Opt_trace_context::delete_traces()
{
- inited= false;
- if (traces)
+ if (traces.elements())
{
- while (traces->elements())
+ while (traces.elements())
{
- Opt_trace_stmt *prev= traces->at(0);
+ Opt_trace_stmt *prev= traces.at(0);
delete prev;
- traces->del(0);
+ traces.del(0);
}
- delete traces;
- traces= NULL;
}
}
int fill_optimizer_trace_info(THD *thd, TABLE_LIST *tables, Item *)
{
- TABLE *table = tables->table;
+ TABLE *table= tables->table;
Opt_trace_info info;
/* get_values of trace, query, missing bytes and missing_priv
@@ -703,7 +679,7 @@ int fill_optimizer_trace_info(THD *thd, TABLE_LIST *tables, Item *)
*/
Opt_trace_context* ctx= &thd->opt_trace;
- if (thd->opt_trace.empty())
+ if (!thd->opt_trace.empty())
{
Opt_trace_stmt *stmt= ctx->get_top_trace();
stmt->fill_info(&info);
diff --git a/sql/opt_trace.h b/sql/opt_trace.h
index 0e2d0146a49..52318bc6b7f 100644
--- a/sql/opt_trace.h
+++ b/sql/opt_trace.h
@@ -193,9 +193,16 @@ void opt_trace_disable_if_no_stored_proc_func_access(THD *thd, sp_head *sp);
*/
int fill_optimizer_trace_info(THD *thd, TABLE_LIST *tables, Item *);
-#define OPT_TRACE_TRANSFORM(writer, object_level0, object_level1, \
+#define OPT_TRACE_TRANSFORM(thd, object_level0, object_level1, \
select_number, from, to) \
- Json_writer_object object_level0(writer); \
- Json_writer_object object_level1(writer, "transformation"); \
+ Json_writer_object object_level0(thd); \
+ Json_writer_object object_level1(thd, "transformation"); \
object_level1.add_select_number(select_number).add("from", from).add("to", to);
-#endif
\ No newline at end of file
+
+#define OPT_TRACE_VIEWS_TRANSFORM(thd, object_level0, object_level1, \
+ derived, name, select_number, algorithm) \
+ Json_writer_object trace_wrapper(thd); \
+ Json_writer_object trace_derived(thd, derived); \
+ trace_derived.add("table", name).add_select_number(select_number) \
+ .add("algorithm", algorithm);
+#endif
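
OPT_TRACE_TRANSFORM now receives a THD instead of a writer and declares the two Json_writer_object instances in the caller's scope. A sketch of roughly what the semijoin call site in opt_subselect.cc (earlier in this diff) expands to; the argument values are the ones passed at that call site:

    // OPT_TRACE_TRANSFORM(thd, trace_wrapper, trace_transform,
    //                     select_lex->select_number, "IN (SELECT)", "semijoin")
    Json_writer_object trace_wrapper(thd);
    Json_writer_object trace_transform(thd, "transformation");
    trace_transform.add_select_number(select_lex->select_number)
                   .add("from", "IN (SELECT)")
                   .add("to", "semijoin");
    // the caller then appends its own members, e.g.:
    trace_transform.add("chosen", true);

Because the macro declares RAII objects rather than writing through a passed-in writer, the trace nodes are closed automatically when the enclosing block ends.
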
diff --git a/sql/opt_trace_context.h b/sql/opt_trace_context.h
index 87317f67e22..e5df16b1e3b 100644
--- a/sql/opt_trace_context.h
+++ b/sql/opt_trace_context.h
@@ -19,14 +19,14 @@ public:
ulong max_mem_size_arg);
void end();
void set_query(const char *query, size_t length, const CHARSET_INFO *charset);
- void flush_optimizer_trace();
+ void delete_traces();
void set_allowed_mem_size(size_t mem_size);
size_t remaining_mem_size();
private:
Opt_trace_stmt* top_trace()
{
- return *(traces->front());
+ return *(traces.front());
}
public:
@@ -39,7 +39,7 @@ public:
Opt_trace_stmt* get_top_trace()
{
- if (!traces || !traces->elements())
+ if (!traces.elements())
return NULL;
return top_trace();
}
@@ -52,7 +52,7 @@ public:
bool empty()
{
- return traces && (static_cast<uint>(traces->elements()) != 0);
+ return static_cast<uint>(traces.elements()) == 0;
}
bool is_started()
@@ -79,13 +79,8 @@ private:
/*
List of traces (currently it stores only 1 trace)
*/
- Dynamic_array<Opt_trace_stmt*> *traces;
+ Dynamic_array<Opt_trace_stmt*> traces;
Opt_trace_stmt *current_trace;
- /*
- TRUE: if we allocate memory for list of traces
- FALSE: otherwise
- */
- bool inited;
size_t max_mem_size;
};
diff --git a/sql/rowid_filter.cc b/sql/rowid_filter.cc
index d6ea22c1704..7e7447d4c37 100644
--- a/sql/rowid_filter.cc
+++ b/sql/rowid_filter.cc
@@ -434,7 +434,7 @@ TABLE::best_range_rowid_filter_for_partial_join(uint access_key_no,
double records,
double access_cost_factor)
{
- if (!this || range_rowid_filter_cost_info_elems == 0 ||
+ if (range_rowid_filter_cost_info_elems == 0 ||
covering_keys.is_set(access_key_no))
return 0;
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index d10391271a9..8c5cadcf5a6 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1412,7 +1412,7 @@ void THD::change_user(void)
sp_cache_clear(&sp_func_cache);
sp_cache_clear(&sp_package_spec_cache);
sp_cache_clear(&sp_package_body_cache);
- opt_trace.flush_optimizer_trace();
+ opt_trace.delete_traces();
}
/**
@@ -5988,7 +5988,8 @@ int THD::decide_logging_format(TABLE_LIST *tables)
binlog by filtering rules.
*/
#ifdef WITH_WSREP
- if (WSREP_CLIENT_NNULL(this) && variables.wsrep_trx_fragment_size > 0)
+ if (WSREP_CLIENT_NNULL(this) && wsrep_thd_is_local(this) &&
+ variables.wsrep_trx_fragment_size > 0)
{
if (!is_current_stmt_binlog_format_row())
{
diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc
index 9ffe10e115d..c8a892a0c36 100644
--- a/sql/sql_derived.cc
+++ b/sql/sql_derived.cc
@@ -34,6 +34,7 @@
#include "sql_class.h"
#include "sql_cte.h"
#include "my_json_writer.h"
+#include "opt_trace.h"
typedef bool (*dt_processor)(THD *thd, LEX *lex, TABLE_LIST *derived);
@@ -384,6 +385,15 @@ bool mysql_derived_merge(THD *thd, LEX *lex, TABLE_LIST *derived)
{
/* There is random function => fall back to materialization. */
cause= "Random function in the select";
+ if (unlikely(thd->trace_started()))
+ {
+ OPT_TRACE_VIEWS_TRANSFORM(thd, trace_wrapper, trace_derived,
+ derived->is_derived() ? "derived" : "view",
+ derived->alias.str ? derived->alias.str : "<NULL>",
+ derived->get_unit()->first_select()->select_number,
+ "materialized");
+ trace_derived.add("cause", cause);
+ }
derived->change_refs_to_fields();
derived->set_materialized_derived();
DBUG_RETURN(FALSE);
@@ -497,19 +507,12 @@ unconditional_materialization:
if (unlikely(thd->trace_started()))
{
- /*
- Add to the optimizer trace the change in choice for merged
- derived tables/views to materialised ones.
- */
- Json_writer_object trace_wrapper(thd);
- Json_writer_object trace_derived(thd, derived->is_derived() ?
- "derived" : "view");
- trace_derived.add("table", derived->alias.str ? derived->alias.str : "<NULL>")
- .add_select_number(derived->get_unit()->
- first_select()->select_number)
- .add("initial_choice", "merged")
- .add("final_choice", "materialized")
- .add("cause", cause);
+ OPT_TRACE_VIEWS_TRANSFORM(thd,trace_wrapper, trace_derived,
+ derived->is_derived() ? "derived" : "view",
+ derived->alias.str ? derived->alias.str : "<NULL>",
+ derived->get_unit()->first_select()->select_number,
+ "materialized");
+ trace_derived.add("cause", cause);
}
derived->change_refs_to_fields();
@@ -778,15 +781,11 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
Add to optimizer trace whether a derived table/view
is merged into the parent select or not.
*/
- Json_writer_object trace_wrapper(thd);
- Json_writer_object trace_derived(thd, derived->is_derived() ?
- "derived" : "view");
- trace_derived.add("table", derived->alias.str ? derived->alias.str : "<NULL>")
- .add_select_number(derived->get_unit()->first_select()->select_number);
- if (derived->is_materialized_derived())
- trace_derived.add("materialized", true);
- if (derived->is_merged_derived())
- trace_derived.add("merged", true);
+ OPT_TRACE_VIEWS_TRANSFORM(thd, trace_wrapper, trace_derived,
+ derived->is_derived() ? "derived" : "view",
+ derived->alias.str ? derived->alias.str : "<NULL>",
+ derived->get_unit()->first_select()->select_number,
+ derived->is_merged_derived() ? "merged" : "materialized");
}
/*
Above cascade call of prepare is important for PS protocol, but after it
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 25dfdbcbc71..57cec6bacff 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1729,11 +1729,23 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
case COM_STMT_BULK_EXECUTE:
{
mysqld_stmt_bulk_execute(thd, packet, packet_length);
+#ifdef WITH_WSREP
+ if (WSREP_ON)
+ {
+ (void)wsrep_after_statement(thd);
+ }
+#endif /* WITH_WSREP */
break;
}
case COM_STMT_EXECUTE:
{
mysqld_stmt_execute(thd, packet, packet_length);
+#ifdef WITH_WSREP
+ if (WSREP_ON)
+ {
+ (void)wsrep_after_statement(thd);
+ }
+#endif /* WITH_WSREP */
break;
}
case COM_STMT_FETCH:
diff --git a/sql/sql_priv.h b/sql/sql_priv.h
index d9908d2f2b5..0f28f5b82e1 100644
--- a/sql/sql_priv.h
+++ b/sql/sql_priv.h
@@ -359,11 +359,6 @@
data dictionary without changing table rows
*/
#define IS_EQUAL_PACK_LENGTH 2
-/**
- new_field has a representation that is compatible with the old type
- when the storage engine advertises HA_EXTENDED_TYPES_CONVERSION
-*/
-#define IS_EQUAL_PACK_LENGTH_EXT 3
enum enum_parsing_place
{
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index c598532cc9d..3c108094b9e 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -121,6 +121,7 @@ static bool best_extension_by_limited_search(JOIN *join,
double read_time, uint depth,
uint prune_level,
uint use_cond_selectivity);
+void trace_plan_prefix(JOIN *join, uint idx, table_map remaining_tables);
static uint determine_search_depth(JOIN* join);
C_MODE_START
static int join_tab_cmp(const void *dummy, const void* ptr1, const void* ptr2);
@@ -302,8 +303,6 @@ void set_postjoin_aggr_write_func(JOIN_TAB *tab);
static Item **get_sargable_cond(JOIN *join, TABLE *table);
-static void trace_plan_prefix(JOIN *join, uint idx, table_map remaining_tables);
-
#ifndef DBUG_OFF
/*
@@ -359,16 +358,16 @@ static void trace_table_dependencies(THD *thd,
{
Json_writer_object trace_wrapper(thd);
Json_writer_array trace_dep(thd, "table_dependencies");
- for (uint i = 0; i < table_count; i++)
+ for (uint i= 0; i < table_count; i++)
{
- TABLE_LIST *table_ref = join_tabs[i].tab_list;
+ TABLE_LIST *table_ref= join_tabs[i].tab_list;
Json_writer_object trace_one_table(thd);
trace_one_table.add_table_name(&join_tabs[i]);
trace_one_table.add("row_may_be_null",
(bool)table_ref->table->maybe_null);
- const table_map map = table_ref->get_map();
+ const table_map map= table_ref->get_map();
DBUG_ASSERT(map < (1ULL << table_count));
- for (uint j = 0; j < table_count; j++)
+ for (uint j= 0; j < table_count; j++)
{
if (map & (1ULL << j))
{
@@ -377,14 +376,10 @@ static void trace_table_dependencies(THD *thd,
}
}
Json_writer_array depends_on(thd, "depends_on_map_bits");
- static_assert(sizeof(table_ref->get_map()) <= 64,
- "RAND_TABLE_BIT may be in join_tabs[i].dependent, so we test "
- "all 64 bits.");
- for (uint j = 0; j < 64; j++)
- {
- if (join_tabs[i].dependent & (1ULL << j))
- depends_on.add(static_cast<longlong>(j));
- }
+ Table_map_iterator it(join_tabs[i].dependent);
+ uint dep_bit;
+ while ((dep_bit= it++) != Table_map_iterator::BITMAP_END)
+ depends_on.add(static_cast<longlong>(dep_bit));
}
}
@@ -9082,13 +9077,13 @@ double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
}
-static void trace_plan_prefix(JOIN *join, uint idx, table_map remaining_tables)
+void trace_plan_prefix(JOIN *join, uint idx, table_map remaining_tables)
{
- THD *const thd = join->thd;
+ THD *const thd= join->thd;
Json_writer_array plan_prefix(thd, "plan_prefix");
- for (uint i = 0; i < idx; i++)
+ for (uint i= 0; i < idx; i++)
{
- TABLE_LIST *const tr = join->positions[i].table->tab_list;
+ TABLE_LIST *const tr= join->positions[i].table->tab_list;
if (!(tr->map & remaining_tables))
plan_prefix.add_table_name(join->positions[i].table);
}
@@ -9298,9 +9293,6 @@ best_extension_by_limited_search(JOIN *join,
current_record_count / (double) TIME_FOR_COMPARE -
filter_cmp_gain;
- /*
- TODO add filtering estimates here
- */
advance_sj_state(join, remaining_tables, idx, &current_record_count,
&current_read_time, &loose_scan_pos);
@@ -11107,12 +11099,6 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
tab->table->intersect_keys.is_set(tab->ref.key))))
{
/* Range uses longer key; Use this instead of ref on key */
-
- /*
- We can trace here, changing ref access to range access here
- have a range that uses longer key.
- Lets take @spetrunia's opinion
- */
Json_writer_object ref_to_range(thd);
ref_to_range.add("ref_to_range", true);
ref_to_range.add("cause", "range uses longer key");
@@ -16455,6 +16441,8 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
double cost, rec_count;
table_map reopt_remaining_tables= last_remaining_tables;
uint i;
+ THD *thd= join->thd;
+ Json_writer_temp_disable trace_wo_join_buffering(thd);
if (first_tab > join->const_tables)
{
@@ -16489,7 +16477,7 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
{
JOIN_TAB *rs= join->positions[i].table;
POSITION pos, loose_scan_pos;
-
+
if ((i == first_tab && first_alt) || join->positions[i].use_join_buffer)
{
/* Find the best access method that would not use join buffering */
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index a40febf58f4..4dd9d43a7b9 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -6605,9 +6605,6 @@ static bool fill_alter_inplace_info(THD *thd,
*/
ha_alter_info->handler_flags|= ALTER_COLUMN_EQUAL_PACK_LENGTH;
break;
- case IS_EQUAL_PACK_LENGTH_EXT:
- ha_alter_info->handler_flags|= ALTER_COLUMN_EQUAL_PACK_LENGTH_EXT;
- break;
default:
DBUG_ASSERT(0);
/* Safety. */
diff --git a/sql/sql_test.cc b/sql/sql_test.cc
index f247fb10f89..93085251711 100644
--- a/sql/sql_test.cc
+++ b/sql/sql_test.cc
@@ -665,11 +665,11 @@ void print_keyuse_array_for_trace(THD *thd, DYNAMIC_ARRAY *keyuse_array)
KEYUSE *keyuse= (KEYUSE*)dynamic_array_ptr(keyuse_array, i);
Json_writer_object keyuse_elem(thd);
keyuse_elem.add_table_name(keyuse->table->reginfo.join_tab);
- keyuse_elem.add("field", (keyuse->keypart == FT_KEYPART) ? "<fulltext>"
- : (keyuse->is_for_hash_join()
- ? keyuse->table->field[keyuse->keypart]
- ->field_name.str
- : keyuse->table->key_info[keyuse->key]
+ keyuse_elem.add("field", (keyuse->keypart == FT_KEYPART) ? "<fulltext>":
+ (keyuse->is_for_hash_join() ?
+ keyuse->table->field[keyuse->keypart]
+ ->field_name.str :
+ keyuse->table->key_info[keyuse->key]
.key_part[keyuse->keypart]
.field->field_name.str));
keyuse_elem.add("equals",keyuse->val);
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index 0472e4881cc..9826107b996 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -474,7 +474,7 @@ bool check_has_super(sys_var *self, THD *thd, set_var *var)
static Sys_var_bit Sys_core_file("core_file", "write a core-file on crashes",
READ_ONLY GLOBAL_VAR(test_flags), NO_CMD_LINE,
- TEST_CORE_ON_SIGNAL, DEFAULT(FALSE), NO_MUTEX_GUARD, NOT_IN_BINLOG,
+ TEST_CORE_ON_SIGNAL, DEFAULT(IF_WIN(TRUE,FALSE)), NO_MUTEX_GUARD, NOT_IN_BINLOG,
0,0,0);
static bool binlog_format_check(sys_var *self, THD *thd, set_var *var)
@@ -5937,7 +5937,7 @@ static Sys_var_ulong Sys_histogram_size(
"Number of bytes used for a histogram. "
"If set to 0, no histograms are created by ANALYZE.",
SESSION_VAR(histogram_size), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(0, 255), DEFAULT(0), BLOCK_SIZE(1));
+ VALID_RANGE(0, 255), DEFAULT(254), BLOCK_SIZE(1));
extern const char *histogram_types[];
static Sys_var_enum Sys_histogram_type(
@@ -5947,7 +5947,7 @@ static Sys_var_enum Sys_histogram_type(
"SINGLE_PREC_HB - single precision height-balanced, "
"DOUBLE_PREC_HB - double precision height-balanced.",
SESSION_VAR(histogram_type), CMD_LINE(REQUIRED_ARG),
- histogram_types, DEFAULT(0));
+ histogram_types, DEFAULT(1));
static Sys_var_mybool Sys_no_thread_alarm(
"debug_no_thread_alarm",
diff --git a/sql/wsrep_client_service.cc b/sql/wsrep_client_service.cc
index 994fa97db60..62b90cc032f 100644
--- a/sql/wsrep_client_service.cc
+++ b/sql/wsrep_client_service.cc
@@ -229,8 +229,12 @@ size_t Wsrep_client_service::bytes_generated() const
IO_CACHE* cache= wsrep_get_trans_cache(m_thd);
if (cache)
{
- m_thd->binlog_flush_pending_rows_event(true);
- return my_b_tell(cache);
+ size_t pending_rows_event_length= 0;
+ if (Rows_log_event* ev= m_thd->binlog_get_pending_rows_event(true))
+ {
+ pending_rows_event_length= ev->get_data_size();
+ }
+ return my_b_tell(cache) + pending_rows_event_length;
}
return 0;
}
diff --git a/sql/wsrep_server_service.cc b/sql/wsrep_server_service.cc
index 7efff35f2b1..14b294c4214 100644
--- a/sql/wsrep_server_service.cc
+++ b/sql/wsrep_server_service.cc
@@ -176,10 +176,12 @@ void Wsrep_server_service::log_view(
{
Wsrep_id id;
Wsrep_view prev_view= wsrep_schema->restore_view(applier->m_thd, id);
+ bool checkpoint_was_reset= false;
if (prev_view.state_id().id() != view.state_id().id())
{
WSREP_DEBUG("New cluster UUID was generated, resetting position info");
wsrep_set_SE_checkpoint(wsrep::gtid::undefined());
+ checkpoint_was_reset= true;
}
if (wsrep_debug)
@@ -188,7 +190,7 @@ void Wsrep_server_service::log_view(
os << "Storing cluster view:\n" << view;
WSREP_INFO("%s", os.str().c_str());
DBUG_ASSERT(prev_view.state_id().id() != view.state_id().id() ||
- view.state_id().seqno() > prev_view.state_id().seqno());
+ view.state_id().seqno().get() >= prev_view.state_id().seqno().get());
}
if (trans_begin(applier->m_thd, MYSQL_START_TRANS_OPT_READ_WRITE))
@@ -216,7 +218,21 @@ void Wsrep_server_service::log_view(
applier->m_thd->mdl_context.release_transactional_locks();
}
- wsrep_set_SE_checkpoint(view.state_id());
+ /*
+    Backwards compatibility: when running in a mixed cluster with
+    Galera 3.x, the provider does not generate unique sequence numbers
+    for views. This condition can be checked by inspecting the last
+    committed seqno as returned by the provider. If last_committed
+    matches the view state_id seqno, the cluster runs in backwards
+    compatibility mode and we skip setting the checkpoint for the
+    view.
+ */
+ wsrep::seqno last_committed=
+ Wsrep_server_state::instance().provider().last_committed_gtid().seqno();
+ if (checkpoint_was_reset || last_committed != view.state_id().seqno())
+ {
+ wsrep_set_SE_checkpoint(view.state_id());
+ }
DBUG_ASSERT(wsrep_get_SE_checkpoint().id() == view.state_id().id());
}
else
diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc
index 103afa1c412..d79b7771571 100644
--- a/sql/wsrep_sst.cc
+++ b/sql/wsrep_sst.cc
@@ -337,7 +337,7 @@ static int generate_binlog_index_opt_val(char** ret)
{
DBUG_ASSERT(ret);
*ret= NULL;
- if (opt_bin_log)
+ if (opt_binlog_index_name)
{
*ret= strcmp(opt_binlog_index_name, "0") ?
my_strdup(opt_binlog_index_name, MYF(0)) : my_strdup("", MYF(0));
diff --git a/sql/wsrep_trans_observer.h b/sql/wsrep_trans_observer.h
index a3acc9e78fb..04a1224701e 100644
--- a/sql/wsrep_trans_observer.h
+++ b/sql/wsrep_trans_observer.h
@@ -83,6 +83,14 @@ static inline bool wsrep_streaming_enabled(THD* thd)
return (thd->wsrep_sr().fragment_size() > 0);
}
+/*
+  Return number of fragments successfully certified for the
+ current statement.
+ */
+static inline size_t wsrep_fragments_certified_for_stmt(THD* thd)
+{
+ return thd->wsrep_trx().fragments_certified_for_statement();
+}
static inline int wsrep_start_transaction(THD* thd, wsrep_trx_id_t trx_id)
{
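
wsrep_fragments_certified_for_stmt() is what the handler.cc hunk earlier in this diff consults when deciding whether to log an annotate event: once a streaming-replication transaction has already certified fragments for the current statement, the annotate event is skipped. Condensed from that hunk (in non-WSREP builds IF_WSREP() falls back to its second argument, i.e. true, so behaviour is unchanged there):

    my_bool with_annotate= IF_WSREP(!wsrep_fragments_certified_for_stmt(thd),
                                    true) &&
                           thd->variables.binlog_annotate_row_events &&
                           thd->query() && thd->query_length();
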
diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc
index 7ba4ed3c14c..d091348f481 100644
--- a/sql/wsrep_var.cc
+++ b/sql/wsrep_var.cc
@@ -757,7 +757,7 @@ static int show_var_cmp(const void *var1, const void *var2)
{
return strcasecmp(((SHOW_VAR*)var1)->name, ((SHOW_VAR*)var2)->name);
}
-#endif /* UNUSED */
+
/*
* Status variables stuff below
*/
@@ -780,6 +780,7 @@ wsrep_assign_to_mysql (SHOW_VAR* mysql, wsrep_stats_var* wsrep_var)
break;
}
}
+#endif /* UNUSED */
#if DYNAMIC
// somehow this mysql status thing works only with statically allocated arrays.