Diffstat (limited to 'sql/sql_select.cc')
-rw-r--r--  sql/sql_select.cc  2546
1 file changed, 1764 insertions(+), 782 deletions(-)
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 6c090ea5352..12d0a9e824e 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -64,6 +64,10 @@
#include "sys_vars_shared.h"
#include "sp_head.h"
#include "sp_rcontext.h"
+#include "rowid_filter.h"
+#include "select_handler.h"
+#include "my_json_writer.h"
+#include "opt_trace.h"
/*
A key part number that means we're using a fulltext scan.
@@ -100,6 +104,9 @@ static int sort_keyuse(KEYUSE *a,KEYUSE *b);
static bool are_tables_local(JOIN_TAB *jtab, table_map used_tables);
static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse,
bool allow_full_scan, table_map used_tables);
+static ha_rows get_quick_record_count(THD *thd, SQL_SELECT *select,
+ TABLE *table,
+ const key_map *keys,ha_rows limit);
static void optimize_straight_join(JOIN *join, table_map join_tables);
static bool greedy_search(JOIN *join, table_map remaining_tables,
uint depth, uint prune_level,
@@ -147,7 +154,8 @@ static COND *build_equal_items(JOIN *join, COND *cond,
static COND* substitute_for_best_equal_field(THD *thd, JOIN_TAB *context_tab,
COND *cond,
COND_EQUAL *cond_equal,
- void *table_join_idx);
+ void *table_join_idx,
+ bool do_substitution);
static COND *simplify_joins(JOIN *join, List<TABLE_LIST> *join_list,
COND *conds, bool top, bool in_sj);
static bool check_interleaving_with_nj(JOIN_TAB *next);
@@ -342,7 +350,63 @@ bool dbug_user_var_equals_int(THD *thd, const char *name, int value)
}
return FALSE;
}
-#endif
+#endif /* DBUG_OFF */
+
+/*
+  Initialize POSITION structure.
+*/
+
+POSITION::POSITION()
+{
+ table= 0;
+ records_read= cond_selectivity= read_time= 0.0;
+ prefix_record_count= 0.0;
+ key= 0;
+ use_join_buffer= 0;
+ sj_strategy= SJ_OPT_NONE;
+ n_sj_tables= 0;
+ spl_plan= 0;
+ range_rowid_filter_info= 0;
+ ref_depend_map= dups_producing_tables= 0;
+ inner_tables_handled_with_other_sjs= 0;
+ dups_weedout_picker.set_empty();
+ firstmatch_picker.set_empty();
+ loosescan_picker.set_empty();
+ sjmat_picker.set_empty();
+}
+
+
+static void trace_table_dependencies(THD *thd,
+ JOIN_TAB *join_tabs, uint table_count)
+{
+ DBUG_ASSERT(thd->trace_started());
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_array trace_dep(thd, "table_dependencies");
+
+ for (uint i= 0; i < table_count; i++)
+ {
+ TABLE_LIST *table_ref= join_tabs[i].tab_list;
+ Json_writer_object trace_one_table(thd);
+ trace_one_table.add_table_name(&join_tabs[i]);
+ trace_one_table.add("row_may_be_null",
+ (bool)table_ref->table->maybe_null);
+ const table_map map= table_ref->get_map();
+ DBUG_ASSERT(map < (1ULL << table_count));
+ for (uint j= 0; j < table_count; j++)
+ {
+ if (map & (1ULL << j))
+ {
+ trace_one_table.add("map_bit", static_cast<longlong>(j));
+ break;
+ }
+ }
+ Json_writer_array depends_on(thd, "depends_on_map_bits");
+ Table_map_iterator it(join_tabs[i].dependent);
+ uint dep_bit;
+ while ((dep_bit= it++) != Table_map_iterator::BITMAP_END)
+ depends_on.add(static_cast<longlong>(dep_bit));
+ }
+}
/**
@@ -353,7 +417,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result,
ulong setup_tables_done_option)
{
bool res;
- SELECT_LEX *select_lex = &lex->select_lex;
+ SELECT_LEX *select_lex= lex->first_select_lex();
DBUG_ENTER("handle_select");
MYSQL_SELECT_START(thd->query());
@@ -696,8 +760,9 @@ bool vers_select_conds_t::init_from_sysvar(THD *thd)
if (type != SYSTEM_TIME_UNSPECIFIED && type != SYSTEM_TIME_ALL)
{
DBUG_ASSERT(type == SYSTEM_TIME_AS_OF);
+ Datetime dt(&in.ltime);
start.item= new (thd->mem_root)
- Item_datetime_literal(thd, &in.ltime, TIME_SECOND_PART_DIGITS);
+ Item_datetime_literal(thd, &dt, TIME_SECOND_PART_DIGITS);
if (!start.item)
return true;
}
@@ -734,6 +799,130 @@ void vers_select_conds_t::print(String *str, enum_query_type query_type) const
}
static
+Item* period_get_condition(THD *thd, TABLE_LIST *table, SELECT_LEX *select,
+ vers_select_conds_t *conds, bool timestamp)
+{
+ DBUG_ASSERT(table);
+ DBUG_ASSERT(table->table);
+#define newx new (thd->mem_root)
+ TABLE_SHARE *share= table->table->s;
+ const TABLE_SHARE::period_info_t *period= conds->period;
+
+ const LEX_CSTRING &fstart= period->start_field(share)->field_name;
+ const LEX_CSTRING &fend= period->end_field(share)->field_name;
+
+ conds->field_start= newx Item_field(thd, &select->context,
+ table->db.str, table->alias.str,
+ thd->make_clex_string(fstart));
+ conds->field_end= newx Item_field(thd, &select->context,
+ table->db.str, table->alias.str,
+ thd->make_clex_string(fend));
+
+ Item *cond1= NULL, *cond2= NULL, *cond3= NULL, *curr= NULL;
+ if (timestamp)
+ {
+ MYSQL_TIME max_time;
+ switch (conds->type)
+ {
+ case SYSTEM_TIME_UNSPECIFIED:
+ case SYSTEM_TIME_HISTORY:
+ {
+ thd->variables.time_zone->gmt_sec_to_TIME(&max_time, TIMESTAMP_MAX_VALUE);
+ max_time.second_part= TIME_MAX_SECOND_PART;
+ Datetime dt(&max_time);
+ curr= newx Item_datetime_literal(thd, &dt, TIME_SECOND_PART_DIGITS);
+ if (conds->type == SYSTEM_TIME_UNSPECIFIED)
+ cond1= newx Item_func_eq(thd, conds->field_end, curr);
+ else
+ cond1= newx Item_func_lt(thd, conds->field_end, curr);
+ break;
+ }
+ case SYSTEM_TIME_AS_OF:
+ cond1= newx Item_func_le(thd, conds->field_start, conds->start.item);
+ cond2= newx Item_func_gt(thd, conds->field_end, conds->start.item);
+ break;
+ case SYSTEM_TIME_FROM_TO:
+ cond1= newx Item_func_lt(thd, conds->field_start, conds->end.item);
+ cond2= newx Item_func_gt(thd, conds->field_end, conds->start.item);
+ cond3= newx Item_func_lt(thd, conds->start.item, conds->end.item);
+ break;
+ case SYSTEM_TIME_BETWEEN:
+ cond1= newx Item_func_le(thd, conds->field_start, conds->end.item);
+ cond2= newx Item_func_gt(thd, conds->field_end, conds->start.item);
+ cond3= newx Item_func_le(thd, conds->start.item, conds->end.item);
+ break;
+ case SYSTEM_TIME_BEFORE:
+ cond1= newx Item_func_history(thd, conds->field_end);
+ cond2= newx Item_func_lt(thd, conds->field_end, conds->start.item);
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+ }
+ else
+ {
+ DBUG_ASSERT(table->table->s && table->table->s->db_plugin);
+
+ Item *trx_id0= conds->start.item;
+ Item *trx_id1= conds->end.item;
+ if (conds->start.item && conds->start.unit == VERS_TIMESTAMP)
+ {
+ bool backwards= conds->type != SYSTEM_TIME_AS_OF;
+ trx_id0= newx Item_func_trt_id(thd, conds->start.item,
+ TR_table::FLD_TRX_ID, backwards);
+ }
+ if (conds->end.item && conds->end.unit == VERS_TIMESTAMP)
+ {
+ trx_id1= newx Item_func_trt_id(thd, conds->end.item,
+ TR_table::FLD_TRX_ID, false);
+ }
+
+ switch (conds->type)
+ {
+ case SYSTEM_TIME_UNSPECIFIED:
+ case SYSTEM_TIME_HISTORY:
+ curr= newx Item_int(thd, ULONGLONG_MAX);
+ if (conds->type == SYSTEM_TIME_UNSPECIFIED)
+ cond1= newx Item_func_eq(thd, conds->field_end, curr);
+ else
+ cond1= newx Item_func_lt(thd, conds->field_end, curr);
+ break;
+ DBUG_ASSERT(!conds->start.item);
+ DBUG_ASSERT(!conds->end.item);
+ break;
+ case SYSTEM_TIME_AS_OF:
+ cond1= newx Item_func_trt_trx_sees_eq(thd, trx_id0, conds->field_start);
+ cond2= newx Item_func_trt_trx_sees(thd, conds->field_end, trx_id0);
+ DBUG_ASSERT(!conds->end.item);
+ break;
+ case SYSTEM_TIME_FROM_TO:
+ cond1= newx Item_func_trt_trx_sees(thd, trx_id1, conds->field_start);
+ cond2= newx Item_func_trt_trx_sees_eq(thd, conds->field_end, trx_id0);
+ cond3= newx Item_func_lt(thd, conds->start.item, conds->end.item);
+ break;
+ case SYSTEM_TIME_BETWEEN:
+ cond1= newx Item_func_trt_trx_sees_eq(thd, trx_id1, conds->field_start);
+ cond2= newx Item_func_trt_trx_sees_eq(thd, conds->field_end, trx_id0);
+ cond3= newx Item_func_le(thd, conds->start.item, conds->end.item);
+ break;
+ case SYSTEM_TIME_BEFORE:
+ cond1= newx Item_func_history(thd, conds->field_end);
+ cond2= newx Item_func_trt_trx_sees(thd, trx_id0, conds->field_end);
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+ }
+
+ if (cond1)
+ {
+ cond1= and_items(thd, cond2, cond1);
+ cond1= and_items(thd, cond3, cond1);
+ }
+ return cond1;
+}
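Roughly, the timestamp branch of period_get_condition() above builds the following
predicates (writing row_start/row_end for the period's start and end fields, and
t1/t2 for conds->start.item and conds->end.item; this is only a reading of the
switch, not text from the patch):

  AS OF t1:           row_start <= t1 AND row_end > t1
  FROM t1 TO t2:      row_start <  t2 AND row_end > t1 AND t1 <  t2
  BETWEEN t1 AND t2:  row_start <= t2 AND row_end > t1 AND t1 <= t2
  BEFORE t1:          history(row_end) [Item_func_history] AND row_end < t1
  UNSPECIFIED:        row_end = TIMESTAMP_MAX  (current rows only)
  HISTORY:            row_end < TIMESTAMP_MAX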
+
+static
bool skip_setup_conds(THD *thd)
{
return (!thd->stmt_arena->is_conventional()
@@ -741,17 +930,54 @@ bool skip_setup_conds(THD *thd)
|| thd->lex->is_view_context_analysis();
}
-int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables)
+int SELECT_LEX::period_setup_conds(THD *thd, TABLE_LIST *tables)
{
- DBUG_ENTER("SELECT_LEX::vers_setup_cond");
-#define newx new (thd->mem_root)
+ DBUG_ENTER("SELECT_LEX::period_setup_conds");
+ const bool update_conds= !skip_setup_conds(thd);
+
+ Query_arena backup;
+ Query_arena *arena= thd->activate_stmt_arena_if_needed(&backup);
+
+ DBUG_ASSERT(!tables->next_local && tables->table);
+
+ Item *result= NULL;
+ for (TABLE_LIST *table= tables; table; table= table->next_local)
+ {
+ if (!table->table)
+ continue;
+ vers_select_conds_t &conds= table->period_conditions;
+ if (!table->table->s->period.name.streq(conds.name))
+ {
+ my_error(ER_PERIOD_NOT_FOUND, MYF(0), conds.name.str);
+ if (arena)
+ thd->restore_active_arena(arena, &backup);
+ DBUG_RETURN(-1);
+ }
+
+ if (update_conds)
+ {
+ conds.period= &table->table->s->period;
+ result= and_items(thd, result,
+ period_get_condition(thd, table, this, &conds, true));
+ }
+ }
+ if (update_conds)
+ where= and_items(thd, where, result);
+ if (arena)
+ thd->restore_active_arena(arena, &backup);
+
+ DBUG_RETURN(0);
+}
+
+int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables)
+{
+ DBUG_ENTER("SELECT_LEX::vers_setup_conds");
const bool update_conds= !skip_setup_conds(thd);
- TABLE_LIST *table;
if (!versioned_tables)
{
- for (table= tables; table; table= table->next_local)
+ for (TABLE_LIST *table= tables; table; table= table->next_local)
{
if (table->table && table->table->versioned())
versioned_tables++;
@@ -808,7 +1034,7 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables)
break;
}
- for (table= tables; table; table= table->next_local)
+ for (TABLE_LIST *table= tables; table; table= table->next_local)
{
if (!table->table || table->is_view() || !table->table->versioned())
continue;
@@ -830,7 +1056,7 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables)
DBUG_RETURN(-1);
}
else if (!vers_conditions.is_set())
- vers_conditions.type= SYSTEM_TIME_ALL;
+ vers_conditions.set_all();
}
#endif
@@ -862,16 +1088,6 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables)
continue;
}
- const LEX_CSTRING *fstart=
- thd->make_clex_string(table->table->vers_start_field()->field_name);
- const LEX_CSTRING *fend=
- thd->make_clex_string(table->table->vers_end_field()->field_name);
-
- Item *row_start=
- newx Item_field(thd, &this->context, table->db.str, table->alias.str, fstart);
- Item *row_end=
- newx Item_field(thd, &this->context, table->db.str, table->alias.str, fend);
-
bool timestamps_only= table->table->versioned(VERS_TIMESTAMP);
if (vers_conditions.is_set() && vers_conditions.type != SYSTEM_TIME_HISTORY)
@@ -891,126 +1107,30 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables)
}
}
- if (!update_conds)
- continue;
-
- Item *cond1= NULL, *cond2= NULL, *cond3= NULL, *curr= NULL;
- Item *point_in_time1= vers_conditions.start.item;
- Item *point_in_time2= vers_conditions.end.item;
- TABLE *t= table->table;
- if (t->versioned(VERS_TIMESTAMP))
- {
- MYSQL_TIME max_time;
- switch (vers_conditions.type)
- {
- case SYSTEM_TIME_UNSPECIFIED:
- case SYSTEM_TIME_HISTORY:
- thd->variables.time_zone->gmt_sec_to_TIME(&max_time, TIMESTAMP_MAX_VALUE);
- max_time.second_part= TIME_MAX_SECOND_PART;
- curr= newx Item_datetime_literal(thd, &max_time, TIME_SECOND_PART_DIGITS);
- if (vers_conditions.type == SYSTEM_TIME_UNSPECIFIED)
- cond1= newx Item_func_eq(thd, row_end, curr);
- else
- cond1= newx Item_func_lt(thd, row_end, curr);
- break;
- case SYSTEM_TIME_AS_OF:
- cond1= newx Item_func_le(thd, row_start, point_in_time1);
- cond2= newx Item_func_gt(thd, row_end, point_in_time1);
- break;
- case SYSTEM_TIME_FROM_TO:
- cond1= newx Item_func_lt(thd, row_start, point_in_time2);
- cond2= newx Item_func_gt(thd, row_end, point_in_time1);
- cond3= newx Item_func_lt(thd, point_in_time1, point_in_time2);
- break;
- case SYSTEM_TIME_BETWEEN:
- cond1= newx Item_func_le(thd, row_start, point_in_time2);
- cond2= newx Item_func_gt(thd, row_end, point_in_time1);
- cond3= newx Item_func_le(thd, point_in_time1, point_in_time2);
- break;
- case SYSTEM_TIME_BEFORE:
- cond1= newx Item_func_history(thd, row_end);
- cond2= newx Item_func_lt(thd, row_end, point_in_time1);
- break;
- default:
- DBUG_ASSERT(0);
- }
- }
- else
+ if (update_conds)
{
- DBUG_ASSERT(table->table->s && table->table->s->db_plugin);
-
- Item *trx_id0, *trx_id1;
-
- switch (vers_conditions.type)
- {
- case SYSTEM_TIME_UNSPECIFIED:
- case SYSTEM_TIME_HISTORY:
- curr= newx Item_int(thd, ULONGLONG_MAX);
- if (vers_conditions.type == SYSTEM_TIME_UNSPECIFIED)
- cond1= newx Item_func_eq(thd, row_end, curr);
- else
- cond1= newx Item_func_lt(thd, row_end, curr);
- break;
- case SYSTEM_TIME_AS_OF:
- trx_id0= vers_conditions.start.unit == VERS_TIMESTAMP
- ? newx Item_func_trt_id(thd, point_in_time1, TR_table::FLD_TRX_ID)
- : point_in_time1;
- cond1= newx Item_func_trt_trx_sees_eq(thd, trx_id0, row_start);
- cond2= newx Item_func_trt_trx_sees(thd, row_end, trx_id0);
- break;
- case SYSTEM_TIME_FROM_TO:
- cond3= newx Item_func_lt(thd, point_in_time1, point_in_time2);
- /* fall through */
- case SYSTEM_TIME_BETWEEN:
- trx_id0= vers_conditions.start.unit == VERS_TIMESTAMP
- ? newx Item_func_trt_id(thd, point_in_time1, TR_table::FLD_TRX_ID, true)
- : point_in_time1;
- trx_id1= vers_conditions.end.unit == VERS_TIMESTAMP
- ? newx Item_func_trt_id(thd, point_in_time2, TR_table::FLD_TRX_ID, false)
- : point_in_time2;
- cond1= vers_conditions.type == SYSTEM_TIME_FROM_TO
- ? newx Item_func_trt_trx_sees(thd, trx_id1, row_start)
- : newx Item_func_trt_trx_sees_eq(thd, trx_id1, row_start);
- cond2= newx Item_func_trt_trx_sees_eq(thd, row_end, trx_id0);
- if (!cond3)
- cond3= newx Item_func_le(thd, point_in_time1, point_in_time2);
- break;
- case SYSTEM_TIME_BEFORE:
- trx_id0= vers_conditions.start.unit == VERS_TIMESTAMP
- ? newx Item_func_trt_id(thd, point_in_time1, TR_table::FLD_TRX_ID, true)
- : point_in_time1;
- cond1= newx Item_func_history(thd, row_end);
- cond2= newx Item_func_trt_trx_sees(thd, trx_id0, row_end);
- break;
- default:
- DBUG_ASSERT(0);
- }
- }
-
- if (cond1)
- {
- cond1= and_items(thd, cond2, cond1);
- cond1= and_items(thd, cond3, cond1);
+ vers_conditions.period = &table->table->s->vers;
+ Item *cond= period_get_condition(thd, table, this, &vers_conditions,
+ timestamps_only);
if (is_select)
- table->on_expr= and_items(thd, table->on_expr, cond1);
+ table->on_expr= and_items(thd, table->on_expr, cond);
else
{
if (join)
{
- where= and_items(thd, join->conds, cond1);
+ where= and_items(thd, join->conds, cond);
join->conds= where;
}
else
- where= and_items(thd, where, cond1);
- table->where= and_items(thd, table->where, cond1);
+ where= and_items(thd, where, cond);
+ table->where= and_items(thd, table->where, cond);
}
- }
- table->vers_conditions.type= SYSTEM_TIME_ALL;
+ table->vers_conditions.set_all();
+ }
} // for (table= tables; ...)
DBUG_RETURN(0);
-#undef newx
}
/*****************************************************************************
@@ -1052,10 +1172,17 @@ JOIN::prepare(TABLE_LIST *tables_init,
proc_param= proc_param_init;
tables_list= tables_init;
select_lex= select_lex_arg;
+ DBUG_PRINT("info", ("select %p (%u) = JOIN %p",
+ select_lex, select_lex->select_number, this));
select_lex->join= this;
join_list= &select_lex->top_join_list;
union_part= unit_arg->is_unit_op();
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_object trace_prepare(thd, "join_preparation");
+ trace_prepare.add_select_number(select_lex->select_number);
+ Json_writer_array trace_steps(thd, "steps");
+
// simple check that we got usable conds
dbug_print_item(conds);
@@ -1097,7 +1224,7 @@ JOIN::prepare(TABLE_LIST *tables_init,
while ((select_el= select_it++))
{
- if (select_el->with_sum_func)
+ if (select_el->with_sum_func())
found_sum_func_elem= true;
if (select_el->with_field)
found_field_elem= true;
@@ -1282,7 +1409,7 @@ JOIN::prepare(TABLE_LIST *tables_init,
item->max_length)))
real_order= TRUE;
- if ((item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM) ||
+ if ((item->with_sum_func() && item->type() != Item::SUM_FUNC_ITEM) ||
item->with_window_func)
item->split_sum_func(thd, ref_ptrs, all_fields, SPLIT_SUM_SELECT);
}
@@ -1290,7 +1417,7 @@ JOIN::prepare(TABLE_LIST *tables_init,
order= NULL;
}
- if (having && having->with_sum_func)
+ if (having && having->with_sum_func())
having->split_sum_func2(thd, ref_ptrs, all_fields,
&having, SPLIT_SUM_SKIP_REGISTERED);
if (select_lex->inner_sum_func_list)
@@ -1400,6 +1527,12 @@ JOIN::prepare(TABLE_LIST *tables_init,
}
}
+ if (thd->trace_started())
+ {
+ Json_writer_object trace_wrapper(thd);
+ opt_trace_print_expanded_query(thd, select_lex, &trace_wrapper);
+ }
+
if (!procedure && result && result->prepare(fields_list, unit_arg))
goto err; /* purecov: inspected */
@@ -1460,6 +1593,7 @@ err:
bool JOIN::build_explain()
{
+ DBUG_ENTER("JOIN::build_explain");
have_query_plan= QEP_AVAILABLE;
/*
@@ -1476,8 +1610,7 @@ bool JOIN::build_explain()
thd->mem_root= old_mem_root;
DBUG_ASSERT(thd->free_list == old_free_list); // no Items were created
if (res)
- return 1;
-
+ DBUG_RETURN(1);
uint select_nr= select_lex->select_number;
JOIN_TAB *curr_tab= join_tab + exec_join_tab_cnt();
for (uint i= 0; i < aggr_tables; i++, curr_tab++)
@@ -1489,13 +1622,14 @@ bool JOIN::build_explain()
curr_tab->tracker= thd->lex->explain->get_union(select_nr)->
get_tmptable_read_tracker();
}
- else
+ else if (select_nr < INT_MAX)
{
- curr_tab->tracker= thd->lex->explain->get_select(select_nr)->
- get_using_temporary_read_tracker();
+ Explain_select *tmp= thd->lex->explain->get_select(select_nr);
+ if (tmp)
+ curr_tab->tracker= tmp->get_using_temporary_read_tracker();
}
}
- return 0;
+ DBUG_RETURN(0);
}
@@ -1504,7 +1638,16 @@ int JOIN::optimize()
int res= 0;
create_explain_query_if_not_exists(thd->lex, thd->mem_root);
join_optimization_state init_state= optimization_state;
- if (optimization_state == JOIN::OPTIMIZATION_PHASE_1_DONE)
+ if (select_lex->pushdown_select)
+ {
+ if (!(select_options & SELECT_DESCRIBE))
+ {
+ /* Prepare to execute the query pushed into a foreign engine */
+ res= select_lex->pushdown_select->init();
+ }
+ with_two_phase_optimization= false;
+ }
+ else if (optimization_state == JOIN::OPTIMIZATION_PHASE_1_DONE)
res= optimize_stage2();
else
{
@@ -1525,6 +1668,139 @@ int JOIN::optimize()
}
+/**
+ @brief
+  Create the range filter objects needed at execution time for all join tables
+
+  @details
+  For each join table in the chosen execution plan that uses a range filter,
+  the function creates a Rowid_filter object for that filter. To do this it
+  first constructs a quick select to scan the range for the filter, then
+  creates a container for the filter's rowids, and finally constructs a
+  Range_rowid_filter object, a pointer to which is stored in the field
+  JOIN_TAB::rowid_filter of the joined table.
+
+ @retval false always
+*/
+
+bool JOIN::make_range_rowid_filters()
+{
+ DBUG_ENTER("make_range_rowid_filters");
+
+ /*
+    Do not build range filters when an impossible WHERE has been detected:
+    in that case conditions can no longer be used to extract ranges for filters.
+ */
+ if (const_table_map != found_const_table_map)
+ DBUG_RETURN(0);
+
+ JOIN_TAB *tab;
+
+ for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
+ tab;
+ tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS))
+ {
+ if (!tab->range_rowid_filter_info)
+ continue;
+
+ DBUG_ASSERT(!(tab->ref.key >= 0 &&
+ tab->ref.key == (int) tab->range_rowid_filter_info->key_no));
+ DBUG_ASSERT(!(tab->ref.key == -1 && tab->quick &&
+ tab->quick->index == tab->range_rowid_filter_info->key_no));
+
+ int err;
+ SQL_SELECT *sel= NULL;
+ Rowid_filter_container *filter_container= NULL;
+ Item **sargable_cond= get_sargable_cond(this, tab->table);
+ sel= make_select(tab->table, const_table_map, const_table_map,
+ *sargable_cond, (SORT_INFO*) 0, 1, &err);
+ if (!sel)
+ continue;
+
+ key_map filter_map;
+ filter_map.clear_all();
+ filter_map.set_bit(tab->range_rowid_filter_info->key_no);
+ filter_map.merge(tab->table->with_impossible_ranges);
+ bool force_index_save= tab->table->force_index;
+ tab->table->force_index= true;
+ int rc= sel->test_quick_select(thd, filter_map, (table_map) 0,
+ (ha_rows) HA_POS_ERROR,
+ true, false, true, true);
+ tab->table->force_index= force_index_save;
+ if (thd->is_error())
+ goto no_filter;
+ /*
+      If the SUBS_IN_TO_EXISTS strategy is chosen for the subquery then
+      additional conditions are injected into WHERE/ON/HAVING and it may
+      happen that the call of test_quick_select() discovers an impossible range.
+ */
+ if (rc == -1)
+ {
+ const_table_map|= tab->table->map;
+ goto no_filter;
+ }
+ DBUG_ASSERT(sel->quick);
+ filter_container=
+ tab->range_rowid_filter_info->create_container();
+ if (filter_container)
+ {
+ tab->rowid_filter=
+ new (thd->mem_root) Range_rowid_filter(tab->table,
+ tab->range_rowid_filter_info,
+ filter_container, sel);
+ if (tab->rowid_filter)
+ continue;
+ }
+ no_filter:
+ if (sel->quick)
+ delete sel->quick;
+ delete sel;
+ }
+
+ DBUG_RETURN(0);
+}
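To make the mechanism above easier to picture, here is a minimal standalone
sketch of the range rowid filter idea (hypothetical names only, not MariaDB
classes): a cheap range scan first collects the rowids that can possibly match
into a sorted container, and the join later probes that container before
fetching a row.

#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for Range_rowid_filter: a sorted container of rowids
// produced by a range scan, probed before full row access.
class RangeRowidFilterSketch
{
  std::vector<uint64_t> rowids_;                 // the "container"
public:
  // Built once from the rowids returned by the filtering range scan.
  void build(std::vector<uint64_t> range_scan_rowids)
  {
    rowids_= std::move(range_scan_rowids);
    std::sort(rowids_.begin(), rowids_.end());
  }
  // Probed by the access method: skip the row if it cannot match.
  bool check(uint64_t rowid) const
  {
    return std::binary_search(rowids_.begin(), rowids_.end(), rowid);
  }
};

The actual patch builds the scan with make_select()/test_quick_select() and
picks the container type via Rowid_filter_container, but the probe-before-fetch
idea is the same.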
+
+
+/**
+ @brief
+  Allocate memory for the rowid containers of the used range filters
+
+  @details
+  For each join table in the chosen execution plan that uses a range filter,
+  the function allocates memory for the rowid container employed by the filter.
+  On success it lets the table engine know what rowid filter will be used when
+  accessing the table rows.
+
+ @retval false always
+*/
+
+bool
+JOIN::init_range_rowid_filters()
+{
+ DBUG_ENTER("init_range_rowid_filters");
+
+ JOIN_TAB *tab;
+
+ for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
+ tab;
+ tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS))
+ {
+ if (!tab->rowid_filter)
+ continue;
+ if (tab->rowid_filter->get_container()->alloc())
+ {
+ delete tab->rowid_filter;
+ tab->rowid_filter= 0;
+ continue;
+ }
+ tab->table->file->rowid_filter_push(tab->rowid_filter);
+ tab->is_rowid_filter_built= false;
+ }
+ DBUG_RETURN(0);
+}
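Note the fallback above: if the container allocation fails, the filter is simply
deleted and the join proceeds with unfiltered access; only on success is the
filter pushed down to the engine via rowid_filter_push(), with
is_rowid_filter_built left false, presumably so the container is filled lazily
at execution time.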
+
+
int JOIN::init_join_caches()
{
JOIN_TAB *tab;
@@ -1581,6 +1857,11 @@ JOIN::optimize_inner()
set_allowed_join_cache_types();
need_distinct= TRUE;
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_object trace_prepare(thd, "join_optimization");
+ trace_prepare.add_select_number(select_lex->select_number);
+ Json_writer_array trace_steps(thd, "steps");
+
/*
Needed in case optimizer short-cuts,
set properly in make_aggr_tables_info()
@@ -1664,7 +1945,7 @@ JOIN::optimize_inner()
{
/*
Item_cond_and can't be fixed after creation, so we do not check
- conds->fixed
+ conds->is_fixed()
*/
conds->fix_fields(thd, &conds);
conds->change_ref_to_fields(thd, tables_list);
@@ -1710,7 +1991,7 @@ JOIN::optimize_inner()
if (arena)
thd->restore_active_arena(arena, &backup);
}
-
+
if (optimize_constant_subqueries())
DBUG_RETURN(1);
@@ -1721,9 +2002,28 @@ JOIN::optimize_inner()
(void) having->walk(&Item::cleanup_is_expensive_cache_processor,
0, (void *) 0);
- if (setup_jtbm_semi_joins(this, join_list, &conds))
+ List<Item> eq_list;
+
+ if (setup_degenerate_jtbm_semi_joins(this, join_list, eq_list))
DBUG_RETURN(1);
+ if (eq_list.elements != 0)
+ {
+ Item *new_cond;
+
+ if (eq_list.elements == 1)
+ new_cond= eq_list.pop();
+ else
+ new_cond= new (thd->mem_root) Item_cond_and(thd, eq_list);
+
+ if (new_cond &&
+ ((new_cond->fix_fields(thd, &new_cond) ||
+ !(conds= and_items(thd, conds, new_cond)) ||
+ conds->fix_fields(thd, &conds))))
+ DBUG_RETURN(TRUE);
+ }
+ eq_list.empty();
+
if (select_lex->cond_pushed_into_where)
{
conds= and_conds(thd, conds, select_lex->cond_pushed_into_where);
@@ -1743,7 +2043,7 @@ JOIN::optimize_inner()
select_lex->having_fix_field_for_pushed_cond= 0;
}
}
-
+
bool ignore_on_expr= false;
/*
PS/SP note: on_expr of versioned table can not be reallocated
@@ -1759,7 +2059,7 @@ JOIN::optimize_inner()
}
conds= optimize_cond(this, conds, join_list, ignore_on_expr,
&cond_value, &cond_equal, OPT_LINK_EQUAL_FIELDS);
-
+
if (thd->is_error())
{
error= 1;
@@ -1767,6 +2067,58 @@ JOIN::optimize_inner()
DBUG_RETURN(1);
}
+ having= optimize_cond(this, having, join_list, TRUE,
+ &having_value, &having_equal);
+
+ if (thd->is_error())
+ {
+ error= 1;
+ DBUG_PRINT("error",("Error from optimize_cond"));
+ DBUG_RETURN(1);
+ }
+
+ /* Do not push into WHERE from HAVING if cond_value == Item::COND_FALSE */
+
+ if (thd->lex->sql_command == SQLCOM_SELECT &&
+ optimizer_flag(thd, OPTIMIZER_SWITCH_COND_PUSHDOWN_FROM_HAVING) &&
+ cond_value != Item::COND_FALSE)
+ {
+ having=
+ select_lex->pushdown_from_having_into_where(thd, having);
+ if (select_lex->attach_to_conds.elements != 0)
+ {
+ conds= and_new_conditions_to_optimized_cond(thd, conds, &cond_equal,
+ select_lex->attach_to_conds,
+ &cond_value);
+ sel->attach_to_conds.empty();
+ }
+ }
+
+ if (optimizer_flag(thd, OPTIMIZER_SWITCH_COND_PUSHDOWN_FOR_SUBQUERY))
+ {
+ TABLE_LIST *tbl;
+ List_iterator_fast<TABLE_LIST> li(select_lex->leaf_tables);
+ while ((tbl= li++))
+ if (tbl->jtbm_subselect)
+ {
+ if (tbl->jtbm_subselect->pushdown_cond_for_in_subquery(thd, conds))
+ DBUG_RETURN(1);
+ }
+ }
+
+ if (setup_jtbm_semi_joins(this, join_list, eq_list))
+ DBUG_RETURN(1);
+
+ if (eq_list.elements != 0)
+ {
+ conds= and_new_conditions_to_optimized_cond(thd, conds, &cond_equal,
+ eq_list, &cond_value);
+
+ if (!conds &&
+ cond_value != Item::COND_FALSE && cond_value != Item::COND_TRUE)
+ DBUG_RETURN(TRUE);
+ }
+
if (optimizer_flag(thd, OPTIMIZER_SWITCH_COND_PUSHDOWN_FOR_DERIVED))
{
TABLE_LIST *tbl;
@@ -1804,32 +2156,22 @@ JOIN::optimize_inner()
if (select_lex->handle_derived(thd->lex, DT_OPTIMIZE))
DBUG_RETURN(1);
}
-
{
- having= optimize_cond(this, having, join_list, TRUE,
- &having_value, &having_equal);
-
- if (unlikely(thd->is_error()))
- {
- error= 1;
- DBUG_PRINT("error",("Error from optimize_cond"));
- DBUG_RETURN(1);
- }
if (select_lex->where)
{
select_lex->cond_value= cond_value;
if (sel->where != conds && cond_value == Item::COND_OK)
thd->change_item_tree(&sel->where, conds);
- }
+ }
if (select_lex->having)
{
select_lex->having_value= having_value;
if (sel->having != having && having_value == Item::COND_OK)
- thd->change_item_tree(&sel->having, having);
+ thd->change_item_tree(&sel->having, having);
}
- if (cond_value == Item::COND_FALSE || having_value == Item::COND_FALSE ||
+ if (cond_value == Item::COND_FALSE || having_value == Item::COND_FALSE ||
(!unit->select_limit_cnt && !(select_options & OPTION_FOUND_ROWS)))
- { /* Impossible cond */
+ { /* Impossible cond */
if (unit->select_limit_cnt)
{
DBUG_PRINT("info", (having_value == Item::COND_FALSE ?
@@ -2019,6 +2361,9 @@ int JOIN::optimize_stage2()
/* Generate an execution plan from the found optimal join order. */
if (get_best_combination())
DBUG_RETURN(1);
+
+ if (make_range_rowid_filters())
+ DBUG_RETURN(1);
if (select_lex->handle_derived(thd->lex, DT_OPTIMIZE))
DBUG_RETURN(1);
@@ -2077,7 +2422,7 @@ int JOIN::optimize_stage2()
if (!conds && outer_join)
{
/* Handle the case where we have an OUTER JOIN without a WHERE */
- conds= new (thd->mem_root) Item_int(thd, (longlong) 1,1); // Always true
+ conds= new (thd->mem_root) Item_bool(thd, true); // Always true
}
if (impossible_where)
@@ -2113,7 +2458,7 @@ int JOIN::optimize_stage2()
if (conds)
{
conds= substitute_for_best_equal_field(thd, NO_PARTICULAR_TAB, conds,
- cond_equal, map2table);
+ cond_equal, map2table, true);
if (unlikely(thd->is_error()))
{
error= 1;
@@ -2126,6 +2471,23 @@ int JOIN::optimize_stage2()
"after substitute_best_equal",
QT_ORDINARY););
}
+ if (having)
+ {
+ having= substitute_for_best_equal_field(thd, NO_PARTICULAR_TAB, having,
+ having_equal, map2table, false);
+ if (thd->is_error())
+ {
+ error= 1;
+ DBUG_PRINT("error",("Error from substitute_for_best_equal"));
+ DBUG_RETURN(1);
+ }
+ if (having)
+ having->update_used_tables();
+ DBUG_EXECUTE("having",
+ print_where(having,
+ "after substitute_best_equal",
+ QT_ORDINARY););
+ }
/*
Perform the optimization on fields evaluation mentioned above
@@ -2139,7 +2501,7 @@ int JOIN::optimize_stage2()
*tab->on_expr_ref= substitute_for_best_equal_field(thd, NO_PARTICULAR_TAB,
*tab->on_expr_ref,
tab->cond_equal,
- map2table);
+ map2table, true);
if (unlikely(thd->is_error()))
{
error= 1;
@@ -2169,7 +2531,7 @@ int JOIN::optimize_stage2()
while (equals)
{
ref_item= substitute_for_best_equal_field(thd, tab, ref_item,
- equals, map2table);
+ equals, map2table, true);
if (unlikely(thd->is_fatal_error))
DBUG_RETURN(1);
@@ -2217,7 +2579,7 @@ int JOIN::optimize_stage2()
if (conds && const_table_map != found_const_table_map &&
(select_options & SELECT_DESCRIBE))
{
- conds=new (thd->mem_root) Item_int(thd, (longlong) 0, 1); // Always false
+ conds=new (thd->mem_root) Item_bool(thd, false); // Always false
}
/* Cache constant expressions in WHERE, HAVING, ON clauses. */
@@ -2510,13 +2872,13 @@ int JOIN::optimize_stage2()
elements may be lost during further having
condition transformation in JOIN::exec.
*/
- if (having && const_table_map && !having->with_sum_func)
+ if (having && const_table_map && !having->with_sum_func())
{
having->update_used_tables();
having= having->remove_eq_conds(thd, &select_lex->having_value, true);
if (select_lex->having_value == Item::COND_FALSE)
{
- having= new (thd->mem_root) Item_int(thd, (longlong) 0,1);
+ having= new (thd->mem_root) Item_bool(thd, false);
zero_result_cause= "Impossible HAVING noticed after reading const tables";
error= 0;
select_lex->mark_const_derived(zero_result_cause);
@@ -2554,7 +2916,7 @@ int JOIN::optimize_stage2()
{
JOIN_TAB *tab= &join_tab[const_tables];
- if (order)
+ if (order && !need_tmp)
{
/*
Force using of tmp table if sorting by a SP or UDF function due to
@@ -2696,6 +3058,9 @@ int JOIN::optimize_stage2()
if (init_join_caches())
DBUG_RETURN(1);
+ if (init_range_rowid_filters())
+ DBUG_RETURN(1);
+
error= 0;
if (select_options & SELECT_DESCRIBE)
@@ -3259,7 +3624,7 @@ bool JOIN::make_aggr_tables_info()
or end_write_group()) if JOIN::group is set to false.
*/
// the temporary table was explicitly requested
- DBUG_ASSERT(MY_TEST(select_options & OPTION_BUFFER_RESULT));
+ DBUG_ASSERT(select_options & OPTION_BUFFER_RESULT);
// the temporary table does not have a grouping expression
DBUG_ASSERT(!curr_tab->table->group);
}
@@ -3863,6 +4228,15 @@ bool JOIN::save_explain_data(Explain_query *output, bool can_overwrite,
bool need_tmp_table, bool need_order,
bool distinct)
{
+ DBUG_ENTER("JOIN::save_explain_data");
+ DBUG_PRINT("enter", ("Save explain Select_lex: %u (%p) parent lex: %p stmt_lex: %p present select: %u (%p)",
+ select_lex->select_number, select_lex,
+ select_lex->parent_lex, thd->lex->stmt_lex,
+ (output->get_select(select_lex->select_number) ?
+ select_lex->select_number : 0),
+ (output->get_select(select_lex->select_number) ?
+ output->get_select(select_lex->select_number)
+ ->select_lex : NULL)));
/*
If there is SELECT in this statement with the same number it must be the
same SELECT
@@ -3889,8 +4263,9 @@ bool JOIN::save_explain_data(Explain_query *output, bool can_overwrite,
/* It's a degenerate join */
message= zero_result_cause ? zero_result_cause : "No tables used";
}
- return save_explain_data_intern(thd->lex->explain, need_tmp_table, need_order,
- distinct, message);
+ bool rc= save_explain_data_intern(thd->lex->explain, need_tmp_table,
+ need_order, distinct, message);
+ DBUG_RETURN(rc);
}
/*
@@ -3912,11 +4287,11 @@ bool JOIN::save_explain_data(Explain_query *output, bool can_overwrite,
{
if (!(join_tab[i].filesort->tracker=
new Filesort_tracker(thd->lex->analyze_stmt)))
- return 1;
+ DBUG_RETURN(1);
}
}
}
- return 0;
+ DBUG_RETURN(0);
}
@@ -3958,6 +4333,12 @@ void JOIN::exec_inner()
limit in order to produce the partial query result stored in the
UNION temp table.
*/
+
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_object trace_exec(thd, "join_execution");
+ trace_exec.add_select_number(select_lex->select_number);
+ Json_writer_array trace_steps(thd, "steps");
+
if (!select_lex->outer_select() && // (1)
select_lex != select_lex->master_unit()->fake_select_lex) // (2)
thd->lex->set_limit_rows_examined();
@@ -3983,7 +4364,6 @@ void JOIN::exec_inner()
if (select_options & SELECT_DESCRIBE)
select_describe(this, FALSE, FALSE, FALSE,
(zero_result_cause?zero_result_cause:"No tables used"));
-
else
{
if (result->send_result_set_metadata(*columns_list,
@@ -4082,7 +4462,8 @@ void JOIN::exec_inner()
not the case.
*/
if (exec_const_order_group_cond.elements &&
- !(select_options & SELECT_DESCRIBE))
+ !(select_options & SELECT_DESCRIBE) &&
+ !select_lex->pushdown_select)
{
List_iterator_fast<Item> const_item_it(exec_const_order_group_cond);
Item *cur_const_item;
@@ -4109,6 +4490,12 @@ void JOIN::exec_inner()
!table_count ? "No tables used" : NullS);
DBUG_VOID_RETURN;
}
+ else if (select_lex->pushdown_select)
+ {
+ /* Execute the query pushed into a foreign engine */
+ error= select_lex->pushdown_select->execute();
+ DBUG_VOID_RETURN;
+ }
else
{
/* it's a const select, materialize it. */
@@ -4156,6 +4543,9 @@ int
JOIN::destroy()
{
DBUG_ENTER("JOIN::destroy");
+
+ DBUG_PRINT("info", ("select %p (%u) <> JOIN %p",
+ select_lex, select_lex->select_number, this));
select_lex->join= 0;
cond_equal= 0;
@@ -4273,10 +4663,10 @@ mysql_select(THD *thd,
is it single SELECT in derived table, called in derived table
creation
*/
- if (select_lex->linkage != DERIVED_TABLE_TYPE ||
+ if (select_lex->get_linkage() != DERIVED_TABLE_TYPE ||
(select_options & SELECT_DESCRIBE))
{
- if (select_lex->linkage != GLOBAL_OPTIONS_TYPE)
+ if (select_lex->get_linkage() != GLOBAL_OPTIONS_TYPE)
{
/*
Original join tabs might be overwritten at first
@@ -4323,6 +4713,21 @@ mysql_select(THD *thd,
}
}
+ /* Look for a table owned by an engine with the select_handler interface */
+ select_lex->select_h= select_lex->find_select_handler(thd);
+ if (select_lex->select_h)
+ {
+ /* Create a Pushdown_select object for later execution of the query */
+ if (!(select_lex->pushdown_select=
+ new (thd->mem_root) Pushdown_select(select_lex,
+ select_lex->select_h)))
+ {
+ delete select_lex->select_h;
+ select_lex->select_h= NULL;
+ DBUG_RETURN(TRUE);
+ }
+ }
+
if ((err= join->optimize()))
{
goto err; // 1
@@ -4346,6 +4751,13 @@ mysql_select(THD *thd,
}
err:
+
+ if (select_lex->pushdown_select)
+ {
+ delete select_lex->pushdown_select;
+ select_lex->pushdown_select= NULL;
+ }
+
if (free_join)
{
THD_STAGE_INFO(thd, stage_end);
@@ -4378,7 +4790,8 @@ static ha_rows get_quick_record_count(THD *thd, SQL_SELECT *select,
select->test_quick_select(thd, *(key_map *)keys,
(table_map) 0,
limit, 0, FALSE,
- TRUE /* remove_where_parts*/)) ==
+ TRUE, /* remove_where_parts*/
+ FALSE)) ==
1))
DBUG_RETURN(select->quick->records);
if (unlikely(error == -1))
@@ -4511,6 +4924,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
SARGABLE_PARAM *sargables= 0;
List_iterator<TABLE_LIST> ti(tables_list);
TABLE_LIST *tables;
+ THD *thd= join->thd;
DBUG_ENTER("make_join_statistics");
table_count=join->table_count;
@@ -4533,6 +4947,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
/* The following should be optimized to only clear critical things */
bzero((void*)stat, sizeof(JOIN_TAB)* table_count);
+
/* Initialize POSITION objects */
for (i=0 ; i <= table_count ; i++)
(void) new ((char*) (join->positions + i)) POSITION;
@@ -4547,10 +4962,6 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
{
TABLE_LIST *embedding= tables->embedding;
stat_vector[i]=s;
- s->keys.init();
- s->const_keys.init();
- s->checked_keys.init();
- s->needed_reg.init();
table_vector[i]=s->table=table=tables->table;
s->tab_list= tables;
table->pos_in_table_list= tables;
@@ -4706,9 +5117,12 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
}
}
+ if (thd->trace_started())
+ trace_table_dependencies(thd, stat, join->table_count);
+
if (join->conds || outer_join)
{
- if (update_ref_and_keys(join->thd, keyuse_array, stat, join->table_count,
+ if (update_ref_and_keys(thd, keyuse_array, stat, join->table_count,
join->conds, ~outer_join, join->select_lex, &sargables))
goto error;
/*
@@ -4720,10 +5134,12 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
((Item_in_subselect*)join->unit->item)->test_strategy(SUBS_IN_TO_EXISTS));
if (keyuse_array->elements &&
- sort_and_filter_keyuse(join->thd, keyuse_array,
+ sort_and_filter_keyuse(thd, keyuse_array,
skip_unprefixed_keyparts))
goto error;
DBUG_EXECUTE("opt", print_keyuse_array(keyuse_array););
+ if (thd->trace_started())
+ print_keyuse_array_for_trace(thd, keyuse_array);
}
join->const_table_map= no_rows_const_tables;
@@ -4971,7 +5387,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
if (join->cond_value == Item::COND_FALSE)
{
join->impossible_where= true;
- conds= new (join->thd->mem_root) Item_int(join->thd, (longlong) 0, 1);
+ conds= new (join->thd->mem_root) Item_bool(join->thd, false);
}
join->cond_equal= NULL;
@@ -5008,143 +5424,170 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
/* Calc how many (possible) matched records in each table */
- for (s=stat ; s < stat_end ; s++)
+ /*
+ Todo: add a function so that we can add these Json_writer_objects
+ easily.
+ Another way would be to enclose them in a scope {};
+ */
{
- s->startup_cost= 0;
- if (s->type == JT_SYSTEM || s->type == JT_CONST)
- {
- /* Only one matching row */
- s->found_records= s->records= 1;
- s->read_time=1.0;
- s->worst_seeks=1.0;
- continue;
- }
- /* Approximate found rows and time to read them */
- if (s->table->is_filled_at_execution())
- {
- get_delayed_table_estimates(s->table, &s->records, &s->read_time,
- &s->startup_cost);
- s->found_records= s->records;
- table->quick_condition_rows=s->records;
- }
- else
+ Json_writer_object rows_estimation_wrapper(thd);
+ Json_writer_array rows_estimation(thd, "rows_estimation");
+
+ for (s=stat ; s < stat_end ; s++)
{
- s->scan_time();
- }
+ s->startup_cost= 0;
+ if (s->type == JT_SYSTEM || s->type == JT_CONST)
+ {
- if (s->table->is_splittable())
- s->add_keyuses_for_splitting();
+ Json_writer_object table_records(thd);
+ /* Only one matching row */
+ s->found_records= s->records= 1;
+ s->read_time=1.0;
+ s->worst_seeks=1.0;
+ table_records.add_table_name(s)
+ .add("rows", s->found_records)
+ .add("cost", s->read_time)
+ .add("table_type", s->type == JT_CONST ?
+ "const" :
+ "system");
+ continue;
+ }
+ /* Approximate found rows and time to read them */
+ if (s->table->is_filled_at_execution())
+ {
+ get_delayed_table_estimates(s->table, &s->records, &s->read_time,
+ &s->startup_cost);
+ s->found_records= s->records;
+ table->quick_condition_rows=s->records;
+ }
+ else
+ s->scan_time();
- /*
- Set a max range of how many seeks we can expect when using keys
- This is can't be to high as otherwise we are likely to use
- table scan.
- */
- s->worst_seeks= MY_MIN((double) s->found_records / 10,
- (double) s->read_time*3);
- if (s->worst_seeks < 2.0) // Fix for small tables
- s->worst_seeks=2.0;
+ if (s->table->is_splittable())
+ s->add_keyuses_for_splitting();
- /*
- Add to stat->const_keys those indexes for which all group fields or
- all select distinct fields participate in one index.
- */
- add_group_and_distinct_keys(join, s);
+ /*
+ Set a max range of how many seeks we can expect when using keys
+ This is can't be to high as otherwise we are likely to use
+ table scan.
+ */
+ s->worst_seeks= MY_MIN((double) s->found_records / 10,
+ (double) s->read_time*3);
+ if (s->worst_seeks < 2.0) // Fix for small tables
+ s->worst_seeks=2.0;
- s->table->cond_selectivity= 1.0;
-
- /*
- Perform range analysis if there are keys it could use (1).
- Don't do range analysis for materialized subqueries (2).
- Don't do range analysis for materialized derived tables (3)
- */
- if ((!s->const_keys.is_clear_all() ||
- !bitmap_is_clear_all(&s->table->cond_set)) && // (1)
- !s->table->is_filled_at_execution() && // (2)
- !(s->table->pos_in_table_list->derived && // (3)
- s->table->pos_in_table_list->is_materialized_derived())) // (3)
- {
- bool impossible_range= FALSE;
- ha_rows records= HA_POS_ERROR;
- SQL_SELECT *select= 0;
- Item **sargable_cond= NULL;
- if (!s->const_keys.is_clear_all())
- {
- sargable_cond= get_sargable_cond(join, s->table);
-
- select= make_select(s->table, found_const_table_map,
- found_const_table_map,
- *sargable_cond,
- (SORT_INFO*) 0,
- 1, &error);
- if (!select)
- goto error;
- records= get_quick_record_count(join->thd, select, s->table,
- &s->const_keys, join->row_limit);
+ /*
+ Add to stat->const_keys those indexes for which all group fields or
+ all select distinct fields participate in one index.
+ */
+ add_group_and_distinct_keys(join, s);
- /*
- Range analyzer might have modified the condition. Put it the new
- condition to where we got it from.
- */
- *sargable_cond= select->cond;
+ s->table->cond_selectivity= 1.0;
- s->quick=select->quick;
- s->needed_reg=select->needed_reg;
- select->quick=0;
- impossible_range= records == 0 && s->table->reginfo.impossible_range;
- }
- if (!impossible_range)
- {
- if (!sargable_cond)
+ /*
+ Perform range analysis if there are keys it could use (1).
+ Don't do range analysis for materialized subqueries (2).
+ Don't do range analysis for materialized derived tables (3)
+ */
+ if ((!s->const_keys.is_clear_all() ||
+ !bitmap_is_clear_all(&s->table->cond_set)) && // (1)
+ !s->table->is_filled_at_execution() && // (2)
+ !(s->table->pos_in_table_list->derived && // (3)
+ s->table->pos_in_table_list->is_materialized_derived())) // (3)
+ {
+ bool impossible_range= FALSE;
+ ha_rows records= HA_POS_ERROR;
+ SQL_SELECT *select= 0;
+ Item **sargable_cond= NULL;
+ if (!s->const_keys.is_clear_all())
+ {
sargable_cond= get_sargable_cond(join, s->table);
- if (join->thd->variables.optimizer_use_condition_selectivity > 1)
- calculate_cond_selectivity_for_table(join->thd, s->table,
- sargable_cond);
- if (s->table->reginfo.impossible_range)
- {
- impossible_range= TRUE;
- records= 0;
+
+ select= make_select(s->table, found_const_table_map,
+ found_const_table_map,
+ *sargable_cond,
+ (SORT_INFO*) 0, 1, &error);
+ if (!select)
+ goto error;
+ records= get_quick_record_count(join->thd, select, s->table,
+ &s->const_keys, join->row_limit);
+
+ /*
+ Range analyzer might have modified the condition. Put it the new
+ condition to where we got it from.
+ */
+ *sargable_cond= select->cond;
+
+ s->quick=select->quick;
+ s->needed_reg=select->needed_reg;
+ select->quick=0;
+ impossible_range= records == 0 && s->table->reginfo.impossible_range;
+ if (join->thd->lex->sql_command == SQLCOM_SELECT &&
+ optimizer_flag(join->thd, OPTIMIZER_SWITCH_USE_ROWID_FILTER))
+ s->table->init_cost_info_for_usable_range_rowid_filters(join->thd);
}
- }
- if (impossible_range)
- {
- /*
- Impossible WHERE or ON expression
- In case of ON, we mark that the we match one empty NULL row.
- In case of WHERE, don't set found_const_table_map to get the
- caller to abort with a zero row result.
- */
- TABLE_LIST *emb= s->table->pos_in_table_list->embedding;
- if (emb && !emb->sj_on_expr)
+ if (!impossible_range)
{
- /* Mark all tables in a multi-table join nest as const */
- mark_join_nest_as_const(join, emb, &found_const_table_map,
- &const_count);
+ if (!sargable_cond)
+ sargable_cond= get_sargable_cond(join, s->table);
+ if (join->thd->variables.optimizer_use_condition_selectivity > 1)
+ calculate_cond_selectivity_for_table(join->thd, s->table,
+ sargable_cond);
+ if (s->table->reginfo.impossible_range)
+ {
+ impossible_range= TRUE;
+ records= 0;
+ }
}
- else
+ if (impossible_range)
{
- join->const_table_map|= s->table->map;
- set_position(join,const_count++,s,(KEYUSE*) 0);
- s->type= JT_CONST;
- s->table->const_table= 1;
- if (*s->on_expr_ref)
+ /*
+ Impossible WHERE or ON expression
+ In case of ON, we mark that the we match one empty NULL row.
+ In case of WHERE, don't set found_const_table_map to get the
+ caller to abort with a zero row result.
+ */
+ TABLE_LIST *emb= s->table->pos_in_table_list->embedding;
+ if (emb && !emb->sj_on_expr)
{
- /* Generate empty row */
- s->info= ET_IMPOSSIBLE_ON_CONDITION;
- found_const_table_map|= s->table->map;
- mark_as_null_row(s->table); // All fields are NULL
+ /* Mark all tables in a multi-table join nest as const */
+ mark_join_nest_as_const(join, emb, &found_const_table_map,
+ &const_count);
}
+ else
+ {
+ join->const_table_map|= s->table->map;
+ set_position(join,const_count++,s,(KEYUSE*) 0);
+ s->type= JT_CONST;
+ s->table->const_table= 1;
+ if (*s->on_expr_ref)
+ {
+ /* Generate empty row */
+ s->info= ET_IMPOSSIBLE_ON_CONDITION;
+ found_const_table_map|= s->table->map;
+ mark_as_null_row(s->table); // All fields are NULL
+ }
+ }
+ }
+ if (records != HA_POS_ERROR)
+ {
+ s->found_records=records;
+ s->read_time= s->quick ? s->quick->read_time : 0.0;
+ }
+ if (select)
+ delete select;
+ else
+ {
+ if (thd->trace_started())
+ add_table_scan_values_to_trace(thd, s);
}
}
- if (records != HA_POS_ERROR)
+ else
{
- s->found_records=records;
- s->read_time= s->quick ? s->quick->read_time : 0.0;
+ if (thd->trace_started())
+ add_table_scan_values_to_trace(thd, s);
}
- if (select)
- delete select;
}
-
}
if (pull_out_semijoin_tables(join))
@@ -6788,6 +7231,7 @@ void set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key)
// join->positions[idx].loosescan_key= MAX_KEY; /* Not a LooseScan */
join->positions[idx].sj_strategy= SJ_OPT_NONE;
join->positions[idx].use_join_buffer= FALSE;
+ join->positions[idx].range_rowid_filter_info= 0;
/* Move the const table as down as possible in best_ref */
JOIN_TAB **pos=join->best_ref+idx+1;
@@ -6908,17 +7352,24 @@ best_access_path(JOIN *join,
double best_time= DBL_MAX;
double records= DBL_MAX;
table_map best_ref_depends_map= 0;
+ Range_rowid_filter_cost_info *best_filter= 0;
double tmp;
ha_rows rec;
bool best_uses_jbuf= FALSE;
MY_BITMAP *eq_join_set= &s->table->eq_join_set;
KEYUSE *hj_start_key= 0;
SplM_plan_info *spl_plan= 0;
+ Range_rowid_filter_cost_info *filter= 0;
+ const char* cause= NULL;
+ enum join_type best_type= JT_UNKNOWN, type= JT_UNKNOWN;
disable_jbuf= disable_jbuf || idx == join->const_tables;
Loose_scan_opt loose_scan_opt;
DBUG_ENTER("best_access_path");
+
+ Json_writer_object trace_wrapper(thd, "best_access_path");
+ Json_writer_array trace_paths(thd, "considered_access_paths");
bitmap_clear_all(eq_join_set);
@@ -6946,6 +7397,7 @@ best_access_path(JOIN *join,
key_part_map notnull_part=0; // key parts which won't have NULL in lookup tuple.
table_map found_ref= 0;
uint key= keyuse->key;
+ filter= 0;
bool ft_key= (keyuse->keypart == FT_KEYPART);
/* Bitmap of keyparts where the ref access is over 'keypart=const': */
key_part_map const_part= 0;
@@ -7037,8 +7489,9 @@ best_access_path(JOIN *join,
if (rec < MATCHING_ROWS_IN_OTHER_TABLE)
rec= MATCHING_ROWS_IN_OTHER_TABLE; // Fix for small tables
+ Json_writer_object trace_access_idx(thd);
/*
- ft-keys require special treatment
+ full text keys require special treatment
*/
if (ft_key)
{
@@ -7048,6 +7501,9 @@ best_access_path(JOIN *join,
*/
tmp= prev_record_reads(join_positions, idx, found_ref);
records= 1.0;
+ type= JT_FT;
+ trace_access_idx.add("access_type", join_type_str[type])
+ .add("full-text index", keyinfo->name);
}
else
{
@@ -7069,11 +7525,17 @@ best_access_path(JOIN *join,
(!(key_flags & HA_NULL_PART_KEY) || // (2)
all_key_parts == notnull_part)) // (3)
{
+ type= JT_EQ_REF;
+ trace_access_idx.add("access_type", join_type_str[type])
+ .add("index", keyinfo->name);
tmp = prev_record_reads(join_positions, idx, found_ref);
records=1.0;
}
else
{
+ type= JT_REF;
+ trace_access_idx.add("access_type", join_type_str[type])
+ .add("index", keyinfo->name);
if (!found_ref)
{ /* We found a const key */
/*
@@ -7094,11 +7556,16 @@ best_access_path(JOIN *join,
empty interval we wouldn't have got here).
*/
if (table->quick_keys.is_set(key))
+ {
records= (double) table->quick_rows[key];
+ trace_access_idx.add("used_range_estimates", true);
+ }
else
{
/* quick_range couldn't use key! */
records= (double) s->records/rec;
+ trace_access_idx.add("used_range_estimates", false)
+ .add("cause", "not available");
}
}
else
@@ -7130,7 +7597,23 @@ best_access_path(JOIN *join,
table->quick_n_ranges[key] == 1 &&
records > (double) table->quick_rows[key])
{
+
records= (double) table->quick_rows[key];
+ trace_access_idx.add("used_range_estimates", true);
+ }
+ else
+ {
+ if (table->quick_keys.is_set(key))
+ {
+ trace_access_idx.add("used_range_estimates",false)
+ .add("cause",
+ "not better than ref estimates");
+ }
+ else
+ {
+ trace_access_idx.add("used_range_estimates", false)
+ .add("cause", "not available");
+ }
}
}
/* Limit the number of matched rows */
@@ -7146,6 +7629,9 @@ best_access_path(JOIN *join,
}
else
{
+ type = ref_or_null_part ? JT_REF_OR_NULL : JT_REF;
+ trace_access_idx.add("access_type", join_type_str[type])
+ .add("index", keyinfo->name);
/*
Use as much key-parts as possible and a uniq key is better
than a not unique key
@@ -7200,6 +7686,7 @@ best_access_path(JOIN *join,
table->quick_n_ranges[key] == 1 + MY_TEST(ref_or_null_part)) //(C3)
{
tmp= records= (double) table->quick_rows[key];
+ trace_access_idx.add("used_range_estimates", true);
}
else
{
@@ -7225,7 +7712,19 @@ best_access_path(JOIN *join,
if (!found_ref && table->quick_keys.is_set(key) && // (1)
table->quick_key_parts[key] > max_key_part && // (2)
records < (double)table->quick_rows[key]) // (3)
+ {
+ trace_access_idx.add("used_range_estimates", true);
records= (double)table->quick_rows[key];
+ }
+ else
+ {
+ if (table->quick_keys.is_set(key) &&
+ table->quick_key_parts[key] < max_key_part)
+ {
+ trace_access_idx.add("chosen", false);
+ cause= "range uses more keyparts";
+ }
+ }
tmp= records;
}
@@ -7309,7 +7808,11 @@ best_access_path(JOIN *join,
tmp= COST_MULT(tmp, record_count);
}
else
- tmp= best_time; // Do nothing
+ {
+ if (!(found_part & 1))
+ cause= "no predicate for first keypart";
+ tmp= best_time; // Do nothing
+ }
}
tmp= COST_ADD(tmp, s->startup_cost);
@@ -7317,15 +7820,43 @@ best_access_path(JOIN *join,
found_ref);
} /* not ft_key */
+ if (records < DBL_MAX &&
+ (found_part & 1)) // start_key->key can be used for index access
+ {
+ double rows= record_count * records;
+ double access_cost_factor= MY_MIN(tmp / rows, 1.0);
+ filter=
+ table->best_range_rowid_filter_for_partial_join(start_key->key, rows,
+ access_cost_factor);
+ if (filter)
+ {
+ filter->get_cmp_gain(rows);
+ tmp-= filter->get_adjusted_gain(rows) - filter->get_cmp_gain(rows);
+ DBUG_ASSERT(tmp >= 0);
+ trace_access_idx.add("rowid_filter_key",
+ s->table->key_info[filter->key_no].name);
+ }
+ }
+ trace_access_idx.add("rows", records).add("cost", tmp);
+
if (tmp + 0.0001 < best_time - records/(double) TIME_FOR_COMPARE)
{
+ trace_access_idx.add("chosen", true);
best_time= COST_ADD(tmp, records/(double) TIME_FOR_COMPARE);
best= tmp;
best_records= records;
best_key= start_key;
best_max_key_part= max_key_part;
best_ref_depends_map= found_ref;
+ best_filter= filter;
+ best_type= type;
+ }
+ else
+ {
+ trace_access_idx.add("chosen", false)
+ .add("cause", cause ? cause : "cost");
}
+ cause= NULL;
} /* for each key */
records= best_records;
}
@@ -7347,6 +7878,7 @@ best_access_path(JOIN *join,
(!(s->table->map & join->outer_join) ||
join->allowed_outer_join_with_cache)) // (2)
{
+ Json_writer_object trace_access_hash(thd);
double join_sel= 0.1;
/* Estimate the cost of the hash join access to the table */
double rnd_records= matching_candidates_in_table(s, found_constraint,
@@ -7370,7 +7902,14 @@ best_access_path(JOIN *join,
best_key= hj_start_key;
best_ref_depends_map= 0;
best_uses_jbuf= TRUE;
- }
+ best_filter= 0;
+ best_type= JT_HASH;
+ trace_access_hash.add("type", "hash");
+ trace_access_hash.add("index", "hj-key");
+ trace_access_hash.add("cost", rnd_records);
+ trace_access_hash.add("cost", best);
+ trace_access_hash.add("chosen", true);
+ }
/*
Don't test table scan if it can't be better.
@@ -7406,6 +7945,7 @@ best_access_path(JOIN *join,
can be [considered to be] more expensive, which causes lookups not to
be used for cases with small datasets, which is annoying.
*/
+ Json_writer_object trace_access_scan(thd);
if ((records >= s->found_records || best > s->read_time) && // (1)
!(best_key && best_key->key == MAX_KEY) && // (2)
!(s->quick && best_key && s->quick->index == best_key->key && // (2)
@@ -7424,6 +7964,7 @@ best_access_path(JOIN *join,
Here we estimate its cost.
*/
+ filter= 0;
if (s->quick)
{
/*
@@ -7439,15 +7980,41 @@ best_access_path(JOIN *join,
tmp= COST_MULT(record_count,
COST_ADD(s->quick->read_time, cmp_time));
+ if ( s->quick->get_type() == QUICK_SELECT_I::QS_TYPE_RANGE)
+ {
+ double rows= record_count * s->found_records;
+ double access_cost_factor= MY_MIN(tmp / rows, 1.0);
+ uint key_no= s->quick->index;
+ filter=
+ s->table->best_range_rowid_filter_for_partial_join(key_no, rows,
+ access_cost_factor);
+ if (filter)
+ {
+ tmp-= filter->get_adjusted_gain(rows);
+ DBUG_ASSERT(tmp >= 0);
+ }
+ type= JT_RANGE;
+ }
+ else
+ {
+ type= JT_INDEX_MERGE;
+ best_filter= 0;
+ }
loose_scan_opt.check_range_access(join, idx, s->quick);
}
else
{
/* Estimate cost of reading table. */
if (s->table->force_index && !best_key) // index scan
+ {
+ type= JT_NEXT;
tmp= s->table->file->read_time(s->ref.key, 1, s->records);
+ }
else // table scan
+ {
tmp= s->scan_time();
+ type= JT_ALL;
+ }
if ((s->table->map & join->outer_join) || disable_jbuf) // Can't use join cache
{
@@ -7477,20 +8044,32 @@ best_access_path(JOIN *join,
}
}
+ trace_access_scan.add("access_type", type == JT_ALL ?
+ "scan" :
+ join_type_str[type]);
/* Splitting technique cannot be used with join cache */
if (s->table->is_splittable())
tmp+= s->table->get_materialization_cost();
else
tmp+= s->startup_cost;
+
/*
We estimate the cost of evaluating WHERE clause for found records
as record_count * rnd_records / TIME_FOR_COMPARE. This cost plus
tmp give us total cost of using TABLE SCAN
*/
+
+ const double best_filter_cmp_gain= best_filter
+ ? best_filter->get_cmp_gain(record_count * records)
+ : 0;
+ trace_access_scan.add("resulting_rows", rnd_records);
+ trace_access_scan.add("cost", tmp);
+
if (best == DBL_MAX ||
COST_ADD(tmp, record_count/(double) TIME_FOR_COMPARE*rnd_records) <
(best_key->is_for_hash_join() ? best_time :
- COST_ADD(best, record_count/(double) TIME_FOR_COMPARE*records)))
+ COST_ADD(best - best_filter_cmp_gain,
+ record_count/(double) TIME_FOR_COMPARE*records)))
{
/*
If the table has a range (s->quick is set) make_join_select()
@@ -7499,12 +8078,23 @@ best_access_path(JOIN *join,
best= tmp;
records= rnd_records;
best_key= 0;
+ best_filter= 0;
+ if (s->quick && s->quick->get_type() == QUICK_SELECT_I::QS_TYPE_RANGE)
+ best_filter= filter;
/* range/index_merge/ALL/index access method are "independent", so: */
best_ref_depends_map= 0;
best_uses_jbuf= MY_TEST(!disable_jbuf && !((s->table->map &
join->outer_join)));
spl_plan= 0;
+ best_type= type;
}
+ trace_access_scan.add("chosen", best_key == NULL);
+ }
+ else
+ {
+ trace_access_scan.add("type", "scan");
+ trace_access_scan.add("chosen", false);
+ trace_access_scan.add("cause", "cost");
}
/* Update the cost information for the current partial plan */
@@ -7516,6 +8106,7 @@ best_access_path(JOIN *join,
pos->loosescan_picker.loosescan_key= MAX_KEY;
pos->use_join_buffer= best_uses_jbuf;
pos->spl_plan= spl_plan;
+ pos->range_rowid_filter_info= best_filter;
loose_scan_opt.save_to_position(s, loose_scan_pos);
@@ -7523,7 +8114,15 @@ best_access_path(JOIN *join,
idx == join->const_tables &&
s->table == join->sort_by_table &&
join->unit->select_limit_cnt >= records)
+ {
+ trace_access_scan.add("use_tmp_table", true);
join->sort_by_table= (TABLE*) 1; // Must use temporary table
+ }
+ trace_access_scan.end();
+ trace_paths.end();
+
+ if (unlikely(thd->trace_started()))
+ print_best_access_for_table(thd, pos, best_type);
DBUG_VOID_RETURN;
}
@@ -7666,6 +8265,7 @@ choose_plan(JOIN *join, table_map join_tables)
uint use_cond_selectivity=
join->thd->variables.optimizer_use_condition_selectivity;
bool straight_join= MY_TEST(join->select_options & SELECT_STRAIGHT_JOIN);
+ THD *thd= join->thd;
DBUG_ENTER("choose_plan");
join->cur_embedding_map= 0;
@@ -7702,6 +8302,9 @@ choose_plan(JOIN *join, table_map join_tables)
join->table_count - join->const_tables, sizeof(JOIN_TAB*),
jtab_sort_func, (void*)join->emb_sjm_nest);
+ Json_writer_object wrapper(thd);
+ Json_writer_array trace_plan(thd,"considered_execution_plans");
+
if (!join->emb_sjm_nest)
{
choose_initial_table_order(join);
@@ -7995,19 +8598,30 @@ optimize_straight_join(JOIN *join, table_map join_tables)
uint use_cond_selectivity=
join->thd->variables.optimizer_use_condition_selectivity;
POSITION loose_scan_pos;
+ THD *thd= join->thd;
for (JOIN_TAB **pos= join->best_ref + idx ; (s= *pos) ; pos++)
{
+ POSITION *position= join->positions + idx;
+ Json_writer_object trace_one_table(thd);
+ if (unlikely(thd->trace_started()))
+ {
+ trace_plan_prefix(join, idx, join_tables);
+ trace_one_table.add_table_name(s);
+ }
/* Find the best access method from 's' to the current partial plan */
best_access_path(join, s, join_tables, join->positions, idx,
disable_jbuf, record_count,
- join->positions + idx, &loose_scan_pos);
+ position, &loose_scan_pos);
/* compute the cost of the new plan extended with 's' */
- record_count= COST_MULT(record_count, join->positions[idx].records_read);
- read_time= COST_ADD(read_time,
- COST_ADD(join->positions[idx].read_time,
- record_count / (double) TIME_FOR_COMPARE));
+ record_count= COST_MULT(record_count, position->records_read);
+ const double filter_cmp_gain= position->range_rowid_filter_info
+ ? position->range_rowid_filter_info->get_cmp_gain(record_count)
+ : 0;
+ read_time= COST_ADD(read_time,
+ COST_ADD(position->read_time - filter_cmp_gain,
+ record_count / (double) TIME_FOR_COMPARE));
advance_sj_state(join, join_tables, idx, &record_count, &read_time,
&loose_scan_pos);
@@ -8016,7 +8630,7 @@ optimize_straight_join(JOIN *join, table_map join_tables)
if (use_cond_selectivity > 1)
pushdown_cond_selectivity= table_cond_selectivity(join, idx, s,
join_tables);
- join->positions[idx].cond_selectivity= pushdown_cond_selectivity;
+ position->cond_selectivity= pushdown_cond_selectivity;
++idx;
}
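
optimize_straight_join() folds the same gain into the running totals for the fixed join order: the prefix cardinality is multiplied by each table's records_read, and the prefix cost grows by that table's read cost minus the filter's gain plus the per-row comparison cost. A self-contained sketch of the accumulation (all values invented):

#include <cstdio>

struct Step { double rows_read, read_time, filter_gain; };

int main()
{
  const double TIME_FOR_COMPARE= 5.0;            // assumed constant
  Step plan[]= { {1000, 120, 0}, {10, 300, 40}, {3, 90, 0} };

  double record_count= 1.0, read_time= 0.0;
  for (const Step &s : plan)
  {
    record_count*= s.rows_read;                  // COST_MULT
    read_time+= (s.read_time - s.filter_gain) +  // COST_ADD, minus the gain
                record_count / TIME_FOR_COMPARE;
  }
  printf("prefix rows=%g  prefix cost=%g\n", record_count, read_time);
  return 0;
}
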
@@ -8677,7 +9291,7 @@ double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
fldno= table->key_info[key].key_part[keyparts-1].fieldnr - 1;
if (table->field[fldno]->cond_selectivity > 0)
- {
+ {
sel /= table->field[fldno]->cond_selectivity;
set_if_smaller(sel, 1.0);
}
@@ -8732,9 +9346,9 @@ double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
next_field= next_field->next_equal_field)
{
if (!(next_field->table->map & rem_tables) && next_field->table != table)
- {
+ {
if (field->cond_selectivity > 0)
- {
+ {
sel/= field->cond_selectivity;
set_if_smaller(sel, 1.0);
}
@@ -8928,6 +9542,13 @@ best_extension_by_limited_search(JOIN *join,
double current_record_count, current_read_time;
POSITION *position= join->positions + idx;
+ Json_writer_object trace_one_table(thd);
+ if (unlikely(thd->trace_started()))
+ {
+ trace_plan_prefix(join, idx, remaining_tables);
+ trace_one_table.add_table_name(s);
+ }
+
/* Find the best access method from 's' to the current partial plan */
POSITION loose_scan_pos;
best_access_path(join, s, remaining_tables, join->positions, idx,
@@ -8935,11 +9556,20 @@ best_extension_by_limited_search(JOIN *join,
/* Compute the cost of extending the plan with 's' */
current_record_count= COST_MULT(record_count, position->records_read);
+ const double filter_cmp_gain= position->range_rowid_filter_info
+ ? position->range_rowid_filter_info->get_cmp_gain(current_record_count)
+ : 0;
current_read_time=COST_ADD(read_time,
- COST_ADD(position->read_time,
+ COST_ADD(position->read_time -
+ filter_cmp_gain,
current_record_count /
(double) TIME_FOR_COMPARE));
+ if (unlikely(thd->trace_started()))
+ {
+ trace_one_table.add("rows_for_plan", current_record_count);
+ trace_one_table.add("cost_for_plan", current_read_time);
+ }
advance_sj_state(join, remaining_tables, idx, &current_record_count,
&current_read_time, &loose_scan_pos);
@@ -8951,6 +9581,7 @@ best_extension_by_limited_search(JOIN *join,
read_time,
current_read_time,
"prune_by_cost"););
+ trace_one_table.add("pruned_by_cost", true);
restore_prev_nj_state(s);
restore_prev_sj_state(remaining_tables, s, idx);
continue;
@@ -8984,6 +9615,7 @@ best_extension_by_limited_search(JOIN *join,
read_time,
current_read_time,
"pruned_by_heuristic"););
+ trace_one_table.add("pruned_by_heuristic", true);
restore_prev_nj_state(s);
restore_prev_sj_state(remaining_tables, s, idx);
continue;
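
The Json_writer_object/Json_writer_array locals added throughout best_extension_by_limited_search() rely on scope lifetime: a trace node is opened when the local is constructed and closed when it goes out of scope, so one object per candidate table, keys such as pruned_by_cost and pruned_by_heuristic, and the rest_of_plan array opened around the recursive call in the next hunk all nest in the trace exactly as the C++ blocks nest. A toy model of that pattern, not the server's my_json_writer API:

#include <cstdio>

/* A scope opens a trace node on construction and closes it on destruction */
struct TraceScope
{
  const char *name;
  explicit TraceScope(const char *name_arg) : name(name_arg)
  { printf("open  %s\n", name); }
  ~TraceScope() { printf("close %s\n", name); }
};

int main()
{
  TraceScope plans("considered_execution_plans");
  {
    TraceScope table("plan_prefix + table t1");
    printf("      rows_for_plan, cost_for_plan, ...\n");
    TraceScope rest("rest_of_plan");   // the recursive call nests one deeper
  }
  return 0;
}

The explicit end() calls seen in best_access_path() (trace_access_scan.end(), trace_paths.end()) close such a node early, before its destructor would.
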
@@ -8996,11 +9628,16 @@ best_extension_by_limited_search(JOIN *join,
remaining_tables &
~real_table_bit);
join->positions[idx].cond_selectivity= pushdown_cond_selectivity;
+
+ if (unlikely(thd->trace_started()) && pushdown_cond_selectivity < 1.0)
+ trace_one_table.add("selectivity", pushdown_cond_selectivity);
+
double partial_join_cardinality= current_record_count *
pushdown_cond_selectivity;
if ( (search_depth > 1) && (remaining_tables & ~real_table_bit) & allowed_tables )
{ /* Recursively expand the current partial plan */
swap_variables(JOIN_TAB*, join->best_ref[idx], *pos);
+ Json_writer_array trace_rest(thd, "rest_of_plan");
if (best_extension_by_limited_search(join,
remaining_tables & ~real_table_bit,
idx + 1,
@@ -9020,12 +9657,17 @@ best_extension_by_limited_search(JOIN *join,
if (join->sort_by_table &&
join->sort_by_table !=
join->positions[join->const_tables].table->table)
+ {
/*
We may have to make a temp table, note that this is only a
heuristic since we cannot know for sure at this point.
Hence it may be wrong.
*/
+ trace_one_table.add("cost_for_sorting", current_record_count);
current_read_time= COST_ADD(current_read_time, current_record_count);
+ }
+ trace_one_table.add("estimated_join_cardinality",
+ partial_join_cardinality);
if (current_read_time < join->best_read)
{
memcpy((uchar*) join->best_positions, (uchar*) join->positions,
@@ -9648,6 +10290,7 @@ bool JOIN::inject_cond_into_where(Item *injected_cond)
static Item * const null_ptr= NULL;
+
/*
Set up join struct according to the picked join order in
@@ -9807,6 +10450,8 @@ bool JOIN::get_best_combination()
is_hash_join_key_no(j->ref.key))
hash_join= TRUE;
+ j->range_rowid_filter_info= best_positions[tablenr].range_rowid_filter_info;
+
loop_end:
/*
Save records_read in JOIN_TAB so that select_describe()/etc don't have
@@ -9847,6 +10492,9 @@ bool JOIN::get_best_combination()
top_join_tab_count= (uint)(join_tab_ranges.head()->end -
join_tab_ranges.head()->start);
+ if (unlikely(thd->trace_started()))
+ print_final_join_order(this);
+
update_depend_map(this);
DBUG_RETURN(0);
}
@@ -10696,23 +11344,40 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
there inside the triggers.
*/
{ // Check const tables
- join->exec_const_cond=
- make_cond_for_table(thd, cond,
+ Item* const_cond= NULL;
+ const_cond= make_cond_for_table(thd, cond,
join->const_table_map,
(table_map) 0, -1, FALSE, FALSE);
/* Add conditions added by add_not_null_conds(). */
for (uint i= 0 ; i < join->const_tables ; i++)
- add_cond_and_fix(thd, &join->exec_const_cond,
+ add_cond_and_fix(thd, &const_cond,
join->join_tab[i].select_cond);
- DBUG_EXECUTE("where",print_where(join->exec_const_cond,"constants",
+ DBUG_EXECUTE("where",print_where(const_cond,"constants",
QT_ORDINARY););
- if (join->exec_const_cond && !join->exec_const_cond->is_expensive() &&
- !join->exec_const_cond->val_int())
+
+ if (const_cond)
{
- DBUG_PRINT("info",("Found impossible WHERE condition"));
- join->exec_const_cond= NULL;
- DBUG_RETURN(1); // Impossible const condition
+ Json_writer_object trace_const_cond(thd);
+ trace_const_cond.add("condition_on_constant_tables", const_cond);
+ if (const_cond->is_expensive())
+ {
+ trace_const_cond.add("evaluated", "false")
+ .add("cause", "expensive cond");
+ }
+ else
+ {
+ const bool const_cond_result = const_cond->val_int() != 0;
+ if (!const_cond_result)
+ {
+ DBUG_PRINT("info",("Found impossible WHERE condition"));
+ trace_const_cond.add("evaluated", "true")
+ .add("found", "impossible where");
+ join->exec_const_cond= NULL;
+ DBUG_RETURN(1);
+ }
+ }
+ join->exec_const_cond= const_cond;
}
if (join->table_count != join->const_tables)
@@ -10749,6 +11414,11 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
/*
Step #2: Extract WHERE/ON parts
*/
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_object trace_conditions(thd, "attaching_conditions_to_tables");
+ trace_conditions.add("original_condition", cond);
+ Json_writer_array trace_attached_comp(thd,
+ "attached_conditions_computation");
uint i;
for (i= join->top_join_tab_count - 1; i >= join->const_tables; i--)
{
@@ -10805,10 +11475,13 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
(!is_hash_join_key_no(tab->ref.key) &&
tab->table->intersect_keys.is_set(tab->ref.key))))
{
- /* Range uses longer key; Use this instead of ref on key */
- tab->type=JT_ALL;
- use_quick_range=1;
- tab->use_quick=1;
+ /* Range uses longer key; Use this instead of ref on key */
+ Json_writer_object ref_to_range(thd);
+ ref_to_range.add("ref_to_range", true);
+ ref_to_range.add("cause", "range uses longer key");
+ tab->type=JT_ALL;
+ use_quick_range=1;
+ tab->use_quick=1;
tab->ref.key= -1;
tab->ref.key_parts=0; // Don't use ref key.
join->best_positions[i].records_read= rows2double(tab->quick->records);
@@ -10888,7 +11561,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
below to check if we should use 'quick' instead.
*/
DBUG_PRINT("info", ("Item_int"));
- tmp= new (thd->mem_root) Item_int(thd, (longlong) 1, 1); // Always true
+ tmp= new (thd->mem_root) Item_bool(thd, true); // Always true
}
}
@@ -10967,6 +11640,11 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
sel->quick=tab->quick; // Use value from get_quick_...
sel->quick_keys.clear_all();
sel->needed_reg.clear_all();
+ if (is_hj && tab->rowid_filter)
+ {
+ delete tab->rowid_filter;
+ tab->rowid_filter= 0;
+ }
}
else
{
@@ -11026,7 +11704,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
Yet attributes of the just built condition are not needed.
Thus we call sel->cond->quick_fix_field for safety.
*/
- if (sel->cond && !sel->cond->fixed)
+ if (sel->cond && !sel->cond->is_fixed())
sel->cond->quick_fix_field();
if (sel->test_quick_select(thd, tab->keys,
@@ -11036,7 +11714,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
OPTION_FOUND_ROWS ?
HA_POS_ERROR :
join->unit->select_limit_cnt), 0,
- FALSE, FALSE) < 0)
+ FALSE, FALSE, FALSE) < 0)
{
/*
Before reporting "Impossible WHERE" for the whole query
@@ -11050,7 +11728,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
OPTION_FOUND_ROWS ?
HA_POS_ERROR :
join->unit->select_limit_cnt),0,
- FALSE, FALSE) < 0)
+ FALSE, FALSE, FALSE) < 0)
DBUG_RETURN(1); // Impossible WHERE
}
else
@@ -11292,6 +11970,23 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
if (!tab->bush_children)
i++;
}
+
+ if (unlikely(thd->trace_started()))
+ {
+ trace_attached_comp.end();
+ Json_writer_array trace_attached_summary(thd,
+ "attached_conditions_summary");
+ for (tab= first_depth_first_tab(join); tab;
+ tab= next_depth_first_tab(join, tab))
+ {
+ if (!tab->table)
+ continue;
+ Item *const cond = tab->select_cond;
+ Json_writer_object trace_one_table(thd);
+ trace_one_table.add_table_name(tab);
+ trace_one_table.add("attached", cond);
+ }
+ }
}
DBUG_RETURN(0);
}
@@ -12675,6 +13370,37 @@ bool error_if_full_join(JOIN *join)
}
+void JOIN_TAB::build_range_rowid_filter_if_needed()
+{
+ if (rowid_filter && !is_rowid_filter_built)
+ {
+ /**
+ The same handler object (table->file) is used both to build the filter
+ and to perform the primary table access for the main query.
+
+ To account for the time spent on building the filter, the handler's time
+ tracker is switched to the filter's tracker while the filter is being built
+ and switched back to the previous tracker once the build has finished.
+ */
+ Exec_time_tracker *table_tracker= table->file->get_time_tracker();
+ Rowid_filter_tracker *rowid_tracker= rowid_filter->get_tracker();
+ table->file->set_time_tracker(rowid_tracker->get_time_tracker());
+ rowid_tracker->start_tracking();
+ if (!rowid_filter->build())
+ {
+ is_rowid_filter_built= true;
+ }
+ else
+ {
+ delete rowid_filter;
+ rowid_filter= 0;
+ }
+ rowid_tracker->stop_tracking();
+ table->file->set_time_tracker(table_tracker);
+ }
+}
+
+
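
build_range_rowid_filter_if_needed() saves the handler's time tracker by hand, points it at the filter's tracker while the filter is filled, and restores it afterwards. The same save/restore contract can be expressed with a small scope guard; the sketch below is only an illustrative alternative with stand-in types, not the server's Exec_time_tracker or Rowid_filter classes:

template <typename T>
struct Scoped_swap
{
  T &slot;
  T saved;
  Scoped_swap(T &s, T temporary) : slot(s), saved(s) { slot= temporary; }
  ~Scoped_swap() { slot= saved; }
};

struct Tracker { /* timing state would live here */ };
struct Handler { Tracker *tracker= nullptr; };

static bool build_filter(Handler &h, Tracker &filter_tracker)
{
  Scoped_swap<Tracker*> guard(h.tracker, &filter_tracker);
  /* ... read rows through h; the time is charged to filter_tracker ... */
  return true;                        // h.tracker is restored on scope exit
}

int main()
{
  Handler h;
  Tracker t;
  return build_filter(h, t) ? 0 : 1;
}
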
/**
cleanup JOIN_TAB.
@@ -12694,6 +13420,11 @@ void JOIN_TAB::cleanup()
select= 0;
delete quick;
quick= 0;
+ if (rowid_filter)
+ {
+ delete rowid_filter;
+ rowid_filter= 0;
+ }
if (cache)
{
cache->free();
@@ -13066,7 +13797,8 @@ void JOIN::join_free()
!(select_options & SELECT_NO_UNLOCK) &&
!select_lex->subquery_in_having &&
(select_lex == (thd->lex->unit.fake_select_lex ?
- thd->lex->unit.fake_select_lex : &thd->lex->select_lex)))
+ thd->lex->unit.fake_select_lex :
+ thd->lex->first_select_lex())))
{
/*
TODO: unlock tables even if the join isn't top level select in the
@@ -13095,8 +13827,10 @@ void JOIN::join_free()
void JOIN::cleanup(bool full)
{
DBUG_ENTER("JOIN::cleanup");
- DBUG_PRINT("enter", ("full %u", (uint) full));
-
+ DBUG_PRINT("enter", ("select: %d (%p) join: %p full: %u",
+ select_lex->select_number, select_lex, this,
+ (uint) full));
+
if (full)
have_query_plan= QEP_DELETED;
@@ -13341,7 +14075,7 @@ static void update_depend_map_for_order(JOIN *join, ORDER *order)
order->used= 0;
// Not item_sum(), RAND() and no reference to table outside of sub select
if (!(order->depend_map & (OUTER_REF_TABLE_BIT | RAND_TABLE_BIT))
- && !order->item[0]->with_sum_func &&
+ && !order->item[0]->with_sum_func() &&
join->join_tab)
{
for (JOIN_TAB **tab=join->map2table;
@@ -13416,7 +14150,23 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond,
tab++)
tab->cached_eq_ref_table= FALSE;
- *simple_order= *join->join_tab[join->const_tables].on_expr_ref ? 0 : 1;
+ JOIN_TAB *head= join->join_tab + join->const_tables;
+ *simple_order= head->on_expr_ref[0] == NULL;
+ if (*simple_order && head->table->file->ha_table_flags() & HA_SLOW_RND_POS)
+ {
+ uint u1, u2, u3;
+ /*
+ normally the condition is (see filesort_use_addons())
+
+ length + sortlength <= max_length_for_sort_data
+
+ but for HA_SLOW_RND_POS tables we relax it a bit, as the alternative
+ is to use a temporary table, which is rather expensive.
+
+ TODO: proper cost estimation
+ */
+ *simple_order= filesort_use_addons(head->table, 0, &u1, &u2, &u3);
+ }
}
else
{
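
To make the relaxed check above concrete (numbers assumed for illustration): with max_length_for_sort_data = 1024, a 900-byte row plus a 200-byte sort key fails the normal length + sortlength <= max_length_for_sort_data test, so addon fields would normally be rejected and sorting would fall back to re-reading rows by position or going through a temporary table. For an engine flagged HA_SLOW_RND_POS both fallbacks are what is expensive, so the relaxed filesort_use_addons() call above may still accept packing the columns as addon fields and keep *simple_order set.
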
@@ -13432,7 +14182,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond,
for (order=first_order; order ; order=order->next)
{
table_map order_tables=order->item[0]->used_tables();
- if (order->item[0]->with_sum_func ||
+ if (order->item[0]->with_sum_func() ||
order->item[0]->with_window_func ||
/*
If the outer table of an outer join is const (either by itself or
@@ -13609,7 +14359,7 @@ ORDER *simple_remove_const(ORDER *order, COND *where)
ORDER *first= NULL, *prev= NULL;
for (; order; order= order->next)
{
- DBUG_ASSERT(!order->item[0]->with_sum_func); // should never happen
+ DBUG_ASSERT(!order->item[0]->with_sum_func()); // should never happen
if (!const_expression_in_where(where, order->item[0]))
{
if (!first)
@@ -13907,31 +14657,44 @@ finish:
FALSE otherwise
*/
-static bool check_simple_equality(THD *thd, const Item::Context &ctx,
- Item *left_item, Item *right_item,
- COND_EQUAL *cond_equal)
+bool check_simple_equality(THD *thd, const Item::Context &ctx,
+ Item *left_item, Item *right_item,
+ COND_EQUAL *cond_equal)
{
Item *orig_left_item= left_item;
Item *orig_right_item= right_item;
- if (left_item->type() == Item::REF_ITEM &&
- ((Item_ref*)left_item)->ref_type() == Item_ref::VIEW_REF)
+ if (left_item->type() == Item::REF_ITEM)
{
- if (((Item_ref*)left_item)->get_depended_from())
- return FALSE;
- if (((Item_direct_view_ref*)left_item)->get_null_ref_table() !=
- NO_NULL_TABLE && !left_item->real_item()->used_tables())
- return FALSE;
- left_item= left_item->real_item();
+ Item_ref::Ref_Type left_ref= ((Item_ref*)left_item)->ref_type();
+
+ if (left_ref == Item_ref::VIEW_REF ||
+ left_ref == Item_ref::REF)
+ {
+ if (((Item_ref*)left_item)->get_depended_from())
+ return FALSE;
+ if (left_ref == Item_ref::VIEW_REF &&
+ ((Item_direct_view_ref*)left_item)->get_null_ref_table() !=
+ NO_NULL_TABLE &&
+ !left_item->real_item()->used_tables())
+ return FALSE;
+ left_item= left_item->real_item();
+ }
}
- if (right_item->type() == Item::REF_ITEM &&
- ((Item_ref*)right_item)->ref_type() == Item_ref::VIEW_REF)
+ if (right_item->type() == Item::REF_ITEM)
{
- if (((Item_ref*)right_item)->get_depended_from())
- return FALSE;
- if (((Item_direct_view_ref*)right_item)->get_null_ref_table() !=
- NO_NULL_TABLE && !right_item->real_item()->used_tables())
- return FALSE;
- right_item= right_item->real_item();
+ Item_ref::Ref_Type right_ref= ((Item_ref*)right_item)->ref_type();
+ if (right_ref == Item_ref::VIEW_REF ||
+ (right_ref == Item_ref::REF))
+ {
+ if (((Item_ref*)right_item)->get_depended_from())
+ return FALSE;
+ if (right_ref == Item_ref::VIEW_REF &&
+ ((Item_direct_view_ref*)right_item)->get_null_ref_table() !=
+ NO_NULL_TABLE &&
+ !right_item->real_item()->used_tables())
+ return FALSE;
+ right_item= right_item->real_item();
+ }
}
if (left_item->type() == Item::FIELD_ITEM &&
right_item->type() == Item::FIELD_ITEM &&
@@ -14368,7 +15131,7 @@ COND *Item_cond_and::build_equal_items(THD *thd,
if (!cond_args->elements &&
!cond_equal.current_level.elements &&
!eq_list.elements)
- return new (thd->mem_root) Item_int(thd, (longlong) 1, 1);
+ return new (thd->mem_root) Item_bool(thd, true);
List_iterator_fast<Item_equal> it(cond_equal.current_level);
while ((item_equal= it++))
@@ -14475,7 +15238,7 @@ COND *Item_func_eq::build_equal_items(THD *thd,
Item_equal *item_equal;
int n= cond_equal.current_level.elements + eq_list.elements;
if (n == 0)
- return new (thd->mem_root) Item_int(thd, (longlong) 1, 1);
+ return new (thd->mem_root) Item_bool(thd, true);
else if (n == 1)
{
if ((item_equal= cond_equal.current_level.pop()))
@@ -14869,7 +15632,7 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels,
List<Item> eq_list;
Item_func_eq *eq_item= 0;
if (((Item *) item_equal)->const_item() && !item_equal->val_int())
- return new (thd->mem_root) Item_int(thd, (longlong) 0, 1);
+ return new (thd->mem_root) Item_bool(thd, false);
Item *item_const= item_equal->get_const();
Item_equal_fields_iterator it(*item_equal);
Item *head;
@@ -14877,12 +15640,12 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels,
Item *current_sjm_head= NULL;
DBUG_ASSERT(!cond ||
- cond->type() == Item::INT_ITEM ||
+ cond->is_bool_literal() ||
(cond->type() == Item::FUNC_ITEM &&
((Item_func *) cond)->functype() == Item_func::EQ_FUNC) ||
(cond->type() == Item::COND_ITEM &&
((Item_func *) cond)->functype() == Item_func::COND_AND_FUNC));
-
+
/*
Pick the "head" item: the constant one or the first in the join order
(if the first in the join order happends to be inside an SJM nest, that's
@@ -14979,11 +15742,9 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels,
*/
Item *head_item= (!item_const && current_sjm &&
current_sjm_head != field_item) ? current_sjm_head: head;
- Item *head_real_item= head_item->real_item();
- if (head_real_item->type() == Item::FIELD_ITEM)
- head_item= head_real_item;
-
- eq_item= new (thd->mem_root) Item_func_eq(thd, field_item->real_item(), head_item);
+ eq_item= new (thd->mem_root) Item_func_eq(thd,
+ field_item->remove_item_direct_ref(),
+ head_item->remove_item_direct_ref());
if (!eq_item || eq_item->set_cmp_func())
return 0;
@@ -14998,13 +15759,13 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels,
cond AND eq_1 AND eq_2 AND eq_3 AND ...
- 'cond' is a parameter for this function, which may be NULL, an Item_int(1),
+ 'cond' is a parameter for this function, which may be NULL, an Item_bool(1),
or an Item_func_eq or an Item_cond_and.
We want to return a well-formed condition: no nested Item_cond_and objects,
or Item_cond_and with a single child:
- if 'cond' is an Item_cond_and, we add eq_i as its tail
- - if 'cond' is Item_int(1), we return eq_i
+ - if 'cond' is Item_bool(1), we return eq_i
- otherwise, we create our own Item_cond_and and put 'cond' at the front of
it.
- if we have only one condition to return, we don't create an Item_cond_and
@@ -15016,10 +15777,10 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels,
switch (eq_list.elements)
{
case 0:
- res= cond ? cond : new (thd->mem_root) Item_int(thd, (longlong) 1, 1);
+ res= cond ? cond : new (thd->mem_root) Item_bool(thd, true);
break;
case 1:
- if (!cond || cond->type() == Item::INT_ITEM)
+ if (!cond || cond->is_bool_literal())
res= eq_item;
break;
default:
@@ -15070,6 +15831,7 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels,
@param cond condition to process
@param cond_equal multiple equalities to take into consideration
@param table_join_idx index to tables determining field preference
+ @param do_substitution if false: do not do any field substitution
@note
At the first glance full sort of fields in multiple equality
@@ -15109,7 +15871,8 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels,
static COND* substitute_for_best_equal_field(THD *thd, JOIN_TAB *context_tab,
COND *cond,
COND_EQUAL *cond_equal,
- void *table_join_idx)
+ void *table_join_idx,
+ bool do_substitution)
{
Item_equal *item_equal;
COND *org_cond= cond; // Return this in case of fatal error
@@ -15138,7 +15901,8 @@ static COND* substitute_for_best_equal_field(THD *thd, JOIN_TAB *context_tab,
{
Item *new_item= substitute_for_best_equal_field(thd, context_tab,
item, cond_equal,
- table_join_idx);
+ table_join_idx,
+ do_substitution);
/*
This works OK with PS/SP re-execution as changes are made to
the arguments of AND/OR items only
@@ -15152,8 +15916,12 @@ static COND* substitute_for_best_equal_field(THD *thd, JOIN_TAB *context_tab,
COND *eq_cond= 0;
List_iterator_fast<Item_equal> it(cond_equal->current_level);
bool false_eq_cond= FALSE;
+ bool all_deleted= true;
while ((item_equal= it++))
{
+ if (item_equal->get_extraction_flag() == DELETION_FL)
+ continue;
+ all_deleted= false;
eq_cond= eliminate_item_equal(thd, eq_cond, cond_equal->upper_levels,
item_equal);
if (!eq_cond)
@@ -15161,7 +15929,7 @@ static COND* substitute_for_best_equal_field(THD *thd, JOIN_TAB *context_tab,
eq_cond= 0;
break;
}
- else if (eq_cond->type() == Item::INT_ITEM && !eq_cond->val_bool())
+ else if (eq_cond->is_bool_literal() && !eq_cond->val_bool())
{
/*
This occurs when eliminate_item_equal() founds that cond is
@@ -15186,13 +15954,13 @@ static COND* substitute_for_best_equal_field(THD *thd, JOIN_TAB *context_tab,
else
{
/* Do not add an equality condition if it's always true */
- if (eq_cond->type() != Item::INT_ITEM &&
+ if (!eq_cond->is_bool_literal() &&
cond_list->push_front(eq_cond, thd->mem_root))
eq_cond= 0;
}
}
}
- if (!eq_cond)
+ if (!eq_cond && !all_deleted)
{
/*
We are out of memory doing the transformation.
@@ -15211,10 +15979,12 @@ static COND* substitute_for_best_equal_field(THD *thd, JOIN_TAB *context_tab,
cond_equal= item_equal->upper_levels;
if (cond_equal && cond_equal->current_level.head() == item_equal)
cond_equal= cond_equal->upper_levels;
+ if (item_equal->get_extraction_flag() == DELETION_FL)
+ return 0;
cond= eliminate_item_equal(thd, 0, cond_equal, item_equal);
return cond ? cond : org_cond;
}
- else
+ else if (do_substitution)
{
while (cond_equal)
{
@@ -15265,7 +16035,7 @@ static void update_const_equal_items(THD *thd, COND *cond, JOIN_TAB *tab,
Item_func::COND_AND_FUNC));
}
else if (cond->type() == Item::FUNC_ITEM &&
- ((Item_cond*) cond)->functype() == Item_func::MULT_EQUAL_FUNC)
+ ((Item_func*) cond)->functype() == Item_func::MULT_EQUAL_FUNC)
{
Item_equal *item_equal= (Item_equal *) cond;
bool contained_const= item_equal->get_const() != NULL;
@@ -15460,7 +16230,7 @@ propagate_cond_constants(THD *thd, I_List<COND_CMP> *save_list,
(((Item_func*) cond)->functype() == Item_func::EQ_FUNC ||
((Item_func*) cond)->functype() == Item_func::EQUAL_FUNC))
{
- Item_func_eq *func=(Item_func_eq*) cond;
+ Item_bool_func2 *func= dynamic_cast<Item_bool_func2*>(cond);
Item **args= func->arguments();
bool left_const= args[0]->const_item() && !args[0]->is_expensive();
bool right_const= args[1]->const_item() && !args[1]->is_expensive();
@@ -15712,7 +16482,7 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top,
conds= and_conds(join->thd, conds, table->on_expr);
conds->top_level_item();
/* conds is always a new item as both cond and on_expr existed */
- DBUG_ASSERT(!conds->fixed);
+ DBUG_ASSERT(!conds->is_fixed());
conds->fix_fields(join->thd, &conds);
}
else
@@ -16185,6 +16955,8 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
double cost, rec_count;
table_map reopt_remaining_tables= last_remaining_tables;
uint i;
+ THD *thd= join->thd;
+ Json_writer_temp_disable trace_wo_join_buffering(thd);
if (first_tab > join->const_tables)
{
@@ -16219,7 +16991,7 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
{
JOIN_TAB *rs= join->positions[i].table;
POSITION pos, loose_scan_pos;
-
+
if ((i == first_tab && first_alt) || join->positions[i].use_join_buffer)
{
/* Find the best access method that would not use join buffering */
@@ -16283,12 +17055,24 @@ optimize_cond(JOIN *join, COND *conds,
that occurs in a function set a pointer to the multiple equality
predicate. Substitute a constant instead of this field if the
multiple equality contains a constant.
- */
+ */
+
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_object trace_cond(thd, "condition_processing");
+ trace_cond.add("condition", join->conds == conds ? "WHERE" : "HAVING")
+ .add("original_condition", conds);
+
+ Json_writer_array trace_steps(thd, "steps");
DBUG_EXECUTE("where", print_where(conds, "original", QT_ORDINARY););
conds= build_equal_items(join, conds, NULL, join_list,
ignore_on_conds, cond_equal,
MY_TEST(flags & OPT_LINK_EQUAL_FIELDS));
DBUG_EXECUTE("where",print_where(conds,"after equal_items", QT_ORDINARY););
+ {
+ Json_writer_object equal_prop_wrapper(thd);
+ equal_prop_wrapper.add("transformation", "equality_propagation")
+ .add("resulting_condition", conds);
+ }
/* change field = field to field = const for each found field = const */
propagate_cond_constants(thd, (I_List<COND_CMP> *) 0, conds, conds);
@@ -16297,10 +17081,21 @@ optimize_cond(JOIN *join, COND *conds,
Remove all and-levels where CONST item != CONST item
*/
DBUG_EXECUTE("where",print_where(conds,"after const change", QT_ORDINARY););
+ {
+ Json_writer_object const_prop_wrapper(thd);
+ const_prop_wrapper.add("transformation", "constant_propagation")
+ .add("resulting_condition", conds);
+ }
conds= conds->remove_eq_conds(thd, cond_value, true);
if (conds && conds->type() == Item::COND_ITEM &&
((Item_cond*) conds)->functype() == Item_func::COND_AND_FUNC)
*cond_equal= &((Item_cond_and*) conds)->m_cond_equal;
+
+ {
+ Json_writer_object cond_removal_wrapper(thd);
+ cond_removal_wrapper.add("transformation", "trivial_condition_removal")
+ .add("resulting_condition", conds);
+ }
DBUG_EXECUTE("info",print_where(conds,"after remove", QT_ORDINARY););
}
DBUG_RETURN(conds);
@@ -16375,7 +17170,7 @@ void propagate_new_equalities(THD *thd, Item *cond,
}
}
else if (cond->type() == Item::FUNC_ITEM &&
- ((Item_cond*) cond)->functype() == Item_func::MULT_EQUAL_FUNC)
+ ((Item_func*) cond)->functype() == Item_func::MULT_EQUAL_FUNC)
{
Item_equal *equal_item;
List_iterator<Item_equal> it(*new_equalities);
@@ -16620,7 +17415,7 @@ Item_cond::remove_eq_conds(THD *thd, Item::cond_result *cond_value,
}
else if (and_level &&
new_item->type() == Item::FUNC_ITEM &&
- ((Item_cond*) new_item)->functype() ==
+ ((Item_func*) new_item)->functype() ==
Item_func::MULT_EQUAL_FUNC)
{
li.remove();
@@ -16762,7 +17557,8 @@ Item_bool_func2::remove_eq_conds(THD *thd, Item::cond_result *cond_value,
{
if (args[0]->eq(args[1], true))
{
- if (!args[0]->maybe_null || functype() == Item_func::EQUAL_FUNC)
+ if (*cond_value == Item::COND_FALSE ||
+ !args[0]->maybe_null || functype() == Item_func::EQUAL_FUNC)
return (COND*) 0; // Compare of identical items
}
}
@@ -16800,9 +17596,8 @@ Item_func_isnull::remove_eq_conds(THD *thd, Item::cond_result *cond_value,
{
Field *field= ((Item_field*) real_item)->field;
- if (((field->type() == MYSQL_TYPE_DATE) ||
- (field->type() == MYSQL_TYPE_DATETIME)) &&
- (field->flags & NOT_NULL_FLAG))
+ if ((field->flags & NOT_NULL_FLAG) &&
+ field->type_handler()->cond_notnull_field_isnull_to_field_eq_zero())
{
/* fix to replace 'NULL' dates with '0' (shreeve@uci.edu) */
/*
@@ -16818,7 +17613,7 @@ Item_func_isnull::remove_eq_conds(THD *thd, Item::cond_result *cond_value,
*/
- Item *item0= new(thd->mem_root) Item_int(thd, (longlong) 0, 1);
+ Item *item0= new(thd->mem_root) Item_bool(thd, false);
Item *eq_cond= new(thd->mem_root) Item_func_eq(thd, args[0], item0);
if (!eq_cond)
return this;
@@ -16888,7 +17683,7 @@ Item_func_isnull::remove_eq_conds(THD *thd, Item::cond_result *cond_value,
cond= new_cond;
/*
Item_func_eq can't be fixed after creation so we do not check
- cond->fixed, also it do not need tables so we use 0 as second
+ cond->is_fixed(), also it do not need tables so we use 0 as second
argument.
*/
cond->fix_fields(thd, &cond);
@@ -17048,60 +17843,6 @@ const_expression_in_where(COND *cond, Item *comp_item, Field *comp_field,
Create internal temporary table
****************************************************************************/
-/**
- Create field for temporary table from given field.
-
- @param thd Thread handler
- @param org_field field from which new field will be created
- @param name New field name
- @param table Temporary table
- @param item !=NULL if item->result_field should point to new field.
- This is relevant for how fill_record() is going to work:
- If item != NULL then fill_record() will update
- the record in the original table.
- If item == NULL then fill_record() will update
- the temporary table
-
- @retval
- NULL on error
- @retval
- new_created field
-*/
-
-Field *create_tmp_field_from_field(THD *thd, Field *org_field,
- LEX_CSTRING *name, TABLE *table,
- Item_field *item)
-{
- Field *new_field;
-
- new_field= org_field->make_new_field(thd->mem_root, table,
- table == org_field->table);
- if (new_field)
- {
- new_field->init(table);
- new_field->orig_table= org_field->orig_table;
- if (item)
- item->result_field= new_field;
- else
- new_field->field_name= *name;
- new_field->flags|= org_field->flags & NO_DEFAULT_VALUE_FLAG;
- if (org_field->maybe_null() || (item && item->maybe_null))
- new_field->flags&= ~NOT_NULL_FLAG; // Because of outer join
- if (org_field->type() == MYSQL_TYPE_VAR_STRING ||
- org_field->type() == MYSQL_TYPE_VARCHAR)
- table->s->db_create_options|= HA_OPTION_PACK_RECORD;
- else if (org_field->type() == FIELD_TYPE_DOUBLE)
- ((Field_double *) new_field)->not_fixed= TRUE;
- new_field->vcol_info= 0;
- new_field->cond_selectivity= 1.0;
- new_field->next_equal_field= NULL;
- new_field->option_list= NULL;
- new_field->option_struct= NULL;
- }
- return new_field;
-}
-
-
Field *Item::create_tmp_field_int(TABLE *table, uint convert_int_length)
{
const Type_handler *h= &type_handler_long;
@@ -17111,6 +17852,26 @@ Field *Item::create_tmp_field_int(TABLE *table, uint convert_int_length)
*this, table);
}
+Field *Item::tmp_table_field_from_field_type_maybe_null(TABLE *table,
+ Tmp_field_src *src,
+ const Tmp_field_param *param,
+ bool is_explicit_null)
+{
+ /*
+ The item->type() == CONST_ITEM case is exempted from this assertion
+ because fields for the counter are created with the help of Item_uint.
+ */
+ DBUG_ASSERT(!param->make_copy_field() || type() == CONST_ITEM);
+ DBUG_ASSERT(!is_result_field());
+ Field *result;
+ if ((result= tmp_table_field_from_field_type(table)))
+ {
+ if (result && is_explicit_null)
+ result->is_created_from_null_item= true;
+ }
+ return result;
+}
+
Field *Item_sum::create_tmp_field(bool group, TABLE *table)
{
@@ -17142,57 +17903,6 @@ Field *Item_sum::create_tmp_field(bool group, TABLE *table)
}
-static void create_tmp_field_from_item_finalize(THD *thd,
- Field *new_field,
- Item *item,
- Item ***copy_func,
- bool modify_item)
-{
- if (copy_func &&
- (item->is_result_field() ||
- (item->real_item()->is_result_field())))
- *((*copy_func)++) = item; // Save for copy_funcs
- if (modify_item)
- item->set_result_field(new_field);
- if (item->type() == Item::NULL_ITEM)
- new_field->is_created_from_null_item= TRUE;
-}
-
-
-/**
- Create field for temporary table using type of given item.
-
- @param thd Thread handler
- @param item Item to create a field for
- @param table Temporary table
- @param copy_func If set and item is a function, store copy of
- item in this array
- @param modify_item 1 if item->result_field should point to new
- item. This is relevent for how fill_record()
- is going to work:
- If modify_item is 1 then fill_record() will
- update the record in the original table.
- If modify_item is 0 then fill_record() will
- update the temporary table
-
- @retval
- 0 on error
- @retval
- new_created field
-*/
-
-static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
- Item ***copy_func, bool modify_item)
-{
- DBUG_ASSERT(thd == table->in_use);
- Field* new_field= item->create_tmp_field(false, table);
- if (new_field)
- create_tmp_field_from_item_finalize(thd, new_field, item,
- copy_func, modify_item);
- return new_field;
-}
-
-
/**
Create field for information schema table.
@@ -17226,19 +17936,199 @@ Field *Item::create_field_for_schema(THD *thd, TABLE *table)
/**
+ Create a temporary field for Item_field (or its descendant),
+ either direct or referenced by an Item_ref.
+*/
+Field *
+Item_field::create_tmp_field_from_item_field(TABLE *new_table,
+ Item_ref *orig_item,
+ const Tmp_field_param *param)
+{
+ DBUG_ASSERT(!is_result_field());
+ Field *result;
+ /*
+ If the item has to be able to store NULLs but the underlying field can't,
+ create_tmp_field_from_field() can't be used for tmp field creation.
+ */
+ if (((maybe_null && in_rollup) ||
+ (new_table->in_use->create_tmp_table_for_derived && /* for mat. view/dt */
+ orig_item && orig_item->maybe_null)) &&
+ !field->maybe_null())
+ {
+ /*
+ The item the ref points to may have maybe_null flag set while
+ the ref doesn't have it. This may happen for outer fields
+ when the outer query decided at some point after name resolution phase
+ that this field might be null. Take this into account here.
+ */
+ Record_addr rec(orig_item ? orig_item->maybe_null : maybe_null);
+ const Type_handler *handler= type_handler()->
+ type_handler_for_tmp_table(this);
+ result= handler->make_and_init_table_field(orig_item ? &orig_item->name : &name,
+ rec, *this, new_table);
+ }
+ else if (param->table_cant_handle_bit_fields() &&
+ field->type() == MYSQL_TYPE_BIT)
+ {
+ const Type_handler *handler= type_handler_long_or_longlong();
+ result= handler->make_and_init_table_field(&name,
+ Record_addr(maybe_null),
+ *this, new_table);
+ }
+ else
+ {
+ LEX_CSTRING *tmp= orig_item ? &orig_item->name : &name;
+ bool tmp_maybe_null= param->modify_item() ? maybe_null :
+ field->maybe_null();
+ result= field->create_tmp_field(new_table->in_use->mem_root, new_table,
+ tmp_maybe_null);
+ if (result)
+ result->field_name= *tmp;
+ }
+ if (result && param->modify_item())
+ result_field= result;
+ return result;
+}
+
+
+Field *Item_field::create_tmp_field_ex(TABLE *table,
+ Tmp_field_src *src,
+ const Tmp_field_param *param)
+{
+ DBUG_ASSERT(!is_result_field());
+ Field *result;
+ src->set_field(field);
+ if (!(result= create_tmp_field_from_item_field(table, NULL, param)))
+ return NULL;
+ if (field->eq_def(result))
+ src->set_default_field(field);
+ return result;
+}
+
+
+Field *Item_default_value::create_tmp_field_ex(TABLE *table,
+ Tmp_field_src *src,
+ const Tmp_field_param *param)
+{
+ if (field->default_value && (field->flags & BLOB_FLAG))
+ {
+ /*
+ We have to use a copy function when using a blob with a default value,
+ as we have to calculate the default value before we can use it.
+ */
+ get_tmp_field_src(src, param);
+ return tmp_table_field_from_field_type(table);
+ }
+ /*
+ Same code as in Item_field::create_tmp_field_ex, except no default field
+ handling
+ */
+ src->set_field(field);
+ return create_tmp_field_from_item_field(table, NULL, param);
+}
+
+
+Field *Item_ref::create_tmp_field_ex(TABLE *table,
+ Tmp_field_src *src,
+ const Tmp_field_param *param)
+{
+ Item *item= real_item();
+ DBUG_ASSERT(is_result_field());
+ if (item->type() == Item::FIELD_ITEM)
+ {
+ Field *result;
+ Item_field *field= (Item_field*) item;
+ Tmp_field_param prm2(*param);
+ prm2.set_modify_item(false);
+ src->set_field(field->field);
+ if (!(result= field->create_tmp_field_from_item_field(table, this, &prm2)))
+ return NULL;
+ if (param->modify_item())
+ result_field= result;
+ return result;
+ }
+ return Item_result_field::create_tmp_field_ex(table, src, param);
+}
+
+
+void Item_result_field::get_tmp_field_src(Tmp_field_src *src,
+ const Tmp_field_param *param)
+{
+ if (param->make_copy_field())
+ {
+ DBUG_ASSERT(result_field);
+ src->set_field(result_field);
+ }
+ else
+ {
+ src->set_item_result_field(this); // Save for copy_funcs
+ }
+}
+
+
+Field *Item_result_field::create_tmp_field_ex(TABLE *table,
+ Tmp_field_src *src,
+ const Tmp_field_param *param)
+{
+ /*
+ Possible Item types:
+ - Item_cache_wrapper (only for CREATE..SELECT ?)
+ - Item_func
+ - Item_subselect
+ */
+ DBUG_ASSERT(is_result_field());
+ DBUG_ASSERT(type() != NULL_ITEM);
+ get_tmp_field_src(src, param);
+ Field *result;
+ if ((result= tmp_table_field_from_field_type(table)) && param->modify_item())
+ result_field= result;
+ return result;
+}
+
+
+Field *Item_func_user_var::create_tmp_field_ex(TABLE *table,
+ Tmp_field_src *src,
+ const Tmp_field_param *param)
+{
+ DBUG_ASSERT(is_result_field());
+ DBUG_ASSERT(type() != NULL_ITEM);
+ get_tmp_field_src(src, param);
+ Field *result;
+ if ((result= create_table_field_from_handler(table)) && param->modify_item())
+ result_field= result;
+ return result;
+}
+
+
+Field *Item_func_sp::create_tmp_field_ex(TABLE *table,
+ Tmp_field_src *src,
+ const Tmp_field_param *param)
+{
+ Field *result;
+ get_tmp_field_src(src, param);
+ if ((result= sp_result_field->create_tmp_field(table->in_use->mem_root,
+ table)))
+ {
+ result->field_name= name;
+ if (param->modify_item())
+ result_field= result;
+ }
+ return result;
+}
+
+/**
Create field for temporary table.
- @param thd Thread handler
- @param table Temporary table
- @param item Item to create a field for
- @param type Type of item (normally item->type)
- @param copy_func If set and item is a function, store copy of item
+ @param table Temporary table
+ @param item Item to create a field for
+ @param type Type of item (normally item->type)
+ @param copy_func If set and item is a function, store copy of item
in this array
@param from_field if field will be created using other field as example,
pointer example field will be written here
- @param default_field If field has a default value field, store it here
- @param group 1 if we are going to do a relative group by on result
- @param modify_item 1 if item->result_field should point to new item.
+ @param default_field If field has a default value field, store it here
+ @param group 1 if we are going to do a relative group by on result
+ @param modify_item 1 if item->result_field should point to new item.
This is relevent for how fill_record() is going to
work:
If modify_item is 1 then fill_record() will update
@@ -17253,190 +18143,28 @@ Field *Item::create_field_for_schema(THD *thd, TABLE *table)
Set when using with rollup when we want to have
an exact copy of the field.
@retval
- 0 on error
+ 0 on error
@retval
new_created field
+ Create a temporary field for Item_field (or its descendant),
+ either direct or referenced by an Item_ref.
*/
-
-Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
+Field *create_tmp_field(TABLE *table, Item *item,
Item ***copy_func, Field **from_field,
Field **default_field,
bool group, bool modify_item,
bool table_cant_handle_bit_fields,
bool make_copy_field)
{
- Field *result;
- Item::Type orig_type= type;
- Item *orig_item= 0;
-
- DBUG_ASSERT(thd == table->in_use);
-
- if (type != Item::FIELD_ITEM &&
- item->real_item()->type() == Item::FIELD_ITEM)
- {
- orig_item= item;
- item= item->real_item();
- type= Item::FIELD_ITEM;
- }
-
- switch (type) {
- case Item::TYPE_HOLDER:
- case Item::SUM_FUNC_ITEM:
- {
- result= item->create_tmp_field(group, table);
- if (!result)
- my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATALERROR));
- return result;
- }
- case Item::DEFAULT_VALUE_ITEM:
- {
- Field *field= ((Item_default_value*) item)->field;
- if (field->default_value && (field->flags & BLOB_FLAG))
- {
- /*
- We have to use a copy function when using a blob with default value
- as the we have to calcuate the default value before we can use it.
- */
- return create_tmp_field_from_item(thd, item, table,
- (make_copy_field ? 0 : copy_func),
- modify_item);
- }
- }
- /* Fall through */
- case Item::FIELD_ITEM:
- case Item::CONTEXTUALLY_TYPED_VALUE_ITEM:
- case Item::INSERT_VALUE_ITEM:
- case Item::TRIGGER_FIELD_ITEM:
- {
- Item_field *field= (Item_field*) item;
- bool orig_modify= modify_item;
- if (orig_type == Item::REF_ITEM)
- modify_item= 0;
- /*
- If item have to be able to store NULLs but underlaid field can't do it,
- create_tmp_field_from_field() can't be used for tmp field creation.
- */
- if (((field->maybe_null && field->in_rollup) ||
- (thd->create_tmp_table_for_derived && /* for mat. view/dt */
- orig_item && orig_item->maybe_null)) &&
- !field->field->maybe_null())
- {
- bool save_maybe_null= FALSE;
- /*
- The item the ref points to may have maybe_null flag set while
- the ref doesn't have it. This may happen for outer fields
- when the outer query decided at some point after name resolution phase
- that this field might be null. Take this into account here.
- */
- if (orig_item)
- {
- save_maybe_null= item->maybe_null;
- item->maybe_null= orig_item->maybe_null;
- }
- result= create_tmp_field_from_item(thd, item, table, NULL,
- modify_item);
- *from_field= field->field;
- if (result && modify_item)
- field->result_field= result;
- if (orig_item)
- {
- item->maybe_null= save_maybe_null;
- result->field_name= orig_item->name;
- }
- }
- else if (table_cant_handle_bit_fields && field->field->type() ==
- MYSQL_TYPE_BIT)
- {
- const Type_handler *handler= item->type_handler_long_or_longlong();
- *from_field= field->field;
- if ((result=
- handler->make_and_init_table_field(&item->name,
- Record_addr(item->maybe_null),
- *item, table)))
- create_tmp_field_from_item_finalize(thd, result, item,
- copy_func, modify_item);
- if (result && modify_item)
- field->result_field= result;
- }
- else
- {
- LEX_CSTRING *tmp= orig_item ? &orig_item->name : &item->name;
- result= create_tmp_field_from_field(thd, (*from_field= field->field),
- tmp, table,
- modify_item ? field :
- NULL);
- }
-
- if (orig_type == Item::REF_ITEM && orig_modify)
- ((Item_ref*)orig_item)->set_result_field(result);
- /*
- Fields that are used as arguments to the DEFAULT() function already have
- their data pointers set to the default value during name resolution. See
- Item_default_value::fix_fields.
- */
- if (orig_type != Item::DEFAULT_VALUE_ITEM && field->field->eq_def(result))
- *default_field= field->field;
- return result;
- }
- /* Fall through */
- case Item::FUNC_ITEM:
- if (((Item_func *) item)->functype() == Item_func::FUNC_SP)
- {
- Item_func_sp *item_func_sp= (Item_func_sp *) item;
- Field *sp_result_field= item_func_sp->get_sp_result_field();
-
- if (make_copy_field)
- {
- DBUG_ASSERT(item_func_sp->result_field);
- *from_field= item_func_sp->result_field;
- }
- else
- {
- *((*copy_func)++)= item;
- }
- Field *result_field=
- create_tmp_field_from_field(thd,
- sp_result_field,
- &item_func_sp->name,
- table,
- NULL);
-
- if (modify_item)
- item->set_result_field(result_field);
-
- return result_field;
- }
-
- /* Fall through */
- case Item::COND_ITEM:
- case Item::SUBSELECT_ITEM:
- case Item::REF_ITEM:
- case Item::EXPR_CACHE_ITEM:
- if (make_copy_field)
- {
- DBUG_ASSERT(((Item_result_field*)item)->result_field);
- *from_field= ((Item_result_field*)item)->result_field;
- }
- /* Fall through */
- case Item::FIELD_AVG_ITEM:
- case Item::FIELD_STD_ITEM:
- case Item::PROC_ITEM:
- case Item::INT_ITEM:
- case Item::REAL_ITEM:
- case Item::DECIMAL_ITEM:
- case Item::STRING_ITEM:
- case Item::DATE_ITEM:
- case Item::NULL_ITEM:
- case Item::VARBIN_ITEM:
- case Item::CACHE_ITEM:
- case Item::WINDOW_FUNC_ITEM: // psergey-winfunc:
- case Item::PARAM_ITEM:
- return create_tmp_field_from_item(thd, item, table,
- (make_copy_field ? 0 : copy_func),
- modify_item);
- default: // Dosen't have to be stored
- return 0;
- }
+ Tmp_field_src src;
+ Tmp_field_param prm(group, modify_item, table_cant_handle_bit_fields,
+ make_copy_field);
+ Field *result= item->create_tmp_field_ex(table, &src, &prm);
+ *from_field= src.field();
+ *default_field= src.default_field();
+ if (src.item_result_field())
+ *((*copy_func)++)= src.item_result_field();
+ return result;
}
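
The rewritten create_tmp_field() above no longer switches on Item::Type: it packs the options into Tmp_field_param, lets each Item subclass decide through the virtual create_tmp_field_ex(), and collects the side outputs (source field, default field, item needing copy_funcs) in Tmp_field_src. A toy model of that dispatch shape, with invented minimal types rather than the server's classes:

#include <cstdio>

struct Field { const char *origin; };

struct Tmp_param { bool group, modify_item; };
struct Tmp_src   { Field *from_field= nullptr; };

struct Item
{
  virtual ~Item() {}
  virtual Field *create_tmp_field_ex(Tmp_src *src, const Tmp_param &param)= 0;
};

struct Item_field_like : Item
{
  Field base{ "base table column" };
  Field *create_tmp_field_ex(Tmp_src *src, const Tmp_param &) override
  {
    src->from_field= &base;                 // what used to be *from_field
    return new Field{ "copy of the column" };
  }
};

struct Item_func_like : Item
{
  Field *create_tmp_field_ex(Tmp_src *, const Tmp_param &) override
  {
    return new Field{ "field made from the function's result type" };
  }
};

static Field *create_tmp_field(Item *item, const Tmp_param &param,
                               Field **from_field)
{
  Tmp_src src;
  Field *result= item->create_tmp_field_ex(&src, param);
  *from_field= src.from_field;
  return result;
}

int main()
{
  Item_field_like col;
  Item_func_like fn;
  Field *from= nullptr;
  Field *a= create_tmp_field(&col, { false, true }, &from);
  Field *b= create_tmp_field(&fn,  { false, true }, &from);
  printf("%s / %s\n", a->origin, b->origin);
  delete a;
  delete b;
  return 0;
}
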
/*
@@ -17452,7 +18180,7 @@ setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps, uint field_count)
{
uint bitmap_size= bitmap_buffer_size(field_count);
- DBUG_ASSERT(table->s->virtual_fields == 0 && table->def_vcol_set == 0);
+ DBUG_ASSERT(table->s->virtual_fields == 0);
my_bitmap_init(&table->def_read_set, (my_bitmap_map*) bitmaps, field_count,
FALSE);
@@ -17682,19 +18410,15 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
table->temp_pool_slot = temp_pool_slot;
table->copy_blobs= 1;
table->in_use= thd;
- table->quick_keys.init();
- table->covering_keys.init();
- table->intersect_keys.init();
- table->keys_in_use_for_query.init();
table->no_rows_with_nulls= param->force_not_null_cols;
+ table->update_handler= NULL;
+ table->check_unique_buf= NULL;
table->s= share;
init_tmp_table_share(thd, share, "", 0, "(temporary)", tmpname);
share->blob_field= blob_field;
share->table_charset= param->table_charset;
share->primary_key= MAX_KEY; // Indicate no primary key
- share->keys_for_keyread.init();
- share->keys_in_use.init();
if (param->schema_table)
share->db= INFORMATION_SCHEMA_NAME;
@@ -17717,7 +18441,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
}
if (not_all_columns)
{
- if (item->with_sum_func && type != Item::SUM_FUNC_ITEM)
+ if (item->with_sum_func() && type != Item::SUM_FUNC_ITEM)
{
if (item->used_tables() & OUTER_REF_TABLE_BIT)
item->update_used_tables();
@@ -17747,7 +18471,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
{
Item *tmp_item;
Field *new_field=
- create_tmp_field(thd, table, arg, arg->type(), &copy_func,
+ create_tmp_field(table, arg, &copy_func,
tmp_from_field, &default_field[fieldnr],
group != 0,not_all_columns,
distinct, false);
@@ -17797,7 +18521,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
else
{
/*
- The last parameter to create_tmp_field() is a bit tricky:
+ The last parameter to create_tmp_field_ex() is a bit tricky:
We need to set it to 0 in union, to get fill_record() to modify the
temporary table.
@@ -17811,7 +18535,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
*/
Field *new_field= (param->schema_table) ?
item->create_field_for_schema(thd, table) :
- create_tmp_field(thd, table, item, type, &copy_func,
+ create_tmp_field(table, item, &copy_func,
tmp_from_field, &default_field[fieldnr],
group != 0,
!force_copy_fields &&
@@ -17825,8 +18549,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
*/
item->marker == 4 || param->bit_fields_as_long,
force_copy_fields);
-
- if (unlikely(!new_field))
+ if (!new_field)
{
if (unlikely(thd->is_fatal_error))
goto err; // Got OOM
@@ -18423,12 +19146,10 @@ bool Virtual_tmp_table::add(List<Spvar_definition> &field_list)
while ((cdef= it++))
{
Field *tmp;
- if (!(tmp= cdef->make_field(s, in_use->mem_root, 0,
- (uchar*) (f_maybe_null(cdef->pack_flag) ? "" : 0),
- f_maybe_null(cdef->pack_flag) ? 1 : 0,
- &cdef->field_name)))
+ Record_addr addr(f_maybe_null(cdef->pack_flag));
+ if (!(tmp= cdef->make_field(s, in_use->mem_root, &addr, &cdef->field_name)))
DBUG_RETURN(true);
- add(tmp);
+ add(tmp);
}
DBUG_RETURN(false);
}
@@ -18548,7 +19269,7 @@ bool Virtual_tmp_table::sp_set_all_fields_from_item_list(THD *thd,
bool Virtual_tmp_table::sp_set_all_fields_from_item(THD *thd, Item *value)
{
- DBUG_ASSERT(value->fixed);
+ DBUG_ASSERT(value->is_fixed());
DBUG_ASSERT(value->cols() == s->fields);
for (uint i= 0; i < value->cols(); i++)
{
@@ -18972,7 +19693,7 @@ create_internal_tmp_table_from_heap(THD *thd, TABLE *table,
We don't want this error to be converted to a warning, e.g. in case of
INSERT IGNORE ... SELECT.
*/
- table->file->print_error(error, MYF(ME_FATALERROR));
+ table->file->print_error(error, MYF(ME_FATAL));
DBUG_RETURN(1);
}
new_table= *table;
@@ -18995,7 +19716,7 @@ create_internal_tmp_table_from_heap(THD *thd, TABLE *table,
new_table.no_rows= table->no_rows;
if (create_internal_tmp_table(&new_table, table->key_info, start_recinfo,
recinfo,
- thd->lex->select_lex.options |
+ thd->lex->first_select_lex()->options |
thd->variables.option_bits))
goto err2;
if (open_tmp_table(&new_table))
@@ -19788,6 +20509,8 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
if (!join_tab->preread_init_done && join_tab->preread_init())
DBUG_RETURN(NESTED_LOOP_ERROR);
+ join_tab->build_range_rowid_filter_if_needed();
+
join->return_tab= join_tab;
if (join_tab->last_inner)
@@ -20711,7 +21434,8 @@ test_if_quick_select(JOIN_TAB *tab)
int res= tab->select->test_quick_select(tab->join->thd, tab->keys,
(table_map) 0, HA_POS_ERROR, 0,
- FALSE, /*remove where parts*/FALSE);
+ FALSE, /*remove where parts*/FALSE,
+ FALSE);
if (tab->explain_plan && tab->explain_plan->range_checked_fer)
tab->explain_plan->range_checked_fer->collect_data(tab->select->quick);
@@ -20733,6 +21457,9 @@ int join_init_read_record(JOIN_TAB *tab)
*/
if (tab->distinct && tab->remove_duplicates()) // Remove duplicates.
return 1;
+
+ tab->build_range_rowid_filter_if_needed();
+
if (tab->filesort && tab->sort_table()) // Sort table.
return 1;
@@ -20750,6 +21477,8 @@ int join_init_read_record(JOIN_TAB *tab)
tab->join->thd->reset_killed(););
if (!tab->preread_init_done && tab->preread_init())
return 1;
+
+
if (init_read_record(&tab->read_record, tab->join->thd, tab->table,
tab->select, tab->filesort_result, 1,1, FALSE))
return 1;
@@ -20793,6 +21522,9 @@ JOIN_TAB::sort_table()
JOIN::ordered_index_order_by :
JOIN::ordered_index_group_by));
rc= create_sort_index(join->thd, join, this, NULL);
+ /* Deactivate the rowid filter if it was used when creating the sort index */
+ if (rowid_filter)
+ table->file->rowid_filter_is_active= false;
return (rc != 0);
}
@@ -22634,9 +23366,9 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
res= select->test_quick_select(tab->join->thd, new_ref_key_map, 0,
(tab->join->select_options &
OPTION_FOUND_ROWS) ?
- HA_POS_ERROR :
- tab->join->unit->select_limit_cnt,TRUE,
- TRUE, FALSE) <= 0;
+ HA_POS_ERROR :
+ tab->join->unit->select_limit_cnt,TRUE,
+ TRUE, FALSE, FALSE) <= 0;
if (res)
{
select->cond= save_cond;
@@ -22738,7 +23470,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
join->select_options & OPTION_FOUND_ROWS ?
HA_POS_ERROR :
join->unit->select_limit_cnt,
- TRUE, FALSE, FALSE);
+ TRUE, FALSE, FALSE, FALSE);
if (cond_saved)
select->cond= saved_cond;
@@ -22813,6 +23545,19 @@ check_reverse_order:
join_read_first:join_read_last;
tab->type=JT_NEXT; // Read with index_first(), index_next()
+ /*
+ Currently usage of rowid filters is not supported in InnoDB
+ if the table is accessed by the primary key
+ */
+ if (tab->rowid_filter &&
+ tab->index == table->s->primary_key &&
+ table->file->primary_key_is_clustered())
+ {
+ tab->range_rowid_filter_info= 0;
+ delete tab->rowid_filter;
+ tab->rowid_filter= 0;
+ }
+
if (tab->pre_idx_push_select_cond)
{
tab->set_cond(tab->pre_idx_push_select_cond);
@@ -22846,6 +23591,12 @@ check_reverse_order:
tab->use_quick=1;
tab->ref.key= -1;
tab->ref.key_parts=0; // Don't use ref key.
+ tab->range_rowid_filter_info= 0;
+ if (tab->rowid_filter)
+ {
+ delete tab->rowid_filter;
+ tab->rowid_filter= 0;
+ }
tab->read_first_record= join_init_read_record;
if (tab->is_using_loose_index_scan())
tab->join->tmp_table_param.precomputed_group_by= TRUE;
@@ -23235,7 +23986,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
if (unlikely(copy_blobs(first_field)))
{
my_message(ER_OUTOFMEMORY, ER_THD(thd,ER_OUTOFMEMORY),
- MYF(ME_FATALERROR));
+ MYF(ME_FATAL));
error=0;
goto err;
}
@@ -23510,12 +24261,7 @@ find_order_in_list(THD *thd, Ref_ptr_array ref_pointer_array,
uint counter;
enum_resolution_type resolution;
- /*
- Local SP variables may be int but are expressions, not positions.
- (And they can't be used before fix_fields is called for them).
- */
- if (order_item->type() == Item::INT_ITEM && order_item->basic_const_item() &&
- !from_window_spec)
+ if (order_item->is_order_clause_position() && !from_window_spec)
{ /* Order by position */
uint count;
if (order->counter_used)
@@ -23629,7 +24375,7 @@ find_order_in_list(THD *thd, Ref_ptr_array ref_pointer_array,
inspite of that fix_fields() calls find_item_in_list() one more
time.
- We check order_item->fixed because Item_func_group_concat can put
+ We check order_item->is_fixed() because Item_func_group_concat can put
arguments for which fix_fields already was called.
*/
if (order_item->fix_fields_if_needed_for_order_by(thd, order->item) ||
@@ -23689,19 +24435,21 @@ int setup_order(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables,
return 1;
}
+ if (!(*order->item)->with_sum_func())
+ continue;
+
/*
UNION queries cannot be used with an aggregate function in
an ORDER BY clause
*/
- if (for_union && (*order->item)->with_sum_func)
+ if (for_union)
{
my_error(ER_AGGREGATE_ORDER_FOR_UNION, MYF(0), number);
return 1;
}
- if (from_window_spec && (*order->item)->with_sum_func &&
- (*order->item)->type() != Item::SUM_FUNC_ITEM)
+ if (from_window_spec && (*order->item)->type() != Item::SUM_FUNC_ITEM)
(*order->item)->split_sum_func(thd, ref_pointer_array,
all_fields, SPLIT_SUM_SELECT);
}
@@ -23758,7 +24506,7 @@ setup_group(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables,
all_fields, true, true, from_window_spec))
return 1;
(*ord->item)->marker= UNDEF_POS; /* Mark found */
- if ((*ord->item)->with_sum_func && context_analysis_place == IN_GROUP_BY)
+ if ((*ord->item)->with_sum_func() && context_analysis_place == IN_GROUP_BY)
{
my_error(ER_WRONG_GROUP_FIELD, MYF(0), (*ord->item)->full_name());
return 1;
@@ -23771,7 +24519,7 @@ setup_group(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables,
my_error(ER_WINDOW_FUNCTION_IN_WINDOW_SPEC, MYF(0));
return 1;
}
- if (from_window_spec && (*ord->item)->with_sum_func &&
+ if (from_window_spec && (*ord->item)->with_sum_func() &&
(*ord->item)->type() != Item::SUM_FUNC_ITEM)
(*ord->item)->split_sum_func(thd, ref_pointer_array,
all_fields, SPLIT_SUM_SELECT);
@@ -23921,7 +24669,7 @@ create_distinct_group(THD *thd, Ref_ptr_array ref_pointer_array,
li.rewind();
while ((item=li++))
{
- if (!item->const_item() && !item->with_sum_func && !item->marker)
+ if (!item->const_item() && !item->with_sum_func() && !item->marker)
{
/*
Don't put duplicate columns from the SELECT list into the
@@ -24018,9 +24766,11 @@ count_field_types(SELECT_LEX *select_lex, TMP_TABLE_PARAM *param,
}
else
{
+ With_sum_func_cache *cache= field->get_with_sum_func_cache();
param->func_count++;
- if (reset_with_sum_func)
- field->with_sum_func=0;
+ // "field" can point to Item_std_field, so "cache" can be NULL here.
+ if (reset_with_sum_func && cache)
+ cache->reset_with_sum_func();
}
}
}
@@ -24160,7 +24910,7 @@ void calc_group_buffer(TMP_TABLE_PARAM *param, ORDER *group)
{
/*
Group strings are taken as varstrings and require an length field.
- A field is not yet created by create_tmp_field()
+ A field is not yet created by create_tmp_field_ex()
and the sizes should match up.
*/
key_length+= group_item->max_length + HA_KEY_BLOB_LENGTH;
@@ -24170,7 +24920,7 @@ void calc_group_buffer(TMP_TABLE_PARAM *param, ORDER *group)
default:
/* This case should never be choosen */
DBUG_ASSERT(0);
- my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATALERROR));
+ my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATAL));
}
}
parts++;
@@ -24424,7 +25174,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
real_pos->real_type() == Item::SUBSELECT_ITEM ||
real_pos->type() == Item::CACHE_ITEM ||
real_pos->type() == Item::COND_ITEM) &&
- !real_pos->with_sum_func)
+ !real_pos->with_sum_func())
{ // Save for send fields
LEX_CSTRING real_name= pos->name;
pos= real_pos;
@@ -24435,7 +25185,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
on how the value is to be used: In some cases this may be an
argument in a group function, like: IF(ISNULL(col),0,COUNT(*))
*/
- if (!(pos=new (thd->mem_root) Item_copy_string(thd, pos)))
+ if (!(pos= pos->type_handler()->create_item_copy(thd, pos)))
goto err;
if (i < border) // HAVING, ORDER and GROUP BY
{
@@ -24490,8 +25240,8 @@ copy_fields(TMP_TABLE_PARAM *param)
(*ptr->do_copy)(ptr);
List_iterator_fast<Item> it(param->copy_funcs);
- Item_copy_string *item;
- while ((item = (Item_copy_string*) it++))
+ Item_copy *item;
+ while ((item= (Item_copy*) it++))
item->copy();
}
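
The two hunks above swap the hard-coded Item_copy_string for a copy item obtained from the value's type handler, so copy_fields() only relies on the generic Item_copy interface. A minimal sketch of that dispatch pattern, with made-up class names standing in for the real Item hierarchy (this is illustrative only, not the MariaDB implementation):

#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Hypothetical stand-ins for Item_copy and its subclasses: each one caches
// a value of a specific type, but callers only see the common interface.
struct Copy_item
{
  virtual void copy()= 0;                 // re-read the source into the cache
  virtual void print(std::ostream &)= 0;
  virtual ~Copy_item() {}
};

struct Copy_string : Copy_item
{
  const std::string *src; std::string cached;
  explicit Copy_string(const std::string *s) : src(s) {}
  void copy() override { cached= *src; }
  void print(std::ostream &out) override { out << cached << '\n'; }
};

struct Copy_int : Copy_item
{
  const long long *src; long long cached= 0;
  explicit Copy_int(const long long *s) : src(s) {}
  void copy() override { cached= *src; }
  void print(std::ostream &out) override { out << cached << '\n'; }
};

int main()
{
  std::string s= "foo";
  long long n= 42;
  // The per-type decision is made once, at setup time (the analogue of
  // type_handler()->create_item_copy()); the copy loop stays type-agnostic,
  // like the loop over param->copy_funcs above.
  std::vector<std::unique_ptr<Copy_item>> copy_funcs;
  copy_funcs.push_back(std::unique_ptr<Copy_item>(new Copy_string(&s)));
  copy_funcs.push_back(std::unique_ptr<Copy_item>(new Copy_int(&n)));
  for (auto &c : copy_funcs)
  {
    c->copy();
    c->print(std::cout);
  }
  return 0;
}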
@@ -24633,7 +25383,7 @@ change_to_use_tmp_fields(THD *thd, Ref_ptr_array ref_pointer_array,
for (uint i= 0; (item= it++); i++)
{
Field *field;
- if ((item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM) ||
+ if ((item->with_sum_func() && item->type() != Item::SUM_FUNC_ITEM) ||
item->with_window_func)
item_field= item;
else if (item->type() == Item::FIELD_ITEM)
@@ -24942,7 +25692,7 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab)
}
if (unlikely(thd->is_fatal_error))
DBUG_RETURN(TRUE);
- if (!cond->fixed)
+ if (!cond->is_fixed())
{
Item *tmp_item= (Item*) cond;
cond->fix_fields(thd, &tmp_item);
@@ -25161,7 +25911,7 @@ bool JOIN::rollup_init()
Marking the expression item as 'with_sum_func' will ensure this.
*/
if (changed)
- item->with_sum_func= 1;
+ item->get_with_sum_func_cache()->set_with_sum_func();
}
}
return 0;
@@ -25503,11 +26253,13 @@ int print_explain_message_line(select_result_sink *result,
item_list.push_back(item_null, mem_root);
/* `rows` */
+ StringBuffer<64> rows_str;
if (rows)
{
- item_list.push_back(new (mem_root) Item_int(thd, *rows,
- MY_INT64_NUM_DECIMAL_DIGITS),
- mem_root);
+ rows_str.append_ulonglong((ulonglong)(*rows));
+ item_list.push_back(new (mem_root)
+ Item_string_sys(thd, rows_str.ptr(),
+ rows_str.length()), mem_root);
}
else
item_list.push_back(item_null, mem_root);
@@ -25607,7 +26359,7 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
filesort)))
return 1;
}
-
+ // psergey-todo: data for filtering!
tracker= &eta->tracker;
jbuf_tracker= &eta->jbuf_tracker;
@@ -25649,7 +26401,8 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
*/
if (real_table->merged_for_insert)
{
- TABLE_LIST *view_child= real_table->view->select_lex.table_list.first;
+ TABLE_LIST *view_child=
+ real_table->view->first_select_lex()->table_list.first;
for (;view_child; view_child= view_child->next_local)
{
if (view_child->table == table)
@@ -25707,6 +26460,22 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
// psergey-todo: ^ check for error return code
/* Build "key", "key_len", and "ref" */
+
+ if (rowid_filter)
+ {
+ Range_rowid_filter *range_filter= (Range_rowid_filter *) rowid_filter;
+ QUICK_SELECT_I *quick= range_filter->get_select()->quick;
+
+ Explain_rowid_filter *erf= new (thd->mem_root) Explain_rowid_filter;
+ erf->quick= quick->get_explain(thd->mem_root);
+ erf->selectivity= range_rowid_filter_info->selectivity;
+ erf->rows= quick->records;
+ if (!(erf->tracker= new Rowid_filter_tracker(thd->lex->analyze_stmt)))
+ return 1;
+ rowid_filter->set_tracker(erf->tracker);
+ eta->rowid_filter= erf;
+ }
+
if (tab_type == JT_NEXT)
{
key_info= table->key_info+index;
@@ -26111,8 +26880,9 @@ int JOIN::save_explain_data_intern(Explain_query *output,
{
JOIN *join= this; /* Legacy: this code used to be a non-member function */
DBUG_ENTER("JOIN::save_explain_data_intern");
- DBUG_PRINT("info", ("Select %p, type %s, message %s",
- join->select_lex, join->select_lex->type,
+ DBUG_PRINT("info", ("Select %p (%u), type %s, message %s",
+ join->select_lex, join->select_lex->select_number,
+ join->select_lex->type,
message ? message : "NULL"));
DBUG_ASSERT(have_query_plan == QEP_AVAILABLE);
/* fake_select_lex is created/printed by Explain_union */
@@ -26138,7 +26908,7 @@ int JOIN::save_explain_data_intern(Explain_query *output,
explain->select_id= join->select_lex->select_number;
explain->select_type= join->select_lex->type;
- explain->linkage= select_lex->linkage;
+ explain->linkage= select_lex->get_linkage();
explain->using_temporary= need_tmp;
explain->using_filesort= need_order_arg;
/* Setting explain->message means that all other members are invalid */
@@ -26161,7 +26931,7 @@ int JOIN::save_explain_data_intern(Explain_query *output,
explain->select_id= select_lex->select_number;
explain->select_type= select_lex->type;
- explain->linkage= select_lex->linkage;
+ explain->linkage= select_lex->get_linkage();
explain->using_temporary= need_tmp;
explain->using_filesort= need_order_arg;
explain->message= "Storage engine handles GROUP BY";
@@ -26184,8 +26954,8 @@ int JOIN::save_explain_data_intern(Explain_query *output,
join->select_lex->set_explain_type(true);
xpl_sel->select_id= join->select_lex->select_number;
xpl_sel->select_type= join->select_lex->type;
- xpl_sel->linkage= select_lex->linkage;
- xpl_sel->is_lateral= ((select_lex->linkage == DERIVED_TABLE_TYPE) &&
+ xpl_sel->linkage= select_lex->get_linkage();
+ xpl_sel->is_lateral= ((select_lex->get_linkage() == DERIVED_TABLE_TYPE) &&
(select_lex->uncacheable & UNCACHEABLE_DEPENDENT));
if (select_lex->master_unit()->derived)
xpl_sel->connection_type= Explain_node::EXPLAIN_NODE_DERIVED;
@@ -26267,24 +27037,33 @@ int JOIN::save_explain_data_intern(Explain_query *output,
output->add_node(xpl_sel);
}
- for (SELECT_LEX_UNIT *tmp_unit= join->select_lex->first_inner_unit();
- tmp_unit;
- tmp_unit= tmp_unit->next_unit())
+ /*
+ Don't try to add query plans for child selects if this select was pushed
+ down into a Smart Storage Engine:
+ - the entire statement was pushed down ("PUSHED SELECT"), or
+ - this derived table was pushed down ("PUSHED DERIVED")
+ */
+ if (!select_lex->pushdown_select && select_lex->type != pushed_derived_text)
{
- /*
- Display subqueries only if
- (1) they are not parts of ON clauses that were eliminated by table
- elimination.
- (2) they are not merged derived tables
- (3) they are not hanging CTEs (they are needed for execution)
- */
- if (!(tmp_unit->item && tmp_unit->item->eliminated) && // (1)
- (!tmp_unit->derived ||
- tmp_unit->derived->is_materialized_derived()) && // (2)
- !(tmp_unit->with_element &&
- (!tmp_unit->derived || !tmp_unit->derived->derived_result))) // (3)
- {
- explain->add_child(tmp_unit->first_select()->select_number);
+ for (SELECT_LEX_UNIT *tmp_unit= join->select_lex->first_inner_unit();
+ tmp_unit;
+ tmp_unit= tmp_unit->next_unit())
+ {
+ /*
+ Display subqueries only if
+ (1) they are not parts of ON clauses that were eliminated by table
+ elimination.
+ (2) they are not merged derived tables
+ (3) they are not hanging CTEs (they are needed for execution)
+ */
+ if (!(tmp_unit->item && tmp_unit->item->eliminated) && // (1)
+ (!tmp_unit->derived ||
+ tmp_unit->derived->is_materialized_derived()) && // (2)
+ !(tmp_unit->with_element &&
+ (!tmp_unit->derived || !tmp_unit->derived->derived_result))) // (3)
+ {
+ explain->add_child(tmp_unit->first_select()->select_number);
+ }
}
}
@@ -26317,7 +27096,16 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
THD *thd=join->thd;
select_result *result=join->result;
DBUG_ENTER("select_describe");
-
+
+ if (join->select_lex->pushdown_select)
+ {
+ /*
+ The whole statement was pushed down to a Smart Storage Engine. Do not
+ attempt to produce a query plan locally.
+ */
+ DBUG_VOID_RETURN;
+ }
+
/* Update the QPF with latest values of using_temporary, using_filesort */
for (SELECT_LEX_UNIT *unit= join->select_lex->first_inner_unit();
unit;
@@ -26331,7 +27119,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
for such queries, we'll get here before having called
subquery_expr->fix_fields(), which will cause failure to
*/
- if (unit->item && !unit->item->fixed)
+ if (unit->item && !unit->item->is_fixed())
{
Item *ref= unit->item;
if (unit->item->fix_fields(thd, &ref))
@@ -26363,6 +27151,7 @@ bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result)
DBUG_ENTER("mysql_explain_union");
bool res= 0;
SELECT_LEX *first= unit->first_select();
+ bool is_pushed_union= unit->derived && unit->derived->pushdown_derived;
for (SELECT_LEX *sl= first; sl; sl= sl->next_select())
{
@@ -26380,7 +27169,10 @@ bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result)
}
if (!(res= unit->prepare(unit->derived, result,
SELECT_NO_UNLOCK | SELECT_DESCRIBE)))
- res= unit->exec();
+ {
+ if (!is_pushed_union)
+ res= unit->exec();
+ }
}
else
{
@@ -26398,6 +27190,13 @@ bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result)
first->options | thd->variables.option_bits | SELECT_DESCRIBE,
result, unit, first);
}
+
+ if (unit->derived && unit->derived->pushdown_derived)
+ {
+ delete unit->derived->pushdown_derived;
+ unit->derived->pushdown_derived= NULL;
+ }
+
DBUG_RETURN(res || thd->is_error());
}
@@ -26655,7 +27454,8 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str,
// A view
if (!(belong_to_view &&
- belong_to_view->compact_view_format))
+ belong_to_view->compact_view_format) &&
+ !(query_type & QT_ITEM_IDENT_SKIP_DB_NAMES))
{
append_identifier(thd, str, &view_db);
str->append('.');
@@ -26684,7 +27484,8 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str,
// A normal table
if (!(belong_to_view &&
- belong_to_view->compact_view_format))
+ belong_to_view->compact_view_format) &&
+ !(query_type & QT_ITEM_IDENT_SKIP_DB_NAMES))
{
append_identifier(thd, str, &db);
str->append('.');
@@ -26771,6 +27572,18 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
{
str->append("/* select#");
str->append_ulonglong(select_number);
+ if (thd->lex->describe & DESCRIBE_EXTENDED2)
+ {
+ str->append("/");
+ str->append_ulonglong(nest_level);
+
+ if (master_unit()->fake_select_lex &&
+ master_unit()->first_select() == this)
+ {
+ str->append(" Filter Select: ");
+ master_unit()->fake_select_lex->print(thd, str, query_type);
+ }
+ }
str->append(" */ ");
}
@@ -26802,18 +27615,21 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
str->append(STRING_WITH_LEN("sql_buffer_result "));
if (options & OPTION_FOUND_ROWS)
str->append(STRING_WITH_LEN("sql_calc_found_rows "));
- switch (sql_cache)
+ if (this == parent_lex->first_select_lex())
{
- case SQL_NO_CACHE:
- str->append(STRING_WITH_LEN("sql_no_cache "));
- break;
- case SQL_CACHE:
- str->append(STRING_WITH_LEN("sql_cache "));
- break;
- case SQL_CACHE_UNSPECIFIED:
- break;
- default:
- DBUG_ASSERT(0);
+ switch (parent_lex->sql_cache)
+ {
+ case LEX::SQL_NO_CACHE:
+ str->append(STRING_WITH_LEN("sql_no_cache "));
+ break;
+ case LEX::SQL_CACHE:
+ str->append(STRING_WITH_LEN("sql_cache "));
+ break;
+ case LEX::SQL_CACHE_UNSPECIFIED:
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
}
//Item List
@@ -27203,16 +28019,22 @@ void JOIN::cache_const_exprs()
/*
- Get a cost of reading rows_limit rows through index keynr.
+ Get the cost of using index keynr to read #LIMIT matching rows
@detail
- If there is a quick select, we try to use it.
- if there is a ref(const) access, we try to use it, too.
- quick and ref(const) use different cost formulas, so if both are possible
we should make a cost-based choice.
-
+
+ rows_limit is the number of rows we would need to read when using a full
+ index scan. This is generally higher than the N from the "LIMIT N" clause,
+ because there is a WHERE condition (a part of which is used to construct
+ the range access we are considering here).
+
@param tab JOIN_TAB with table access (is NULL for single-table
UPDATE/DELETE)
+ @param rows_limit See explanation above
@param read_time OUT Cost of reading using quick or ref(const) access.
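
A back-of-the-envelope illustration of how rows_limit relates to the user-visible LIMIT (a standalone sketch with invented numbers, not MariaDB code): when only a fraction of the rows satisfy the WHERE clause, a full index scan has to read proportionally more rows before it has produced N matches.

#include <cstdio>

int main()
{
  // Hypothetical figures for a query of the form
  //   SELECT ... WHERE <condition> ORDER BY key1 LIMIT 10
  double limit_n= 10;             // N from "LIMIT N"
  double where_selectivity= 0.02; // fraction of rows matching the WHERE clause

  // Expected number of index entries a full scan in key1 order must read
  // before it has produced limit_n matching rows:
  double rows_limit= limit_n / where_selectivity;
  printf("rows_limit = %g\n", rows_limit);  // 500
  return 0;
}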
@@ -27225,6 +28047,7 @@ void JOIN::cache_const_exprs()
static bool get_range_limit_read_cost(const JOIN_TAB *tab,
const TABLE *table,
+ ha_rows table_records,
uint keynr,
ha_rows rows_limit,
double *read_time)
@@ -27291,8 +28114,32 @@ static bool get_range_limit_read_cost(const JOIN_TAB *tab,
}
}
}
+
+ /*
+ Consider an example:
+
+ SELECT *
+ FROM t1
+ WHERE key1 BETWEEN 10 AND 20 AND col2='foo'
+ ORDER BY key1 LIMIT 10
+
+ If we were using a full index scan on key1, we would need to read this
+ many rows to get 10 matches:
+
+ 10 / selectivity(key1 BETWEEN 10 AND 20 AND col2='foo')
+
+ This is the number we get in rows_limit.
+ But we intend to use range access on key1. The rows returned by quick
+ select will satisfy the range part of the condition,
+ "key1 BETWEEN 10 and 20". We will still need to filter them with
+ the remainder condition, (col2='foo').
+
+ The selectivity of the range access is (best_rows/table_records). We need
+ to discount it from the rows_limit:
+ */
+ double rows_limit_for_quick= rows_limit * (best_rows / table_records);
- if (best_rows > rows_limit)
+ if (best_rows > rows_limit_for_quick)
{
/*
LIMIT clause specifies that we will need to read fewer records than
@@ -27301,7 +28148,7 @@ static bool get_range_limit_read_cost(const JOIN_TAB *tab,
only need 1/3rd of records, it will cost us 1/3rd of quick select's
read time)
*/
- best_cost *= rows_limit / best_rows;
+ best_cost *= rows_limit_for_quick / best_rows;
}
*read_time= best_cost;
res= true;
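
Continuing the worked example from the comment above, here is a small standalone sketch (invented numbers, not MariaDB code) of the scaling done through rows_limit_for_quick and the resulting cost adjustment:

#include <cstdio>

int main()
{
  // Assumed figures: a 10000-row table, the range "key1 BETWEEN 10 AND 20"
  // matches 1000 rows (best_rows), and a full index scan would need to read
  // 500 rows to produce the requested 10 matches (rows_limit).
  double table_records= 10000, best_rows= 1000, rows_limit= 500;
  double best_cost= 120.0;   // cost of reading all best_rows via the range

  // Rows returned by the range access already satisfy the range part of the
  // WHERE clause, so fewer of them are needed to reach the LIMIT:
  double rows_limit_for_quick= rows_limit * (best_rows / table_records); // 50

  if (best_rows > rows_limit_for_quick)
    best_cost *= rows_limit_for_quick / best_rows;  // 120 * 50/1000 = 6

  printf("rows_limit_for_quick=%g  adjusted cost=%g\n",
         rows_limit_for_quick, best_cost);
  return 0;
}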
@@ -27373,6 +28220,12 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
bool group= join && join->group && order == join->group_list;
ha_rows refkey_rows_estimate= table->quick_condition_rows;
const bool has_limit= (select_limit_arg != HA_POS_ERROR);
+ THD* thd= join ? join->thd : table->in_use;
+
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_object trace_cheaper_ordering(
+ thd, "reconsidering_access_paths_for_index_ordering");
+ trace_cheaper_ordering.add("clause", group ? "GROUP BY" : "ORDER BY");
/*
If not used with LIMIT, only use keys if the whole query can be
@@ -27402,16 +28255,21 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
uint tablenr= (uint)(tab - join->join_tab);
read_time= join->best_positions[tablenr].read_time;
for (uint i= tablenr+1; i < join->table_count; i++)
+ {
fanout*= join->best_positions[i].records_read; // fanout is always >= 1
+ // But selectivity is =< 1 :
+ fanout*= join->best_positions[i].cond_selectivity;
+ }
}
else
read_time= table->file->scan_time();
+ trace_cheaper_ordering.add("fanout", fanout);
/*
TODO: add cost of sorting here.
*/
read_time += COST_EPS;
-
+ trace_cheaper_ordering.add("read_time", read_time);
/*
Calculate the selectivity of the ref_key for REF_ACCESS. For
RANGE_ACCESS we use table->quick_condition_rows.
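
The fanout loop in this hunk now also multiplies in each later table's condition selectivity. A hedged numeric sketch of why that matters for the comparison against read_time (all figures are invented):

#include <cstdio>

int main()
{
  // Two tables joined after the one being reconsidered. records_read is the
  // per-table fanout (>= 1); cond_selectivity is the fraction of produced
  // row combinations that survive the attached conditions (<= 1).
  double records_read[2]=     { 20.0, 50.0 };
  double cond_selectivity[2]= { 0.1,  0.05 };

  double fanout_old= 1.0, fanout_new= 1.0;
  for (int i= 0; i < 2; i++)
  {
    fanout_old*= records_read[i];                       // 1000
    fanout_new*= records_read[i] * cond_selectivity[i]; // 5
  }
  // A smaller fanout means a given LIMIT translates into far fewer rows that
  // must actually be fetched from the first table in index order.
  printf("fanout without selectivity: %g, with selectivity: %g\n",
         fanout_old, fanout_new);
  return 0;
}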
@@ -27436,11 +28294,20 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
set_if_bigger(refkey_rows_estimate, 1);
}
+ if (tab)
+ trace_cheaper_ordering.add_table_name(tab);
+ else
+ trace_cheaper_ordering.add_table_name(table);
+ trace_cheaper_ordering.add("rows_estimation", refkey_rows_estimate);
+
+ Json_writer_array possible_keys(thd,"possible_keys");
for (nr=0; nr < table->s->keys ; nr++)
{
int direction;
ha_rows select_limit= select_limit_arg;
uint used_key_parts= 0;
+ Json_writer_object possible_key(thd);
+ possible_key.add("index", table->key_info[nr].name);
if (keys.is_set(nr) &&
(direction= test_if_order_by_key(join, order, table, nr,
@@ -27453,6 +28320,7 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
*/
DBUG_ASSERT (ref_key != (int) nr);
+ possible_key.add("can_resolve_order", true);
bool is_covering= (table->covering_keys.is_set(nr) ||
(table->file->index_flags(nr, 0, 1) &
HA_CLUSTERED_INDEX));
@@ -27548,6 +28416,24 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
*/
select_limit= (ha_rows) (select_limit < fanout ?
1 : select_limit/fanout);
+
+ /*
+ refkey_rows_estimate is E(#rows) produced by the table access
+ strategy that was picked without regard to ORDER BY ... LIMIT.
+
+ It will be used as the source of selectivity data.
+ Use table->cond_selectivity as a better estimate which includes
+ condition selectivity too.
+ */
+ {
+ // We use MY_MIN(...) because "Using LooseScan" queries have
+ // cond_selectivity=1, while refkey_rows_estimate gives a better
+ // estimate in that case.
+ refkey_rows_estimate= MY_MIN(refkey_rows_estimate,
+ ha_rows(table_records *
+ table->cond_selectivity));
+ }
+
/*
We assume that each of the tested indexes is not correlated
with ref_key. Thus, to select first N records we have to scan
@@ -27558,12 +28444,14 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
N/(refkey_rows_estimate/table_records) > table_records
<=> N > refkey_rows_estimate.
*/
+
if (select_limit > refkey_rows_estimate)
select_limit= table_records;
else
select_limit= (ha_rows) (select_limit *
(double) table_records /
refkey_rows_estimate);
+ possible_key.add("updated_limit", select_limit);
rec_per_key= keyinfo->actual_rec_per_key(keyinfo->user_defined_key_parts-1);
set_if_bigger(rec_per_key, 1);
/*
@@ -27580,12 +28468,14 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
index_scan_time= select_limit/rec_per_key *
MY_MIN(rec_per_key, table->file->scan_time());
double range_scan_time;
- if (get_range_limit_read_cost(tab, table, nr, select_limit,
- &range_scan_time))
+ if (get_range_limit_read_cost(tab, table, table_records, nr,
+ select_limit, &range_scan_time))
{
+ possible_key.add("range_scan_time", range_scan_time);
if (range_scan_time < index_scan_time)
index_scan_time= range_scan_time;
}
+ possible_key.add("index_scan_time", index_scan_time);
if ((ref_key < 0 && (group || table->force_index || is_covering)) ||
index_scan_time < read_time)
@@ -27596,17 +28486,29 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
table->covering_keys.is_set(ref_key)) ?
refkey_rows_estimate :
HA_POS_ERROR;
- if ((is_best_covering && !is_covering) ||
- (is_covering && refkey_select_limit < select_limit))
+ if (is_best_covering && !is_covering)
+ {
+ possible_key.add("chosen", false);
+ possible_key.add("cause", "covering index already found");
continue;
+ }
+
+ if (is_covering && refkey_select_limit < select_limit)
+ {
+ possible_key.add("chosen", false);
+ possible_key.add("cause", "ref estimates better");
+ continue;
+ }
if (table->quick_keys.is_set(nr))
quick_records= table->quick_rows[nr];
+ possible_key.add("records", quick_records);
if (best_key < 0 ||
(select_limit <= MY_MIN(quick_records,best_records) ?
keyinfo->user_defined_key_parts < best_key_parts :
quick_records < best_records) ||
(!is_best_covering && is_covering))
{
+ possible_key.add("chosen", true);
best_key= nr;
best_key_parts= keyinfo->user_defined_key_parts;
if (saved_best_key_parts)
@@ -27616,8 +28518,47 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
best_key_direction= direction;
best_select_limit= select_limit;
}
+ else
+ {
+ char const *cause;
+ possible_key.add("chosen", false);
+ if (is_covering)
+ cause= "covering index already found";
+ else
+ {
+ if (select_limit <= MY_MIN(quick_records,best_records))
+ cause= "keyparts greater than the current best keyparts";
+ else
+ cause= "rows estimation greater";
+ }
+ possible_key.add("cause", cause);
+ }
+ }
+ else
+ {
+ possible_key.add("usable", false);
+ possible_key.add("cause", "cost");
}
- }
+ }
+ else
+ {
+ possible_key.add("usable", false);
+ if (!group && select_limit == HA_POS_ERROR)
+ possible_key.add("cause", "order by without limit");
+ }
+ }
+ else
+ {
+ if (keys.is_set(nr))
+ {
+ possible_key.add("can_resolve_order", false);
+ possible_key.add("cause", "order can not be resolved by key");
+ }
+ else
+ {
+ possible_key.add("can_resolve_order", false);
+ possible_key.add("cause", "not usable index for the query");
+ }
}
}
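
For reference, the possible_key.add() calls in this loop emit one object per index under "possible_keys" in the optimizer trace. An illustrative shape of that trace fragment (key names are taken from the code above; the index names and values are invented, not captured from a real run):

"reconsidering_access_paths_for_index_ordering": {
  "clause": "ORDER BY",
  "fanout": 1,
  "read_time": 6.42,
  "table": "t1",
  "rows_estimation": 500,
  "possible_keys": [
    {
      "index": "PRIMARY",
      "can_resolve_order": false,
      "cause": "not usable index for the query"
    },
    {
      "index": "key1",
      "can_resolve_order": true,
      "updated_limit": 250,
      "range_scan_time": 3.08,
      "index_scan_time": 3.08,
      "records": 1000,
      "chosen": true
    }
  ]
}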
@@ -28027,6 +28968,7 @@ Item *remove_pushed_top_conjuncts(THD *thd, Item *cond)
return cond;
}
+
/*
There are 5 cases in which we shortcut the join optimization process as we
conclude that the join would be a degenerate one
@@ -28040,7 +28982,6 @@ Item *remove_pushed_top_conjuncts(THD *thd, Item *cond)
*/
-
void JOIN::handle_implicit_grouping_with_window_funcs()
{
if (select_lex->have_window_funcs() && send_row_on_empty_set())
@@ -28050,6 +28991,7 @@ void JOIN::handle_implicit_grouping_with_window_funcs()
}
+
/*
@brief
Perform a partial cleanup for the JOIN_TAB structure
@@ -28084,5 +29026,45 @@ void JOIN_TAB::partial_cleanup()
/**
+ @brief
+ Look for a select_handler interface provided by a foreign engine
+
+ @param thd The thread handler
+
+ @details
+ The function checks that this is an upper-level select and, if so, looks
+ through its tables searching for one whose handlerton provides a
+ create_select call-back function. If a call to this function returns a
+ select_handler interface object, the server will push the select query
+ down into this engine.
+ It is the responsibility of the create_select call-back function to
+ check whether the engine can execute the query.
+
+ @retval the found select_handler if the search is successful
+ 0 otherwise
+*/
+
+select_handler *SELECT_LEX::find_select_handler(THD *thd)
+{
+ if (next_select())
+ return 0;
+ if (master_unit()->outer_select())
+ return 0;
+ for (TABLE_LIST *tbl= join->tables_list; tbl; tbl= tbl->next_global)
+ {
+ if (!tbl->table)
+ continue;
+ handlerton *ht= tbl->table->file->partition_ht();
+ if (!ht->create_select)
+ continue;
+ select_handler *sh= ht->create_select(thd, this);
+ return sh;
+ }
+ return 0;
+}
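
Below is a minimal, hypothetical sketch of the engine side of this contract (invented names, not an actual MariaDB plugin and not the real select_handler class): the create_select call-back inspects the query and either returns a handler object, claiming the pushed-down SELECT, or returns NULL to decline.

// All names here are stand-ins; the real interface is declared in
// select_handler.h and handler.h.
struct THD;          // server thread descriptor (opaque here)
struct SELECT_LEX;   // parsed SELECT (opaque here)

struct select_handler_sketch
{
  virtual int execute()= 0;          // run the pushed-down SELECT in the engine
  virtual ~select_handler_sketch() {}
};

struct my_engine_select_handler : select_handler_sketch
{
  SELECT_LEX *select;
  explicit my_engine_select_handler(SELECT_LEX *sel) : select(sel) {}
  int execute() override { return 0; /* ship the query to the engine here */ }
};

// The analogue of the handlerton::create_select call-back that
// find_select_handler() invokes. Returning NULL means "this engine cannot
// run the query, keep executing it on the server".
select_handler_sketch *my_engine_create_select(THD *, SELECT_LEX *sel)
{
  bool engine_can_run_it= (sel != 0);  // placeholder for real capability checks
  if (!engine_can_run_it)
    return 0;
  return new my_engine_select_handler(sel);
}

int main()
{
  // With a null SELECT_LEX the sketch declines pushdown, mirroring the case
  // where find_select_handler() gets back 0 and the query runs locally.
  select_handler_sketch *sh= my_engine_create_select(0, 0);
  delete sh;   // deleting a null pointer is a no-op
  return 0;
}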
+
+
+
+/**
@} (end of group Query_Optimizer)
*/