Diffstat (limited to 'sql/sql_select.cc')
-rw-r--r--   sql/sql_select.cc | 2396
 1 file changed, 1666 insertions(+), 730 deletions(-)
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 4cca2d67eb8..e7f593326a4 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -29,7 +29,7 @@
#pragma implementation // gcc: Class implementation
#endif
-#include <my_global.h>
+#include "mariadb.h"
#include "sql_priv.h"
#include "unireg.h"
#include "sql_select.h"
@@ -50,17 +50,20 @@
#include "filesort.h" // filesort_free_buffers
#include "sql_union.h" // mysql_union
#include "opt_subselect.h"
-#include "log_slow.h"
#include "sql_derived.h"
#include "sql_statistics.h"
#include "sql_cte.h"
#include "sql_window.h"
+#include "tztime.h"
#include "debug_sync.h" // DEBUG_SYNC
#include <m_ctype.h>
#include <my_bit.h>
#include <hash.h>
#include <ft_global.h>
+#include "sys_vars_shared.h"
+#include "sp_head.h"
+#include "sp_rcontext.h"
/*
A key part number that means we're using a fulltext scan.
@@ -81,9 +84,11 @@ const char *join_type_str[]={ "UNKNOWN","system","const","eq_ref","ref",
"index_merge", "hash_ALL", "hash_range",
"hash_index", "hash_index_merge" };
+LEX_CSTRING group_key= {STRING_WITH_LEN("group_key")};
+LEX_CSTRING distinct_key= {STRING_WITH_LEN("distinct_key")};
+
struct st_sargable_param;
-static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array);
static bool make_join_statistics(JOIN *join, List<TABLE_LIST> &leaves,
DYNAMIC_ARRAY *keyuse);
static bool update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,
@@ -91,8 +96,6 @@ static bool update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,
uint tables, COND *conds,
table_map table_map, SELECT_LEX *select_lex,
SARGABLE_PARAM **sargables);
-static bool sort_and_filter_keyuse(THD *thd, DYNAMIC_ARRAY *keyuse,
- bool skip_unprefixed_keyparts);
static int sort_keyuse(KEYUSE *a,KEYUSE *b);
static bool are_tables_local(JOIN_TAB *jtab, table_map used_tables);
static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse,
@@ -220,6 +223,9 @@ static bool test_if_cheaper_ordering(const JOIN_TAB *tab,
ha_rows *new_select_limit,
uint *new_used_key_parts= NULL,
uint *saved_best_key_parts= NULL);
+static int test_if_order_by_key(JOIN *join,
+ ORDER *order, TABLE *table, uint idx,
+ uint *used_key_parts= NULL);
static bool test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,
ha_rows select_limit, bool no_changes,
const key_map *map);
@@ -283,6 +289,9 @@ static bool find_order_in_list(THD *, Ref_ptr_array, TABLE_LIST *, ORDER *,
static double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
table_map rem_tables);
void set_postjoin_aggr_write_func(JOIN_TAB *tab);
+
+static Item **get_sargable_cond(JOIN *join, TABLE *table);
+
#ifndef DBUG_OFF
/*
@@ -300,7 +309,7 @@ void dbug_serve_apcs(THD *thd, int n_calls)
thd_proc_info(thd, "show_explain_trap");
my_sleep(30000);
thd_proc_info(thd, save_proc_info);
- if (thd->check_killed())
+ if (unlikely(thd->check_killed(1)))
break;
}
}
@@ -321,8 +330,8 @@ void dbug_serve_apcs(THD *thd, int n_calls)
bool dbug_user_var_equals_int(THD *thd, const char *name, int value)
{
user_var_entry *var;
- LEX_STRING varname= {(char*)name, strlen(name)};
- if ((var= get_variable(&thd->user_vars, varname, FALSE)))
+ LEX_CSTRING varname= { name, strlen(name)};
+ if ((var= get_variable(&thd->user_vars, &varname, FALSE)))
{
bool null_value;
longlong var_value= var->val_int(&null_value);
@@ -346,7 +355,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result,
DBUG_ENTER("handle_select");
MYSQL_SELECT_START(thd->query());
- if (select_lex->master_unit()->is_union() ||
+ if (select_lex->master_unit()->is_unit_op() ||
select_lex->master_unit()->fake_select_lex)
res= mysql_union(thd, lex, result, &lex->unit, setup_tables_done_option);
else
@@ -377,7 +386,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result,
res|= thd->is_error();
if (unlikely(res))
result->abort_result_set();
- if (thd->killed == ABORT_QUERY)
+ if (unlikely(thd->killed == ABORT_QUERY && !thd->no_errors))
{
/*
If LIMIT ROWS EXAMINED interrupted query execution, issue a warning,
@@ -514,15 +523,15 @@ fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select,
new_ref= direct_ref ?
new (thd->mem_root) Item_direct_ref(thd, ref->context, item_ref, ref->table_name,
- ref->field_name, ref->alias_name_used) :
+ &ref->field_name, ref->alias_name_used) :
new (thd->mem_root) Item_ref(thd, ref->context, item_ref, ref->table_name,
- ref->field_name, ref->alias_name_used);
+ &ref->field_name, ref->alias_name_used);
if (!new_ref)
return TRUE;
ref->outer_ref= new_ref;
ref->ref= &ref->outer_ref;
- if (!ref->fixed && ref->fix_fields(thd, 0))
+ if (ref->fix_fields_if_needed(thd, 0))
return TRUE;
thd->lex->used_tables|= item->used_tables();
thd->lex->current_select->select_list_tables|= item->used_tables();
@@ -633,7 +642,7 @@ setup_without_group(THD *thd, Ref_ptr_array ref_pointer_array,
const bool saved_non_agg_field_used= select->non_agg_field_used();
DBUG_ENTER("setup_without_group");
- thd->lex->allow_sum_func&= ~((nesting_map)1 << select->nest_level);
+ thd->lex->allow_sum_func.clear_bit(select->nest_level);
res= setup_conds(thd, tables, leaves, conds);
if (thd->lex->current_select->first_cond_optimization)
{
@@ -646,24 +655,347 @@ setup_without_group(THD *thd, Ref_ptr_array ref_pointer_array,
/* it's not wrong to have non-aggregated columns in a WHERE */
select->set_non_agg_field_used(saved_non_agg_field_used);
- thd->lex->allow_sum_func|= (nesting_map)1 << select->nest_level;
+ thd->lex->allow_sum_func.set_bit(select->nest_level);
save_place= thd->lex->current_select->context_analysis_place;
thd->lex->current_select->context_analysis_place= IN_ORDER_BY;
res= res || setup_order(thd, ref_pointer_array, tables, fields, all_fields,
order);
- thd->lex->allow_sum_func&= ~((nesting_map)1 << select->nest_level);
+ thd->lex->allow_sum_func.clear_bit(select->nest_level);
thd->lex->current_select->context_analysis_place= IN_GROUP_BY;
res= res || setup_group(thd, ref_pointer_array, tables, fields, all_fields,
group, hidden_group_fields);
thd->lex->current_select->context_analysis_place= save_place;
- thd->lex->allow_sum_func|= (nesting_map)1 << select->nest_level;
+ thd->lex->allow_sum_func.set_bit(select->nest_level);
res= res || setup_windows(thd, ref_pointer_array, tables, fields, all_fields,
win_specs, win_funcs);
thd->lex->allow_sum_func= save_allow_sum_func;
DBUG_RETURN(res);
}
+bool vers_select_conds_t::init_from_sysvar(THD *thd)
+{
+ vers_asof_timestamp_t &in= thd->variables.vers_asof_timestamp;
+ type= (vers_system_time_t) in.type;
+ delete_history= false;
+ start.unit= VERS_TIMESTAMP;
+ if (type != SYSTEM_TIME_UNSPECIFIED && type != SYSTEM_TIME_ALL)
+ {
+ DBUG_ASSERT(type == SYSTEM_TIME_AS_OF);
+ start.item= new (thd->mem_root)
+ Item_datetime_literal(thd, &in.ltime, TIME_SECOND_PART_DIGITS);
+ if (!start.item)
+ return true;
+ }
+ else
+ start.item= NULL;
+ end.empty();
+ return false;
+}
+
+void vers_select_conds_t::print(String *str, enum_query_type query_type) const
+{
+ switch (type) {
+ case SYSTEM_TIME_UNSPECIFIED:
+ break;
+ case SYSTEM_TIME_AS_OF:
+ start.print(str, query_type, STRING_WITH_LEN(" FOR SYSTEM_TIME AS OF "));
+ break;
+ case SYSTEM_TIME_FROM_TO:
+ start.print(str, query_type, STRING_WITH_LEN(" FOR SYSTEM_TIME FROM "));
+ end.print(str, query_type, STRING_WITH_LEN(" TO "));
+ break;
+ case SYSTEM_TIME_BETWEEN:
+ start.print(str, query_type, STRING_WITH_LEN(" FOR SYSTEM_TIME BETWEEN "));
+ end.print(str, query_type, STRING_WITH_LEN(" AND "));
+ break;
+ case SYSTEM_TIME_BEFORE:
+ case SYSTEM_TIME_HISTORY:
+ DBUG_ASSERT(0);
+ break;
+ case SYSTEM_TIME_ALL:
+ str->append(" FOR SYSTEM_TIME ALL");
+ break;
+ }
+}
+
+static
+bool skip_setup_conds(THD *thd)
+{
+ return (!thd->stmt_arena->is_conventional()
+ && !thd->stmt_arena->is_stmt_prepare_or_first_sp_execute())
+ || thd->lex->is_view_context_analysis();
+}
+
+int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables)
+{
+ DBUG_ENTER("SELECT_LEX::vers_setup_cond");
+#define newx new (thd->mem_root)
+
+ const bool update_conds= !skip_setup_conds(thd);
+ TABLE_LIST *table;
+
+ if (!versioned_tables)
+ {
+ for (table= tables; table; table= table->next_local)
+ {
+ if (table->table && table->table->versioned())
+ versioned_tables++;
+ else if (table->vers_conditions.is_set() &&
+ (table->is_non_derived() || !table->vers_conditions.used))
+ {
+ my_error(ER_VERS_NOT_VERSIONED, MYF(0), table->alias.str);
+ DBUG_RETURN(-1);
+ }
+ }
+ }
+
+ if (versioned_tables == 0)
+ DBUG_RETURN(0);
+
+ /* For prepared statements we create items on statement arena,
+ because they must outlive execution phase for multiple executions. */
+ Query_arena_stmt on_stmt_arena(thd);
+
+ // find outer system_time
+ SELECT_LEX *outer_slex= outer_select();
+ TABLE_LIST* outer_table= NULL;
+
+ if (outer_slex)
+ {
+ TABLE_LIST* derived= master_unit()->derived;
+ // inner SELECT may not be a derived table (derived == NULL)
+ while (derived && outer_slex && !derived->vers_conditions.is_set())
+ {
+ derived= outer_slex->master_unit()->derived;
+ outer_slex= outer_slex->outer_select();
+ }
+ if (derived && outer_slex)
+ {
+ DBUG_ASSERT(derived->vers_conditions.is_set());
+ outer_table= derived;
+ }
+ }
+
+ bool is_select= false;
+ bool use_sysvar= false;
+ switch (thd->lex->sql_command)
+ {
+ case SQLCOM_SELECT:
+ use_sysvar= true;
+ /* fall through */
+ case SQLCOM_INSERT_SELECT:
+ case SQLCOM_REPLACE_SELECT:
+ case SQLCOM_DELETE_MULTI:
+ case SQLCOM_UPDATE_MULTI:
+ is_select= true;
+ default:
+ break;
+ }
+
+ for (table= tables; table; table= table->next_local)
+ {
+ if (!table->table || table->is_view() || !table->table->versioned())
+ continue;
+
+ vers_select_conds_t &vers_conditions= table->vers_conditions;
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ /*
+ if the history is stored in partitions, then partitions
+ themselves are not versioned
+ */
+ if (table->partition_names && table->table->part_info->vers_info)
+ {
+ if (vers_conditions.was_set())
+ {
+ my_error(ER_VERS_QUERY_IN_PARTITION, MYF(0), table->alias.str);
+ DBUG_RETURN(-1);
+ }
+ else if (!vers_conditions.is_set())
+ vers_conditions.type= SYSTEM_TIME_ALL;
+ }
+#endif
+
+ if (outer_table && !vers_conditions.is_set())
+ {
+ // propagate system_time from nearest outer SELECT_LEX
+ vers_conditions= outer_table->vers_conditions;
+ outer_table->vers_conditions.used= true;
+ }
+
+ // propagate system_time from sysvar
+ if (!vers_conditions.is_set() && use_sysvar)
+ {
+ if (vers_conditions.init_from_sysvar(thd))
+ DBUG_RETURN(-1);
+ }
+
+ if (vers_conditions.is_set())
+ {
+ if (vers_conditions.was_set() &&
+ table->lock_type > TL_READ_NO_INSERT &&
+ !vers_conditions.delete_history)
+ {
+ my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE, MYF(0), table->alias.str);
+ DBUG_RETURN(-1);
+ }
+
+ if (vers_conditions.type == SYSTEM_TIME_ALL)
+ continue;
+ }
+
+ const LEX_CSTRING *fstart=
+ thd->make_clex_string(table->table->vers_start_field()->field_name);
+ const LEX_CSTRING *fend=
+ thd->make_clex_string(table->table->vers_end_field()->field_name);
+
+ Item *row_start=
+ newx Item_field(thd, &this->context, table->db.str, table->alias.str, fstart);
+ Item *row_end=
+ newx Item_field(thd, &this->context, table->db.str, table->alias.str, fend);
+
+ bool timestamps_only= table->table->versioned(VERS_TIMESTAMP);
+
+ if (vers_conditions.is_set() && vers_conditions.type != SYSTEM_TIME_HISTORY)
+ {
+ thd->where= "FOR SYSTEM_TIME";
+ /* TODO: do resolve fix_length_and_dec(), fix_fields(). This requires
+ storing vers_conditions as Item and make some magic related to
+ vers_system_time_t/VERS_TRX_ID at stage of fix_fields()
+ (this is large refactoring). */
+ if (vers_conditions.resolve_units(thd))
+ DBUG_RETURN(-1);
+ if (timestamps_only && (vers_conditions.start.unit == VERS_TRX_ID ||
+ vers_conditions.end.unit == VERS_TRX_ID))
+ {
+ my_error(ER_VERS_ENGINE_UNSUPPORTED, MYF(0), table->table_name.str);
+ DBUG_RETURN(-1);
+ }
+ }
+
+ if (!update_conds)
+ continue;
+
+ Item *cond1= NULL, *cond2= NULL, *cond3= NULL, *curr= NULL;
+ Item *point_in_time1= vers_conditions.start.item;
+ Item *point_in_time2= vers_conditions.end.item;
+ TABLE *t= table->table;
+ if (t->versioned(VERS_TIMESTAMP))
+ {
+ MYSQL_TIME max_time;
+ switch (vers_conditions.type)
+ {
+ case SYSTEM_TIME_UNSPECIFIED:
+ case SYSTEM_TIME_HISTORY:
+ thd->variables.time_zone->gmt_sec_to_TIME(&max_time, TIMESTAMP_MAX_VALUE);
+ max_time.second_part= TIME_MAX_SECOND_PART;
+ curr= newx Item_datetime_literal(thd, &max_time, TIME_SECOND_PART_DIGITS);
+ if (vers_conditions.type == SYSTEM_TIME_UNSPECIFIED)
+ cond1= newx Item_func_eq(thd, row_end, curr);
+ else
+ cond1= newx Item_func_lt(thd, row_end, curr);
+ break;
+ case SYSTEM_TIME_AS_OF:
+ cond1= newx Item_func_le(thd, row_start, point_in_time1);
+ cond2= newx Item_func_gt(thd, row_end, point_in_time1);
+ break;
+ case SYSTEM_TIME_FROM_TO:
+ cond1= newx Item_func_lt(thd, row_start, point_in_time2);
+ cond2= newx Item_func_gt(thd, row_end, point_in_time1);
+ cond3= newx Item_func_lt(thd, point_in_time1, point_in_time2);
+ break;
+ case SYSTEM_TIME_BETWEEN:
+ cond1= newx Item_func_le(thd, row_start, point_in_time2);
+ cond2= newx Item_func_gt(thd, row_end, point_in_time1);
+ cond3= newx Item_func_le(thd, point_in_time1, point_in_time2);
+ break;
+ case SYSTEM_TIME_BEFORE:
+ cond1= newx Item_func_lt(thd, row_end, point_in_time1);
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+ }
+ else
+ {
+ DBUG_ASSERT(table->table->s && table->table->s->db_plugin);
+
+ Item *trx_id0, *trx_id1;
+
+ switch (vers_conditions.type)
+ {
+ case SYSTEM_TIME_UNSPECIFIED:
+ case SYSTEM_TIME_HISTORY:
+ curr= newx Item_int(thd, ULONGLONG_MAX);
+ if (vers_conditions.type == SYSTEM_TIME_UNSPECIFIED)
+ cond1= newx Item_func_eq(thd, row_end, curr);
+ else
+ cond1= newx Item_func_lt(thd, row_end, curr);
+ break;
+ case SYSTEM_TIME_AS_OF:
+ trx_id0= vers_conditions.start.unit == VERS_TIMESTAMP
+ ? newx Item_func_trt_id(thd, point_in_time1, TR_table::FLD_TRX_ID)
+ : point_in_time1;
+ cond1= newx Item_func_trt_trx_sees_eq(thd, trx_id0, row_start);
+ cond2= newx Item_func_trt_trx_sees(thd, row_end, trx_id0);
+ break;
+ case SYSTEM_TIME_FROM_TO:
+ cond3= newx Item_func_lt(thd, point_in_time1, point_in_time2);
+ /* fall through */
+ case SYSTEM_TIME_BETWEEN:
+ trx_id0= vers_conditions.start.unit == VERS_TIMESTAMP
+ ? newx Item_func_trt_id(thd, point_in_time1, TR_table::FLD_TRX_ID, true)
+ : point_in_time1;
+ trx_id1= vers_conditions.end.unit == VERS_TIMESTAMP
+ ? newx Item_func_trt_id(thd, point_in_time2, TR_table::FLD_TRX_ID, false)
+ : point_in_time2;
+ cond1= vers_conditions.type == SYSTEM_TIME_FROM_TO
+ ? newx Item_func_trt_trx_sees(thd, trx_id1, row_start)
+ : newx Item_func_trt_trx_sees_eq(thd, trx_id1, row_start);
+ cond2= newx Item_func_trt_trx_sees_eq(thd, row_end, trx_id0);
+ if (!cond3)
+ cond3= newx Item_func_le(thd, point_in_time1, point_in_time2);
+ break;
+ case SYSTEM_TIME_BEFORE:
+ trx_id0= vers_conditions.start.unit == VERS_TIMESTAMP
+ ? newx Item_func_trt_id(thd, point_in_time1, TR_table::FLD_TRX_ID, true)
+ : point_in_time1;
+ cond1= newx Item_func_trt_trx_sees(thd, trx_id0, row_end);
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+ }
+
+ if (cond1)
+ {
+ cond1= and_items(thd, cond2, cond1);
+ cond1= and_items(thd, cond3, cond1);
+ if (is_select)
+ table->on_expr= and_items(thd, table->on_expr, cond1);
+ else
+ {
+ if (join)
+ {
+ where= and_items(thd, join->conds, cond1);
+ join->conds= where;
+ }
+ else
+ where= and_items(thd, where, cond1);
+ table->where= and_items(thd, table->where, cond1);
+ }
+ }
+
+ table->vers_conditions.type= SYSTEM_TIME_ALL;
+ } // for (table= tables; ...)
+
+ DBUG_RETURN(0);
+#undef newx
+}
+
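The timestamp branch above reduces each FOR SYSTEM_TIME variant to a range predicate over the generated row_start/row_end columns; the transaction-id branch builds the analogous checks with the trt_* item functions instead. Below is a minimal standalone sketch of that mapping; the function, its string output and the TIMESTAMP_MAX placeholder are illustrative only, not MariaDB internals.

// Sketch only: mirrors the condition shapes built by vers_setup_conds()
// for a timestamp-versioned table. t1/t2 stand for the user-given points
// in time; TIMESTAMP_MAX stands for the "row is current" end marker.
#include <cassert>
#include <string>

enum vers_kind { UNSPECIFIED, AS_OF, FROM_TO, BETWEEN, BEFORE, HISTORY };

std::string vers_predicate(vers_kind k, const std::string &t1 = "",
                           const std::string &t2 = "")
{
  switch (k)
  {
  case UNSPECIFIED:              // only current rows
    return "row_end = TIMESTAMP_MAX";
  case HISTORY:                  // only closed (historical) rows
    return "row_end < TIMESTAMP_MAX";
  case AS_OF:                    // rows visible at t1
    return "row_start <= " + t1 + " AND row_end > " + t1;
  case FROM_TO:                  // half-open period [t1, t2)
    return "row_start < " + t2 + " AND row_end > " + t1 +
           " AND " + t1 + " < " + t2;
  case BETWEEN:                  // closed start bound: row_start <= t2
    return "row_start <= " + t2 + " AND row_end > " + t1 +
           " AND " + t1 + " <= " + t2;
  case BEFORE:                   // history that ended before t1
    return "row_end < " + t1;
  }
  return "";
}

int main()
{
  assert(vers_predicate(AS_OF, "@t") ==
         "row_start <= @t AND row_end > @t");
  return 0;
}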
/*****************************************************************************
Check fields, find best join, do the select and output fields.
mysql_select assumes that all tables are already opened
@@ -705,13 +1037,13 @@ JOIN::prepare(TABLE_LIST *tables_init,
select_lex= select_lex_arg;
select_lex->join= this;
join_list= &select_lex->top_join_list;
- union_part= unit_arg->is_union();
+ union_part= unit_arg->is_unit_op();
// simple check that we got usable conds
dbug_print_item(conds);
if (select_lex->handle_derived(thd->lex, DT_PREPARE))
- DBUG_RETURN(1);
+ DBUG_RETURN(-1);
thd->lex->current_select->context_analysis_place= NO_MATTER;
thd->lex->current_select->is_item_list_lookup= 1;
@@ -742,11 +1074,15 @@ JOIN::prepare(TABLE_LIST *tables_init,
{
remove_redundant_subquery_clauses(select_lex);
}
-
+
+ /* System Versioning: handle FOR SYSTEM_TIME clause. */
+ if (select_lex->vers_setup_conds(thd, tables_list) < 0)
+ DBUG_RETURN(-1);
+
/*
TRUE if the SELECT list mixes elements with and without grouping,
and there is no GROUP BY clause. Mixing non-aggregated fields with
- aggregate functions in the SELECT list is a MySQL exptenstion that
+      aggregate functions in the SELECT list is a MySQL extension that
is allowed only if the ONLY_FULL_GROUP_BY sql mode is not set.
*/
mixed_implicit_grouping= false;
@@ -826,7 +1162,7 @@ JOIN::prepare(TABLE_LIST *tables_init,
select_lex->master_unit()->global_parameters())
{
nesting_map save_allow_sum_func= thd->lex->allow_sum_func;
- thd->lex->allow_sum_func|= (nesting_map)1 << select_lex->nest_level;
+ thd->lex->allow_sum_func.set_bit(select_lex->nest_level);
thd->where= "order clause";
for (ORDER *order= select_lex->order_list.first; order; order= order->next)
{
@@ -844,7 +1180,7 @@ JOIN::prepare(TABLE_LIST *tables_init,
{
nesting_map save_allow_sum_func= thd->lex->allow_sum_func;
thd->where="having clause";
- thd->lex->allow_sum_func|= (nesting_map)1 << select_lex_arg->nest_level;
+ thd->lex->allow_sum_func.set_bit(select_lex_arg->nest_level);
select_lex->having_fix_field= 1;
/*
Wrap alone field in HAVING clause in case it will be outer field
@@ -854,12 +1190,10 @@ JOIN::prepare(TABLE_LIST *tables_init,
if (having->type() == Item::REF_ITEM &&
((Item_ref *)having)->ref_type() == Item_ref::REF)
wrap_ident(thd, &having);
- bool having_fix_rc= (!having->fixed &&
- (having->fix_fields(thd, &having) ||
- having->check_cols(1)));
+ bool having_fix_rc= having->fix_fields_if_needed_for_bool(thd, &having);
select_lex->having_fix_field= 0;
- if (having_fix_rc || thd->is_error())
+ if (unlikely(having_fix_rc || thd->is_error()))
DBUG_RETURN(-1); /* purecov: inspected */
thd->lex->allow_sum_func= save_allow_sum_func;
@@ -971,6 +1305,8 @@ JOIN::prepare(TABLE_LIST *tables_init,
(*ord->item)->field_type() == MYSQL_TYPE_BIT)
{
Item_field *field= new (thd->mem_root) Item_field(thd, *(Item_field**)ord->item);
+ if (!field)
+ DBUG_RETURN(-1);
int el= all_fields.elements;
ref_ptrs[el]= field;
all_fields.push_front(field, thd->mem_root);
@@ -1003,7 +1339,7 @@ JOIN::prepare(TABLE_LIST *tables_init,
}
procedure= setup_procedure(thd, proc_param, result, fields_list, &error);
- if (error)
+ if (unlikely(error))
goto err; /* purecov: inspected */
if (procedure)
{
@@ -1103,53 +1439,70 @@ err:
DBUG_RETURN(res); /* purecov: inspected */
}
-int JOIN::optimize()
+
+bool JOIN::build_explain()
{
- // to prevent double initialization on EXPLAIN
- if (optimization_state != JOIN::NOT_OPTIMIZED)
- return FALSE;
- optimization_state= JOIN::OPTIMIZATION_IN_PROGRESS;
+ create_explain_query_if_not_exists(thd->lex, thd->mem_root);
+ have_query_plan= QEP_AVAILABLE;
- int res= optimize_inner();
- if (!res && have_query_plan != QEP_DELETED)
- {
- create_explain_query_if_not_exists(thd->lex, thd->mem_root);
- have_query_plan= QEP_AVAILABLE;
+ /*
+ explain data must be created on the Explain_query::mem_root. Because it's
+ just a memroot, not an arena, explain data must not contain any Items
+ */
+ MEM_ROOT *old_mem_root= thd->mem_root;
+ Item *old_free_list __attribute__((unused))= thd->free_list;
+ thd->mem_root= thd->lex->explain->mem_root;
+ bool res= save_explain_data(thd->lex->explain, false /* can overwrite */,
+ need_tmp,
+ !skip_sort_order && !no_order && (order || group_list),
+ select_distinct);
+ thd->mem_root= old_mem_root;
+ DBUG_ASSERT(thd->free_list == old_free_list); // no Items were created
+ if (res)
+ return 1;
- /*
- explain data must be created on the Explain_query::mem_root. Because it's
- just a memroot, not an arena, explain data must not contain any Items
- */
- MEM_ROOT *old_mem_root= thd->mem_root;
- Item *old_free_list __attribute__((unused))= thd->free_list;
- thd->mem_root= thd->lex->explain->mem_root;
- save_explain_data(thd->lex->explain, false /* can overwrite */,
- need_tmp,
- !skip_sort_order && !no_order && (order || group_list),
- select_distinct);
- thd->mem_root= old_mem_root;
- DBUG_ASSERT(thd->free_list == old_free_list); // no Items were created
-
- uint select_nr= select_lex->select_number;
- JOIN_TAB *curr_tab= join_tab + exec_join_tab_cnt();
- for (uint i= 0; i < aggr_tables; i++, curr_tab++)
+ uint select_nr= select_lex->select_number;
+ JOIN_TAB *curr_tab= join_tab + exec_join_tab_cnt();
+ for (uint i= 0; i < aggr_tables; i++, curr_tab++)
+ {
+ if (select_nr == INT_MAX)
{
- if (select_nr == INT_MAX)
- {
- /* this is a fake_select_lex of a union */
- select_nr= select_lex->master_unit()->first_select()->select_number;
- curr_tab->tracker= thd->lex->explain->get_union(select_nr)->
- get_tmptable_read_tracker();
- }
- else
- {
- curr_tab->tracker= thd->lex->explain->get_select(select_nr)->
- get_using_temporary_read_tracker();
- }
+ /* this is a fake_select_lex of a union */
+ select_nr= select_lex->master_unit()->first_select()->select_number;
+ curr_tab->tracker= thd->lex->explain->get_union(select_nr)->
+ get_tmptable_read_tracker();
+ }
+ else
+ {
+ curr_tab->tracker= thd->lex->explain->get_select(select_nr)->
+ get_using_temporary_read_tracker();
}
-
}
- optimization_state= JOIN::OPTIMIZATION_DONE;
+ return 0;
+}
+
+
+int JOIN::optimize()
+{
+ int res= 0;
+ join_optimization_state init_state= optimization_state;
+ if (optimization_state == JOIN::OPTIMIZATION_PHASE_1_DONE)
+ res= optimize_stage2();
+ else
+ {
+ // to prevent double initialization on EXPLAIN
+ if (optimization_state != JOIN::NOT_OPTIMIZED)
+ return FALSE;
+ optimization_state= JOIN::OPTIMIZATION_IN_PROGRESS;
+ res= optimize_inner();
+ }
+ if (!with_two_phase_optimization ||
+ init_state == JOIN::OPTIMIZATION_PHASE_1_DONE)
+ {
+ if (!res && have_query_plan != QEP_DELETED)
+ res= build_explain();
+ optimization_state= JOIN::OPTIMIZATION_DONE;
+ }
return res;
}
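Restated as a compact standalone sketch (toy types, illustrative names, not the real JOIN members): a first call may stop after phase 1 when two-phase optimization is chosen, a later call runs only stage 2, and the EXPLAIN data is built once the plan is final.

// Sketch of JOIN::optimize()'s new two-phase flow; toy types only.
enum opt_state { NOT_OPTIMIZED, IN_PROGRESS, PHASE_1_DONE, DONE };

struct ToyJoin
{
  opt_state state= NOT_OPTIMIZED;
  bool two_phase= false;               // set during phase 1 in the real code

  int optimize_inner()  { return 0; }  // phase 1: statistics, join order, ...
  int optimize_stage2() { return 0; }  // phase 2: finish plan, read info, ...
  int build_explain()   { return 0; }  // EXPLAIN data on the explain memroot

  int optimize()
  {
    opt_state init= state;
    int res= 0;
    if (state == PHASE_1_DONE)         // second call: only stage 2 remains
      res= optimize_stage2();
    else
    {
      if (state != NOT_OPTIMIZED)      // prevent double initialization (EXPLAIN)
        return 0;
      state= IN_PROGRESS;
      res= optimize_inner();           // real code may leave PHASE_1_DONE here
    }
    if (!two_phase || init == PHASE_1_DONE)
    {
      if (!res)
        res= build_explain();
      state= DONE;
    }
    return res;
  }
};

int main() { ToyJoin j; return j.optimize(); }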
@@ -1199,10 +1552,8 @@ int JOIN::init_join_caches()
int
JOIN::optimize_inner()
{
- ulonglong select_opts_for_readinfo;
- uint no_jbuf_after;
- JOIN_TAB *tab;
DBUG_ENTER("JOIN::optimize");
+ subq_exit_fl= false;
do_send_rows = (unit->select_limit_cnt) ? 1 : 0;
DEBUG_SYNC(thd, "before_join_optimize");
@@ -1226,6 +1577,11 @@ JOIN::optimize_inner()
DBUG_RETURN(TRUE);
table_count= select_lex->leaf_tables.elements;
}
+
+ if (select_lex->first_cond_optimization &&
+ transform_in_predicates_into_in_subq(thd))
+ DBUG_RETURN(1);
+
// Update used tables after all handling derived table procedures
select_lex->update_used_tables();
@@ -1336,10 +1692,10 @@ JOIN::optimize_inner()
if (optimize_constant_subqueries())
DBUG_RETURN(1);
- if (conds && conds->has_subquery())
+ if (conds && conds->with_subquery())
(void) conds->walk(&Item::cleanup_is_expensive_cache_processor,
0, (void *) 0);
- if (having && having->has_subquery())
+ if (having && having->with_subquery())
(void) having->walk(&Item::cleanup_is_expensive_cache_processor,
0, (void *) 0);
@@ -1366,7 +1722,20 @@ JOIN::optimize_inner()
}
}
- conds= optimize_cond(this, conds, join_list, FALSE,
+ bool ignore_on_expr= false;
+ /*
+ PS/SP note: on_expr of versioned table can not be reallocated
+ (see build_equal_items() below) because it can be not rebuilt
+ at second invocation.
+ */
+ if (!thd->stmt_arena->is_conventional() && thd->mem_root != thd->stmt_arena->mem_root)
+ for (TABLE_LIST *tbl= tables_list; tbl; tbl= tbl->next_local)
+ if (tbl->table && tbl->on_expr && tbl->table->versioned())
+ {
+ ignore_on_expr= true;
+ break;
+ }
+ conds= optimize_cond(this, conds, join_list, ignore_on_expr,
&cond_value, &cond_equal, OPT_LINK_EQUAL_FIELDS);
if (thd->is_error())
@@ -1388,6 +1757,11 @@ JOIN::optimize_inner()
*/
if (tbl->is_materialized_derived())
{
+ JOIN *join= tbl->get_unit()->first_select()->join;
+ if (join &&
+ join->optimization_state == JOIN::OPTIMIZATION_PHASE_1_DONE &&
+ join->with_two_phase_optimization)
+ continue;
/*
Do not push conditions from where into materialized inner tables
of outer joins: this is not valid.
@@ -1408,12 +1782,12 @@ JOIN::optimize_inner()
if (select_lex->handle_derived(thd->lex, DT_OPTIMIZE))
DBUG_RETURN(1);
}
-
+
{
having= optimize_cond(this, having, join_list, TRUE,
&having_value, &having_equal);
- if (thd->is_error())
+ if (unlikely(thd->is_error()))
{
error= 1;
DBUG_PRINT("error",("Error from optimize_cond"));
@@ -1449,6 +1823,7 @@ JOIN::optimize_inner()
table_count= top_join_tab_count= 0;
handle_implicit_grouping_with_window_funcs();
error= 0;
+ subq_exit_fl= true;
goto setup_subq_exit;
}
}
@@ -1459,19 +1834,9 @@ JOIN::optimize_inner()
List_iterator_fast<TABLE_LIST> li(select_lex->leaf_tables);
while ((tbl= li++))
{
- /*
- If tbl->embedding!=NULL that means that this table is in the inner
- part of the nested outer join, and we can't do partition pruning
- (TODO: check if this limitation can be lifted)
- */
- if (!tbl->embedding ||
- (tbl->embedding && tbl->embedding->sj_on_expr))
- {
- Item *prune_cond= tbl->on_expr? tbl->on_expr : conds;
- tbl->table->all_partitions_pruned_away= prune_partitions(thd,
- tbl->table,
- prune_cond);
- }
+ Item **prune_cond= get_sargable_cond(this, tbl->table);
+ tbl->table->all_partitions_pruned_away=
+ prune_partitions(thd, tbl->table, *prune_cond);
}
}
#endif
@@ -1503,6 +1868,7 @@ JOIN::optimize_inner()
zero_result_cause= "No matching min/max row";
table_count= top_join_tab_count= 0;
error=0;
+ subq_exit_fl= true;
handle_implicit_grouping_with_window_funcs();
goto setup_subq_exit;
}
@@ -1547,6 +1913,7 @@ JOIN::optimize_inner()
{
DBUG_PRINT("info",("No tables"));
error= 0;
+ subq_exit_fl= true;
goto setup_subq_exit;
}
error= -1; // Error is sent to client
@@ -1563,7 +1930,7 @@ JOIN::optimize_inner()
group_list= remove_const(this, group_list, conds,
rollup.state == ROLLUP::STATE_NONE,
&simple_group);
- if (thd->is_error())
+ if (unlikely(thd->is_error()))
{
error= 1;
DBUG_RETURN(1);
@@ -1581,13 +1948,59 @@ JOIN::optimize_inner()
/* Calculate how to do the join */
THD_STAGE_INFO(thd, stage_statistics);
result->prepare_to_read_rows();
- if (make_join_statistics(this, select_lex->leaf_tables, &keyuse) ||
- thd->is_fatal_error)
+ if (unlikely(make_join_statistics(this, select_lex->leaf_tables,
+ &keyuse)) ||
+ unlikely(thd->is_fatal_error))
{
DBUG_PRINT("error",("Error: make_join_statistics() failed"));
DBUG_RETURN(1);
}
+ /*
+    If a splittable materialized derived/view dt_i is embedded into
+    another splittable materialized derived/view dt_o, then
+ splitting plans for dt_i and dt_o are evaluated independently.
+ First the optimizer looks for the best splitting plan sp_i for dt_i.
+ It happens when non-splitting plans for dt_o are evaluated.
+ The cost of sp_i is considered as the cost of materialization of dt_i
+ when evaluating any splitting plan for dt_o.
+ */
+ if (fix_all_splittings_in_plan())
+ DBUG_RETURN(1);
+
+setup_subq_exit:
+ with_two_phase_optimization= check_two_phase_optimization(thd);
+ if (with_two_phase_optimization)
+ optimization_state= JOIN::OPTIMIZATION_PHASE_1_DONE;
+ else
+ {
+ if (optimize_stage2())
+ DBUG_RETURN(1);
+ }
+ DBUG_RETURN(0);
+}
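As a loose, hedged reading of the comment before fix_all_splittings_in_plan(), with invented types: once the best splitting plan sp_i for an inner derived table dt_i has been found (while non-splitting plans for the outer table dt_o are costed), that plan's cost is what dt_o's splitting plans subsequently see as dt_i's materialization cost.

// Illustrative only; none of these types exist in MariaDB.
struct derived_cost
{
  double full_materialization;  // cost of materializing dt_i without splitting
  double best_splitting;        // cost of the best splitting plan sp_i
  bool   has_splitting_plan;
};

// Materialization cost of an inner derived table dt_i as seen while costing
// splitting plans for the outer derived table dt_o.
double inner_materialization_cost(const derived_cost &dt_i)
{
  return dt_i.has_splitting_plan ? dt_i.best_splitting
                                 : dt_i.full_materialization;
}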
+
+
+int JOIN::optimize_stage2()
+{
+ ulonglong select_opts_for_readinfo;
+ uint no_jbuf_after;
+ JOIN_TAB *tab;
+ DBUG_ENTER("JOIN::optimize_stage2");
+
+ if (subq_exit_fl)
+ goto setup_subq_exit;
+
+ if (unlikely(thd->check_killed()))
+ DBUG_RETURN(1);
+
+ /* Generate an execution plan from the found optimal join order. */
+ if (get_best_combination())
+ DBUG_RETURN(1);
+
+ if (select_lex->handle_derived(thd->lex, DT_OPTIMIZE))
+ DBUG_RETURN(1);
+
if (optimizer_flag(thd, OPTIMIZER_SWITCH_DERIVED_WITH_KEYS))
drop_unused_derived_keys();
@@ -1631,7 +2044,14 @@ JOIN::optimize_inner()
}
if (const_tables && !thd->locked_tables_mode &&
!(select_options & SELECT_NO_UNLOCK))
- mysql_unlock_some_tables(thd, table, const_tables);
+ {
+ /*
+ Unlock all tables, except sequences, as accessing these may still
+ require table updates
+ */
+ mysql_unlock_some_tables(thd, table, const_tables,
+ GET_LOCK_SKIP_SEQUENCES);
+ }
if (!conds && outer_join)
{
/* Handle the case where we have an OUTER JOIN without a WHERE */
@@ -1649,7 +2069,7 @@ JOIN::optimize_inner()
select= make_select(*table, const_table_map,
const_table_map, conds, (SORT_INFO*) 0, 1, &error);
- if (error)
+ if (unlikely(error))
{ /* purecov: inspected */
error= -1; /* purecov: inspected */
DBUG_PRINT("error",("Error: make_select() failed"));
@@ -1672,7 +2092,7 @@ JOIN::optimize_inner()
{
conds= substitute_for_best_equal_field(thd, NO_PARTICULAR_TAB, conds,
cond_equal, map2table);
- if (thd->is_error())
+ if (unlikely(thd->is_error()))
{
error= 1;
DBUG_PRINT("error",("Error from substitute_for_best_equal"));
@@ -1698,7 +2118,7 @@ JOIN::optimize_inner()
*tab->on_expr_ref,
tab->cond_equal,
map2table);
- if (thd->is_error())
+ if (unlikely(thd->is_error()))
{
error= 1;
DBUG_PRINT("error",("Error from substitute_for_best_equal"));
@@ -1728,6 +2148,9 @@ JOIN::optimize_inner()
{
ref_item= substitute_for_best_equal_field(thd, tab, ref_item,
equals, map2table);
+ if (unlikely(thd->is_fatal_error))
+ DBUG_RETURN(1);
+
if (first_inner)
{
equals= first_inner->cond_equal;
@@ -1796,7 +2219,7 @@ JOIN::optimize_inner()
{
ORDER *org_order= order;
order=remove_const(this, order,conds,1, &simple_order);
- if (thd->is_error())
+ if (unlikely(thd->is_error()))
{
error= 1;
DBUG_RETURN(1);
@@ -1958,7 +2381,7 @@ JOIN::optimize_inner()
group_list= remove_const(this, group_list, conds,
rollup.state == ROLLUP::STATE_NONE,
&simple_group);
- if (thd->is_error())
+ if (unlikely(thd->is_error()))
{
error= 1;
DBUG_RETURN(1);
@@ -1979,7 +2402,7 @@ JOIN::optimize_inner()
{
group_list= procedure->group= remove_const(this, procedure->group, conds,
1, &simple_group);
- if (thd->is_error())
+ if (unlikely(thd->is_error()))
{
error= 1;
DBUG_RETURN(1);
@@ -2004,7 +2427,8 @@ JOIN::optimize_inner()
FORCE INDEX FOR ORDER BY can be used to prevent join buffering when
sorting on the first table.
*/
- if (!stable || !stable->force_index_order)
+ if (!stable || (!stable->force_index_order &&
+ !map2table[stable->tablenr]->keep_current_rowid))
{
if (group_list)
simple_group= 0;
@@ -2045,7 +2469,8 @@ JOIN::optimize_inner()
/* Perform FULLTEXT search before all regular searches */
if (!(select_options & SELECT_DESCRIBE))
- init_ftfuncs(thd, select_lex, MY_TEST(order));
+ if (init_ftfuncs(thd, select_lex, MY_TEST(order)))
+ DBUG_RETURN(1);
/*
It's necessary to check const part of HAVING cond as
@@ -2209,7 +2634,7 @@ JOIN::optimize_inner()
ordered_index_usage= ordered_index_order_by;
}
}
- }
+ }
if (having)
having_is_correlated= MY_TEST(having->used_tables() & OUTER_REF_TABLE_BIT);
@@ -2346,10 +2771,10 @@ bool JOIN::add_having_as_table_cond(JOIN_TAB *tab)
sort_table_cond)))
DBUG_RETURN(true);
}
- if (tab->select->cond && !tab->select->cond->fixed)
- tab->select->cond->fix_fields(thd, 0);
- if (tab->pre_idx_push_select_cond && !tab->pre_idx_push_select_cond->fixed)
- tab->pre_idx_push_select_cond->fix_fields(thd, 0);
+ if (tab->select->cond)
+ tab->select->cond->fix_fields_if_needed(thd, 0);
+ if (tab->pre_idx_push_select_cond)
+ tab->pre_idx_push_select_cond->fix_fields_if_needed(thd, 0);
tab->select->pre_idx_push_select_cond= tab->pre_idx_push_select_cond;
tab->set_select_cond(tab->select->cond, __LINE__);
tab->select_cond->top_level_item();
@@ -2367,6 +2792,25 @@ bool JOIN::add_having_as_table_cond(JOIN_TAB *tab)
}
+bool JOIN::add_fields_for_current_rowid(JOIN_TAB *cur, List<Item> *table_fields)
+{
+ /*
+ this will not walk into semi-join materialization nests but this is ok
+ because we will never need to save current rowids for those.
+ */
+ for (JOIN_TAB *tab=join_tab; tab < cur; tab++)
+ {
+ if (!tab->keep_current_rowid)
+ continue;
+ Item *item= new (thd->mem_root) Item_temptable_rowid(tab->table);
+ item->fix_fields(thd, 0);
+ table_fields->push_back(item, thd->mem_root);
+ cur->tmp_table_param->func_count++;
+ }
+ return 0;
+}
+
+
/**
Set info for aggregation tables
@@ -2422,20 +2866,22 @@ bool JOIN::make_aggr_tables_info()
/*
All optimization is done. Check if we can use the storage engines
- group by handler to evaluate the group by
+ group by handler to evaluate the group by.
+    Some storage engines, like Spider, can also do joins, group by and
+ distinct in the engine, so we do this for all queries, not only
+ GROUP BY queries.
*/
- if (tables_list && (tmp_table_param.sum_func_count || group_list) &&
- !procedure)
+ if (tables_list && top_join_tab_count && !procedure)
{
/*
At the moment we only support push down for queries where
all tables are in the same storage engine
*/
TABLE_LIST *tbl= tables_list;
- handlerton *ht= tbl && tbl->table ? tbl->table->file->ht : 0;
+ handlerton *ht= tbl && tbl->table ? tbl->table->file->partition_ht() : 0;
for (tbl= tbl->next_local; ht && tbl; tbl= tbl->next_local)
{
- if (!tbl->table || tbl->table->file->ht != ht)
+ if (!tbl->table || tbl->table->file->partition_ht() != ht)
ht= 0;
}
@@ -2448,7 +2894,8 @@ bool JOIN::make_aggr_tables_info()
if (gbh)
{
- pushdown_query= new (thd->mem_root) Pushdown_query(select_lex, gbh);
+ if (!(pushdown_query= new (thd->mem_root) Pushdown_query(select_lex, gbh)))
+ DBUG_RETURN(1);
/*
We must store rows in the tmp table if we need to do an ORDER BY
or DISTINCT and the storage handler can't handle it.
@@ -2465,17 +2912,19 @@ bool JOIN::make_aggr_tables_info()
curr_tab->ref.key= -1;
curr_tab->join= this;
- curr_tab->tmp_table_param= new TMP_TABLE_PARAM(tmp_table_param);
+ if (!(curr_tab->tmp_table_param= new TMP_TABLE_PARAM(tmp_table_param)))
+ DBUG_RETURN(1);
TABLE* table= create_tmp_table(thd, curr_tab->tmp_table_param,
all_fields,
NULL, query.distinct,
TRUE, select_options, HA_POS_ERROR,
- "", !need_tmp,
+ &empty_clex_str, !need_tmp,
query.order_by || query.group_by);
if (!table)
DBUG_RETURN(1);
- curr_tab->aggr= new (thd->mem_root) AGGR_OP(curr_tab);
+ if (!(curr_tab->aggr= new (thd->mem_root) AGGR_OP(curr_tab)))
+ DBUG_RETURN(1);
curr_tab->aggr->set_write_func(::end_send);
curr_tab->table= table;
/*
@@ -2668,13 +3117,13 @@ bool JOIN::make_aggr_tables_info()
(select_distinct && tmp_table_param.using_outer_summary_function))
{ /* Must copy to another table */
DBUG_PRINT("info",("Creating group table"));
-
+
calc_group_buffer(this, group_list);
count_field_types(select_lex, &tmp_table_param, tmp_all_fields1,
select_distinct && !group_list);
- tmp_table_param.hidden_field_count=
+ tmp_table_param.hidden_field_count=
tmp_all_fields1.elements - tmp_fields_list1.elements;
-
+
curr_tab++;
aggr_tables++;
bzero((void*)curr_tab, sizeof(JOIN_TAB));
@@ -2689,12 +3138,11 @@ bool JOIN::make_aggr_tables_info()
if (join_tab->is_using_loose_index_scan())
tmp_table_param.precomputed_group_by= TRUE;
- tmp_table_param.hidden_field_count=
+ tmp_table_param.hidden_field_count=
curr_all_fields->elements - curr_fields_list->elements;
ORDER *dummy= NULL; //TODO can use table->group here also
- if (create_postjoin_aggr_table(curr_tab,
- curr_all_fields, dummy, true,
+ if (create_postjoin_aggr_table(curr_tab, curr_all_fields, dummy, true,
distinct, keep_row_order))
DBUG_RETURN(true);
@@ -2830,7 +3278,7 @@ bool JOIN::make_aggr_tables_info()
!join_tab ||
!join_tab-> is_using_agg_loose_index_scan()))
DBUG_RETURN(true);
- if (setup_sum_funcs(thd, sum_funcs) || thd->is_fatal_error)
+ if (unlikely(setup_sum_funcs(thd, sum_funcs) || thd->is_fatal_error))
DBUG_RETURN(true);
}
if (group_list || order)
@@ -2923,13 +3371,16 @@ bool JOIN::make_aggr_tables_info()
curr_tab= join_tab + total_join_tab_cnt();
if (select_lex->window_funcs.elements)
{
- curr_tab->window_funcs_step= new Window_funcs_computation;
+ if (!(curr_tab->window_funcs_step= new Window_funcs_computation))
+ DBUG_RETURN(true);
if (curr_tab->window_funcs_step->setup(thd, &select_lex->window_funcs,
curr_tab))
DBUG_RETURN(true);
/* Count that we're using window functions. */
status_var_increment(thd->status_var.feature_window_functions);
}
+ if (select_lex->custom_agg_func_used())
+ status_var_increment(thd->status_var.feature_custom_aggregate_functions);
fields= curr_fields_list;
// Reset before execution
@@ -2962,15 +3413,18 @@ JOIN::create_postjoin_aggr_table(JOIN_TAB *tab, List<Item> *table_fields,
*/
ha_rows table_rows_limit= ((order == NULL || skip_sort_order) &&
!table_group &&
- !select_lex->with_sum_func) ?
- select_limit : HA_POS_ERROR;
+ !select_lex->with_sum_func) ? select_limit
+ : HA_POS_ERROR;
- tab->tmp_table_param= new TMP_TABLE_PARAM(tmp_table_param);
+ if (!(tab->tmp_table_param= new TMP_TABLE_PARAM(tmp_table_param)))
+ DBUG_RETURN(true);
+ if (tmp_table_keep_current_rowid)
+ add_fields_for_current_rowid(tab, table_fields);
tab->tmp_table_param->skip_create_table= true;
TABLE* table= create_tmp_table(thd, tab->tmp_table_param, *table_fields,
table_group, distinct,
save_sum_fields, select_options, table_rows_limit,
- "", true, keep_row_order);
+ &empty_clex_str, true, keep_row_order);
if (!table)
DBUG_RETURN(true);
tmp_table_param.using_outer_summary_function=
@@ -2979,8 +3433,7 @@ JOIN::create_postjoin_aggr_table(JOIN_TAB *tab, List<Item> *table_fields,
DBUG_ASSERT(tab > tab->join->join_tab || !top_join_tab_count || !tables_list);
if (tab > join_tab)
(tab - 1)->next_select= sub_select_postjoin_aggr;
- tab->aggr= new (thd->mem_root) AGGR_OP(tab);
- if (!tab->aggr)
+ if (!(tab->aggr= new (thd->mem_root) AGGR_OP(tab)))
goto err;
tab->table= table;
table->reginfo.join_tab= tab;
@@ -3133,33 +3586,42 @@ bool JOIN::setup_subquery_caches()
select_lex->expr_cache_may_be_used[IN_ON] ||
select_lex->expr_cache_may_be_used[NO_MATTER])
{
- if (conds)
- conds= conds->transform(thd, &Item::expr_cache_insert_transformer,
- NULL);
JOIN_TAB *tab;
+ if (conds &&
+ !(conds= conds->transform(thd, &Item::expr_cache_insert_transformer,
+ NULL)))
+ DBUG_RETURN(TRUE);
for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
tab; tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS))
{
- if (tab->select_cond)
- tab->select_cond=
- tab->select_cond->transform(thd, &Item::expr_cache_insert_transformer,
- NULL);
+ if (tab->select_cond &&
+ !(tab->select_cond=
+ tab->select_cond->transform(thd,
+ &Item::expr_cache_insert_transformer,
+ NULL)))
+ DBUG_RETURN(TRUE);
if (tab->cache_select && tab->cache_select->cond)
- tab->cache_select->cond=
- tab->cache_select->
- cond->transform(thd, &Item::expr_cache_insert_transformer,
- NULL);
-
+ if (!(tab->cache_select->cond=
+ tab->cache_select->
+ cond->transform(thd, &Item::expr_cache_insert_transformer,
+ NULL)))
+ DBUG_RETURN(TRUE);
}
- if (having)
- having= having->transform(thd, &Item::expr_cache_insert_transformer,
- NULL);
+ if (having &&
+ !(having= having->transform(thd,
+ &Item::expr_cache_insert_transformer,
+ NULL)))
+ DBUG_RETURN(TRUE);
+
if (tmp_having)
{
DBUG_ASSERT(having == NULL);
- tmp_having= tmp_having->transform(thd, &Item::expr_cache_insert_transformer,
- NULL);
+ if (!(tmp_having=
+ tmp_having->transform(thd,
+ &Item::expr_cache_insert_transformer,
+ NULL)))
+ DBUG_RETURN(TRUE);
}
}
if (select_lex->expr_cache_may_be_used[SELECT_LIST] ||
@@ -3170,9 +3632,11 @@ bool JOIN::setup_subquery_caches()
Item *item;
while ((item= li++))
{
- Item *new_item=
- item->transform(thd, &Item::expr_cache_insert_transformer,
- NULL);
+ Item *new_item;
+ if (!(new_item=
+ item->transform(thd, &Item::expr_cache_insert_transformer,
+ NULL)))
+ DBUG_RETURN(TRUE);
if (new_item != item)
{
thd->change_item_tree(li.ref(), new_item);
@@ -3180,18 +3644,22 @@ bool JOIN::setup_subquery_caches()
}
for (ORDER *tmp_group= group_list; tmp_group ; tmp_group= tmp_group->next)
{
- *tmp_group->item=
- (*tmp_group->item)->transform(thd, &Item::expr_cache_insert_transformer,
- NULL);
+ if (!(*tmp_group->item=
+ (*tmp_group->item)->transform(thd,
+ &Item::expr_cache_insert_transformer,
+ NULL)))
+ DBUG_RETURN(TRUE);
}
}
if (select_lex->expr_cache_may_be_used[NO_MATTER])
{
for (ORDER *ord= order; ord; ord= ord->next)
{
- *ord->item=
- (*ord->item)->transform(thd, &Item::expr_cache_insert_transformer,
- NULL);
+ if (!(*ord->item=
+ (*ord->item)->transform(thd,
+ &Item::expr_cache_insert_transformer,
+ NULL)))
+ DBUG_RETURN(TRUE);
}
}
DBUG_RETURN(FALSE);
@@ -3319,7 +3787,8 @@ JOIN::reinit()
}
if (!(select_options & SELECT_DESCRIBE))
- init_ftfuncs(thd, select_lex, MY_TEST(order));
+ if (init_ftfuncs(thd, select_lex, MY_TEST(order)))
+ DBUG_RETURN(1);
DBUG_RETURN(0);
}
@@ -3346,7 +3815,7 @@ bool JOIN::prepare_result(List<Item> **columns_list)
select_lex->handle_derived(thd->lex, DT_CREATE))
goto err;
- if (result->prepare2())
+ if (result->prepare2(this))
goto err;
if ((select_lex->options & OPTION_SCHEMA_TABLE) &&
@@ -3361,7 +3830,14 @@ err:
}
-void JOIN::save_explain_data(Explain_query *output, bool can_overwrite,
+/**
+ @retval
+ 0 ok
+ 1 error
+*/
+
+
+bool JOIN::save_explain_data(Explain_query *output, bool can_overwrite,
bool need_tmp_table, bool need_order,
bool distinct)
{
@@ -3369,7 +3845,7 @@ void JOIN::save_explain_data(Explain_query *output, bool can_overwrite,
If there is SELECT in this statement with the same number it must be the
same SELECT
*/
- DBUG_ASSERT(select_lex->select_number == UINT_MAX ||
+ DBUG_SLOW_ASSERT(select_lex->select_number == UINT_MAX ||
select_lex->select_number == INT_MAX ||
!output ||
!output->get_select(select_lex->select_number) ||
@@ -3391,9 +3867,8 @@ void JOIN::save_explain_data(Explain_query *output, bool can_overwrite,
/* It's a degenerate join */
message= zero_result_cause ? zero_result_cause : "No tables used";
}
- save_explain_data_intern(thd->lex->explain, need_tmp_table, need_order,
- distinct, message);
- return;
+ return save_explain_data_intern(thd->lex->explain, need_tmp_table, need_order,
+ distinct, message);
}
/*
@@ -3413,11 +3888,13 @@ void JOIN::save_explain_data(Explain_query *output, bool can_overwrite,
{
if (join_tab[i].filesort)
{
- join_tab[i].filesort->tracker=
- new Filesort_tracker(thd->lex->analyze_stmt);
+ if (!(join_tab[i].filesort->tracker=
+ new Filesort_tracker(thd->lex->analyze_stmt)))
+ return 1;
}
}
}
+ return 0;
}
@@ -3475,7 +3952,7 @@ void JOIN::exec_inner()
}
columns_list= &procedure_fields_list;
}
- if (result->prepare2())
+ if (result->prepare2(this))
DBUG_VOID_RETURN;
if (!tables_list && (table_count || !select_lex->with_sum_func) &&
@@ -3519,7 +3996,7 @@ void JOIN::exec_inner()
}
else
send_records= 0;
- if (!error)
+ if (likely(!error))
{
join_free(); // Unlock all cursors
error= (int) result->send_eof();
@@ -3545,7 +4022,7 @@ void JOIN::exec_inner()
/*
We've called exec_const_cond->val_int(). This may have caused an error.
*/
- if (thd->is_error())
+ if (unlikely(thd->is_error()))
{
error= thd->is_error();
DBUG_VOID_RETURN;
@@ -3590,7 +4067,7 @@ void JOIN::exec_inner()
while ((cur_const_item= const_item_it++))
{
cur_const_item->val_str(); // This caches val_str() to Item::str_value
- if (thd->is_error())
+ if (unlikely(thd->is_error()))
{
error= thd->is_error();
DBUG_VOID_RETURN;
@@ -3624,7 +4101,7 @@ void JOIN::exec_inner()
join_examined_rows= 0;
/* XXX: When can we have here thd->is_error() not zero? */
- if (thd->is_error())
+ if (unlikely(thd->is_error()))
{
error= thd->is_error();
DBUG_VOID_RETURN;
@@ -3635,7 +4112,8 @@ void JOIN::exec_inner()
result->send_result_set_metadata(
procedure ? procedure_fields_list : *fields,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF);
- error= do_select(this, procedure);
+
+ error= result->view_structure_only() ? false : do_select(this, procedure);
/* Accumulate the counts from all join iterations of all join parts. */
thd->inc_examined_row_count(join_examined_rows);
DBUG_PRINT("counts", ("thd->examined_row_count: %lu",
@@ -3684,7 +4162,11 @@ JOIN::destroy()
cleanup_item_list(tmp_all_fields1);
cleanup_item_list(tmp_all_fields3);
destroy_sj_tmp_tables(this);
- delete_dynamic(&keyuse);
+ delete_dynamic(&keyuse);
+ if (save_qep)
+ delete(save_qep);
+ if (ext_keyuses_for_splitting)
+ delete(ext_keyuses_for_splitting);
delete procedure;
DBUG_RETURN(error);
}
@@ -3827,7 +4309,7 @@ mysql_select(THD *thd,
join->having_history= (join->having?join->having:join->tmp_having);
}
- if (thd->is_error())
+ if (unlikely(thd->is_error()))
goto err;
join->exec();
@@ -3842,7 +4324,7 @@ err:
if (free_join)
{
THD_STAGE_INFO(thd, stage_end);
- err|= select_lex->cleanup();
+ err|= (int)(select_lex->cleanup());
DBUG_RETURN(err || thd->is_error());
}
DBUG_RETURN(join->error ? join->error: err);
@@ -3861,17 +4343,20 @@ static ha_rows get_quick_record_count(THD *thd, SQL_SELECT *select,
int error;
DBUG_ENTER("get_quick_record_count");
uchar buff[STACK_BUFF_ALLOC];
- if (check_stack_overrun(thd, STACK_MIN_SIZE, buff))
+ if (unlikely(check_stack_overrun(thd, STACK_MIN_SIZE, buff)))
DBUG_RETURN(0); // Fatal error flag is set
if (select)
{
select->head=table;
table->reginfo.impossible_range=0;
- if ((error= select->test_quick_select(thd, *(key_map *)keys,(table_map) 0,
- limit, 0, FALSE,
- TRUE /* remove_where_parts*/)) == 1)
+ if (likely((error=
+ select->test_quick_select(thd, *(key_map *)keys,
+ (table_map) 0,
+ limit, 0, FALSE,
+ TRUE /* remove_where_parts*/)) ==
+ 1))
DBUG_RETURN(select->quick->records);
- if (error == -1)
+ if (unlikely(error == -1))
{
table->reginfo.impossible_range=1;
DBUG_RETURN(0);
@@ -3896,6 +4381,84 @@ struct SARGABLE_PARAM
};
+/*
+ Mark all tables inside a join nest as constant.
+
+ @detail This is called when there is a local "Impossible WHERE" inside
+ a multi-table LEFT JOIN.
+*/
+
+void mark_join_nest_as_const(JOIN *join,
+ TABLE_LIST *join_nest,
+ table_map *found_const_table_map,
+ uint *const_count)
+{
+ List_iterator<TABLE_LIST> it(join_nest->nested_join->join_list);
+ TABLE_LIST *tbl;
+ while ((tbl= it++))
+ {
+ if (tbl->nested_join)
+ {
+ mark_join_nest_as_const(join, tbl, found_const_table_map, const_count);
+ continue;
+ }
+ JOIN_TAB *tab= tbl->table->reginfo.join_tab;
+
+ if (!(join->const_table_map & tab->table->map))
+ {
+ tab->type= JT_CONST;
+ tab->info= ET_IMPOSSIBLE_ON_CONDITION;
+ tab->table->const_table= 1;
+
+ join->const_table_map|= tab->table->map;
+ *found_const_table_map|= tab->table->map;
+ set_position(join,(*const_count)++,tab,(KEYUSE*) 0);
+ mark_as_null_row(tab->table); // All fields are NULL
+ }
+ }
+}
+
+
+/*
+ @brief Get the condition that can be used to do range analysis/partition
+ pruning/etc
+
+ @detail
+ Figure out which condition we can use:
+ - For INNER JOIN, we use the WHERE,
+ - "t1 LEFT JOIN t2 ON ..." uses t2's ON expression
+ - "t1 LEFT JOIN (...) ON ..." uses the join nest's ON expression.
+*/
+
+static Item **get_sargable_cond(JOIN *join, TABLE *table)
+{
+ Item **retval;
+ if (table->pos_in_table_list->on_expr)
+ {
+ /*
+ This is an inner table from a single-table LEFT JOIN, "t1 LEFT JOIN
+ t2 ON cond". Use the condition cond.
+ */
+ retval= &table->pos_in_table_list->on_expr;
+ }
+ else if (table->pos_in_table_list->embedding &&
+ !table->pos_in_table_list->embedding->sj_on_expr)
+ {
+ /*
+ This is the inner side of a multi-table outer join. Use the
+ appropriate ON expression.
+ */
+ retval= &(table->pos_in_table_list->embedding->on_expr);
+ }
+ else
+ {
+ /* The table is not inner wrt some LEFT JOIN. Use the WHERE clause */
+ retval= &join->conds;
+ }
+ return retval;
+}
+
+
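A toy model of the choice made by get_sargable_cond() above, covering the three query shapes from its comment; the types and return strings are illustrative only.

#include <string>

struct toy_table
{
  bool has_own_on_expr;     // "t1 LEFT JOIN t2 ON ..."       : t2 carries the ON
  bool inner_side_of_nest;  // "t1 LEFT JOIN (t2, t3) ON ..." : nest carries the ON
};

std::string sargable_cond_source(const toy_table &t)
{
  if (t.has_own_on_expr)
    return "the table's own ON expression";
  if (t.inner_side_of_nest)
    return "the embedding join nest's ON expression";
  return "the WHERE clause";   // inner join, or outer side of an outer join
}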
/**
Calculate the best possible join and initialize the join structure.
@@ -3978,7 +4541,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
DBUG_EXECUTE_IF("bug11747970_raise_error",
{ join->thd->set_killed(KILL_QUERY_HARD); });
- if (error)
+ if (unlikely(error))
{
table->file->print_error(error, MYF(0));
goto error;
@@ -4214,6 +4777,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
keyuse->val->is_null() && keyuse->null_rejecting)
{
s->type= JT_CONST;
+ s->table->const_table= 1;
mark_as_null_row(table);
found_const_table_map|= table->map;
join->const_table_map|= table->map;
@@ -4319,6 +4883,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
s->type= JT_CONST;
join->const_table_map|=table->map;
set_position(join,const_count++,s,start_keyuse);
+ /* create_ref_for_key will set s->table->const_table */
if (create_ref_for_key(join, s, start_keyuse, FALSE,
found_const_table_map))
goto error;
@@ -4442,6 +5007,9 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
s->scan_time();
}
+ if (s->table->is_splittable())
+ s->add_keyuses_for_splitting();
+
/*
Set a max range of how many seeks we can expect when using keys
This is can't be to high as otherwise we are likely to use
@@ -4462,39 +5030,38 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
/*
Perform range analysis if there are keys it could use (1).
- Don't do range analysis if we're on the inner side of an outer join (2).
- Do range analysis if we're on the inner side of a semi-join (3).
- Don't do range analysis for materialized subqueries (4).
- Don't do range analysis for materialized derived tables (5)
+ Don't do range analysis for materialized subqueries (2).
+ Don't do range analysis for materialized derived tables (3)
*/
if ((!s->const_keys.is_clear_all() ||
!bitmap_is_clear_all(&s->table->cond_set)) && // (1)
- (!s->table->pos_in_table_list->embedding || // (2)
- (s->table->pos_in_table_list->embedding && // (3)
- s->table->pos_in_table_list->embedding->sj_on_expr)) && // (3)
- !s->table->is_filled_at_execution() && // (4)
- !(s->table->pos_in_table_list->derived && // (5)
- s->table->pos_in_table_list->is_materialized_derived())) // (5)
+ !s->table->is_filled_at_execution() && // (2)
+ !(s->table->pos_in_table_list->derived && // (3)
+ s->table->pos_in_table_list->is_materialized_derived())) // (3)
{
bool impossible_range= FALSE;
ha_rows records= HA_POS_ERROR;
SQL_SELECT *select= 0;
+ Item **sargable_cond= NULL;
if (!s->const_keys.is_clear_all())
{
+ sargable_cond= get_sargable_cond(join, s->table);
+
select= make_select(s->table, found_const_table_map,
found_const_table_map,
- *s->on_expr_ref ? *s->on_expr_ref : join->conds,
+ *sargable_cond,
(SORT_INFO*) 0,
1, &error);
if (!select)
goto error;
records= get_quick_record_count(join->thd, select, s->table,
&s->const_keys, join->row_limit);
- /* Range analyzer could modify the condition. */
- if (*s->on_expr_ref)
- *s->on_expr_ref= select->cond;
- else
- join->conds= select->cond;
+
+ /*
+          Range analyzer might have modified the condition. Put the new
+          condition back where we got it from.
+ */
+ *sargable_cond= select->cond;
s->quick=select->quick;
s->needed_reg=select->needed_reg;
@@ -4503,10 +5070,11 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
}
if (!impossible_range)
{
+ if (!sargable_cond)
+ sargable_cond= get_sargable_cond(join, s->table);
if (join->thd->variables.optimizer_use_condition_selectivity > 1)
calculate_cond_selectivity_for_table(join->thd, s->table,
- *s->on_expr_ref ?
- s->on_expr_ref : &join->conds);
+ sargable_cond);
if (s->table->reginfo.impossible_range)
{
impossible_range= TRUE;
@@ -4515,23 +5083,33 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
}
if (impossible_range)
{
- /*
- Impossible WHERE or ON expression
- In case of ON, we mark that the we match one empty NULL row.
- In case of WHERE, don't set found_const_table_map to get the
- caller to abort with a zero row result.
- */
- join->const_table_map|= s->table->map;
- set_position(join,const_count++,s,(KEYUSE*) 0);
- s->type= JT_CONST;
- if (*s->on_expr_ref)
- {
- /* Generate empty row */
- s->info= ET_IMPOSSIBLE_ON_CONDITION;
- found_const_table_map|= s->table->map;
- s->type= JT_CONST;
- mark_as_null_row(s->table); // All fields are NULL
- }
+ /*
+ Impossible WHERE or ON expression
+          In case of ON, we mark that we match one empty NULL row.
+ In case of WHERE, don't set found_const_table_map to get the
+ caller to abort with a zero row result.
+ */
+ TABLE_LIST *emb= s->table->pos_in_table_list->embedding;
+ if (emb && !emb->sj_on_expr)
+ {
+ /* Mark all tables in a multi-table join nest as const */
+ mark_join_nest_as_const(join, emb, &found_const_table_map,
+ &const_count);
+ }
+ else
+ {
+ join->const_table_map|= s->table->map;
+ set_position(join,const_count++,s,(KEYUSE*) 0);
+ s->type= JT_CONST;
+ s->table->const_table= 1;
+ if (*s->on_expr_ref)
+ {
+ /* Generate empty row */
+ s->info= ET_IMPOSSIBLE_ON_CONDITION;
+ found_const_table_map|= s->table->map;
+ mark_as_null_row(s->table); // All fields are NULL
+ }
+ }
}
if (records != HA_POS_ERROR)
{
@@ -4619,8 +5197,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
DEBUG_SYNC(join->thd, "inside_make_join_statistics");
- /* Generate an execution plan from the found optimal join order. */
- DBUG_RETURN(join->thd->check_killed() || join->get_best_combination());
+ DBUG_RETURN(0);
error:
/*
@@ -4647,23 +5224,6 @@ error:
keyuse Pointer to possible keys
*****************************************************************************/
-/// Used when finding key fields
-struct KEY_FIELD {
- Field *field;
- Item_bool_func *cond;
- Item *val; ///< May be empty if diff constant
- uint level;
- uint optimize;
- bool eq_func;
- /**
- If true, the condition this struct represents will not be satisfied
- when val IS NULL.
- */
- bool null_rejecting;
- bool *cond_guard; /* See KEYUSE::cond_guard */
- uint sj_pred_no; /* See KEYUSE::sj_pred_no */
-};
-
/**
Merge new key definitions to old ones, remove those not used in both.
@@ -5029,18 +5589,16 @@ add_key_field(JOIN *join,
(*key_fields)->level= and_level;
(*key_fields)->optimize= optimize;
/*
- If the condition has form "tbl.keypart = othertbl.field" and
- othertbl.field can be NULL, there will be no matches if othertbl.field
- has NULL value.
- We use null_rejecting in add_not_null_conds() to add
- 'othertbl.field IS NOT NULL' to tab->select_cond.
+ If the condition we are analyzing is NULL-rejecting and at least
+ one side of the equalities is NULLable, mark the KEY_FIELD object as
+ null-rejecting. This property is used by:
+ - add_not_null_conds() to add "column IS NOT NULL" conditions
+ - best_access_path() to produce better estimates for NULL-able unique keys.
*/
{
- Item *real= (*value)->real_item();
- if (((cond->functype() == Item_func::EQ_FUNC) ||
- (cond->functype() == Item_func::MULT_EQUAL_FUNC)) &&
- (real->type() == Item::FIELD_ITEM) &&
- ((Item_field*)real)->field->maybe_null())
+ if ((cond->functype() == Item_func::EQ_FUNC ||
+ cond->functype() == Item_func::MULT_EQUAL_FUNC) &&
+ ((*value)->maybe_null || field->real_maybe_null()))
(*key_fields)->null_rejecting= true;
else
(*key_fields)->null_rejecting= false;
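As a concrete reading of the rule above: for "t1.key = t2.col" with a NULLable t2.col, no row with t2.col IS NULL can match, so add_not_null_conds() may attach "t2.col IS NOT NULL" and best_access_path() may assume NULL-free lookups. A standalone sketch of the predicate with toy names, assuming (as the new condition does) that only plain equalities and multiple equalities qualify:

#include <cassert>

enum func_type { EQ_FUNC, MULT_EQUAL_FUNC, OTHER_FUNC };

// At least one side must be able to be NULL for the marking to matter.
bool is_null_rejecting(func_type f, bool value_maybe_null,
                       bool field_maybe_null)
{
  return (f == EQ_FUNC || f == MULT_EQUAL_FUNC) &&
         (value_maybe_null || field_maybe_null);
}

int main()
{
  assert(is_null_rejecting(EQ_FUNC, /*value*/ true, /*field*/ false));
  assert(!is_null_rejecting(OTHER_FUNC, true, true));
  return 0;
}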
@@ -5202,7 +5760,7 @@ Item_func_trig_cond::add_key_fields(JOIN *join, KEY_FIELD **key_fields,
if (!join->group_list && !join->order &&
join->unit->item &&
join->unit->item->substype() == Item_subselect::IN_SUBS &&
- !join->unit->is_union())
+ !join->unit->is_unit_op())
{
KEY_FIELD *save= *key_fields;
args[0]->add_key_fields(join, key_fields, and_level, usable_tables,
@@ -5314,7 +5872,7 @@ Item_func_ne::add_key_fields(JOIN *join, KEY_FIELD **key_fields,
/*
QQ: perhaps test for !is_local_field(args[1]) is not really needed here.
Other comparison functions, e.g. Item_func_le, Item_func_gt, etc,
- do not have this test. See Item_bool_func2::add_key_field_optimize_op().
+ do not have this test. See Item_bool_func2::add_key_fieldoptimize_op().
Check with the optimizer team.
*/
if (is_local_field(args[0]) && !is_local_field(args[1]))
@@ -5497,6 +6055,7 @@ add_keyuse(DYNAMIC_ARRAY *keyuse_array, KEY_FIELD *key_field,
keyuse.null_rejecting= key_field->null_rejecting;
keyuse.cond_guard= key_field->cond_guard;
keyuse.sj_pred_no= key_field->sj_pred_no;
+ keyuse.validity_ref= 0;
return (insert_dynamic(keyuse_array,(uchar*) &keyuse));
}
@@ -5542,7 +6101,9 @@ add_key_part(DYNAMIC_ARRAY *keyuse_array, KEY_FIELD *key_field)
key_field->val->used_tables())
{
if (!field->can_optimize_hash_join(key_field->cond, key_field->val))
- return false;
+ return false;
+ if (form->is_splittable())
+ form->add_splitting_info_for_key_field(key_field);
/*
If a key use is extracted from an equi-join predicate then it is
added not only as a key use for every index whose component can
@@ -5556,7 +6117,6 @@ add_key_part(DYNAMIC_ARRAY *keyuse_array, KEY_FIELD *key_field)
return FALSE;
}
-
static bool
add_ft_keys(DYNAMIC_ARRAY *keyuse_array,
JOIN_TAB *stat,COND *cond,table_map usable_tables)
@@ -5618,6 +6178,7 @@ add_ft_keys(DYNAMIC_ARRAY *keyuse_array,
keyuse.optimize= 0;
keyuse.keypart_map= 0;
keyuse.sj_pred_no= UINT_MAX;
+ keyuse.validity_ref= 0;
return insert_dynamic(keyuse_array,(uchar*) &keyuse);
}
@@ -5905,8 +6466,8 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
Special treatment for ft-keys.
*/
-static bool sort_and_filter_keyuse(THD *thd, DYNAMIC_ARRAY *keyuse,
- bool skip_unprefixed_keyparts)
+bool sort_and_filter_keyuse(THD *thd, DYNAMIC_ARRAY *keyuse,
+ bool skip_unprefixed_keyparts)
{
KEYUSE key_end, *prev, *save_pos, *use;
uint found_eq_constant, i;
@@ -5974,7 +6535,7 @@ static bool sort_and_filter_keyuse(THD *thd, DYNAMIC_ARRAY *keyuse,
Update some values in keyuse for faster choose_plan() loop.
*/
-static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array)
+void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array)
{
KEYUSE *end,*keyuse= dynamic_element(keyuse_array, 0, KEYUSE*);
@@ -6016,7 +6577,6 @@ static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array)
}
-
/**
Check for the presence of AGGFN(DISTINCT a) queries that may be subject
to loose index scan.
@@ -6212,6 +6772,7 @@ void set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key)
next=tmp;
}
join->best_ref[idx]=table;
+ join->positions[idx].spl_plan= 0;
}
@@ -6325,6 +6886,7 @@ best_access_path(JOIN *join,
bool best_uses_jbuf= FALSE;
MY_BITMAP *eq_join_set= &s->table->eq_join_set;
KEYUSE *hj_start_key= 0;
+ SplM_plan_info *spl_plan= 0;
disable_jbuf= disable_jbuf || idx == join->const_tables;
@@ -6334,7 +6896,10 @@ best_access_path(JOIN *join,
bitmap_clear_all(eq_join_set);
loose_scan_opt.init(join, s, remaining_tables);
-
+
+ if (s->table->is_splittable())
+ spl_plan= s->choose_best_splitting(record_count, remaining_tables);
+
if (s->keyuse)
{ /* Use key if possible */
KEYUSE *keyuse;
@@ -6351,6 +6916,7 @@ best_access_path(JOIN *join,
ulong key_flags;
uint key_parts;
key_part_map found_part= 0;
+ key_part_map notnull_part=0; // key parts which won't have NULL in lookup tuple.
table_map found_ref= 0;
uint key= keyuse->key;
bool ft_key= (keyuse->keypart == FT_KEYPART);
@@ -6384,7 +6950,7 @@ best_access_path(JOIN *join,
loose_scan_opt.next_ref_key();
DBUG_PRINT("info", ("Considering ref access on key %s",
- keyuse->table->key_info[keyuse->key].name));
+ keyuse->table->key_info[keyuse->key].name.str));
do /* For each keypart */
{
@@ -6399,6 +6965,7 @@ best_access_path(JOIN *join,
2. we won't get two ref-or-null's
*/
if (!(remaining_tables & keyuse->used_tables) &&
+ (!keyuse->validity_ref || *keyuse->validity_ref) &&
s->access_from_tables_is_allowed(keyuse->used_tables,
join->sjm_lookup_tables) &&
!(ref_or_null_part && (keyuse->optimize &
@@ -6408,6 +6975,9 @@ best_access_path(JOIN *join,
if (!(keyuse->used_tables & ~join->const_table_map))
const_part|= keyuse->keypart_map;
+ if (!keyuse->val->maybe_null || keyuse->null_rejecting)
+ notnull_part|=keyuse->keypart_map;
+
double tmp2= prev_record_reads(join_positions, idx,
(found_ref | keyuse->used_tables));
if (tmp2 < best_prev_record_reads)
@@ -6458,12 +7028,19 @@ best_access_path(JOIN *join,
loose_scan_opt.check_ref_access_part1(s, key, start_key, found_part);
/* Check if we found full key */
- if (found_part == PREV_BITS(uint, key_parts) &&
- !ref_or_null_part)
+ const key_part_map all_key_parts= PREV_BITS(uint, key_parts);
+ if (found_part == all_key_parts && !ref_or_null_part)
{ /* use eq key */
max_key_part= (uint) ~0;
- if ((key_flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME ||
- MY_TEST(key_flags & HA_EXT_NOSAME))
+ /*
+ If the index is a unique index (1), and
+ - all its columns are not null (2), or
+ - equalities we are using reject NULLs (3)
+ then the estimate is rows=1.
+ */
+ if ((key_flags & (HA_NOSAME | HA_EXT_NOSAME)) && // (1)
+ (!(key_flags & HA_NULL_PART_KEY) || // (2)
+ all_key_parts == notnull_part)) // (3)
{
tmp = prev_record_reads(join_positions, idx, found_ref);
records=1.0;
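
As an editorial aside, the rule spelled out in the comment above can be restated as a tiny predicate; the constants below are hypothetical stand-ins, not the real HA_* flag values:

typedef unsigned long key_part_map_t;

static const unsigned FLAG_NOSAME=        1U << 0;  /* stand-in for HA_NOSAME        */
static const unsigned FLAG_EXT_NOSAME=    1U << 1;  /* stand-in for HA_EXT_NOSAME    */
static const unsigned FLAG_NULL_PART_KEY= 1U << 2;  /* stand-in for HA_NULL_PART_KEY */

/* rows=1 is assumed only for a unique (or extended-unique) key where every
   NULLable key part is covered by a NULL-rejecting equality.                  */
static bool estimate_single_row(unsigned key_flags,
                                key_part_map_t all_key_parts,
                                key_part_map_t notnull_parts)
{
  return (key_flags & (FLAG_NOSAME | FLAG_EXT_NOSAME)) &&   /* (1) */
         (!(key_flags & FLAG_NULL_PART_KEY) ||              /* (2) */
          all_key_parts == notnull_parts);                  /* (3) */
}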
@@ -6712,6 +7289,7 @@ best_access_path(JOIN *join,
loose_scan_opt.check_ref_access_part2(key, start_key, records, tmp,
found_ref);
} /* not ft_key */
+
if (tmp + 0.0001 < best_time - records/(double) TIME_FOR_COMPARE)
{
best_time= COST_ADD(tmp, records/(double) TIME_FOR_COMPARE);
@@ -6872,7 +7450,11 @@ best_access_path(JOIN *join,
}
}
- tmp += s->startup_cost;
+ /* Splitting technique cannot be used with join cache */
+ if (s->table->is_splittable())
+ tmp+= s->table->get_materialization_cost();
+ else
+ tmp+= s->startup_cost;
/*
We estimate the cost of evaluating WHERE clause for found records
as record_count * rnd_records / TIME_FOR_COMPARE. This cost plus
@@ -6894,6 +7476,7 @@ best_access_path(JOIN *join,
best_ref_depends_map= 0;
best_uses_jbuf= MY_TEST(!disable_jbuf && !((s->table->map &
join->outer_join)));
+ spl_plan= 0;
}
}
@@ -6905,6 +7488,7 @@ best_access_path(JOIN *join,
pos->ref_depend_map= best_ref_depends_map;
pos->loosescan_picker.loosescan_key= MAX_KEY;
pos->use_join_buffer= best_uses_jbuf;
+ pos->spl_plan= spl_plan;
loose_scan_opt.save_to_position(s, loose_scan_pos);
@@ -7199,7 +7783,7 @@ static int compare_embedding_subqueries(JOIN_TAB *jt1, JOIN_TAB *jt2)
b: dependent = 0x0 table->map = 0x2 found_records = 3 ptr = 0x907e838
c: dependent = 0x6 table->map = 0x10 found_records = 2 ptr = 0x907ecd0
- As for subuqueries, this function must produce order that can be fed to
+ As for subqueries, this function must produce order that can be fed to
choose_initial_table_order().
@retval
@@ -7539,7 +8123,7 @@ greedy_search(JOIN *join,
'best_read < DBL_MAX' means that optimizer managed to find
some plan and updated 'best_positions' array accordingly.
*/
- DBUG_ASSERT(join->best_read < DBL_MAX);
+ DBUG_ASSERT(join->best_read < DBL_MAX);
if (size_remain <= search_depth)
{
@@ -7743,6 +8327,7 @@ void JOIN::get_prefix_cost_and_fanout(uint n_tables,
record_count= COST_MULT(record_count, best_positions[i].records_read);
read_time= COST_ADD(read_time, best_positions[i].read_time);
}
+ /* TODO: Take into account condition selectivities here */
}
*read_time_arg= read_time;// + record_count / TIME_FOR_COMPARE;
*record_count_arg= record_count;
@@ -8279,7 +8864,7 @@ best_extension_by_limited_search(JOIN *join,
dbug_serve_apcs(thd, 1);
);
- if (thd->check_killed()) // Abort
+ if (unlikely(thd->check_killed())) // Abort
DBUG_RETURN(TRUE);
DBUG_EXECUTE("opt", print_plan(join, idx, read_time, record_count, idx,
@@ -8319,8 +8904,7 @@ best_extension_by_limited_search(JOIN *join,
/* Find the best access method from 's' to the current partial plan */
POSITION loose_scan_pos;
best_access_path(join, s, remaining_tables, join->positions, idx,
- disable_jbuf, record_count, join->positions + idx,
- &loose_scan_pos);
+ disable_jbuf, record_count, position, &loose_scan_pos);
/* Compute the cost of extending the plan with 's' */
current_record_count= COST_MULT(record_count, position->records_read);
@@ -8992,6 +9576,49 @@ JOIN_TAB *next_depth_first_tab(JOIN* join, JOIN_TAB* tab)
}
+bool JOIN::check_two_phase_optimization(THD *thd)
+{
+ if (check_for_splittable_materialized())
+ return true;
+ return false;
+}
+
+
+bool JOIN::inject_cond_into_where(Item *injected_cond)
+{
+ Item *where_item= injected_cond;
+ List<Item> *and_args= NULL;
+ if (conds && conds->type() == Item::COND_ITEM &&
+ ((Item_cond*) conds)->functype() == Item_func::COND_AND_FUNC)
+ {
+ and_args= ((Item_cond*) conds)->argument_list();
+ if (cond_equal)
+ and_args->disjoin((List<Item> *) &cond_equal->current_level);
+ }
+
+ where_item= and_items(thd, conds, where_item);
+ if (where_item->fix_fields_if_needed(thd, 0))
+ return true;
+ thd->change_item_tree(&select_lex->where, where_item);
+ select_lex->where->top_level_item();
+ conds= select_lex->where;
+
+ if (and_args && cond_equal)
+ {
+ and_args= ((Item_cond*) conds)->argument_list();
+ List_iterator<Item_equal> li(cond_equal->current_level);
+ Item_equal *elem;
+ while ((elem= li++))
+ {
+ and_args->push_back(elem, thd->mem_root);
+ }
+ }
+
+ return false;
+
+}
+
+
static Item * const null_ptr= NULL;
/*
@@ -9040,7 +9667,7 @@ bool JOIN::get_best_combination()
*/
uint aggr_tables= (group_list ? 1 : 0) +
(select_distinct ?
- (tmp_table_param. using_outer_summary_function ? 2 : 1) : 0) +
+ (tmp_table_param.using_outer_summary_function ? 2 : 1) : 0) +
(order ? 1 : 0) +
(select_options & (SELECT_BIG_RESULT | OPTION_BUFFER_RESULT) ? 1 : 0) ;
@@ -9273,7 +9900,8 @@ static bool create_hj_key_for_table(JOIN *join, JOIN_TAB *join_tab,
keyinfo->algorithm= HA_KEY_ALG_UNDEF;
keyinfo->flags= HA_GENERATED_KEY;
keyinfo->is_statistics_from_stat_tables= FALSE;
- keyinfo->name= (char *) "$hj";
+ keyinfo->name.str= "$hj";
+ keyinfo->name.length= 3;
keyinfo->rec_per_key= (ulong*) thd->calloc(sizeof(ulong)*key_parts);
if (!keyinfo->rec_per_key)
DBUG_RETURN(TRUE);
@@ -9398,6 +10026,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j,
do
{
if (!(~used_tables & keyuse->used_tables) &&
+ (!keyuse->validity_ref || *keyuse->validity_ref) &&
j->keyuse_is_valid_for_access_in_chosen_plan(join, keyuse))
{
if (are_tables_local(j, keyuse->val->used_tables()))
@@ -9452,6 +10081,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j,
uchar *key_buff=j->ref.key_buff, *null_ref_key= 0;
uint null_ref_part= NO_REF_PART;
bool keyuse_uses_no_tables= TRUE;
+ uint not_null_keyparts= 0;
if (ftkey)
{
j->ref.items[0]=((Item_func*)(keyuse->val))->key_item();
@@ -9468,6 +10098,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j,
for (i=0 ; i < keyparts ; keyuse++,i++)
{
while (((~used_tables) & keyuse->used_tables) ||
+ (keyuse->validity_ref && !(*keyuse->validity_ref)) ||
!j->keyuse_is_valid_for_access_in_chosen_plan(join, keyuse) ||
keyuse->keypart == NO_KEYPART ||
(keyuse->keypart !=
@@ -9479,35 +10110,49 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j,
uint maybe_null= MY_TEST(keyinfo->key_part[i].null_bit);
j->ref.items[i]=keyuse->val; // Save for cond removal
j->ref.cond_guards[i]= keyuse->cond_guard;
- if (keyuse->null_rejecting)
+
+ if (!keyuse->val->maybe_null || keyuse->null_rejecting)
+ not_null_keyparts++;
+ /*
+ Set ref.null_rejecting to true only if we are going to inject a
+ "keyuse->val IS NOT NULL" predicate.
+ */
+ Item *real= (keyuse->val)->real_item();
+ if (keyuse->null_rejecting && (real->type() == Item::FIELD_ITEM) &&
+ ((Item_field*)real)->field->maybe_null())
j->ref.null_rejecting|= (key_part_map)1 << i;
+
keyuse_uses_no_tables= keyuse_uses_no_tables && !keyuse->used_tables;
/*
- Todo: we should remove this check for thd->lex->describe on the next
- line. With SHOW EXPLAIN code, EXPLAIN printout code no longer depends
- on it. However, removing the check caused change in lots of query
- plans! Does the optimizer depend on the contents of
- table_ref->key_copy ? If yes, do we produce incorrect EXPLAINs?
+          We don't want to compute heavy expressions in EXPLAIN. An example:
+          select * from t1 where t1.key=(select that is very heavy);
+
+          (select that is very heavy) => is a constant here,
+          e.g. (select avg(order_cost) from orders) => constant but expensive
*/
if (!keyuse->val->used_tables() && !thd->lex->describe)
{ // Compare against constant
- store_key_item tmp(thd,
+ store_key_item tmp(thd,
keyinfo->key_part[i].field,
key_buff + maybe_null,
maybe_null ? key_buff : 0,
keyinfo->key_part[i].length,
keyuse->val,
FALSE);
- if (thd->is_fatal_error)
- DBUG_RETURN(TRUE);
- tmp.copy();
+ if (unlikely(thd->is_fatal_error))
+ DBUG_RETURN(TRUE);
+ tmp.copy();
j->ref.const_ref_part_map |= key_part_map(1) << i ;
}
else
- *ref_key++= get_store_key(thd,
- keyuse,join->const_table_map,
- &keyinfo->key_part[i],
- key_buff, maybe_null);
+ {
+ *ref_key++= get_store_key(thd,
+ keyuse,join->const_table_map,
+ &keyinfo->key_part[i],
+ key_buff, maybe_null);
+ if (!keyuse->val->used_tables())
+ j->ref.const_ref_part_map |= key_part_map(1) << i ;
+ }
/*
Remember if we are going to use REF_OR_NULL
But only if field _really_ can be null i.e. we force JT_REF
@@ -9527,12 +10172,18 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j,
ulong key_flags= j->table->actual_key_flags(keyinfo);
if (j->type == JT_CONST)
j->table->const_table= 1;
- else if (!((keyparts == keyinfo->user_defined_key_parts &&
- ((key_flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME)) ||
- (keyparts > keyinfo->user_defined_key_parts && // true only for extended keys
- MY_TEST(key_flags & HA_EXT_NOSAME) &&
- keyparts == keyinfo->ext_key_parts)) ||
- null_ref_key)
+ else if (!((keyparts == keyinfo->user_defined_key_parts &&
+ (
+ (key_flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME ||
+ /* Unique key and all keyparts are NULL rejecting */
+ ((key_flags & HA_NOSAME) && keyparts == not_null_keyparts)
+ )) ||
+ /* true only for extended keys */
+ (keyparts > keyinfo->user_defined_key_parts &&
+ MY_TEST(key_flags & HA_EXT_NOSAME) &&
+ keyparts == keyinfo->ext_key_parts)
+ ) ||
+ null_ref_key)
{
/* Must read with repeat */
j->type= null_ref_key ? JT_REF_OR_NULL : JT_REF;
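
The negated condition above is dense; purely as an illustration, an equivalent positive formulation with hypothetical names and stand-in flag constants (not the real HA_* values):

static const unsigned K_NOSAME=        1U << 0;  /* stand-in for HA_NOSAME        */
static const unsigned K_NULL_PART_KEY= 1U << 1;  /* stand-in for HA_NULL_PART_KEY */
static const unsigned K_EXT_NOSAME=    1U << 2;  /* stand-in for HA_EXT_NOSAME    */

/* JT_EQ_REF is kept only when the lookup can match at most one row and no
   REF_OR_NULL scan is needed; otherwise the access degrades to JT_REF.        */
static bool lookup_is_eq_ref(unsigned key_flags, unsigned keyparts,
                             unsigned user_defined_key_parts,
                             unsigned ext_key_parts,
                             unsigned not_null_keyparts,
                             bool null_ref_key)
{
  bool full_unique=
    keyparts == user_defined_key_parts &&
    ((key_flags & (K_NOSAME | K_NULL_PART_KEY)) == K_NOSAME ||
     ((key_flags & K_NOSAME) && keyparts == not_null_keyparts));
  bool full_extended_unique=
    keyparts > user_defined_key_parts &&
    (key_flags & K_EXT_NOSAME) &&
    keyparts == ext_key_parts;
  return (full_unique || full_extended_unique) && !null_ref_key;
}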
@@ -9920,6 +10571,74 @@ make_outerjoin_info(JOIN *join)
}
+/*
+ @brief
+ Build a temporary join prefix condition for JOIN_TABs up to the last tab
+
+ @param ret OUT the condition is returned here
+
+ @return
+ false OK
+ true Out of memory
+
+ @detail
+ Walk through the join prefix (from the first table to the last_tab) and
+ build a condition:
+
+ join_tab_1_cond AND join_tab_2_cond AND ... AND last_tab_conds
+
+ The condition is only intended to be used by the range optimizer, so:
+ - it is not normalized (can have Item_cond_and inside another
+ Item_cond_and)
+ - it does not include join->exec_const_cond and other similar conditions.
+*/
+
+bool build_tmp_join_prefix_cond(JOIN *join, JOIN_TAB *last_tab, Item **ret)
+{
+ THD *const thd= join->thd;
+ Item_cond_and *all_conds= NULL;
+
+ Item *res= NULL;
+
+ // Pick the ON-expression. Use the same logic as in get_sargable_cond():
+ if (last_tab->on_expr_ref)
+ res= *last_tab->on_expr_ref;
+ else if (last_tab->table->pos_in_table_list &&
+ last_tab->table->pos_in_table_list->embedding &&
+ !last_tab->table->pos_in_table_list->embedding->sj_on_expr)
+ {
+ res= last_tab->table->pos_in_table_list->embedding->on_expr;
+ }
+
+ for (JOIN_TAB *tab= first_depth_first_tab(join);
+ tab;
+ tab= next_depth_first_tab(join, tab))
+ {
+ if (tab->select_cond)
+ {
+ if (!res)
+ res= tab->select_cond;
+ else
+ {
+ if (!all_conds)
+ {
+ if (!(all_conds= new (thd->mem_root)Item_cond_and(thd, res,
+ tab->select_cond)))
+ return true;
+ res= all_conds;
+ }
+ else
+ all_conds->add(tab->select_cond, thd->mem_root);
+ }
+ }
+ if (tab == last_tab)
+ break;
+ }
+ *ret= all_conds? all_conds: res;
+ return false;
+}
+
+
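For illustration, the folding that build_tmp_join_prefix_cond() performs can be sketched with plain strings standing in for Item conditions (a hypothetical helper, not part of the patch):

#include <string>
#include <vector>

/* Take the ON expression of the last table (if any), then AND in every
   attached per-table condition of the prefix, producing one top-level AND. */
static std::string build_prefix_cond_sketch(const std::string &on_expr,
                                            const std::vector<std::string> &tab_conds)
{
  std::vector<std::string> parts;
  if (!on_expr.empty())
    parts.push_back(on_expr);
  for (const std::string &c : tab_conds)
    if (!c.empty())
      parts.push_back(c);

  if (parts.empty())
    return "";
  if (parts.size() == 1)
    return parts[0];                    /* single condition, no AND wrapper */
  std::string res= "(" + parts[0];
  for (size_t i= 1; i < parts.size(); i++)
    res+= " AND " + parts[i];
  return res + ")";
}

/* build_prefix_cond_sketch("t3.b = t1.b", {"t1.a < 10", "", "t2.c = 7"})
     -> "(t3.b = t1.b AND t1.a < 10 AND t2.c = 7)"                          */
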
static bool
make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
{
@@ -10267,7 +10986,9 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
{
/* Join with outer join condition */
COND *orig_cond=sel->cond;
- sel->cond= and_conds(thd, sel->cond, *tab->on_expr_ref);
+
+ if (build_tmp_join_prefix_cond(join, tab, &sel->cond))
+ return true;
/*
We can't call sel->cond->fix_fields,
@@ -10343,6 +11064,13 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
if (i != join->const_tables && tab->use_quick != 2 &&
!tab->first_inner)
{ /* Read with cache */
+ /*
+ TODO: the execution also gets here when we will not be using
+ join buffer. Review these cases and perhaps, remove this call.
+ (The final decision whether to use join buffer is made in
+ check_join_cache_usage, so we should only call make_scan_filter()
+ there, too).
+ */
if (tab->make_scan_filter())
DBUG_RETURN(1);
}
@@ -10885,32 +11613,32 @@ pick_table_access_method(JOIN_TAB *tab)
{
case JT_REF:
tab->read_first_record= join_read_always_key;
- tab->read_record.read_record= join_read_next_same;
+ tab->read_record.read_record_func= join_read_next_same;
break;
case JT_REF_OR_NULL:
tab->read_first_record= join_read_always_key_or_null;
- tab->read_record.read_record= join_read_next_same_or_null;
+ tab->read_record.read_record_func= join_read_next_same_or_null;
break;
case JT_CONST:
tab->read_first_record= join_read_const;
- tab->read_record.read_record= join_no_more_records;
+ tab->read_record.read_record_func= join_no_more_records;
break;
case JT_EQ_REF:
tab->read_first_record= join_read_key;
- tab->read_record.read_record= join_no_more_records;
+ tab->read_record.read_record_func= join_no_more_records;
break;
case JT_FT:
tab->read_first_record= join_ft_read_first;
- tab->read_record.read_record= join_ft_read_next;
+ tab->read_record.read_record_func= join_ft_read_next;
break;
case JT_SYSTEM:
tab->read_first_record= join_read_system;
- tab->read_record.read_record= join_no_more_records;
+ tab->read_record.read_record_func= join_no_more_records;
break;
/* keep gcc happy */
@@ -11009,9 +11737,9 @@ end_sj_materialize(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
DBUG_RETURN(NESTED_LOOP_OK);
}
fill_record(thd, table, table->field, sjm->sjm_table_cols, TRUE, FALSE);
- if (thd->is_error())
+ if (unlikely(thd->is_error()))
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
- if ((error= table->file->ha_write_tmp_row(table->record[0])))
+ if (unlikely((error= table->file->ha_write_tmp_row(table->record[0]))))
{
/* create_myisam_from_heap will generate error if needed */
if (table->file->is_fatal_error(error, HA_CHECK_DUP) &&
@@ -11305,6 +12033,9 @@ uint check_join_cache_usage(JOIN_TAB *tab,
if ((tab->cache= new (root) JOIN_CACHE_BNL(join, tab, prev_cache)))
{
tab->icp_other_tables_ok= FALSE;
+ /* If make_join_select() hasn't called make_scan_filter(), do it now */
+ if (!tab->cache_select && tab->make_scan_filter())
+ goto no_join_cache;
return (2 - MY_TEST(!prev_cache));
}
goto no_join_cache;
@@ -11959,7 +12690,10 @@ void JOIN_TAB::cleanup()
if (table)
{
table->file->ha_end_keyread();
- table->file->ha_index_or_rnd_end();
+ if (type == JT_FT)
+ table->file->ha_ft_end();
+ else
+ table->file->ha_index_or_rnd_end();
preread_init_done= FALSE;
if (table->pos_in_table_list &&
table->pos_in_table_list->jtbm_subselect)
@@ -12102,12 +12836,15 @@ bool JOIN_TAB::preread_init()
/* Materialize derived table/view. */
if ((!derived->get_unit()->executed ||
- derived->is_recursive_with_table()) &&
+ derived->is_recursive_with_table() ||
+ derived->get_unit()->uncacheable) &&
mysql_handle_single_derived(join->thd->lex,
derived, DT_CREATE | DT_FILL))
return TRUE;
- preread_init_done= TRUE;
+ if (!(derived->get_unit()->uncacheable & UNCACHEABLE_DEPENDENT) ||
+ derived->is_nonrecursive_derived_with_rec_ref())
+ preread_init_done= TRUE;
if (select && select->quick)
select->quick->replace_handler(table->file);
@@ -12120,7 +12857,8 @@ bool JOIN_TAB::preread_init()
/* init ftfuns for just initialized derived table */
if (table->fulltext_searched)
- init_ftfuncs(join->thd, join->select_lex, MY_TEST(join->order));
+ if (init_ftfuncs(join->thd, join->select_lex, MY_TEST(join->order)))
+ return TRUE;
return FALSE;
}
@@ -12697,7 +13435,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond,
*simple_order=0; // Must do a temp table to sort
else if (!(order_tables & not_const_tables))
{
- if (order->item[0]->has_subquery())
+ if (order->item[0]->with_subquery())
{
/*
Delay the evaluation of constant ORDER and/or GROUP expressions that
@@ -12825,7 +13563,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond,
if (prev_ptr == &first_order) // Nothing to sort/group
*simple_order=1;
#ifndef DBUG_OFF
- if (join->thd->is_error())
+ if (unlikely(join->thd->is_error()))
DBUG_PRINT("error",("Error from remove_const"));
#endif
DBUG_PRINT("exit",("simple_order: %d",(int) *simple_order));
@@ -12931,7 +13669,7 @@ return_zero_rows(JOIN *join, select_result *result, List<TABLE_LIST> &tables,
bool send_error= FALSE;
if (send_row)
send_error= result->send_data(fields) > 0;
- if (!send_error)
+ if (likely(!send_error))
result->send_eof(); // Should be safe
}
DBUG_RETURN(0);
@@ -12972,6 +13710,8 @@ public:
size_t size __attribute__((unused)))
{ TRASH_FREE(ptr, size); }
+ static void operator delete(void *, MEM_ROOT*) {}
+
Item *and_level;
Item_bool_func2 *cmp_func;
COND_CMP(Item *a,Item_bool_func2 *b) :and_level(a),cmp_func(b) {}
@@ -13203,10 +13943,15 @@ static bool check_simple_equality(THD *thd, const Item::Context &ctx,
else
{
/* None of the fields was found in multiple equalities */
- Item_equal *item_equal= new (thd->mem_root) Item_equal(thd,
- orig_left_item,
- orig_right_item,
- FALSE);
+ Type_handler_hybrid_field_type
+ tmp(orig_left_item->type_handler_for_comparison());
+ if (tmp.aggregate_for_comparison(orig_right_item->
+ type_handler_for_comparison()))
+ return false;
+ Item_equal *item_equal=
+ new (thd->mem_root) Item_equal(thd, tmp.type_handler(),
+ orig_left_item, orig_right_item,
+ false);
item_equal->set_context_field((Item_field*)left_item);
cond_equal->current_level.push_back(item_equal, thd->mem_root);
}
@@ -13291,8 +14036,14 @@ static bool check_simple_equality(THD *thd, const Item::Context &ctx,
}
else
{
- item_equal= new (thd->mem_root) Item_equal(thd, const_item2,
- orig_field_item, TRUE);
+ Type_handler_hybrid_field_type
+ tmp(orig_left_item->type_handler_for_comparison());
+ if (tmp.aggregate_for_comparison(orig_right_item->
+ type_handler_for_comparison()))
+ return false;
+ item_equal= new (thd->mem_root) Item_equal(thd, tmp.type_handler(),
+ const_item2,
+ orig_field_item, true);
item_equal->set_context_field(field_item);
cond_equal->current_level.push_back(item_equal, thd->mem_root);
}
@@ -13342,6 +14093,14 @@ static bool check_row_equality(THD *thd, const Arg_comparator *comparators,
if (left_item->type() == Item::ROW_ITEM &&
right_item->type() == Item::ROW_ITEM)
{
+ /*
+        Item_splocal for ROW SP variables returns Item::ROW_ITEM.
+ Here we know that left_item and right_item are not Item_splocal,
+ because ROW SP variables with nested ROWs are not supported yet.
+ It's safe to cast left_item and right_item to Item_row.
+ */
+ DBUG_ASSERT(!left_item->get_item_splocal());
+ DBUG_ASSERT(!right_item->get_item_splocal());
is_converted= check_row_equality(thd,
comparators[i].subcomparators(),
(Item_row *) left_item,
@@ -13353,7 +14112,7 @@ static bool check_row_equality(THD *thd, const Arg_comparator *comparators,
const Arg_comparator *tmp= &comparators[i];
is_converted= check_simple_equality(thd,
Item::Context(Item::ANY_SUBST,
- tmp->compare_type(),
+ tmp->compare_type_handler(),
tmp->compare_collation()),
left_item, right_item,
cond_equal);
@@ -13412,6 +14171,15 @@ bool Item_func_eq::check_equality(THD *thd, COND_EQUAL *cond_equal,
if (left_item->type() == Item::ROW_ITEM &&
right_item->type() == Item::ROW_ITEM)
{
+ /*
+ Item_splocal::type() for ROW variables returns Item::ROW_ITEM.
+ Distinguish ROW-type Item_splocal from Item_row.
+ Example query:
+ SELECT 1 FROM DUAL WHERE row_sp_variable=ROW(100,200);
+ */
+ if (left_item->get_item_splocal() ||
+ right_item->get_item_splocal())
+ return false;
return check_row_equality(thd,
cmp.subcomparators(),
(Item_row *) left_item,
@@ -13420,7 +14188,7 @@ bool Item_func_eq::check_equality(THD *thd, COND_EQUAL *cond_equal,
}
return check_simple_equality(thd,
Context(ANY_SUBST,
- compare_type(),
+ compare_type_handler(),
compare_collation()),
left_item, right_item, cond_equal);
}
@@ -14147,7 +14915,7 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels,
equals on top level, or the constant.
*/
Item *head_item= (!item_const && current_sjm &&
- current_sjm_head != field_item) ? current_sjm_head: head;
+ current_sjm_head != field_item) ? current_sjm_head: head;
Item *head_real_item= head_item->real_item();
if (head_real_item->type() == Item::FIELD_ITEM)
head_item= head_real_item;
@@ -14312,7 +15080,7 @@ static COND* substitute_for_best_equal_field(THD *thd, JOIN_TAB *context_tab,
This works OK with PS/SP re-execution as changes are made to
the arguments of AND/OR items only
*/
- if (new_item != item)
+ if (new_item && new_item != item)
li.replace(new_item);
}
@@ -14391,7 +15159,9 @@ static COND* substitute_for_best_equal_field(THD *thd, JOIN_TAB *context_tab,
while((item_equal= it++))
{
REPLACE_EQUAL_FIELD_ARG arg= {item_equal, context_tab};
- cond= cond->transform(thd, &Item::replace_equal_field, (uchar *) &arg);
+ if (!(cond= cond->transform(thd, &Item::replace_equal_field,
+ (uchar *) &arg)))
+ return 0;
}
cond_equal= cond_equal->upper_levels;
}
@@ -14495,71 +15265,11 @@ can_change_cond_ref_to_const(Item_bool_func2 *target,
Item_bool_func2 *source,
Item *source_expr, Item *source_const)
{
- if (!target_expr->eq(source_expr,0) ||
- target_value == source_const ||
- target->compare_type() != source->compare_type())
- return false;
- if (target->compare_type() == STRING_RESULT)
- {
- /*
- In this example:
- SET NAMES utf8 COLLATE utf8_german2_ci;
- DROP TABLE IF EXISTS t1;
- CREATE TABLE t1 (a CHAR(10) CHARACTER SET utf8);
- INSERT INTO t1 VALUES ('o-umlaut'),('oe');
- SELECT * FROM t1 WHERE a='oe' COLLATE utf8_german2_ci AND a='oe';
-
- the query should return only the row with 'oe'.
- It should not return 'o-umlaut', because 'o-umlaut' does not match
- the right part of the condition: a='oe'
- ('o-umlaut' is not equal to 'oe' in utf8_general_ci,
- which is the collation of the field "a").
-
- If we change the right part from:
- ... AND a='oe'
- to
- ... AND 'oe' COLLATE utf8_german2_ci='oe'
- it will be evalulated to TRUE and removed from the condition,
- so the overall query will be simplified to:
-
- SELECT * FROM t1 WHERE a='oe' COLLATE utf8_german2_ci;
-
- which will erroneously start to return both 'oe' and 'o-umlaut'.
- So changing "expr" to "const" is not possible if the effective
- collations of "target" and "source" are not exactly the same.
-
- Note, the code before the fix for MDEV-7152 only checked that
- collations of "source_const" and "target_value" are the same.
- This was not enough, as the bug report demonstrated.
- */
- return
- target->compare_collation() == source->compare_collation() &&
- target_value->collation.collation == source_const->collation.collation;
- }
- if (target->compare_type() == TIME_RESULT)
- {
- if (target_value->cmp_type() != TIME_RESULT)
- {
- /*
- Can't rewrite:
- WHERE COALESCE(time_column)='00:00:00'
- AND COALESCE(time_column)=DATE'2015-09-11'
- to
- WHERE DATE'2015-09-11'='00:00:00'
- AND COALESCE(time_column)=DATE'2015-09-11'
- because the left part will erroneously try to parse '00:00:00'
- as DATE, not as TIME.
-
- TODO: It could still be rewritten to:
- WHERE DATE'2015-09-11'=TIME'00:00:00'
- AND COALESCE(time_column)=DATE'2015-09-11'
- i.e. we need to replace both target_expr and target_value
- at the same time. This is not supported yet.
- */
- return false;
- }
- }
- return true; // Non-string comparison
+ return target_expr->eq(source_expr,0) &&
+ target_value != source_const &&
+ target->compare_type_handler()->
+ can_change_cond_ref_to_const(target, target_expr, target_value,
+ source, source_expr, source_const);
}
@@ -14608,6 +15318,7 @@ change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list,
{
cond->marker=1;
COND_CMP *tmp2;
+ /* Will work, even if malloc would fail */
if ((tmp2= new (thd->mem_root) COND_CMP(and_father, func)))
save_list->push_back(tmp2);
}
@@ -14640,6 +15351,7 @@ change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list,
thd->change_item_tree(args + 1, value);
cond->marker=1;
COND_CMP *tmp2;
+ /* Will work, even if malloc would fail */
if ((tmp2=new (thd->mem_root) COND_CMP(and_father, func)))
save_list->push_back(tmp2);
}
@@ -15115,10 +15827,15 @@ static uint build_bitmap_for_nested_joins(List<TABLE_LIST> *join_list,
/**
- Set NESTED_JOIN::counter=0 in all nested joins in passed list.
+ Set NESTED_JOIN::counter and n_tables in all nested joins in passed list.
- Recursively set NESTED_JOIN::counter=0 for all nested joins contained in
- the passed join_list.
+ For all nested joins contained in the passed join_list (including its
+ children), set:
+ - nested_join->counter=0
+ - nested_join->n_tables= {number of non-degenerate direct children}.
+
+ Non-degenerate means non-const base table or a join nest that has a
+ non-degenerate child.
@param join_list List of nested joins to process. It may also contain base
tables which will be ignored.
@@ -15141,8 +15858,11 @@ static uint reset_nj_counters(JOIN *join, List<TABLE_LIST> *join_list)
if (!nested_join->n_tables)
is_eliminated_nest= TRUE;
}
- if ((table->nested_join && !is_eliminated_nest) ||
- (!table->nested_join && (table->table->map & ~join->eliminated_tables)))
+ const table_map removed_tables= join->eliminated_tables |
+ join->const_table_map;
+
+ if ((table->nested_join && !is_eliminated_nest) ||
+ (!table->nested_join && (table->table->map & ~removed_tables)))
n++;
}
DBUG_RETURN(n);
@@ -15454,10 +16174,20 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
reopt_remaining_tables &= ~rs->table->map;
rec_count= COST_MULT(rec_count, pos.records_read);
cost= COST_ADD(cost, pos.read_time);
-
-
+ cost= COST_ADD(cost, rec_count / (double) TIME_FOR_COMPARE);
+ //TODO: take into account join condition selectivity here
+ double pushdown_cond_selectivity= 1.0;
+ table_map real_table_bit= rs->table->map;
+ if (join->thd->variables.optimizer_use_condition_selectivity > 1)
+ {
+ pushdown_cond_selectivity= table_cond_selectivity(join, i, rs,
+ reopt_remaining_tables &
+ ~real_table_bit);
+ }
+ (*outer_rec_count) *= pushdown_cond_selectivity;
if (!rs->emb_sj_nest)
*outer_rec_count= COST_MULT(*outer_rec_count, pos.records_read);
+
}
join->cur_sj_inner_tables= save_cur_sj_inner_tables;
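
A worked example of the effect of the new selectivity factor (the numbers are hypothetical, and the table is assumed not to be inside a semi-join materialization nest, so both multiplications apply):

static double reoptimized_fanout_example()
{
  double outer_rec_count= 100.0;            /* fanout of the prefix so far          */
  double records_read= 50.0;                /* rows matched per prefix row          */
  double pushdown_cond_selectivity= 0.1;    /* as if from table_cond_selectivity()  */

  outer_rec_count*= pushdown_cond_selectivity;  /* 100 * 0.1 = 10                   */
  outer_rec_count*= records_read;               /*  10 * 50  = 500 (5000 before)    */
  return outer_rec_count;
}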
@@ -16086,6 +16816,7 @@ Item_func_isnull::remove_eq_conds(THD *thd, Item::cond_result *cond_value,
query_cache_abort(thd, &thd->query_cache_tls);
#endif
COND *new_cond, *cond= this;
+ /* If this fails, we will catch it later before executing query */
if ((new_cond= new (thd->mem_root) Item_func_eq(thd, args[0],
new (thd->mem_root) Item_int(thd, "last_insert_id()",
thd->read_first_successful_insert_id_in_prev_stmt(),
@@ -16275,7 +17006,7 @@ const_expression_in_where(COND *cond, Item *comp_item, Field *comp_field,
*/
Field *create_tmp_field_from_field(THD *thd, Field *org_field,
- const char *name, TABLE *table,
+ LEX_CSTRING *name, TABLE *table,
Item_field *item)
{
Field *new_field;
@@ -16289,8 +17020,8 @@ Field *create_tmp_field_from_field(THD *thd, Field *org_field,
if (item)
item->result_field= new_field;
else
- new_field->field_name= name;
- new_field->flags|= (org_field->flags & NO_DEFAULT_VALUE_FLAG);
+ new_field->field_name= *name;
+ new_field->flags|= org_field->flags & NO_DEFAULT_VALUE_FLAG;
if (org_field->maybe_null() || (item && item->maybe_null))
new_field->flags&= ~NOT_NULL_FLAG; // Because of outer join
if (org_field->type() == MYSQL_TYPE_VAR_STRING ||
@@ -16308,45 +17039,33 @@ Field *create_tmp_field_from_field(THD *thd, Field *org_field,
}
-Field *Item::create_tmp_field(bool group, TABLE *table, uint convert_int_length)
+Field *Item::create_tmp_field_int(TABLE *table, uint convert_int_length)
+{
+ const Type_handler *h= &type_handler_long;
+ if (max_char_length() > convert_int_length)
+ h= &type_handler_longlong;
+ return h->make_and_init_table_field(&name, Record_addr(maybe_null),
+ *this, table);
+}
+
+
+Field *Item_sum::create_tmp_field(bool group, TABLE *table)
{
Field *UNINIT_VAR(new_field);
MEM_ROOT *mem_root= table->in_use->mem_root;
switch (cmp_type()) {
case REAL_RESULT:
+ {
new_field= new (mem_root)
- Field_double(max_length, maybe_null, name, decimals, TRUE);
+ Field_double(max_char_length(), maybe_null, &name, decimals, TRUE);
break;
+ }
case INT_RESULT:
- /*
- Select an integer type with the minimal fit precision.
- convert_int_length is sign inclusive, don't consider the sign.
- */
- if (max_char_length() > convert_int_length)
- new_field= new (mem_root)
- Field_longlong(max_char_length(), maybe_null, name, unsigned_flag);
- else
- new_field= new (mem_root)
- Field_long(max_char_length(), maybe_null, name, unsigned_flag);
- break;
case TIME_RESULT:
- new_field= tmp_table_field_from_field_type(table, true, false);
- break;
- case STRING_RESULT:
- DBUG_ASSERT(collation.collation);
- /*
- GEOMETRY fields have STRING_RESULT result type.
- To preserve type they needed to be handled separately.
- */
- if (field_type() == MYSQL_TYPE_GEOMETRY)
- new_field= tmp_table_field_from_field_type(table, true, false);
- else
- new_field= make_string_field(table);
- new_field->set_derivation(collation.derivation, collation.repertoire);
- break;
case DECIMAL_RESULT:
- new_field= Field_new_decimal::create_from_item(mem_root, this);
+ case STRING_RESULT:
+ new_field= tmp_table_field_from_field_type(table);
break;
case ROW_RESULT:
    // This case should never be chosen
@@ -16360,6 +17079,22 @@ Field *Item::create_tmp_field(bool group, TABLE *table, uint convert_int_length)
}
+static void create_tmp_field_from_item_finalize(THD *thd,
+ Field *new_field,
+ Item *item,
+ Item ***copy_func,
+ bool modify_item)
+{
+ if (copy_func &&
+ (item->is_result_field() ||
+ (item->real_item()->is_result_field())))
+ *((*copy_func)++) = item; // Save for copy_funcs
+ if (modify_item)
+ item->set_result_field(new_field);
+ if (item->type() == Item::NULL_ITEM)
+ new_field->is_created_from_null_item= TRUE;
+}
+
/**
Create field for temporary table using type of given item.
@@ -16376,8 +17111,6 @@ Field *Item::create_tmp_field(bool group, TABLE *table, uint convert_int_length)
update the record in the original table.
If modify_item is 0 then fill_record() will
update the temporary table
- @param convert_blob_length If >0 create a varstring(convert_blob_length)
- field instead of blob.
@retval
0 on error
@@ -16389,16 +17122,10 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
Item ***copy_func, bool modify_item)
{
DBUG_ASSERT(thd == table->in_use);
- Field *new_field= item->Item::create_tmp_field(false, table);
-
- if (copy_func &&
- (item->is_result_field() ||
- (item->real_item()->is_result_field())))
- *((*copy_func)++) = item; // Save for copy_funcs
- if (modify_item)
- item->set_result_field(new_field);
- if (item->type() == Item::NULL_ITEM)
- new_field->is_created_from_null_item= TRUE;
+ Field* new_field= item->create_tmp_field(false, table);
+ if (new_field)
+ create_tmp_field_from_item_finalize(thd, new_field, item,
+ copy_func, modify_item);
return new_field;
}
@@ -16422,15 +17149,20 @@ Field *Item::create_field_for_schema(THD *thd, TABLE *table)
{
Field *field;
if (max_length > MAX_FIELD_VARCHARLENGTH)
- field= new Field_blob(max_length, maybe_null, name, collation.collation);
+ field= new (thd->mem_root) Field_blob(max_length, maybe_null, &name,
+ collation.collation);
+ else if (max_length > 0)
+ field= new (thd->mem_root) Field_varstring(max_length, maybe_null, &name,
+ table->s,
+ collation.collation);
else
- field= new Field_varstring(max_length, maybe_null, name,
- table->s, collation.collation);
+ field= new Field_null((uchar*) 0, 0, Field::NONE, &name,
+ collation.collation);
if (field)
field->init(table);
return field;
}
- return tmp_table_field_from_field_type(table, false, false);
+ return tmp_table_field_from_field_type(table);
}
@@ -16472,6 +17204,8 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
Item::Type orig_type= type;
Item *orig_item= 0;
+ DBUG_ASSERT(thd == table->in_use);
+
if (type != Item::FIELD_ITEM &&
item->real_item()->type() == Item::FIELD_ITEM)
{
@@ -16481,6 +17215,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
}
switch (type) {
+ case Item::TYPE_HOLDER:
case Item::SUM_FUNC_ITEM:
{
result= item->create_tmp_field(group, table);
@@ -16490,6 +17225,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
}
case Item::FIELD_ITEM:
case Item::DEFAULT_VALUE_ITEM:
+ case Item::CONTEXTUALLY_TYPED_VALUE_ITEM:
case Item::INSERT_VALUE_ITEM:
case Item::TRIGGER_FIELD_ITEM:
{
@@ -16524,24 +17260,34 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
if (result && modify_item)
field->result_field= result;
if (orig_item)
+ {
item->maybe_null= save_maybe_null;
+ result->field_name= orig_item->name;
+ }
}
else if (table_cant_handle_bit_fields && field->field->type() ==
MYSQL_TYPE_BIT)
{
+ const Type_handler *handler= item->type_handler_long_or_longlong();
*from_field= field->field;
- result= create_tmp_field_from_item(thd, item, table, copy_func,
- modify_item);
+ if ((result=
+ handler->make_and_init_table_field(&item->name,
+ Record_addr(item->maybe_null),
+ *item, table)))
+ create_tmp_field_from_item_finalize(thd, result, item,
+ copy_func, modify_item);
if (result && modify_item)
field->result_field= result;
}
else
+ {
+ LEX_CSTRING *tmp= orig_item ? &orig_item->name : &item->name;
result= create_tmp_field_from_field(thd, (*from_field= field->field),
- orig_item ? orig_item->name :
- item->name,
- table,
+ tmp, table,
modify_item ? field :
NULL);
+ }
+
if (orig_type == Item::REF_ITEM && orig_modify)
((Item_ref*)orig_item)->set_result_field(result);
/*
@@ -16569,11 +17315,10 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
{
*((*copy_func)++)= item;
}
-
Field *result_field=
create_tmp_field_from_field(thd,
sp_result_field,
- item_func_sp->name,
+ &item_func_sp->name,
table,
NULL);
@@ -16610,11 +17355,6 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
return create_tmp_field_from_item(thd, item, table,
(make_copy_field ? 0 : copy_func),
modify_item);
- case Item::TYPE_HOLDER:
- result= ((Item_type_holder *)item)->make_field_by_type(table);
- result->set_derivation(item->collation.derivation,
- item->collation.repertoire);
- return result;
  default: // Doesn't have to be stored
return 0;
}
@@ -16692,13 +17432,17 @@ setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps)
temporary table
@param table_alias possible name of the temporary table that can
be used for name resolving; can be "".
+ @param do_not_open only create the TABLE object, do not
+ open the table in the engine
+ @param keep_row_order rows need to be read in the order they were
+ inserted, the engine should preserve this order
*/
TABLE *
create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
ORDER *group, bool distinct, bool save_sum_fields,
ulonglong select_options, ha_rows rows_limit,
- const char *table_alias, bool do_not_open,
+ const LEX_CSTRING *table_alias, bool do_not_open,
bool keep_row_order)
{
MEM_ROOT *mem_root_save, own_root;
@@ -16735,12 +17479,10 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
DBUG_ENTER("create_tmp_table");
DBUG_PRINT("enter",
("table_alias: '%s' distinct: %d save_sum_fields: %d "
- "rows_limit: %lu group: %d", table_alias,
+ "rows_limit: %lu group: %d", table_alias->str,
(int) distinct, (int) save_sum_fields,
(ulong) rows_limit, MY_TEST(group)));
- thd->query_plan_flags|= QPLAN_TMP_TABLE;
-
if (use_temp_pool && !(test_flags & TEST_KEEP_TMP_TABLES))
temp_pool_slot = bitmap_lock_set_next(&temp_pool);
@@ -16806,7 +17548,8 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
if (param->precomputed_group_by)
copy_func_count+= param->sum_func_count;
- init_sql_alloc(&own_root, TABLE_ALLOC_BLOCK_SIZE, 0, MYF(MY_THREAD_SPECIFIC));
+ init_sql_alloc(&own_root, "tmp_table", TABLE_ALLOC_BLOCK_SIZE, 0,
+ MYF(MY_THREAD_SPECIFIC));
if (!multi_alloc_root(&own_root,
&table, sizeof(*table),
@@ -16853,7 +17596,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
thd->mem_root= &table->mem_root;
table->field=reg_field;
- table->alias.set(table_alias, strlen(table_alias), table_alias_charset);
+ table->alias.set(table_alias->str, table_alias->length, table_alias_charset);
table->reginfo.lock_type=TL_WRITE; /* Will be updated */
table->map=1;
@@ -16923,13 +17666,15 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
Item *arg= sum_item->get_arg(i);
if (!arg->const_item())
{
- Field *new_field=
+ Item *tmp_item;
+ Field *new_field=
create_tmp_field(thd, table, arg, arg->type(), &copy_func,
tmp_from_field, &default_field[fieldnr],
group != 0,not_all_columns,
distinct, false);
if (!new_field)
goto err; // Should be OOM
+ DBUG_ASSERT(!new_field->field_name.str || strlen(new_field->field_name.str) == new_field->field_name.length);
tmp_from_field++;
reclength+=new_field->pack_length();
if (new_field->flags & BLOB_FLAG)
@@ -16947,7 +17692,10 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
string_total_length+= new_field->pack_length();
}
thd->mem_root= mem_root_save;
- arg= sum_item->set_arg(i, thd, new (thd->mem_root) Item_temptable_field(thd, new_field));
+ if (!(tmp_item= new (thd->mem_root)
+ Item_temptable_field(thd, new_field)))
+ goto err;
+ arg= sum_item->set_arg(i, thd, tmp_item);
thd->mem_root= &table->mem_root;
if (param->force_not_null_cols)
{
@@ -16999,12 +17747,13 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
item->marker == 4 || param->bit_fields_as_long,
force_copy_fields);
- if (!new_field)
+ if (unlikely(!new_field))
{
- if (thd->is_fatal_error)
+ if (unlikely(thd->is_fatal_error))
goto err; // Got OOM
continue; // Some kind of const item
}
+ DBUG_ASSERT(!new_field->field_name.str || strlen(new_field->field_name.str) == new_field->field_name.length);
if (type == Item::SUM_FUNC_ITEM)
{
Item_sum *agg_item= (Item_sum *) item;
@@ -17208,7 +17957,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
/*
Test if there is a default field value. The test for ->ptr is to skip
- 'offset' fields generated by initalize_tables
+ 'offset' fields generated by initialize_tables
*/
if (default_field[i] && default_field[i]->ptr)
{
@@ -17301,7 +18050,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
keyinfo->collected_stats= NULL;
keyinfo->algorithm= HA_KEY_ALG_UNDEF;
keyinfo->is_statistics_from_stat_tables= FALSE;
- keyinfo->name= (char*) "group_key";
+ keyinfo->name= group_key;
ORDER *cur_group= group;
for (; cur_group ; cur_group= cur_group->next, key_part_info++)
{
@@ -17412,7 +18161,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
keyinfo->flags=HA_NOSAME | HA_NULL_ARE_EQUAL | HA_BINARY_PACK_KEY | HA_PACK_KEY;
keyinfo->ext_key_flags= keyinfo->flags;
keyinfo->key_length= 0; // Will compute the sum of the parts below.
- keyinfo->name= (char*) "distinct_key";
+ keyinfo->name= distinct_key;
keyinfo->algorithm= HA_KEY_ALG_UNDEF;
keyinfo->is_statistics_from_stat_tables= FALSE;
keyinfo->read_stats= NULL;
@@ -17448,7 +18197,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
(uchar*) 0,
(uint) 0,
Field::NONE,
- NullS, &my_charset_bin);
+ &null_clex_str, &my_charset_bin);
if (!key_part_info->field)
goto err;
key_part_info->field->init(table);
@@ -17512,7 +18261,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
}
}
- if (thd->is_fatal_error) // If end of memory
+ if (unlikely(thd->is_fatal_error)) // If end of memory
goto err; /* purecov: inspected */
share->db_record_offset= 1;
table->used_for_duplicate_elimination= (param->sum_func_count == 0 &&
@@ -17542,7 +18291,6 @@ err:
}
-
/****************************************************************************/
void *Virtual_tmp_table::operator new(size_t size, THD *thd) throw()
@@ -17555,37 +18303,39 @@ bool Virtual_tmp_table::init(uint field_count)
{
uint *blob_field;
uchar *bitmaps;
+ DBUG_ENTER("Virtual_tmp_table::init");
if (!multi_alloc_root(in_use->mem_root,
&s, sizeof(*s),
&field, (field_count + 1) * sizeof(Field*),
&blob_field, (field_count + 1) * sizeof(uint),
&bitmaps, bitmap_buffer_size(field_count) * 6,
NullS))
- return true;
+ DBUG_RETURN(true);
s->reset();
s->blob_field= blob_field;
setup_tmp_table_column_bitmaps(this, bitmaps, field_count);
m_alloced_field_count= field_count;
- return false;
+ DBUG_RETURN(false);
};
-bool Virtual_tmp_table::add(List<Column_definition> &field_list)
+bool Virtual_tmp_table::add(List<Spvar_definition> &field_list)
{
/* Create all fields and calculate the total length of record */
- Column_definition *cdef; /* column definition */
- List_iterator_fast<Column_definition> it(field_list);
- for ( ; (cdef= it++); )
+ Spvar_definition *cdef; /* column definition */
+ List_iterator_fast<Spvar_definition> it(field_list);
+ DBUG_ENTER("Virtual_tmp_table::add");
+ while ((cdef= it++))
{
Field *tmp;
if (!(tmp= cdef->make_field(s, in_use->mem_root, 0,
(uchar*) (f_maybe_null(cdef->pack_flag) ? "" : 0),
f_maybe_null(cdef->pack_flag) ? 1 : 0,
- cdef->field_name)))
- return true;
- add(tmp);
+ &cdef->field_name)))
+ DBUG_RETURN(true);
+ add(tmp);
}
- return false;
+ DBUG_RETURN(false);
}
@@ -17650,12 +18400,76 @@ bool Virtual_tmp_table::open()
}
+bool Virtual_tmp_table::sp_find_field_by_name(uint *idx,
+ const LEX_CSTRING &name) const
+{
+ Field *f;
+ for (uint i= 0; (f= field[i]); i++)
+ {
+    // Use the same comparison style as sp_context::find_variable()
+ if (!my_strnncoll(system_charset_info,
+ (const uchar *) f->field_name.str,
+ f->field_name.length,
+ (const uchar *) name.str, name.length))
+ {
+ *idx= i;
+ return false;
+ }
+ }
+ return true;
+}
+
+
+bool
+Virtual_tmp_table::sp_find_field_by_name_or_error(uint *idx,
+ const LEX_CSTRING &var_name,
+ const LEX_CSTRING &field_name)
+ const
+{
+ if (sp_find_field_by_name(idx, field_name))
+ {
+ my_error(ER_ROW_VARIABLE_DOES_NOT_HAVE_FIELD, MYF(0),
+ var_name.str, field_name.str);
+ return true;
+ }
+ return false;
+}
+
+
+bool Virtual_tmp_table::sp_set_all_fields_from_item_list(THD *thd,
+ List<Item> &items)
+{
+ DBUG_ASSERT(s->fields == items.elements);
+ List_iterator<Item> it(items);
+ Item *item;
+ for (uint i= 0 ; (item= it++) ; i++)
+ {
+ if (field[i]->sp_prepare_and_store_item(thd, &item))
+ return true;
+ }
+ return false;
+}
+
+
+bool Virtual_tmp_table::sp_set_all_fields_from_item(THD *thd, Item *value)
+{
+ DBUG_ASSERT(value->fixed);
+ DBUG_ASSERT(value->cols() == s->fields);
+ for (uint i= 0; i < value->cols(); i++)
+ {
+ if (field[i]->sp_prepare_and_store_item(thd, value->addr(i)))
+ return true;
+ }
+ return false;
+}
+
+
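For illustration, a hypothetical helper (not part of the patch) showing how the lookup and store methods above combine to assign one field of a ROW-type SP variable, e.g. "rec.a := <expr>":

static bool sp_set_row_field_sketch(THD *thd, Virtual_tmp_table *vtable,
                                    const LEX_CSTRING &var_name,
                                    const LEX_CSTRING &field_name,
                                    Item **value)
{
  uint idx;
  /* Reports ER_ROW_VARIABLE_DOES_NOT_HAVE_FIELD on a bad field name */
  if (vtable->sp_find_field_by_name_or_error(&idx, var_name, field_name))
    return true;
  /* Convert/copy the value into the field, as the setters above do */
  return vtable->field[idx]->sp_prepare_and_store_item(thd, value);
}
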
bool open_tmp_table(TABLE *table)
{
int error;
- if ((error= table->file->ha_open(table, table->s->path.str, O_RDWR,
- HA_OPEN_TMP_TABLE |
- HA_OPEN_INTERNAL_TABLE)))
+ if (unlikely((error= table->file->ha_open(table, table->s->path.str, O_RDWR,
+ HA_OPEN_TMP_TABLE |
+ HA_OPEN_INTERNAL_TABLE))))
{
table->file->print_error(error, MYF(0)); /* purecov: inspected */
table->db_stat= 0;
@@ -17849,10 +18663,14 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
}
}
- if ((error= maria_create(share->path.str, file_type, share->keys, &keydef,
- (uint) (*recinfo-start_recinfo), start_recinfo,
- share->uniques, &uniquedef, &create_info,
- create_flags)))
+ if (unlikely((error= maria_create(share->path.str,
+ file_type,
+ share->keys, &keydef,
+ (uint) (*recinfo-start_recinfo),
+ start_recinfo,
+ share->uniques, &uniquedef,
+ &create_info,
+ create_flags))))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
table->db_stat=0;
@@ -17862,7 +18680,6 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
table->in_use->inc_status_created_tmp_disk_tables();
table->in_use->inc_status_created_tmp_tables();
- table->in_use->query_plan_flags|= QPLAN_TMP_DISK;
share->db_record_offset= 1;
table->set_created();
DBUG_RETURN(0);
@@ -18001,13 +18818,17 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
bzero((char*) &create_info,sizeof(create_info));
create_info.data_file_length= table->in_use->variables.tmp_disk_table_size;
- if ((error=mi_create(share->path.str, share->keys, &keydef,
- (uint) (*recinfo-start_recinfo), start_recinfo,
- share->uniques, &uniquedef, &create_info,
- HA_CREATE_TMP_TABLE | HA_CREATE_INTERNAL_TABLE |
- ((share->db_create_options & HA_OPTION_PACK_RECORD) ?
- HA_PACK_RECORD : 0)
- )))
+ if (unlikely((error= mi_create(share->path.str, share->keys, &keydef,
+ (uint) (*recinfo-start_recinfo),
+ start_recinfo,
+ share->uniques, &uniquedef,
+ &create_info,
+ HA_CREATE_TMP_TABLE |
+ HA_CREATE_INTERNAL_TABLE |
+ ((share->db_create_options &
+ HA_OPTION_PACK_RECORD) ?
+ HA_PACK_RECORD : 0)
+ ))))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
table->db_stat=0;
@@ -18015,7 +18836,6 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
}
table->in_use->inc_status_created_tmp_disk_tables();
table->in_use->inc_status_created_tmp_tables();
- table->in_use->query_plan_flags|= QPLAN_TMP_DISK;
share->db_record_offset= 1;
table->set_created();
DBUG_RETURN(0);
@@ -18062,11 +18882,11 @@ create_internal_tmp_table_from_heap(THD *thd, TABLE *table,
share= *table->s;
new_table.s= &share;
new_table.s->db_plugin= ha_lock_engine(thd, TMP_ENGINE_HTON);
- if (!(new_table.file= get_new_handler(&share, &new_table.mem_root,
- new_table.s->db_type())))
+ if (unlikely(!(new_table.file= get_new_handler(&share, &new_table.mem_root,
+ new_table.s->db_type()))))
DBUG_RETURN(1); // End of memory
- if (new_table.file->set_ha_share_ref(&share.ha_share))
+ if (unlikely(new_table.file->set_ha_share_ref(&share.ha_share)))
{
delete new_table.file;
DBUG_RETURN(1);
@@ -18109,16 +18929,13 @@ create_internal_tmp_table_from_heap(THD *thd, TABLE *table,
DBUG_EXECUTE_IF("raise_error", write_err= HA_ERR_FOUND_DUPP_KEY ;);
if (write_err)
goto err;
- if (thd->check_killed())
- {
- thd->send_kill_message();
+ if (unlikely(thd->check_killed()))
goto err_killed;
- }
}
if (!new_table.no_rows && new_table.file->ha_end_bulk_insert())
goto err;
/* copy row that filled HEAP table */
- if ((write_err=new_table.file->ha_write_tmp_row(table->record[0])))
+ if (unlikely((write_err=new_table.file->ha_write_tmp_row(table->record[0]))))
{
if (new_table.file->is_fatal_error(write_err, HA_CHECK_DUP) ||
!ignore_last_dupp_key_error)
@@ -18182,7 +18999,12 @@ free_tmp_table(THD *thd, TABLE *entry)
{
entry->file->ha_index_or_rnd_end();
if (entry->db_stat)
+ {
+ entry->file->info(HA_STATUS_VARIABLE);
+ thd->tmp_tables_size+= (entry->file->stats.data_file_length +
+ entry->file->stats.index_file_length);
entry->file->ha_drop_table(entry->s->path.str);
+ }
else
entry->file->ha_delete_table(entry->s->path.str);
delete entry->file;
@@ -18198,6 +19020,12 @@ free_tmp_table(THD *thd, TABLE *entry)
plugin_unlock(0, entry->s->db_plugin);
entry->alias.free();
+ if (entry->pos_in_table_list && entry->pos_in_table_list->table)
+ {
+ DBUG_ASSERT(entry->pos_in_table_list->table == entry);
+ entry->pos_in_table_list->table= NULL;
+ }
+
free_root(&own_root, MYF(0)); /* the table is allocated in its own root */
thd_proc_info(thd, save_proc_info);
@@ -18397,7 +19225,7 @@ do_select(JOIN *join, Procedure *procedure)
(the join condition and piece of where clause
relevant to this join table).
*/
- if (join->thd->is_error())
+ if (unlikely(join->thd->is_error()))
error= NESTED_LOOP_ERROR;
}
else
@@ -18415,13 +19243,14 @@ do_select(JOIN *join, Procedure *procedure)
error= NESTED_LOOP_NO_MORE_ROWS;
else
error= join->first_select(join,join_tab,0);
- if (error >= NESTED_LOOP_OK && join->thd->killed != ABORT_QUERY)
+ if (error >= NESTED_LOOP_OK && likely(join->thd->killed != ABORT_QUERY))
error= join->first_select(join,join_tab,1);
}
join->thd->limit_found_rows= join->send_records - join->duplicate_rows;
- if (error == NESTED_LOOP_NO_MORE_ROWS || join->thd->killed == ABORT_QUERY)
+ if (error == NESTED_LOOP_NO_MORE_ROWS ||
+ unlikely(join->thd->killed == ABORT_QUERY))
error= NESTED_LOOP_OK;
/*
@@ -18468,7 +19297,7 @@ do_select(JOIN *join, Procedure *procedure)
Sic: this branch works even if rc != 0, e.g. when
send_data above returns an error.
*/
- if (join->result->send_eof())
+ if (unlikely(join->result->send_eof()))
rc= 1; // Don't send error
DBUG_PRINT("info",("%ld records output", (long) join->send_records));
}
@@ -18488,7 +19317,7 @@ do_select(JOIN *join, Procedure *procedure)
int rr_sequential_and_unpack(READ_RECORD *info)
{
int error;
- if ((error= rr_sequential(info)))
+ if (unlikely((error= rr_sequential(info))))
return error;
for (Copy_field *cp= info->copy_field; cp != info->copy_field_end; cp++)
@@ -18524,6 +19353,10 @@ bool instantiate_tmp_table(TABLE *table, KEY *keyinfo,
{
if (table->s->db_type() == TMP_ENGINE_HTON)
{
+ /*
+      If it is not a heap (in-memory) table then convert the index to a
+      unique constraint.
+ */
if (create_internal_tmp_table(table, keyinfo, start_recinfo, recinfo,
options))
return TRUE;
@@ -18655,10 +19488,9 @@ sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
rc= sub_select(join, join_tab, end_of_records);
DBUG_RETURN(rc);
}
- if (join->thd->check_killed())
+ if (unlikely(join->thd->check_killed()))
{
/* The user has aborted the execution of the query */
- join->thd->send_kill_message();
DBUG_RETURN(NESTED_LOOP_KILLED);
}
if (!test_if_use_dynamic_range_scan(join_tab))
@@ -18889,12 +19721,12 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
skip_over= TRUE;
}
- error= info->read_record(info);
+ error= info->read_record();
- if (skip_over && !error)
+ if (skip_over && likely(!error))
{
- if(!key_cmp(join_tab->table->key_info[join_tab->loosescan_key].key_part,
- join_tab->loosescan_buf, join_tab->loosescan_key_len))
+ if (!key_cmp(join_tab->table->key_info[join_tab->loosescan_key].key_part,
+ join_tab->loosescan_buf, join_tab->loosescan_key_len))
{
/*
This is the LooseScan action: skip over records with the same key
@@ -18906,7 +19738,7 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
skip_over= FALSE;
}
- if (join_tab->keep_current_rowid && !error)
+ if (join_tab->keep_current_rowid && likely(!error))
join_tab->table->file->position(join_tab->table->record[0]);
rc= evaluate_join_record(join, join_tab, error);
@@ -18951,13 +19783,13 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
" cond: %p error: %d alias %s",
join, join_tab, select_cond, error,
join_tab->table->alias.ptr()));
- if (error > 0 || (join->thd->is_error())) // Fatal error
+
+ if (error > 0 || unlikely(join->thd->is_error())) // Fatal error
DBUG_RETURN(NESTED_LOOP_ERROR);
if (error < 0)
DBUG_RETURN(NESTED_LOOP_NO_MORE_ROWS);
- if (join->thd->check_killed()) // Aborted by user
+ if (unlikely(join->thd->check_killed())) // Aborted by user
{
- join->thd->send_kill_message();
DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */
}
@@ -18968,7 +19800,7 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
select_cond_result= MY_TEST(select_cond->val_int());
/* check for errors evaluating the condition */
- if (join->thd->is_error())
+ if (unlikely(join->thd->is_error()))
DBUG_RETURN(NESTED_LOOP_ERROR);
}
@@ -19098,7 +19930,7 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
join->return_tab= return_tab;
/* check for errors evaluating the condition */
- if (join->thd->is_error())
+ if (unlikely(join->thd->is_error()))
DBUG_RETURN(NESTED_LOOP_ERROR);
if (join->return_tab < join_tab)
@@ -19247,10 +20079,11 @@ int safe_index_read(JOIN_TAB *tab)
{
int error;
TABLE *table= tab->table;
- if ((error= table->file->ha_index_read_map(table->record[0],
- tab->ref.key_buff,
- make_prev_keypart_map(tab->ref.key_parts),
- HA_READ_KEY_EXACT)))
+ if (unlikely((error=
+ table->file->ha_index_read_map(table->record[0],
+ tab->ref.key_buff,
+ make_prev_keypart_map(tab->ref.key_parts),
+ HA_READ_KEY_EXACT))))
return report_error(table, error);
return 0;
}
@@ -19298,7 +20131,7 @@ join_read_const_table(THD *thd, JOIN_TAB *tab, POSITION *pos)
}
else if (tab->type == JT_SYSTEM)
{
- if ((error=join_read_system(tab)))
+ if (unlikely((error=join_read_system(tab))))
{ // Info for DESCRIBE
tab->info= ET_CONST_ROW_NOT_FOUND;
/* Mark for EXPLAIN that the row was not found */
@@ -19324,7 +20157,7 @@ join_read_const_table(THD *thd, JOIN_TAB *tab, POSITION *pos)
}
error=join_read_const(tab);
table->file->ha_end_keyread();
- if (error)
+ if (unlikely(error))
{
tab->info= ET_UNIQUE_ROW_NOT_FOUND;
/* Mark for EXPLAIN that the row was not found */
@@ -19402,11 +20235,13 @@ join_read_system(JOIN_TAB *tab)
int error;
if (table->status & STATUS_GARBAGE) // If first read
{
- if ((error= table->file->ha_read_first_row(table->record[0],
- table->s->primary_key)))
+ if (unlikely((error=
+ table->file->ha_read_first_row(table->record[0],
+ table->s->primary_key))))
{
if (error != HA_ERR_END_OF_FILE)
return report_error(table, error);
+ table->const_table= 1;
mark_as_null_row(tab->table);
empty_record(table); // Make empty record
return -1;
@@ -19447,7 +20282,7 @@ join_read_const(JOIN_TAB *tab)
make_prev_keypart_map(tab->ref.key_parts),
HA_READ_KEY_EXACT);
}
- if (error)
+ if (unlikely(error))
{
table->status= STATUS_NOT_FOUND;
mark_as_null_row(tab->table);
@@ -19503,7 +20338,7 @@ int join_read_key2(THD *thd, JOIN_TAB *tab, TABLE *table, TABLE_REF *table_ref)
if (!table->file->inited)
{
error= table->file->ha_index_init(table_ref->key, tab ? tab->sorted : TRUE);
- if (error)
+ if (unlikely(error))
{
(void) report_error(table, error);
return 1;
@@ -19543,10 +20378,11 @@ int join_read_key2(THD *thd, JOIN_TAB *tab, TABLE *table, TABLE_REF *table_ref)
table_ref->key_buff,
make_prev_keypart_map(table_ref->key_parts),
HA_READ_KEY_EXACT);
- if (error && error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
+ if (unlikely(error) &&
+ error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
- if (! error)
+ if (likely(!error))
{
table_ref->has_record= TRUE;
table_ref->use_count= 1;
@@ -19607,16 +20443,19 @@ join_read_always_key(JOIN_TAB *tab)
/* Initialize the index first */
if (!table->file->inited)
{
- if ((error= table->file->ha_index_init(tab->ref.key, tab->sorted)))
+ if (unlikely((error= table->file->ha_index_init(tab->ref.key,
+ tab->sorted))))
{
(void) report_error(table, error);
return 1;
}
}
- if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
+ if (unlikely(cp_buffer_from_ref(tab->join->thd, table, &tab->ref)))
return -1;
- if ((error= table->file->prepare_index_key_scan_map(tab->ref.key_buff, make_prev_keypart_map(tab->ref.key_parts))))
+ if (unlikely((error=
+ table->file->prepare_index_key_scan_map(tab->ref.key_buff,
+ make_prev_keypart_map(tab->ref.key_parts)))))
{
report_error(table,error);
return -1;
@@ -19646,23 +20485,26 @@ join_read_last_key(JOIN_TAB *tab)
TABLE *table= tab->table;
if (!table->file->inited &&
- (error= table->file->ha_index_init(tab->ref.key, tab->sorted)))
+ unlikely((error= table->file->ha_index_init(tab->ref.key, tab->sorted))))
{
(void) report_error(table, error);
return 1;
}
- if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
+ if (unlikely(cp_buffer_from_ref(tab->join->thd, table, &tab->ref)))
return -1;
- if ((error= table->file->prepare_index_key_scan_map(tab->ref.key_buff, make_prev_keypart_map(tab->ref.key_parts))))
+ if (unlikely((error=
+ table->file->prepare_index_key_scan_map(tab->ref.key_buff,
+ make_prev_keypart_map(tab->ref.key_parts)))) )
{
report_error(table,error);
return -1;
}
- if ((error= table->file->ha_index_read_map(table->record[0],
- tab->ref.key_buff,
- make_prev_keypart_map(tab->ref.key_parts),
- HA_READ_PREFIX_LAST)))
+ if (unlikely((error=
+ table->file->ha_index_read_map(table->record[0],
+ tab->ref.key_buff,
+ make_prev_keypart_map(tab->ref.key_parts),
+ HA_READ_PREFIX_LAST))))
{
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
@@ -19687,9 +20529,9 @@ join_read_next_same(READ_RECORD *info)
TABLE *table= info->table;
JOIN_TAB *tab=table->reginfo.join_tab;
- if ((error= table->file->ha_index_next_same(table->record[0],
- tab->ref.key_buff,
- tab->ref.key_length)))
+ if (unlikely((error= table->file->ha_index_next_same(table->record[0],
+ tab->ref.key_buff,
+ tab->ref.key_length))))
{
if (error != HA_ERR_END_OF_FILE)
return report_error(table, error);
@@ -19707,7 +20549,7 @@ join_read_prev_same(READ_RECORD *info)
TABLE *table= info->table;
JOIN_TAB *tab=table->reginfo.join_tab;
- if ((error= table->file->ha_index_prev(table->record[0])))
+ if (unlikely((error= table->file->ha_index_prev(table->record[0]))))
return report_error(table, error);
if (key_cmp_if_same(table, tab->ref.key_buff, tab->ref.key,
tab->ref.key_length))
@@ -19730,9 +20572,9 @@ join_init_quick_read_record(JOIN_TAB *tab)
int read_first_record_seq(JOIN_TAB *tab)
{
- if (tab->read_record.table->file->ha_rnd_init_with_error(1))
+ if (unlikely(tab->read_record.table->file->ha_rnd_init_with_error(1)))
return 1;
- return (*tab->read_record.read_record)(&tab->read_record);
+ return tab->read_record.read_record();
}
static int
@@ -19796,7 +20638,7 @@ int join_init_read_record(JOIN_TAB *tab)
if (init_read_record(&tab->read_record, tab->join->thd, tab->table,
tab->select, tab->filesort_result, 1,1, FALSE))
return 1;
- return (*tab->read_record.read_record)(&tab->read_record);
+ return tab->read_record.read_record();
}
int
@@ -19816,9 +20658,9 @@ join_read_record_no_init(JOIN_TAB *tab)
tab->read_record.copy_field= save_copy;
tab->read_record.copy_field_end= save_copy_end;
- tab->read_record.read_record= rr_sequential_and_unpack;
+ tab->read_record.read_record_func= rr_sequential_and_unpack;
- return (*tab->read_record.read_record)(&tab->read_record);
+ return tab->read_record.read_record();
}
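
The read_record changes follow one calling-convention shift: the function pointer field is renamed to read_record_func and callers go through a small read_record() wrapper instead of dereferencing the pointer themselves. A simplified sketch with stand-in names; the real READ_RECORD lives in records.h and carries table and buffer state.

struct read_record_sketch
{
  int (*read_record_func)(read_record_sketch *info)= nullptr;
  int read_record() { return read_record_func(this); }
};

static int rr_one_row(read_record_sketch *) { return 0; }   // stand-in read function

int scan_once(read_record_sketch *info)
{
  info->read_record_func= rr_one_row;
  return info->read_record();          // replaces (*info->read_record)(info)
}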
@@ -19851,15 +20693,14 @@ join_read_first(JOIN_TAB *tab)
!table->covering_keys.is_set(tab->index) ||
table->file->keyread == tab->index);
tab->table->status=0;
- tab->read_record.read_record=join_read_next;
+ tab->read_record.read_record_func= join_read_next;
tab->read_record.table=table;
- tab->read_record.index=tab->index;
- tab->read_record.record=table->record[0];
if (!table->file->inited)
error= table->file->ha_index_init(tab->index, tab->sorted);
- if (!error)
+ if (likely(!error))
error= table->file->prepare_index_scan();
- if (error || (error=tab->table->file->ha_index_first(tab->table->record[0])))
+ if (unlikely(error) ||
+ unlikely(error= tab->table->file->ha_index_first(tab->table->record[0])))
{
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
report_error(table, error);
@@ -19873,7 +20714,7 @@ static int
join_read_next(READ_RECORD *info)
{
int error;
- if ((error= info->table->file->ha_index_next(info->record)))
+ if (unlikely((error= info->table->file->ha_index_next(info->record()))))
return report_error(info->table, error);
return 0;
@@ -19891,15 +20732,14 @@ join_read_last(JOIN_TAB *tab)
!table->covering_keys.is_set(tab->index) ||
table->file->keyread == tab->index);
tab->table->status=0;
- tab->read_record.read_record=join_read_prev;
+ tab->read_record.read_record_func= join_read_prev;
tab->read_record.table=table;
- tab->read_record.index=tab->index;
- tab->read_record.record=table->record[0];
if (!table->file->inited)
error= table->file->ha_index_init(tab->index, 1);
- if (!error)
+ if (likely(!error))
error= table->file->prepare_index_scan();
- if (error || (error= tab->table->file->ha_index_last(tab->table->record[0])))
+ if (unlikely(error) ||
+ unlikely(error= tab->table->file->ha_index_last(tab->table->record[0])))
DBUG_RETURN(report_error(table, error));
DBUG_RETURN(0);
@@ -19910,7 +20750,7 @@ static int
join_read_prev(READ_RECORD *info)
{
int error;
- if ((error= info->table->file->ha_index_prev(info->record)))
+ if (unlikely((error= info->table->file->ha_index_prev(info->record()))))
return report_error(info->table, error);
return 0;
}
@@ -19931,7 +20771,7 @@ join_ft_read_first(JOIN_TAB *tab)
table->file->ft_init();
- if ((error= table->file->ha_ft_read(table->record[0])))
+ if (unlikely((error= table->file->ha_ft_read(table->record[0]))))
return report_error(table, error);
return 0;
}
@@ -19940,7 +20780,7 @@ static int
join_ft_read_next(READ_RECORD *info)
{
int error;
- if ((error= info->table->file->ha_ft_read(info->table->record[0])))
+ if (unlikely((error= info->table->file->ha_ft_read(info->record()))))
return report_error(info->table, error);
return 0;
}
@@ -19970,7 +20810,7 @@ int
join_read_next_same_or_null(READ_RECORD *info)
{
int error;
- if ((error= join_read_next_same(info)) >= 0)
+ if (unlikely((error= join_read_next_same(info)) >= 0))
return error;
JOIN_TAB *tab= info->table->reginfo.join_tab;
@@ -20042,7 +20882,7 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
int error;
/* result < 0 if row was not accepted and should not be counted */
- if ((error= join->result->send_data(*fields)))
+ if (unlikely((error= join->result->send_data(*fields))))
{
if (error > 0)
DBUG_RETURN(NESTED_LOOP_ERROR);
@@ -20191,7 +21031,7 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (join->do_send_rows)
{
error=join->result->send_data(*fields);
- if (error < 0)
+ if (unlikely(error < 0))
{
/* Duplicate row, don't count */
join->duplicate_rows++;
@@ -20201,13 +21041,13 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
join->send_records++;
join->group_sent= true;
}
- if (join->rollup.state != ROLLUP::STATE_NONE && error <= 0)
+ if (unlikely(join->rollup.state != ROLLUP::STATE_NONE && error <= 0))
{
if (join->rollup_send_data((uint) (idx+1)))
error= 1;
}
}
- if (error > 0)
+ if (unlikely(error > 0))
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
if (end_of_records)
DBUG_RETURN(NESTED_LOOP_OK);
@@ -20277,14 +21117,14 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd))
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
- if (!join_tab->having || join_tab->having->val_int())
+ if (likely(!join_tab->having || join_tab->having->val_int()))
{
int error;
join->found_records++;
if ((error= table->file->ha_write_tmp_row(table->record[0])))
{
- if (!table->file->is_fatal_error(error, HA_CHECK_DUP))
- goto end;
+ if (likely(!table->file->is_fatal_error(error, HA_CHECK_DUP)))
+ goto end; // Ignore duplicate keys
bool is_duplicate;
if (create_internal_tmp_table_from_heap(join->thd, table,
join_tab->tmp_table_param->start_recinfo,
@@ -20307,9 +21147,8 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
}
}
end:
- if (join->thd->check_killed())
+ if (unlikely(join->thd->check_killed()))
{
- join->thd->send_kill_message();
DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */
}
DBUG_RETURN(NESTED_LOOP_OK);
@@ -20366,8 +21205,8 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{ /* Update old record */
restore_record(table,record[1]);
update_tmptable_sum_func(join->sum_funcs,table);
- if ((error= table->file->ha_update_tmp_row(table->record[1],
- table->record[0])))
+ if (unlikely((error= table->file->ha_update_tmp_row(table->record[1],
+ table->record[0]))))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
@@ -20376,9 +21215,10 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
}
init_tmptable_sum_functions(join->sum_funcs);
- if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd))
+ if (unlikely(copy_funcs(join_tab->tmp_table_param->items_to_copy,
+ join->thd)))
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
- if ((error= table->file->ha_write_tmp_row(table->record[0])))
+ if (unlikely((error= table->file->ha_write_tmp_row(table->record[0]))))
{
if (create_internal_tmp_table_from_heap(join->thd, table,
join_tab->tmp_table_param->start_recinfo,
@@ -20386,7 +21226,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
error, 0, NULL))
DBUG_RETURN(NESTED_LOOP_ERROR); // Not a table_is_full error
/* Change method to update rows */
- if ((error= table->file->ha_index_init(0, 0)))
+ if (unlikely((error= table->file->ha_index_init(0, 0))))
{
table->file->print_error(error, MYF(0));
DBUG_RETURN(NESTED_LOOP_ERROR);
@@ -20396,9 +21236,8 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
}
join_tab->send_records++;
end:
- if (join->thd->check_killed())
+ if (unlikely(join->thd->check_killed()))
{
- join->thd->send_kill_message();
DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */
}
DBUG_RETURN(NESTED_LOOP_OK);
@@ -20423,11 +21262,11 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd))
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
- if (!(error= table->file->ha_write_tmp_row(table->record[0])))
+ if (likely(!(error= table->file->ha_write_tmp_row(table->record[0]))))
join_tab->send_records++; // New group
else
{
- if ((int) table->file->get_dup_key(error) < 0)
+ if (unlikely((int) table->file->get_dup_key(error) < 0))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
@@ -20441,15 +21280,15 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
table->file->print_error(error, MYF(0));
DBUG_RETURN(NESTED_LOOP_ERROR);
}
- if (table->file->ha_rnd_pos(table->record[1],table->file->dup_ref))
+ if (unlikely(table->file->ha_rnd_pos(table->record[1],table->file->dup_ref)))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
}
restore_record(table,record[1]);
update_tmptable_sum_func(join->sum_funcs,table);
- if ((error= table->file->ha_update_tmp_row(table->record[1],
- table->record[0])))
+ if (unlikely((error= table->file->ha_update_tmp_row(table->record[1],
+ table->record[0]))))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
@@ -20462,9 +21301,8 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
DBUG_RETURN(NESTED_LOOP_ERROR);
}
}
- if (join->thd->check_killed())
+ if (unlikely(join->thd->check_killed()))
{
- join->thd->send_kill_message();
DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */
}
DBUG_RETURN(NESTED_LOOP_OK);
@@ -20510,17 +21348,18 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (!join_tab->having || join_tab->having->val_int())
{
int error= table->file->ha_write_tmp_row(table->record[0]);
- if (error &&
+ if (unlikely(error) &&
create_internal_tmp_table_from_heap(join->thd, table,
join_tab->tmp_table_param->start_recinfo,
&join_tab->tmp_table_param->recinfo,
error, 0, NULL))
DBUG_RETURN(NESTED_LOOP_ERROR);
}
- if (join->rollup.state != ROLLUP::STATE_NONE)
+ if (unlikely(join->rollup.state != ROLLUP::STATE_NONE))
{
- if (join->rollup_write_data((uint) (idx+1),
- join_tab->tmp_table_param, table))
+ if (unlikely(join->rollup_write_data((uint) (idx+1),
+ join_tab->tmp_table_param,
+ table)))
{
DBUG_RETURN(NESTED_LOOP_ERROR);
}
@@ -20539,23 +21378,24 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (idx < (int) join->send_group_parts)
{
copy_fields(join_tab->tmp_table_param);
- if (copy_funcs(join_tab->tmp_table_param->items_to_copy, join->thd))
+ if (unlikely(copy_funcs(join_tab->tmp_table_param->items_to_copy,
+ join->thd)))
DBUG_RETURN(NESTED_LOOP_ERROR);
- if (init_sum_functions(join->sum_funcs, join->sum_funcs_end[idx+1]))
+ if (unlikely(init_sum_functions(join->sum_funcs,
+ join->sum_funcs_end[idx+1])))
DBUG_RETURN(NESTED_LOOP_ERROR);
- if (join->procedure)
+ if (unlikely(join->procedure))
join->procedure->add();
goto end;
}
}
- if (update_sum_func(join->sum_funcs))
+ if (unlikely(update_sum_func(join->sum_funcs)))
DBUG_RETURN(NESTED_LOOP_ERROR);
- if (join->procedure)
+ if (unlikely(join->procedure))
join->procedure->add();
end:
- if (join->thd->check_killed())
+ if (unlikely(join->thd->check_killed()))
{
- join->thd->send_kill_message();
DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */
}
DBUG_RETURN(NESTED_LOOP_OK);
@@ -21068,7 +21908,7 @@ part_of_refkey(TABLE *table,Field *field)
static int test_if_order_by_key(JOIN *join,
ORDER *order, TABLE *table, uint idx,
- uint *used_key_parts= NULL)
+ uint *used_key_parts)
{
KEY_PART_INFO *key_part,*key_part_end;
key_part=table->key_info[idx].key_part;
@@ -21927,7 +22767,7 @@ check_reverse_order:
with key part (A) and then traverse the index backwards.
*/
tab->read_first_record= join_read_last_key;
- tab->read_record.read_record= join_read_prev_same;
+ tab->read_record.read_record_func= join_read_prev_same;
/*
Cancel Pushed Index Condition, as it doesn't work for reverse scans.
*/
@@ -22057,7 +22897,8 @@ create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort)
{
DBUG_ASSERT(tab->type == JT_REF || tab->type == JT_EQ_REF);
// Update ref value
- if ((cp_buffer_from_ref(thd, table, &tab->ref) && thd->is_fatal_error))
+ if (unlikely(cp_buffer_from_ref(thd, table, &tab->ref) &&
+ thd->is_fatal_error))
goto err; // out of memory
}
}
@@ -22065,7 +22906,7 @@ create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort)
/* Fill schema tables with data before filesort if it's necessary */
if ((join->select_lex->options & OPTION_SCHEMA_TABLE) &&
- get_schema_tables_result(join, PROCESSED_BY_CREATE_SORT_INDEX))
+ unlikely(get_schema_tables_result(join, PROCESSED_BY_CREATE_SORT_INDEX)))
goto err;
if (table->s->tmp_table)
@@ -22089,7 +22930,7 @@ create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort)
table->file->ha_end_keyread();
if (tab->type == JT_FT)
- table->file->ft_end();
+ table->file->ha_ft_end();
else
table->file->ha_index_or_rnd_end();
@@ -22233,37 +23074,31 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
int error;
DBUG_ENTER("remove_dup_with_compare");
- if (file->ha_rnd_init_with_error(1))
+ if (unlikely(file->ha_rnd_init_with_error(1)))
DBUG_RETURN(1);
error= file->ha_rnd_next(record);
for (;;)
{
- if (thd->check_killed())
+ if (unlikely(thd->check_killed()))
{
- thd->send_kill_message();
error=0;
goto err;
}
- if (error)
+ if (unlikely(error))
{
- if (error == HA_ERR_RECORD_DELETED)
- {
- error= file->ha_rnd_next(record);
- continue;
- }
if (error == HA_ERR_END_OF_FILE)
break;
goto err;
}
if (having && !having->val_int())
{
- if ((error= file->ha_delete_row(record)))
+ if (unlikely((error= file->ha_delete_row(record))))
goto err;
error= file->ha_rnd_next(record);
continue;
}
- if (copy_blobs(first_field))
+ if (unlikely(copy_blobs(first_field)))
{
my_message(ER_OUTOFMEMORY, ER_THD(thd,ER_OUTOFMEMORY),
MYF(ME_FATALERROR));
@@ -22276,30 +23111,28 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
bool found=0;
for (;;)
{
- if ((error= file->ha_rnd_next(record)))
+ if (unlikely((error= file->ha_rnd_next(record))))
{
- if (error == HA_ERR_RECORD_DELETED)
- continue;
if (error == HA_ERR_END_OF_FILE)
break;
goto err;
}
if (compare_record(table, first_field) == 0)
{
- if ((error= file->ha_delete_row(record)))
+ if (unlikely((error= file->ha_delete_row(record))))
goto err;
}
else if (!found)
{
found=1;
- if ((error= file->remember_rnd_pos()))
+ if (unlikely((error= file->remember_rnd_pos())))
goto err;
}
}
if (!found)
break; // End of file
/* Restart search on saved row */
- if ((error= file->restart_rnd_next(record)))
+ if (unlikely((error= file->restart_rnd_next(record))))
goto err;
}
@@ -22337,49 +23170,47 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
Field **ptr;
DBUG_ENTER("remove_dup_with_hash_index");
- if (!my_multi_malloc(MYF(MY_WME),
- &key_buffer,
- (uint) ((key_length + extra_length) *
- (long) file->stats.records),
- &field_lengths,
- (uint) (field_count*sizeof(*field_lengths)),
- NullS))
+ if (unlikely(!my_multi_malloc(MYF(MY_WME),
+ &key_buffer,
+ (uint) ((key_length + extra_length) *
+ (long) file->stats.records),
+ &field_lengths,
+ (uint) (field_count*sizeof(*field_lengths)),
+ NullS)))
DBUG_RETURN(1);
for (ptr= first_field, field_length=field_lengths ; *ptr ; ptr++)
(*field_length++)= (*ptr)->sort_length();
- if (my_hash_init(&hash, &my_charset_bin, (uint) file->stats.records, 0,
- key_length, (my_hash_get_key) 0, 0, 0))
+ if (unlikely(my_hash_init(&hash, &my_charset_bin,
+ (uint) file->stats.records, 0,
+ key_length, (my_hash_get_key) 0, 0, 0)))
{
my_free(key_buffer);
DBUG_RETURN(1);
}
- if ((error= file->ha_rnd_init(1)))
+ if (unlikely((error= file->ha_rnd_init(1))))
goto err;
key_pos=key_buffer;
for (;;)
{
uchar *org_key_pos;
- if (thd->check_killed())
+ if (unlikely(thd->check_killed()))
{
- thd->send_kill_message();
error=0;
goto err;
}
- if ((error= file->ha_rnd_next(record)))
+ if (unlikely((error= file->ha_rnd_next(record))))
{
- if (error == HA_ERR_RECORD_DELETED)
- continue;
if (error == HA_ERR_END_OF_FILE)
break;
goto err;
}
if (having && !having->val_int())
{
- if ((error= file->ha_delete_row(record)))
+ if (unlikely((error= file->ha_delete_row(record))))
goto err;
continue;
}
@@ -22396,7 +23227,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
if (my_hash_search(&hash, org_key_pos, key_length))
{
/* Duplicated found ; Remove the row */
- if ((error= file->ha_delete_row(record)))
+ if (unlikely((error= file->ha_delete_row(record))))
goto err;
}
else
@@ -22417,7 +23248,7 @@ err:
my_hash_free(&hash);
file->extra(HA_EXTRA_NO_CACHE);
(void) file->ha_rnd_end();
- if (error)
+ if (unlikely(error))
file->print_error(error,MYF(0));
DBUG_RETURN(1);
}
@@ -22587,8 +23418,8 @@ find_order_in_list(THD *thd, Ref_ptr_array ref_pointer_array,
original field name, we should additionally check if we have conflict
for this name (in case if we would perform lookup in all tables).
*/
- if (resolution == RESOLVED_BEHIND_ALIAS && !order_item->fixed &&
- order_item->fix_fields(thd, order->item))
+ if (resolution == RESOLVED_BEHIND_ALIAS &&
+ order_item->fix_fields_if_needed_for_order_by(thd, order->item))
return TRUE;
/* Lookup the current GROUP field in the FROM clause. */
@@ -22640,7 +23471,7 @@ find_order_in_list(THD *thd, Ref_ptr_array ref_pointer_array,
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_NON_UNIQ_ERROR,
ER_THD(thd, ER_NON_UNIQ_ERROR),
- ((Item_ident*) order_item)->field_name,
+ ((Item_ident*) order_item)->field_name.str,
thd->where);
}
}
@@ -22669,11 +23500,10 @@ find_order_in_list(THD *thd, Ref_ptr_array ref_pointer_array,
We check order_item->fixed because Item_func_group_concat can put
arguments for which fix_fields already was called.
*/
- if (!order_item->fixed &&
- (order_item->fix_fields(thd, order->item) ||
- (order_item= *order->item)->check_cols(1) ||
- thd->is_error()))
+ if (order_item->fix_fields_if_needed_for_order_by(thd, order->item) ||
+ thd->is_error())
return TRUE; /* Wrong field. */
+ order_item= *order->item; // Item can change during fix_fields()
if (!add_to_all_fields)
return FALSE;
@@ -22880,7 +23710,7 @@ setup_new_fields(THD *thd, List<Item> &fields,
enum_resolution_type not_used;
DBUG_ENTER("setup_new_fields");
- thd->mark_used_columns= MARK_COLUMNS_READ; // Not really needed, but...
+ thd->column_usage= MARK_COLUMNS_READ; // Not really needed, but...
for (; new_field ; new_field= new_field->next)
{
if ((item= find_item_in_list(*new_field->item, fields, &counter,
@@ -22903,6 +23733,10 @@ setup_new_fields(THD *thd, List<Item> &fields,
Try to use the fields in the order given by 'order' to allow one to
optimize away 'order by'.
+
+ @retval
+ 0 OOM error if thd->is_fatal_error is set. Otherwise group was eliminated
+ # Pointer to new group
*/
ORDER *
@@ -22965,6 +23799,8 @@ create_distinct_group(THD *thd, Ref_ptr_array ref_pointer_array,
      BIT type and will be returned to a client.
*/
Item_field *new_item= new (thd->mem_root) Item_field(thd, (Item_field*)item);
+ if (!new_item)
+ return 0;
int el= all_fields.elements;
orig_ref_pointer_array[el]= new_item;
all_fields.push_front(new_item, thd->mem_root);
@@ -23121,13 +23957,10 @@ get_sort_by_table(ORDER *a,ORDER *b, List<TABLE_LIST> &tables,
calc how big buffer we need for comparing group entries.
*/
-static void
-calc_group_buffer(JOIN *join,ORDER *group)
+void calc_group_buffer(TMP_TABLE_PARAM *param, ORDER *group)
{
uint key_length=0, parts=0, null_parts=0;
- if (group)
- join->group= 1;
for (; group ; group=group->next)
{
Item *group_item= *group->item;
@@ -23197,9 +24030,16 @@ calc_group_buffer(JOIN *join,ORDER *group)
if (group_item->maybe_null)
null_parts++;
}
- join->tmp_table_param.group_length=key_length+null_parts;
- join->tmp_table_param.group_parts=parts;
- join->tmp_table_param.group_null_parts=null_parts;
+ param->group_length= key_length + null_parts;
+ param->group_parts= parts;
+ param->group_null_parts= null_parts;
+}
+
+static void calc_group_buffer(JOIN *join, ORDER *group)
+{
+ if (group)
+ join->group= 1;
+ calc_group_buffer(&join->tmp_table_param, group);
}
@@ -23439,7 +24279,9 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
real_pos->type() == Item::COND_ITEM) &&
!real_pos->with_sum_func)
{ // Save for send fields
+ LEX_CSTRING real_name= pos->name;
pos= real_pos;
+ pos->name= real_name;
/* TODO:
In most cases this result will be sent to the user.
This should be changed to use copy_int or copy_real depending
@@ -23648,7 +24490,10 @@ change_to_use_tmp_fields(THD *thd, Ref_ptr_array ref_pointer_array,
item->with_window_func)
item_field= item;
else if (item->type() == Item::FIELD_ITEM)
- item_field= item->get_tmp_table_item(thd);
+ {
+ if (!(item_field= item->get_tmp_table_item(thd)))
+ DBUG_RETURN(true);
+ }
else if (item->type() == Item::FUNC_ITEM &&
((Item_func*)item)->functype() == Item_func::SUSERVAR_FUNC)
{
@@ -23694,14 +24539,15 @@ change_to_use_tmp_fields(THD *thd, Ref_ptr_array ref_pointer_array,
ifield->db_name= iref->db_name;
}
#ifndef DBUG_OFF
- if (!item_field->name)
+ if (!item_field->name.str)
{
char buff[256];
String str(buff,sizeof(buff),&my_charset_bin);
str.length(0);
str.extra_allocation(1024);
item->print(&str, QT_ORDINARY);
- item_field->name= thd->strmake(str.ptr(),str.length());
+ item_field->name.str= thd->strmake(str.ptr(), str.length());
+ item_field->name.length= str.length();
}
#endif
}
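
The name handling above reflects the switch from plain char* names to LEX_CSTRING, a (const char *str, size_t length) pair, which is why the hunk assigns name.str and name.length together. A minimal sketch with a stand-in type:

#include <cstring>

struct lex_cstring_sketch { const char *str; size_t length; };

void set_name(lex_cstring_sketch *name, const char *s, size_t len)
{
  name->str= s;
  name->length= len;    // forgetting this would leave a stale length behind
}

void set_name_from_cstr(lex_cstring_sketch *name, const char *s)
{
  set_name(name, s, std::strlen(s));
}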
@@ -23755,8 +24601,13 @@ change_refs_to_tmp_fields(THD *thd, Ref_ptr_array ref_pointer_array,
if (item->type() == Item::SUM_FUNC_ITEM && item->const_item())
new_item= item;
else
- new_item= item->get_tmp_table_item(thd);
- res_all_fields.push_back(new_item, thd->mem_root);
+ {
+ if (!(new_item= item->get_tmp_table_item(thd)))
+ return 1;
+ }
+
+ if (res_all_fields.push_back(new_item, thd->mem_root))
+ return 1;
ref_pointer_array[((i < border)? all_fields.elements-i-1 : i-border)]=
new_item;
}
@@ -23908,7 +24759,7 @@ copy_funcs(Item **func_ptr, const THD *thd)
TODO: change it for a real status check when Item::val_xxx()
are extended to return status code.
*/
- if (thd->is_error())
+ if (unlikely(thd->is_error()))
return TRUE;
}
return FALSE;
@@ -23942,7 +24793,7 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab)
value),
thd->mem_root);
}
- if (thd->is_fatal_error)
+ if (unlikely(thd->is_fatal_error))
DBUG_RETURN(TRUE);
if (!cond->fixed)
{
@@ -23962,7 +24813,7 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab)
{
Item *new_cond= and_conds(thd, cond_copy,
join_tab->select->pre_idx_push_select_cond);
- if (!new_cond->fixed && new_cond->fix_fields(thd, &new_cond))
+ if (new_cond->fix_fields_if_needed(thd, &new_cond))
error= 1;
join_tab->pre_idx_push_select_cond=
join_tab->select->pre_idx_push_select_cond= new_cond;
@@ -24057,8 +24908,9 @@ static bool change_group_ref(THD *thd, Item_func *expr, ORDER *group_list,
if (item->eq(*group_tmp->item,0))
{
Item *new_item;
- if (!(new_item= new (thd->mem_root) Item_ref(thd, context, group_tmp->item, 0,
- item->name)))
+ if (!(new_item= new (thd->mem_root) Item_ref(thd, context,
+ group_tmp->item, 0,
+ &item->name)))
return 1; // fatal_error is set
thd->change_item_tree(arg, new_item);
arg_changed= TRUE;
@@ -24121,7 +24973,9 @@ bool JOIN::rollup_init()
*/
for (i= 0 ; i < send_group_parts ; i++)
{
- rollup.null_items[i]= new (thd->mem_root) Item_null_result(thd);
+ if (!(rollup.null_items[i]= new (thd->mem_root) Item_null_result(thd)))
+ return true;
+
List<Item> *rollup_fields= &rollup.fields[i];
rollup_fields->empty();
rollup.ref_pointer_arrays[i]= Ref_ptr_array(ref_array, all_fields.elements);
@@ -24431,7 +25285,8 @@ int JOIN::rollup_write_data(uint idx, TMP_TABLE_PARAM *tmp_table_param_arg, TABL
item->save_in_result_field(1);
}
copy_sum_funcs(sum_funcs_end[i+1], sum_funcs_end[i]);
- if ((write_error= table_arg->file->ha_write_tmp_row(table_arg->record[0])))
+ if (unlikely((write_error=
+ table_arg->file->ha_write_tmp_row(table_arg->record[0]))))
{
if (create_internal_tmp_table_from_heap(thd, table_arg,
tmp_table_param_arg->start_recinfo,
@@ -24465,8 +25320,12 @@ void JOIN::clear()
}
-/*
+/**
Print an EXPLAIN line with all NULLs and given message in the 'Extra' column
+
+ @retval
+ 0 ok
+ 1 OOM error or error from send_data()
*/
int print_explain_message_line(select_result_sink *result,
@@ -24525,7 +25384,7 @@ int print_explain_message_line(select_result_sink *result,
else
item_list.push_back(item_null, mem_root);
- if (result->send_data(item_list))
+ if (unlikely(thd->is_fatal_error) || unlikely(result->send_data(item_list)))
return 1;
return 0;
}
@@ -24559,13 +25418,14 @@ int append_possible_keys(MEM_ROOT *alloc, String_list &list, TABLE *table,
for (j=0 ; j < table->s->keys ; j++)
{
if (possible_keys.is_set(j))
- list.append_str(alloc, table->key_info[j].name);
+ if (!(list.append_str(alloc, table->key_info[j].name.str)))
+ return 1;
}
return 0;
}
-void JOIN_TAB::save_explain_data(Explain_table_access *eta,
+bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
table_map prefix_tables,
bool distinct_arg, JOIN_TAB *first_top_tab)
{
@@ -24594,9 +25454,11 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
if (filesort)
{
- eta->pre_join_sort= new (thd->mem_root) Explain_aggr_filesort(thd->mem_root,
- thd->lex->analyze_stmt,
- filesort);
+ if (!(eta->pre_join_sort=
+ new (thd->mem_root) Explain_aggr_filesort(thd->mem_root,
+ thd->lex->analyze_stmt,
+ filesort)))
+ return 1;
}
tracker= &eta->tracker;
@@ -24612,7 +25474,7 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
if (table->derived_select_number)
{
/* Derived table name generation */
- int len= my_snprintf(table_name_buffer, sizeof(table_name_buffer)-1,
+ size_t len= my_snprintf(table_name_buffer, sizeof(table_name_buffer)-1,
"<derived%u>",
table->derived_select_number);
eta->table_name.copy(table_name_buffer, len, cs);
@@ -24621,7 +25483,7 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
{
JOIN_TAB *ctab= bush_children->start;
/* table */
- int len= my_snprintf(table_name_buffer,
+ size_t len= my_snprintf(table_name_buffer,
sizeof(table_name_buffer)-1,
"<subquery%d>",
ctab->emb_sj_nest->sj_subq_pred->get_identifier());
@@ -24650,7 +25512,7 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
}
}
}
- eta->table_name.copy(real_table->alias, strlen(real_table->alias), cs);
+ eta->table_name.copy(real_table->alias.str, real_table->alias.length, cs);
}
/* "partitions" column */
@@ -24693,7 +25555,8 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
// psergey-todo: why does this use thd MEM_ROOT??? Doesn't this
// break ANALYZE ? thd->mem_root will be freed, and after that we will
// attempt to print the query plan?
- append_possible_keys(thd->mem_root, eta->possible_keys, table, keys);
+ if (append_possible_keys(thd->mem_root, eta->possible_keys, table, keys))
+ return 1;
// psergey-todo: ^ check for error return code
/* Build "key", "key_len", and "ref" */
@@ -24714,7 +25577,8 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
*/
if (tab_select && tab_select->quick && tab_type != JT_CONST)
{
- eta->quick_info= tab_select->quick->get_explain(thd->mem_root);
+ if (!(eta->quick_info= tab_select->quick->get_explain(thd->mem_root)))
+ return 1;
}
if (key_info) /* 'index' or 'ref' access */
@@ -24727,10 +25591,23 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
for (uint kp= 0; kp < ref.key_parts; kp++)
{
if ((key_part_map(1) << kp) & ref.const_ref_part_map)
- eta->ref_list.append_str(thd->mem_root, "const");
+ {
+ if (!(eta->ref_list.append_str(thd->mem_root, "const")))
+ return 1;
+ /*
+ create_ref_for_key() handles keypart=const equalities as follows:
+            - non-EXPLAIN execution will copy the "const" to the lookup tuple

+ immediately and will not add an element to ref.key_copy
+ - EXPLAIN will put an element into ref.key_copy. Since we've
+ just printed "const" for it, we should skip it here
+ */
+ if (thd->lex->describe)
+ key_ref++;
+ }
else
{
- eta->ref_list.append_str(thd->mem_root, (*key_ref)->name());
+ if (!(eta->ref_list.append_str(thd->mem_root, (*key_ref)->name())))
+ return 1;
key_ref++;
}
}
@@ -24964,13 +25841,13 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
{
char namebuf[NAME_LEN];
/* Derived table name generation */
- int len= my_snprintf(namebuf, sizeof(namebuf)-1,
+ size_t len= my_snprintf(namebuf, sizeof(namebuf)-1,
"<derived%u>",
prev_table->derived_select_number);
eta->firstmatch_table_name.append(namebuf, len);
}
else
- eta->firstmatch_table_name.append(prev_table->pos_in_table_list->alias);
+ eta->firstmatch_table_name.append(&prev_table->pos_in_table_list->alias);
}
}
@@ -24987,7 +25864,8 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
if (cache)
{
eta->push_extra(ET_USING_JOIN_BUFFER);
- cache->save_explain_data(&eta->bka_type);
+ if (cache->save_explain_data(&eta->bka_type))
+ return 1;
}
}
@@ -25000,15 +25878,21 @@ void JOIN_TAB::save_explain_data(Explain_table_access *eta,
/* The same for non-merged semi-joins */
eta->non_merged_sjm_number = get_non_merged_semijoin_select();
+
+ return 0;
}
/*
Walk through join->aggr_tables and save aggregation/grouping query plan into
an Explain_select object
+
+ @retval
+ 0 ok
+ 1 error
*/
-void save_agg_explain_data(JOIN *join, Explain_select *xpl_sel)
+bool save_agg_explain_data(JOIN *join, Explain_select *xpl_sel)
{
JOIN_TAB *join_tab=join->join_tab + join->exec_join_tab_cnt();
Explain_aggr_node *prev_node;
@@ -25020,7 +25904,8 @@ void save_agg_explain_data(JOIN *join, Explain_select *xpl_sel)
{
// Each aggregate means a temp.table
prev_node= node;
- node= new (thd->mem_root) Explain_aggr_tmp_table;
+ if (!(node= new (thd->mem_root) Explain_aggr_tmp_table))
+ return 1;
node->child= prev_node;
if (join_tab->window_funcs_step)
@@ -25028,19 +25913,20 @@ void save_agg_explain_data(JOIN *join, Explain_select *xpl_sel)
Explain_aggr_node *new_node=
join_tab->window_funcs_step->save_explain_plan(thd->mem_root,
is_analyze);
- if (new_node)
- {
- prev_node=node;
- node= new_node;
- node->child= prev_node;
- }
+ if (!new_node)
+ return 1;
+
+ prev_node=node;
+ node= new_node;
+ node->child= prev_node;
}
/* The below matches execution in join_init_read_record() */
if (join_tab->distinct)
{
prev_node= node;
- node= new (thd->mem_root) Explain_aggr_remove_dups;
+ if (!(node= new (thd->mem_root) Explain_aggr_remove_dups))
+ return 1;
node->child= prev_node;
}
@@ -25048,20 +25934,27 @@ void save_agg_explain_data(JOIN *join, Explain_select *xpl_sel)
{
Explain_aggr_filesort *eaf =
new (thd->mem_root) Explain_aggr_filesort(thd->mem_root, is_analyze, join_tab->filesort);
+ if (!eaf)
+ return 1;
prev_node= node;
node= eaf;
node->child= prev_node;
}
}
xpl_sel->aggr_tree= node;
+ return 0;
}
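
The new NULL checks after `new (thd->mem_root) ...` in this and the surrounding EXPLAIN hunks exist because MEM_ROOT allocation goes through a no-throw placement-style operator new, so out-of-memory surfaces as a null pointer rather than an exception. A sketch under that assumption, with a stand-in arena type:

#include <cstdlib>
#include <new>

struct mem_root_sketch { /* arena bookkeeping elided */ };

void *operator new(std::size_t size, mem_root_sketch *) noexcept
{
  return std::malloc(size);   // the real one calls alloc_root(); may return NULL
}

struct Explain_node_sketch { int dummy= 0; };

int save_plan(mem_root_sketch *root)
{
  Explain_node_sketch *node= new (root) Explain_node_sketch;
  if (!node)
    return 1;                 // each call site tests and propagates the error
  std::free(node);            // sketch only; a real MEM_ROOT frees in bulk
  return 0;
}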
-/*
+/**
Save Query Plan Footprint
@note
Currently, this function may be called multiple times
+
+ @retval
+ 0 ok
+ 1 error
*/
int JOIN::save_explain_data_intern(Explain_query *output,
@@ -25070,7 +25963,6 @@ int JOIN::save_explain_data_intern(Explain_query *output,
const char *message)
{
JOIN *join= this; /* Legacy: this code used to be a non-member function */
- int cur_error= 0;
DBUG_ENTER("JOIN::save_explain_data_intern");
DBUG_PRINT("info", ("Select %p, type %s, message %s",
join->select_lex, join->select_lex->type,
@@ -25088,10 +25980,10 @@ int JOIN::save_explain_data_intern(Explain_query *output,
if (message)
{
- explain= new (output->mem_root) Explain_select(output->mem_root,
- thd->lex->analyze_stmt);
- if (!explain)
- DBUG_RETURN(1); // EoM
+ if (!(explain= new (output->mem_root)
+ Explain_select(output->mem_root,
+ thd->lex->analyze_stmt)))
+ DBUG_RETURN(1);
#ifndef DBUG_OFF
explain->select_lex= select_lex;
#endif
@@ -25099,6 +25991,7 @@ int JOIN::save_explain_data_intern(Explain_query *output,
explain->select_id= join->select_lex->select_number;
explain->select_type= join->select_lex->type;
+ explain->linkage= select_lex->linkage;
explain->using_temporary= need_tmp;
explain->using_filesort= need_order_arg;
/* Setting explain->message means that all other members are invalid */
@@ -25106,17 +25999,22 @@ int JOIN::save_explain_data_intern(Explain_query *output,
if (select_lex->master_unit()->derived)
explain->connection_type= Explain_node::EXPLAIN_NODE_DERIVED;
- save_agg_explain_data(this, explain);
+ if (save_agg_explain_data(this, explain))
+ DBUG_RETURN(1);
+
output->add_node(explain);
}
else if (pushdown_query)
{
- explain= new (output->mem_root) Explain_select(output->mem_root,
- thd->lex->analyze_stmt);
+ if (!(explain= new (output->mem_root)
+ Explain_select(output->mem_root,
+ thd->lex->analyze_stmt)))
+ DBUG_RETURN(1);
select_lex->set_explain_type(true);
explain->select_id= select_lex->select_number;
explain->select_type= select_lex->type;
+ explain->linkage= select_lex->linkage;
explain->using_temporary= need_tmp;
explain->using_filesort= need_order_arg;
explain->message= "Storage engine handles GROUP BY";
@@ -25131,15 +26029,22 @@ int JOIN::save_explain_data_intern(Explain_query *output,
explain= xpl_sel=
new (output->mem_root) Explain_select(output->mem_root,
thd->lex->analyze_stmt);
+ if (!explain)
+ DBUG_RETURN(1);
+
table_map used_tables=0;
join->select_lex->set_explain_type(true);
xpl_sel->select_id= join->select_lex->select_number;
xpl_sel->select_type= join->select_lex->type;
+ xpl_sel->linkage= select_lex->linkage;
+ xpl_sel->is_lateral= ((select_lex->linkage == DERIVED_TABLE_TYPE) &&
+ (select_lex->uncacheable & UNCACHEABLE_DEPENDENT));
if (select_lex->master_unit()->derived)
xpl_sel->connection_type= Explain_node::EXPLAIN_NODE_DERIVED;
- save_agg_explain_data(this, xpl_sel);
+ if (save_agg_explain_data(this, xpl_sel))
+ DBUG_RETURN(1);
xpl_sel->exec_const_cond= exec_const_cond;
xpl_sel->outer_ref_cond= outer_ref_cond;
@@ -25172,6 +26077,8 @@ int JOIN::save_explain_data_intern(Explain_query *output,
Explain_table_access *eta= (new (output->mem_root)
Explain_table_access(output->mem_root));
+ if (!eta)
+ DBUG_RETURN(1);
if (tab->bush_root_tab != prev_bush_root_tab)
{
if (tab->bush_root_tab)
@@ -25179,7 +26086,9 @@ int JOIN::save_explain_data_intern(Explain_query *output,
/*
We've entered an SJ-Materialization nest. Create an object for it.
*/
- cur_parent= new (output->mem_root) Explain_basic_join(output->mem_root);
+ if (!(cur_parent=
+ new (output->mem_root) Explain_basic_join(output->mem_root)))
+ DBUG_RETURN(1);
JOIN_TAB *first_child= tab->bush_root_tab->bush_children->start;
cur_parent->select_id=
@@ -25199,7 +26108,8 @@ int JOIN::save_explain_data_intern(Explain_query *output,
prev_bush_root_tab= tab->bush_root_tab;
cur_parent->add_table(eta, output);
- tab->save_explain_data(eta, used_tables, distinct_arg, first_top_tab);
+ if (tab->save_explain_data(eta, used_tables, distinct_arg, first_top_tab))
+ DBUG_RETURN(1);
if (saved_join_tab)
tab= saved_join_tab;
@@ -25231,10 +26141,10 @@ int JOIN::save_explain_data_intern(Explain_query *output,
}
}
- if (!cur_error && select_lex->is_top_level_node())
+ if (select_lex->is_top_level_node())
output->query_plan_ready();
- DBUG_RETURN(cur_error);
+ DBUG_RETURN(0);
}
@@ -25313,15 +26223,16 @@ bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result)
sl->options|= SELECT_DESCRIBE;
}
- if (unit->is_union())
+ if (unit->is_unit_op())
{
if (unit->union_needs_tmp_table() && unit->fake_select_lex)
{
unit->fake_select_lex->select_number= FAKE_SELECT_LEX_ID; // just for initialization
- unit->fake_select_lex->type= "UNION RESULT";
+ unit->fake_select_lex->type= unit_operation_text[unit->common_op()];
unit->fake_select_lex->options|= SELECT_DESCRIBE;
}
- if (!(res= unit->prepare(thd, result, SELECT_NO_UNLOCK | SELECT_DESCRIBE)))
+ if (!(res= unit->prepare(unit->derived, result,
+ SELECT_NO_UNLOCK | SELECT_DESCRIBE)))
res= unit->exec();
}
else
@@ -25542,8 +26453,8 @@ Index_hint::print(THD *thd, String *str)
strlen(primary_key_name)))
str->append(primary_key_name);
else
- append_identifier(thd, str, key_name.str, key_name.length);
- }
+ append_identifier(thd, str, &key_name);
+}
str->append(')');
}
@@ -25599,10 +26510,10 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str,
if (!(belong_to_view &&
belong_to_view->compact_view_format))
{
- append_identifier(thd, str, view_db.str, view_db.length);
+ append_identifier(thd, str, &view_db);
str->append('.');
}
- append_identifier(thd, str, view_name.str, view_name.length);
+ append_identifier(thd, str, &view_name);
cmp_name= view_name.str;
}
else if (derived)
@@ -25617,8 +26528,8 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str,
}
else
{
- append_identifier(thd, str, table_name, table_name_length);
- cmp_name= table_name;
+ append_identifier(thd, str, &table_name);
+ cmp_name= table_name.str;
}
}
else
@@ -25628,19 +26539,18 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str,
if (!(belong_to_view &&
belong_to_view->compact_view_format))
{
- append_identifier(thd, str, db, db_length);
+ append_identifier(thd, str, &db);
str->append('.');
}
if (schema_table)
{
- append_identifier(thd, str, schema_table_name,
- strlen(schema_table_name));
- cmp_name= schema_table_name;
+ append_identifier(thd, str, &schema_table_name);
+ cmp_name= schema_table_name.str;
}
else
{
- append_identifier(thd, str, table_name, table_name_length);
- cmp_name= table_name;
+ append_identifier(thd, str, &table_name);
+ cmp_name= table_name.str;
}
#ifdef WITH_PARTITION_STORAGE_ENGINE
if (partition_names && partition_names->elements)
@@ -25659,23 +26569,26 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str,
}
#endif /* WITH_PARTITION_STORAGE_ENGINE */
}
- if (my_strcasecmp(table_alias_charset, cmp_name, alias))
+ if (table && table->versioned())
+ vers_conditions.print(str, query_type);
+
+ if (my_strcasecmp(table_alias_charset, cmp_name, alias.str))
{
char t_alias_buff[MAX_ALIAS_NAME];
- const char *t_alias= alias;
+ LEX_CSTRING t_alias= alias;
str->append(' ');
if (lower_case_table_names == 1)
{
- if (alias && alias[0])
+ if (alias.str && alias.str[0])
{
- strmov(t_alias_buff, alias);
- my_casedn_str(files_charset_info, t_alias_buff);
- t_alias= t_alias_buff;
+ strmov(t_alias_buff, alias.str);
+ t_alias.length= my_casedn_str(files_charset_info, t_alias_buff);
+ t_alias.str= t_alias_buff;
}
}
- append_identifier(thd, str, t_alias, strlen(t_alias));
+ append_identifier(thd, str, &t_alias);
}
if (index_hints)
@@ -25696,6 +26609,23 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str,
void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
{
DBUG_ASSERT(thd);
+
+ if (tvc)
+ {
+ tvc->print(thd, str, query_type);
+ return;
+ }
+
+ if ((query_type & QT_SHOW_SELECT_NUMBER) &&
+ thd->lex->all_selects_list &&
+ thd->lex->all_selects_list->link_next &&
+ select_number != UINT_MAX &&
+ select_number != INT_MAX)
+ {
+ str->append("/* select#");
+ str->append_ulonglong(select_number);
+ str->append(" */ ");
+ }
str->append(STRING_WITH_LEN("select "));
@@ -25868,7 +26798,7 @@ bool JOIN::change_result(select_result *new_result, select_result *old_result)
{
result= new_result;
if (result->prepare(fields_list, select_lex->master_unit()) ||
- result->prepare2())
+ result->prepare2(this))
DBUG_RETURN(true); /* purecov: inspected */
DBUG_RETURN(false);
}
@@ -26341,7 +27271,15 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
*/
if (ref_key >= 0 && ref_key != MAX_KEY && tab->type == JT_REF)
{
- if (table->quick_keys.is_set(ref_key))
+ /*
+ If ref access uses keypart=const for all its key parts,
+ and quick select uses the same # of key parts, then they are equivalent.
+ Reuse #rows estimate from quick select as it is more precise.
+ */
+ if (tab->ref.const_ref_part_map ==
+ make_prev_keypart_map(tab->ref.key_parts) &&
+ table->quick_keys.is_set(ref_key) &&
+ table->quick_key_parts[ref_key] == tab->ref.key_parts)
refkey_rows_estimate= table->quick_rows[ref_key];
else
{
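
The condition added above can be read as a bitmap identity: make_prev_keypart_map(n) is conventionally the mask of the n lowest key parts, so comparing it with const_ref_part_map asks whether every key part of the ref access is bound to a constant. Simplified sketch with stand-in types:

#include <cstdint>

typedef std::uint64_t key_part_map_sketch;

static key_part_map_sketch make_prev_keypart_map_sketch(unsigned n)
{
  return (key_part_map_sketch(1) << n) - 1;   // e.g. n=2 -> 0b11
}

bool ref_is_all_const(key_part_map_sketch const_ref_part_map, unsigned key_parts)
{
  return const_ref_part_map == make_prev_keypart_map_sketch(key_parts);
}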
@@ -26693,22 +27631,20 @@ ulong check_selectivity(THD *thd,
}
it.rewind();
- if (file->ha_rnd_init_with_error(1))
+ if (unlikely(file->ha_rnd_init_with_error(1)))
DBUG_RETURN(0);
do
{
error= file->ha_rnd_next(record);
- if (thd->killed)
+ if (unlikely(thd->killed))
{
thd->send_kill_message();
count= 0;
goto err;
}
- if (error)
+ if (unlikely(error))
{
- if (error == HA_ERR_RECORD_DELETED)
- continue;
if (error == HA_ERR_END_OF_FILE)
break;
goto err;
@@ -26859,13 +27795,13 @@ AGGR_OP::end_send()
error= join_init_read_record(join_tab);
}
else
- error= join_tab->read_record.read_record(&join_tab->read_record);
+ error= join_tab->read_record.read_record();
- if (error > 0 || (join->thd->is_error())) // Fatal error
+ if (unlikely(error > 0 || (join->thd->is_error()))) // Fatal error
rc= NESTED_LOOP_ERROR;
else if (error < 0)
break;
- else if (join->thd->killed) // Aborted by user
+ else if (unlikely(join->thd->killed)) // Aborted by user
{
join->thd->send_kill_message();
rc= NESTED_LOOP_KILLED;