summaryrefslogtreecommitdiff
path: root/sql/sql_select.cc
diff options
context:
space:
mode:
Diffstat (limited to 'sql/sql_select.cc')
-rw-r--r--sql/sql_select.cc5110
1 files changed, 3068 insertions, 2042 deletions
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 904610b9031..9e0a82aa342 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -13,8 +13,16 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+/**
+ @file
+
+ @brief
+ mysql_select and join optimization
-/* mysql_select and join optimization */
+
+ @defgroup Query_Optimizer Query Optimizer
+ @{
+*/
#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation // gcc: Class implementation
@@ -25,6 +33,7 @@
#include "sql_cursor.h"
#include <m_ctype.h>
+#include <my_bit.h>
#include <hash.h>
#include <ft_global.h>
@@ -72,11 +81,11 @@ static int join_tab_cmp_straight(const void* ptr1, const void* ptr2);
static bool find_best(JOIN *join,table_map rest_tables,uint index,
double record_count,double read_time);
static uint cache_record_length(JOIN *join,uint index);
-static double prev_record_reads(JOIN *join,table_map found_ref);
+static double prev_record_reads(JOIN *join, uint idx, table_map found_ref);
static bool get_best_combination(JOIN *join);
static store_key *get_store_key(THD *thd,
KEYUSE *keyuse, table_map used_tables,
- KEY_PART_INFO *key_part, char *key_buff,
+ KEY_PART_INFO *key_part, uchar *key_buff,
uint maybe_null);
static void make_outerjoin_info(JOIN *join);
static bool make_join_select(JOIN *join,SQL_SELECT *select,COND *item);
@@ -117,7 +126,7 @@ static int do_select(JOIN *join,List<Item> *fields,TABLE *tmp_table,
static enum_nested_loop_state
evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
- int error, my_bool *report_error);
+ int error);
static enum_nested_loop_state
evaluate_null_complemented_join_record(JOIN *join, JOIN_TAB *join_tab);
static enum_nested_loop_state
@@ -163,13 +172,15 @@ static COND *make_cond_for_table(COND *cond,table_map table,
static Item* part_of_refkey(TABLE *form,Field *field);
uint find_shortest_key(TABLE *table, const key_map *usable_keys);
static bool test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,
- ha_rows select_limit, bool no_changes);
+ ha_rows select_limit, bool no_changes,
+ key_map *map);
static bool list_contains_unique_index(TABLE *table,
bool (*find_func) (Field *, void *), void *data);
static bool find_field_in_item_list (Field *field, void *data);
static bool find_field_in_order_list (Field *field, void *data);
static int create_sort_index(THD *thd, JOIN *join, ORDER *order,
- ha_rows filesort_limit, ha_rows select_limit);
+ ha_rows filesort_limit, ha_rows select_limit,
+ bool is_order_by);
static int remove_duplicates(JOIN *join,TABLE *entry,List<Item> &fields,
Item *having);
static int remove_dup_with_compare(THD *thd, TABLE *entry, Field **field,
@@ -220,8 +231,8 @@ static void add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab);
static bool test_if_ref(Item_field *left_item,Item *right_item);
-/*
- This handles SELECT with and without UNION
+/**
+ This handles SELECT with and without UNION.
*/
bool handle_select(THD *thd, LEX *lex, select_result *result,
@@ -231,7 +242,8 @@ bool handle_select(THD *thd, LEX *lex, select_result *result,
register SELECT_LEX *select_lex = &lex->select_lex;
DBUG_ENTER("handle_select");
- if (select_lex->next_select() || select_lex->master_unit()->fake_select_lex)
+ if (select_lex->master_unit()->is_union() ||
+ select_lex->master_unit()->fake_select_lex)
res= mysql_union(thd, lex, result, &lex->unit, setup_tables_done_option);
else
{
@@ -243,22 +255,22 @@ bool handle_select(THD *thd, LEX *lex, select_result *result,
setup_tables_done_option changed for next rexecution
*/
res= mysql_select(thd, &select_lex->ref_pointer_array,
- (TABLE_LIST*) select_lex->table_list.first,
+ select_lex->table_list.first,
select_lex->with_wild, select_lex->item_list,
select_lex->where,
select_lex->order_list.elements +
select_lex->group_list.elements,
- (ORDER*) select_lex->order_list.first,
- (ORDER*) select_lex->group_list.first,
+ select_lex->order_list.first,
+ select_lex->group_list.first,
select_lex->having,
- (ORDER*) lex->proc_list.first,
+ lex->proc_list.first,
select_lex->options | thd->options |
setup_tables_done_option,
result, unit, select_lex);
}
DBUG_PRINT("info",("res: %d report_error: %d", res,
- thd->net.report_error));
- res|= thd->net.report_error;
+ thd->is_error()));
+ res|= thd->is_error();
if (unlikely(res))
result->abort();
@@ -275,6 +287,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result,
all_fields List of all fields used in select
select Current select
ref_pointer_array Array of references to Items used in current select
+ group_list GROUP BY list (is NULL by default)
DESCRIPTION
The function serves 3 purposes - adds fields referenced from inner
@@ -293,6 +306,8 @@ bool handle_select(THD *thd, LEX *lex, select_result *result,
function is aggregated in the select where the outer field was
resolved or in some more inner select then the Item_direct_ref
class should be used.
+ Also it should be used if we are grouping by a subquery containing
+ the outer field.
The resolution is done here and not at the fix_fields() stage as
it can be done only after sum functions are fixed and pulled up to
selects where they are have to be aggregated.
@@ -309,7 +324,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result,
bool
fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select,
- Item **ref_pointer_array)
+ Item **ref_pointer_array, ORDER *group_list)
{
Item_outer_ref *ref;
bool res= FALSE;
@@ -359,6 +374,22 @@ fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select,
}
}
}
+ else
+ {
+ /*
+ Check if GROUP BY item trees contain the outer ref:
+ in this case we have to use Item_direct_ref instead of Item_ref.
+ */
+ for (ORDER *group= group_list; group; group= group->next)
+ {
+ if ((*group->item)->walk(&Item::find_item_processor, TRUE,
+ (uchar *) ref))
+ {
+ direct_ref= TRUE;
+ break;
+ }
+ }
+ }
new_ref= direct_ref ?
new Item_direct_ref(ref->context, item_ref, ref->table_name,
ref->field_name, ref->alias_name_used) :
@@ -376,8 +407,8 @@ fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select,
return res;
}
-/*
- Function to setup clauses without sum functions
+/**
+ Function to setup clauses without sum functions.
*/
inline int setup_without_group(THD *thd, Item **ref_pointer_array,
TABLE_LIST *tables,
@@ -420,10 +451,17 @@ inline int setup_without_group(THD *thd, Item **ref_pointer_array,
mysql_select assumes that all tables are already opened
*****************************************************************************/
-/*
+/**
Prepare of whole select (including sub queries in future).
- return -1 on error
- 0 on success
+
+ @todo
+ Add check of calculation of GROUP functions and fields:
+ SELECT COUNT(*)+table.col1 from table1;
+
+ @retval
+ -1 on error
+ @retval
+ 0 on success
*/
int
JOIN::prepare(Item ***rref_pointer_array,
@@ -449,7 +487,7 @@ JOIN::prepare(Item ***rref_pointer_array,
select_lex= select_lex_arg;
select_lex->join= this;
join_list= &select_lex->top_join_list;
- union_part= (unit_arg->first_select()->next_select() != 0);
+ union_part= unit_arg->is_union();
thd->lex->current_select->is_item_list_lookup= 1;
/*
@@ -463,9 +501,8 @@ JOIN::prepare(Item ***rref_pointer_array,
if (!(select_options & OPTION_SETUP_TABLES_DONE) &&
setup_tables_and_check_access(thd, &select_lex->context, join_list,
- tables_list, &conds,
- &select_lex->leaf_tables, FALSE,
- SELECT_ACL, SELECT_ACL))
+ tables_list, &select_lex->leaf_tables,
+ FALSE, SELECT_ACL, SELECT_ACL))
DBUG_RETURN(-1);
TABLE_LIST *table_ptr;
@@ -476,7 +513,7 @@ JOIN::prepare(Item ***rref_pointer_array,
if (setup_wild(thd, tables_list, fields_list, &all_fields, wild_num) ||
select_lex->setup_ref_array(thd, og_num) ||
- setup_fields(thd, (*rref_pointer_array), fields_list, 1,
+ setup_fields(thd, (*rref_pointer_array), fields_list, MARK_COLUMNS_READ,
&all_fields, 1) ||
setup_without_group(thd, (*rref_pointer_array), tables_list,
select_lex->leaf_tables, fields_list,
@@ -496,12 +533,13 @@ JOIN::prepare(Item ***rref_pointer_array,
(having->fix_fields(thd, &having) ||
having->check_cols(1)));
select_lex->having_fix_field= 0;
- if (having_fix_rc || thd->net.report_error)
+ if (having_fix_rc || thd->is_error())
DBUG_RETURN(-1); /* purecov: inspected */
thd->lex->allow_sum_func= save_allow_sum_func;
}
- if (!thd->lex->view_prepare_mode)
+ if (!(thd->lex->context_analysis_only & CONTEXT_ANALYSIS_ONLY_VIEW) &&
+ !(select_options & SELECT_DESCRIBE))
{
Item_subselect *subselect;
/* Is it subselect? */
@@ -527,13 +565,21 @@ JOIN::prepare(Item ***rref_pointer_array,
{
Item *item= *ord->item;
/*
- Disregard sort order if there's only "{VAR}CHAR(0) NOT NULL" fields
- there. Such fields don't contain any data to sort.
+ Disregard sort order if there's only
+ zero length NOT NULL fields (e.g. {VAR}CHAR(0) NOT NULL) or
+ zero length NOT NULL string functions there.
+ Such tuples don't contain any data to sort.
*/
if (!real_order &&
- (item->type() != Item::FIELD_ITEM ||
- ((Item_field *) item)->field->maybe_null() ||
- ((Item_field *) item)->field->sort_length()))
+ /* Not a zero length NOT NULL field */
+ ((item->type() != Item::FIELD_ITEM ||
+ ((Item_field *) item)->field->maybe_null() ||
+ ((Item_field *) item)->field->sort_length()) &&
+ /* AND not a zero length NOT NULL string function. */
+ (item->type() != Item::FUNC_ITEM ||
+ item->maybe_null ||
+ item->result_type() != STRING_RESULT ||
+ item->max_length)))
real_order= TRUE;
if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM)
@@ -559,7 +605,8 @@ JOIN::prepare(Item ***rref_pointer_array,
}
if (select_lex->inner_refs_list.elements &&
- fix_inner_refs(thd, all_fields, select_lex, ref_pointer_array))
+ fix_inner_refs(thd, all_fields, select_lex, ref_pointer_array,
+ group_list))
DBUG_RETURN(-1);
if (group_list)
@@ -623,13 +670,6 @@ JOIN::prepare(Item ***rref_pointer_array,
goto err; /* purecov: inspected */
}
}
-#ifdef NOT_NEEDED
- else if (!group_list && procedure->flags & PROC_GROUP)
- {
- my_message(ER_NO_GROUP_FOR_PROC, MYF(0));
- goto err;
- }
-#endif
if (order && (procedure->flags & PROC_NO_SORT))
{ /* purecov: inspected */
my_message(ER_ORDER_WITH_PROC, ER(ER_ORDER_WITH_PROC),
@@ -659,8 +699,11 @@ JOIN::prepare(Item ***rref_pointer_array,
this->group= group_list != 0;
unit= unit_arg;
+ if (tmp_table_param.sum_func_count && !group_list)
+ implicit_grouping= TRUE;
+
#ifdef RESTRICTED_GROUP
- if (sum_func_count && !group_list && (func_count || field_count))
+ if (implicit_grouping)
{
my_message(ER_WRONG_SUM_SELECT,ER(ER_WRONG_SUM_SELECT),MYF(0));
goto err;
@@ -747,7 +790,7 @@ void JOIN::remove_subq_pushed_predicates(Item **where)
static void save_index_subquery_explain_info(JOIN_TAB *join_tab, Item* where)
{
join_tab->packed_info= TAB_INFO_HAVE_VALUE;
- if (join_tab->table->used_keys.is_set(join_tab->ref.key))
+ if (join_tab->table->covering_keys.is_set(join_tab->ref.key))
join_tab->packed_info |= TAB_INFO_USING_INDEX;
if (where)
join_tab->packed_info |= TAB_INFO_USING_WHERE;
@@ -762,11 +805,16 @@ static void save_index_subquery_explain_info(JOIN_TAB *join_tab, Item* where)
}
-/*
+/**
global select optimisation.
- return 0 - success
- 1 - error
- error code saved in field 'error'
+
+ @note
+ error code saved in field 'error'
+
+ @retval
+ 0 success
+ @retval
+ 1 error
*/
int
@@ -778,9 +826,6 @@ JOIN::optimize()
DBUG_RETURN(0);
optimized= 1;
- if (thd->lex->orig_sql_command != SQLCOM_SHOW_STATUS)
- thd->status_var.last_query_cost= 0.0;
-
thd_proc_info(thd, "optimizing");
row_limit= ((select_distinct || order || group_list) ? HA_POS_ERROR :
unit->select_limit_cnt);
@@ -841,7 +886,7 @@ JOIN::optimize()
}
conds= optimize_cond(this, conds, join_list, &cond_value);
- if (thd->net.report_error)
+ if (thd->is_error())
{
error= 1;
DBUG_PRINT("error",("Error from optimize_cond"));
@@ -850,7 +895,7 @@ JOIN::optimize()
{
having= optimize_cond(this, having, join_list, &having_value);
- if (thd->net.report_error)
+ if (thd->is_error())
{
error= 1;
DBUG_PRINT("error",("Error from optimize_cond"));
@@ -874,15 +919,43 @@ JOIN::optimize()
}
}
- /* Optimize count(*), min() and max() */
- if (tables_list && tmp_table_param.sum_func_count && ! group_list)
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ {
+ TABLE_LIST *tbl;
+ for (tbl= select_lex->leaf_tables; tbl; tbl= tbl->next_leaf)
+ {
+ /*
+ If tbl->embedding!=NULL that means that this table is in the inner
+ part of the nested outer join, and we can't do partition pruning
+ (TODO: check if this limitation can be lifted)
+ */
+ if (!tbl->embedding)
+ {
+ Item *prune_cond= tbl->on_expr? tbl->on_expr : conds;
+ tbl->table->no_partitions_used= prune_partitions(thd, tbl->table,
+ prune_cond);
+ }
+ }
+ }
+#endif
+
+ /*
+ Try to optimize count(*), min() and max() to const fields if
+ there is implicit grouping (aggregate functions but no
+ group_list). In this case, the result set shall only contain one
+ row.
+ */
+ if (tables_list && implicit_grouping)
{
int res;
/*
opt_sum_query() returns HA_ERR_KEY_NOT_FOUND if no rows match
to the WHERE conditions,
- or 1 if all items were resolved,
+ or 1 if all items were resolved (optimized away),
or 0, or an error number HA_ERR_...
+
+ If all items were resolved by opt_sum_query, there is no need to
+ open any tables.
*/
if ((res=opt_sum_query(select_lex->leaf_tables, all_fields, conds)))
{
@@ -890,19 +963,28 @@ JOIN::optimize()
{
DBUG_PRINT("info",("No matching min/max row"));
zero_result_cause= "No matching min/max row";
+ tables= 0;
error=0;
DBUG_RETURN(0);
}
if (res > 1)
{
- thd->fatal_error();
error= res;
DBUG_PRINT("error",("Error from opt_sum_query"));
DBUG_RETURN(1);
}
+ if (res < 0)
+ {
+ DBUG_PRINT("info",("No matching min/max row"));
+ zero_result_cause= "No matching min/max row";
+ tables= 0;
+ error=0;
+ DBUG_RETURN(0);
+ }
DBUG_PRINT("info",("Select tables optimized away"));
zero_result_cause= "Select tables optimized away";
tables_list= 0; // All tables resolved
+ const_tables= tables;
/*
Extract all table-independent conditions and replace the WHERE
clause with them. All other conditions were computed by opt_sum_query
@@ -919,7 +1001,8 @@ JOIN::optimize()
make_cond_for_table(conds, PSEUDO_TABLE_BITS, 0);
DBUG_EXECUTE("where",
print_where(table_independent_conds,
- "where after opt_sum_query()"););
+ "where after opt_sum_query()",
+ QT_ORDINARY););
conds= table_independent_conds;
}
}
@@ -1011,7 +1094,10 @@ JOIN::optimize()
{
conds= substitute_for_best_equal_field(conds, cond_equal, map2table);
conds->update_used_tables();
- DBUG_EXECUTE("where", print_where(conds, "after substitute_best_equal"););
+ DBUG_EXECUTE("where",
+ print_where(conds,
+ "after substitute_best_equal",
+ QT_ORDINARY););
}
/*
@@ -1030,11 +1116,35 @@ JOIN::optimize()
}
if (conds && const_table_map != found_const_table_map &&
- (select_options & SELECT_DESCRIBE) &&
- select_lex->master_unit() == &thd->lex->unit) // upper level SELECT
+ (select_options & SELECT_DESCRIBE))
{
conds=new Item_int((longlong) 0,1); // Always false
}
+
+ /*
+ It's necessary to check const part of HAVING cond as
+ there is a chance that some cond parts may become
+ const items after make_join_statistics (for example
+ when Item is a reference to const table field from
+ outer join).
+ This check is performed only for those conditions
+ which do not use aggregate functions. In such case
+ temporary table may not be used and const condition
+ elements may be lost during further having
+ condition transformation in JOIN::exec.
+ */
+ if (having && const_table_map && !having->with_sum_func)
+ {
+ having->update_used_tables();
+ having= remove_eq_conds(thd, having, &having_value);
+ if (having_value == Item::COND_FALSE)
+ {
+ having= new Item_int((longlong) 0,1);
+ zero_result_cause= "Impossible HAVING noticed after reading const tables";
+ DBUG_RETURN(0);
+ }
+ }
+
if (make_join_select(this, select, conds))
{
zero_result_cause=
@@ -1048,7 +1158,7 @@ JOIN::optimize()
{
ORDER *org_order= order;
order=remove_const(this, order,conds,1, &simple_order);
- if (thd->net.report_error)
+ if (thd->is_error())
{
error= 1;
DBUG_PRINT("error",("Error from remove_const"));
@@ -1103,6 +1213,12 @@ JOIN::optimize()
*/
if (!order || test_if_subpart(group_list, order))
order= skip_sort_order ? 0 : group_list;
+ /*
+ If we have an IGNORE INDEX FOR GROUP BY(fields) clause, this must be
+ rewritten to IGNORE INDEX FOR ORDER BY(fields).
+ */
+ join_tab->table->keys_in_use_for_order_by=
+ join_tab->table->keys_in_use_for_group_by;
group_list= 0;
group= 0;
}
@@ -1140,14 +1256,15 @@ JOIN::optimize()
JOIN_TAB *tab= &join_tab[const_tables];
bool all_order_fields_used;
if (order)
- skip_sort_order= test_if_skip_sort_order(tab, order, select_limit, 1);
+ skip_sort_order= test_if_skip_sort_order(tab, order, select_limit, 1,
+ &tab->table->keys_in_use_for_order_by);
if ((group_list=create_distinct_group(thd, select_lex->ref_pointer_array,
order, fields_list, all_fields,
&all_order_fields_used)))
{
bool skip_group= (skip_sort_order &&
- test_if_skip_sort_order(tab, group_list, select_limit,
- 1) != 0);
+ test_if_skip_sort_order(tab, group_list, select_limit, 1,
+ &tab->table->keys_in_use_for_group_by) != 0);
count_field_types(select_lex, &tmp_table_param, all_fields, 0);
if ((skip_group && all_order_fields_used) ||
select_limit == HA_POS_ERROR ||
@@ -1182,7 +1299,7 @@ JOIN::optimize()
group_list= remove_const(this, (old_group_list= group_list), conds,
rollup.state == ROLLUP::STATE_NONE,
&simple_group);
- if (thd->net.report_error)
+ if (thd->is_error())
{
error= 1;
DBUG_PRINT("error",("Error from remove_const"));
@@ -1205,7 +1322,7 @@ JOIN::optimize()
{
group_list= procedure->group= remove_const(this, procedure->group, conds,
1, &simple_group);
- if (thd->net.report_error)
+ if (thd->is_error())
{
error= 1;
DBUG_PRINT("error",("Error from remove_const"));
@@ -1218,13 +1335,22 @@ JOIN::optimize()
(!group_list && tmp_table_param.sum_func_count))
order=0;
- // Can't use sort on head table if using row cache
+ // Can't use sort on head table if using join buffering
if (full_join)
{
- if (group_list)
- simple_group=0;
- if (order)
- simple_order=0;
+ TABLE *stable= (sort_by_table == (TABLE *) 1 ?
+ join_tab[const_tables].table : sort_by_table);
+ /*
+ FORCE INDEX FOR ORDER BY can be used to prevent join buffering when
+ sorting on the first table.
+ */
+ if (!stable || !stable->force_index_order)
+ {
+ if (group_list)
+ simple_group= 0;
+ if (order)
+ simple_order= 0;
+ }
}
/*
@@ -1261,7 +1387,7 @@ JOIN::optimize()
if (!group_list && !order &&
unit->item && unit->item->substype() == Item_subselect::IN_SUBS &&
tables == 1 && conds &&
- !unit->first_select()->next_select())
+ !unit->is_union())
{
if (!having)
{
@@ -1315,23 +1441,16 @@ JOIN::optimize()
}
/*
- Need to tell Innobase that to play it safe, it should fetch all
- columns of the tables: this is because MySQL may build row
- pointers for the rows, and for all columns of the primary key the
- field->query_id has not necessarily been set to thd->query_id by
- MySQL.
+ Need to tell handlers that to play it safe, it should fetch all
+ columns of the primary key of the tables: this is because MySQL may
+ build row pointers for the rows, and for all columns of the primary key
+ the read set has not necessarily been set by the server code.
*/
-
-#ifdef HAVE_INNOBASE_DB
if (need_tmp || select_distinct || group_list || order)
{
- for (uint i_h = const_tables; i_h < tables; i_h++)
- {
- TABLE* table_h = join_tab[i_h].table;
- table_h->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY);
- }
+ for (uint i = const_tables; i < tables; i++)
+ join_tab[i].table->prepare_for_position();
}
-#endif
DBUG_EXECUTE("info",TEST_join(this););
@@ -1358,7 +1477,9 @@ JOIN::optimize()
((group_list &&
(!simple_group ||
!test_if_skip_sort_order(&join_tab[const_tables], group_list,
- unit->select_limit_cnt, 0))) ||
+ unit->select_limit_cnt, 0,
+ &join_tab[const_tables].table->
+ keys_in_use_for_group_by))) ||
select_distinct) &&
tmp_table_param.quick_group && !procedure)
{
@@ -1367,13 +1488,22 @@ JOIN::optimize()
if (order)
{
/*
+ Do we need a temporary table due to the ORDER BY not being equal to
+ the GROUP BY? The call to test_if_skip_sort_order above tests for the
+ GROUP BY clause only and hence is not valid in this case. So the
+ estimated number of rows to be read from the first table is not valid.
+ We clear it here so that it doesn't show up in EXPLAIN.
+ */
+ if (need_tmp && (select_options & SELECT_DESCRIBE) != 0)
+ join_tab[const_tables].limit= 0;
+ /*
Force using of tmp table if sorting by a SP or UDF function due to
their expensive and probably non-deterministic nature.
*/
for (ORDER *tmp_order= order; tmp_order ; tmp_order=tmp_order->next)
{
Item *item= *tmp_order->item;
- if (item->walk(&Item::is_expensive_processor,(byte*)0))
+ if (item->walk(&Item::is_expensive_processor, 0, (uchar*)0))
{
/* Force tmp table without sort */
need_tmp=1; simple_order=simple_group=0;
@@ -1429,7 +1559,7 @@ JOIN::optimize()
!thd->lex->current_select->with_sum_func) ?
select_limit : HA_POS_ERROR;
- if (!(exec_tmp_table1 =
+ if (!(exec_tmp_table1=
create_tmp_table(thd, &tmp_table_param, all_fields,
tmp_group,
group_list ? 0 : select_distinct,
@@ -1461,7 +1591,7 @@ JOIN::optimize()
DBUG_PRINT("info",("Sorting for group"));
thd_proc_info(thd, "Sorting for group");
if (create_sort_index(thd, this, group_list,
- HA_POS_ERROR, HA_POS_ERROR) ||
+ HA_POS_ERROR, HA_POS_ERROR, FALSE) ||
alloc_group_fields(this, group_list) ||
make_sum_func_list(all_fields, fields_list, 1) ||
setup_sum_funcs(thd, sum_funcs))
@@ -1482,7 +1612,7 @@ JOIN::optimize()
{
thd_proc_info(thd, "Sorting for order");
if (create_sort_index(thd, this, order,
- HA_POS_ERROR, HA_POS_ERROR))
+ HA_POS_ERROR, HA_POS_ERROR, TRUE))
{
DBUG_RETURN(1);
}
@@ -1511,7 +1641,9 @@ JOIN::optimize()
{
/* Should always succeed */
if (test_if_skip_sort_order(&join_tab[const_tables],
- order, unit->select_limit_cnt, 0))
+ order, unit->select_limit_cnt, 0,
+ &join_tab[const_tables].table->
+ keys_in_use_for_order_by))
order=0;
}
}
@@ -1526,8 +1658,8 @@ JOIN::optimize()
}
-/*
- Restore values in temporary join
+/**
+ Restore values in temporary join.
*/
void JOIN::restore_tmp()
{
@@ -1549,14 +1681,14 @@ JOIN::reinit()
if (exec_tmp_table1)
{
exec_tmp_table1->file->extra(HA_EXTRA_RESET_STATE);
- exec_tmp_table1->file->delete_all_rows();
+ exec_tmp_table1->file->ha_delete_all_rows();
free_io_cache(exec_tmp_table1);
filesort_free_buffers(exec_tmp_table1,0);
}
if (exec_tmp_table2)
{
exec_tmp_table2->file->extra(HA_EXTRA_RESET_STATE);
- exec_tmp_table2->file->delete_all_rows();
+ exec_tmp_table2->file->ha_delete_all_rows();
free_io_cache(exec_tmp_table2);
filesort_free_buffers(exec_tmp_table2,0);
}
@@ -1582,6 +1714,9 @@ JOIN::reinit()
func->clear();
}
+ if (!(select_options & SELECT_DESCRIBE))
+ init_ftfuncs(thd, select_lex, test(order));
+
DBUG_RETURN(0);
}
@@ -1612,7 +1747,7 @@ JOIN::save_join_tab()
{
if (!join_tab_save && select_lex->master_unit()->uncacheable)
{
- if (!(join_tab_save= (JOIN_TAB*)thd->memdup((gptr) join_tab,
+ if (!(join_tab_save= (JOIN_TAB*)thd->memdup((uchar*) join_tab,
sizeof(JOIN_TAB) * tables)))
return 1;
}
@@ -1620,8 +1755,16 @@ JOIN::save_join_tab()
}
-/*
- Exec select
+/**
+ Exec select.
+
+ @todo
+ Note, that create_sort_index calls test_if_skip_sort_order and may
+ finally replace sorting with index scan if there is a LIMIT clause in
+ the query. It's never shown in EXPLAIN!
+
+ @todo
+ When can we have here thd->net.report_error not zero?
*/
void
JOIN::exec()
@@ -1712,6 +1855,10 @@ JOIN::exec()
DBUG_VOID_RETURN;
}
+ if ((this->select_lex->options & OPTION_SCHEMA_TABLE) &&
+ get_schema_tables_result(this, PROCESSED_BY_JOIN_EXEC))
+ DBUG_VOID_RETURN;
+
if (select_options & SELECT_DESCRIBE)
{
/*
@@ -1734,7 +1881,9 @@ JOIN::exec()
(const_tables == tables ||
((simple_order || skip_sort_order) &&
test_if_skip_sort_order(&join_tab[const_tables], order,
- select_limit, 0))))
+ select_limit, 0,
+ &join_tab[const_tables].table->
+ keys_in_use_for_query))))
order=0;
having= tmp_having;
select_describe(this, need_tmp,
@@ -1755,13 +1904,6 @@ JOIN::exec()
*/
curr_join->examined_rows= 0;
- if ((curr_join->select_lex->options & OPTION_SCHEMA_TABLE) &&
- !thd->lex->describe &&
- get_schema_tables_result(curr_join, PROCESSED_BY_JOIN_EXEC))
- {
- DBUG_VOID_RETURN;
- }
-
/* Create a tmp table if distinct or if the sort is too complicated */
if (need_tmp)
{
@@ -1780,6 +1922,9 @@ JOIN::exec()
/* Copy data to the temporary table */
thd_proc_info(thd, "Copying to tmp table");
DBUG_PRINT("info", ("%s", thd->proc_info));
+ if (!curr_join->sort_and_group &&
+ curr_join->const_tables != curr_join->tables)
+ curr_join->join_tab[curr_join->const_tables].sorted= 0;
if ((tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, 0)))
{
error= tmp_error;
@@ -1912,7 +2057,7 @@ JOIN::exec()
DBUG_VOID_RETURN;
}
if (create_sort_index(thd, curr_join, curr_join->group_list,
- HA_POS_ERROR, HA_POS_ERROR) ||
+ HA_POS_ERROR, HA_POS_ERROR, FALSE) ||
make_group_fields(this, curr_join))
{
DBUG_VOID_RETURN;
@@ -1941,6 +2086,9 @@ JOIN::exec()
1, TRUE))
DBUG_VOID_RETURN;
curr_join->group_list= 0;
+ if (!curr_join->sort_and_group &&
+ curr_join->const_tables != curr_join->tables)
+ curr_join->join_tab[curr_join->const_tables].sorted= 0;
if (setup_sum_funcs(curr_join->thd, curr_join->sum_funcs) ||
(tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table,
0)))
@@ -2000,7 +2148,8 @@ JOIN::exec()
count_field_types(select_lex, &curr_join->tmp_table_param,
*curr_all_fields, 0);
- if (curr_join->group || curr_join->tmp_table_param.sum_func_count ||
+ if (curr_join->group || curr_join->implicit_grouping ||
+ curr_join->tmp_table_param.sum_func_count ||
(procedure && (procedure->flags & PROC_GROUP)))
{
if (make_group_fields(this, curr_join))
@@ -2066,27 +2215,25 @@ JOIN::exec()
DBUG_VOID_RETURN;
if (!curr_table->select->cond)
curr_table->select->cond= sort_table_cond;
- else // This should never happen
+ else
{
if (!(curr_table->select->cond=
new Item_cond_and(curr_table->select->cond,
sort_table_cond)))
DBUG_VOID_RETURN;
- /*
- Item_cond_and do not need fix_fields for execution, its parameters
- are fixed or do not need fix_fields, too
- */
- curr_table->select->cond->quick_fix_field();
+ curr_table->select->cond->fix_fields(thd, 0);
}
curr_table->select_cond= curr_table->select->cond;
curr_table->select_cond->top_level_item();
DBUG_EXECUTE("where",print_where(curr_table->select->cond,
- "select and having"););
+ "select and having",
+ QT_ORDINARY););
curr_join->tmp_having= make_cond_for_table(curr_join->tmp_having,
~ (table_map) 0,
~used_tables);
DBUG_EXECUTE("where",print_where(curr_join->tmp_having,
- "having after sort"););
+ "having after sort",
+ QT_ORDINARY););
}
}
{
@@ -2133,15 +2280,26 @@ JOIN::exec()
curr_join->group_list : curr_join->order,
curr_join->select_limit,
(select_options & OPTION_FOUND_ROWS ?
- HA_POS_ERROR : unit->select_limit_cnt)))
+ HA_POS_ERROR : unit->select_limit_cnt),
+ curr_join->group_list ? TRUE : FALSE))
DBUG_VOID_RETURN;
sortorder= curr_join->sortorder;
+ if (curr_join->const_tables != curr_join->tables &&
+ !curr_join->join_tab[curr_join->const_tables].table->sort.io_cache)
+ {
+ /*
+ If no IO cache exists for the first table then we are using an
+ INDEX SCAN and no filesort. Thus we should not remove the sorted
+ attribute on the INDEX SCAN.
+ */
+ skip_sort_order= 1;
+ }
}
}
- /* XXX: When can we have here thd->net.report_error not zero? */
- if (thd->net.report_error)
+ /* XXX: When can we have here thd->is_error() not zero? */
+ if (thd->is_error())
{
- error= thd->net.report_error;
+ error= thd->is_error();
DBUG_VOID_RETURN;
}
curr_join->having= curr_join->tmp_having;
@@ -2198,8 +2356,11 @@ JOIN::exec()
}
-/*
- Clean up join. Return error that hold JOIN.
+/**
+ Clean up join.
+
+ @return
+ Return error that hold JOIN.
*/
int
@@ -2214,25 +2375,24 @@ JOIN::destroy()
{
JOIN_TAB *tab, *end;
for (tab= join_tab, end= tab+tables ; tab != end ; tab++)
- {
tab->cleanup();
- }
}
tmp_join->tmp_join= 0;
+ /*
+ We need to clean up tmp_table_param for reusable JOINs (having non-zero
+ and different from self tmp_join) because it's not being cleaned up
+ anywhere else (as we need to keep the join reusable).
+ */
tmp_table_param.cleanup();
+ tmp_table_param.copy_field= tmp_join->tmp_table_param.copy_field= 0;
DBUG_RETURN(tmp_join->destroy());
}
cond_equal= 0;
cleanup(1);
/* Cleanup items referencing temporary table columns */
- if (!tmp_all_fields3.is_empty())
- {
- List_iterator_fast<Item> it(tmp_all_fields3);
- Item *item;
- while ((item= it++))
- item->cleanup();
- }
+ cleanup_item_list(tmp_all_fields1);
+ cleanup_item_list(tmp_all_fields3);
if (exec_tmp_table1)
free_tmp_table(thd, exec_tmp_table1);
if (exec_tmp_table2)
@@ -2243,49 +2403,61 @@ JOIN::destroy()
DBUG_RETURN(error);
}
-/*
+
+void JOIN::cleanup_item_list(List<Item> &items) const
+{
+ if (!items.is_empty())
+ {
+ List_iterator_fast<Item> it(items);
+ Item *item;
+ while ((item= it++))
+ item->cleanup();
+ }
+}
+
+
+/**
An entry point to single-unit select (a select without UNION).
- SYNOPSIS
- mysql_select()
-
- thd thread handler
- rref_pointer_array a reference to ref_pointer_array of
- the top-level select_lex for this query
- tables list of all tables used in this query.
- The tables have been pre-opened.
- wild_num number of wildcards used in the top level
- select of this query.
- For example statement
- SELECT *, t1.*, catalog.t2.* FROM t0, t1, t2;
- has 3 wildcards.
- fields list of items in SELECT list of the top-level
- select
- e.g. SELECT a, b, c FROM t1 will have Item_field
- for a, b and c in this list.
- conds top level item of an expression representing
- WHERE clause of the top level select
- og_num total number of ORDER BY and GROUP BY clauses
- arguments
- order linked list of ORDER BY agruments
- group linked list of GROUP BY arguments
- having top level item of HAVING expression
- proc_param list of PROCEDUREs
- select_options select options (BIG_RESULT, etc)
- result an instance of result set handling class.
- This object is responsible for send result
- set rows to the client or inserting them
- into a table.
- select_lex the only SELECT_LEX of this query
- unit top-level UNIT of this query
- UNIT is an artificial object created by the parser
- for every SELECT clause.
- e.g. SELECT * FROM t1 WHERE a1 IN (SELECT * FROM t2)
- has 2 unions.
-
- RETURN VALUE
- FALSE success
- TRUE an error
+ @param thd thread handler
+ @param rref_pointer_array a reference to ref_pointer_array of
+ the top-level select_lex for this query
+ @param tables list of all tables used in this query.
+ The tables have been pre-opened.
+ @param wild_num number of wildcards used in the top level
+ select of this query.
+ For example statement
+ SELECT *, t1.*, catalog.t2.* FROM t0, t1, t2;
+ has 3 wildcards.
+ @param fields list of items in SELECT list of the top-level
+ select
+ e.g. SELECT a, b, c FROM t1 will have Item_field
+ for a, b and c in this list.
+ @param conds top level item of an expression representing
+ WHERE clause of the top level select
+ @param og_num total number of ORDER BY and GROUP BY clauses
+ arguments
+ @param order linked list of ORDER BY arguments
+ @param group linked list of GROUP BY arguments
+ @param having top level item of HAVING expression
+ @param proc_param list of PROCEDUREs
+ @param select_options select options (BIG_RESULT, etc)
+ @param result an instance of result set handling class.
+ This object is responsible for sending result
+ set rows to the client or inserting them
+ into a table.
+ @param select_lex the only SELECT_LEX of this query
+ @param unit top-level UNIT of this query
+ UNIT is an artificial object created by the
+ parser for every SELECT clause.
+ e.g.
+ SELECT * FROM t1 WHERE a1 IN (SELECT * FROM t2)
+ has 2 unions.
+
+ @retval
+ FALSE success
+ @retval
+ TRUE an error
*/
bool
@@ -2319,6 +2491,13 @@ mysql_select(THD *thd, Item ***rref_pointer_array,
{
DBUG_RETURN(TRUE);
}
+ /*
+ Original join tabs might be overwritten at first
+ subselect execution. So we need to restore them.
+ */
+ Item_subselect *subselect= select_lex->master_unit()->item;
+ if (subselect && subselect->is_uncacheable() && join->reinit())
+ DBUG_RETURN(TRUE);
}
else
{
@@ -2360,7 +2539,7 @@ mysql_select(THD *thd, Item ***rref_pointer_array,
join->having_history= (join->having?join->having:join->tmp_having);
}
- if (thd->net.report_error)
+ if (thd->is_error())
goto err;
join->exec();
@@ -2386,8 +2565,7 @@ err:
{
thd_proc_info(thd, "end");
err|= select_lex->cleanup();
- thd_proc_info(thd, "end");
- DBUG_RETURN(err || thd->net.report_error);
+ DBUG_RETURN(err || thd->is_error());
}
DBUG_RETURN(join->error);
}
@@ -2404,14 +2582,13 @@ static ha_rows get_quick_record_count(THD *thd, SQL_SELECT *select,
int error;
DBUG_ENTER("get_quick_record_count");
#ifndef EMBEDDED_LIBRARY // Avoid compiler warning
- char buff[STACK_BUFF_ALLOC];
+ uchar buff[STACK_BUFF_ALLOC];
#endif
if (check_stack_overrun(thd, STACK_MIN_SIZE, buff))
DBUG_RETURN(0); // Fatal error flag is set
if (select)
{
select->head=table;
- table->reginfo.impossible_range=0;
if ((error= select->test_quick_select(thd, *(key_map *)keys,(table_map) 0,
limit, 0)) == 1)
DBUG_RETURN(select->quick->records);
@@ -2439,12 +2616,13 @@ typedef struct st_sargable_param
uint num_values; /* number of values in the above array */
} SARGABLE_PARAM;
-/*
- Calculate the best possible join and initialize the join structure
+/**
+ Calculate the best possible join and initialize the join structure.
- RETURN VALUES
- 0 ok
- 1 Fatal error
+ @retval
+ 0 ok
+ @retval
+ 1 Fatal error
*/
static bool
@@ -2507,13 +2685,18 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables_arg, COND *conds,
s->dependent= tables->dep_tables;
s->key_dependent= 0;
if (tables->schema_table)
- table->file->records= 2;
+ table->file->stats.records= 2;
+ table->quick_condition_rows= table->file->stats.records;
s->on_expr_ref= &tables->on_expr;
if (*s->on_expr_ref)
{
/* s is the only inner table of an outer join */
- if (!table->file->records && !embedding)
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if ((!table->file->stats.records || table->no_partitions_used) && !embedding)
+#else
+ if (!table->file->stats.records && !embedding)
+#endif
{ // Empty table
s->dependent= 0; // Ignore LEFT JOIN depend.
set_position(join,const_count++,s,(KEYUSE*) 0);
@@ -2540,9 +2723,15 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables_arg, COND *conds,
while (embedding);
continue;
}
-
- if ((table->s->system || table->file->records <= 1) && ! s->dependent &&
- !(table->file->table_flags() & HA_NOT_EXACT_COUNT) &&
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ const bool no_partitions_used= table->no_partitions_used;
+#else
+ const bool no_partitions_used= FALSE;
+#endif
+ if ((table->s->system || table->file->stats.records <= 1 ||
+ no_partitions_used) &&
+ !s->dependent &&
+ (table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
!table->fulltext_searched && !join->no_const_tables)
{
set_position(join,const_count++,s,(KEYUSE*) 0);
@@ -2556,31 +2745,53 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables_arg, COND *conds,
/*
Build transitive closure for relation 'to be dependent on'.
This will speed up the plan search for many cases with outer joins,
- as well as allow us to catch illegal cross references/
+ as well as allow us to catch illegal cross references.
Warshall's algorithm is used to build the transitive closure.
- As we use bitmaps to represent the relation the complexity
- of the algorithm is O((number of tables)^2).
+ As we may restart the outer loop up to 'table_count' times, the
+ complexity of the algorithm is O((number of tables)^3).
+ However, most of the iterations will be short-circuited when
+ there are no dependencies to propagate.
*/
- for (i= 0, s= stat ; i < table_count ; i++, s++)
+ for (i= 0 ; i < table_count ; i++)
{
- for (uint j= 0 ; j < table_count ; j++)
+ uint j;
+ table= stat[i].table;
+
+ if (!table->reginfo.join_tab->dependent)
+ continue;
+
+ /* Add my dependencies to other tables depending on me */
+ for (j= 0, s= stat ; j < table_count ; j++, s++)
{
- table= stat[j].table;
if (s->dependent & table->map)
+ {
+ table_map was_dependent= s->dependent;
s->dependent |= table->reginfo.join_tab->dependent;
+ /*
+ If we change dependencies for a table we already have
+ processed: Redo dependency propagation from this table.
+ */
+ if (i > j && s->dependent != was_dependent)
+ {
+ i = j-1;
+ break;
+ }
+ }
}
- if (outer_join & s->table->map)
- s->table->maybe_null= 1;
}
- /* Catch illegal cross references for outer joins */
+
for (i= 0, s= stat ; i < table_count ; i++, s++)
{
+ /* Catch illegal cross references for outer joins */
if (s->dependent & s->table->map)
{
join->tables=0; // Don't use join->table
my_message(ER_WRONG_OUTER_JOIN, ER(ER_WRONG_OUTER_JOIN), MYF(0));
goto error;
}
+
+ if (outer_join & s->table->map)
+ s->table->maybe_null= 1;
s->key_dependent= s->dependent;
}
}
@@ -2666,8 +2877,8 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables_arg, COND *conds,
// All dep. must be constants
if (s->dependent & ~(found_const_table_map))
continue;
- if (table->file->records <= 1L &&
- !(table->file->table_flags() & HA_NOT_EXACT_COUNT) &&
+ if (table->file->stats.records <= 1L &&
+ (table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
!table->pos_in_table_list->embedding)
{ // system table
int tmp= 0;
@@ -2779,7 +2990,7 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables_arg, COND *conds,
continue;
}
/* Approximate found rows and time to read them */
- s->found_records=s->records=s->table->file->records;
+ s->found_records=s->records=s->table->file->stats.records;
s->read_time=(ha_rows) s->table->file->scan_time();
/*
@@ -2858,7 +3069,7 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables_arg, COND *conds,
}
else
{
- memcpy((gptr) join->best_positions,(gptr) join->positions,
+ memcpy((uchar*) join->best_positions,(uchar*) join->positions,
sizeof(POSITION)*join->const_tables);
join->best_read=1.0;
}
@@ -2886,13 +3097,14 @@ error:
keyuse Pointer to possible keys
*****************************************************************************/
-typedef struct key_field_t { // Used when finding key fields
+/// Used when finding key fields
+typedef struct key_field_t {
Field *field;
- Item *val; // May be empty if diff constant
+ Item *val; ///< May be empty if diff constant
uint level;
uint optimize;
bool eq_func;
- /*
+ /**
If true, the condition this struct represents will not be satisfied
when val IS NULL.
*/
@@ -2904,22 +3116,28 @@ typedef struct key_field_t { // Used when finding key fields
#define KEY_OPTIMIZE_EXISTS 1
#define KEY_OPTIMIZE_REF_OR_NULL 2
-/*
- Merge new key definitions to old ones, remove those not used in both
+/**
+ Merge new key definitions to old ones, remove those not used in both.
- This is called for OR between different levels
+ This is called for OR between different levels.
To be able to do 'ref_or_null' we merge a comparison of a column
and 'column IS NULL' to one test. This is useful for sub select queries
- that are internally transformed to something like:
+ that are internally transformed to something like:
+ @code
SELECT * FROM t1 WHERE t1.key=outer_ref_field or t1.key IS NULL
+ @endcode
- KEY_FIELD::null_rejecting is processed as follows:
+ KEY_FIELD::null_rejecting is processed as follows: @n
result has null_rejecting=true if it is set for both ORed references.
for example:
- (t2.key = t1.field OR t2.key = t1.field) -> null_rejecting=true
- (t2.key = t1.field OR t2.key <=> t1.field) -> null_rejecting=false
+ - (t2.key = t1.field OR t2.key = t1.field) -> null_rejecting=true
+ - (t2.key = t1.field OR t2.key <=> t1.field) -> null_rejecting=false
+
+ @todo
+ The result of this is that we're missing some 'ref' accesses.
+ OptimizerTeam: Fix this
*/
static KEY_FIELD *
@@ -3030,25 +3248,23 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end,
}
-/*
+/**
Add a possible key to array of possible keys if it's usable as a key
- SYNPOSIS
- add_key_field()
- key_fields Pointer to add key, if usable
- and_level And level, to be stored in KEY_FIELD
- cond Condition predicate
- field Field used in comparision
- eq_func True if we used =, <=> or IS NULL
- value Value used for comparison with field
- usable_tables Tables which can be used for key optimization
- sargables IN/OUT Array of found sargable candidates
+ @param key_fields Pointer to add key, if usable
+ @param and_level And level, to be stored in KEY_FIELD
+ @param cond Condition predicate
+ @param field Field used in comparison
+ @param eq_func True if we used =, <=> or IS NULL
+ @param value Value used for comparison with field
+ @param usable_tables Tables which can be used for key optimization
+ @param sargables IN/OUT Array of found sargable candidates
- NOTES
+ @note
If we are doing a NOT NULL comparison on a NOT NULL field in a outer join
table, we store this to be able to do not exists optimization later.
- RETURN
+ @returns
*key_fields is incremented if we stored a key in the array
*/
@@ -3108,7 +3324,10 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, Item_func *cond,
bool is_const=1;
for (uint i=0; i<num_values; i++)
- is_const&= value[i]->const_item();
+ {
+ if (!(is_const&= value[i]->const_item()))
+ break;
+ }
if (is_const)
stat[0].const_keys.merge(possible_keys);
else if (!eq_func)
@@ -3196,26 +3415,25 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, Item_func *cond,
(*key_fields)++;
}
-/*
- Add possible keys to array of possible keys originated from a simple predicate
-
- SYNPOSIS
- add_key_equal_fields()
- key_fields Pointer to add key, if usable
- and_level And level, to be stored in KEY_FIELD
- cond Condition predicate
- field Field used in comparision
- eq_func True if we used =, <=> or IS NULL
- value Value used for comparison with field
- Is NULL for BETWEEN and IN
- usable_tables Tables which can be used for key optimization
- sargables IN/OUT Array of found sargable candidates
-
- NOTES
+/**
+ Add possible keys to array of possible keys originated from a simple
+ predicate.
+
+ @param key_fields Pointer to add key, if usable
+ @param and_level And level, to be stored in KEY_FIELD
+ @param cond Condition predicate
+ @param field Field used in comparison
+ @param eq_func True if we used =, <=> or IS NULL
+ @param value Value used for comparison with field
+ Is NULL for BETWEEN and IN
+ @param usable_tables Tables which can be used for key optimization
+ @param sargables IN/OUT Array of found sargable candidates
+
+ @note
If field items f1 and f2 belong to the same multiple equality and
a key is added for f1, the the same key is added for f2.
- RETURN
+ @returns
*key_fields is incremented if we stored a key in the array
*/
@@ -3323,7 +3541,7 @@ add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level,
if (!join->group_list && !join->order &&
join->unit->item &&
join->unit->item->substype() == Item_subselect::IN_SUBS &&
- !join->unit->first_select()->next_select())
+ !join->unit->is_union())
{
KEY_FIELD *save= *key_fields;
add_key_fields(join, key_fields, and_level, cond_arg, usable_tables,
@@ -3511,7 +3729,7 @@ add_key_part(DYNAMIC_ARRAY *keyuse_array,KEY_FIELD *key_field)
keyuse.optimize= key_field->optimize & KEY_OPTIMIZE_REF_OR_NULL;
keyuse.null_rejecting= key_field->null_rejecting;
keyuse.cond_guard= key_field->cond_guard;
- if (insert_dynamic(keyuse_array,(gptr) &keyuse))
+ if (insert_dynamic(keyuse_array,(uchar*) &keyuse))
return TRUE;
}
}
@@ -3540,20 +3758,20 @@ add_ft_keys(DYNAMIC_ARRAY *keyuse_array,
cond_func=(Item_func_match *)cond;
else if (func->arg_count == 2)
{
- Item_func *arg0=(Item_func *)(func->arguments()[0]),
- *arg1=(Item_func *)(func->arguments()[1]);
- if (arg1->const_item() &&
+ Item *arg0= func->arguments()[0],
+ *arg1= func->arguments()[1];
+ if (arg1->const_item() && arg1->cols() == 1 &&
((functype == Item_func::GE_FUNC && arg1->val_real() > 0) ||
- (functype == Item_func::GT_FUNC && arg1->val_real() >=0)) &&
- arg0->type() == Item::FUNC_ITEM &&
- arg0->functype() == Item_func::FT_FUNC)
- cond_func=(Item_func_match *) arg0;
- else if (arg0->const_item() &&
+ (functype == Item_func::GT_FUNC && arg1->val_real() >= 0)) &&
+ arg0->type() == Item::FUNC_ITEM &&
+ ((Item_func *) arg0)->functype() == Item_func::FT_FUNC)
+ cond_func= (Item_func_match *) arg0;
+ else if (arg0->const_item() && arg0->cols() == 1 &&
((functype == Item_func::LE_FUNC && arg0->val_real() > 0) ||
- (functype == Item_func::LT_FUNC && arg0->val_real() >=0)) &&
- arg1->type() == Item::FUNC_ITEM &&
- arg1->functype() == Item_func::FT_FUNC)
- cond_func=(Item_func_match *) arg1;
+ (functype == Item_func::LT_FUNC && arg0->val_real() >= 0)) &&
+ arg1->type() == Item::FUNC_ITEM &&
+ ((Item_func *) arg1)->functype() == Item_func::FT_FUNC)
+ cond_func= (Item_func_match *) arg1;
}
}
else if (cond->type() == Item::COND_ITEM)
@@ -3583,7 +3801,7 @@ add_ft_keys(DYNAMIC_ARRAY *keyuse_array,
keyuse.used_tables=cond_func->key_item()->used_tables();
keyuse.optimize= 0;
keyuse.keypart_map= 0;
- return insert_dynamic(keyuse_array,(gptr) &keyuse);
+ return insert_dynamic(keyuse_array,(uchar*) &keyuse);
}
@@ -3608,33 +3826,37 @@ sort_keyuse(KEYUSE *a,KEYUSE *b)
/*
- Add to KEY_FIELD array all 'ref' access candidates within nested join
+ Add to KEY_FIELD array all 'ref' access candidates within nested join.
- SYNPOSIS
- add_key_fields_for_nj()
- nested_join_table IN Nested join pseudo-table to process
- end INOUT End of the key field array
- and_level INOUT And-level
- sargables IN/OUT Array of found sargable candidates
-
- DESCRIPTION
This function populates KEY_FIELD array with entries generated from the
ON condition of the given nested join, and does the same for nested joins
contained within this nested join.
- NOTES
+ @param[in] nested_join_table Nested join pseudo-table to process
+ @param[in,out] end End of the key field array
+ @param[in,out] and_level And-level
+ @param[in,out] sargables Array of found sargable candidates
+
+
+ @note
We can add accesses to the tables that are direct children of this nested
join (1), and are not inner tables w.r.t their neighbours (2).
Example for #1 (outer brackets pair denotes nested join this function is
invoked for):
+ @code
... LEFT JOIN (t1 LEFT JOIN (t2 ... ) ) ON cond
+ @endcode
Example for #2:
+ @code
... LEFT JOIN (t1 LEFT JOIN t2 ) ON cond
+ @endcode
In examples 1-2 for condition cond, we can add 'ref' access candidates to
t1 only.
Example #3:
+ @code
... LEFT JOIN (t1, t2 LEFT JOIN t3 ON inner_cond) ON cond
+ @endcode
Here we can add 'ref' access candidates for t1 and t2, but not for t3.
*/
@@ -3660,25 +3882,25 @@ static void add_key_fields_for_nj(JOIN *join, TABLE_LIST *nested_join_table,
}
-/*
- Update keyuse array with all possible keys we can use to fetch rows
+/**
+ Update keyuse array with all possible keys we can use to fetch rows.
- SYNOPSIS
- update_ref_and_keys()
- thd
- keyuse OUT Put here ordered array of KEYUSE structures
- join_tab Array in tablenr_order
- tables Number of tables in join
- cond WHERE condition (note that the function analyzes
- join_tab[i]->on_expr too)
- normal_tables Tables not inner w.r.t some outer join (ones for which
- we can make ref access based the WHERE clause)
- select_lex current SELECT
- sargables OUT Array of found sargable candidates
+ @param thd
+ @param[out] keyuse Put here ordered array of KEYUSE structures
+ @param join_tab Array in tablenr_order
+ @param tables Number of tables in join
+ @param cond WHERE condition (note that the function analyzes
+ join_tab[i]->on_expr too)
+ @param normal_tables Tables not inner w.r.t some outer join (ones
+ for which we can make ref access based on the WHERE
+ clause)
+ @param select_lex current SELECT
+ @param[out] sargables Array of found sargable candidates
- RETURN
- 0 - OK
- 1 - Out of memory.
+ @retval
+ 0 OK
+ @retval
+ 1 Out of memory.
*/
static bool
@@ -3801,7 +4023,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
(qsort_cmp) sort_keyuse);
bzero((char*) &key_end,sizeof(key_end)); /* Add for easy testing */
- if (insert_dynamic(keyuse,(gptr) &key_end))
+ if (insert_dynamic(keyuse,(uchar*) &key_end))
return TRUE;
use=save_pos=dynamic_element(keyuse,0,KEYUSE*);
@@ -3823,7 +4045,15 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
continue;
}
- *save_pos= *use;
+#if defined(__GNUC__) && !MY_GNUC_PREREQ(4,4)
+ /*
+ Old gcc used a memcpy(), which is undefined if save_pos==use:
+ http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19410
+ http://gcc.gnu.org/bugzilla/show_bug.cgi?id=39480
+ */
+ if (save_pos != use)
+#endif
+ *save_pos= *use;
prev=use;
found_eq_constant= !use->used_tables;
/* Save ptr to first use */
@@ -3833,14 +4063,14 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
save_pos++;
}
i=(uint) (save_pos-(KEYUSE*) keyuse->buffer);
- VOID(set_dynamic(keyuse,(gptr) &key_end,i));
+ VOID(set_dynamic(keyuse,(uchar*) &key_end,i));
keyuse->elements=i;
}
return FALSE;
}
-/*
- Update some values in keyuse for faster choose_plan() loop
+/**
+ Update some values in keyuse for faster choose_plan() loop.
*/
static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array)
@@ -3868,7 +4098,7 @@ static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array)
if (map == 1) // Only one table
{
TABLE *tmp_table=join->all_tables[tablenr];
- keyuse->ref_table_rows= max(tmp_table->file->records, 100);
+ keyuse->ref_table_rows= max(tmp_table->file->stats.records, 100);
}
}
/*
@@ -3881,23 +4111,21 @@ static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array)
}
-/*
+/**
Discover the indexes that can be used for GROUP BY or DISTINCT queries.
- SYNOPSIS
- add_group_and_distinct_keys()
- join
- join_tab
+ If the query has a GROUP BY clause, find all indexes that contain all
+ GROUP BY fields, and add those indexes to join->const_keys.
- DESCRIPTION
- If the query has a GROUP BY clause, find all indexes that contain all
- GROUP BY fields, and add those indexes to join->const_keys.
- If the query has a DISTINCT clause, find all indexes that contain all
- SELECT fields, and add those indexes to join->const_keys.
- This allows later on such queries to be processed by a
- QUICK_GROUP_MIN_MAX_SELECT.
+ If the query has a DISTINCT clause, find all indexes that contain all
+ SELECT fields, and add those indexes to join->const_keys.
+ This allows later on such queries to be processed by a
+ QUICK_GROUP_MIN_MAX_SELECT.
- RETURN
+ @param join
+ @param join_tab
+
+ @return
None
*/
@@ -3913,8 +4141,8 @@ add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab)
if (join->group_list)
{ /* Collect all query fields referenced in the GROUP clause. */
for (cur_group= join->group_list; cur_group; cur_group= cur_group->next)
- (*cur_group->item)->walk(&Item::collect_item_field_processor,
- (byte*) &indexed_fields);
+ (*cur_group->item)->walk(&Item::collect_item_field_processor, 0,
+ (uchar*) &indexed_fields);
}
else if (join->select_distinct)
{ /* Collect all query fields referenced in the SELECT clause. */
@@ -3922,7 +4150,8 @@ add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab)
List_iterator<Item> select_items_it(select_items);
Item *item;
while ((item= select_items_it++))
- item->walk(&Item::collect_item_field_processor, (byte*) &indexed_fields);
+ item->walk(&Item::collect_item_field_processor, 0,
+ (uchar*) &indexed_fields);
}
else
return;
@@ -3948,7 +4177,7 @@ add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab)
which uses least records
*****************************************************************************/
-/* Save const tables first as used tables */
+/** Save const tables first as used tables. */
static void
set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key)
@@ -3956,6 +4185,7 @@ set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key)
join->positions[idx].table= table;
join->positions[idx].key=key;
join->positions[idx].records_read=1.0; /* This is a const table */
+ join->positions[idx].ref_depend_map= 0;
/* Move the const table as down as possible in best_ref */
JOIN_TAB **pos=join->best_ref+idx+1;
@@ -3970,31 +4200,28 @@ set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key)
}
-/*
- Find the best access path for an extension of a partial execution plan and
- add this path to the plan.
-
- SYNOPSIS
- best_access_path()
- join pointer to the structure providing all context info
- for the query
- s the table to be joined by the function
- thd thread for the connection that submitted the query
- remaining_tables set of tables not included into the partial plan yet
- idx the length of the partial plan
- record_count estimate for the number of records returned by the partial
- plan
- read_time the cost of the partial plan
-
- DESCRIPTION
- The function finds the best access path to table 's' from the passed
- partial plan where an access path is the general term for any means to
- access the data in 's'. An access path may use either an index or a scan,
- whichever is cheaper. The input partial plan is passed via the array
- 'join->positions' of length 'idx'. The chosen access method for 's' and its
- cost are stored in 'join->positions[idx]'.
-
- RETURN
+/**
+ Find the best access path for an extension of a partial execution
+ plan and add this path to the plan.
+
+ The function finds the best access path to table 's' from the passed
+ partial plan where an access path is the general term for any means to
+ access the data in 's'. An access path may use either an index or a scan,
+ whichever is cheaper. The input partial plan is passed via the array
+ 'join->positions' of length 'idx'. The chosen access method for 's' and its
+ cost are stored in 'join->positions[idx]'.
+
+ @param join pointer to the structure providing all context info
+ for the query
+ @param s the table to be joined by the function
+ @param thd thread for the connection that submitted the query
+ @param remaining_tables set of tables not included into the partial plan yet
+ @param idx the length of the partial plan
+ @param record_count estimate for the number of records returned by the
+ partial plan
+ @param read_time the cost of the partial plan
+
+ @return
None
*/
@@ -4013,6 +4240,7 @@ best_access_path(JOIN *join,
double best= DBL_MAX;
double best_time= DBL_MAX;
double records= DBL_MAX;
+ table_map best_ref_depends_map= 0;
double tmp;
ha_rows rec;
DBUG_ENTER("best_access_path");
@@ -4040,13 +4268,20 @@ best_access_path(JOIN *join,
/* Calculate how many key segments of the current key we can use */
start_key= keyuse;
- do
- { /* for each keypart */
+
+ do /* For each keypart */
+ {
uint keypart= keyuse->keypart;
table_map best_part_found_ref= 0;
double best_prev_record_reads= DBL_MAX;
- do
+
+ do /* For each way to access the keypart */
{
+
+ /*
+ if 1. expression doesn't refer to forward tables
+ 2. we won't get two ref-or-null's
+ */
if (!(remaining_tables & keyuse->used_tables) &&
!(ref_or_null_part && (keyuse->optimize &
KEY_OPTIMIZE_REF_OR_NULL)))
@@ -4054,8 +4289,9 @@ best_access_path(JOIN *join,
found_part|= keyuse->keypart_map;
if (!(keyuse->used_tables & ~join->const_table_map))
const_part|= keyuse->keypart_map;
- double tmp2= prev_record_reads(join, (found_ref |
- keyuse->used_tables));
+
+ double tmp2= prev_record_reads(join, idx, (found_ref |
+ keyuse->used_tables));
if (tmp2 < best_prev_record_reads)
{
best_part_found_ref= keyuse->used_tables & ~join->const_table_map;
@@ -4094,7 +4330,7 @@ best_access_path(JOIN *join,
Really, there should be records=0.0 (yes!)
but 1.0 would be probably safer
*/
- tmp= prev_record_reads(join, found_ref);
+ tmp= prev_record_reads(join, idx, found_ref);
records= 1.0;
}
else
@@ -4109,7 +4345,7 @@ best_access_path(JOIN *join,
max_key_part= (uint) ~0;
if ((keyinfo->flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME)
{
- tmp = prev_record_reads(join, found_ref);
+ tmp = prev_record_reads(join, idx, found_ref);
records=1.0;
}
else
@@ -4174,10 +4410,10 @@ best_access_path(JOIN *join,
/* Limit the number of matched rows */
tmp= records;
set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key);
- if (table->used_keys.is_set(key))
+ if (table->covering_keys.is_set(key))
{
/* we can use only index tree */
- uint keys_per_block= table->file->block_size/2/
+ uint keys_per_block= table->file->stats.block_size/2/
(keyinfo->key_length+table->file->ref_length)+1;
tmp= record_count*(tmp+keys_per_block-1)/keys_per_block;
}
@@ -4246,7 +4482,30 @@ best_access_path(JOIN *join,
{
/* Check if we have statistic about the distribution */
if ((records= keyinfo->rec_per_key[max_key_part-1]))
+ {
+ /*
+ Fix for the case where the index statistics is too
+ optimistic: If
+ (1) We're considering ref(const) and there is quick select
+ on the same index,
+ (2) and that quick select uses more keyparts (i.e. it will
+ scan equal/smaller interval then this ref(const))
+ (3) and E(#rows) for quick select is higher then our
+ estimate,
+ Then
+ We'll use E(#rows) from quick select.
+
+ Q: Why do we choose to use 'ref'? Won't quick select be
+ cheaper in some cases ?
+ TODO: figure this out and adjust the plan choice if needed.
+ */
+ if (!found_ref && table->quick_keys.is_set(key) && // (1)
+ table->quick_key_parts[key] > max_key_part && // (2)
+ records < (double)table->quick_rows[key]) // (3)
+ records= (double)table->quick_rows[key];
+
tmp= records;
+ }
else
{
/*
@@ -4318,10 +4577,10 @@ best_access_path(JOIN *join,
/* Limit the number of matched rows */
set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key);
- if (table->used_keys.is_set(key))
+ if (table->covering_keys.is_set(key))
{
/* we can use only index tree */
- uint keys_per_block= table->file->block_size/2/
+ uint keys_per_block= table->file->stats.block_size/2/
(keyinfo->key_length+table->file->ref_length)+1;
tmp= record_count*(tmp+keys_per_block-1)/keys_per_block;
}
@@ -4339,6 +4598,7 @@ best_access_path(JOIN *join,
best_records= records;
best_key= start_key;
best_max_key_part= max_key_part;
+ best_ref_depends_map= found_ref;
}
}
records= best_records;
@@ -4375,21 +4635,31 @@ best_access_path(JOIN *join,
if ((records >= s->found_records || best > s->read_time) && // (1)
!(s->quick && best_key && s->quick->index == best_key->key && // (2)
best_max_key_part >= s->table->quick_key_parts[best_key->key]) &&// (2)
- !((s->table->file->table_flags() & HA_TABLE_SCAN_ON_INDEX) && // (3)
- ! s->table->used_keys.is_clear_all() && best_key && !s->quick) &&// (3)
+ !((s->table->file->ha_table_flags() & HA_TABLE_SCAN_ON_INDEX) && // (3)
+ ! s->table->covering_keys.is_clear_all() && best_key && !s->quick) &&// (3)
!(s->table->force_index && best_key && !s->quick)) // (4)
{ // Check full join
ha_rows rnd_records= s->found_records;
/*
- If there is a restriction on the table, assume that 25% of the
- rows can be skipped on next part.
- This is to force tables that this table depends on before this
- table
+ If there is a filtering condition on the table (i.e. ref analyzer found
+ at least one "table.keyXpartY= exprZ", where exprZ refers only to tables
+ preceding this table in the join order we're now considering), then
+ assume that 25% of the rows will be filtered out by this condition.
+
+ This heuristic is supposed to force tables used in exprZ to be before
+ this table in join order.
*/
if (found_constraint)
rnd_records-= rnd_records/4;
/*
+ If applicable, get a more accurate estimate. Don't use the two
+ heuristics at once.
+ */
+ if (s->table->quick_condition_rows != s->found_records)
+ rnd_records= s->table->quick_condition_rows;
+
+ /*
Range optimizer never proposes a RANGE if it isn't better
than FULL: so if RANGE is present, it's always preferred to FULL.
Here we estimate its cost.
@@ -4400,6 +4670,10 @@ best_access_path(JOIN *join,
For each record we:
- read record range through 'quick'
- skip rows which does not satisfy WHERE constraints
+ TODO:
+ We take into account possible use of join cache for ALL/index
+ access (see first else-branch below), but we don't take it into
+ account here for range/index_merge access. Find out why this is so.
*/
tmp= record_count *
(s->quick->read_time +
@@ -4453,6 +4727,8 @@ best_access_path(JOIN *join,
best= tmp;
records= rows2double(rnd_records);
best_key= 0;
+ /* range/index_merge/ALL/index access method are "independent", so: */
+ best_ref_depends_map= 0;
}
}
@@ -4461,6 +4737,7 @@ best_access_path(JOIN *join,
join->positions[idx].read_time= best;
join->positions[idx].key= best_key;
join->positions[idx].table= s;
+ join->positions[idx].ref_depend_map= best_ref_depends_map;
if (!best_key &&
idx == join->const_tables &&
@@ -4472,24 +4749,26 @@ best_access_path(JOIN *join,
}
-/*
+/**
Selects and invokes a search strategy for an optimal query plan.
- SYNOPSIS
- choose_plan()
- join pointer to the structure providing all context info for
- the query
- join_tables set of the tables in the query
+ The function checks user-configurable parameters that control the search
+ strategy for an optimal plan, selects the search method and then invokes
+ it. Each specific optimization procedure stores the final optimal plan in
+ the array 'join->best_positions', and the cost of the plan in
+ 'join->best_read'.
- DESCRIPTION
- The function checks user-configurable parameters that control the search
- strategy for an optimal plan, selects the search method and then invokes
- it. Each specific optimization procedure stores the final optimal plan in
- the array 'join->best_positions', and the cost of the plan in
- 'join->best_read'.
+ @param join pointer to the structure providing all context info for
+ the query
+ @param join_tables set of the tables in the query
- RETURN VALUES
+ @todo
+ 'MAX_TABLES+2' denotes the old implementation of find_best before
+ the greedy version. Will be removed when greedy_search is approved.
+
+ @retval
FALSE ok
+ @retval
TRUE Fatal error
*/
@@ -4542,25 +4821,21 @@ choose_plan(JOIN *join, table_map join_tables)
/*
Store the cost of this query into a user variable
- Don't update last_query_cost for 'show status' command.
Don't update last_query_cost for statements that are not "flat joins" :
i.e. they have subqueries, unions or call stored procedures.
TODO: calculate a correct cost for a query with subqueries and UNIONs.
*/
- if (join->thd->lex->orig_sql_command != SQLCOM_SHOW_STATUS &&
- join->thd->lex->is_single_level_stmt())
+ if (join->thd->lex->is_single_level_stmt())
join->thd->status_var.last_query_cost= join->best_read;
DBUG_RETURN(FALSE);
}
-/*
+/**
Compare two JOIN_TAB objects based on the number of accessed records.
- SYNOPSIS
- join_tab_cmp()
- ptr1 pointer to first JOIN_TAB object
- ptr2 pointer to second JOIN_TAB object
+ @param ptr1 pointer to first JOIN_TAB object
+ @param ptr2 pointer to second JOIN_TAB object
NOTES
The order relation implemented by join_tab_cmp() is not transitive,
@@ -4573,9 +4848,11 @@ choose_plan(JOIN *join, table_map join_tables)
b: dependent = 0x0 table->map = 0x2 found_records = 3 ptr = 0x907e838
c: dependent = 0x6 table->map = 0x10 found_records = 2 ptr = 0x907ecd0
- RETURN
+ @retval
1 if first is bigger
- -1 if second is bigger
+ @retval
+ -1 if second is bigger
+ @retval
0 if equal
*/
@@ -4597,7 +4874,7 @@ join_tab_cmp(const void* ptr1, const void* ptr2)
}
-/*
+/**
Same as join_tab_cmp, but for use with SELECT_STRAIGHT_JOIN.
*/
@@ -4614,27 +4891,33 @@ join_tab_cmp_straight(const void* ptr1, const void* ptr2)
return jt1 > jt2 ? 1 : (jt1 < jt2 ? -1 : 0);
}
-/*
+/**
Heuristic procedure to automatically guess a reasonable degree of
exhaustiveness for the greedy search procedure.
- SYNOPSIS
- determine_search_depth()
- join pointer to the structure providing all context info for the query
+ The procedure estimates the optimization time and selects a search depth
+ big enough to result in a near-optimal QEP, that doesn't take too long to
+ find. If the number of tables in the query exceeds some constant, then
+ search_depth is set to this constant.
- DESCRIPTION
- The procedure estimates the optimization time and selects a search depth
- big enough to result in a near-optimal QEP, that doesn't take too long to
- find. If the number of tables in the query exceeds some constant, then
- search_depth is set to this constant.
+ @param join pointer to the structure providing all context info for
+ the query
- NOTES
+ @note
This is an extremely simplistic implementation that serves as a stub for a
more advanced analysis of the join. Ideally the search depth should be
determined by learning from previous query optimizations, because it will
depend on the CPU power (and other factors).
- RETURN
+ @todo
+ this value should be determined dynamically, based on statistics:
+ uint max_tables_for_exhaustive_opt= 7;
+
+ @todo
+ this value could be determined by some mapping of the form:
+ depth : table_count -> [max_tables_for_exhaustive_opt..MAX_EXHAUSTIVE]
+
+ @return
A positive integer that specifies the search depth (and thus the
exhaustiveness) of the depth-first search algorithm used by
'greedy_search'.
@@ -4661,16 +4944,9 @@ determine_search_depth(JOIN *join)
}
-/*
+/**
Select the best ways to access the tables in a query without reordering them.
- SYNOPSIS
- optimize_straight_join()
- join pointer to the structure providing all context info for
- the query
- join_tables set of the tables in the query
-
- DESCRIPTION
Find the best access paths for each query table and compute their costs
according to their order in the array 'join->best_ref' (thus without
reordering the join tables). The function calls sequentially
@@ -4678,15 +4954,17 @@ determine_search_depth(JOIN *join)
access method. The final optimal plan is stored in the array
'join->best_positions', and the corresponding cost in 'join->best_read'.
- NOTES
+ @param join pointer to the structure providing all context info for
+ the query
+ @param join_tables set of the tables in the query
+
+ @note
This function can be applied to:
- queries with STRAIGHT_JOIN
- internally to compute the cost of an arbitrary QEP
+ @par
Thus 'optimize_straight_join' can be used at any stage of the query
optimization process to finalize a QEP as it is.
-
- RETURN
- None
*/
static void
@@ -4713,37 +4991,30 @@ optimize_straight_join(JOIN *join, table_map join_tables)
if (join->sort_by_table &&
join->sort_by_table != join->positions[join->const_tables].table->table)
read_time+= record_count; // We have to make a temp table
- memcpy((gptr) join->best_positions, (gptr) join->positions,
+ memcpy((uchar*) join->best_positions, (uchar*) join->positions,
sizeof(POSITION)*idx);
join->best_read= read_time;
}
-/*
+/**
Find a good, possibly optimal, query execution plan (QEP) by a greedy search.
- SYNOPSIS
- join pointer to the structure providing all context info
- for the query
- remaining_tables set of tables not included into the partial plan yet
- search_depth controlls the exhaustiveness of the search
- prune_level the pruning heuristics that should be applied during
- search
-
- DESCRIPTION
The search procedure uses a hybrid greedy/exhaustive search with controlled
exhaustiveness. The search is performed in N = card(remaining_tables)
steps. Each step evaluates how promising is each of the unoptimized tables,
selects the most promising table, and extends the current partial QEP with
 that table. Currently the most 'promising' table is the one with least
- expensive extension.
+ expensive extension.
+
There are two extreme cases:
- 1. When (card(remaining_tables) < search_depth), the estimate finds the best
- complete continuation of the partial QEP. This continuation can be
- used directly as a result of the search.
- 2. When (search_depth == 1) the 'best_extension_by_limited_search'
- consideres the extension of the current QEP with each of the remaining
- unoptimized tables.
+ -# When (card(remaining_tables) < search_depth), the estimate finds the
+ best complete continuation of the partial QEP. This continuation can be
+ used directly as a result of the search.
+ -# When (search_depth == 1) the 'best_extension_by_limited_search'
+ considers the extension of the current QEP with each of the remaining
+ unoptimized tables.
+
All other cases are in-between these two extremes. Thus the parameter
 'search_depth' controls the exhaustiveness of the search. The higher the
 value, the longer the optimization time and possibly the better the
@@ -4751,16 +5022,18 @@ optimize_straight_join(JOIN *join, table_map join_tables)
estimated, but the more likely to get a bad QEP.
All intermediate and final results of the procedure are stored in 'join':
- join->positions modified for every partial QEP that is explored
- join->best_positions modified for the current best complete QEP
- join->best_read modified for the current best complete QEP
- join->best_ref might be partially reordered
+ - join->positions : modified for every partial QEP that is explored
+ - join->best_positions: modified for the current best complete QEP
+ - join->best_read : modified for the current best complete QEP
+ - join->best_ref : might be partially reordered
+
The final optimal plan is stored in 'join->best_positions', and its
corresponding cost in 'join->best_read'.
- NOTES
+ @note
The following pseudocode describes the algorithm of 'greedy_search':
+ @code
procedure greedy_search
input: remaining_tables
output: pplan;
@@ -4774,6 +5047,7 @@ optimize_straight_join(JOIN *join, table_map join_tables)
return pplan;
}
+ @endcode
where 'best_extension' is a placeholder for a procedure that selects the
most "promising" of all tables in 'remaining_tables'.
Currently this estimate is performed by calling
@@ -4781,16 +5055,26 @@ optimize_straight_join(JOIN *join, table_map join_tables)
current QEP of size 'search_depth', thus the complexity of 'greedy_search'
mainly depends on that of 'best_extension_by_limited_search'.
+ @par
If 'best_extension()' == 'best_extension_by_limited_search()', then the
worst-case complexity of this algorithm is <=
 O(N*N^search_depth/search_depth). When search_depth >= N, then the
complexity of greedy_search is O(N!).
+ @par
In the future, 'greedy_search' might be extended to support other
implementations of 'best_extension', e.g. some simpler quadratic procedure.
- RETURN VALUES
+ @param join pointer to the structure providing all context info
+ for the query
+ @param remaining_tables set of tables not included into the partial plan yet
+ @param search_depth controls the exhaustiveness of the search
+ @param prune_level the pruning heuristics that should be applied during
+ search
+
+ @retval
FALSE ok
+ @retval
TRUE Fatal error
*/
@@ -4883,29 +5167,10 @@ greedy_search(JOIN *join,
}
-/*
+/**
Find a good, possibly optimal, query execution plan (QEP) by a possibly
exhaustive search.
- SYNOPSIS
- best_extension_by_limited_search()
- join pointer to the structure providing all context info for
- the query
- remaining_tables set of tables not included into the partial plan yet
- idx length of the partial QEP in 'join->positions';
- since a depth-first search is used, also corresponds to
- the current depth of the search tree;
- also an index in the array 'join->best_ref';
- record_count estimate for the number of records returned by the best
- partial plan
- read_time the cost of the best partial plan
- search_depth maximum depth of the recursion and thus size of the found
- optimal plan (0 < search_depth <= join->tables+1).
- prune_level pruning heuristics that should be applied during
- optimization
- (values: 0 = EXHAUSTIVE, 1 = PRUNE_BY_TIME_OR_ROWS)
-
- DESCRIPTION
The procedure searches for the optimal ordering of the query tables in set
'remaining_tables' of size N, and the corresponding optimal access paths to
each table. The choice of a table order and an access path for each table
@@ -4932,16 +5197,18 @@ greedy_search(JOIN *join,
The final optimal plan is stored in 'join->best_positions'. The
corresponding cost of the optimal plan is in 'join->best_read'.
- NOTES
+ @note
The procedure uses a recursive depth-first search where the depth of the
recursion (and thus the exhaustiveness of the search) is controlled by the
parameter 'search_depth'.
+ @note
The pseudocode below describes the algorithm of
'best_extension_by_limited_search'. The worst-case complexity of this
 algorithm is O(N*N^search_depth/search_depth). When search_depth >= N, then
the complexity of greedy_search is O(N!).
+ @code
procedure best_extension_by_limited_search(
pplan in, // in, partial plan of tables-joined-so-far
pplan_cost, // in, cost of pplan
@@ -4981,18 +5248,39 @@ greedy_search(JOIN *join,
}
}
}
+ @endcode
- IMPLEMENTATION
+ @note
When 'best_extension_by_limited_search' is called for the first time,
'join->best_read' must be set to the largest possible value (e.g. DBL_MAX).
The actual implementation provides a way to optionally use pruning
heuristic (controlled by the parameter 'prune_level') to reduce the search
space by skipping some partial plans.
+
+ @note
The parameter 'search_depth' provides control over the recursion
depth, and thus the size of the resulting optimal plan.
- RETURN VALUES
+ @param join pointer to the structure providing all context info
+ for the query
+ @param remaining_tables set of tables not included into the partial plan yet
+ @param idx length of the partial QEP in 'join->positions';
+ since a depth-first search is used, also corresponds
+ to the current depth of the search tree;
+ also an index in the array 'join->best_ref';
+ @param record_count estimate for the number of records returned by the
+ best partial plan
+ @param read_time the cost of the best partial plan
+ @param search_depth maximum depth of the recursion and thus size of the
+ found optimal plan
+ (0 < search_depth <= join->tables+1).
+ @param prune_level pruning heuristics that should be applied during
+ optimization
+ (values: 0 = EXHAUSTIVE, 1 = PRUNE_BY_TIME_OR_ROWS)
+
+ @retval
FALSE ok
+ @retval
TRUE Fatal error
*/
@@ -5011,6 +5299,9 @@ best_extension_by_limited_search(JOIN *join,
if (thd->killed) // Abort
DBUG_RETURN(TRUE);
+ DBUG_EXECUTE("opt", print_plan(join, idx, read_time, record_count, idx,
+ "SOFAR:"););
+
/*
'join' is a partial plan with lower cost than the best plan so far,
so continue expanding it further with the tables in 'remaining_tables'.
@@ -5112,7 +5403,7 @@ best_extension_by_limited_search(JOIN *join,
current_read_time+= current_record_count;
if ((search_depth == 1) || (current_read_time < join->best_read))
{
- memcpy((gptr) join->best_positions, (gptr) join->positions,
+ memcpy((uchar*) join->best_positions, (uchar*) join->positions,
sizeof(POSITION) * (idx + 1));
join->best_read= current_read_time - 0.001;
}
@@ -5129,8 +5420,9 @@ best_extension_by_limited_search(JOIN *join,
}
-/*
- TODO: this function is here only temporarily until 'greedy_search' is
+/**
+ @todo
+ - TODO: this function is here only temporarily until 'greedy_search' is
tested and accepted.
RETURN VALUES
@@ -5157,7 +5449,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
read_time+=record_count; // We have to make a temp table
if (read_time < join->best_read)
{
- memcpy((gptr) join->best_positions,(gptr) join->positions,
+ memcpy((uchar*) join->best_positions,(uchar*) join->positions,
sizeof(POSITION)*idx);
join->best_read= read_time - 0.001;
}
@@ -5211,19 +5503,20 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
}
-/*
- Find how much space the prevous read not const tables takes in cache
+/**
+ Find how much space the previously read non-const tables take in cache.
*/
static void calc_used_field_length(THD *thd, JOIN_TAB *join_tab)
{
uint null_fields,blobs,fields,rec_length;
- null_fields=blobs=fields=rec_length=0;
-
Field **f_ptr,*field;
+ MY_BITMAP *read_set= join_tab->table->read_set;;
+
+ null_fields= blobs= fields= rec_length=0;
for (f_ptr=join_tab->table->field ; (field= *f_ptr) ; f_ptr++)
{
- if (field->query_id == thd->query_id)
+ if (bitmap_is_set(read_set, field->field_index))
{
uint flags=field->flags;
fields++;
@@ -5240,7 +5533,7 @@ static void calc_used_field_length(THD *thd, JOIN_TAB *join_tab)
rec_length+=sizeof(my_bool);
if (blobs)
{
- uint blob_length=(uint) (join_tab->table->file->mean_rec_length-
+ uint blob_length=(uint) (join_tab->table->file->stats.mean_rec_length-
(join_tab->table->s->reclength- rec_length));
rec_length+=(uint) max(4,blob_length);
}
@@ -5270,26 +5563,94 @@ cache_record_length(JOIN *join,uint idx)
}
+/*
+ Get the number of different row combinations for subset of partial join
+
+ SYNOPSIS
+ prev_record_reads()
+ join The join structure
+ idx Number of tables in the partial join order (i.e. the
+ partial join order is in join->positions[0..idx-1])
+ found_ref Bitmap of tables for which we need to find # of distinct
+ row combinations.
+
+ DESCRIPTION
+ Given a partial join order (in join->positions[0..idx-1]) and a subset of
+ tables within that join order (specified in found_ref), find out how many
+ distinct row combinations of subset tables will be in the result of the
+ partial join order.
+
+ This is used as follows: Suppose we have a table accessed with a ref-based
+ method. The ref access depends on current rows of tables in found_ref.
+ We want to count # of different ref accesses. We assume two ref accesses
+ will be different if at least one of access parameters is different.
+ Example: consider a query
+
+ SELECT * FROM t1, t2, t3 WHERE t1.key=c1 AND t2.key=c2 AND t3.key=t1.field
+
+ and a join order:
+ t1, ref access on t1.key=c1
+ t2, ref access on t2.key=c2
+ t3, ref access on t3.key=t1.field
+
+ For t1: n_ref_scans = 1, n_distinct_ref_scans = 1
+ For t2: n_ref_scans = records_read(t1), n_distinct_ref_scans=1
+ For t3: n_ref_scans = records_read(t1)*records_read(t2)
+ n_distinct_ref_scans = #records_read(t1)
+
+ The reason for having this function (at least the latest version of it)
+ is that we need to account for buffering in join execution.
+
+ An edge-case example: if we have a non-first table in join accessed via
+ ref(const) or ref(param) where there is a small number of different
+ values of param, then the access will likely hit the disk cache and will
+ not require any disk seeks.
+
+ The proper solution would be to assume an LRU disk cache of some size,
+ calculate probability of cache hits, etc. For now we just count
+ identical ref accesses as one.
+
+ RETURN
+ Expected number of row combinations
+*/
+
static double
-prev_record_reads(JOIN *join,table_map found_ref)
+prev_record_reads(JOIN *join, uint idx, table_map found_ref)
{
double found=1.0;
- found_ref&= ~OUTER_REF_TABLE_BIT;
- for (POSITION *pos=join->positions ; found_ref ; pos++)
+ POSITION *pos_end= join->positions - 1;
+ for (POSITION *pos= join->positions + idx - 1; pos != pos_end; pos--)
{
if (pos->table->table->map & found_ref)
{
- found_ref&= ~pos->table->table->map;
- found*=pos->records_read;
+ found_ref|= pos->ref_depend_map;
+ /*
+ For the case of "t1 LEFT JOIN t2 ON ..." where t2 is a const table
+ with no matching row we will get position[t2].records_read==0.
+ Actually the size of output is one null-complemented row, therefore
+ we will use value of 1 whenever we get records_read==0.
+
+ Note
+ - the above case can't occur if inner part of outer join has more
+ than one table: table with no matches will not be marked as const.
+
+ - Ideally we should add 1 to records_read for every possible null-
+ complemented row. We're not doing it because: 1. it will require
+ non-trivial code and add overhead. 2. The value of records_read
+ is an inprecise estimate and adding 1 (or, in the worst case,
+ #max_nested_outer_joins=64-1) will not make it any more precise.
+ */
+ if (pos->records_read)
+ found*= pos->records_read;
}
}
return found;
}
-/*****************************************************************************
+/**
Set up join struct according to best position.
-*****************************************************************************/
+*/
static bool
get_best_combination(JOIN *join)
@@ -5399,7 +5760,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse,
j->ref.key_parts=keyparts;
j->ref.key_length=length;
j->ref.key=(int) key;
- if (!(j->ref.key_buff= (byte*) thd->calloc(ALIGN_SIZE(length)*2)) ||
+ if (!(j->ref.key_buff= (uchar*) thd->calloc(ALIGN_SIZE(length)*2)) ||
!(j->ref.key_copy= (store_key**) thd->alloc((sizeof(store_key*) *
(keyparts+1)))) ||
!(j->ref.items= (Item**) thd->alloc(sizeof(Item*)*keyparts)) ||
@@ -5415,7 +5776,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse,
keyuse=org_keyuse;
store_key **ref_key= j->ref.key_copy;
- byte *key_buff=j->ref.key_buff, *null_ref_key= 0;
+ uchar *key_buff=j->ref.key_buff, *null_ref_key= 0;
bool keyuse_uses_no_tables= TRUE;
if (ftkey)
{
@@ -5446,8 +5807,8 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse,
!(join->select_options & SELECT_DESCRIBE))
{ // Compare against constant
store_key_item tmp(thd, keyinfo->key_part[i].field,
- (char*)key_buff + maybe_null,
- maybe_null ? (char*) key_buff : 0,
+ key_buff + maybe_null,
+ maybe_null ? key_buff : 0,
keyinfo->key_part[i].length, keyuse->val);
if (thd->is_fatal_error)
DBUG_RETURN(TRUE);
@@ -5457,7 +5818,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse,
*ref_key++= get_store_key(thd,
keyuse,join->const_table_map,
&keyinfo->key_part[i],
- (char*) key_buff,maybe_null);
+ key_buff, maybe_null);
/*
Remember if we are going to use REF_OR_NULL
But only if field _really_ can be null i.e. we force JT_REF
@@ -5501,7 +5862,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse,
static store_key *
get_store_key(THD *thd, KEYUSE *keyuse, table_map used_tables,
- KEY_PART_INFO *key_part, char *key_buff, uint maybe_null)
+ KEY_PART_INFO *key_part, uchar *key_buff, uint maybe_null)
{
if (!((~used_tables) & keyuse->used_tables)) // if const item
{
@@ -5533,17 +5894,23 @@ get_store_key(THD *thd, KEYUSE *keyuse, table_map used_tables,
keyuse->val);
}
-/*
- This function is only called for const items on fields which are keys
- returns 1 if there was some conversion made when the field was stored.
+/**
+ This function is only called for const items on fields which are keys.
+
+ @return
+ returns 1 if there was some conversion made when the field was stored.
*/
bool
store_val_in_field(Field *field, Item *item, enum_check_fields check_flag)
{
bool error;
- THD *thd= field->table->in_use;
+ TABLE *table= field->table;
+ THD *thd= table->in_use;
ha_rows cuted_fields=thd->cuted_fields;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table,
+ table->write_set);
+
/*
we should restore old value of count_cuted_fields because
store_val_in_field can be called from mysql_insert
@@ -5553,6 +5920,7 @@ store_val_in_field(Field *field, Item *item, enum_check_fields check_flag)
thd->count_cuted_fields= check_flag;
error= item->save_in_field(field, 1);
thd->count_cuted_fields= old_count_cuted_fields;
+ dbug_tmp_restore_column_map(table->write_set, old_map);
return error || cuted_fields != thd->cuted_fields;
}
@@ -5568,7 +5936,7 @@ store_val_in_field(Field *field, Item *item, enum_check_fields check_flag)
@retval TRUE error occurred
*/
bool
-JOIN::make_simple_join(JOIN *parent, TABLE *tmp_table)
+JOIN::make_simple_join(JOIN *parent, TABLE *temp_table)
{
DBUG_ENTER("JOIN::make_simple_join");
@@ -5581,12 +5949,18 @@ JOIN::make_simple_join(JOIN *parent, TABLE *tmp_table)
DBUG_RETURN(TRUE); /* purecov: inspected */
join_tab= parent->join_tab_reexec;
- table= &parent->table_reexec[0]; parent->table_reexec[0]= tmp_table;
+ table= &parent->table_reexec[0]; parent->table_reexec[0]= temp_table;
tables= 1;
const_tables= 0;
const_table_map= 0;
tmp_table_param.field_count= tmp_table_param.sum_func_count=
tmp_table_param.func_count= 0;
+ /*
+ We need to destruct the copy_field (allocated in create_tmp_table())
+ before setting it to 0 if the join is not "reusable".
+ */
+ if (!tmp_join || tmp_join != this)
+ tmp_table_param.cleanup();
tmp_table_param.copy_field= tmp_table_param.copy_field_end=0;
first_record= sort_and_group=0;
send_records= (ha_rows) 0;
@@ -5595,7 +5969,7 @@ JOIN::make_simple_join(JOIN *parent, TABLE *tmp_table)
do_send_rows= row_limit ? 1 : 0;
join_tab->cache.buff=0; /* No caching */
- join_tab->table=tmp_table;
+ join_tab->table=temp_table;
join_tab->select=0;
join_tab->select_cond=0;
join_tab->quick=0;
@@ -5612,8 +5986,8 @@ JOIN::make_simple_join(JOIN *parent, TABLE *tmp_table)
join_tab->join= this;
join_tab->ref.key_parts= 0;
bzero((char*) &join_tab->read_record,sizeof(join_tab->read_record));
- tmp_table->status=0;
- tmp_table->null_row=0;
+ temp_table->status=0;
+ temp_table->null_row=0;
DBUG_RETURN(FALSE);
}
@@ -5627,6 +6001,7 @@ inline void add_cond_and_fix(Item **e1, Item *e2)
{
*e1= res;
res->quick_fix_field();
+ res->update_used_tables();
}
}
else
@@ -5634,22 +6009,18 @@ inline void add_cond_and_fix(Item **e1, Item *e2)
}
-/*
- Add to join_tab->select_cond[i] "table.field IS NOT NULL" conditions we've
- inferred from ref/eq_ref access performed.
-
- SYNOPSIS
- add_not_null_conds()
- join Join to process
+/**
+ Add to join_tab->select_cond[i] "table.field IS NOT NULL" conditions
+ we've inferred from ref/eq_ref access performed.
- NOTES
This function is a part of "Early NULL-values filtering for ref access"
optimization.
Example of this optimization:
- For query SELECT * FROM t1,t2 WHERE t2.key=t1.field
- and plan " any-access(t1), ref(t2.key=t1.field) "
- add "t1.field IS NOT NULL" to t1's table condition.
+ For query SELECT * FROM t1,t2 WHERE t2.key=t1.field @n
+ and plan " any-access(t1), ref(t2.key=t1.field) " @n
+ add "t1.field IS NOT NULL" to t1's table condition. @n
+
Description of the optimization:
 We look through equalities chosen to perform ref/eq_ref access,
@@ -5661,8 +6032,10 @@ inline void add_cond_and_fix(Item **e1, Item *e2)
Exception from that is the case when referred_tab->join != join.
I.e. don't add NOT NULL constraints from any embedded subquery.
Consider this query:
+ @code
SELECT A.f2 FROM t1 LEFT JOIN t2 A ON A.f2 = f1
WHERE A.f3=(SELECT MIN(f3) FROM t2 C WHERE A.f4 = C.f4) OR A.f3 IS NULL;
+ @endcode
Here condition A.f3 IS NOT NULL is going to be added to the WHERE
condition of the embedding query.
Another example:
@@ -5724,7 +6097,8 @@ static void add_not_null_conds(JOIN *join)
if (notnull->fix_fields(join->thd, &notnull))
DBUG_VOID_RETURN;
DBUG_EXECUTE("where",print_where(notnull,
- referred_tab->table->alias););
+ referred_tab->table->alias,
+ QT_ORDINARY););
add_cond_and_fix(&referred_tab->select_cond, notnull);
}
}
@@ -5733,36 +6107,31 @@ static void add_not_null_conds(JOIN *join)
DBUG_VOID_RETURN;
}
-/*
- Build a predicate guarded by match variables for embedding outer joins
-
- SYNOPSIS
- add_found_match_trig_cond()
- tab the first inner table for most nested outer join
- cond the predicate to be guarded
- root_tab the first inner table to stop
-
- DESCRIPTION
- The function recursively adds guards for predicate cond
- assending from tab to the first inner table next embedding
- nested outer join and so on until it reaches root_tab
- (root_tab can be 0).
-
- RETURN VALUE
- pointer to the guarded predicate, if success
- 0, otherwise
-*/
+/**
+ Build a predicate guarded by match variables for embedding outer joins.
+ The function recursively adds guards for predicate cond
+ ascending from tab to the first inner table next embedding
+ nested outer join and so on until it reaches root_tab
+ (root_tab can be 0).
+
+ @param tab the first inner table for most nested outer join
+ @param cond the predicate to be guarded (must be set)
+ @param root_tab the first inner table to stop
+
+ @return
+ - pointer to the guarded predicate, if success
+ - 0, otherwise
+*/
static COND*
add_found_match_trig_cond(JOIN_TAB *tab, COND *cond, JOIN_TAB *root_tab)
{
COND *tmp;
- if (tab == root_tab || !cond)
+ DBUG_ASSERT(cond != 0);
+ if (tab == root_tab)
return cond;
if ((tmp= add_found_match_trig_cond(tab->first_upper, cond, root_tab)))
- {
tmp= new Item_func_trig_cond(tmp, &tab->found);
- }
if (tmp)
{
tmp->quick_fix_field();
@@ -5772,14 +6141,9 @@ add_found_match_trig_cond(JOIN_TAB *tab, COND *cond, JOIN_TAB *root_tab)
}
-/*
- Fill in outer join related info for the execution plan structure
-
- SYNOPSIS
- make_outerjoin_info()
- join - reference to the info fully describing the query
+/**
+ Fill in outer join related info for the execution plan structure.
- DESCRIPTION
For each outer join operation left after simplification of the
original query the function set up the following pointers in the linear
structure join->join_tab representing the selected execution plan.
@@ -5794,21 +6158,25 @@ add_found_match_trig_cond(JOIN_TAB *tab, COND *cond, JOIN_TAB *root_tab)
corresponding first inner table through the field t0->on_expr_ref.
Here ti are structures of the JOIN_TAB type.
- EXAMPLE
- For the query:
- SELECT * FROM t1
- LEFT JOIN
- (t2, t3 LEFT JOIN t4 ON t3.a=t4.a)
- ON (t1.a=t2.a AND t1.b=t3.b)
- WHERE t1.c > 5,
+ EXAMPLE. For the query:
+ @code
+ SELECT * FROM t1
+ LEFT JOIN
+ (t2, t3 LEFT JOIN t4 ON t3.a=t4.a)
+ ON (t1.a=t2.a AND t1.b=t3.b)
+ WHERE t1.c > 5,
+ @endcode
+
given the execution plan with the table order t1,t2,t3,t4
is selected, the following references will be set;
t4->last_inner=[t4], t4->first_inner=[t4], t4->first_upper=[t2]
t2->last_inner=[t4], t2->first_inner=t3->first_inner=[t2],
on expression (t1.a=t2.a AND t1.b=t3.b) will be attached to
*t2->on_expr_ref, while t3.a=t4.a will be attached to *t4->on_expr_ref.
-
- NOTES
+
+ @param join reference to the info fully describing the query
+
+ @note
The function assumes that the simplification procedure has been
already applied to the join query (see simplify_joins).
This function can be called only after the execution plan
@@ -5888,7 +6256,30 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
make_cond_for_table(cond,
join->const_table_map,
(table_map) 0);
- DBUG_EXECUTE("where",print_where(const_cond,"constants"););
+ DBUG_EXECUTE("where",print_where(const_cond,"constants", QT_ORDINARY););
+ for (JOIN_TAB *tab= join->join_tab+join->const_tables;
+ tab < join->join_tab+join->tables ; tab++)
+ {
+ if (*tab->on_expr_ref)
+ {
+ JOIN_TAB *cond_tab= tab->first_inner;
+ COND *tmp= make_cond_for_table(*tab->on_expr_ref,
+ join->const_table_map,
+ ( table_map) 0);
+ if (!tmp)
+ continue;
+ tmp= new Item_func_trig_cond(tmp, &cond_tab->not_null_compl);
+ if (!tmp)
+ DBUG_RETURN(1);
+ tmp->quick_fix_field();
+ cond_tab->select_cond= !cond_tab->select_cond ? tmp :
+ new Item_cond_and(cond_tab->select_cond,
+ tmp);
+ if (!cond_tab->select_cond)
+ DBUG_RETURN(1);
+ cond_tab->select_cond->quick_fix_field();
+ }
+ }
if (const_cond && !const_cond->val_int())
{
DBUG_PRINT("info",("Found impossible WHERE condition"));
@@ -5901,6 +6292,10 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
for (uint i=join->const_tables ; i < join->tables ; i++)
{
JOIN_TAB *tab=join->join_tab+i;
+ /*
+ first_inner is the X in queries like:
+ SELECT * FROM t1 LEFT OUTER JOIN (t2 JOIN t3) ON X
+ */
JOIN_TAB *first_inner_tab= tab->first_inner;
table_map current_map= tab->table->map;
bool use_quick_range=0;
@@ -5925,6 +6320,12 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
tab->ref.key= -1;
tab->ref.key_parts=0; // Don't use ref key.
join->best_positions[i].records_read= rows2double(tab->quick->records);
+ /*
+ We will use join cache here : prevent sorting of the first
+ table only and sort at the end.
+ */
+ if (i != join->const_tables && join->tables > join->const_tables + 1)
+ join->full_join= 1;
}
tmp= NULL;
@@ -5951,15 +6352,15 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
*/
DBUG_PRINT("info", ("Item_int"));
tmp= new Item_int((longlong) 1,1); // Always true
- DBUG_PRINT("info", ("Item_int 0x%lx", (ulong)tmp));
}
}
- if (tmp || !cond)
+ if (tmp || !cond || tab->type == JT_REF)
{
- DBUG_EXECUTE("where",print_where(tmp,tab->table->alias););
- SQL_SELECT *sel=tab->select=(SQL_SELECT*)
- thd->memdup((gptr) select, sizeof(SQL_SELECT));
+ DBUG_EXECUTE("where",print_where(tmp,tab->table->alias, QT_ORDINARY););
+ SQL_SELECT *sel= tab->select= ((SQL_SELECT*)
+ thd->memdup((uchar*) select,
+ sizeof(*select)));
if (!sel)
DBUG_RETURN(1); // End of memory
/*
@@ -5968,7 +6369,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
The guard will turn the predicate on only after
the first match for outer tables is encountered.
*/
- if (cond)
+ if (cond && tmp)
{
/*
Because of QUICK_GROUP_MIN_MAX_SELECT there may be a select without
@@ -5996,7 +6397,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
tab->select_cond= sel->cond= NULL;
sel->head=tab->table;
- DBUG_EXECUTE("where",print_where(tmp,tab->table->alias););
+ DBUG_EXECUTE("where",print_where(tmp,tab->table->alias, QT_ORDINARY););
if (tab->quick)
{
/* Use quick key read if it's a constant and it's not used
@@ -6035,7 +6436,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
*/
if ((cond &&
- (!tab->keys.is_subset(tab->const_keys) && i > 0)) ||
+ !tab->keys.is_subset(tab->const_keys) && i > 0) ||
(!tab->const_keys.is_clear_all() && i == join->const_tables &&
join->unit->select_limit_cnt <
join->best_positions[i].records_read &&
@@ -6108,9 +6509,9 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
current_map,
current_map)))
{
- DBUG_EXECUTE("where",print_where(tmp,"cache"););
+ DBUG_EXECUTE("where",print_where(tmp,"cache", QT_ORDINARY););
tab->cache.select=(SQL_SELECT*)
- thd->memdup((gptr) sel, sizeof(SQL_SELECT));
+ thd->memdup((uchar*) sel, sizeof(SQL_SELECT));
tab->cache.select->cond=tmp;
tab->cache.select->read_tables=join->const_table_map;
}
@@ -6279,9 +6680,9 @@ static void
make_join_readinfo(JOIN *join, ulonglong options)
{
uint i;
-
bool statistics= test(!(join->select_options & SELECT_DESCRIBE));
bool ordered_set= 0;
+ bool sorted= 1;
DBUG_ENTER("make_join_readinfo");
for (i=join->const_tables ; i < join->tables ; i++)
@@ -6307,6 +6708,8 @@ make_join_readinfo(JOIN *join, ulonglong options)
(join->sort_by_table == (TABLE *) 1 && i != join->const_tables)))
ordered_set= 1;
+ tab->sorted= sorted;
+ sorted= 0; // only first must be sorted
table->status=STATUS_NO_RECORD;
pick_table_access_method (tab);
@@ -6325,12 +6728,9 @@ make_join_readinfo(JOIN *join, ulonglong options)
tab->quick=0;
/* fall through */
case JT_CONST: // Only happens with left join
- if (table->used_keys.is_set(tab->ref.key) &&
+ if (table->covering_keys.is_set(tab->ref.key) &&
!table->no_keyread)
- {
- table->key_read=1;
- table->file->extra(HA_EXTRA_KEYREAD);
- }
+ table->set_keyread(TRUE);
break;
case JT_ALL:
/*
@@ -6353,8 +6753,7 @@ make_join_readinfo(JOIN *join, ulonglong options)
join->thd->server_status|=SERVER_QUERY_NO_GOOD_INDEX_USED;
tab->read_first_record= join_init_quick_read_record;
if (statistics)
- statistic_increment(join->thd->status_var.select_range_check_count,
- &LOCK_status);
+ status_var_increment(join->thd->status_var.select_range_check_count);
}
else
{
@@ -6364,15 +6763,13 @@ make_join_readinfo(JOIN *join, ulonglong options)
if (tab->select && tab->select->quick)
{
if (statistics)
- statistic_increment(join->thd->status_var.select_range_count,
- &LOCK_status);
+ status_var_increment(join->thd->status_var.select_range_count);
}
else
{
join->thd->server_status|=SERVER_QUERY_NO_INDEX_USED;
if (statistics)
- statistic_increment(join->thd->status_var.select_scan_count,
- &LOCK_status);
+ status_var_increment(join->thd->status_var.select_scan_count);
}
}
else
@@ -6380,30 +6777,36 @@ make_join_readinfo(JOIN *join, ulonglong options)
if (tab->select && tab->select->quick)
{
if (statistics)
- statistic_increment(join->thd->status_var.select_full_range_join_count,
- &LOCK_status);
+ status_var_increment(join->thd->status_var.select_full_range_join_count);
}
else
{
join->thd->server_status|=SERVER_QUERY_NO_INDEX_USED;
if (statistics)
- statistic_increment(join->thd->status_var.select_full_join_count,
- &LOCK_status);
+ status_var_increment(join->thd->status_var.select_full_join_count);
}
}
if (!table->no_keyread)
{
if (tab->select && tab->select->quick &&
tab->select->quick->index != MAX_KEY && //not index_merge
- table->used_keys.is_set(tab->select->quick->index))
- {
- table->key_read=1;
- table->file->extra(HA_EXTRA_KEYREAD);
- }
- else if (!table->used_keys.is_clear_all() &&
+ table->covering_keys.is_set(tab->select->quick->index))
+ table->set_keyread(TRUE);
+ else if (!table->covering_keys.is_clear_all() &&
!(tab->select && tab->select->quick))
{ // Only read index tree
- tab->index=find_shortest_key(table, & table->used_keys);
+ /*
+ It has turned out that the below change, while speeding things
+ up for disk-bound loads, slows them down for cases when the data
+ is in disk cache (see BUG#35850):
+ // See bug #26447: "Using the clustered index for a table scan
+ // is always faster than using a secondary index".
+ if (table->s->primary_key != MAX_KEY &&
+ table->file->primary_key_is_clustered())
+ tab->index= table->s->primary_key;
+ else
+ */
+ tab->index=find_shortest_key(table, & table->covering_keys);
tab->read_first_record= join_read_first;
tab->type=JT_NEXT; // Read with index_first / index_next
}
@@ -6426,20 +6829,18 @@ make_join_readinfo(JOIN *join, ulonglong options)
}
-/*
- Give error if we some tables are done with a full join
+/**
+ Give an error if some tables are done with a full join.
- SYNOPSIS
- error_if_full_join()
- join Join condition
+ This is used by multi_table_update and multi_table_delete when running
+ in safe mode.
- USAGE
- This is used by multi_table_update and multi_table_delete when running
- in safe mode
+ @param join Join condition
- RETURN VALUES
- 0 ok
- 1 Error (full join used)
+ @retval
+ 0 ok
+ @retval
+ 1 Error (full join used)
*/
bool error_if_full_join(JOIN *join)
@@ -6450,6 +6851,8 @@ bool error_if_full_join(JOIN *join)
{
if (tab->type == JT_ALL && (!tab->select || !tab->select->quick))
{
+ /* This error should not be ignored. */
+ join->select_lex->no_error= FALSE;
my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,
ER(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0));
return(1);
@@ -6459,11 +6862,8 @@ bool error_if_full_join(JOIN *join)
}
-/*
- cleanup JOIN_TAB
-
- SYNOPSIS
- JOIN_TAB::cleanup()
+/**
+ cleanup JOIN_TAB.
*/
void JOIN_TAB::cleanup()
@@ -6474,13 +6874,10 @@ void JOIN_TAB::cleanup()
quick= 0;
x_free(cache.buff);
cache.buff= 0;
+ limit= 0;
if (table)
{
- if (table->key_read)
- {
- table->key_read= 0;
- table->file->extra(HA_EXTRA_NO_KEYREAD);
- }
+ table->set_keyread(FALSE);
table->file->ha_index_or_rnd_end();
/*
We need to reset this for next select
@@ -6492,11 +6889,10 @@ void JOIN_TAB::cleanup()
}
-/*
+/**
Partially cleanup JOIN after it has executed: close index or rnd read
(table cursors), free quick selects.
- DESCRIPTION
This function is called in the end of execution of a JOIN, before the used
tables are unlocked and closed.
@@ -6516,23 +6912,24 @@ void JOIN_TAB::cleanup()
If a JOIN is executed for a subquery or if it has a subquery, we can't
do the full cleanup and need to do a partial cleanup only.
- o If a JOIN is not the top level join, we must not unlock the tables
- because the outer select may not have been evaluated yet, and we
- can't unlock only selected tables of a query.
-
- o Additionally, if this JOIN corresponds to a correlated subquery, we
- should not free quick selects and join buffers because they will be
- needed for the next execution of the correlated subquery.
-
- o However, if this is a JOIN for a [sub]select, which is not
- a correlated subquery itself, but has subqueries, we can free it
- fully and also free JOINs of all its subqueries. The exception
- is a subquery in SELECT list, e.g:
- SELECT a, (select max(b) from t1) group by c
- This subquery will not be evaluated at first sweep and its value will
- not be inserted into the temporary table. Instead, it's evaluated
- when selecting from the temporary table. Therefore, it can't be freed
- here even though it's not correlated.
+ - If a JOIN is not the top level join, we must not unlock the tables
+ because the outer select may not have been evaluated yet, and we
+ can't unlock only selected tables of a query.
+ - Additionally, if this JOIN corresponds to a correlated subquery, we
+ should not free quick selects and join buffers because they will be
+ needed for the next execution of the correlated subquery.
+ - However, if this is a JOIN for a [sub]select, which is not
+ a correlated subquery itself, but has subqueries, we can free it
+ fully and also free JOINs of all its subqueries. The exception
+ is a subquery in SELECT list, e.g: @n
+ SELECT a, (select max(b) from t1) group by c @n
+ This subquery will not be evaluated at first sweep and its value will
+ not be inserted into the temporary table. Instead, it's evaluated
+ when selecting from the temporary table. Therefore, it can't be freed
+ here even though it's not correlated.
+
+ @todo
+ Unlock tables even if the join isn't top level select in the tree
*/
void JOIN::join_free()
@@ -6592,15 +6989,15 @@ void JOIN::join_free()
}
-/*
- Free resources of given join
+/**
+ Free resources of given join.
- SYNOPSIS
- JOIN::cleanup()
- fill - true if we should free all resources, call with full==1 should be
- last, before it this function can be called with full==0
+ @param full true if we should free all resources, call with full==1
+ should be last, before it this function can be called with
+ full==0
- NOTE: with subquery this function definitely will be called several times,
+ @note
+ With subquery this function definitely will be called several times,
but even for simple query it can be called several times.
*/
@@ -6632,7 +7029,7 @@ void JOIN::cleanup(bool full)
for (tab= join_tab, end= tab+tables; tab != end; tab++)
{
if (tab->table)
- tab->table->file->ha_index_or_rnd_end();
+ tab->table->file->ha_index_or_rnd_end();
}
}
}
@@ -6676,21 +7073,25 @@ void JOIN::cleanup(bool full)
}
-/*****************************************************************************
+/**
Remove the following expressions from ORDER BY and GROUP BY:
- Constant expressions
+ Constant expressions @n
Expression that only uses tables that are of type EQ_REF and the reference
is in the ORDER list or if all refereed tables are of the above type.
In the following, the X field can be removed:
+ @code
SELECT * FROM t1,t2 WHERE t1.a=t2.a ORDER BY t1.a,t2.X
SELECT * FROM t1,t2,t3 WHERE t1.a=t2.a AND t2.b=t3.b ORDER BY t1.a,t3.X
+ @endcode
These can't be optimized:
+ @code
SELECT * FROM t1,t2 WHERE t1.a=t2.a ORDER BY t2.X,t1.a
SELECT * FROM t1,t2 WHERE t1.a=t2.a AND t1.b=t2.b ORDER BY t1.a,t2.c
SELECT * FROM t1,t2 WHERE t1.a=t2.a ORDER BY t2.b,t1.a
-*****************************************************************************/
+ @endcode
+*/
static bool
eq_ref_table(JOIN *join, ORDER *start_order, JOIN_TAB *tab)
@@ -6761,7 +7162,7 @@ only_eq_ref_tables(JOIN *join,ORDER *order,table_map tables)
}
-/* Update the dependency map for the tables */
+/** Update the dependency map for the tables. */
static void update_depend_map(JOIN *join)
{
@@ -6788,7 +7189,7 @@ static void update_depend_map(JOIN *join)
}
-/* Update the dependency map for the sort order */
+/** Update the dependency map for the sort order. */
static void update_depend_map(JOIN *join, ORDER *order)
{
@@ -6814,25 +7215,23 @@ static void update_depend_map(JOIN *join, ORDER *order)
}
-/*
- Remove all constants and check if ORDER only contains simple expressions
-
- SYNOPSIS
- remove_const()
- join Join handler
- first_order List of SORT or GROUP order
- cond WHERE statement
- change_list Set to 1 if we should remove things from list
- If this is not set, then only simple_order is
- calculated
- simple_order Set to 1 if we are only using simple expressions
+/**
+ Remove all constants and check if ORDER only contains simple
+ expressions.
- RETURN
- Returns new sort order
+ simple_order is set to 1 if sort_order only uses fields from head table
+ and the head table is not a LEFT JOIN table.
- simple_order is set to 1 if sort_order only uses fields from head table
- and the head table is not a LEFT JOIN table
+ @param join Join handler
+ @param first_order List of SORT or GROUP order
+ @param cond WHERE statement
+ @param change_list Set to 1 if we should remove things from list.
+ If this is not set, then only simple_order is
+ calculated.
+ @param simple_order Set to 1 if we are only using simple expressions
+ @return
+ Returns new sort order
*/
static ORDER *
@@ -6857,11 +7256,24 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond,
for (order=first_order; order ; order=order->next)
{
table_map order_tables=order->item[0]->used_tables();
- if (order->item[0]->with_sum_func)
+ if (order->item[0]->with_sum_func ||
+ /*
+ If the outer table of an outer join is const (either by itself or
+ after applying WHERE condition), grouping on a field from such a
+ table will be optimized away and filesort without temporary table
+ will be used unless we prevent that now. Filesort is not fit to
+ handle joins and the join condition is not applied. We can't detect
+ the case without an expensive test, however, so we force temporary
+ table for all queries containing more than one table, ROLLUP, and an
+ outer join.
+ */
+ (join->tables > 1 && join->rollup.state == ROLLUP::STATE_INITED &&
+ join->outer_join))
*simple_order=0; // Must do a temp table to sort
else if (!(order_tables & not_const_tables))
{
- if (order->item[0]->with_subselect)
+ if (order->item[0]->with_subselect &&
+ !(join->select_lex->options & SELECT_DESCRIBE))
order->item[0]->val_str(&order->item[0]->str_value);
DBUG_PRINT("info",("removing: %s", order->item[0]->full_name()));
continue; // skip const item
@@ -6928,15 +7340,17 @@ return_zero_rows(JOIN *join, select_result *result,TABLE_LIST *tables,
if (!(result->send_fields(fields,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)))
{
+ bool send_error= FALSE;
if (send_row)
{
List_iterator_fast<Item> it(fields);
Item *item;
while ((item= it++))
item->no_rows_in_result();
- result->send_data(fields);
+ send_error= result->send_data(fields);
}
- result->send_eof(); // Should be safe
+ if (!send_error)
+ result->send_eof(); // Should be safe
}
/* Update results for FOUND_ROWS */
join->thd->limit_found_rows= join->thd->examined_row_count= 0;
@@ -6988,25 +7402,23 @@ template class List_iterator<Item_func_match>;
#endif
-/*
- Find the multiple equality predicate containing a field
-
- SYNOPSIS
- find_item_equal()
- cond_equal multiple equalities to search in
- field field to look for
- inherited_fl :out set up to TRUE if multiple equality is found
- on upper levels (not on current level of cond_equal)
-
- DESCRIPTION
- The function retrieves the multiple equalities accessed through
- the con_equal structure from current level and up looking for
- an equality containing field. It stops retrieval as soon as the equality
- is found and set up inherited_fl to TRUE if it's found on upper levels.
-
- RETURN
- Item_equal for the found multiple equality predicate if a success;
- NULL - otherwise.
+/**
+ Find the multiple equality predicate containing a field.
+
+ The function retrieves the multiple equalities accessed through
+ the cond_equal structure from current level and up looking for
+ an equality containing field. It stops retrieval as soon as the equality
+ is found and set up inherited_fl to TRUE if it's found on upper levels.
+
+ @param cond_equal multiple equalities to search in
+ @param field field to look for
+ @param[out] inherited_fl set up to TRUE if multiple equality is found
+ on upper levels (not on current level of
+ cond_equal)
+
+ @return
+ - Item_equal for the found multiple equality predicate if a success;
+ - NULL otherwise.
*/
Item_equal *find_item_equal(COND_EQUAL *cond_equal, Field *field,
@@ -7032,18 +7444,9 @@ finish:
}
-/*
- Check whether an equality can be used to build multiple equalities
-
- SYNOPSIS
- check_simple_equality()
- left_item left term of the quality to be checked
- right_item right term of the equality to be checked
- item equality item if the equality originates from a condition
- predicate, 0 if the equality is the result of row elimination
- cond_equal multiple equalities that must hold together with the equality
+/**
+ Check whether an equality can be used to build multiple equalities.
- DESCRIPTION
This function first checks whether the equality (left_item=right_item)
is a simple equality i.e. the one that equates a field with another field
or a constant (field=field_item or field=const_item).
@@ -7058,22 +7461,24 @@ finish:
This guarantees that the set of multiple equalities covering equality
predicates will be minimal.
- EXAMPLE
+ EXAMPLE:
For the where condition
- WHERE a=b AND b=c AND
- (b=2 OR f=e)
+ @code
+ WHERE a=b AND b=c AND
+ (b=2 OR f=e)
+ @endcode
the check_equality will be called for the following equality
predicates a=b, b=c, b=2 and f=e.
- For a=b it will be called with *cond_equal=(0,[]) and will transform
- *cond_equal into (0,[Item_equal(a,b)]).
- For b=c it will be called with *cond_equal=(0,[Item_equal(a,b)])
- and will transform *cond_equal into CE=(0,[Item_equal(a,b,c)]).
- For b=2 it will be called with *cond_equal=(ptr(CE),[])
- and will transform *cond_equal into (ptr(CE),[Item_equal(2,a,b,c)]).
- For f=e it will be called with *cond_equal=(ptr(CE), [])
- and will transform *cond_equal into (ptr(CE),[Item_equal(f,e)]).
-
- NOTES
+ - For a=b it will be called with *cond_equal=(0,[]) and will transform
+ *cond_equal into (0,[Item_equal(a,b)]).
+ - For b=c it will be called with *cond_equal=(0,[Item_equal(a,b)])
+ and will transform *cond_equal into CE=(0,[Item_equal(a,b,c)]).
+ - For b=2 it will be called with *cond_equal=(ptr(CE),[])
+ and will transform *cond_equal into (ptr(CE),[Item_equal(2,a,b,c)]).
+ - For f=e it will be called with *cond_equal=(ptr(CE), [])
+ and will transform *cond_equal into (ptr(CE),[Item_equal(f,e)]).
+
+ @note
Now only fields that have the same type definitions (verified by
the Field::eq_def method) are placed to the same multiple equalities.
Because of this some equality predicates are not eliminated and
@@ -7085,8 +7490,8 @@ finish:
equality. But at the same time it would allow us to get rid
of constant propagation completely: it would be done by the call
to build_equal_items_for_cond.
-
- IMPLEMENTATION
+
+
The implementation does not follow exactly the above rules to
build a new multiple equality for the equality predicate.
If it processes the equality of the form field1=field2, it
@@ -7106,9 +7511,18 @@ finish:
acceptable, as this happens rarely. The implementation without
copying would be much more complicated.
- RETURN
+ @param left_item left term of the equality to be checked
+ @param right_item right term of the equality to be checked
+ @param item equality item if the equality originates from a condition
+ predicate, 0 if the equality is the result of row
+ elimination
+ @param cond_equal multiple equalities that must hold together with the
+ equality
+
+ @retval
TRUE if the predicate is a simple equality predicate to be used
- for building multiple equalities
+ for building multiple equalities
+ @retval
FALSE otherwise
*/
@@ -7277,30 +7691,30 @@ static bool check_simple_equality(Item *left_item, Item *right_item,
}
-/*
- Convert row equalities into a conjunction of regular equalities
-
- SYNOPSIS
- check_row_equality()
- thd thread handle
- left_row left term of the row equality to be processed
- right_row right term of the row equality to be processed
- cond_equal multiple equalities that must hold together with the predicate
- eq_list results of conversions of row equalities that are not simple
- enough to form multiple equalities
+/**
+ Convert row equalities into a conjunction of regular equalities.
- DESCRIPTION
The function converts a row equality of the form (E1,...,En)=(E'1,...,E'n)
into a list of equalities E1=E'1,...,En=E'n. For each of these equalities
- Ei=E'i the function checks whether it is a simple equality or a row equality.
- If it is a simple equality it is used to expand multiple equalities of
- cond_equal. If it is a row equality it converted to a sequence of equalities
- between row elements. If Ei=E'i is neither a simple equality nor a row
- equality the item for this predicate is added to eq_list.
-
- RETURN
- TRUE if conversion has succeeded (no fatal error)
- FALSE otherwise
+ Ei=E'i the function checks whether it is a simple equality or a row
+ equality. If it is a simple equality it is used to expand multiple
+ equalities of cond_equal. If it is a row equality it is converted to a
+ sequence of equalities between row elements. If Ei=E'i is neither a
+ simple equality nor a row equality the item for this predicate is added
+ to eq_list.
+
+ @param thd thread handle
+ @param left_row left term of the row equality to be processed
+ @param right_row right term of the row equality to be processed
+ @param cond_equal multiple equalities that must hold together with the
+ predicate
+ @param eq_list results of conversions of row equalities that are not
+ simple enough to form multiple equalities
+
+ @retval
+ TRUE if conversion has succeeded (no fatal error)
+ @retval
+ FALSE otherwise
*/
static bool check_row_equality(THD *thd, Item *left_row, Item_row *right_row,
@@ -7342,18 +7756,9 @@ static bool check_row_equality(THD *thd, Item *left_row, Item_row *right_row,
}
-/*
- Eliminate row equalities and form multiple equalities predicates
-
- SYNOPSIS
- check_equality()
- thd thread handle
- item predicate to process
- cond_equal multiple equalities that must hold together with the predicate
- eq_list results of conversions of row equalities that are not simple
- enough to form multiple equalities
+/**
+ Eliminate row equalities and form multiple equalities predicates.
- DESCRIPTION
This function checks whether the item is a simple equality
i.e. the one that equates a field with another field or a constant
(field=field_item or field=constant_item), or, a row equality.
@@ -7361,11 +7766,20 @@ static bool check_row_equality(THD *thd, Item *left_row, Item_row *right_row,
in the lists referenced directly or indirectly by cond_equal inferring
the given simple equality. If it doesn't find any, it builds/expands
multiple equality that covers the predicate.
- Row equalities are eliminated substituted for conjunctive regular equalities
- which are treated in the same way as original equality predicates.
-
- RETURN
+ Row equalities are eliminated, substituted by conjunctive regular
+ equalities which are treated in the same way as original equality
+ predicates.
+
+ @param thd thread handle
+ @param item predicate to process
+ @param cond_equal multiple equalities that must hold together with the
+ predicate
+ @param eq_list results of conversions of row equalities that are not
+ simple enough to form multiple equalities
+
+ @retval
TRUE if re-writing rules have been applied
+ @retval
FALSE otherwise, i.e.
if the predicate is not an equality,
or, if the equality is neither a simple one nor a row equality,
@@ -7397,16 +7811,9 @@ static bool check_equality(THD *thd, Item *item, COND_EQUAL *cond_equal,
}
-/*
- Replace all equality predicates in a condition by multiple equality items
-
- SYNOPSIS
- build_equal_items_for_cond()
- thd thread handle
- cond condition(expression) where to make replacement
- inherited path to all inherited multiple equality items
+/**
+ Replace all equality predicates in a condition by multiple equality items.
- DESCRIPTION
At each 'and' level the function detects items for equality predicates
and replaced them by a set of multiple equality items of class Item_equal,
taking into account inherited equalities from upper levels.
@@ -7425,7 +7832,7 @@ static bool check_equality(THD *thd, Item *item, COND_EQUAL *cond_equal,
equality lists of each Item_cond_and object assigning it to
thd->lex->current_select->max_equal_elems.
- NOTES
+ @note
Multiple equality predicate =(f1,..fn) is equivalent to the conjuction of
f1=f2, .., fn-1=fn. It substitutes any inference from these
equality predicates that is equivalent to the conjunction.
@@ -7443,7 +7850,6 @@ static bool check_equality(THD *thd, Item *item, COND_EQUAL *cond_equal,
but if additionally =(t4.d,t2.b) is inherited, it
will be replaced by (=(t1.a,t2.b,t3.c,t4.d) AND t2.b>5)
- IMPLEMENTATION
The function performs the substitution in a recursive descent by
the condtion tree, passing to the next AND level a chain of multiple
equality predicates which have been built at the upper levels.
@@ -7457,10 +7863,15 @@ static bool check_equality(THD *thd, Item *item, COND_EQUAL *cond_equal,
- join them into disjoint Item_equal() groups
- process the included OR conditions recursively to do the same for
lower AND levels.
+
We need to do things in this order as lower AND levels need to know about
all possible Item_equal objects in upper levels.
- RETURN
+ @param thd thread handle
+ @param cond condition(expression) where to make replacement
+ @param inherited path to all inherited multiple equality items
+
+ @return
pointer to the transformed condition
*/
@@ -7484,7 +7895,7 @@ static COND *build_equal_items_for_cond(THD *thd, COND *cond,
if (and_level)
{
/*
- Retrieve all conjucts of this level detecting the equality
+ Retrieve all conjuncts of this level detecting the equality
that are subject to substitution by multiple equality items and
removing each such predicate from the conjunction after having
found/created a multiple equality whose inference the predicate is.
@@ -7500,6 +7911,15 @@ static COND *build_equal_items_for_cond(THD *thd, COND *cond,
li.remove();
}
+ /*
+ Check if we eliminated all the predicates of the level, e.g.
+ (a=a AND b=b AND a=a).
+ */
+ if (!args->elements &&
+ !cond_equal.current_level.elements &&
+ !eq_list.elements)
+ return new Item_int((longlong) 1, 1);
+
List_iterator_fast<Item_equal> it(cond_equal.current_level);
while ((item_equal= it++))
{
@@ -7597,30 +8017,21 @@ static COND *build_equal_items_for_cond(THD *thd, COND *cond,
as soon the field is not of a string type or the field reference is
an argument of a comparison predicate.
*/
- byte *is_subst_valid= (byte *) 1;
+ uchar *is_subst_valid= (uchar *) 1;
cond= cond->compile(&Item::subst_argument_checker,
&is_subst_valid,
&Item::equal_fields_propagator,
- (byte *) inherited);
+ (uchar *) inherited);
cond->update_used_tables();
}
return cond;
}
-/*
+/**
Build multiple equalities for a condition and all on expressions that
- inherit these multiple equalities
-
- SYNOPSIS
- build_equal_items()
- thd thread handle
- cond condition to build the multiple equalities for
- inherited path to all inherited multiple equality items
- join_list list of join tables to which the condition refers to
- cond_equal_ref :out pointer to the structure to place built equalities in
+ inherit these multiple equalities.
- DESCRIPTION
The function first applies the build_equal_items_for_cond function
to build all multiple equalities for condition cond utilizing equalities
referred through the parameter inherited. The extended set of
@@ -7629,14 +8040,16 @@ static COND *build_equal_items_for_cond(THD *thd, COND *cond,
all on expressions whose direct references can be found in join_list
and who inherit directly the multiple equalities just having built.
- NOTES
+ @note
The on expression used in an outer join operation inherits all equalities
- from the on expression of the embedding join, if there is any, or
+ from the on expression of the embedding join, if there is any, or
otherwise - from the where condition.
This fact is not obvious, but presumably can be proved.
Consider the following query:
+ @code
SELECT * FROM (t1,t2) LEFT JOIN (t3,t4) ON t1.a=t3.a AND t2.a=t4.a
WHERE t1.a=t2.a;
+ @endcode
If the on expression in the query inherits =(t1.a,t2.a), then we
can build the multiple equality =(t1.a,t2.a,t3.a,t4.a) that infers
the equality t3.a=t4.a. Although the on expression
@@ -7646,23 +8059,38 @@ static COND *build_equal_items_for_cond(THD *thd, COND *cond,
Interesting that multiple equality =(t1.a,t2.a,t3.a,t4.a) allows us
to use t1.a=t3.a AND t3.a=t4.a under the on condition:
+ @code
SELECT * FROM (t1,t2) LEFT JOIN (t3,t4) ON t1.a=t3.a AND t3.a=t4.a
WHERE t1.a=t2.a
+ @endcode
This query equivalent to:
+ @code
SELECT * FROM (t1 LEFT JOIN (t3,t4) ON t1.a=t3.a AND t3.a=t4.a),t2
WHERE t1.a=t2.a
+ @endcode
Similarly the original query can be rewritten to the query:
+ @code
SELECT * FROM (t1,t2) LEFT JOIN (t3,t4) ON t2.a=t4.a AND t3.a=t4.a
WHERE t1.a=t2.a
+ @endcode
that is equivalent to:
+ @code
SELECT * FROM (t2 LEFT JOIN (t3,t4)ON t2.a=t4.a AND t3.a=t4.a), t1
WHERE t1.a=t2.a
+ @endcode
Thus, applying equalities from the where condition we basically
can get more freedom in performing join operations.
Althogh we don't use this property now, it probably makes sense to use
it in the future.
-
- RETURN
+ @param thd Thread handler
+ @param cond condition to build the multiple equalities for
+ @param inherited path to all inherited multiple equality items
+ @param join_list list of join tables to which the condition
+ refers to
+ @param[out] cond_equal_ref pointer to the structure to place built
+ equalities in
+
+ @return
pointer to the transformed condition containing multiple equalities
*/
@@ -7720,25 +8148,24 @@ static COND *build_equal_items(THD *thd, COND *cond,
}
-/*
- Compare field items by table order in the execution plan
-
- SYNOPSIS
- compare_fields_by_table_order()
- field1 first field item to compare
- field2 second field item to compare
- table_join_idx index to tables determining table order
+/**
+ Compare field items by table order in the execution plan.
- DESCRIPTION
field1 considered as better than field2 if the table containing
field1 is accessed earlier than the table containing field2.
The function finds out what of two fields is better according
this criteria.
- RETURN
- 1, if field1 is better than field2
- -1, if field2 is better than field1
- 0, otherwise
+ @param field1 first field item to compare
+ @param field2 second field item to compare
+ @param table_join_idx index to tables determining table order
+
+ @retval
+ 1 if field1 is better than field2
+ @retval
+ -1 if field2 is better than field1
+ @retval
+ 0 otherwise
*/
static int compare_fields_by_table_order(Item_field *field1,
@@ -7760,21 +8187,14 @@ static int compare_fields_by_table_order(Item_field *field1,
if (outer_ref)
return cmp;
JOIN_TAB **idx= (JOIN_TAB **) table_join_idx;
- cmp= (uint) (idx[field2->field->table->tablenr] - idx[field1->field->table->tablenr]);
+ cmp= idx[field2->field->table->tablenr]-idx[field1->field->table->tablenr];
return cmp < 0 ? -1 : (cmp ? 1 : 0);
}
-/*
- Generate minimal set of simple equalities equivalent to a multiple equality
-
- SYNOPSIS
- eliminate_item_equal()
- cond condition to add the generated equality to
- upper_levels structure to access multiple equality of upper levels
- item_equal multiple equality to generate simple equality from
+/**
+ Generate minimal set of simple equalities equivalent to a multiple equality.
- DESCRIPTION
The function retrieves the fields of the multiple equality item
item_equal and for each field f:
- if item_equal contains const it generates the equality f=const_item;
@@ -7782,7 +8202,11 @@ static int compare_fields_by_table_order(Item_field *field1,
f=item_equal->get_first().
All generated equality are added to the cond conjunction.
- NOTES
+ @param cond condition to add the generated equality to
+ @param upper_levels structure to access multiple equality of upper levels
+ @param item_equal multiple equality to generate simple equality from
+
+ @note
Before generating an equality function checks that it has not
been generated for multiple equalities of the upper levels.
E.g. for the following where condition
@@ -7802,10 +8226,10 @@ static int compare_fields_by_table_order(Item_field *field1,
If cond is equal to 0, then not more then one equality is generated
and a pointer to it is returned as the result of the function.
- RETURN
- The condition with generated simple equalities or
+ @return
+ - The condition with generated simple equalities or
a pointer to the simple generated equality, if success.
- 0, otherwise.
+ - 0, otherwise.
*/
static Item *eliminate_item_equal(COND *cond, COND_EQUAL *upper_levels,
@@ -7870,7 +8294,8 @@ static Item *eliminate_item_equal(COND *cond, COND_EQUAL *upper_levels,
else
{
DBUG_ASSERT(cond->type() == Item::COND_ITEM);
- ((Item_cond *) cond)->add_at_head(&eq_list);
+ if (eq_list.elements)
+ ((Item_cond *) cond)->add_at_head(&eq_list);
}
cond->quick_fix_field();
@@ -7880,17 +8305,10 @@ static Item *eliminate_item_equal(COND *cond, COND_EQUAL *upper_levels,
}
-/*
- Substitute every field reference in a condition by the best equal field
- and eliminate all multiple equality predicates
-
- SYNOPSIS
- substitute_for_best_equal_field()
- cond condition to process
- cond_equal multiple equalities to take into consideration
- table_join_idx index to tables determining field preference
+/**
+ Substitute every field reference in a condition by the best equal field
+ and eliminate all multiple equality predicates.
- DESCRIPTION
The function retrieves the cond condition and for each encountered
multiple equality predicate it sorts the field references in it
according to the order of tables specified by the table_join_idx
@@ -7901,14 +8319,17 @@ static Item *eliminate_item_equal(COND *cond, COND_EQUAL *upper_levels,
After this the function retrieves all other conjuncted
predicates substitute every field reference by the field reference
to the first equal field or equal constant if there are any.
-
- NOTES
+ @param cond condition to process
+ @param cond_equal multiple equalities to take into consideration
+ @param table_join_idx index to tables determining field preference
+
+ @note
At the first glance full sort of fields in multiple equality
seems to be an overkill. Yet it's not the case due to possible
new fields in multiple equality item of lower levels. We want
the order in them to comply with the order of upper levels.
- RETURN
+ @return
The transformed condition
*/
@@ -7983,20 +8404,17 @@ static COND* substitute_for_best_equal_field(COND *cond,
}
-/*
+/**
Check appearance of new constant items in multiple equalities
- of a condition after reading a constant table
-
- SYNOPSIS
- update_const_equal_items()
- cond condition whose multiple equalities are to be checked
- table constant table that has been read
+ of a condition after reading a constant table.
- DESCRIPTION
The function retrieves the cond condition and for each encountered
multiple equality checks whether new constants have appeared after
reading the constant (single row) table tab. If so it adjusts
the multiple equality appropriately.
+
+ @param cond condition whose multiple equalities are to be checked
+ @param table constant table that has been read
*/
static void update_const_equal_items(COND *cond, JOIN_TAB *tab)
@@ -8030,6 +8448,22 @@ static void update_const_equal_items(COND *cond, JOIN_TAB *tab)
key_map possible_keys= field->key_start;
possible_keys.intersect(field->table->keys_in_use_for_query);
stat[0].const_keys.merge(possible_keys);
+
+ /*
+ For each field in the multiple equality (for which we know that it
+ is a constant) we have to find its corresponding key part, and set
+ that key part in const_key_parts.
+ */
+ if (!possible_keys.is_clear_all())
+ {
+ TABLE *tab= field->table;
+ KEYUSE *use;
+ for (use= stat->keyuse; use && use->table == tab; use++)
+ if (possible_keys.is_set(use->key) &&
+ tab->key_info[use->key].key_part[use->keypart].field ==
+ field)
+ tab->const_key_parts[use->key]|= use->keypart_map;
+ }
}
}
}
@@ -8119,14 +8553,12 @@ change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list,
}
}
-/*
- Remove additional condition inserted by IN/ALL/ANY transformation
+/**
+ Remove additional condition inserted by IN/ALL/ANY transformation.
- SYNOPSIS
- remove_additional_cond()
- conds Condition for processing
+ @param conds condition for processing
- RETURN VALUES
+ @return
new conditions
*/
@@ -8214,17 +8646,10 @@ propagate_cond_constants(THD *thd, I_List<COND_CMP> *save_list,
}
-/*
- Simplify joins replacing outer joins by inner joins whenever it's possible
-
- SYNOPSIS
- simplify_joins()
- join reference to the query info
- join_list list representation of the join to be converted
- conds conditions to add on expressions for converted joins
- top true <=> conds is the where condition
+/**
+ Simplify joins replacing outer joins by inner joins whenever it's
+ possible.
- DESCRIPTION
The function, during a retrieval of join_list, eliminates those
outer joins that can be converted into inner join, possibly nested.
It also moves the on expressions for the converted outer joins
@@ -8248,26 +8673,39 @@ propagate_cond_constants(THD *thd, I_List<COND_CMP> *save_list,
The function also removes all braces that can be removed from the join
expression without changing its meaning.
- NOTES
+ @note
An outer join can be replaced by an inner join if the where condition
or the on expression for an embedding nested join contains a conjunctive
predicate rejecting null values for some attribute of the inner tables.
E.g. in the query:
+ @code
SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t1.a WHERE t2.b < 5
+ @endcode
the predicate t2.b < 5 rejects nulls.
The query is converted first to:
+ @code
SELECT * FROM t1 INNER JOIN t2 ON t2.a=t1.a WHERE t2.b < 5
+ @endcode
then to the equivalent form:
- SELECT * FROM t1, t2 ON t2.a=t1.a WHERE t2.b < 5 AND t2.a=t1.a.
+ @code
+ SELECT * FROM t1, t2 ON t2.a=t1.a WHERE t2.b < 5 AND t2.a=t1.a
+ @endcode
+
Similarly the following query:
+ @code
+ SELECT * from t1 LEFT JOIN (t2, t3) ON t2.a=t1.a AND t3.b=t1.b
WHERE t2.c < 5
+ @endcode
is converted to:
+ @code
+ SELECT * FROM t1, (t2, t3) WHERE t2.c < 5 AND t2.a=t1.a AND t3.b=t1.b
+ @endcode
+
One conversion might trigger another:
+ @code
SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t1.a
LEFT JOIN t3 ON t3.b=t2.b
WHERE t3 IS NOT NULL =>
@@ -8275,16 +8713,26 @@ propagate_cond_constants(THD *thd, I_List<COND_CMP> *save_list,
WHERE t3 IS NOT NULL AND t3.b=t2.b =>
SELECT * FROM t1, t2, t3
WHERE t3 IS NOT NULL AND t3.b=t2.b AND t2.a=t1.a
-
+ @endcode
+
The function removes all unnecessary braces from the expression
produced by the conversions.
- E.g. SELECT * FROM t1, (t2, t3) WHERE t2.c < 5 AND t2.a=t1.a AND t3.b=t1.b
+ E.g.
+ @code
+ SELECT * FROM t1, (t2, t3) WHERE t2.c < 5 AND t2.a=t1.a AND t3.b=t1.b
+ @endcode
finally is converted to:
+ @code
SELECT * FROM t1, t2, t3 WHERE t2.c < 5 AND t2.a=t1.a AND t3.b=t1.b
+ @endcode
+
+
It also will remove braces from the following queries:
+ @code
SELECT * from (t1 LEFT JOIN t2 ON t2.a=t1.a) LEFT JOIN t3 ON t3.b=t2.b
SELECT * from (t1, (t2,t3)) WHERE t1.a=t2.a AND t2.b=t3.b.
+ @endcode
The benefit of this simplification procedure is that it might return
a query for which the optimizer can evaluate execution plan with more
@@ -8292,20 +8740,26 @@ propagate_cond_constants(THD *thd, I_List<COND_CMP> *save_list,
consider any plan where one of the inner tables is before some of outer
tables.
- IMPLEMENTATION.
+
The function is implemented by a recursive procedure. On the recursive
ascent all attributes are calculated, all outer joins that can be
converted are replaced and then all unnecessary braces are removed.
As join list contains join tables in the reverse order sequential
elimination of outer joins does not require extra recursive calls.
- EXAMPLES
Here is an example of a join query with invalid cross references:
+ @code
SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t3.a LEFT JOIN t3 ON t3.b=t1.b
-
- RETURN VALUE
- The new condition, if success
- 0, otherwise
+ @endcode
+
+ @param join reference to the query info
+ @param join_list list representation of the join to be converted
+ @param conds conditions to add on expressions for converted joins
+ @param top true <=> conds is the where condition
+
+ @return
+ - The new condition, if success
+ - 0, otherwise
*/
static COND *
@@ -8315,6 +8769,7 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top)
NESTED_JOIN *nested_join;
TABLE_LIST *prev_table= 0;
List_iterator<TABLE_LIST> li(*join_list);
+ bool straight_join= test(join->select_options & SELECT_STRAIGHT_JOIN);
DBUG_ENTER("simplify_joins");
/*
@@ -8425,7 +8880,7 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top)
if (prev_table)
{
/* The order of tables is reverse: prev_table follows table */
- if (prev_table->straight)
+ if (prev_table->straight || straight_join)
prev_table->dep_tables|= used_tables;
if (prev_table->on_expr)
{
@@ -8454,10 +8909,10 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top)
/* Flatten nested joins that can be flattened. */
TABLE_LIST *right_neighbor= NULL;
- bool fix_name_res= FALSE;
li.rewind();
while ((table= li++))
{
+ bool fix_name_res= FALSE;
nested_join= table->nested_join;
if (nested_join && !table->on_expr)
{
@@ -8483,26 +8938,23 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top)
}
-/*
- Assign each nested join structure a bit in nested_join_map
-
- SYNOPSIS
- build_bitmap_for_nested_joins()
- join Join being processed
- join_list List of tables
- first_unused Number of first unused bit in nested_join_map before the
- call
+/**
+ Assign each nested join structure a bit in nested_join_map.
- DESCRIPTION
Assign each nested join structure (except "confluent" ones - those that
embed only one element) a bit in nested_join_map.
- NOTE
+ @param join Join being processed
+ @param join_list List of tables
+ @param first_unused Number of first unused bit in nested_join_map before the
+ call
+
+ @note
This function is called after simplify_joins(), when there are no
redundant nested joins, #non_confluent_nested_joins <= #tables_in_join so
we will not run out of bits in nested_join_map.
- RETURN
+ @return
First unused bit in nested_join_map after the call.
*/
@@ -8538,17 +8990,14 @@ static uint build_bitmap_for_nested_joins(List<TABLE_LIST> *join_list,
}
-/*
- Set NESTED_JOIN::counter=0 in all nested joins in passed list
-
- SYNOPSIS
- reset_nj_counters()
- join_list List of nested joins to process. It may also contain base
- tables which will be ignored.
+/**
+ Set NESTED_JOIN::counter=0 in all nested joins in passed list.
- DESCRIPTION
Recursively set NESTED_JOIN::counter=0 for all nested joins contained in
the passed join_list.
+
+ @param join_list List of nested joins to process. It may also contain base
+ tables which will be ignored.
*/
static void reset_nj_counters(List<TABLE_LIST> *join_list)
@@ -8569,92 +9018,93 @@ static void reset_nj_counters(List<TABLE_LIST> *join_list)
}
-/*
- Check interleaving with an inner tables of an outer join for extension table
-
- SYNOPSIS
- check_interleaving_with_nj()
- next_tab Table we're going to extend the current partial join with
+/**
+ Check interleaving with an inner tables of an outer join for
+ extension table.
- DESCRIPTION
Check if table next_tab can be added to current partial join order, and
if yes, record that it has been added.
The function assumes that both current partial join order and its
extension with next_tab are valid wrt table dependencies.
- IMPLEMENTATION
- LIMITATIONS ON JOIN ORDER
- The nested [outer] joins executioner algorithm imposes these limitations
- on join order:
- 1. "Outer tables first" - any "outer" table must be before any
- corresponding "inner" table.
- 2. "No interleaving" - tables inside a nested join must form a continuous
- sequence in join order (i.e. the sequence must not be interrupted by
- tables that are outside of this nested join).
-
- #1 is checked elsewhere, this function checks #2 provided that #1 has
- been already checked.
-
- WHY NEED NON-INTERLEAVING
- Consider an example:
-
- select * from t0 join t1 left join (t2 join t3) on cond1
-
- The join order "t1 t2 t0 t3" is invalid:
-
- table t0 is outside of the nested join, so WHERE condition for t0 is
- attached directly to t0 (without triggers, and it may be used to access
- t0). Applying WHERE(t0) to (t2,t0,t3) record is invalid as we may miss
- combinations of (t1, t2, t3) that satisfy condition cond1, and produce a
- null-complemented (t1, t2.NULLs, t3.NULLs) row, which should not have
- been produced.
-
- If table t0 is not between t2 and t3, the problem doesn't exist:
- * If t0 is located after (t2,t3), WHERE(t0) is applied after nested join
- processing has finished.
- * If t0 is located before (t2,t3), predicates like WHERE_cond(t0, t2) are
- wrapped into condition triggers, which takes care of correct nested
- join processing.
-
- HOW IT IS IMPLEMENTED
- The limitations on join order can be rephrased as follows: for valid
- join order one must be able to:
- 1. write down the used tables in the join order on one line.
- 2. for each nested join, put one '(' and one ')' on the said line
- 3. write "LEFT JOIN" and "ON (...)" where appropriate
- 4. get a query equivalent to the query we're trying to execute.
-
- Calls to check_interleaving_with_nj() are equivalent to writing the
- above described line from left to right.
- A single check_interleaving_with_nj(A,B) call is equivalent to writing
- table B and appropriate brackets on condition that table A and
- appropriate brackets is the last what was written. Graphically the
- transition is as follows:
-
- +---- current position
- |
- ... last_tab ))) | ( next_tab ) )..) | ...
- X Y Z |
- +- need to move to this
- position.
-
- Notes about the position:
- The caller guarantees that there is no more then one X-bracket by
- checking "!(remaining_tables & s->dependent)" before calling this
- function. X-bracket may have a pair in Y-bracket.
-
- When "writing" we store/update this auxilary info about the current
- position:
- 1. join->cur_embedding_map - bitmap of pairs of brackets (aka nested
- joins) we've opened but didn't close.
- 2. {each NESTED_JOIN structure not simplified away}->counter - number
- of this nested join's children that have already been added to to
- the partial join order.
-
- RETURN
- FALSE Join order extended, nested joins info about current join order
- (see NOTE section) updated.
+ @verbatim
+ IMPLEMENTATION
+ LIMITATIONS ON JOIN ORDER
+ The nested [outer] joins executioner algorithm imposes these limitations
+ on join order:
+ 1. "Outer tables first" - any "outer" table must be before any
+ corresponding "inner" table.
+ 2. "No interleaving" - tables inside a nested join must form a continuous
+ sequence in join order (i.e. the sequence must not be interrupted by
+ tables that are outside of this nested join).
+
+ #1 is checked elsewhere, this function checks #2 provided that #1 has
+ been already checked.
+
+ WHY NEED NON-INTERLEAVING
+ Consider an example:
+
+ select * from t0 join t1 left join (t2 join t3) on cond1
+
+ The join order "t1 t2 t0 t3" is invalid:
+
+ table t0 is outside of the nested join, so WHERE condition for t0 is
+ attached directly to t0 (without triggers, and it may be used to access
+ t0). Applying WHERE(t0) to (t2,t0,t3) record is invalid as we may miss
+ combinations of (t1, t2, t3) that satisfy condition cond1, and produce a
+ null-complemented (t1, t2.NULLs, t3.NULLs) row, which should not have
+ been produced.
+
+ If table t0 is not between t2 and t3, the problem doesn't exist:
+ If t0 is located after (t2,t3), WHERE(t0) is applied after nested join
+ processing has finished.
+ If t0 is located before (t2,t3), predicates like WHERE_cond(t0, t2) are
+ wrapped into condition triggers, which takes care of correct nested
+ join processing.
+
+ HOW IT IS IMPLEMENTED
+ The limitations on join order can be rephrased as follows: for valid
+ join order one must be able to:
+ 1. write down the used tables in the join order on one line.
+ 2. for each nested join, put one '(' and one ')' on the said line
+ 3. write "LEFT JOIN" and "ON (...)" where appropriate
+ 4. get a query equivalent to the query we're trying to execute.
+
+ Calls to check_interleaving_with_nj() are equivalent to writing the
+ above described line from left to right.
+ A single check_interleaving_with_nj(A,B) call is equivalent to writing
+ table B and appropriate brackets on condition that table A and
+ appropriate brackets is the last what was written. Graphically the
+ transition is as follows:
+
+ +---- current position
+ |
+ ... last_tab ))) | ( next_tab ) )..) | ...
+ X Y Z |
+ +- need to move to this
+ position.
+
+ Notes about the position:
+ The caller guarantees that there is no more than one X-bracket by
+ checking "!(remaining_tables & s->dependent)" before calling this
+ function. X-bracket may have a pair in Y-bracket.
+
+ When "writing" we store/update this auxiliary info about the current
+ position:
+ 1. join->cur_embedding_map - bitmap of pairs of brackets (aka nested
+ joins) we've opened but didn't close.
+ 2. {each NESTED_JOIN structure not simplified away}->counter - number
+ of this nested join's children that have already been added to
+ the partial join order.
+ @endverbatim
+
+ @param next_tab Table we're going to extend the current partial join with
+
+ @retval
+ FALSE Join order extended, nested joins info about current join
+ order (see NOTE section) updated.
+ @retval
TRUE Requested join order extension not allowed.
*/
@@ -8703,8 +9153,8 @@ static bool check_interleaving_with_nj(JOIN_TAB *next_tab)
}
-/*
- Nested joins perspective: Remove the last table from the join order
+/**
+ Nested joins perspective: Remove the last table from the join order.
The algorithm is the reciprocal of check_interleaving_with_nj(), hence
parent join nest nodes are updated only when the last table in its child
@@ -8745,10 +9195,14 @@ static bool check_interleaving_with_nj(JOIN_TAB *next_tab)
partial join order.
DESCRIPTION
+
Remove the last table from the partial join order and update the nested
joins counters and join->cur_embedding_map. It is ok to call this
function for the first table in join order (for which
check_interleaving_with_nj has not been called)
+
+ @param last join table to remove, it is assumed to be the last in current
+ partial join order.
*/
static void restore_prev_nj_state(JOIN_TAB *last)
@@ -8792,10 +9246,10 @@ optimize_cond(JOIN *join, COND *conds, List<TABLE_LIST> *join_list,
predicate. Substitute a constant instead of this field if the
multiple equality contains a constant.
*/
- DBUG_EXECUTE("where", print_where(conds, "original"););
+ DBUG_EXECUTE("where", print_where(conds, "original", QT_ORDINARY););
conds= build_equal_items(join->thd, conds, NULL, join_list,
&join->cond_equal);
- DBUG_EXECUTE("where",print_where(conds,"after equal_items"););
+ DBUG_EXECUTE("where",print_where(conds,"after equal_items", QT_ORDINARY););
/* change field = field to field = const for each found field = const */
propagate_cond_constants(thd, (I_List<COND_CMP> *) 0, conds, conds);
@@ -8803,20 +9257,23 @@ optimize_cond(JOIN *join, COND *conds, List<TABLE_LIST> *join_list,
Remove all instances of item == item
Remove all and-levels where CONST item != CONST item
*/
- DBUG_EXECUTE("where",print_where(conds,"after const change"););
+ DBUG_EXECUTE("where",print_where(conds,"after const change", QT_ORDINARY););
conds= remove_eq_conds(thd, conds, cond_value) ;
- DBUG_EXECUTE("info",print_where(conds,"after remove"););
+ DBUG_EXECUTE("info",print_where(conds,"after remove", QT_ORDINARY););
}
DBUG_RETURN(conds);
}
-/*
- Remove const and eq items. Return new item, or NULL if no condition
- cond_value is set to according:
- COND_OK query is possible (field = constant)
- COND_TRUE always true ( 1 = 1 )
- COND_FALSE always false ( 1 = 2 )
+/**
+ Remove const and eq items.
+
+ @return
+ Return new item, or NULL if no condition @n
+ cond_value is set to according:
+ - COND_OK : query is possible (field = constant)
+ - COND_TRUE : always true ( 1 = 1 )
+ - COND_FALSE : always false ( 1 = 2 )
*/
COND *
@@ -8900,7 +9357,8 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value)
Field *field=((Item_field*) args[0])->field;
if (field->flags & AUTO_INCREMENT_FLAG && !field->table->maybe_null &&
(thd->options & OPTION_AUTO_IS_NULL) &&
- thd->current_insert_id && thd->substitute_null_with_insert_id)
+ (thd->first_successful_insert_id_in_prev_stmt > 0 &&
+ thd->substitute_null_with_insert_id))
{
#ifdef HAVE_QUERY_CACHE
query_cache_abort(&thd->net);
@@ -8908,16 +9366,9 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value)
COND *new_cond;
if ((new_cond= new Item_func_eq(args[0],
new Item_int("last_insert_id()",
- thd->current_insert_id,
- MY_INT64_NUM_DECIMAL_DIGITS))))
+ thd->read_first_successful_insert_id_in_prev_stmt(),
+ MY_INT64_NUM_DECIMAL_DIGITS))))
{
- /*
- Set THD::last_insert_id_used_bin_log manually, as this
- statement uses LAST_INSERT_ID() in a sense, and should
- issue LAST_INSERT_ID_EVENT.
- */
- thd->last_insert_id_used_bin_log= TRUE;
-
cond=new_cond;
/*
Item_func_eq can't be fixed after creation so we do not check
@@ -8926,11 +9377,15 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value)
*/
cond->fix_fields(thd, &cond);
}
- thd->substitute_null_with_insert_id= FALSE; // Clear for next request
+ /*
+ IS NULL should be mapped to LAST_INSERT_ID only for first row, so
+ clear for next row
+ */
+ thd->substitute_null_with_insert_id= FALSE;
}
/* fix to replace 'NULL' dates with '0' (shreeve@uci.edu) */
- else if (((field->type() == FIELD_TYPE_DATE) ||
- (field->type() == FIELD_TYPE_DATETIME)) &&
+ else if (((field->type() == MYSQL_TYPE_DATE) ||
+ (field->type() == MYSQL_TYPE_DATETIME)) &&
(field->flags & NOT_NULL_FLAG) &&
!field->table->maybe_null)
{
@@ -9009,8 +9464,8 @@ test_if_equality_guarantees_uniqueness(Item *l, Item *r)
l->collation.collation == r->collation.collation)));
}
-/*
- Return 1 if the item is a const value in all the WHERE clause
+/**
+ Return TRUE if the item is a const value in all the WHERE clause.
*/
static bool
@@ -9071,30 +9526,29 @@ const_expression_in_where(COND *cond, Item *comp_item, Item **const_item)
Create internal temporary table
****************************************************************************/
-/*
- Create field for temporary table from given field
-
- SYNOPSIS
- create_tmp_field_from_field()
- thd Thread handler
- org_field field from which new field will be created
- name New field name
- table Temporary table
- item !=NULL if item->result_field should point to new field.
- This is relevant for how fill_record() is going to work:
- If item != NULL then fill_record() will update
- the record in the original table.
- If item == NULL then fill_record() will update
- the temporary table
- convert_blob_length If >0 create a varstring(convert_blob_length) field
- instead of blob.
-
- RETURN
- 0 on error
+/**
+ Create field for temporary table from given field.
+
+ @param thd Thread handler
+ @param org_field field from which new field will be created
+ @param name New field name
+ @param table Temporary table
+ @param item !=NULL if item->result_field should point to new field.
+ This is relevant for how fill_record() is going to work:
+ If item != NULL then fill_record() will update
+ the record in the original table.
+ If item == NULL then fill_record() will update
+ the temporary table
+ @param convert_blob_length If >0 create a varstring(convert_blob_length)
+ field instead of blob.
+
+ @retval
+ NULL on error
+ @retval
new_created field
*/
-Field* create_tmp_field_from_field(THD *thd, Field* org_field,
+Field *create_tmp_field_from_field(THD *thd, Field *org_field,
const char *name, TABLE *table,
Item_field *item, uint convert_blob_length)
{
@@ -9108,13 +9562,15 @@ Field* create_tmp_field_from_field(THD *thd, Field* org_field,
(org_field->flags & BLOB_FLAG))
new_field= new Field_varstring(convert_blob_length,
org_field->maybe_null(),
- org_field->field_name, table,
+ org_field->field_name, table->s,
org_field->charset());
else
new_field= org_field->new_field(thd->mem_root, table,
table == org_field->table);
if (new_field)
{
+ new_field->init(table);
+ new_field->orig_table= org_field->orig_table;
if (item)
item->result_field= new_field;
else
@@ -9131,28 +9587,27 @@ Field* create_tmp_field_from_field(THD *thd, Field* org_field,
return new_field;
}
-/*
- Create field for temporary table using type of given item
-
- SYNOPSIS
- create_tmp_field_from_item()
- thd Thread handler
- item Item to create a field for
- table Temporary table
- copy_func If set and item is a function, store copy of item
- in this array
- modify_item 1 if item->result_field should point to new item.
- This is relevent for how fill_record() is going to
- work:
- If modify_item is 1 then fill_record() will update
- the record in the original table.
- If modify_item is 0 then fill_record() will update
- the temporary table
- convert_blob_length If >0 create a varstring(convert_blob_length) field
- instead of blob.
-
- RETURN
- 0 on error
+/**
+ Create field for temporary table using type of given item.
+
+ @param thd Thread handler
+ @param item Item to create a field for
+ @param table Temporary table
+ @param copy_func If set and item is a function, store copy of
+ item in this array
+ @param modify_item 1 if item->result_field should point to new
+ item. This is relevant for how fill_record()
+ is going to work:
+ If modify_item is 1 then fill_record() will
+ update the record in the original table.
+ If modify_item is 0 then fill_record() will
+ update the temporary table
+ @param convert_blob_length If >0 create a varstring(convert_blob_length)
+ field instead of blob.
+
+ @retval
+ 0 on error
+ @retval
new_created field
*/
@@ -9160,14 +9615,14 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
Item ***copy_func, bool modify_item,
uint convert_blob_length)
{
- bool maybe_null=item->maybe_null;
+ bool maybe_null= item->maybe_null;
Field *new_field;
LINT_INIT(new_field);
switch (item->result_type()) {
case REAL_RESULT:
- new_field=new Field_double(item->max_length, maybe_null,
- item->name, table, item->decimals, TRUE);
+ new_field= new Field_double(item->max_length, maybe_null,
+ item->name, item->decimals, TRUE);
break;
case INT_RESULT:
/*
@@ -9178,10 +9633,10 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
*/
if (item->max_length >= (MY_INT32_NUM_DECIMAL_DIGITS - 1))
new_field=new Field_longlong(item->max_length, maybe_null,
- item->name, table, item->unsigned_flag);
+ item->name, item->unsigned_flag);
else
new_field=new Field_long(item->max_length, maybe_null,
- item->name, table, item->unsigned_flag);
+ item->name, item->unsigned_flag);
break;
case STRING_RESULT:
DBUG_ASSERT(item->collation.collation);
@@ -9195,7 +9650,7 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
type == MYSQL_TYPE_TIME || type == MYSQL_TYPE_DATE ||
type == MYSQL_TYPE_NEWDATE ||
type == MYSQL_TYPE_TIMESTAMP || type == MYSQL_TYPE_GEOMETRY)
- new_field= item->tmp_table_field_from_field_type(table);
+ new_field= item->tmp_table_field_from_field_type(table, 1);
/*
Make sure that the blob fits into a Field_varstring which has
2-byte lenght.
@@ -9204,57 +9659,25 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
convert_blob_length <= Field_varstring::MAX_SIZE &&
convert_blob_length)
new_field= new Field_varstring(convert_blob_length, maybe_null,
- item->name, table,
+ item->name, table->s,
item->collation.collation);
else
new_field= item->make_string_field(table);
new_field->set_derivation(item->collation.derivation);
break;
case DECIMAL_RESULT:
- {
- uint8 dec= item->decimals;
- uint8 intg= ((Item_decimal *) item)->decimal_precision() - dec;
- uint32 len= item->max_length;
-
- /*
- Trying to put too many digits overall in a DECIMAL(prec,dec)
- will always throw a warning. We must limit dec to
- DECIMAL_MAX_SCALE however to prevent an assert() later.
- */
-
- if (dec > 0)
- {
- signed int overflow;
-
- dec= min(dec, DECIMAL_MAX_SCALE);
-
- /*
- If the value still overflows the field with the corrected dec,
- we'll throw out decimals rather than integers. This is still
- bad and of course throws a truncation warning.
- +1: for decimal point
- */
-
- overflow= my_decimal_precision_to_length(intg + dec, dec,
- item->unsigned_flag) - len;
-
- if (overflow > 0)
- dec= max(0, dec - overflow); // too long, discard fract
- else
- len -= item->decimals - dec; // corrected value fits
- }
-
- new_field= new Field_new_decimal(len, maybe_null, item->name,
- table, dec, item->unsigned_flag);
+ new_field= Field_new_decimal::create_from_item(item);
break;
- }
case ROW_RESULT:
default:
// This case should never be choosen
DBUG_ASSERT(0);
- new_field= 0; // to satisfy compiler (uninitialized variable)
+ new_field= 0;
break;
}
+ if (new_field)
+ new_field->init(table);
+
if (copy_func && item->is_result_field())
*((*copy_func)++) = item; // Save for copy_funcs
if (modify_item)
@@ -9265,17 +9688,16 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
}
-/*
- Create field for information schema table
+/**
+ Create field for information schema table.
- SYNOPSIS
- create_tmp_field_for_schema()
- thd Thread handler
- table Temporary table
- item Item to create a field for
+ @param thd Thread handler
+ @param table Temporary table
+ @param item Item to create a field for
- RETURN
+ @retval
0 on error
+ @retval
new_created field
*/
@@ -9283,44 +9705,48 @@ Field *create_tmp_field_for_schema(THD *thd, Item *item, TABLE *table)
{
if (item->field_type() == MYSQL_TYPE_VARCHAR)
{
- if (item->max_length > MAX_FIELD_VARCHARLENGTH /
- item->collation.collation->mbmaxlen)
- return new Field_blob(item->max_length, item->maybe_null,
- item->name, table, item->collation.collation);
- return new Field_varstring(item->max_length, item->maybe_null, item->name,
- table, item->collation.collation);
+ Field *field;
+ if (item->max_length > MAX_FIELD_VARCHARLENGTH)
+ field= new Field_blob(item->max_length, item->maybe_null,
+ item->name, item->collation.collation);
+ else
+ field= new Field_varstring(item->max_length, item->maybe_null,
+ item->name,
+ table->s, item->collation.collation);
+ if (field)
+ field->init(table);
+ return field;
}
- return item->tmp_table_field_from_field_type(table);
+ return item->tmp_table_field_from_field_type(table, 0);
}
-/*
- Create field for temporary table
-
- SYNOPSIS
- create_tmp_field()
- thd Thread handler
- table Temporary table
- item Item to create a field for
- type Type of item (normally item->type)
- copy_func If set and item is a function, store copy of item
- in this array
- from_field if field will be created using other field as example,
- pointer example field will be written here
- default_field If field has a default value field, store it here
- group 1 if we are going to do a relative group by on result
- modify_item 1 if item->result_field should point to new item.
- This is relevent for how fill_record() is going to
- work:
- If modify_item is 1 then fill_record() will update
- the record in the original table.
- If modify_item is 0 then fill_record() will update
- the temporary table
- convert_blob_length If >0 create a varstring(convert_blob_length) field
- instead of blob.
-
- RETURN
+/**
+ Create field for temporary table.
+
+ @param thd Thread handler
+ @param table Temporary table
+ @param item Item to create a field for
+ @param type Type of item (normally item->type)
+ @param copy_func If set and item is a function, store copy of item
+ in this array
+ @param from_field if field will be created using other field as example,
+ pointer example field will be written here
+ @param default_field If field has a default value field, store it here
+ @param group 1 if we are going to do a relative group by on result
+ @param modify_item 1 if item->result_field should point to new item.
+ This is relevent for how fill_record() is going to
+ work:
+ If modify_item is 1 then fill_record() will update
+ the record in the original table.
+ If modify_item is 0 then fill_record() will update
+ the temporary table
+ @param convert_blob_length If >0 create a varstring(convert_blob_length)
+ field instead of blob.
+
+ @retval
0 on error
+ @retval
new_created field
*/
@@ -9343,6 +9769,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
item= item->real_item();
type= Item::FIELD_ITEM;
}
+
switch (type) {
case Item::SUM_FUNC_ITEM:
{
@@ -9372,7 +9799,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
field->result_field= result;
}
else if (table_cant_handle_bit_fields && field->field->type() ==
- FIELD_TYPE_BIT)
+ MYSQL_TYPE_BIT)
{
*from_field= field->field;
result= create_tmp_field_from_item(thd, item, table, copy_func,
@@ -9396,6 +9823,36 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
}
/* Fall through */
case Item::FUNC_ITEM:
+ if (((Item_func *) item)->functype() == Item_func::FUNC_SP)
+ {
+ Item_func_sp *item_func_sp= (Item_func_sp *) item;
+ Field *sp_result_field= item_func_sp->get_sp_result_field();
+
+ if (make_copy_field)
+ {
+ DBUG_ASSERT(item_func_sp->result_field);
+ *from_field= item_func_sp->result_field;
+ }
+ else
+ {
+ *((*copy_func)++)= item;
+ }
+
+ Field *result_field=
+ create_tmp_field_from_field(thd,
+ sp_result_field,
+ item_func_sp->name,
+ table,
+ NULL,
+ convert_blob_length);
+
+ if (modify_item)
+ item->set_result_field(result_field);
+
+ return result_field;
+ }
+
+ /* Fall through */
case Item::COND_ITEM:
case Item::FIELD_AVG_ITEM:
case Item::FIELD_STD_ITEM:
@@ -9426,34 +9883,54 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
}
}
-
/*
- Create a temp table according to a field list.
+ Set up column usage bitmaps for a temporary table
- SYNOPSIS
- create_tmp_table()
- thd thread handle
- param a description used as input to create the table
- fields list of items that will be used to define
- column types of the table (also see NOTES)
- group TODO document
- distinct should table rows be distinct
- save_sum_fields see NOTES
- select_options
- rows_limit
- table_alias possible name of the temporary table that can be used
- for name resolving; can be "".
+ IMPLEMENTATION
+ For temporary tables, we need one bitmap with all columns set and
+ a tmp_set bitmap to be used by things like filesort.
+*/
- DESCRIPTION
- Given field pointers are changed to point at tmp_table for
- send_fields. The table object is self contained: it's
- allocated in its own memory root, as well as Field objects
- created for table columns.
- This function will replace Item_sum items in 'fields' list with
- corresponding Item_field items, pointing at the fields in the
- temporary table, unless this was prohibited by TRUE
- value of argument save_sum_fields. The Item_field objects
- are created in THD memory root.
+void setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps)
+{
+ uint field_count= table->s->fields;
+ bitmap_init(&table->def_read_set, (my_bitmap_map*) bitmaps, field_count,
+ FALSE);
+ bitmap_init(&table->tmp_set,
+ (my_bitmap_map*) (bitmaps+ bitmap_buffer_size(field_count)),
+ field_count, FALSE);
+ /* write_set and all_set are copies of read_set */
+ table->def_write_set= table->def_read_set;
+ table->s->all_set= table->def_read_set;
+ bitmap_set_all(&table->s->all_set);
+ table->default_column_bitmaps();
+}
+
+
+/**
+ Create a temp table according to a field list.
+
+ Given field pointers are changed to point at tmp_table for
+ send_fields. The table object is self contained: it's
+ allocated in its own memory root, as well as Field objects
+ created for table columns.
+ This function will replace Item_sum items in 'fields' list with
+ corresponding Item_field items, pointing at the fields in the
+ temporary table, unless this was prohibited by TRUE
+ value of argument save_sum_fields. The Item_field objects
+ are created in THD memory root.
+
+ @param thd thread handle
+ @param param a description used as input to create the table
+ @param fields list of items that will be used to define
+ column types of the table (also see NOTES)
+ @param group TODO document
+ @param distinct should table rows be distinct
+ @param save_sum_fields see NOTES
+ @param select_options
+ @param rows_limit
+ @param table_alias possible name of the temporary table that can
+ be used for name resolving; can be "".
*/
#define STRING_TOTAL_LENGTH_TO_PACK_ROWS 128
@@ -9469,17 +9946,19 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
{
MEM_ROOT *mem_root_save, own_root;
TABLE *table;
+ TABLE_SHARE *share;
uint i,field_count,null_count,null_pack_length;
uint copy_func_count= param->func_count;
uint hidden_null_count, hidden_null_pack_length, hidden_field_count;
uint blob_count,group_null_items, string_count;
uint temp_pool_slot=MY_BIT_NONE;
+ uint fieldnr= 0;
ulong reclength, string_total_length;
bool using_unique_constraint= 0;
bool use_packed_rows= 0;
bool not_all_columns= !(select_options & TMP_TABLE_ALL_COLUMNS);
char *tmpname,path[FN_REFLEN];
- byte *pos,*group_buff;
+ uchar *pos, *group_buff, *bitmaps;
uchar *null_flags;
Field **reg_field, **from_field, **default_field;
uint *blob_field;
@@ -9488,19 +9967,24 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
KEY_PART_INFO *key_part_info;
Item **copy_func;
MI_COLUMNDEF *recinfo;
- uint total_uneven_bit_length= 0;
+ /*
+ total_uneven_bit_length is uneven bit length for visible fields
+ hidden_uneven_bit_length is uneven bit length for hidden fields
+ */
+ uint total_uneven_bit_length= 0, hidden_uneven_bit_length= 0;
bool force_copy_fields= param->force_copy_fields;
/* Treat sum functions as normal ones when loose index scan is used. */
save_sum_fields|= param->precomputed_group_by;
DBUG_ENTER("create_tmp_table");
- DBUG_PRINT("enter",("distinct: %d save_sum_fields: %d rows_limit: %lu group: %d",
- (int) distinct, (int) save_sum_fields,
- (ulong) rows_limit,test(group)));
+ DBUG_PRINT("enter",
+ ("distinct: %d save_sum_fields: %d rows_limit: %lu group: %d",
+ (int) distinct, (int) save_sum_fields,
+ (ulong) rows_limit,test(group)));
- statistic_increment(thd->status_var.created_tmp_tables, &LOCK_status);
+ status_var_increment(thd->status_var.created_tmp_tables);
if (use_temp_pool && !(test_flags & TEST_KEEP_TMP_TABLES))
- temp_pool_slot = bitmap_set_next(&temp_pool);
+ temp_pool_slot = bitmap_lock_set_next(&temp_pool);
if (temp_pool_slot != MY_BIT_NONE) // we got a slot
sprintf(path, "%s_%lx_%i", tmp_file_prefix,
@@ -9552,6 +10036,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
if (!multi_alloc_root(&own_root,
&table, sizeof(*table),
+ &share, sizeof(*share),
&reg_field, sizeof(Field*) * (field_count+1),
&default_field, sizeof(Field*) * (field_count),
&blob_field, sizeof(uint)*(field_count+1),
@@ -9563,19 +10048,20 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
&param->start_recinfo,
sizeof(*param->recinfo)*(field_count*2+4),
&tmpname, (uint) strlen(path)+1,
- &group_buff, group && ! using_unique_constraint ?
- param->group_length : 0,
+ &group_buff, (group && ! using_unique_constraint ?
+ param->group_length : 0),
+ &bitmaps, bitmap_buffer_size(field_count)*2,
NullS))
{
if (temp_pool_slot != MY_BIT_NONE)
- bitmap_clear_bit(&temp_pool, temp_pool_slot);
+ bitmap_lock_clear_bit(&temp_pool, temp_pool_slot);
DBUG_RETURN(NULL); /* purecov: inspected */
}
/* Copy_field belongs to TMP_TABLE_PARAM, allocate it in THD mem_root */
if (!(param->copy_field= copy= new (thd->mem_root) Copy_field[field_count]))
{
if (temp_pool_slot != MY_BIT_NONE)
- bitmap_clear_bit(&temp_pool, temp_pool_slot);
+ bitmap_lock_clear_bit(&temp_pool, temp_pool_slot);
free_root(&own_root, MYF(0)); /* purecov: inspected */
DBUG_RETURN(NULL); /* purecov: inspected */
}
@@ -9601,21 +10087,19 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
table->copy_blobs= 1;
table->in_use= thd;
table->quick_keys.init();
- table->used_keys.init();
+ table->covering_keys.init();
+ table->merge_keys.init();
table->keys_in_use_for_query.init();
- table->s= &table->share_not_to_be_used;
- table->s->blob_field= blob_field;
- table->s->table_name= table->s->path= tmpname;
- table->s->db= "";
- table->s->blob_ptr_size= mi_portable_sizeof_char_ptr;
- table->s->tmp_table= NON_TRANSACTIONAL_TMP_TABLE;
- table->s->db_low_byte_first=1; // True for HEAP and MyISAM
- table->s->table_charset= param->table_charset;
- table->s->keys_for_keyread.init();
- table->s->keys_in_use.init();
- /* For easier error reporting */
- table->s->table_cache_key= (char*) (table->s->db= "");
+ table->s= share;
+ init_tmp_table_share(thd, share, "", 0, tmpname, tmpname);
+ share->blob_field= blob_field;
+ share->blob_ptr_size= portable_sizeof_char_ptr;
+ share->db_low_byte_first=1; // True for HEAP and MyISAM
+ share->table_charset= param->table_charset;
+ share->primary_key= MAX_KEY; // Indicate no primary key
+ share->keys_for_keyread.init();
+ share->keys_in_use.init();
/* Calculate which type of fields we will store in the temporary table */
@@ -9659,10 +10143,9 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
Item *arg= sum_item->get_arg(i);
if (!arg->const_item())
{
- uint field_index= (uint) (reg_field - table->field);
Field *new_field=
create_tmp_field(thd, table, arg, arg->type(), &copy_func,
- tmp_from_field, &default_field[field_index],
+ tmp_from_field, &default_field[fieldnr],
group != 0,not_all_columns,
distinct, 0,
param->convert_blob_length);
@@ -9672,12 +10155,11 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
reclength+=new_field->pack_length();
if (new_field->flags & BLOB_FLAG)
{
- *blob_field++= field_index;
+ *blob_field++= fieldnr;
blob_count++;
}
- if (new_field->type() == FIELD_TYPE_BIT)
+ if (new_field->type() == MYSQL_TYPE_BIT)
total_uneven_bit_length+= new_field->field_length & 7;
- new_field->field_index= field_index;
*(reg_field++)= new_field;
if (new_field->real_type() == MYSQL_TYPE_STRING ||
new_field->real_type() == MYSQL_TYPE_VARCHAR)
@@ -9697,13 +10179,12 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
*/
arg->maybe_null=1;
}
- new_field->query_id= thd->query_id;
+ new_field->field_index= fieldnr++;
}
}
}
else
{
- uint field_index= (uint) (reg_field - table->field);
/*
The last parameter to create_tmp_field() is a bit tricky:
@@ -9720,7 +10201,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
Field *new_field= (param->schema_table) ?
create_tmp_field_for_schema(thd, item, table) :
create_tmp_field(thd, table, item, type, &copy_func,
- tmp_from_field, &default_field[field_index],
+ tmp_from_field, &default_field[fieldnr],
group != 0,
!force_copy_fields &&
(not_all_columns || group !=0),
@@ -9739,11 +10220,11 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
reclength+=new_field->pack_length();
if (!(new_field->flags & NOT_NULL_FLAG))
null_count++;
- if (new_field->type() == FIELD_TYPE_BIT)
+ if (new_field->type() == MYSQL_TYPE_BIT)
total_uneven_bit_length+= new_field->field_length & 7;
if (new_field->flags & BLOB_FLAG)
{
- *blob_field++= field_index;
+ *blob_field++= fieldnr;
blob_count++;
}
if (item->marker == 4 && item->maybe_null)
@@ -9751,9 +10232,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
group_null_items++;
new_field->flags|= GROUP_FLAG;
}
- new_field->query_id= thd->query_id;
- new_field->field_index= field_index;
- *(reg_field++) =new_field;
+ new_field->field_index= fieldnr++;
+ *(reg_field++)= new_field;
}
if (!--hidden_field_count)
{
@@ -9766,22 +10246,34 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
We need to update hidden_field_count as we may have stored group
functions with constant arguments
*/
- param->hidden_field_count= (uint) (reg_field - table->field);
+ param->hidden_field_count= fieldnr;
null_count= 0;
+ /*
+ On last hidden field we store uneven bit length in
+ hidden_uneven_bit_length and proceed calculation of
+ uneven bits for visible fields into
+ total_uneven_bit_length variable.
+ */
+ hidden_uneven_bit_length= total_uneven_bit_length;
+ total_uneven_bit_length= 0;
}
}
+ DBUG_ASSERT(fieldnr == (uint) (reg_field - table->field));
DBUG_ASSERT(field_count >= (uint) (reg_field - table->field));
- field_count= (uint) (reg_field - table->field);
+ field_count= fieldnr;
*reg_field= 0;
*blob_field= 0; // End marker
+ share->fields= field_count;
/* If result table is small; use a heap */
+ /* future: storage engine selection can be made dynamic? */
if (blob_count || using_unique_constraint ||
(select_options & (OPTION_BIG_TABLES | SELECT_SMALL_RESULT)) ==
OPTION_BIG_TABLES || (select_options & TMP_TABLE_FORCE_MYISAM))
{
- table->file= get_new_handler(table, &table->mem_root,
- table->s->db_type= DB_TYPE_MYISAM);
+ share->db_plugin= ha_lock_engine(0, myisam_hton);
+ table->file= get_new_handler(share, &table->mem_root,
+ share->db_type());
if (group &&
(param->group_parts > table->file->max_key_parts() ||
param->group_length > table->file->max_key_length()))
@@ -9789,14 +10281,18 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
}
else
{
- table->file= get_new_handler(table, &table->mem_root,
- table->s->db_type= DB_TYPE_HEAP);
+ share->db_plugin= ha_lock_engine(0, heap_hton);
+ table->file= get_new_handler(share, &table->mem_root,
+ share->db_type());
}
+ if (!table->file)
+ goto err;
+
if (!using_unique_constraint)
reclength+= group_null_items; // null flag is stored separately
- table->s->blob_fields= blob_count;
+ share->blob_fields= blob_count;
if (blob_count == 0)
{
/* We need to ensure that first byte is not 0 for the delete link */
@@ -9805,7 +10301,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
else
null_count++;
}
- hidden_null_pack_length=(hidden_null_count+7)/8;
+ hidden_null_pack_length= (hidden_null_count + 7 +
+ hidden_uneven_bit_length) / 8;
null_pack_length= (hidden_null_pack_length +
(null_count + total_uneven_bit_length + 7) / 8);
reclength+=null_pack_length;
@@ -9818,34 +10315,35 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
string_total_length / string_count >= AVG_STRING_LENGTH_TO_PACK_ROWS)))
use_packed_rows= 1;
- table->s->fields= field_count;
- table->s->reclength= reclength;
+ share->reclength= reclength;
{
uint alloc_length=ALIGN_SIZE(reclength+MI_UNIQUE_HASH_LENGTH+1);
- table->s->rec_buff_length= alloc_length;
- if (!(table->record[0]= (byte*)
+ share->rec_buff_length= alloc_length;
+ if (!(table->record[0]= (uchar*)
alloc_root(&table->mem_root, alloc_length*3)))
goto err;
table->record[1]= table->record[0]+alloc_length;
- table->s->default_values= table->record[1]+alloc_length;
+ share->default_values= table->record[1]+alloc_length;
}
copy_func[0]=0; // End marker
- param->func_count= (uint) (copy_func - param->items_to_copy);
+ param->func_count= copy_func - param->items_to_copy;
+
+ setup_tmp_table_column_bitmaps(table, bitmaps);
recinfo=param->start_recinfo;
null_flags=(uchar*) table->record[0];
pos=table->record[0]+ null_pack_length;
if (null_pack_length)
{
- bzero((byte*) recinfo,sizeof(*recinfo));
+ bzero((uchar*) recinfo,sizeof(*recinfo));
recinfo->type=FIELD_NORMAL;
recinfo->length=null_pack_length;
recinfo++;
bfill(null_flags,null_pack_length,255); // Set null fields
table->null_flags= (uchar*) table->record[0];
- table->s->null_fields= null_count+ hidden_null_count;
- table->s->null_bytes= null_pack_length;
+ share->null_fields= null_count+ hidden_null_count;
+ share->null_bytes= null_pack_length;
}
null_count= (blob_count == 0) ? 1 : 0;
hidden_field_count=param->hidden_field_count;
@@ -9853,7 +10351,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
{
Field *field= *reg_field;
uint length;
- bzero((byte*) recinfo,sizeof(*recinfo));
+ bzero((uchar*) recinfo,sizeof(*recinfo));
if (!(field->flags & NOT_NULL_FLAG))
{
@@ -9867,20 +10365,20 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
recinfo->length=1;
recinfo->type=FIELD_NORMAL;
recinfo++;
- bzero((byte*) recinfo,sizeof(*recinfo));
+ bzero((uchar*) recinfo,sizeof(*recinfo));
}
else
{
recinfo->null_bit= 1 << (null_count & 7);
recinfo->null_pos= null_count/8;
}
- field->move_field((char*) pos,null_flags+null_count/8,
+ field->move_field(pos,null_flags+null_count/8,
1 << (null_count & 7));
null_count++;
}
else
- field->move_field((char*) pos,(uchar*) 0,0);
- if (field->type() == FIELD_TYPE_BIT)
+ field->move_field(pos,(uchar*) 0,0);
+ if (field->type() == MYSQL_TYPE_BIT)
{
/* We have to reserve place for extra bits among null bits */
((Field_bit*) field)->set_bit_ptr(null_flags + null_count / 8,
@@ -9895,18 +10393,17 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
*/
if (default_field[i] && default_field[i]->ptr)
{
- /*
- default_field[i] is set only in the cases when 'field' can
- inherit the default value that is defined for the field referred
- by the Item_field object from which 'field' has been created.
+ /*
+ default_field[i] is set only in the cases when 'field' can
+ inherit the default value that is defined for the field referred
+ by the Item_field object from which 'field' has been created.
*/
my_ptrdiff_t diff;
Field *orig_field= default_field[i];
-
/* Get the value from default_values */
diff= (my_ptrdiff_t) (orig_field->table->s->default_values-
orig_field->table->record[0]);
- orig_field->move_field(diff); // Points now at default_values
+ orig_field->move_field_offset(diff); // Points now at default_values
if (orig_field->is_real_null())
field->set_null();
else
@@ -9914,8 +10411,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
field->set_notnull();
memcpy(field->ptr, orig_field->ptr, field->pack_length());
}
- orig_field->move_field(-diff); // Back to record[0]
- }
+ orig_field->move_field_offset(-diff); // Back to record[0]
+ }
if (from_field[i])
{ /* Not a table Item */
@@ -9947,19 +10444,19 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
store_record(table,s->default_values); // Make empty default record
if (thd->variables.tmp_table_size == ~ (ulonglong) 0) // No limit
- table->s->max_rows= ~(ha_rows) 0;
+ share->max_rows= ~(ha_rows) 0;
else
- table->s->max_rows= (ha_rows) (((table->s->db_type == DB_TYPE_HEAP) ?
- min(thd->variables.tmp_table_size,
- thd->variables.max_heap_table_size) :
- thd->variables.tmp_table_size)/
- table->s->reclength);
- set_if_bigger(table->s->max_rows,1); // For dummy start options
+ share->max_rows= (ha_rows) (((share->db_type() == heap_hton) ?
+ min(thd->variables.tmp_table_size,
+ thd->variables.max_heap_table_size) :
+ thd->variables.tmp_table_size) /
+ share->reclength);
+ set_if_bigger(share->max_rows,1); // For dummy start options
/*
Push the LIMIT clause to the temporary table creation, so that we
materialize only up to 'rows_limit' records instead of all result records.
*/
- set_if_smaller(table->s->max_rows, rows_limit);
+ set_if_smaller(share->max_rows, rows_limit);
param->end_write_records= rows_limit;
keyinfo= param->keyinfo;
@@ -9969,8 +10466,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
DBUG_PRINT("info",("Creating group key in temporary table"));
table->group=group; /* Table is grouped by key */
param->group_buff=group_buff;
- table->s->keys=1;
- table->s->uniques= test(using_unique_constraint);
+ share->keys=1;
+ share->uniques= test(using_unique_constraint);
table->key_info=keyinfo;
keyinfo->key_part=key_part_info;
keyinfo->flags=HA_NOSAME;
@@ -9987,7 +10484,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
bool maybe_null=(*cur_group->item)->maybe_null;
key_part_info->null_bit=0;
key_part_info->field= field;
- key_part_info->offset= field->offset();
+ key_part_info->offset= field->offset(table->record[0]);
key_part_info->length= (uint16) field->key_length();
key_part_info->type= (uint8) field->key_type();
key_part_info->key_type =
@@ -9999,10 +10496,10 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
{
cur_group->buff=(char*) group_buff;
if (!(cur_group->field= field->new_key_field(thd->mem_root,table,
- (char*) group_buff +
- test(maybe_null),
- field->null_ptr,
- field->null_bit)))
+ group_buff +
+ test(maybe_null),
+ field->null_ptr,
+ field->null_bit)))
goto err; /* purecov: inspected */
if (maybe_null)
{
@@ -10041,11 +10538,11 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
keyinfo->key_parts= ((field_count-param->hidden_field_count)+
test(null_pack_length));
table->distinct= 1;
- table->s->keys= 1;
+ share->keys= 1;
if (blob_count)
{
using_unique_constraint=1;
- table->s->uniques= 1;
+ share->uniques= 1;
}
if (!(key_part_info= (KEY_PART_INFO*)
alloc_root(&table->mem_root,
@@ -10064,12 +10561,15 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
key_part_info->null_bit=0;
key_part_info->offset=hidden_null_pack_length;
key_part_info->length=null_pack_length;
- key_part_info->field=new Field_string((char*) table->record[0],
- (uint32) key_part_info->length,
- (uchar*) 0,
- (uint) 0,
- Field::NONE,
- NullS, table, &my_charset_bin);
+ key_part_info->field= new Field_string(table->record[0],
+ (uint32) key_part_info->length,
+ (uchar*) 0,
+ (uint) 0,
+ Field::NONE,
+ NullS, &my_charset_bin);
+ if (!key_part_info->field)
+ goto err;
+ key_part_info->field->init(table);
key_part_info->key_type=FIELDFLAG_BINARY;
key_part_info->type= HA_KEYTYPE_BINARY;
key_part_info++;
@@ -10081,7 +10581,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
{
key_part_info->null_bit=0;
key_part_info->field= *reg_field;
- key_part_info->offset= (*reg_field)->offset();
+ key_part_info->offset= (*reg_field)->offset(table->record[0]);
key_part_info->length= (uint16) (*reg_field)->pack_length();
key_part_info->type= (uint8) (*reg_field)->key_type();
key_part_info->key_type =
@@ -10094,8 +10594,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
if (thd->is_fatal_error) // If end of memory
goto err; /* purecov: inspected */
- table->s->db_record_offset= 1;
- if (table->s->db_type == DB_TYPE_MYISAM)
+ share->db_record_offset= 1;
+ if (share->db_type() == myisam_hton)
{
if (create_myisam_tmp_table(table,param,select_options))
goto err;
@@ -10111,24 +10611,18 @@ err:
thd->mem_root= mem_root_save;
free_tmp_table(thd,table); /* purecov: inspected */
if (temp_pool_slot != MY_BIT_NONE)
- bitmap_clear_bit(&temp_pool, temp_pool_slot);
+ bitmap_lock_clear_bit(&temp_pool, temp_pool_slot);
DBUG_RETURN(NULL); /* purecov: inspected */
}
/****************************************************************************/
-/*
+/**
Create a reduced TABLE object with properly set up Field list from a
list of field definitions.
- SYNOPSIS
- create_virtual_tmp_table()
- thd connection handle
- field_list list of column definitions
-
- DESCRIPTION
- The created table doesn't have a table handler assotiated with
+ The created table doesn't have a table handler associated with
it, has no keys, no group/distinct, no copy_funcs array.
The sole purpose of this TABLE object is to use the power of Field
class to read/write data to/from table->record[0]. Then one can store
@@ -10136,90 +10630,100 @@ err:
The table is created in THD mem_root, so are the table's fields.
Consequently, if you don't BLOB fields, you don't need to free it.
- RETURN
+ @param thd connection handle
+ @param field_list list of column definitions
+
+ @return
0 if out of memory, TABLE object in case of success
*/
-TABLE *create_virtual_tmp_table(THD *thd, List<create_field> &field_list)
+TABLE *create_virtual_tmp_table(THD *thd, List<Create_field> &field_list)
{
uint field_count= field_list.elements;
uint blob_count= 0;
Field **field;
- create_field *cdef; /* column definition */
+ Create_field *cdef; /* column definition */
uint record_length= 0;
uint null_count= 0; /* number of columns which may be null */
uint null_pack_length; /* NULL representation array length */
- TABLE_SHARE *s;
- /* Create the table and list of all fields */
- TABLE *table= (TABLE*) thd->calloc(sizeof(*table));
- field= (Field**) thd->alloc((field_count + 1) * sizeof(Field*));
- if (!table || !field)
- return 0;
-
- table->field= field;
- table->s= s= &table->share_not_to_be_used;
- s->fields= field_count;
+ uint *blob_field;
+ uchar *bitmaps;
+ TABLE *table;
+ TABLE_SHARE *share;
- if (!(s->blob_field= (uint*)thd->alloc((field_list.elements + 1) *
- sizeof(uint))))
+ if (!multi_alloc_root(thd->mem_root,
+ &table, sizeof(*table),
+ &share, sizeof(*share),
+ &field, (field_count + 1) * sizeof(Field*),
+ &blob_field, (field_count+1) *sizeof(uint),
+ &bitmaps, bitmap_buffer_size(field_count)*2,
+ NullS))
return 0;
- s->blob_ptr_size= mi_portable_sizeof_char_ptr;
+ bzero(table, sizeof(*table));
+ bzero(share, sizeof(*share));
+ table->field= field;
+ table->s= share;
+ share->blob_field= blob_field;
+ share->fields= field_count;
+ share->blob_ptr_size= portable_sizeof_char_ptr;
+ setup_tmp_table_column_bitmaps(table, bitmaps);
/* Create all fields and calculate the total length of record */
- List_iterator_fast<create_field> it(field_list);
+ List_iterator_fast<Create_field> it(field_list);
while ((cdef= it++))
{
- *field= make_field(0, cdef->length,
+ *field= make_field(share, 0, cdef->length,
(uchar*) (f_maybe_null(cdef->pack_flag) ? "" : 0),
f_maybe_null(cdef->pack_flag) ? 1 : 0,
cdef->pack_flag, cdef->sql_type, cdef->charset,
cdef->geom_type, cdef->unireg_check,
- cdef->interval, cdef->field_name, table);
+ cdef->interval, cdef->field_name);
if (!*field)
goto error;
- record_length+= (**field).pack_length();
- if (! ((**field).flags & NOT_NULL_FLAG))
- ++null_count;
+ (*field)->init(table);
+ record_length+= (*field)->pack_length();
+ if (! ((*field)->flags & NOT_NULL_FLAG))
+ null_count++;
if ((*field)->flags & BLOB_FLAG)
- s->blob_field[blob_count++]= (uint) (field - table->field);
+ share->blob_field[blob_count++]= (uint) (field - table->field);
- ++field;
+ field++;
}
*field= NULL; /* mark the end of the list */
- s->blob_field[blob_count]= 0; /* mark the end of the list */
- s->blob_fields= blob_count;
+ share->blob_field[blob_count]= 0; /* mark the end of the list */
+ share->blob_fields= blob_count;
null_pack_length= (null_count + 7)/8;
- s->reclength= record_length + null_pack_length;
- s->rec_buff_length= ALIGN_SIZE(s->reclength + 1);
- table->record[0]= (byte*) thd->alloc(s->rec_buff_length);
+ share->reclength= record_length + null_pack_length;
+ share->rec_buff_length= ALIGN_SIZE(share->reclength + 1);
+ table->record[0]= (uchar*) thd->alloc(share->rec_buff_length);
if (!table->record[0])
goto error;
if (null_pack_length)
{
table->null_flags= (uchar*) table->record[0];
- s->null_fields= null_count;
- s->null_bytes= null_pack_length;
+ share->null_fields= null_count;
+ share->null_bytes= null_pack_length;
}
table->in_use= thd; /* field->reset() may access table->in_use */
{
/* Set up field pointers */
- byte *null_pos= table->record[0];
- byte *field_pos= null_pos + s->null_bytes;
+ uchar *null_pos= table->record[0];
+ uchar *field_pos= null_pos + share->null_bytes;
uint null_bit= 1;
for (field= table->field; *field; ++field)
{
Field *cur_field= *field;
if ((cur_field->flags & NOT_NULL_FLAG))
- cur_field->move_field((char*) field_pos);
+ cur_field->move_field(field_pos);
else
{
- cur_field->move_field((char*) field_pos, (uchar*) null_pos, null_bit);
+ cur_field->move_field(field_pos, (uchar*) null_pos, null_bit);
null_bit<<= 1;
if (null_bit == (1 << 8))
{
@@ -10243,8 +10747,8 @@ error:
static bool open_tmp_table(TABLE *table)
{
int error;
- if ((error=table->file->ha_open(table->s->table_name,O_RDWR,
- HA_OPEN_TMP_TABLE)))
+ if ((error=table->file->ha_open(table, table->s->table_name.str,O_RDWR,
+ HA_OPEN_TMP_TABLE | HA_OPEN_INTERNAL_TABLE)))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
table->db_stat=0;
@@ -10262,9 +10766,10 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
MI_KEYDEF keydef;
MI_UNIQUEDEF uniquedef;
KEY *keyinfo=param->keyinfo;
+ TABLE_SHARE *share= table->s;
DBUG_ENTER("create_myisam_tmp_table");
- if (table->s->keys)
+ if (share->keys)
{ // Get keys for ni_create
bool using_unique_constraint=0;
HA_KEYSEG *seg= (HA_KEYSEG*) alloc_root(&table->mem_root,
@@ -10275,11 +10780,11 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
bzero(seg, sizeof(*seg) * keyinfo->key_parts);
if (keyinfo->key_length >= table->file->max_key_length() ||
keyinfo->key_parts > table->file->max_key_parts() ||
- table->s->uniques)
+ share->uniques)
{
/* Can't create a key; Make a unique constraint instead of a key */
- table->s->keys= 0;
- table->s->uniques= 1;
+ share->keys= 0;
+ share->uniques= 1;
using_unique_constraint=1;
bzero((char*) &uniquedef,sizeof(uniquedef));
uniquedef.keysegs=keyinfo->key_parts;
@@ -10287,11 +10792,11 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
uniquedef.null_are_equal=1;
/* Create extra column for hash value */
- bzero((byte*) param->recinfo,sizeof(*param->recinfo));
+ bzero((uchar*) param->recinfo,sizeof(*param->recinfo));
param->recinfo->type= FIELD_CHECK;
param->recinfo->length=MI_UNIQUE_HASH_LENGTH;
param->recinfo++;
- table->s->reclength+=MI_UNIQUE_HASH_LENGTH;
+ share->reclength+=MI_UNIQUE_HASH_LENGTH;
}
else
{
@@ -10313,7 +10818,7 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
seg->type=
((keyinfo->key_part[i].key_type & FIELDFLAG_BINARY) ?
HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2);
- seg->bit_start= (uint8)(field->pack_length() - table->s->blob_ptr_size);
+ seg->bit_start= (uint8)(field->pack_length() - share->blob_ptr_size);
seg->flag= HA_BLOB_PART;
seg->length=0; // Whole blob in unique constraint
}
@@ -10346,10 +10851,10 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
OPTION_BIG_TABLES)
create_info.data_file_length= ~(ulonglong) 0;
- if ((error=mi_create(table->s->table_name,table->s->keys,&keydef,
+ if ((error=mi_create(share->table_name.str, share->keys, &keydef,
(uint) (param->recinfo-param->start_recinfo),
param->start_recinfo,
- table->s->uniques, &uniquedef,
+ share->uniques, &uniquedef,
&create_info,
HA_CREATE_TMP_TABLE)))
{
@@ -10357,9 +10862,8 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
table->db_stat=0;
goto err;
}
- statistic_increment(table->in_use->status_var.created_tmp_disk_tables,
- &LOCK_status);
- table->s->db_record_offset= 1;
+ status_var_increment(table->in_use->status_var.created_tmp_disk_tables);
+ share->db_record_offset= 1;
DBUG_RETURN(0);
err:
DBUG_RETURN(1);
@@ -10383,17 +10887,9 @@ free_tmp_table(THD *thd, TABLE *entry)
if (entry->file)
{
if (entry->db_stat)
- {
- (void) entry->file->close();
- }
- /*
- We can't call ha_delete_table here as the table may created in mixed case
- here and we have to ensure that delete_table gets the table name in
- the original case.
- */
- if (!(test_flags & TEST_KEEP_TMP_TABLES) ||
- entry->s->db_type == DB_TYPE_HEAP)
- entry->file->delete_table(entry->s->table_name);
+ entry->file->ha_drop_table(entry->s->table_name.str);
+ else
+ entry->file->ha_delete_table(entry->s->table_name.str);
delete entry->file;
}
@@ -10403,7 +10899,9 @@ free_tmp_table(THD *thd, TABLE *entry)
free_io_cache(entry);
if (entry->temp_pool_slot != MY_BIT_NONE)
- bitmap_clear_bit(&temp_pool, entry->temp_pool_slot);
+ bitmap_lock_clear_bit(&temp_pool, entry->temp_pool_slot);
+
+ plugin_unlock(0, entry->s->db_plugin);
free_root(&own_root, MYF(0)); /* the table is allocated in its own root */
thd_proc_info(thd, save_proc_info);
@@ -10411,25 +10909,28 @@ free_tmp_table(THD *thd, TABLE *entry)
DBUG_VOID_RETURN;
}
-/*
-* If a HEAP table gets full, create a MyISAM table and copy all rows to this
+/**
+ If a HEAP table gets full, create a MyISAM table and copy all rows
+ to this.
*/
bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
int error, bool ignore_last_dupp_key_error)
{
TABLE new_table;
+ TABLE_SHARE share;
const char *save_proc_info;
int write_err;
DBUG_ENTER("create_myisam_from_heap");
- if (table->s->db_type != DB_TYPE_HEAP || error != HA_ERR_RECORD_FILE_FULL)
+ if (table->s->db_type() != heap_hton ||
+ error != HA_ERR_RECORD_FILE_FULL)
{
/*
We don't want this error to be converted to a warning, e.g. in case of
INSERT IGNORE ... SELECT.
*/
- thd->is_fatal_error= 1;
+ thd->fatal_error();
table->file->print_error(error,MYF(0));
DBUG_RETURN(1);
}
@@ -10438,22 +10939,23 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
ha_release_temporary_latches(thd);
new_table= *table;
- new_table.s= &new_table.share_not_to_be_used;
- new_table.s->db_type= DB_TYPE_MYISAM;
- if (!(new_table.file= get_new_handler(&new_table, &new_table.mem_root,
- DB_TYPE_MYISAM)))
+ share= *table->s;
+ new_table.s= &share;
+ new_table.s->db_plugin= ha_lock_engine(thd, myisam_hton);
+ if (!(new_table.file= get_new_handler(&share, &new_table.mem_root,
+ new_table.s->db_type())))
DBUG_RETURN(1); // End of memory
save_proc_info=thd->proc_info;
thd_proc_info(thd, "converting HEAP to MyISAM");
- if (create_myisam_tmp_table(&new_table,param,
+ if (create_myisam_tmp_table(&new_table, param,
thd->lex->select_lex.options | thd->options))
goto err2;
if (open_tmp_table(&new_table))
goto err1;
if (table->file->indexes_are_disabled())
- new_table.file->disable_indexes(HA_KEY_SWITCH_ALL);
+ new_table.file->ha_disable_indexes(HA_KEY_SWITCH_ALL);
table->file->ha_index_or_rnd_end();
table->file->ha_rnd_init(1);
if (table->no_rows)
@@ -10467,38 +10969,47 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
To use start_bulk_insert() (which is new in 4.1) we need to find
all places where a corresponding end_bulk_insert() should be put.
*/
- table->file->info(HA_STATUS_VARIABLE); /* update table->file->records */
- new_table.file->start_bulk_insert(table->file->records);
+ table->file->info(HA_STATUS_VARIABLE); /* update table->file->stats.records */
+ new_table.file->ha_start_bulk_insert(table->file->stats.records);
#else
/* HA_EXTRA_WRITE_CACHE can stay until close, no need to disable it */
new_table.file->extra(HA_EXTRA_WRITE_CACHE);
#endif
- /* copy all old rows */
+ /*
+ copy all old rows from heap table to MyISAM table
+ This is the only code that uses record[1] to read/write but this
+ is safe as this is a temporary MyISAM table without timestamp/autoincrement
+ or partitioning.
+ */
while (!table->file->rnd_next(new_table.record[1]))
{
- write_err=new_table.file->write_row(new_table.record[1]);
+ write_err= new_table.file->ha_write_row(new_table.record[1]);
DBUG_EXECUTE_IF("raise_error", write_err= HA_ERR_FOUND_DUPP_KEY ;);
if (write_err)
goto err;
}
/* copy row that filled HEAP table */
- if ((write_err=new_table.file->write_row(table->record[0])))
+ if ((write_err=new_table.file->ha_write_row(table->record[0])))
{
- if ((write_err != HA_ERR_FOUND_DUPP_KEY &&
- write_err != HA_ERR_FOUND_DUPP_UNIQUE) || !ignore_last_dupp_key_error)
- goto err;
+ if (new_table.file->is_fatal_error(write_err, HA_CHECK_DUP) ||
+ !ignore_last_dupp_key_error)
+ goto err;
}
/* remove heap table and change to use myisam table */
(void) table->file->ha_rnd_end();
- (void) table->file->close();
- (void) table->file->delete_table(table->s->table_name);
+ (void) table->file->close(); // This deletes the table !
delete table->file;
table->file=0;
+ plugin_unlock(0, table->s->db_plugin);
+ share.db_plugin= my_plugin_lock(0, &share.db_plugin);
+ new_table.s= table->s; // Keep old share
*table= new_table;
- table->s= &table->share_not_to_be_used;
- table->file->change_table_ptr(table);
+ *table->s= share;
+
+ table->file->change_table_ptr(table, table->s);
+ table->use_all_columns();
if (save_proc_info)
thd_proc_info(thd, (!strcmp(save_proc_info,"Copying to tmp table") ?
"Copying to tmp table on disk" : save_proc_info));
@@ -10510,26 +11021,24 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
(void) table->file->ha_rnd_end();
(void) new_table.file->close();
err1:
- new_table.file->delete_table(new_table.s->table_name);
- delete new_table.file;
+ new_table.file->ha_delete_table(new_table.s->table_name.str);
err2:
+ delete new_table.file;
thd_proc_info(thd, save_proc_info);
table->mem_root= new_table.mem_root;
DBUG_RETURN(1);
}
-/*
- SYNOPSIS
- setup_end_select_func()
- join join to setup the function for.
+/**
+ @details
+ Rows produced by a join sweep may end up in a temporary table or be sent
+ to a client. Setup the function of the nested loop join algorithm which
+ handles final fully constructed and matched records.
- DESCRIPTION
- Rows produced by a join sweep may end up in a temporary table or be sent
- to a client. Setup the function of the nested loop join algorithm which
- handles final fully constructed and matched records.
+ @param join join to setup the function for.
- RETURN
+ @return
end_select function to use. This function can't fail.
*/
@@ -10583,6 +11092,12 @@ Next_select_func setup_end_select_func(JOIN *join)
}
else
{
+ /*
+ Choose method for presenting result to user. Use end_send_group
+ if the query requires grouping (has a GROUP BY clause and/or one or
+ more aggregate functions). Use end_send if the query should not
+ be grouped.
+ */
if ((join->sort_and_group ||
(join->procedure && join->procedure->flags & PROC_GROUP)) &&
!tmp_tbl->precomputed_group_by)
@@ -10594,21 +11109,25 @@ Next_select_func setup_end_select_func(JOIN *join)
}
-/****************************************************************************
- Make a join of all tables and write it on socket or to table
- Return: 0 if ok
- 1 if error is sent
- -1 if error should be sent
-****************************************************************************/
+/**
+ Make a join of all tables and write it on socket or to table.
+
+ @retval
+ 0 if ok
+ @retval
+ 1 if error is sent
+ @retval
+ -1 if error should be sent
+*/
static int
do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
{
int rc= 0;
enum_nested_loop_state error= NESTED_LOOP_OK;
- JOIN_TAB *UNINIT_VAR(join_tab);
+ JOIN_TAB *join_tab= NULL;
DBUG_ENTER("do_select");
-
+
join->procedure=procedure;
join->tmp_table= table; /* Save for easy recursion */
join->fields= fields;
@@ -10619,7 +11138,7 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
empty_record(table);
if (table->group && join->tmp_table_param.sum_func_count &&
table->s->keys && !table->file->inited)
- table->file->ha_index_init(0);
+ table->file->ha_index_init(0, 0);
}
/* Set up select_end */
Next_select_func end_select= setup_end_select_func(join);
@@ -10638,9 +11157,9 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
*/
if (!join->conds || join->conds->val_int())
{
- error= (*end_select)(join,join_tab,0);
+ error= (*end_select)(join, 0, 0);
if (error == NESTED_LOOP_OK || error == NESTED_LOOP_QUERY_LIMIT)
- error= (*end_select)(join,join_tab,1);
+ error= (*end_select)(join, 0, 1);
/*
If we don't go through evaluate_join_record(), do the counting
@@ -10670,22 +11189,20 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
if (error == NESTED_LOOP_NO_MORE_ROWS)
error= NESTED_LOOP_OK;
+ if (table == NULL) // If sending data to client
+ /*
+ The following will unlock all cursors if the command wasn't an
+ update command
+ */
+ join->join_free(); // Unlock all cursors
if (error == NESTED_LOOP_OK)
{
/*
Sic: this branch works even if rc != 0, e.g. when
send_data above returns an error.
*/
- if (!table) // If sending data to client
- {
- /*
- The following will unlock all cursors if the command wasn't an
- update command
- */
- join->join_free(); // Unlock all cursors
- if (join->result->send_eof())
- rc= 1; // Don't send error
- }
+ if (table == NULL && join->result->send_eof()) // If sending data to client
+ rc= 1; // Don't send error
DBUG_PRINT("info",("%ld records output", (long) join->send_records));
}
else
@@ -10712,7 +11229,7 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
DBUG_PRINT("error",("Error: do_select() failed"));
}
#endif
- DBUG_RETURN(join->thd->net.report_error ? -1 : rc);
+ DBUG_RETURN(join->thd->is_error() ? -1 : rc);
}
@@ -10745,35 +11262,30 @@ sub_select_cache(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
return rc;
}
-/*
- Retrieve records ends with a given beginning from the result of a join
-
- SYNPOSIS
- sub_select()
- join pointer to the structure providing all context info for the query
- join_tab the first next table of the execution plan to be retrieved
- end_records true when we need to perform final steps of retrival
+/**
+ Retrieve records ends with a given beginning from the result of a join.
- DESCRIPTION
For a given partial join record consisting of records from the tables
preceding the table join_tab in the execution plan, the function
retrieves all matching full records from the result set and
send them to the result set stream.
- NOTES
+ @note
The function effectively implements the final (n-k) nested loops
of nested loops join algorithm, where k is the ordinal number of
the join_tab table and n is the total number of tables in the join query.
It performs nested loops joins with all conjunctive predicates from
the where condition pushed as low to the tables as possible.
E.g. for the query
- SELECT * FROM t1,t2,t3
- WHERE t1.a=t2.a AND t2.b=t3.b AND t1.a BETWEEN 5 AND 9
+ @code
+ SELECT * FROM t1,t2,t3
+ WHERE t1.a=t2.a AND t2.b=t3.b AND t1.a BETWEEN 5 AND 9
+ @endcode
the predicate (t1.a BETWEEN 5 AND 9) will be pushed to table t1,
given the selected plan prescribes to nest retrievals of the
joined tables in the following order: t1,t2,t3.
A pushed down predicate are attached to the table which it pushed to,
- at the field select_cond.
+ at the field join_tab->select_cond.
When executing a nested loop of level k the function runs through
the rows of 'join_tab' and for each row checks the pushed condition
attached to the table.
@@ -10784,9 +11296,11 @@ sub_select_cache(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
the execution plan. In this case the pushed down predicates can be
checked only at certain conditions.
Suppose for the query
- SELECT * FROM t1 LEFT JOIN (t2,t3) ON t3.a=t1.a
- WHERE t1>2 AND (t2.b>5 OR t2.b IS NULL)
- the optimizer has chosen a plan with the table order t1,t2,t3.
+ @code
+ SELECT * FROM t1 LEFT JOIN (t2,t3) ON t3.a=t1.a
+ WHERE t1>2 AND (t2.b>5 OR t2.b IS NULL)
+ @endcode
+ the optimizer has chosen a plan with the table order t1,t2,t3.
The predicate P1=t1>2 will be pushed down to the table t1, while the
predicate P2=(t2.b>5 OR t2.b IS NULL) will be attached to the table
t2. But the second predicate can not be unconditionally tested right
@@ -10812,16 +11326,18 @@ sub_select_cache(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
is complemented by nulls for t2 and t3. Then the pushed down predicates
are checked for the composed row almost in the same way as it had
been done for the first row with a match. The only difference is
- the predicates from on expressions are not checked.
+ the predicates from on expressions are not checked.
- IMPLEMENTATION
+ @par
+ @b IMPLEMENTATION
+ @par
The function forms output rows for a current partial join of k
tables tables recursively.
For each partial join record ending with a certain row from
join_tab it calls sub_select that builds all possible matching
tails from the result set.
To be able check predicates conditionally items of the class
- Item_func_trig_cond are employed.
+ Item_func_trig_cond are employed.
An object of this class is constructed from an item of class COND
and a pointer to a guarding boolean variable.
When the value of the guard variable is true the value of the object
@@ -10830,11 +11346,13 @@ sub_select_cache(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
To carry out a return to a nested loop level of join table t the pointer
to t is remembered in the field 'return_tab' of the join structure.
Consider the following query:
- SELECT * FROM t1,
- LEFT JOIN
- (t2, t3 LEFT JOIN (t4,t5) ON t5.a=t3.a)
- ON t4.a=t2.a
- WHERE (t2.b=5 OR t2.b IS NULL) AND (t4.b=2 OR t4.b IS NULL)
+ @code
+ SELECT * FROM t1,
+ LEFT JOIN
+ (t2, t3 LEFT JOIN (t4,t5) ON t5.a=t3.a)
+ ON t4.a=t2.a
+ WHERE (t2.b=5 OR t2.b IS NULL) AND (t4.b=2 OR t4.b IS NULL)
+ @endcode
Suppose the chosen execution plan dictates the order t1,t2,t3,t4,t5
and suppose for a given joined rows from tables t1,t2,t3 there are
no rows in the result set yet.
@@ -10849,11 +11367,18 @@ sub_select_cache(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
Thus, when the first row from t5 with t5.a=t3.a is found
this pointer for t5 is changed from t4 to t2.
- STRUCTURE NOTES
+ @par
+ @b STRUCTURE @b NOTES
+ @par
join_tab->first_unmatched points always backwards to the first inner
table of the embedding nested join, if any.
- RETURN
+ @param join pointer to the structure providing all context info for
+ the query
+ @param join_tab the first next table of the execution plan to be retrieved
+ @param end_records true when we need to perform final steps of retrival
+
+ @return
return one of enum_nested_loop_state, except NESTED_LOOP_NO_MORE_ROWS.
*/
@@ -10866,7 +11391,6 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
int error;
enum_nested_loop_state rc;
- my_bool *report_error= &(join->thd->net.report_error);
READ_RECORD *info= &join_tab->read_record;
if (join->resume_nested_loop)
@@ -10898,13 +11422,13 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
join->thd->row_count= 0;
error= (*join_tab->read_first_record)(join_tab);
- rc= evaluate_join_record(join, join_tab, error, report_error);
+ rc= evaluate_join_record(join, join_tab, error);
}
while (rc == NESTED_LOOP_OK)
{
error= info->read_record(info);
- rc= evaluate_join_record(join, join_tab, error, report_error);
+ rc= evaluate_join_record(join, join_tab, error);
}
if (rc == NESTED_LOOP_NO_MORE_ROWS &&
@@ -10917,10 +11441,9 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
}
-/*
+/**
Process one record of the nested loop join.
- DESCRIPTION
This function will evaluate parts of WHERE/ON clauses that are
applicable to the partial record on hand and in case of success
submit this record to the next level of the nested loop.
@@ -10928,14 +11451,14 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
static enum_nested_loop_state
evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
- int error, my_bool *report_error)
+ int error)
{
bool not_used_in_distinct=join_tab->not_used_in_distinct;
ha_rows found_records=join->found_records;
COND *select_cond= join_tab->select_cond;
bool select_cond_result= TRUE;
- if (error > 0 || (*report_error)) // Fatal error
+ if (error > 0 || (join->thd->is_error())) // Fatal error
return NESTED_LOOP_ERROR;
if (error < 0)
return NESTED_LOOP_NO_MORE_ROWS;
@@ -10951,7 +11474,7 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
select_cond_result= test(select_cond->val_int());
/* check for errors evaluating the condition */
- if (join->thd->net.report_error)
+ if (join->thd->is_error())
return NESTED_LOOP_ERROR;
}
@@ -11055,8 +11578,9 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
}
-/*
- DESCRIPTION
+/**
+
+ @details
Construct a NULL complimented partial join record and feed it to the next
level of the nested loop. This function is used in case we have
an OUTER join and no matching record was found.
@@ -11163,21 +11687,37 @@ flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skip_last)
return NESTED_LOOP_KILLED; // Aborted by user /* purecov: inspected */
}
SQL_SELECT *select=join_tab->select;
- if (rc == NESTED_LOOP_OK &&
- (!join_tab->cache.select || !join_tab->cache.select->skip_record()))
+ if (rc == NESTED_LOOP_OK)
{
- uint i;
- reset_cache_read(&join_tab->cache);
- for (i=(join_tab->cache.records- (skip_last ? 1 : 0)) ; i-- > 0 ;)
+ bool skip_record= FALSE;
+ if (join_tab->cache.select &&
+ join_tab->cache.select->skip_record(join->thd, &skip_record))
{
- read_cached_record(join_tab);
- if (!select || !select->skip_record())
+ reset_cache_write(&join_tab->cache);
+ return NESTED_LOOP_ERROR;
+ }
+
+ if (!skip_record)
+ {
+ uint i;
+ reset_cache_read(&join_tab->cache);
+ for (i=(join_tab->cache.records- (skip_last ? 1 : 0)) ; i-- > 0 ;)
{
- rc= (join_tab->next_select)(join,join_tab+1,0);
- if (rc != NESTED_LOOP_OK && rc != NESTED_LOOP_NO_MORE_ROWS)
+ read_cached_record(join_tab);
+ skip_record= FALSE;
+ if (select && select->skip_record(join->thd, &skip_record))
{
reset_cache_write(&join_tab->cache);
- return rc;
+ return NESTED_LOOP_ERROR;
+ }
+ if (!skip_record)
+ {
+ rc= (join_tab->next_select)(join,join_tab+1,0);
+ if (rc != NESTED_LOOP_OK && rc != NESTED_LOOP_NO_MORE_ROWS)
+ {
+ reset_cache_write(&join_tab->cache);
+ return rc;
+ }
}
}
}
@@ -11200,7 +11740,7 @@ flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skip_last)
Returns -1 if row was not found, 0 if row was found and 1 on errors
*****************************************************************************/
-/* Help function when we get some an error from the table handler */
+/** Help function when we get some an error from the table handler. */
int report_error(TABLE *table, int error)
{
@@ -11215,7 +11755,7 @@ int report_error(TABLE *table, int error)
*/
if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT)
sql_print_error("Got error %d when reading table '%s'",
- error, table->s->path);
+ error, table->s->path.str);
table->file->print_error(error,MYF(0));
return 1;
}
@@ -11225,9 +11765,10 @@ int safe_index_read(JOIN_TAB *tab)
{
int error;
TABLE *table= tab->table;
- if ((error=table->file->index_read(table->record[0],
- tab->ref.key_buff,
- tab->ref.key_length, HA_READ_KEY_EXACT)))
+ if ((error=table->file->index_read_map(table->record[0],
+ tab->ref.key_buff,
+ make_prev_keypart_map(tab->ref.key_parts),
+ HA_READ_KEY_EXACT)))
return report_error(table, error);
return 0;
}
@@ -11250,31 +11791,28 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos)
tab->info="const row not found";
/* Mark for EXPLAIN that the row was not found */
pos->records_read=0.0;
+ pos->ref_depend_map= 0;
if (!table->maybe_null || error > 0)
DBUG_RETURN(error);
}
}
else
{
- if (!table->key_read && table->used_keys.is_set(tab->ref.key) &&
+ if (!table->key_read && table->covering_keys.is_set(tab->ref.key) &&
!table->no_keyread &&
(int) table->reginfo.lock_type <= (int) TL_READ_HIGH_PRIORITY)
{
- table->key_read=1;
- table->file->extra(HA_EXTRA_KEYREAD);
+ table->set_keyread(TRUE);
tab->index= tab->ref.key;
}
error=join_read_const(tab);
- if (table->key_read)
- {
- table->key_read=0;
- table->file->extra(HA_EXTRA_NO_KEYREAD);
- }
+ table->set_keyread(FALSE);
if (error)
{
tab->info="unique row not found";
/* Mark for EXPLAIN that the row was not found */
pos->records_read=0.0;
+ pos->ref_depend_map= 0;
if (!table->maybe_null || error > 0)
DBUG_RETURN(error);
}
@@ -11336,17 +11874,17 @@ join_read_system(JOIN_TAB *tab)
}
-/*
- Read a table when there is at most one matching row
+/**
+ Read a table when there is at most one matching row.
- SYNOPSIS
- join_read_const()
- tab Table to read
+ @param tab Table to read
- RETURN
+ @retval
0 Row was found
- -1 Row was not found
- 1 Got an error (other than row not found) during read
+ @retval
+ -1 Row was not found
+ @retval
+ 1 Got an error (other than row not found) during read
*/
static int
@@ -11357,20 +11895,21 @@ join_read_const(JOIN_TAB *tab)
if (table->status & STATUS_GARBAGE) // If first read
{
table->status= 0;
- if (cp_buffer_from_ref(tab->join->thd, &tab->ref))
+ if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
error=HA_ERR_KEY_NOT_FOUND;
else
{
- error=table->file->index_read_idx(table->record[0],tab->ref.key,
- (byte*) tab->ref.key_buff,
- tab->ref.key_length,HA_READ_KEY_EXACT);
+ error=table->file->index_read_idx_map(table->record[0],tab->ref.key,
+ (uchar*) tab->ref.key_buff,
+ make_prev_keypart_map(tab->ref.key_parts),
+ HA_READ_KEY_EXACT);
}
if (error)
{
table->status= STATUS_NOT_FOUND;
mark_as_null_row(tab->table);
empty_record(table);
- if (error != HA_ERR_KEY_NOT_FOUND)
+ if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
return -1;
}
@@ -11393,7 +11932,9 @@ join_read_key(JOIN_TAB *tab)
TABLE *table= tab->table;
if (!table->file->inited)
- table->file->ha_index_init(tab->ref.key);
+ {
+ table->file->ha_index_init(tab->ref.key, tab->sorted);
+ }
if (cmp_buffer_with_ref(tab) ||
(table->status & (STATUS_GARBAGE | STATUS_NO_PARENT | STATUS_NULL_ROW)))
{
@@ -11411,11 +11952,11 @@ join_read_key(JOIN_TAB *tab)
tab->read_record.file->unlock_row();
tab->ref.has_record= FALSE;
}
-
- error=table->file->index_read(table->record[0],
- tab->ref.key_buff,
- tab->ref.key_length,HA_READ_KEY_EXACT);
- if (error && error != HA_ERR_KEY_NOT_FOUND)
+ error=table->file->index_read_map(table->record[0],
+ tab->ref.key_buff,
+ make_prev_keypart_map(tab->ref.key_parts),
+ HA_READ_KEY_EXACT);
+ if (error && error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
if (! error)
@@ -11478,8 +12019,8 @@ join_read_always_key(JOIN_TAB *tab)
/* Initialize the index first */
if (!table->file->inited)
- table->file->ha_index_init(tab->ref.key);
-
+ table->file->ha_index_init(tab->ref.key, tab->sorted);
+
/* Perform "Late NULLs Filtering" (see internals manual for explanations) */
for (uint i= 0 ; i < tab->ref.key_parts ; i++)
{
@@ -11487,13 +12028,14 @@ join_read_always_key(JOIN_TAB *tab)
return -1;
}
- if (cp_buffer_from_ref(tab->join->thd, &tab->ref))
+ if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
return -1;
- if ((error=table->file->index_read(table->record[0],
- tab->ref.key_buff,
- tab->ref.key_length,HA_READ_KEY_EXACT)))
+ if ((error=table->file->index_read_map(table->record[0],
+ tab->ref.key_buff,
+ make_prev_keypart_map(tab->ref.key_parts),
+ HA_READ_KEY_EXACT)))
{
- if (error != HA_ERR_KEY_NOT_FOUND)
+ if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
return -1; /* purecov: inspected */
}
@@ -11501,9 +12043,9 @@ join_read_always_key(JOIN_TAB *tab)
}
-/*
+/**
This function is used when optimizing away ORDER BY in
- SELECT * FROM t1 WHERE a=1 ORDER BY a DESC,b DESC
+ SELECT * FROM t1 WHERE a=1 ORDER BY a DESC,b DESC.
*/
static int
@@ -11513,14 +12055,14 @@ join_read_last_key(JOIN_TAB *tab)
TABLE *table= tab->table;
if (!table->file->inited)
- table->file->ha_index_init(tab->ref.key);
- if (cp_buffer_from_ref(tab->join->thd, &tab->ref))
+ table->file->ha_index_init(tab->ref.key, tab->sorted);
+ if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
return -1;
- if ((error=table->file->index_read_last(table->record[0],
- tab->ref.key_buff,
- tab->ref.key_length)))
+ if ((error=table->file->index_read_last_map(table->record[0],
+ tab->ref.key_buff,
+ make_prev_keypart_map(tab->ref.key_parts))))
{
- if (error != HA_ERR_KEY_NOT_FOUND)
+ if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
return report_error(table, error);
return -1; /* purecov: inspected */
}
@@ -11619,12 +12161,8 @@ join_read_first(JOIN_TAB *tab)
{
int error;
TABLE *table=tab->table;
- if (!table->key_read && table->used_keys.is_set(tab->index) &&
- !table->no_keyread)
- {
- table->key_read=1;
- table->file->extra(HA_EXTRA_KEYREAD);
- }
+ if (table->covering_keys.is_set(tab->index) && !table->no_keyread)
+ table->set_keyread(TRUE);
tab->table->status=0;
tab->read_record.read_record=join_read_next;
tab->read_record.table=table;
@@ -11632,7 +12170,7 @@ join_read_first(JOIN_TAB *tab)
tab->read_record.index=tab->index;
tab->read_record.record=table->record[0];
if (!table->file->inited)
- table->file->ha_index_init(tab->index);
+ table->file->ha_index_init(tab->index, tab->sorted);
if ((error=tab->table->file->index_first(tab->table->record[0])))
{
if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE)
@@ -11658,12 +12196,8 @@ join_read_last(JOIN_TAB *tab)
{
TABLE *table=tab->table;
int error;
- if (!table->key_read && table->used_keys.is_set(tab->index) &&
- !table->no_keyread)
- {
- table->key_read=1;
- table->file->extra(HA_EXTRA_KEYREAD);
- }
+ if (table->covering_keys.is_set(tab->index) && !table->no_keyread)
+ table->set_keyread(TRUE);
tab->table->status=0;
tab->read_record.read_record=join_read_prev;
tab->read_record.table=table;
@@ -11671,7 +12205,7 @@ join_read_last(JOIN_TAB *tab)
tab->read_record.index=tab->index;
tab->read_record.record=table->record[0];
if (!table->file->inited)
- table->file->ha_index_init(tab->index);
+ table->file->ha_index_init(tab->index, 1);
if ((error= tab->table->file->index_last(tab->table->record[0])))
return report_error(table, error);
return 0;
@@ -11695,10 +12229,11 @@ join_ft_read_first(JOIN_TAB *tab)
TABLE *table= tab->table;
if (!table->file->inited)
- table->file->ha_index_init(tab->ref.key);
+ table->file->ha_index_init(tab->ref.key, 1);
#if NOT_USED_YET
- if (cp_buffer_from_ref(tab->join->thd, &tab->ref)) // as ft-key doesn't use store_key's
- return -1; // see also FT_SELECT::init()
+ /* as ft-key doesn't use store_key's, see also FT_SELECT::init() */
+ if (cp_buffer_from_ref(tab->join->thd, table, &tab->ref))
+ return -1;
#endif
table->file->ft_init();
@@ -11717,8 +12252,8 @@ join_ft_read_next(READ_RECORD *info)
}
-/*
- Reading of key with key reference and one part that may be NULL
+/**
+ Reading of key with key reference and one part that may be NULL.
*/
int
@@ -11805,7 +12340,7 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if ((join->tables == 1) && !join->tmp_table && !join->sort_and_group
&& !join->send_group_parts && !join->having && !jt->select_cond &&
!(jt->select && jt->select->quick) &&
- !(jt->table->file->table_flags() & HA_NOT_EXACT_COUNT) &&
+ (jt->table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
(jt->ref.key < 0))
{
/* Join over all rows in table; Return number of found rows */
@@ -11821,7 +12356,7 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
else
{
table->file->info(HA_STATUS_VARIABLE);
- join->send_records = table->file->records;
+ join->send_records= table->file->stats.records;
}
}
else
@@ -11982,7 +12517,9 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (!end_of_records)
{
copy_fields(&join->tmp_table_param);
- copy_funcs(join->tmp_table_param.items_to_copy);
+ if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
+ DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
+
#ifdef TO_BE_DELETED
if (!table->uniques) // If not unique handling
{
@@ -11994,7 +12531,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (item->maybe_null)
{
Field *field=item->get_tmp_table_field();
- field->ptr[-1]= (byte) (field->is_null() ? 1 : 0);
+ field->ptr[-1]= (uchar) (field->is_null() ? 1 : 0);
}
}
}
@@ -12003,10 +12540,9 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
int error;
join->found_records++;
- if ((error=table->file->write_row(table->record[0])))
+ if ((error=table->file->ha_write_row(table->record[0])))
{
- if (error == HA_ERR_FOUND_DUPP_KEY ||
- error == HA_ERR_FOUND_DUPP_UNIQUE)
+ if (!table->file->is_fatal_error(error, HA_CHECK_DUP))
goto end;
if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
error,1))
@@ -12028,8 +12564,8 @@ end:
DBUG_RETURN(NESTED_LOOP_OK);
}
-/* Group by searching after group record and updating it if possible */
/* ARGSUSED */
+/** Group by searching after group record and updating it if possible. */
static enum_nested_loop_state
end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
@@ -12059,14 +12595,15 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (item->maybe_null)
group->buff[-1]= (char) group->field->is_null();
}
- if (!table->file->index_read(table->record[1],
- join->tmp_table_param.group_buff,0,
- HA_READ_KEY_EXACT))
+ if (!table->file->index_read_map(table->record[1],
+ join->tmp_table_param.group_buff,
+ HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT))
{ /* Update old record */
restore_record(table,record[1]);
update_tmptable_sum_func(join->sum_funcs,table);
- if ((error=table->file->update_row(table->record[1],
- table->record[0])))
+ if ((error=table->file->ha_update_row(table->record[1],
+ table->record[0])))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
@@ -12088,14 +12625,15 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
memcpy(table->record[0]+key_part->offset, group->buff, 1);
}
init_tmptable_sum_functions(join->sum_funcs);
- copy_funcs(join->tmp_table_param.items_to_copy);
- if ((error=table->file->write_row(table->record[0])))
+ if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
+ DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
+ if ((error=table->file->ha_write_row(table->record[0])))
{
if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
error, 0))
DBUG_RETURN(NESTED_LOOP_ERROR); // Not a table_is_full error
/* Change method to update rows */
- table->file->ha_index_init(0);
+ table->file->ha_index_init(0, 0);
join->join_tab[join->tables-1].next_select=end_unique_update;
}
join->send_records++;
@@ -12103,7 +12641,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
}
-/* Like end_update, but this is done with unique constraints instead of keys */
+/** Like end_update, but this is done with unique constraints instead of keys. */
static enum_nested_loop_state
end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
@@ -12123,9 +12661,10 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
init_tmptable_sum_functions(join->sum_funcs);
copy_fields(&join->tmp_table_param); // Groups are copied twice.
- copy_funcs(join->tmp_table_param.items_to_copy);
+ if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
+ DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
- if (!(error=table->file->write_row(table->record[0])))
+ if (!(error=table->file->ha_write_row(table->record[0])))
join->send_records++; // New group
else
{
@@ -12134,15 +12673,15 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
}
- if (table->file->rnd_pos(table->record[1],table->file->dupp_ref))
+ if (table->file->rnd_pos(table->record[1],table->file->dup_ref))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
}
restore_record(table,record[1]);
update_tmptable_sum_func(join->sum_funcs,table);
- if ((error=table->file->update_row(table->record[1],
- table->record[0])))
+ if ((error=table->file->ha_update_row(table->record[1],
+ table->record[0])))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
@@ -12185,7 +12724,7 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
join->sum_funcs_end[send_group_parts]);
if (!join->having || join->having->val_int())
{
- int error= table->file->write_row(table->record[0]);
+ int error= table->file->ha_write_row(table->record[0]);
if (error && create_myisam_from_heap(join->thd, table,
&join->tmp_table_param,
error, 0))
@@ -12210,7 +12749,8 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (idx < (int) join->send_group_parts)
{
copy_fields(&join->tmp_table_param);
- copy_funcs(join->tmp_table_param.items_to_copy);
+ if (copy_funcs(join->tmp_table_param.items_to_copy, join->thd))
+ DBUG_RETURN(NESTED_LOOP_ERROR);
if (init_sum_functions(join->sum_funcs, join->sum_funcs_end[idx+1]))
DBUG_RETURN(NESTED_LOOP_ERROR);
if (join->procedure)
@@ -12234,7 +12774,10 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
in sorted order.
*****************************************************************************/
-/* Return 1 if right_item is used removable reference key on left_item */
+/**
+ @return
+ 1 if right_item is used removable reference key on left_item
+*/
static bool test_if_ref(Item_field *left_item,Item *right_item)
{
@@ -12261,7 +12804,7 @@ static bool test_if_ref(Item_field *left_item,Item *right_item)
if (field->binary() &&
field->real_type() != MYSQL_TYPE_STRING &&
field->real_type() != MYSQL_TYPE_VARCHAR &&
- (field->type() != FIELD_TYPE_FLOAT || field->decimals() == 0))
+ (field->type() != MYSQL_TYPE_FLOAT || field->decimals() == 0))
{
return !store_val_in_field(field, right_item, CHECK_FIELD_WARN);
}
@@ -12389,23 +12932,27 @@ part_of_refkey(TABLE *table,Field *field)
/**
- Test if a key can be used to resolve ORDER BY
-
- used_key_parts is set to correct key parts used if return value != 0
- (On other cases, used_key_part may be changed).
- Note that the value may actually be greater than the number of index
- key parts. This can happen for storage engines that have the primary
- key parts as a suffix for every secondary key.
-
- @param order Sort order
- @param table Table to sort
- @param idx Index to check
- @param[out] used_key_parts Return value for used key parts.
-
- @return indication if the key can be used for sorting
- @retval 1 key can be used for reading data in order.
- @retval 0 Key can't be used
- @retval -1 Reverse read on the key can be used
+ Test if one can use the key to resolve ORDER BY.
+
+ @param order Sort order
+ @param table Table to sort
+ @param idx Index to check
+ @param used_key_parts Return value for used key parts.
+
+
+ @note
+ used_key_parts is set to correct key parts used if return value != 0
+ (On other cases, used_key_part may be changed)
+ Note that the value may actually be greater than the number of index
+ key parts. This can happen for storage engines that have the primary
+ key parts as a suffix for every secondary key.
+
+ @retval
+ 1 key is ok.
+ @retval
+ 0 Key can't be used
+ @retval
+ -1 Reverse key can be used
*/
static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
@@ -12416,7 +12963,7 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
key_part_end=key_part+table->key_info[idx].key_parts;
key_part_map const_key_parts=table->const_key_parts[idx];
int reverse=0;
- my_bool on_primary_key= FALSE;
+ my_bool on_pk_suffix= FALSE;
DBUG_ENTER("test_if_order_by_key");
for (; order ; order=order->next, const_key_parts>>=1)
@@ -12438,11 +12985,12 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
key as a suffix to the secondary keys. If it has continue to check
the primary key as a suffix.
*/
- if (!on_primary_key &&
- (table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
- table->s->primary_key != MAX_KEY)
+ if (!on_pk_suffix &&
+ (table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
+ table->s->primary_key != MAX_KEY &&
+ table->s->primary_key != idx)
{
- on_primary_key= TRUE;
+ on_pk_suffix= TRUE;
key_part= table->key_info[table->s->primary_key].key_part;
key_part_end=key_part+table->key_info[table->s->primary_key].key_parts;
const_key_parts=table->const_key_parts[table->s->primary_key];
@@ -12454,7 +13002,10 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
one row). The sorting doesn't matter.
*/
if (key_part == key_part_end && reverse == 0)
+ {
+ *used_key_parts= 0;
DBUG_RETURN(1);
+ }
}
else
DBUG_RETURN(0);
@@ -12471,7 +13022,7 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
reverse=flag; // Remember if reverse
key_part++;
}
- if (on_primary_key)
+ if (on_pk_suffix)
{
uint used_key_parts_secondary= table->key_info[idx].key_parts;
uint used_key_parts_pk=
@@ -12496,14 +13047,48 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
}
+/**
+ Find shortest key suitable for full table scan.
+
+ @param table Table to scan
+ @param usable_keys Allowed keys
+
+ @note
+    Since
+    1) clustered primary key entry data set is a set of all record
+       fields (key fields and not key fields) and
+    2) secondary index entry data is a union of its key fields and
+       primary key fields (at least InnoDB and its derivatives don't
+       duplicate primary key fields there, even if the primary and
+       the secondary keys have a common subset of key fields),
+    secondary index entry data is always a subset of primary key entry data.
+ Unfortunately, key_info[nr].key_length doesn't show the length
+ of key/pointer pair but a sum of key field lengths only, thus
+ we can't estimate index IO volume comparing only this key_length
+ value of secondary keys and clustered PK.
+ So, try secondary keys first, and choose PK only if there are no
+    usable secondary covering keys or the best found secondary key includes
+    all table fields (i.e. it is the same as the PK).
+
+ @return
+ MAX_KEY no suitable key found
+ key index otherwise
+*/
+
uint find_shortest_key(TABLE *table, const key_map *usable_keys)
{
- uint min_length= (uint) ~0;
uint best= MAX_KEY;
+ uint usable_clustered_pk= (table->file->primary_key_is_clustered() &&
+ table->s->primary_key != MAX_KEY &&
+ usable_keys->is_set(table->s->primary_key)) ?
+ table->s->primary_key : MAX_KEY;
if (!usable_keys->is_clear_all())
{
+ uint min_length= (uint) ~0;
for (uint nr=0; nr < table->s->keys ; nr++)
{
+ if (nr == usable_clustered_pk)
+ continue;
if (usable_keys->is_set(nr))
{
if (table->key_info[nr].key_length < min_length)
@@ -12514,23 +13099,35 @@ uint find_shortest_key(TABLE *table, const key_map *usable_keys)
}
}
}
+ if (usable_clustered_pk != MAX_KEY)
+ {
+ /*
+ If the primary key is clustered and found shorter key covers all table
+ fields then primary key scan normally would be faster because amount of
+ data to scan is the same but PK is clustered.
+ It's safe to compare key parts with table fields since duplicate key
+ parts aren't allowed.
+ */
+ if (best == MAX_KEY ||
+ table->key_info[best].key_parts >= table->s->fields)
+ best= usable_clustered_pk;
+ }
return best;
}
-/*
+/**
Test if a second key is the subkey of the first one.
- SYNOPSIS
- is_subkey()
- key_part First key parts
- ref_key_part Second key parts
- ref_key_part_end Last+1 part of the second key
+ @param key_part First key parts
+ @param ref_key_part Second key parts
+ @param ref_key_part_end Last+1 part of the second key
- NOTE
+ @note
Second key MUST be shorter than the first one.
- RETURN
+ @retval
1 is a subkey
+ @retval
0 no sub key
*/
@@ -12544,17 +13141,16 @@ is_subkey(KEY_PART_INFO *key_part, KEY_PART_INFO *ref_key_part,
return 1;
}
-/*
- Test if we can use one of the 'usable_keys' instead of 'ref' key for sorting
+/**
+ Test if we can use one of the 'usable_keys' instead of 'ref' key
+ for sorting.
- SYNOPSIS
- test_if_subkey()
- ref Number of key, used for WHERE clause
- usable_keys Keys for testing
+ @param ref Number of key, used for WHERE clause
+ @param usable_keys Keys for testing
- RETURN
- MAX_KEY If we can't use other key
- the number of found key Otherwise
+ @return
+ - MAX_KEY If we can't use other key
+ - the number of found key Otherwise
*/
static uint
@@ -12585,24 +13181,19 @@ test_if_subkey(ORDER *order, TABLE *table, uint ref, uint ref_key_parts,
}
-/*
+/**
Check if GROUP BY/DISTINCT can be optimized away because the set is
already known to be distinct.
-
- SYNOPSIS
- list_contains_unique_index ()
- table The table to operate on.
- find_func function to iterate over the list and search
- for a field
- DESCRIPTION
- Used in removing the GROUP BY/DISTINCT of the following types of
- statements:
- SELECT [DISTINCT] <unique_key_cols>... FROM <single_table_ref>
- [GROUP BY <unique_key_cols>,...]
+ Used in removing the GROUP BY/DISTINCT of the following types of
+ statements:
+ @code
+ SELECT [DISTINCT] <unique_key_cols>... FROM <single_table_ref>
+ [GROUP BY <unique_key_cols>,...]
+ @endcode
If (a,b,c is distinct)
- then <any combination of a,b,c>,{whatever} is also distinct
+ then <any combination of a,b,c>,{whatever} is also distinct
This function checks if all the key parts of any of the unique keys
of the table are referenced by a list : either the select list
@@ -12611,9 +13202,14 @@ test_if_subkey(ORDER *order, TABLE *table, uint ref, uint ref_key_parts,
If the above holds and the key parts cannot contain NULLs then we
can safely remove the GROUP BY/DISTINCT,
as no result set can be more distinct than an unique key.
-
- RETURN VALUE
+
+ @param table The table to operate on.
+ @param find_func function to iterate over the list and search
+ for a field
+
+ @retval
1 found
+ @retval
0 not found.
*/
@@ -12621,6 +13217,8 @@ static bool
list_contains_unique_index(TABLE *table,
bool (*find_func) (Field *, void *), void *data)
{
+ if (table->pos_in_table_list->outer_join)
+ return 0;
for (uint keynr= 0; keynr < table->s->keys; keynr++)
{
if (keynr == table->s->primary_key ||
@@ -12634,7 +13232,7 @@ list_contains_unique_index(TABLE *table,
key_part < key_part_end;
key_part++)
{
- if (key_part->field->maybe_null() ||
+ if (key_part->field->real_maybe_null() ||
!find_func(key_part->field, data))
break;
}
@@ -12646,20 +13244,17 @@ list_contains_unique_index(TABLE *table,
}
-/*
+/**
Helper function for list_contains_unique_index.
Find a field reference in a list of ORDER structures.
-
- SYNOPSIS
- find_field_in_order_list ()
- field The field to search for.
- data ORDER *.The list to search in
-
- DESCRIPTION
- Finds a direct reference of the Field in the list.
-
- RETURN VALUE
+ Finds a direct reference of the Field in the list.
+
+ @param field The field to search for.
+  @param data              ORDER *. The list to search in
+
+ @retval
1 found
+ @retval
0 not found.
*/
@@ -12682,20 +13277,17 @@ find_field_in_order_list (Field *field, void *data)
}
-/*
+/**
Helper function for list_contains_unique_index.
Find a field reference in a dynamic list of Items.
-
- SYNOPSIS
- find_field_in_item_list ()
- field in The field to search for.
- data in List<Item> *.The list to search in
-
- DESCRIPTION
- Finds a direct reference of the Field in the list.
-
- RETURN VALUE
+ Finds a direct reference of the Field in the list.
+
+ @param[in] field The field to search for.
+  @param[in] data          List<Item> *. The list to search in
+
+ @retval
1 found
+ @retval
0 not found.
*/
@@ -12720,26 +13312,38 @@ find_field_in_item_list (Field *field, void *data)
}
-/*
+/**
Test if we can skip the ORDER BY by using an index.
If we can use an index, the JOIN_TAB / tab->select struct
is changed to use the index.
- Return:
- 0 We have to use filesort to do the sorting
- 1 We can use an index.
+ The index must cover all fields in <order>, or it will not be considered.
+
+ @param no_changes No changes will be made to the query plan.
+
+ @todo
+ - sergeyp: Results of all index merge selects actually are ordered
+ by clustered PK values.
+
+ @retval
+ 0 We have to use filesort to do the sorting
+ @retval
+ 1 We can use an index.
*/
static bool
test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
- bool no_changes)
+ bool no_changes, key_map *map)
{
int ref_key;
uint ref_key_parts;
+ int order_direction;
+ uint used_key_parts;
TABLE *table=tab->table;
SQL_SELECT *select=tab->select;
key_map usable_keys;
+ QUICK_SELECT_I *save_quick= 0;
DBUG_ENTER("test_if_skip_sort_order");
LINT_INIT(ref_key_parts);
@@ -12747,8 +13351,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
Keys disabled by ALTER TABLE ... DISABLE KEYS should have already
been taken into account.
*/
- usable_keys= table->keys_in_use_for_query;
- DBUG_ASSERT(usable_keys.is_subset(table->s->keys_in_use));
+ usable_keys= *map;
for (ORDER *tmp_order=order; tmp_order ; tmp_order=tmp_order->next)
{
@@ -12775,6 +13378,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
else if (select && select->quick) // Range found by opt_range
{
int quick_type= select->quick->get_type();
+ save_quick= select->quick;
/*
assume results are not ordered when index merge is used
TODO: sergeyp: Results of all index merge selects actually are ordered
@@ -12794,8 +13398,6 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
/*
We come here when there is a REF key.
*/
- int order_direction;
- uint used_key_parts;
if (!usable_keys.is_set(ref_key))
{
/*
@@ -12806,8 +13408,8 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
If using index only read, only consider other possible index only
keys
*/
- if (table->used_keys.is_set(ref_key))
- usable_keys.intersect(table->used_keys);
+ if (table->covering_keys.is_set(ref_key))
+ usable_keys.intersect(table->covering_keys);
if ((new_ref_key= test_if_subkey(order, table, ref_key, ref_key_parts,
&usable_keys)) < MAX_KEY)
{
@@ -12858,72 +13460,42 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
}
/* Check if we get the rows in requested sorted order by using the key */
if (usable_keys.is_set(ref_key) &&
- (order_direction = test_if_order_by_key(order,table,ref_key,
- &used_key_parts)))
- {
- if (order_direction == -1) // If ORDER BY ... DESC
- {
- if (select && select->quick)
- {
- /*
- Don't reverse the sort order, if it's already done.
- (In some cases test_if_order_by_key() can be called multiple times
- */
- if (!select->quick->reverse_sorted())
- {
- int quick_type= select->quick->get_type();
- if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE ||
- quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT ||
- quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION ||
- quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)
- DBUG_RETURN(0); // Use filesort
-
- /* ORDER BY range_key DESC */
- QUICK_SELECT_DESC *tmp=new QUICK_SELECT_DESC((QUICK_RANGE_SELECT*)(select->quick),
- used_key_parts);
- if (!tmp || tmp->error)
- {
- delete tmp;
- DBUG_RETURN(0); // Reverse sort not supported
- }
- select->quick=tmp;
- }
- DBUG_RETURN(1);
- }
- if (tab->ref.key_parts <= used_key_parts && tab->type == JT_REF)
- {
- /*
- SELECT * FROM t1 WHERE a=1 ORDER BY a DESC,b DESC
-
- Use a traversal function that starts by reading the last row
- with key part (A) and then traverse the index backwards.
- */
- tab->read_first_record= join_read_last_key;
- tab->read_record.read_record= join_read_prev_same;
- /* fall through */
- }
- }
- else if (select && select->quick)
- select->quick->sorted= 1;
- DBUG_RETURN(1); /* No need to sort */
- }
+ (order_direction= test_if_order_by_key(order,table,ref_key,
+ &used_key_parts)))
+ goto check_reverse_order;
}
- else
{
- /* check if we can use a key to resolve the group */
- /* Tables using JT_NEXT are handled here */
+ /*
+ Check whether there is an index compatible with the given order
+ usage of which is cheaper than usage of the ref_key index (ref_key>=0)
+ or a table scan.
+ It may be the case if ORDER/GROUP BY is used with LIMIT.
+ */
uint nr;
key_map keys;
+ uint best_key_parts= 0;
+ uint saved_best_key_parts= 0;
+ int best_key_direction= 0;
+ ha_rows best_records= 0;
+ double read_time;
+ int best_key= -1;
+ bool is_best_covering= FALSE;
+ double fanout= 1;
+ JOIN *join= tab->join;
+ uint tablenr= tab - join->join_tab;
+ ha_rows table_records= table->file->stats.records;
+ bool group= join->group && order == join->group_list;
+ ha_rows ref_key_quick_rows= HA_POS_ERROR;
/*
If not used with LIMIT, only use keys if the whole query can be
resolved with a key; This is because filesort() is usually faster than
retrieving all rows through an index.
*/
- if (select_limit >= table->file->records)
+ if (select_limit >= table_records)
{
keys= *table->file->keys_to_use_for_scanning();
- keys.merge(table->used_keys);
+ keys.merge(table->covering_keys);
/*
We are adding here also the index specified in FORCE INDEX clause,
@@ -12931,38 +13503,291 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
This is to allow users to use index in ORDER BY.
*/
if (table->force_index)
- keys.merge(table->keys_in_use_for_query);
+ keys.merge(group ? table->keys_in_use_for_group_by :
+ table->keys_in_use_for_order_by);
keys.intersect(usable_keys);
}
else
keys= usable_keys;
+ if (ref_key >= 0 && table->covering_keys.is_set(ref_key))
+ ref_key_quick_rows= table->quick_rows[ref_key];
+
+ read_time= join->best_positions[tablenr].read_time;
+ for (uint i= tablenr+1; i < join->tables; i++)
+ fanout*= join->best_positions[i].records_read; // fanout is always >= 1
+
for (nr=0; nr < table->s->keys ; nr++)
{
- uint not_used;
- if (keys.is_set(nr))
+ int direction;
+
+ if (keys.is_set(nr) &&
+ (direction= test_if_order_by_key(order, table, nr, &used_key_parts)))
{
- int flag;
- if ((flag= test_if_order_by_key(order, table, nr, &not_used)))
+ /*
+ At this point we are sure that ref_key is a non-ordering
+ key (where "ordering key" is a key that will return rows
+ in the order required by ORDER BY).
+ */
+ DBUG_ASSERT (ref_key != (int) nr);
+
+ bool is_covering= table->covering_keys.is_set(nr) ||
+ (nr == table->s->primary_key &&
+ table->file->primary_key_is_clustered());
+
+ /*
+ Don't use an index scan with ORDER BY without limit.
+ For GROUP BY without limit always use index scan
+ if there is a suitable index.
+ Why we hold to this asymmetry hardly can be explained
+ rationally. It's easy to demonstrate that using
+ temporary table + filesort could be cheaper for grouping
+ queries too.
+ */
+ if (is_covering ||
+ select_limit != HA_POS_ERROR ||
+ (ref_key < 0 && (group || table->force_index)))
+ {
+ double rec_per_key;
+ double index_scan_time;
+ KEY *keyinfo= tab->table->key_info+nr;
+ if (select_limit == HA_POS_ERROR)
+ select_limit= table_records;
+ if (group)
+ {
+ /*
+ Used_key_parts can be larger than keyinfo->key_parts
+ when using a secondary index clustered with a primary
+ key (e.g. as in Innodb).
+ See Bug #28591 for details.
+ */
+ rec_per_key= used_key_parts &&
+ used_key_parts <= keyinfo->key_parts ?
+ keyinfo->rec_per_key[used_key_parts-1] : 1;
+ set_if_bigger(rec_per_key, 1);
+ /*
+ With a grouping query each group containing on average
+ rec_per_key records produces only one row that will
+ be included into the result set.
+ */
+ if (select_limit > table_records/rec_per_key)
+ select_limit= table_records;
+ else
+ select_limit= (ha_rows) (select_limit*rec_per_key);
+ }
+ /*
+ If tab=tk is not the last joined table tn then to get first
+ L records from the result set we can expect to retrieve
+ only L/fanout(tk,tn) where fanout(tk,tn) says how many
+ rows in the record set on average will match each row tk.
+ Usually our estimates for fanouts are too pessimistic.
+ So the estimate for L/fanout(tk,tn) will be too optimistic
+ and as result we'll choose an index scan when using ref/range
+ access + filesort will be cheaper.
+ */
+ select_limit= (ha_rows) (select_limit < fanout ?
+ 1 : select_limit/fanout);
+ /*
+ We assume that each of the tested indexes is not correlated
+ with ref_key. Thus, to select first N records we have to scan
+ N/selectivity(ref_key) index entries.
+ selectivity(ref_key) = #scanned_records/#table_records =
+ table->quick_condition_rows/table_records.
+ In any case we can't select more than #table_records.
+ N/(table->quick_condition_rows/table_records) > table_records
+ <=> N > table->quick_condition_rows.
+ */
+ if (select_limit > table->quick_condition_rows)
+ select_limit= table_records;
+ else
+ select_limit= (ha_rows) (select_limit *
+ (double) table_records /
+ table->quick_condition_rows);
+ rec_per_key= keyinfo->rec_per_key[keyinfo->key_parts-1];
+ set_if_bigger(rec_per_key, 1);
+ /*
+ Here we take into account the fact that rows are
+ accessed in sequences rec_per_key records in each.
+ Rows in such a sequence are supposed to be ordered
+ by rowid/primary key. When reading the data
+ in a sequence we'll touch not more pages than the
+ table file contains.
+ TODO. Use the formula for a disk sweep sequential access
+ to calculate the cost of accessing data rows for one
+ index entry.
+ */
+ index_scan_time= select_limit/rec_per_key *
+ min(rec_per_key, table->file->scan_time());
+ if ((ref_key < 0 && is_covering) ||
+ (ref_key < 0 && (group || table->force_index)) ||
+ index_scan_time < read_time)
+ {
+ ha_rows quick_records= table_records;
+ if ((is_best_covering && !is_covering) ||
+ (is_covering && ref_key_quick_rows < select_limit))
+ continue;
+ if (table->quick_keys.is_set(nr))
+ quick_records= table->quick_rows[nr];
+ if (best_key < 0 ||
+ (select_limit <= min(quick_records,best_records) ?
+ keyinfo->key_parts < best_key_parts :
+ quick_records < best_records))
+ {
+ best_key= nr;
+ best_key_parts= keyinfo->key_parts;
+ saved_best_key_parts= used_key_parts;
+ best_records= quick_records;
+ is_best_covering= is_covering;
+ best_key_direction= direction;
+ }
+ }
+ }
+ }
+ }
+
+ /*
+ filesort() and join cache are usually faster than reading in
+ index order and not using join cache, except in case that chosen
+ index is clustered primary key.
+ */
+ if ((select_limit >= table_records) &&
+ (tab->type == JT_ALL &&
+ tab->join->tables > tab->join->const_tables + 1) &&
+ ((unsigned) best_key != table->s->primary_key ||
+ !table->file->primary_key_is_clustered()))
+ DBUG_RETURN(0);
+
+ if (best_key >= 0)
+ {
+ bool quick_created= FALSE;
+ if (table->quick_keys.is_set(best_key) && best_key != ref_key)
+ {
+ key_map map;
+ map.clear_all(); // Force the creation of quick select
+ map.set_bit(best_key); // only best_key.
+ quick_created=
+ select->test_quick_select(join->thd, map, 0,
+ join->select_options & OPTION_FOUND_ROWS ?
+ HA_POS_ERROR :
+ join->unit->select_limit_cnt,
+ 0) > 0;
+ }
+ if (!no_changes)
+ {
+ /*
+ If ref_key used index tree reading only ('Using index' in EXPLAIN),
+ and best_key doesn't, then revert the decision.
+ */
+ if (!table->covering_keys.is_set(best_key))
+ table->set_keyread(FALSE);
+ if (!quick_created)
{
- if (!no_changes)
- {
- tab->index=nr;
- tab->read_first_record= (flag > 0 ? join_read_first:
- join_read_last);
- tab->type=JT_NEXT; // Read with index_first(), index_next()
- if (table->used_keys.is_set(nr))
- {
- table->key_read=1;
- table->file->extra(HA_EXTRA_KEYREAD);
- }
- }
- DBUG_RETURN(1);
+ tab->index= best_key;
+ tab->read_first_record= best_key_direction > 0 ?
+ join_read_first:join_read_last;
+ tab->type=JT_NEXT; // Read with index_first(), index_next()
+ if (select && select->quick)
+ {
+ delete select->quick;
+ select->quick= 0;
+ }
+ if (table->covering_keys.is_set(best_key))
+ table->set_keyread(TRUE);
+ table->file->ha_index_or_rnd_end();
+ if (join->select_options & SELECT_DESCRIBE)
+ {
+ tab->ref.key= -1;
+ tab->ref.key_parts= 0;
+ if (select_limit < table_records)
+ tab->limit= select_limit;
+ }
+ }
+ else if (tab->type != JT_ALL)
+ {
+ /*
+ We're about to use a quick access to the table.
+ We need to change the access method so as the quick access
+ method is actually used.
+ */
+ DBUG_ASSERT(tab->select->quick);
+ tab->type=JT_ALL;
+ tab->use_quick=1;
+ tab->ref.key= -1;
+ tab->ref.key_parts=0; // Don't use ref key.
+ tab->read_first_record= join_init_read_record;
+ if (tab->is_using_loose_index_scan())
+ join->tmp_table_param.precomputed_group_by= TRUE;
+ /*
+ TODO: update the number of records in join->best_positions[tablenr]
+ */
+ }
+ }
+ order_direction= best_key_direction;
+ /*
+ saved_best_key_parts is actual number of used keyparts found by the
+ test_if_order_by_key function. It could differ from keyinfo->key_parts,
+ thus we have to restore it in case of desc order as it affects
+ QUICK_SELECT_DESC behaviour.
+ */
+ used_key_parts= (order_direction == -1) ?
+ saved_best_key_parts : best_key_parts;
+ }
+ else
+ DBUG_RETURN(0);
+ }
+
+check_reverse_order:
+ if (order_direction == -1) // If ORDER BY ... DESC
+ {
+ if (select && select->quick)
+ {
+ /*
+ Don't reverse the sort order, if it's already done.
+        (In some cases test_if_order_by_key() can be called multiple times.)
+ */
+ if (!select->quick->reverse_sorted())
+ {
+ QUICK_SELECT_DESC *tmp;
+ int quick_type= select->quick->get_type();
+ if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE ||
+ quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT ||
+ quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION ||
+ quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)
+ {
+ tab->limit= 0;
+ select->quick= save_quick;
+ DBUG_RETURN(0); // Use filesort
+ }
+
+ /* ORDER BY range_key DESC */
+ tmp= new QUICK_SELECT_DESC((QUICK_RANGE_SELECT*)(select->quick),
+ used_key_parts);
+ if (!tmp || tmp->error)
+ {
+ delete tmp;
+ select->quick= save_quick;
+ tab->limit= 0;
+ DBUG_RETURN(0); // Reverse sort not supported
}
+ select->quick=tmp;
}
}
+ else if (tab->type != JT_NEXT && tab->type != JT_REF_OR_NULL &&
+ tab->ref.key >= 0 && tab->ref.key_parts <= used_key_parts)
+ {
+ /*
+ SELECT * FROM t1 WHERE a=1 ORDER BY a DESC,b DESC
+
+ Use a traversal function that starts by reading the last row
+ with key part (A) and then traverse the index backwards.
+ */
+ tab->read_first_record= join_read_last_key;
+ tab->read_record.read_record= join_read_prev_same;
+ }
}
- DBUG_RETURN(0); // Can't use index.
+ else if (select && select->quick)
+ select->quick->sorted= 1;
+ DBUG_RETURN(1);
}
@@ -12977,6 +13802,8 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
filesort_limit Max number of rows that needs to be sorted
select_limit Max number of rows in final output
Used to decide if we should use index or not
+ is_order_by true if we are sorting on ORDER BY, false if GROUP BY
+ Used to decide if we should use index or not
IMPLEMENTATION
@@ -12995,7 +13822,8 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
static int
create_sort_index(THD *thd, JOIN *join, ORDER *order,
- ha_rows filesort_limit, ha_rows select_limit)
+ ha_rows filesort_limit, ha_rows select_limit,
+ bool is_order_by)
{
uint length= 0;
ha_rows examined_rows;
@@ -13019,8 +13847,10 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
if ((order != join->group_list ||
!(join->select_options & SELECT_BIG_RESULT) ||
(select && select->quick &&
- select->quick->get_type() == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)) &&
- test_if_skip_sort_order(tab,order,select_limit,0))
+ select->quick->get_type() == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)) &&
+ test_if_skip_sort_order(tab,order,select_limit,0,
+ is_order_by ? &table->keys_in_use_for_order_by :
+ &table->keys_in_use_for_group_by))
DBUG_RETURN(0);
for (ORDER *ord= join->order; ord; ord= ord->next)
length++;
@@ -13043,11 +13873,8 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
We can only use 'Only index' if quick key is same as ref_key
and in index_merge 'Only index' cannot be used
*/
- if (table->key_read && ((uint) tab->ref.key != select->quick->index))
- {
- table->key_read=0;
- table->file->extra(HA_EXTRA_NO_KEYREAD);
- }
+ if (((uint) tab->ref.key != select->quick->index))
+ table->set_keyread(FALSE);
}
else
{
@@ -13067,14 +13894,14 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
/* Fill schema tables with data before filesort if it's necessary */
if ((join->select_lex->options & OPTION_SCHEMA_TABLE) &&
- !thd->lex->describe &&
get_schema_tables_result(join, PROCESSED_BY_CREATE_SORT_INDEX))
goto err;
if (table->s->tmp_table)
table->file->info(HA_STATUS_VARIABLE); // Get record count
table->sort.found_records=filesort(thd, table,join->sortorder, length,
- select, filesort_limit, &examined_rows);
+ select, filesort_limit, 0,
+ &examined_rows);
tab->records= table->sort.found_records; // For SQL_CALC_ROWS
if (select)
{
@@ -13094,6 +13921,7 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
select->cleanup(); // filesort did select
tab->select= 0;
+ table->quick_keys.clear_all(); // as far as we cleanup select->quick
table->sort.io_cache= tablesort_result_cache;
}
tab->select_cond=0;
@@ -13102,28 +13930,24 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
tab->type=JT_ALL; // Read with normal read_record
tab->read_first_record= join_init_read_record;
tab->join->examined_rows+=examined_rows;
- if (table->key_read) // Restore if we used indexes
- {
- table->key_read=0;
- table->file->extra(HA_EXTRA_NO_KEYREAD);
- }
+ table->set_keyread(FALSE); // Restore if we used indexes
DBUG_RETURN(table->sort.found_records == HA_POS_ERROR);
err:
DBUG_RETURN(-1);
}
-/*
- Add the HAVING criteria to table->select
+#ifdef NOT_YET
+/**
+ Add the HAVING criteria to table->select.
*/
-#ifdef NOT_YET
static bool fix_having(JOIN *join, Item **having)
{
(*having)->update_used_tables(); // Some tables may have been const
JOIN_TAB *table=&join->join_tab[join->const_tables];
table_map used_tables= join->const_table_map | table->table->map;
- DBUG_EXECUTE("where",print_where(*having,"having"););
+ DBUG_EXECUTE("where",print_where(*having,"having", QT_ORDINARY););
Item* sort_table_cond=make_cond_for_table(*having,used_tables,used_tables);
if (sort_table_cond)
{
@@ -13140,9 +13964,11 @@ static bool fix_having(JOIN *join, Item **having)
table->select_cond=table->select->cond;
table->select_cond->top_level_item();
DBUG_EXECUTE("where",print_where(table->select_cond,
- "select and having"););
+ "select and having",
+ QT_ORDINARY););
*having=make_cond_for_table(*having,~ (table_map) 0,~used_tables);
- DBUG_EXECUTE("where",print_where(*having,"having after make_cond"););
+ DBUG_EXECUTE("where",
+ print_where(*having,"having after make_cond", QT_ORDINARY););
}
return 0;
}
@@ -13215,15 +14041,16 @@ remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields, Item *having)
DBUG_RETURN(0);
}
Field **first_field=entry->field+entry->s->fields - field_count;
- offset= field_count ?
- entry->field[entry->s->fields - field_count]->offset() : 0;
+ offset= (field_count ?
+ entry->field[entry->s->fields - field_count]->
+ offset(entry->record[0]) : 0);
reclength=entry->s->reclength-offset;
free_io_cache(entry); // Safety
entry->file->info(HA_STATUS_VARIABLE);
- if (entry->s->db_type == DB_TYPE_HEAP ||
+ if (entry->s->db_type() == heap_hton ||
(!entry->s->blob_fields &&
- ((ALIGN_SIZE(reclength) + HASH_OVERHEAD) * entry->file->records <
+ ((ALIGN_SIZE(reclength) + HASH_OVERHEAD) * entry->file->stats.records <
thd->variables.sortbuff_size)))
error=remove_dup_with_hash_index(join->thd, entry,
field_count, first_field,
@@ -13242,7 +14069,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
{
handler *file=table->file;
char *org_record,*new_record;
- byte *record;
+ uchar *record;
int error;
ulong reclength= table->s->reclength-offset;
DBUG_ENTER("remove_dup_with_compare");
@@ -13263,14 +14090,17 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
if (error)
{
if (error == HA_ERR_RECORD_DELETED)
- continue;
+ {
+ error= file->rnd_next(record);
+ continue;
+ }
if (error == HA_ERR_END_OF_FILE)
break;
goto err;
}
if (having && !having->val_int())
{
- if ((error=file->delete_row(record)))
+ if ((error=file->ha_delete_row(record)))
goto err;
error=file->rnd_next(record);
continue;
@@ -13297,7 +14127,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
}
if (compare_record(table, first_field) == 0)
{
- if ((error=file->delete_row(record)))
+ if ((error=file->ha_delete_row(record)))
goto err;
}
else if (!found)
@@ -13322,9 +14152,11 @@ err:
}
-/*
- Generate a hash index for each row to quickly find duplicate rows
- Note that this will not work on tables with blobs!
+/**
+ Generate a hash index for each row to quickly find duplicate rows.
+
+ @note
+ Note that this will not work on tables with blobs!
*/
static int remove_dup_with_hash_index(THD *thd, TABLE *table,
@@ -13333,7 +14165,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
ulong key_length,
Item *having)
{
- byte *key_buffer, *key_pos, *record=table->record[0];
+ uchar *key_buffer, *key_pos, *record=table->record[0];
int error;
handler *file= table->file;
ulong extra_length= ALIGN_SIZE(key_length)-key_length;
@@ -13344,7 +14176,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
if (!my_multi_malloc(MYF(MY_WME),
&key_buffer,
(uint) ((key_length + extra_length) *
- (long) file->records),
+ (long) file->stats.records),
&field_lengths,
(uint) (field_count*sizeof(*field_lengths)),
NullS))
@@ -13366,7 +14198,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
extra_length= ALIGN_SIZE(key_length)-key_length;
}
- if (hash_init(&hash, &my_charset_bin, (uint) file->records, 0,
+ if (hash_init(&hash, &my_charset_bin, (uint) file->stats.records, 0,
key_length, (hash_get_key) 0, 0, 0))
{
my_free((char*) key_buffer,MYF(0));
@@ -13377,7 +14209,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
key_pos=key_buffer;
for (;;)
{
- byte *org_key_pos;
+ uchar *org_key_pos;
if (thd->killed)
{
thd->send_kill_message();
@@ -13394,7 +14226,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
}
if (having && !having->val_int())
{
- if ((error=file->delete_row(record)))
+ if ((error=file->ha_delete_row(record)))
goto err;
continue;
}
@@ -13404,18 +14236,21 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
field_length=field_lengths;
for (Field **ptr= first_field ; *ptr ; ptr++)
{
- (*ptr)->sort_string((char*) key_pos,*field_length);
+ (*ptr)->sort_string(key_pos,*field_length);
key_pos+= *field_length++;
}
/* Check if it exists before */
if (hash_search(&hash, org_key_pos, key_length))
{
/* Duplicated found ; Remove the row */
- if ((error=file->delete_row(record)))
+ if ((error=file->ha_delete_row(record)))
goto err;
}
else
- (void) my_hash_insert(&hash, org_key_pos);
+ {
+ if (my_hash_insert(&hash, org_key_pos))
+ goto err;
+ }
key_pos+=extra_length;
}
my_free((char*) key_buffer,MYF(0));
@@ -13463,7 +14298,7 @@ SORT_FIELD *make_unireg_sortorder(ORDER *order, uint *length,
pos->field= ((Item_sum*) item)->get_tmp_table_field();
else if (item->type() == Item::COPY_STR_ITEM)
{ // Blob patch
- pos->item= ((Item_copy_string*) item)->item;
+ pos->item= ((Item_copy*) item)->get_item();
}
else
pos->item= *order->item;
@@ -13508,7 +14343,7 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count)
sizeof(CACHE_FIELD*))))
{
- my_free((gptr) cache->buff,MYF(0)); /* purecov: inspected */
+ my_free((uchar*) cache->buff,MYF(0)); /* purecov: inspected */
cache->buff=0; /* purecov: inspected */
DBUG_RETURN(1); /* purecov: inspected */
}
@@ -13521,18 +14356,18 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count)
{
bool have_bit_fields= FALSE;
uint null_fields=0,used_fields;
-
Field **f_ptr,*field;
+ MY_BITMAP *read_set= tables[i].table->read_set;
for (f_ptr=tables[i].table->field,used_fields=tables[i].used_fields ;
used_fields ;
f_ptr++)
{
field= *f_ptr;
- if (field->query_id == thd->query_id)
+ if (bitmap_is_set(read_set, field->field_index))
{
used_fields--;
length+=field->fill_cache_field(copy);
- if (copy->blob_field)
+ if (copy->type == CACHE_BLOB)
(*blob_ptr++)=copy;
if (field->real_maybe_null())
null_fields++;
@@ -13545,10 +14380,10 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count)
/* Copy null bits from table */
if (null_fields || have_bit_fields)
{ /* must copy null bits */
- copy->str=(char*) tables[i].table->null_flags;
+ copy->str= tables[i].table->null_flags;
copy->length= tables[i].table->s->null_bytes;
- copy->strip=0;
- copy->blob_field=0;
+ copy->type=0;
+ copy->field=0;
length+=copy->length;
copy++;
cache->fields++;
@@ -13556,10 +14391,10 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count)
/* If outer join table, copy null_row flag */
if (tables[i].table->maybe_null)
{
- copy->str= (char*) &tables[i].table->null_row;
+ copy->str= (uchar*) &tables[i].table->null_row;
copy->length=sizeof(tables[i].table->null_row);
- copy->strip=0;
- copy->blob_field=0;
+ copy->type=0;
+ copy->field=0;
length+=copy->length;
copy++;
cache->fields++;
@@ -13584,9 +14419,10 @@ used_blob_length(CACHE_FIELD **ptr)
uint length,blob_length;
for (length=0 ; *ptr ; ptr++)
{
- (*ptr)->blob_length=blob_length=(*ptr)->blob_field->get_length();
+ Field_blob *field_blob= (Field_blob *) (*ptr)->field;
+ (*ptr)->blob_length=blob_length= field_blob->get_length();
length+=blob_length;
- (*ptr)->blob_field->get_ptr(&(*ptr)->str);
+ field_blob->get_ptr(&(*ptr)->str);
}
return length;
}
@@ -13615,30 +14451,35 @@ store_record_in_cache(JOIN_CACHE *cache)
cache->records++;
for (copy=cache->field ; copy < end_field; copy++)
{
- if (copy->blob_field)
+ if (copy->type == CACHE_BLOB)
{
+ Field_blob *blob_field= (Field_blob *) copy->field;
if (last_record)
{
- copy->blob_field->get_image((char*) pos,copy->length+sizeof(char*),
- copy->blob_field->charset());
+ blob_field->get_image(pos, copy->length+sizeof(char*),
+ blob_field->charset());
pos+=copy->length+sizeof(char*);
}
else
{
- copy->blob_field->get_image((char*) pos,copy->length, // blob length
- copy->blob_field->charset());
+ blob_field->get_image(pos, copy->length, // blob length
+ blob_field->charset());
memcpy(pos+copy->length,copy->str,copy->blob_length); // Blob data
pos+=copy->length+copy->blob_length;
}
}
else
{
- if (copy->strip)
+ if (copy->type == CACHE_STRIPPED)
{
- char *str,*end;
- for (str=copy->str,end= str+copy->length;
- end > str && end[-1] == ' ' ;
- end--) ;
+ uchar *str,*end;
+ Field *field= copy->field;
+ if (field && field->maybe_null() && field->is_null())
+ end= str= copy->str;
+ else
+ for (str=copy->str,end= str+copy->length;
+ end > str && end[-1] == ' ' ;
+ end--) ;
length=(uint) (end-str);
memcpy(pos+2, str, length);
int2store(pos, length);
@@ -13687,23 +14528,24 @@ read_cached_record(JOIN_TAB *tab)
copy < end_field;
copy++)
{
- if (copy->blob_field)
+ if (copy->type == CACHE_BLOB)
{
+ Field_blob *blob_field= (Field_blob *) copy->field;
if (last_record)
{
- copy->blob_field->set_image((char*) pos,copy->length+sizeof(char*),
- copy->blob_field->charset());
+ blob_field->set_image(pos, copy->length+sizeof(char*),
+ blob_field->charset());
pos+=copy->length+sizeof(char*);
}
else
{
- copy->blob_field->set_ptr((char*) pos,(char*) pos+copy->length);
- pos+=copy->length+copy->blob_field->get_length();
+ blob_field->set_ptr(pos, pos+copy->length);
+ pos+=copy->length + blob_field->get_length();
}
}
else
{
- if (copy->strip)
+ if (copy->type == CACHE_STRIPPED)
{
length= uint2korr(pos);
memcpy(copy->str, pos+2, length);
@@ -13730,7 +14572,8 @@ cmp_buffer_with_ref(JOIN_TAB *tab)
{
memcpy(tab->ref.key_buff2, tab->ref.key_buff, tab->ref.key_length);
}
- if ((tab->ref.key_err= cp_buffer_from_ref(tab->join->thd, &tab->ref)) ||
+ if ((tab->ref.key_err= cp_buffer_from_ref(tab->join->thd, tab->table,
+ &tab->ref)) ||
diff)
return 1;
return memcmp(tab->ref.key_buff2, tab->ref.key_buff, tab->ref.key_length)
@@ -13739,20 +14582,24 @@ cmp_buffer_with_ref(JOIN_TAB *tab)
bool
-cp_buffer_from_ref(THD *thd, TABLE_REF *ref)
+cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref)
{
enum enum_check_fields save_count_cuted_fields= thd->count_cuted_fields;
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set);
+ bool result= 0;
+
for (store_key **copy=ref->key_copy ; *copy ; copy++)
{
if ((*copy)->copy() & 1)
{
- thd->count_cuted_fields= save_count_cuted_fields;
- return 1; // Something went wrong
+ result= 1;
+ break;
}
}
thd->count_cuted_fields= save_count_cuted_fields;
- return 0;
+ dbug_tmp_restore_column_map(table->write_set, old_map);
+ return result;
}
@@ -13760,37 +14607,37 @@ cp_buffer_from_ref(THD *thd, TABLE_REF *ref)
Group and order functions
*****************************************************************************/
-/*
+/**
Resolve an ORDER BY or GROUP BY column reference.
- SYNOPSIS
- find_order_in_list()
- thd [in] Pointer to current thread structure
- ref_pointer_array [in/out] All select, group and order by fields
- tables [in] List of tables to search in (usually FROM clause)
- order [in] Column reference to be resolved
- fields [in] List of fields to search in (usually SELECT list)
- all_fields [in/out] All select, group and order by fields
- is_group_field [in] True if order is a GROUP field, false if
- ORDER by field
-
- DESCRIPTION
- Given a column reference (represented by 'order') from a GROUP BY or ORDER
- BY clause, find the actual column it represents. If the column being
- resolved is from the GROUP BY clause, the procedure searches the SELECT
- list 'fields' and the columns in the FROM list 'tables'. If 'order' is from
- the ORDER BY clause, only the SELECT list is being searched.
-
- If 'order' is resolved to an Item, then order->item is set to the found
- Item. If there is no item for the found column (that is, it was resolved
- into a table field), order->item is 'fixed' and is added to all_fields and
- ref_pointer_array.
-
- RETURN
+ Given a column reference (represented by 'order') from a GROUP BY or ORDER
+ BY clause, find the actual column it represents. If the column being
+ resolved is from the GROUP BY clause, the procedure searches the SELECT
+ list 'fields' and the columns in the FROM list 'tables'. If 'order' is from
+ the ORDER BY clause, only the SELECT list is being searched.
+
+ If 'order' is resolved to an Item, then order->item is set to the found
+ Item. If there is no item for the found column (that is, it was resolved
+ into a table field), order->item is 'fixed' and is added to all_fields and
+ ref_pointer_array.
+
+ ref_pointer_array and all_fields are updated.
+
+ @param[in] thd Pointer to current thread structure
+ @param[in,out] ref_pointer_array All select, group and order by fields
+ @param[in] tables List of tables to search in (usually
+ FROM clause)
+ @param[in] order Column reference to be resolved
+ @param[in] fields List of fields to search in (usually
+ SELECT list)
+ @param[in,out] all_fields All select, group and order by fields
+ @param[in] is_group_field True if order is a GROUP field, false if
+ ORDER by field
+
+ @retval
FALSE if OK
+ @retval
TRUE if error occurred
-
- ref_pointer_array and all_fields are updated
*/
static bool
@@ -13909,11 +14756,29 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
We check order_item->fixed because Item_func_group_concat can put
arguments for which fix_fields already was called.
+
+ group_fix_field= TRUE is to resolve aliases from the SELECT list
+ without creating of Item_ref-s: JOIN::exec() wraps aliased items
+ in SELECT list with Item_copy items. To re-evaluate such a tree
+ that includes Item_copy items we have to refresh Item_copy caches,
+ but:
+ - filesort() never refresh Item_copy items,
+ - end_send_group() checks every record for group boundary by the
+ test_if_group_changed function that obtain data from these
+ Item_copy items, but the copy_fields function that
+ refreshes Item copy items is called after group boundaries only -
+ that is a vicious circle.
+ So we prevent inclusion of Item_copy items.
*/
- if (!order_item->fixed &&
+ bool save_group_fix_field= thd->lex->current_select->group_fix_field;
+ if (is_group_field)
+ thd->lex->current_select->group_fix_field= TRUE;
+ bool ret= (!order_item->fixed &&
(order_item->fix_fields(thd, order->item) ||
(order_item= *order->item)->check_cols(1) ||
- thd->is_fatal_error))
+ thd->is_fatal_error));
+ thd->lex->current_select->group_fix_field= save_group_fix_field;
+ if (ret)
return TRUE; /* Wrong field. */
uint el= all_fields.elements;
@@ -13924,9 +14789,11 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
}
-/*
- Change order to point at item in select list. If item isn't a number
- and doesn't exits in the select list, add it the the field list.
+/**
+ Change order to point at item in select list.
+
+ If item isn't a number and doesn't exist in the select list, add it to
+ the field list.
*/
int setup_order(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
@@ -13943,27 +14810,30 @@ int setup_order(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
}
-/*
+/**
Intitialize the GROUP BY list.
- SYNOPSIS
- setup_group()
- thd Thread handler
- ref_pointer_array We store references to all fields that was not in
- 'fields' here.
- fields All fields in the select part. Any item in 'order'
- that is part of these list is replaced by a pointer
- to this fields.
- all_fields Total list of all unique fields used by the select.
- All items in 'order' that was not part of fields will
- be added first to this list.
- order The fields we should do GROUP BY on.
- hidden_group_fields Pointer to flag that is set to 1 if we added any fields
- to all_fields.
-
- RETURN
- 0 ok
- 1 error (probably out of memory)
+ @param thd Thread handler
+ @param ref_pointer_array We store references to all fields that was
+ not in 'fields' here.
+ @param fields All fields in the select part. Any item in
+ 'order' that is part of these list is replaced
+ by a pointer to this fields.
+ @param all_fields Total list of all unique fields used by the
+ select. All items in 'order' that was not part
+ of fields will be added first to this list.
+ @param order The fields we should do GROUP BY on.
+ @param hidden_group_fields Pointer to flag that is set to 1 if we added
+ any fields to all_fields.
+
+ @todo
+ change ER_WRONG_FIELD_WITH_GROUP to more detailed
+ ER_NON_GROUPING_FIELD_USED
+
+ @retval
+ 0 ok
+ @retval
+ 1 error (probably out of memory)
*/
int
@@ -14056,8 +14926,11 @@ next_field:
return 0;
}
-/*
- Add fields with aren't used at start of field list. Return FALSE if ok
+/**
+ Add fields which aren't used at start of field list.
+
+ @return
+ FALSE if ok
*/
static bool
@@ -14065,11 +14938,11 @@ setup_new_fields(THD *thd, List<Item> &fields,
List<Item> &all_fields, ORDER *new_field)
{
Item **item;
- DBUG_ENTER("setup_new_fields");
-
- thd->set_query_id=1; // Not really needed, but...
uint counter;
enum_resolution_type not_used;
+ DBUG_ENTER("setup_new_fields");
+
+ thd->mark_used_columns= MARK_COLUMNS_READ; // Not really needed, but...
for (; new_field ; new_field= new_field->next)
{
if ((item= find_item_in_list(*new_field->item, fields, &counter,
@@ -14087,10 +14960,11 @@ setup_new_fields(THD *thd, List<Item> &fields,
DBUG_RETURN(0);
}
-/*
- Create a group by that consist of all non const fields. Try to use
- the fields in the order given by 'order' to allow one to optimize
- away 'order by'.
+/**
+ Create a group by that consists of all non const fields.
+
+ Try to use the fields in the order given by 'order' to allow one to
+ optimize away 'order by'.
*/
static ORDER *
@@ -14177,9 +15051,9 @@ next_item:
}
-/*****************************************************************************
- Update join with count of the different type of fields
-*****************************************************************************/
+/**
+ Update join with count of the different type of fields.
+*/
void
count_field_types(SELECT_LEX *select_lex, TMP_TABLE_PARAM *param,
@@ -14229,8 +15103,9 @@ count_field_types(SELECT_LEX *select_lex, TMP_TABLE_PARAM *param,
}
-/*
- Return 1 if second is a subpart of first argument
+/**
+ Return 1 if second is a subpart of first argument.
+
If first parts has different direction, change it to second part
(group is sorted like order)
*/
@@ -14248,10 +15123,9 @@ test_if_subpart(ORDER *a,ORDER *b)
return test(!b);
}
-/*
+/**
Return table number if there is only one table in sort order
- and group and order is compatible
- else return 0;
+ and group and order is compatible, else return 0.
*/
static TABLE *
@@ -14282,7 +15156,9 @@ get_sort_by_table(ORDER *a,ORDER *b,TABLE_LIST *tables)
}
- /* calc how big buffer we need for comparing group entries */
+/**
+ Calculate how big a buffer we need for comparing group entries.
+*/
static void
calc_group_buffer(JOIN *join,ORDER *group)
@@ -14298,11 +15174,11 @@ calc_group_buffer(JOIN *join,ORDER *group)
if (field)
{
enum_field_types type;
- if ((type= field->type()) == FIELD_TYPE_BLOB)
+ if ((type= field->type()) == MYSQL_TYPE_BLOB)
key_length+=MAX_BLOB_WIDTH; // Can't be used as a key
else if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_VAR_STRING)
key_length+= field->field_length + HA_KEY_BLOB_LENGTH;
- else if (type == FIELD_TYPE_BIT)
+ else if (type == MYSQL_TYPE_BIT)
{
/* Bit is usually stored as a longlong key for group fields */
key_length+= 8; // Big enough
@@ -14339,6 +15215,8 @@ calc_group_buffer(JOIN *join,ORDER *group)
{
key_length+= 8;
}
+ else if (type == MYSQL_TYPE_BLOB)
+ key_length+= MAX_BLOB_WIDTH; // Can't be used as a key
else
{
/*
@@ -14353,6 +15231,7 @@ calc_group_buffer(JOIN *join,ORDER *group)
default:
/* This case should never be choosen */
DBUG_ASSERT(0);
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
join->thd->fatal_error();
}
}
@@ -14366,17 +15245,17 @@ calc_group_buffer(JOIN *join,ORDER *group)
}
-/*
- allocate group fields or take prepared (cached)
+/**
+ Allocate group fields or take prepared (cached).
- SYNOPSIS
- make_group_fields()
- main_join - join of current select
- curr_join - current join (join of current select or temporary copy of it)
+ @param main_join join of current select
+ @param curr_join current join (join of current select or temporary copy
+ of it)
- RETURN
- 0 - ok
- 1 - failed
+ @retval
+ 0 ok
+ @retval
+ 1 failed
*/
static bool
@@ -14397,9 +15276,10 @@ make_group_fields(JOIN *main_join, JOIN *curr_join)
}
-/*
- Get a list of buffers for saveing last group
- Groups are saved in reverse order for easyer check loop
+/**
+ Get a list of buffers for saving last group.
+
+ Groups are saved in reverse order for easier check loop.
*/
static bool
@@ -14437,27 +15317,33 @@ test_if_group_changed(List<Cached_item> &list)
}
-/*
- Setup copy_fields to save fields at start of new group
+/**
+ Setup copy_fields to save fields at start of new group.
- setup_copy_fields()
- thd - THD pointer
- param - temporary table parameters
- ref_pointer_array - array of pointers to top elements of filed list
- res_selected_fields - new list of items of select item list
- res_all_fields - new list of all items
- elements - number of elements in select item list
- all_fields - all fields list
+ Setup copy_fields to save fields at start of new group
- DESCRIPTION
- Setup copy_fields to save fields at start of new group
- Only FIELD_ITEM:s and FUNC_ITEM:s needs to be saved between groups.
- Change old item_field to use a new field with points at saved fieldvalue
- This function is only called before use of send_fields
-
- RETURN
- 0 - ok
- !=0 - error
+ Only FIELD_ITEM:s and FUNC_ITEM:s need to be saved between groups.
+ Change old item_field to use a new field which points at the saved field value.
+ This function is only called before use of send_fields.
+
+ @param thd THD pointer
+ @param param temporary table parameters
+ @param ref_pointer_array array of pointers to top elements of field list
+ @param res_selected_fields new list of items of select item list
+ @param res_all_fields new list of all items
+ @param elements number of elements in select item list
+ @param all_fields all fields list
+
+ @todo
+ In most cases this result will be sent to the user.
+ This should be changed to use copy_int or copy_real depending
+ on how the value is to be used: In some cases this may be an
+ argument in a group function, like: IF(ISNULL(col),0,COUNT(*))
+
+ @retval
+ 0 ok
+ @retval
+ !=0 error
*/
bool
@@ -14486,7 +15372,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
for (i= 0; (pos= li++); i++)
{
Field *field;
- char *tmp;
+ uchar *tmp;
Item *real_pos= pos->real_item();
/*
Aggregate functions can be substituted for fields (by e.g. temp tables).
@@ -14510,7 +15396,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
pos= item;
if (item->field->flags & BLOB_FLAG)
{
- if (!(pos= new Item_copy_string(pos)))
+ if (!(pos= Item_copy::create(pos)))
goto err;
/*
Item_copy_string::copy for function can call
@@ -14537,8 +15423,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
another extra byte to not get warnings from purify in
Field_string::val_int
*/
- tmp= (char*) sql_alloc(field->pack_length()+2);
- if (!tmp)
+ if (!(tmp= (uchar*) sql_alloc(field->pack_length()+2)))
goto err;
if (copy)
{
@@ -14565,7 +15450,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
on how the value is to be used: In some cases this may be an
argument in a group function, like: IF(ISNULL(col),0,COUNT(*))
*/
- if (!(pos=new Item_copy_string(pos)))
+ if (!(pos= Item_copy::create(pos)))
goto err;
if (i < border) // HAVING, ORDER and GROUP BY
{
@@ -14601,8 +15486,8 @@ err2:
}
-/*
- Make a copy of all simple SELECT'ed items
+/**
+ Make a copy of all simple SELECT'ed items.
This is done at the start of a new group so that we can retrieve
these later when the group changes.
@@ -14618,20 +15503,19 @@ copy_fields(TMP_TABLE_PARAM *param)
(*ptr->do_copy)(ptr);
List_iterator_fast<Item> it(param->copy_funcs);
- Item_copy_string *item;
- while ((item = (Item_copy_string*) it++))
+ Item_copy *item;
+ while ((item = (Item_copy*) it++))
item->copy();
}
-/*
- Make an array of pointers to sum_functions to speed up sum_func calculation
-
- SYNOPSIS
- alloc_func_list()
+/**
+ Make an array of pointers to sum_functions to speed up
+ sum_func calculation.
- RETURN
+ @retval
0 ok
+ @retval
1 Error
*/
@@ -14676,18 +15560,17 @@ bool JOIN::alloc_func_list()
}
-/*
- Initialize 'sum_funcs' array with all Item_sum objects
+/**
+ Initialize 'sum_funcs' array with all Item_sum objects.
- SYNOPSIS
- make_sum_func_list()
- field_list All items
- send_fields Items in select list
- before_group_by Set to 1 if this is called before GROUP BY handling
- recompute Set to TRUE if sum_funcs must be recomputed
+ @param field_list All items
+ @param send_fields Items in select list
+ @param before_group_by Set to 1 if this is called before GROUP BY handling
+ @param recompute Set to TRUE if sum_funcs must be recomputed
- RETURN
+ @retval
0 ok
+ @retval
1 error
*/
@@ -14728,21 +15611,21 @@ bool JOIN::make_sum_func_list(List<Item> &field_list, List<Item> &send_fields,
}
-/*
+/**
Change all funcs and sum_funcs to fields in tmp table, and create
new list of all items.
- change_to_use_tmp_fields()
- thd - THD pointer
- ref_pointer_array - array of pointers to top elements of filed list
- res_selected_fields - new list of items of select item list
- res_all_fields - new list of all items
- elements - number of elements in select item list
- all_fields - all fields list
-
- RETURN
- 0 - ok
- !=0 - error
+ @param thd THD pointer
+ @param ref_pointer_array array of pointers to top elements of field list
+ @param res_selected_fields new list of items of select item list
+ @param res_all_fields new list of all items
+ @param elements number of elements in select item list
+ @param all_fields all fields list
+
+ @retval
+ 0 ok
+ @retval
+ !=0 error
*/
static bool
@@ -14762,7 +15645,7 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array,
for (i= 0; (item= it++); i++)
{
Field *field;
-
+
if ((item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM) ||
(item->type() == Item::FUNC_ITEM &&
((Item_func*)item)->functype() == Item_func::SUSERVAR_FUNC))
@@ -14798,7 +15681,7 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array,
char buff[256];
String str(buff,sizeof(buff),&my_charset_bin);
str.length(0);
- item->print(&str);
+ item->print(&str, QT_ORDINARY);
item_field->name= sql_strmake(str.ptr(),str.length());
}
#endif
@@ -14819,20 +15702,20 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array,
}
-/*
- Change all sum_func refs to fields to point at fields in tmp table
- Change all funcs to be fields in tmp table
-
- change_refs_to_tmp_fields()
- thd - THD pointer
- ref_pointer_array - array of pointers to top elements of filed list
- res_selected_fields - new list of items of select item list
- res_all_fields - new list of all items
- elements - number of elements in select item list
- all_fields - all fields list
-
- RETURN
+/**
+ Change all sum_func refs to fields to point at fields in tmp table.
+ Change all funcs to be fields in tmp table.
+
+ @param thd THD pointer
+ @param ref_pointer_array array of pointers to top elements of field list
+ @param res_selected_fields new list of items of select item list
+ @param res_all_fields new list of all items
+ @param elements number of elements in select item list
+ @param all_fields all fields list
+
+ @retval
0 ok
+ @retval
1 error
*/
@@ -14870,16 +15753,15 @@ change_refs_to_tmp_fields(THD *thd, Item **ref_pointer_array,
******************************************************************************/
-/*
- Call ::setup for all sum functions
+/**
+ Call ::setup for all sum functions.
- SYNOPSIS
- setup_sum_funcs()
- thd thread handler
- func_ptr sum function list
+ @param thd thread handler
+ @param func_ptr sum function list
- RETURN
+ @retval
FALSE ok
+ @retval
TRUE error
*/
@@ -14905,7 +15787,7 @@ init_tmptable_sum_functions(Item_sum **func_ptr)
}
- /* Update record 0 in tmp_table from record 1 */
+/** Update record 0 in tmp_table from record 1. */
static void
update_tmptable_sum_func(Item_sum **func_ptr,
@@ -14917,7 +15799,7 @@ update_tmptable_sum_func(Item_sum **func_ptr,
}
- /* Copy result of sum functions to record in tmp_table */
+/** Copy result of sum functions to record in tmp_table. */
static void
copy_sum_funcs(Item_sum **func_ptr, Item_sum **end_ptr)
@@ -14956,20 +15838,45 @@ update_sum_func(Item_sum **func_ptr)
return 0;
}
- /* Copy result of functions to record in tmp_table */
+/**
+ Copy result of functions to record in tmp_table.
-void
-copy_funcs(Item **func_ptr)
+ Uses the thread pointer to check for errors in
+ some of the val_xxx() methods called by the
+ save_in_result_field() function.
+ TODO: make the Item::val_xxx() return error code
+
+ @param func_ptr array of the function Items to copy to the tmp table
+ @param thd pointer to the current thread for error checking
+ @retval
+ FALSE if OK
+ @retval
+ TRUE on error
+*/
+
+bool
+copy_funcs(Item **func_ptr, const THD *thd)
{
Item *func;
for (; (func = *func_ptr) ; func_ptr++)
+ {
func->save_in_result_field(1);
+ /*
+ Need to check the THD error state because Item::val_xxx() don't
+ return error code, but can generate errors
+ TODO: change it for a real status check when Item::val_xxx()
+ are extended to return status code.
+ */
+ if (thd->is_error())
+ return TRUE;
+ }
+ return FALSE;
}
-/*
+/**
Create a condition for a const reference and add this to the
- currenct select for the table
+ current select for the table.
*/
static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab)
@@ -14980,7 +15887,7 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab)
Item_cond_and *cond=new Item_cond_and();
TABLE *table=join_tab->table;
- int error;
+ int error= 0;
if (!cond)
DBUG_RETURN(TRUE);
@@ -14998,7 +15905,8 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab)
cond->fix_fields(thd, (Item**)&cond);
if (join_tab->select)
{
- error=(int) cond->add(join_tab->select->cond);
+ if (join_tab->select->cond)
+ error=(int) cond->add(join_tab->select->cond);
join_tab->select_cond=join_tab->select->cond=cond;
}
else if ((join_tab->select= make_select(join_tab->table, 0, 0, cond, 0,
@@ -15009,12 +15917,11 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab)
}
-/*
+/**
Free joins of subselect of this select.
- free_underlaid_joins()
- thd - THD pointer
- select - pointer to st_select_lex which subselects joins we will free
+ @param thd THD pointer
+ @param select pointer to st_select_lex which subselects joins we will free
*/
void free_underlaid_joins(THD *thd, SELECT_LEX *select)
@@ -15029,41 +15936,43 @@ void free_underlaid_joins(THD *thd, SELECT_LEX *select)
ROLLUP handling
****************************************************************************/
-/*
- Replace occurences of group by fields in an expression by ref items
+/**
+ Replace occurrences of group by fields in an expression by ref items.
- SYNOPSIS
- change_group_ref()
- thd reference to the context
- expr expression to make replacement
- group_list list of references to group by items
- changed out: returns 1 if item contains a replaced field item
+ The function replaces occurrences of group by fields in expr
+ by ref objects for these fields unless they are under aggregate
+ functions.
+ The function also corrects value of the the maybe_null attribute
+ for the items of all subexpressions containing group by fields.
- DESCRIPTION
- The function replaces occurrences of group by fields in expr
- by ref objects for these fields unless they are under aggregate
- functions.
- The function also corrects value of the the maybe_null attribute
- for the items of all subexpressions containing group by fields.
+ @b EXAMPLES
+ @code
+ SELECT a+1 FROM t1 GROUP BY a WITH ROLLUP
+ SELECT SUM(a)+a FROM t1 GROUP BY a WITH ROLLUP
+ @endcode
+
+ @b IMPLEMENTATION
- IMPLEMENTATION
The function recursively traverses the tree of the expr expression,
looks for occurrences of the group by fields that are not under
aggregate functions and replaces them for the corresponding ref items.
- NOTES
+ @note
This substitution is needed GROUP BY queries with ROLLUP if
SELECT list contains expressions over group by attributes.
- TODO: Some functions are not null-preserving. For those functions
+ @param thd reference to the context
+ @param expr expression to make replacement
+ @param group_list list of references to group by items
+ @param changed out: returns 1 if item contains a replaced field item
+
+ @todo
+ - TODO: Some functions are not null-preserving. For those functions
updating of the maybe_null attribute is an overkill.
- EXAMPLES
- SELECT a+1 FROM t1 GROUP BY a WITH ROLLUP
- SELECT SUM(a)+a FROM t1 GROUP BY a WITH ROLLUP
-
- RETURN
+ @retval
0 if ok
+ @retval
1 on error
*/
@@ -15112,7 +16021,7 @@ static bool change_group_ref(THD *thd, Item_func *expr, ORDER *group_list,
}
-/* Allocate memory needed for other rollup functions */
+/** Allocate memory needed for other rollup functions. */
bool JOIN::rollup_init()
{
@@ -15243,22 +16152,20 @@ bool JOIN::rollup_process_const_fields()
}
-/*
- Fill up rollup structures with pointers to fields to use
+/**
+ Fill up rollup structures with pointers to fields to use.
- SYNOPSIS
- rollup_make_fields()
- fields_arg List of all fields (hidden and real ones)
- sel_fields Pointer to selected fields
- func Store here a pointer to all fields
+ Creates copies of item_sum items for each sum level.
- IMPLEMENTATION:
- Creates copies of item_sum items for each sum level
+ @param fields_arg List of all fields (hidden and real ones)
+ @param sel_fields Pointer to selected fields
+ @param func Store here a pointer to all fields
- RETURN
- 0 if ok
- In this case func is pointing to next not used element.
- 1 on error
+ @retval
+ 0 if ok;
+ In this case func is pointing to next not used element.
+ @retval
+ 1 on error
*/
bool JOIN::rollup_make_fields(List<Item> &fields_arg, List<Item> &sel_fields,
@@ -15376,21 +16283,22 @@ bool JOIN::rollup_make_fields(List<Item> &fields_arg, List<Item> &sel_fields,
return 0;
}
-/*
- Send all rollup levels higher than the current one to the client
+/**
+ Send all rollup levels higher than the current one to the client.
- SYNOPSIS:
- rollup_send_data()
- idx Level we are on:
- 0 = Total sum level
- 1 = First group changed (a)
- 2 = Second group changed (a,b)
+ @b SAMPLE
+ @code
+ SELECT a, b, c SUM(b) FROM t1 GROUP BY a,b WITH ROLLUP
+ @endcode
- SAMPLE
- SELECT a, b, c SUM(b) FROM t1 GROUP BY a,b WITH ROLLUP
+ @param idx Level we are on:
+ - 0 = Total sum level
+ - 1 = First group changed (a)
+ - 2 = Second group changed (a,b)
- RETURN
- 0 ok
+ @retval
+ 0 ok
+ @retval
1 If send_data_failed()
*/
@@ -15416,22 +16324,23 @@ int JOIN::rollup_send_data(uint idx)
return 0;
}
-/*
- Write all rollup levels higher than the current one to a temp table
-
- SYNOPSIS:
- rollup_write_data()
- idx Level we are on:
- 0 = Total sum level
- 1 = First group changed (a)
- 2 = Second group changed (a,b)
- table reference to temp table
-
- SAMPLE
- SELECT a, b, SUM(c) FROM t1 GROUP BY a,b WITH ROLLUP
-
- RETURN
- 0 ok
+/**
+ Write all rollup levels higher than the current one to a temp table.
+
+ @b SAMPLE
+ @code
+ SELECT a, b, SUM(c) FROM t1 GROUP BY a,b WITH ROLLUP
+ @endcode
+
+ @param idx Level we are on:
+ - 0 = Total sum level
+ - 1 = First group changed (a)
+ - 2 = Second group changed (a,b)
+ @param table reference to temp table
+
+ @retval
+ 0 ok
+ @retval
1 if write_data_failed()
*/
@@ -15455,7 +16364,7 @@ int JOIN::rollup_write_data(uint idx, TABLE *table_arg)
item->save_in_result_field(1);
}
copy_sum_funcs(sum_funcs_end[i+1], sum_funcs_end[i]);
- if ((write_error= table_arg->file->write_row(table_arg->record[0])))
+ if ((write_error= table_arg->file->ha_write_row(table_arg->record[0])))
{
if (create_myisam_from_heap(thd, table_arg, &tmp_table_param,
write_error, 0))
@@ -15468,12 +16377,9 @@ int JOIN::rollup_write_data(uint idx, TABLE *table_arg)
return 0;
}
-/*
+/**
clear results if there are not rows found for group
(end_send_group/end_write_group)
-
- SYNOPSYS
- JOIN::clear()
*/
void JOIN::clear()
@@ -15489,11 +16395,11 @@ void JOIN::clear()
}
}
-/****************************************************************************
- EXPLAIN handling
+/**
+ EXPLAIN handling.
- Send a description about what how the select will be done to stdout
-****************************************************************************/
+ Send a description about what how the select will be done to stdout.
+*/
static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
bool distinct,const char *message)
@@ -15513,15 +16419,24 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
thd->server_status&= ~(SERVER_QUERY_NO_INDEX_USED | SERVER_QUERY_NO_GOOD_INDEX_USED);
join->unit->offset_limit_cnt= 0;
+ /*
+ NOTE: the number/types of items pushed into item_list must be in sync with
+ EXPLAIN column types as they're "defined" in THD::send_explain_fields()
+ */
if (message)
{
item_list.push_back(new Item_int((int32)
join->select_lex->select_number));
item_list.push_back(new Item_string(join->select_lex->type,
- (uint) strlen(join->select_lex->type), cs));
+ strlen(join->select_lex->type), cs));
for (uint i=0 ; i < 7; i++)
item_list.push_back(item_null);
- item_list.push_back(new Item_string(message,(uint) strlen(message),cs));
+ if (join->thd->lex->describe & DESCRIBE_PARTITIONS)
+ item_list.push_back(item_null);
+ if (join->thd->lex->describe & DESCRIBE_EXTENDED)
+ item_list.push_back(item_null);
+
+ item_list.push_back(new Item_string(message,strlen(message),cs));
if (result->send_data(item_list))
join->error= 1;
}
@@ -15540,7 +16455,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
item_list.push_back(new Item_null);
/* select_type */
item_list.push_back(new Item_string(join->select_lex->type,
- (uint) strlen(join->select_lex->type),
+ strlen(join->select_lex->type),
cs));
/* table */
{
@@ -15565,9 +16480,12 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
}
item_list.push_back(new Item_string(table_name_buffer, len, cs));
}
+ /* partitions */
+ if (join->thd->lex->describe & DESCRIBE_PARTITIONS)
+ item_list.push_back(item_null);
/* type */
item_list.push_back(new Item_string(join_type_str[JT_ALL],
- (uint) strlen(join_type_str[JT_ALL]),
+ strlen(join_type_str[JT_ALL]),
cs));
/* possible_keys */
item_list.push_back(item_null);
@@ -15577,6 +16495,9 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
item_list.push_back(item_null);
/* ref */
item_list.push_back(item_null);
+ /* in_rows */
+ if (join->thd->lex->describe & DESCRIBE_EXTENDED)
+ item_list.push_back(item_null);
/* rows */
item_list.push_back(item_null);
/* extra */
@@ -15596,6 +16517,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
{
JOIN_TAB *tab=join->join_tab+i;
TABLE *table=tab->table;
+ TABLE_LIST *table_list= tab->table->pos_in_table_list;
char buff[512];
char buff1[512], buff2[512], buff3[512];
char keylen_str_buf[64];
@@ -15616,7 +16538,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
join->select_lex->select_number));
/* select_type */
item_list.push_back(new Item_string(join->select_lex->type,
- (uint) strlen(join->select_lex->type),
+ strlen(join->select_lex->type),
cs));
if (tab->type == JT_ALL && tab->select && tab->select->quick)
{
@@ -15641,12 +16563,31 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
{
TABLE_LIST *real_table= table->pos_in_table_list;
item_list.push_back(new Item_string(real_table->alias,
- (uint) strlen(real_table->alias),
+ strlen(real_table->alias),
cs));
}
- /* type */
+ /* "partitions" column */
+ if (join->thd->lex->describe & DESCRIBE_PARTITIONS)
+ {
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ partition_info *part_info;
+ if (!table->derived_select_number &&
+ (part_info= table->part_info))
+ {
+ Item_string *item_str= new Item_string(cs);
+ make_used_partitions_str(part_info, &item_str->str_value);
+ item_list.push_back(item_str);
+ }
+ else
+ item_list.push_back(item_null);
+#else
+ /* just produce empty column if partitioning is not compiled in */
+ item_list.push_back(item_null);
+#endif
+ }
+ /* "type" column */
item_list.push_back(new Item_string(join_type_str[tab->type],
- (uint) strlen(join_type_str[tab->type]),
+ strlen(join_type_str[tab->type]),
cs));
/* Build "possible_keys" value and add it to item_list */
if (!tab->keys.is_clear_all())
@@ -15659,7 +16600,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
if (tmp1.length())
tmp1.append(',');
tmp1.append(table->key_info[j].name,
- (uint) strlen(table->key_info[j].name),
+ strlen(table->key_info[j].name),
system_charset_info);
}
}
@@ -15675,17 +16616,17 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
KEY *key_info=table->key_info+ tab->ref.key;
register uint length;
item_list.push_back(new Item_string(key_info->name,
- (uint) strlen(key_info->name),
+ strlen(key_info->name),
system_charset_info));
- length= (uint) (longlong2str(tab->ref.key_length, keylen_str_buf, 10) -
- keylen_str_buf);
+ length= longlong2str(tab->ref.key_length, keylen_str_buf, 10) -
+ keylen_str_buf;
item_list.push_back(new Item_string(keylen_str_buf, length,
system_charset_info));
for (store_key **ref=tab->ref.key_copy ; *ref ; ref++)
{
if (tmp2.length())
tmp2.append(',');
- tmp2.append((*ref)->name(), (uint) strlen((*ref)->name()),
+ tmp2.append((*ref)->name(), strlen((*ref)->name()),
system_charset_info);
}
item_list.push_back(new Item_string(tmp2.ptr(),tmp2.length(),cs));
@@ -15695,9 +16636,9 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
KEY *key_info=table->key_info+ tab->index;
register uint length;
item_list.push_back(new Item_string(key_info->name,
- (uint) strlen(key_info->name),cs));
- length= (uint) (longlong2str(key_info->key_length, keylen_str_buf, 10) -
- keylen_str_buf);
+ strlen(key_info->name),cs));
+ length= longlong2str(key_info->key_length, keylen_str_buf, 10) -
+ keylen_str_buf;
item_list.push_back(new Item_string(keylen_str_buf,
length,
system_charset_info));
@@ -15712,25 +16653,88 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
}
else
{
- item_list.push_back(item_null);
+ if (table_list->schema_table &&
+ table_list->schema_table->i_s_requested_object & OPTIMIZE_I_S_TABLE)
+ {
+ const char *tmp_buff;
+ int f_idx;
+ if (table_list->has_db_lookup_value)
+ {
+ f_idx= table_list->schema_table->idx_field1;
+ tmp_buff= table_list->schema_table->fields_info[f_idx].field_name;
+ tmp2.append(tmp_buff, strlen(tmp_buff), cs);
+ }
+ if (table_list->has_table_lookup_value)
+ {
+ if (table_list->has_db_lookup_value)
+ tmp2.append(',');
+ f_idx= table_list->schema_table->idx_field2;
+ tmp_buff= table_list->schema_table->fields_info[f_idx].field_name;
+ tmp2.append(tmp_buff, strlen(tmp_buff), cs);
+ }
+ if (tmp2.length())
+ item_list.push_back(new Item_string(tmp2.ptr(),tmp2.length(),cs));
+ else
+ item_list.push_back(item_null);
+ }
+ else
+ item_list.push_back(item_null);
item_list.push_back(item_null);
item_list.push_back(item_null);
}
+
/* Add "rows" field to item_list. */
- item_list.push_back(new Item_int((longlong) (ulonglong)
- join->best_positions[i]. records_read,
- MY_INT64_NUM_DECIMAL_DIGITS));
+ if (table_list->schema_table)
+ {
+ /* in_rows */
+ if (join->thd->lex->describe & DESCRIBE_EXTENDED)
+ item_list.push_back(item_null);
+ /* rows */
+ item_list.push_back(item_null);
+ }
+ else
+ {
+ ha_rows examined_rows;
+ if (tab->select && tab->select->quick)
+ examined_rows= tab->select->quick->records;
+ else if (tab->type == JT_NEXT || tab->type == JT_ALL)
+ {
+ if (tab->limit)
+ examined_rows= tab->limit;
+ else
+ {
+ tab->table->file->info(HA_STATUS_VARIABLE);
+ examined_rows= tab->table->file->stats.records;
+ }
+ }
+ else
+ examined_rows=(ha_rows)join->best_positions[i].records_read;
+
+ item_list.push_back(new Item_int((longlong) (ulonglong) examined_rows,
+ MY_INT64_NUM_DECIMAL_DIGITS));
+
+ /* Add "filtered" field to item_list. */
+ if (join->thd->lex->describe & DESCRIBE_EXTENDED)
+ {
+ float f= 0.0;
+ if (examined_rows)
+ f= (float) (100.0 * join->best_positions[i].records_read /
+ examined_rows);
+ item_list.push_back(new Item_float(f, 2));
+ }
+ }
+
/* Build "Extra" field and add it to item_list. */
my_bool key_read=table->key_read;
if ((tab->type == JT_NEXT || tab->type == JT_CONST) &&
- table->used_keys.is_set(tab->index))
+ table->covering_keys.is_set(tab->index))
key_read=1;
if (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT &&
!((QUICK_ROR_INTERSECT_SELECT*)tab->select->quick)->need_to_fetch_row)
key_read=1;
if (tab->info)
- item_list.push_back(new Item_string(tab->info,(uint) strlen(tab->info),cs));
+ item_list.push_back(new Item_string(tab->info,strlen(tab->info),cs));
else if (tab->packed_info & TAB_INFO_HAVE_VALUE)
{
if (tab->packed_info & TAB_INFO_USING_INDEX)
@@ -15780,13 +16784,31 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
if (thd->lex->describe & DESCRIBE_EXTENDED)
{
extra.append(STRING_WITH_LEN(": "));
- ((COND *)pushed_cond)->print(&extra);
+ ((COND *)pushed_cond)->print(&extra, QT_ORDINARY);
}
}
else
extra.append(STRING_WITH_LEN("; Using where"));
}
}
+ if (table_list->schema_table &&
+ table_list->schema_table->i_s_requested_object & OPTIMIZE_I_S_TABLE)
+ {
+ if (!table_list->table_open_method)
+ extra.append(STRING_WITH_LEN("; Skip_open_table"));
+ else if (table_list->table_open_method == OPEN_FRM_ONLY)
+ extra.append(STRING_WITH_LEN("; Open_frm_only"));
+ else
+ extra.append(STRING_WITH_LEN("; Open_full_table"));
+ if (table_list->has_db_lookup_value &&
+ table_list->has_table_lookup_value)
+ extra.append(STRING_WITH_LEN("; Scanned 0 databases"));
+ else if (table_list->has_db_lookup_value ||
+ table_list->has_table_lookup_value)
+ extra.append(STRING_WITH_LEN("; Scanned 1 database"));
+ else
+ extra.append(STRING_WITH_LEN("; Scanned all databases"));
+ }
if (key_read)
{
if (quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)
@@ -15817,6 +16839,8 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
break;
}
}
+ if (i > 0 && tab[-1].next_select == sub_select_cache)
+ extra.append(STRING_WITH_LEN("; Using join buffer"));
/* Skip initial "; "*/
const char *str= extra.ptr();
@@ -15873,7 +16897,7 @@ bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result)
"UNION")));
sl->options|= SELECT_DESCRIBE;
}
- if (first->next_select())
+ if (unit->is_union())
{
unit->fake_select_lex->select_number= UINT_MAX; // jost for initialization
unit->fake_select_lex->type= "UNION RESULT";
@@ -15887,33 +16911,35 @@ bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result)
thd->lex->current_select= first;
unit->set_limit(unit->global_parameters);
res= mysql_select(thd, &first->ref_pointer_array,
- (TABLE_LIST*) first->table_list.first,
+ first->table_list.first,
first->with_wild, first->item_list,
first->where,
first->order_list.elements +
first->group_list.elements,
- (ORDER*) first->order_list.first,
- (ORDER*) first->group_list.first,
+ first->order_list.first,
+ first->group_list.first,
first->having,
- (ORDER*) thd->lex->proc_list.first,
+ thd->lex->proc_list.first,
first->options | thd->options | SELECT_DESCRIBE,
result, unit, first);
}
- DBUG_RETURN(res || thd->net.report_error);
+ DBUG_RETURN(res || thd->is_error());
}
-/*
- Print joins from the FROM clause
+/**
+ Print joins from the FROM clause.
- SYNOPSIS
- print_join()
- thd thread handler
- str string where table should be printed
- tables list of tables in join
+ @param thd thread handler
+ @param str string where table should be printed
+ @param tables list of tables in join
+ @query_type type of the query is being generated
*/
-static void print_join(THD *thd, String *str, List<TABLE_LIST> *tables)
+static void print_join(THD *thd,
+ String *str,
+ List<TABLE_LIST> *tables,
+ enum_query_type query_type)
{
/* List is reversed => we should reverse it before using */
List_iterator_fast<TABLE_LIST> ti(*tables);
@@ -15926,7 +16952,7 @@ static void print_join(THD *thd, String *str, List<TABLE_LIST> *tables)
*t= ti++;
DBUG_ASSERT(tables->elements >= 1);
- (*table)->print(thd, str);
+ (*table)->print(thd, str, query_type);
TABLE_LIST **end= table + tables->elements;
for (TABLE_LIST **tbl= table + 1; tbl < end; tbl++)
@@ -15941,11 +16967,11 @@ static void print_join(THD *thd, String *str, List<TABLE_LIST> *tables)
str->append(STRING_WITH_LEN(" straight_join "));
else
str->append(STRING_WITH_LEN(" join "));
- curr->print(thd, str);
+ curr->print(thd, str, query_type);
if (curr->on_expr)
{
str->append(STRING_WITH_LEN(" on("));
- curr->on_expr->print(str);
+ curr->on_expr->print(str, query_type);
str->append(')');
}
}
@@ -15953,9 +16979,9 @@ static void print_join(THD *thd, String *str, List<TABLE_LIST> *tables)
/**
- @brief Print an index hint for a table
+ @brief Print an index hint
- @details Prints out the USE|FORCE|IGNORE index hints for a table.
+ @details Prints out the USE|FORCE|IGNORE index hint.
@param thd the current thread
@param[out] str appends the index hint here
@@ -15966,55 +16992,41 @@ static void print_join(THD *thd, String *str, List<TABLE_LIST> *tables)
*/
void
-TABLE_LIST::print_index_hint(THD *thd, String *str,
- const char *hint, uint32 hint_length,
- List<String> indexes)
+Index_hint::print(THD *thd, String *str)
{
- List_iterator_fast<String> li(indexes);
- String *idx;
- bool first= 1;
- size_t find_length= strlen(primary_key_name);
-
- str->append (' ');
- str->append (hint, hint_length);
+ switch (type)
+ {
+ case INDEX_HINT_IGNORE: str->append(STRING_WITH_LEN("IGNORE INDEX")); break;
+ case INDEX_HINT_USE: str->append(STRING_WITH_LEN("USE INDEX")); break;
+ case INDEX_HINT_FORCE: str->append(STRING_WITH_LEN("FORCE INDEX")); break;
+ }
str->append (STRING_WITH_LEN(" ("));
- while ((idx = li++))
+ if (key_name.length)
{
- if (first)
- first= 0;
- else
- str->append(',');
- /*
- It's safe to use ptr() here because we compare the length first
- and we rely that my_strcasecmp will not access more than length()
- chars from the string. See test_if_string_in_list() for similar
- implementation.
- */
- if (find_length == idx->length() &&
- !my_strcasecmp (system_charset_info, primary_key_name,
- idx->ptr()))
+ if (thd && !my_strnncoll(system_charset_info,
+ (const uchar *)key_name.str, key_name.length,
+ (const uchar *)primary_key_name,
+ strlen(primary_key_name)))
str->append(primary_key_name);
else
- append_identifier (thd, str, idx->ptr(), idx->length());
+ append_identifier(thd, str, key_name.str, key_name.length);
}
str->append(')');
}
-/*
- Print table as it should be in join list
+/**
+ Print table as it should be in join list.
- SYNOPSIS
- TABLE_LIST::print();
- str string where table should bbe printed
+ @param str string where table should be printed
*/
-void TABLE_LIST::print(THD *thd, String *str)
+void TABLE_LIST::print(THD *thd, String *str, enum_query_type query_type)
{
if (nested_join)
{
str->append('(');
- print_join(thd, str, &nested_join->join_list);
+ print_join(thd, str, &nested_join->join_list, query_type);
str->append(')');
}
else
@@ -16037,7 +17049,7 @@ void TABLE_LIST::print(THD *thd, String *str)
{
// A derived table
str->append('(');
- derived->print(str);
+ derived->print(str, query_type);
str->append(')');
cmp_name= ""; // Force printing of alias
}
@@ -16054,7 +17066,7 @@ void TABLE_LIST::print(THD *thd, String *str)
if (schema_table)
{
append_identifier(thd, str, schema_table_name,
- (uint) strlen(schema_table_name));
+ strlen(schema_table_name));
cmp_name= schema_table_name;
}
else
@@ -16079,24 +17091,25 @@ void TABLE_LIST::print(THD *thd, String *str)
}
}
- append_identifier(thd, str, t_alias, (uint) strlen(t_alias));
+ append_identifier(thd, str, t_alias, strlen(t_alias));
}
- if (use_index)
+ if (index_hints)
{
- if (force_index)
- print_index_hint(thd, str, STRING_WITH_LEN("FORCE INDEX"), *use_index);
- else
- print_index_hint(thd, str, STRING_WITH_LEN("USE INDEX"), *use_index);
- }
- if (ignore_index)
- print_index_hint (thd, str, STRING_WITH_LEN("IGNORE INDEX"), *ignore_index);
+ List_iterator<Index_hint> it(*index_hints);
+ Index_hint *hint;
+ while ((hint= it++))
+ {
+ str->append (STRING_WITH_LEN(" "));
+ hint->print (thd, str);
+ }
+ }
}
}
-void st_select_lex::print(THD *thd, String *str)
+void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
{
/* QQ: thd may not be set for sub queries, but this should be fixed */
if (!thd)
@@ -16144,7 +17157,17 @@ void st_select_lex::print(THD *thd, String *str)
first= 0;
else
str->append(',');
- item->print_item_w_name(str);
+
+ if (master_unit()->item && item->is_autogenerated_name)
+ {
+ /*
+ Do not print auto-generated aliases in subqueries. It has no purpose
+ in a view definition or other contexts where the query is printed.
+ */
+ item->print(str, query_type);
+ }
+ else
+ item->print_item_w_name(str, query_type);
}
/*
@@ -16155,7 +17178,7 @@ void st_select_lex::print(THD *thd, String *str)
{
str->append(STRING_WITH_LEN(" from "));
/* go through join tree */
- print_join(thd, str, &top_join_list);
+ print_join(thd, str, &top_join_list, query_type);
}
else if (where)
{
@@ -16174,7 +17197,7 @@ void st_select_lex::print(THD *thd, String *str)
{
str->append(STRING_WITH_LEN(" where "));
if (cur_where)
- cur_where->print(str);
+ cur_where->print(str, query_type);
else
str->append(cond_value != Item::COND_FALSE ? "1" : "0");
}
@@ -16183,7 +17206,7 @@ void st_select_lex::print(THD *thd, String *str)
if (group_list.elements)
{
str->append(STRING_WITH_LEN(" group by "));
- print_order(str, (ORDER *) group_list.first);
+ print_order(str, group_list.first, query_type);
switch (olap)
{
case CUBE_TYPE:
@@ -16206,7 +17229,7 @@ void st_select_lex::print(THD *thd, String *str)
{
str->append(STRING_WITH_LEN(" having "));
if (cur_having)
- cur_having->print(str);
+ cur_having->print(str, query_type);
else
str->append(having_value != Item::COND_FALSE ? "1" : "0");
}
@@ -16214,26 +17237,25 @@ void st_select_lex::print(THD *thd, String *str)
if (order_list.elements)
{
str->append(STRING_WITH_LEN(" order by "));
- print_order(str, (ORDER *) order_list.first);
+ print_order(str, order_list.first, query_type);
}
// limit
- print_limit(thd, str);
+ print_limit(thd, str, query_type);
// PROCEDURE unsupported here
}
-/*
- change select_result object of JOIN
+/**
+ change select_result object of JOIN.
- SYNOPSIS
- JOIN::change_result()
- res new select_result object
+ @param res new select_result object
- RETURN
- FALSE - OK
- TRUE - error
+ @retval
+ FALSE OK
+ @retval
+ TRUE error
*/
bool JOIN::change_result(select_result *res)
@@ -16247,3 +17269,7 @@ bool JOIN::change_result(select_result *res)
}
DBUG_RETURN(FALSE);
}
+
+/**
+ @} (end of group Query_Optimizer)
+*/