summaryrefslogtreecommitdiff
path: root/sql/sql_select.cc
diff options
context:
space:
mode:
Diffstat (limited to 'sql/sql_select.cc')
-rw-r--r--sql/sql_select.cc1624
1 files changed, 1036 insertions, 588 deletions
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 99feb1006fb..35b5665d1d0 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2016, Oracle and/or its affiliates.
- Copyright (c) 2009, 2019, MariaDB Corporation.
+ Copyright (c) 2009, 2020, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -43,7 +43,6 @@
#include "sql_base.h" // setup_wild, setup_fields, fill_record
#include "sql_parse.h" // check_stack_overrun
#include "sql_partition.h" // make_used_partitions_str
-#include "sql_acl.h" // *_ACL
#include "sql_test.h" // print_where, print_keyuse_array,
// print_sjm, print_plan, TEST_join
#include "records.h" // init_read_record, end_read_record
@@ -300,6 +299,14 @@ void set_postjoin_aggr_write_func(JOIN_TAB *tab);
static Item **get_sargable_cond(JOIN *join, TABLE *table);
+static
+bool build_notnull_conds_for_range_scans(JOIN *join, COND *cond,
+ table_map allowed);
+static
+void build_notnull_conds_for_inner_nest_of_outer_join(JOIN *join,
+ TABLE_LIST *nest_tbl);
+
+
#ifndef DBUG_OFF
/*
@@ -409,7 +416,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result,
*/
res= mysql_select(thd,
select_lex->table_list.first,
- select_lex->with_wild, select_lex->item_list,
+ select_lex->item_list,
select_lex->where,
select_lex->order_list.elements +
select_lex->group_list.elements,
@@ -563,9 +570,9 @@ fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select,
new_ref= direct_ref ?
new (thd->mem_root) Item_direct_ref(thd, ref->context, item_ref, ref->table_name,
- &ref->field_name, ref->alias_name_used) :
+ ref->field_name, ref->alias_name_used) :
new (thd->mem_root) Item_ref(thd, ref->context, item_ref, ref->table_name,
- &ref->field_name, ref->alias_name_used);
+ ref->field_name, ref->alias_name_used);
if (!new_ref)
return TRUE;
ref->outer_ref= new_ref;
@@ -773,11 +780,11 @@ Item* period_get_condition(THD *thd, TABLE_LIST *table, SELECT_LEX *select,
const LEX_CSTRING &fend= period->end_field(share)->field_name;
conds->field_start= newx Item_field(thd, &select->context,
- table->db.str, table->alias.str,
- thd->make_clex_string(fstart));
+ table->db, table->alias,
+ thd->strmake_lex_cstring(fstart));
conds->field_end= newx Item_field(thd, &select->context,
- table->db.str, table->alias.str,
- thd->make_clex_string(fend));
+ table->db, table->alias,
+ thd->strmake_lex_cstring(fend));
Item *cond1= NULL, *cond2= NULL, *cond3= NULL, *curr= NULL;
if (timestamp)
@@ -1044,7 +1051,7 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables)
storing vers_conditions as Item and make some magic related to
vers_system_time_t/VERS_TRX_ID at stage of fix_fields()
(this is large refactoring). */
- if (vers_conditions.resolve_units(thd))
+ if (vers_conditions.check_units(thd))
DBUG_RETURN(-1);
if (timestamps_only && (vers_conditions.start.unit == VERS_TRX_ID ||
vers_conditions.end.unit == VERS_TRX_ID))
@@ -1099,8 +1106,7 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables)
0 on success
*/
int
-JOIN::prepare(TABLE_LIST *tables_init,
- uint wild_num, COND *conds_init, uint og_num,
+JOIN::prepare(TABLE_LIST *tables_init, COND *conds_init, uint og_num,
ORDER *order_init, bool skip_order_by,
ORDER *group_init, Item *having_init,
ORDER *proc_param_init, SELECT_LEX *select_lex_arg,
@@ -1222,8 +1228,7 @@ JOIN::prepare(TABLE_LIST *tables_init,
real_og_num+= select_lex->order_list.elements;
DBUG_ASSERT(select_lex->hidden_bit_fields == 0);
- if (setup_wild(thd, tables_list, fields_list, &all_fields, wild_num,
- &select_lex->hidden_bit_fields))
+ if (setup_wild(thd, tables_list, fields_list, &all_fields, select_lex))
DBUG_RETURN(-1);
if (select_lex->setup_ref_array(thd, real_og_num))
DBUG_RETURN(-1);
@@ -1791,7 +1796,7 @@ JOIN::optimize_inner()
{
DBUG_ENTER("JOIN::optimize");
subq_exit_fl= false;
- do_send_rows = (unit->select_limit_cnt) ? 1 : 0;
+ do_send_rows = (unit->lim.get_select_limit()) ? 1 : 0;
DEBUG_SYNC(thd, "before_join_optimize");
@@ -1866,9 +1871,9 @@ JOIN::optimize_inner()
DBUG_RETURN(-1);
row_limit= ((select_distinct || order || group_list) ? HA_POS_ERROR :
- unit->select_limit_cnt);
+ unit->lim.get_select_limit());
/* select_limit is used to decide if we are likely to scan the whole table */
- select_limit= unit->select_limit_cnt;
+ select_limit= unit->lim.get_select_limit();
if (having || (select_options & OPTION_FOUND_ROWS))
select_limit= HA_POS_ERROR;
#ifdef HAVE_REF_TO_FIELDS // Not done yet
@@ -2109,9 +2114,10 @@ JOIN::optimize_inner()
thd->change_item_tree(&sel->having, having);
}
if (cond_value == Item::COND_FALSE || having_value == Item::COND_FALSE ||
- (!unit->select_limit_cnt && !(select_options & OPTION_FOUND_ROWS)))
+ (!unit->lim.get_select_limit() &&
+ !(select_options & OPTION_FOUND_ROWS)))
{ /* Impossible cond */
- if (unit->select_limit_cnt)
+ if (unit->lim.get_select_limit())
{
DBUG_PRINT("info", (having_value == Item::COND_FALSE ?
"Impossible HAVING" : "Impossible WHERE"));
@@ -3206,7 +3212,8 @@ bool JOIN::make_aggr_tables_info()
{
/* Check if the storage engine can intercept the query */
Query query= {&all_fields, select_distinct, tables_list, conds,
- group_list, order ? order : group_list, having};
+ group_list, order ? order : group_list, having,
+ &select_lex->master_unit()->lim};
group_by_handler *gbh= ht->create_group_by(thd, &query);
if (gbh)
@@ -3661,8 +3668,8 @@ bool JOIN::make_aggr_tables_info()
unit->select_limit_cnt == 1 (we only need one row in the result set)
*/
sort_tab->filesort->limit=
- (has_group_by || (join_tab + table_count > curr_tab + 1)) ?
- select_limit : unit->select_limit_cnt;
+ (has_group_by || (join_tab + top_join_tab_count > curr_tab + 1)) ?
+ select_limit : unit->lim.get_select_limit();
}
if (!only_const_tables() &&
!join_tab[const_tables].filesort &&
@@ -4047,9 +4054,6 @@ JOIN::reinit()
{
DBUG_ENTER("JOIN::reinit");
- unit->offset_limit_cnt= (ha_rows)(select_lex->offset_limit ?
- select_lex->offset_limit->val_uint() : 0);
-
first_record= false;
group_sent= false;
cleaned= false;
@@ -4171,9 +4175,8 @@ bool JOIN::save_explain_data(Explain_query *output, bool can_overwrite,
If there is SELECT in this statement with the same number it must be the
same SELECT
*/
- DBUG_SLOW_ASSERT(select_lex->select_number == UINT_MAX ||
- select_lex->select_number == INT_MAX ||
- !output ||
+ DBUG_ASSERT(select_lex->select_number == UINT_MAX ||
+ select_lex->select_number == INT_MAX || !output ||
!output->get_select(select_lex->select_number) ||
output->get_select(select_lex->select_number)->select_lex ==
select_lex);
@@ -4233,9 +4236,9 @@ void JOIN::exec()
select_lex->select_number))
dbug_serve_apcs(thd, 1);
);
- ANALYZE_START_TRACKING(&explain->time_tracker);
+ ANALYZE_START_TRACKING(thd, &explain->time_tracker);
exec_inner();
- ANALYZE_STOP_TRACKING(&explain->time_tracker);
+ ANALYZE_STOP_TRACKING(thd, &explain->time_tracker);
DBUG_EXECUTE_IF("show_explain_probe_join_exec_end",
if (dbug_user_var_equals_int(thd,
@@ -4320,7 +4323,8 @@ void JOIN::exec_inner()
{
if (do_send_rows &&
(procedure ? (procedure->send_row(procedure_fields_list) ||
- procedure->end_of_records()) : result->send_data(fields_list)> 0))
+ procedure->end_of_records()):
+ result->send_data_with_check(fields_list, unit, 0)> 0))
error= 1;
else
send_records= ((select_options & OPTION_FOUND_ROWS) ? 1 :
@@ -4533,11 +4537,6 @@ void JOIN::cleanup_item_list(List<Item> &items) const
the top-level select_lex for this query
@param tables list of all tables used in this query.
The tables have been pre-opened.
- @param wild_num number of wildcards used in the top level
- select of this query.
- For example statement
- SELECT *, t1.*, catalog.t2.* FROM t0, t1, t2;
- has 3 wildcards.
@param fields list of items in SELECT list of the top-level
select
e.g. SELECT a, b, c FROM t1 will have Item_field
@@ -4570,12 +4569,10 @@ void JOIN::cleanup_item_list(List<Item> &items) const
*/
bool
-mysql_select(THD *thd,
- TABLE_LIST *tables, uint wild_num, List<Item> &fields,
- COND *conds, uint og_num, ORDER *order, ORDER *group,
- Item *having, ORDER *proc_param, ulonglong select_options,
- select_result *result, SELECT_LEX_UNIT *unit,
- SELECT_LEX *select_lex)
+mysql_select(THD *thd, TABLE_LIST *tables, List<Item> &fields, COND *conds,
+ uint og_num, ORDER *order, ORDER *group, Item *having,
+ ORDER *proc_param, ulonglong select_options, select_result *result,
+ SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex)
{
int err= 0;
bool free_join= 1;
@@ -4605,9 +4602,8 @@ mysql_select(THD *thd,
}
else
{
- if ((err= join->prepare( tables, wild_num,
- conds, og_num, order, false, group, having,
- proc_param, select_lex, unit)))
+ if ((err= join->prepare(tables, conds, og_num, order, false, group,
+ having, proc_param, select_lex, unit)))
{
goto err;
}
@@ -4629,9 +4625,8 @@ mysql_select(THD *thd,
DBUG_RETURN(TRUE);
THD_STAGE_INFO(thd, stage_init);
thd->lex->used_tables=0;
- if ((err= join->prepare(tables, wild_num,
- conds, og_num, order, false, group, having, proc_param,
- select_lex, unit)))
+ if ((err= join->prepare(tables, conds, og_num, order, false, group, having,
+ proc_param, select_lex, unit)))
{
goto err;
}
@@ -5345,6 +5340,9 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
}
}
+ join->join_tab= stat;
+ join->make_notnull_conds_for_range_scans();
+
/* Calc how many (possible) matched records in each table */
/*
@@ -5578,7 +5576,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
if (double rr= join->best_positions[i].records_read)
records= COST_MULT(records, rr);
ha_rows rows= records > (double) HA_ROWS_MAX ? HA_ROWS_MAX : (ha_rows) records;
- set_if_smaller(rows, unit->select_limit_cnt);
+ set_if_smaller(rows, unit->lim.get_select_limit());
join->select_lex->increase_derived_records(rows);
}
}
@@ -6405,18 +6403,18 @@ max_part_bit(key_part_map bits)
/**
Add a new keuse to the specified array of KEYUSE objects
- @param[in,out] keyuse_array array of keyuses to be extended
+ @param[in,out] keyuse_array array of keyuses to be extended
@param[in] key_field info on the key use occurrence
@param[in] key key number for the keyse to be added
@param[in] part key part for the keyuse to be added
@note
The function builds a new KEYUSE object for a key use utilizing the info
- on the left and right parts of the given key use extracted from the
- structure key_field, the key number and key part for this key use.
+ on the left and right parts of the given key use extracted from the
+ structure key_field, the key number and key part for this key use.
The built object is added to the dynamic array keyuse_array.
- @retval 0 the built object is succesfully added
+ @retval 0 the built object is successfully added
@retval 1 otherwise
*/
@@ -6780,7 +6778,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
/* set a barrier for the array of SARGABLE_PARAM */
(*sargables)[0].field= 0;
- if (my_init_dynamic_array2(keyuse, sizeof(KEYUSE),
+ if (my_init_dynamic_array2(thd->mem_root->m_psi_key, keyuse, sizeof(KEYUSE),
thd->alloc(sizeof(KEYUSE) * 20), 20, 64,
MYF(MY_THREAD_SPECIFIC)))
DBUG_RETURN(TRUE);
@@ -6967,7 +6965,6 @@ void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array)
}
}
-
/**
Check for the presence of AGGFN(DISTINCT a) queries that may be subject
to loose index scan.
@@ -7005,7 +7002,6 @@ is_indexed_agg_distinct(JOIN *join, List<Item_field> *out_args)
{
Item_sum **sum_item_ptr;
bool result= false;
- Field_map first_aggdistinct_fields;
if (join->table_count != 1 || /* reference more than 1 table */
join->select_distinct || /* or a DISTINCT */
@@ -7015,10 +7011,11 @@ is_indexed_agg_distinct(JOIN *join, List<Item_field> *out_args)
if (join->make_sum_func_list(join->all_fields, join->fields_list, true))
return false;
+ Bitmap<MAX_FIELDS> first_aggdistinct_fields;
+ bool first_aggdistinct_fields_initialized= false;
for (sum_item_ptr= join->sum_funcs; *sum_item_ptr; sum_item_ptr++)
{
Item_sum *sum_item= *sum_item_ptr;
- Field_map cur_aggdistinct_fields;
Item *expr;
/* aggregate is not AGGFN(DISTINCT) or more than 1 argument to it */
switch (sum_item->sum_func())
@@ -7041,6 +7038,8 @@ is_indexed_agg_distinct(JOIN *join, List<Item_field> *out_args)
We don't worry about duplicates as these will be sorted out later in
get_best_group_min_max
*/
+ Bitmap<MAX_FIELDS> cur_aggdistinct_fields;
+ cur_aggdistinct_fields.clear_all();
for (uint i= 0; i < sum_item->get_arg_count(); i++)
{
expr= sum_item->get_arg(i);
@@ -7059,8 +7058,11 @@ is_indexed_agg_distinct(JOIN *join, List<Item_field> *out_args)
If there are multiple aggregate functions, make sure that they all
refer to exactly the same set of columns.
*/
- if (first_aggdistinct_fields.is_clear_all())
- first_aggdistinct_fields.merge(cur_aggdistinct_fields);
+ if (!first_aggdistinct_fields_initialized)
+ {
+ first_aggdistinct_fields= cur_aggdistinct_fields;
+ first_aggdistinct_fields_initialized=true;
+ }
else if (first_aggdistinct_fields != cur_aggdistinct_fields)
return false;
}
@@ -8032,7 +8034,7 @@ best_access_path(JOIN *join,
if (!best_key &&
idx == join->const_tables &&
s->table == join->sort_by_table &&
- join->unit->select_limit_cnt >= records)
+ join->unit->lim.get_select_limit() >= records)
{
trace_access_scan.add("use_tmp_table", true);
join->sort_by_table= (TABLE*) 1; // Must use temporary table
@@ -10265,14 +10267,16 @@ bool JOIN::get_best_combination()
if (aggr_tables > 2)
aggr_tables= 2;
- if (!(join_tab= (JOIN_TAB*) thd->alloc(sizeof(JOIN_TAB)*
- (top_join_tab_count + aggr_tables))))
- DBUG_RETURN(TRUE);
full_join=0;
hash_join= FALSE;
fix_semijoin_strategies_for_picked_join_order(this);
+ top_join_tab_count= get_number_of_tables_at_top_level(this);
+
+ if (!(join_tab= (JOIN_TAB*) thd->alloc(sizeof(JOIN_TAB)*
+ (top_join_tab_count + aggr_tables))))
+ DBUG_RETURN(TRUE);
JOIN_TAB_RANGE *root_range;
if (!(root_range= new (thd->mem_root) JOIN_TAB_RANGE))
@@ -11592,7 +11596,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
!tab->loosescan_match_tab && // (1)
((cond && (!tab->keys.is_subset(tab->const_keys) && i > 0)) ||
(!tab->const_keys.is_clear_all() && i == join->const_tables &&
- join->unit->select_limit_cnt <
+ join->unit->lim.get_select_limit() <
join->best_positions[i].records_read &&
!(join->select_options & OPTION_FOUND_ROWS))))
{
@@ -11618,7 +11622,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
(join->select_options &
OPTION_FOUND_ROWS ?
HA_POS_ERROR :
- join->unit->select_limit_cnt), 0,
+ join->unit->lim.get_select_limit()), 0,
FALSE, FALSE, FALSE) < 0)
{
/*
@@ -11632,7 +11636,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
(join->select_options &
OPTION_FOUND_ROWS ?
HA_POS_ERROR :
- join->unit->select_limit_cnt),0,
+ join->unit->lim.get_select_limit()),0,
FALSE, FALSE, FALSE) < 0)
DBUG_RETURN(1); // Impossible WHERE
}
@@ -13290,7 +13294,7 @@ void JOIN_TAB::build_range_rowid_filter_if_needed()
Exec_time_tracker *table_tracker= table->file->get_time_tracker();
Rowid_filter_tracker *rowid_tracker= rowid_filter->get_tracker();
table->file->set_time_tracker(rowid_tracker->get_time_tracker());
- rowid_tracker->start_tracking();
+ rowid_tracker->start_tracking(join->thd);
if (!rowid_filter->build())
{
is_rowid_filter_built= true;
@@ -13300,7 +13304,7 @@ void JOIN_TAB::build_range_rowid_filter_if_needed()
delete rowid_filter;
rowid_filter= 0;
}
- rowid_tracker->stop_tracking();
+ rowid_tracker->stop_tracking(join->thd);
table->file->set_time_tracker(table_tracker);
}
}
@@ -13503,8 +13507,7 @@ bool JOIN_TAB::preread_init()
if ((!derived->get_unit()->executed ||
derived->is_recursive_with_table() ||
derived->get_unit()->uncacheable) &&
- mysql_handle_single_derived(join->thd->lex,
- derived, DT_CREATE | DT_FILL))
+ mysql_handle_single_derived(join->thd->lex, derived, DT_CREATE | DT_FILL))
return TRUE;
if (!(derived->get_unit()->uncacheable & UNCACHEABLE_DEPENDENT) ||
@@ -13529,6 +13532,21 @@ bool JOIN_TAB::preread_init()
}
+bool JOIN_TAB::pfs_batch_update(JOIN *join)
+{
+ /*
+ Use PFS batch mode if
+ 1. tab is an inner-most table, or
+ 2. will read more than one row (not eq_ref or const access type)
+ 3. no subqueries
+ */
+
+ return join->join_tab + join->table_count - 1 == this && // 1
+ type != JT_EQ_REF && type != JT_CONST && type != JT_SYSTEM && // 2
+ (!select_cond || !select_cond->with_subquery()); // 3
+}
+
+
/**
Build a TABLE_REF structure for index lookup in the temporary table
@@ -14067,7 +14085,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond,
ORDER BY and GROUP BY
*/
for (JOIN_TAB *tab= join->join_tab + join->const_tables;
- tab < join->join_tab + join->table_count;
+ tab < join->join_tab + join->top_join_tab_count;
tab++)
tab->cached_eq_ref_table= FALSE;
@@ -14075,7 +14093,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond,
*simple_order= head->on_expr_ref[0] == NULL;
if (*simple_order && head->table->file->ha_table_flags() & HA_SLOW_RND_POS)
{
- uint u1, u2, u3;
+ uint u1, u2, u3, u4;
/*
normally the condition is (see filesort_use_addons())
@@ -14086,7 +14104,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond,
TODO proper cost estimations
*/
- *simple_order= filesort_use_addons(head->table, 0, &u1, &u2, &u3);
+ *simple_order= filesort_use_addons(head->table, 0, &u1, &u2, &u3, &u4);
}
}
else
@@ -14352,7 +14370,7 @@ return_zero_rows(JOIN *join, select_result *result, List<TABLE_LIST> &tables,
{
bool send_error= FALSE;
if (send_row)
- send_error= result->send_data(fields) > 0;
+ send_error= result->send_data_with_check(fields, join->unit, 0) > 0;
if (likely(!send_error))
result->send_eof(); // Should be safe
}
@@ -14890,28 +14908,28 @@ bool Item_func_eq::check_equality(THD *thd, COND_EQUAL *cond_equal,
left_item, right_item, cond_equal);
}
-
+
/**
Item_xxx::build_equal_items()
-
+
Replace all equality predicates in a condition referenced by "this"
by multiple equality items.
At each 'and' level the function detects items for equality predicates
and replaced them by a set of multiple equality items of class Item_equal,
- taking into account inherited equalities from upper levels.
+ taking into account inherited equalities from upper levels.
If an equality predicate is used not in a conjunction it's just
replaced by a multiple equality predicate.
For each 'and' level the function set a pointer to the inherited
multiple equalities in the cond_equal field of the associated
- object of the type Item_cond_and.
+ object of the type Item_cond_and.
The function also traverses the cond tree and and for each field reference
sets a pointer to the multiple equality item containing the field, if there
is any. If this multiple equality equates fields to a constant the
- function replaces the field reference by the constant in the cases
+ function replaces the field reference by the constant in the cases
when the field is not of a string type or when the field reference is
just an argument of a comparison predicate.
- The function also determines the maximum number of members in
+ The function also determines the maximum number of members in
equality lists of each Item_cond_and object assigning it to
thd->lex->current_select->max_equal_elems.
@@ -14925,7 +14943,7 @@ bool Item_func_eq::check_equality(THD *thd, COND_EQUAL *cond_equal,
in a conjuction for a minimal set of multiple equality predicates.
This set can be considered as a canonical representation of the
sub-conjunction of the equality predicates.
- E.g. (t1.a=t2.b AND t2.b>5 AND t1.a=t3.c) is replaced by
+ E.g. (t1.a=t2.b AND t2.b>5 AND t1.a=t3.c) is replaced by
(=(t1.a,t2.b,t3.c) AND t2.b>5), not by
(=(t1.a,t2.b) AND =(t1.a,t3.c) AND t2.b>5);
while (t1.a=t2.b AND t2.b>5 AND t3.c=t4.d) is replaced by
@@ -14936,16 +14954,16 @@ bool Item_func_eq::check_equality(THD *thd, COND_EQUAL *cond_equal,
The function performs the substitution in a recursive descent by
the condtion tree, passing to the next AND level a chain of multiple
equality predicates which have been built at the upper levels.
- The Item_equal items built at the level are attached to other
+ The Item_equal items built at the level are attached to other
non-equality conjucts as a sublist. The pointer to the inherited
multiple equalities is saved in the and condition object (Item_cond_and).
- This chain allows us for any field reference occurence easyly to find a
- multiple equality that must be held for this occurence.
+ This chain allows us for any field reference occurrence easily to find a
+ multiple equality that must be held for this occurrence.
For each AND level we do the following:
- scan it for all equality predicate (=) items
- join them into disjoint Item_equal() groups
- - process the included OR conditions recursively to do the same for
- lower AND levels.
+ - process the included OR conditions recursively to do the same for
+ lower AND levels.
We need to do things in this order as lower AND levels need to know about
all possible Item_equal objects in upper levels.
@@ -14981,7 +14999,7 @@ COND *Item_cond_and::build_equal_items(THD *thd,
/*
Retrieve all conjuncts of this level detecting the equality
that are subject to substitution by multiple equality items and
- removing each such predicate from the conjunction after having
+ removing each such predicate from the conjunction after having
found/created a multiple equality whose inference the predicate is.
*/
while ((item= li++))
@@ -17706,16 +17724,20 @@ const_expression_in_where(COND *cond, Item *comp_item, Field *comp_field,
Create internal temporary table
****************************************************************************/
-Field *Item::create_tmp_field_int(TABLE *table, uint convert_int_length)
+Field *Item::create_tmp_field_int(MEM_ROOT *root, TABLE *table,
+ uint convert_int_length)
{
- const Type_handler *h= &type_handler_long;
+ const Type_handler *h= &type_handler_slong;
if (max_char_length() > convert_int_length)
- h= &type_handler_longlong;
- return h->make_and_init_table_field(&name, Record_addr(maybe_null),
+ h= &type_handler_slonglong;
+ if (unsigned_flag)
+ h= h->type_handler_unsigned();
+ return h->make_and_init_table_field(root, &name, Record_addr(maybe_null),
*this, table);
}
-Field *Item::tmp_table_field_from_field_type_maybe_null(TABLE *table,
+Field *Item::tmp_table_field_from_field_type_maybe_null(MEM_ROOT *root,
+ TABLE *table,
Tmp_field_src *src,
const Tmp_field_param *param,
bool is_explicit_null)
@@ -17727,7 +17749,7 @@ Field *Item::tmp_table_field_from_field_type_maybe_null(TABLE *table,
DBUG_ASSERT(!param->make_copy_field() || type() == CONST_ITEM);
DBUG_ASSERT(!is_result_field());
Field *result;
- if ((result= tmp_table_field_from_field_type(table)))
+ if ((result= tmp_table_field_from_field_type(root, table)))
{
if (result && is_explicit_null)
result->is_created_from_null_item= true;
@@ -17736,15 +17758,14 @@ Field *Item::tmp_table_field_from_field_type_maybe_null(TABLE *table,
}
-Field *Item_sum::create_tmp_field(bool group, TABLE *table)
+Field *Item_sum::create_tmp_field(MEM_ROOT *root, bool group, TABLE *table)
{
Field *UNINIT_VAR(new_field);
- MEM_ROOT *mem_root= table->in_use->mem_root;
switch (cmp_type()) {
case REAL_RESULT:
{
- new_field= new (mem_root)
+ new_field= new (root)
Field_double(max_char_length(), maybe_null, &name, decimals, TRUE);
break;
}
@@ -17752,7 +17773,7 @@ Field *Item_sum::create_tmp_field(bool group, TABLE *table)
case TIME_RESULT:
case DECIMAL_RESULT:
case STRING_RESULT:
- new_field= tmp_table_field_from_field_type(table);
+ new_field= tmp_table_field_from_field_type(root, table);
break;
case ROW_RESULT:
// This case should never be choosen
@@ -17767,47 +17788,11 @@ Field *Item_sum::create_tmp_field(bool group, TABLE *table)
/**
- Create field for information schema table.
-
- @param thd Thread handler
- @param table Temporary table
- @param item Item to create a field for
-
- @retval
- 0 on error
- @retval
- new_created field
-*/
-
-Field *Item::create_field_for_schema(THD *thd, TABLE *table)
-{
- if (field_type() == MYSQL_TYPE_VARCHAR)
- {
- Field *field;
- if (max_length > MAX_FIELD_VARCHARLENGTH)
- field= new (thd->mem_root) Field_blob(max_length, maybe_null, &name,
- collation.collation);
- else if (max_length > 0)
- field= new (thd->mem_root) Field_varstring(max_length, maybe_null, &name,
- table->s,
- collation.collation);
- else
- field= new Field_null((uchar*) 0, 0, Field::NONE, &name,
- collation.collation);
- if (field)
- field->init(table);
- return field;
- }
- return tmp_table_field_from_field_type(table);
-}
-
-
-/**
Create a temporary field for Item_field (or its descendant),
either direct or referenced by an Item_ref.
*/
Field *
-Item_field::create_tmp_field_from_item_field(TABLE *new_table,
+Item_field::create_tmp_field_from_item_field(MEM_ROOT *root, TABLE *new_table,
Item_ref *orig_item,
const Tmp_field_param *param)
{
@@ -17831,14 +17816,16 @@ Item_field::create_tmp_field_from_item_field(TABLE *new_table,
Record_addr rec(orig_item ? orig_item->maybe_null : maybe_null);
const Type_handler *handler= type_handler()->
type_handler_for_tmp_table(this);
- result= handler->make_and_init_table_field(orig_item ? &orig_item->name : &name,
+ result= handler->make_and_init_table_field(root,
+ orig_item ? &orig_item->name : &name,
rec, *this, new_table);
}
else if (param->table_cant_handle_bit_fields() &&
field->type() == MYSQL_TYPE_BIT)
{
- const Type_handler *handler= type_handler_long_or_longlong();
- result= handler->make_and_init_table_field(&name,
+ const Type_handler *handler=
+ Type_handler::type_handler_long_or_longlong(max_char_length(), true);
+ result= handler->make_and_init_table_field(root, &name,
Record_addr(maybe_null),
*this, new_table);
}
@@ -17847,8 +17834,7 @@ Item_field::create_tmp_field_from_item_field(TABLE *new_table,
LEX_CSTRING *tmp= orig_item ? &orig_item->name : &name;
bool tmp_maybe_null= param->modify_item() ? maybe_null :
field->maybe_null();
- result= field->create_tmp_field(new_table->in_use->mem_root, new_table,
- tmp_maybe_null);
+ result= field->create_tmp_field(root, new_table, tmp_maybe_null);
if (result)
result->field_name= *tmp;
}
@@ -17858,14 +17844,14 @@ Item_field::create_tmp_field_from_item_field(TABLE *new_table,
}
-Field *Item_field::create_tmp_field_ex(TABLE *table,
+Field *Item_field::create_tmp_field_ex(MEM_ROOT *root, TABLE *table,
Tmp_field_src *src,
const Tmp_field_param *param)
{
DBUG_ASSERT(!is_result_field());
Field *result;
src->set_field(field);
- if (!(result= create_tmp_field_from_item_field(table, NULL, param)))
+ if (!(result= create_tmp_field_from_item_field(root, table, NULL, param)))
return NULL;
/*
Fields that are used as arguments to the DEFAULT() function already have
@@ -17878,7 +17864,7 @@ Field *Item_field::create_tmp_field_ex(TABLE *table,
}
-Field *Item_ref::create_tmp_field_ex(TABLE *table,
+Field *Item_ref::create_tmp_field_ex(MEM_ROOT *root, TABLE *table,
Tmp_field_src *src,
const Tmp_field_param *param)
{
@@ -17891,13 +17877,14 @@ Field *Item_ref::create_tmp_field_ex(TABLE *table,
Tmp_field_param prm2(*param);
prm2.set_modify_item(false);
src->set_field(field->field);
- if (!(result= field->create_tmp_field_from_item_field(table, this, &prm2)))
+ if (!(result= field->create_tmp_field_from_item_field(root, table,
+ this, &prm2)))
return NULL;
if (param->modify_item())
result_field= result;
return result;
}
- return Item_result_field::create_tmp_field_ex(table, src, param);
+ return Item_result_field::create_tmp_field_ex(root, table, src, param);
}
@@ -17916,9 +17903,13 @@ void Item_result_field::get_tmp_field_src(Tmp_field_src *src,
}
-Field *Item_result_field::create_tmp_field_ex(TABLE *table,
- Tmp_field_src *src,
- const Tmp_field_param *param)
+Field *
+Item_result_field::create_tmp_field_ex_from_handler(
+ MEM_ROOT *root,
+ TABLE *table,
+ Tmp_field_src *src,
+ const Tmp_field_param *param,
+ const Type_handler *h)
{
/*
Possible Item types:
@@ -17926,38 +17917,27 @@ Field *Item_result_field::create_tmp_field_ex(TABLE *table,
- Item_func
- Item_subselect
*/
+ DBUG_ASSERT(fixed);
DBUG_ASSERT(is_result_field());
DBUG_ASSERT(type() != NULL_ITEM);
get_tmp_field_src(src, param);
Field *result;
- if ((result= tmp_table_field_from_field_type(table)) && param->modify_item())
- result_field= result;
- return result;
-}
-
-
-Field *Item_func_user_var::create_tmp_field_ex(TABLE *table,
- Tmp_field_src *src,
- const Tmp_field_param *param)
-{
- DBUG_ASSERT(is_result_field());
- DBUG_ASSERT(type() != NULL_ITEM);
- get_tmp_field_src(src, param);
- Field *result;
- if ((result= create_table_field_from_handler(table)) && param->modify_item())
+ if ((result= h->make_and_init_table_field(root, &name,
+ Record_addr(maybe_null),
+ *this, table)) &&
+ param->modify_item())
result_field= result;
return result;
}
-Field *Item_func_sp::create_tmp_field_ex(TABLE *table,
+Field *Item_func_sp::create_tmp_field_ex(MEM_ROOT *root, TABLE *table,
Tmp_field_src *src,
const Tmp_field_param *param)
{
Field *result;
get_tmp_field_src(src, param);
- if ((result= sp_result_field->create_tmp_field(table->in_use->mem_root,
- table)))
+ if ((result= sp_result_field->create_tmp_field(root, table)))
{
result->field_name= name;
if (param->modify_item())
@@ -18003,7 +17983,8 @@ Field *create_tmp_field(TABLE *table, Item *item,
Tmp_field_src src;
Tmp_field_param prm(group, modify_item, table_cant_handle_bit_fields,
make_copy_field);
- Field *result= item->create_tmp_field_ex(table, &src, &prm);
+ Field *result= item->create_tmp_field_ex(table->in_use->mem_root,
+ table, &src, &prm);
*from_field= src.field();
*default_field= src.default_field();
if (src.item_result_field())
@@ -18055,6 +18036,114 @@ setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps)
}
+class Create_tmp_table: public Data_type_statistics
+{
+ // The following members are initialized only in start()
+ Field **m_from_field, **m_default_field;
+ KEY_PART_INFO *m_key_part_info;
+ uchar *m_group_buff, *m_bitmaps;
+ // The following members are initialized in ctor
+ uint m_alloced_field_count;
+ bool m_using_unique_constraint;
+ uint m_temp_pool_slot;
+ ORDER *m_group;
+ bool m_distinct;
+ bool m_save_sum_fields;
+ bool m_with_cycle;
+ ulonglong m_select_options;
+ ha_rows m_rows_limit;
+ uint m_group_null_items;
+
+ // counter for distinct/other fields
+ uint m_field_count[2];
+ // counter for distinct/other fields which can be NULL
+ uint m_null_count[2];
+ // counter for distinct/other blob fields
+ uint m_blobs_count[2];
+ // counter for "tails" of bit fields which do not fit in a byte
+ uint m_uneven_bit[2];
+
+public:
+ enum counter {distinct, other};
+ /*
+ shows which field we are processing: distinct/other (set in processing
+ cycles)
+ */
+ counter current_counter;
+ Create_tmp_table(const TMP_TABLE_PARAM *param,
+ ORDER *group, bool distinct, bool save_sum_fields,
+ ulonglong select_options, ha_rows rows_limit)
+ :m_alloced_field_count(0),
+ m_using_unique_constraint(false),
+ m_temp_pool_slot(MY_BIT_NONE),
+ m_group(group),
+ m_distinct(distinct),
+ m_save_sum_fields(save_sum_fields),
+ m_with_cycle(false),
+ m_select_options(select_options),
+ m_rows_limit(rows_limit),
+ m_group_null_items(0),
+ current_counter(other)
+ {
+ m_field_count[Create_tmp_table::distinct]= 0;
+ m_field_count[Create_tmp_table::other]= 0;
+ m_null_count[Create_tmp_table::distinct]= 0;
+ m_null_count[Create_tmp_table::other]= 0;
+ m_blobs_count[Create_tmp_table::distinct]= 0;
+ m_blobs_count[Create_tmp_table::other]= 0;
+ m_uneven_bit[Create_tmp_table::distinct]= 0;
+ m_uneven_bit[Create_tmp_table::other]= 0;
+ }
+
+ void add_field(TABLE *table, Field *field, uint fieldnr, bool force_not_null_cols);
+
+ TABLE *start(THD *thd,
+ TMP_TABLE_PARAM *param,
+ const LEX_CSTRING *table_alias);
+
+ bool add_fields(THD *thd, TABLE *table,
+ TMP_TABLE_PARAM *param, List<Item> &fields);
+
+ bool add_schema_fields(THD *thd, TABLE *table,
+ TMP_TABLE_PARAM *param,
+ const ST_SCHEMA_TABLE &schema_table,
+ const MY_BITMAP &bitmap);
+
+ bool finalize(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
+ bool do_not_open, bool keep_row_order);
+ void cleanup_on_failure(THD *thd, TABLE *table);
+};
+
+
+void Create_tmp_table::add_field(TABLE *table, Field *field, uint fieldnr, bool force_not_null_cols)
+{
+ DBUG_ASSERT(!field->field_name.str || strlen(field->field_name.str) == field->field_name.length);
+
+ if (force_not_null_cols)
+ {
+ field->flags|= NOT_NULL_FLAG;
+ field->null_ptr= NULL;
+ }
+
+ if (!(field->flags & NOT_NULL_FLAG))
+ m_null_count[current_counter]++;
+
+ table->s->reclength+= field->pack_length();
+
+ // Assign it here, before update_data_type_statistics() changes m_blob_count
+ if (field->flags & BLOB_FLAG)
+ {
+ table->s->blob_field[m_blob_count]= fieldnr;
+ m_blobs_count[current_counter]++;
+ }
+
+ table->field[fieldnr]= field;
+ field->field_index= fieldnr;
+
+ field->update_data_type_statistics(this);
+}
+
+
/**
Create a temp table according to a field list.
@@ -18089,61 +18178,36 @@ setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps)
inserted, the engine should preserve this order
*/
-TABLE *
-create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
- ORDER *group, bool distinct, bool save_sum_fields,
- ulonglong select_options, ha_rows rows_limit,
- const LEX_CSTRING *table_alias, bool do_not_open,
- bool keep_row_order)
+TABLE *Create_tmp_table::start(THD *thd,
+ TMP_TABLE_PARAM *param,
+ const LEX_CSTRING *table_alias)
{
MEM_ROOT *mem_root_save, own_root;
TABLE *table;
TABLE_SHARE *share;
- uint i,field_count,null_count,null_pack_length;
uint copy_func_count= param->func_count;
- uint hidden_null_count, hidden_null_pack_length, hidden_field_count;
- uint blob_count,group_null_items, string_count;
- uint temp_pool_slot=MY_BIT_NONE;
- uint fieldnr= 0;
- ulong reclength, string_total_length;
- bool using_unique_constraint= false;
- bool use_packed_rows= false;
- bool not_all_columns= !(select_options & TMP_TABLE_ALL_COLUMNS);
char *tmpname,path[FN_REFLEN];
- uchar *pos, *group_buff, *bitmaps;
- uchar *null_flags;
- Field **reg_field, **from_field, **default_field;
+ Field **reg_field;
uint *blob_field;
- Copy_field *copy=0;
- KEY *keyinfo;
- KEY_PART_INFO *key_part_info;
- Item **copy_func;
- TMP_ENGINE_COLUMNDEF *recinfo;
- /*
- total_uneven_bit_length is uneven bit length for visible fields
- hidden_uneven_bit_length is uneven bit length for hidden fields
- */
- uint total_uneven_bit_length= 0, hidden_uneven_bit_length= 0;
- bool force_copy_fields= param->force_copy_fields;
/* Treat sum functions as normal ones when loose index scan is used. */
- save_sum_fields|= param->precomputed_group_by;
- DBUG_ENTER("create_tmp_table");
+ m_save_sum_fields|= param->precomputed_group_by;
+ DBUG_ENTER("Create_tmp_table::start");
DBUG_PRINT("enter",
("table_alias: '%s' distinct: %d save_sum_fields: %d "
"rows_limit: %lu group: %d", table_alias->str,
- (int) distinct, (int) save_sum_fields,
- (ulong) rows_limit, MY_TEST(group)));
+ (int) m_distinct, (int) m_save_sum_fields,
+ (ulong) m_rows_limit, MY_TEST(m_group)));
if (use_temp_pool && !(test_flags & TEST_KEEP_TMP_TABLES))
- temp_pool_slot = bitmap_lock_set_next(&temp_pool);
+ m_temp_pool_slot = bitmap_lock_set_next(&temp_pool);
- if (temp_pool_slot != MY_BIT_NONE) // we got a slot
- sprintf(path, "%s_%lx_%i", tmp_file_prefix,
- current_pid, temp_pool_slot);
+ if (m_temp_pool_slot != MY_BIT_NONE) // we got a slot
+ sprintf(path, "%s-%lx-%i", tmp_file_prefix,
+ current_pid, m_temp_pool_slot);
else
{
/* if we run out of slots or we are not using tempool */
- sprintf(path, "%s%lx_%lx_%x", tmp_file_prefix,current_pid,
+ sprintf(path, "%s-%lx-%lx-%x", tmp_file_prefix,current_pid,
(ulong) thd->thread_id, thd->tmp_table++);
}
@@ -18154,12 +18218,12 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
fn_format(path, path, mysql_tmpdir, "",
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
- if (group)
+ if (m_group)
{
- ORDER **prev= &group;
+ ORDER **prev= &m_group;
if (!param->quick_group)
- group=0; // Can't use group key
- else for (ORDER *tmp=group ; tmp ; tmp=tmp->next)
+ m_group= 0; // Can't use group key
+ else for (ORDER *tmp= m_group ; tmp ; tmp= tmp->next)
{
/* Exclude found constant from the list */
if ((*tmp->item)->const_item())
@@ -18178,16 +18242,17 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
*/
(*tmp->item)->marker=4; // Store null in key
if ((*tmp->item)->too_big_for_varchar())
- using_unique_constraint= true;
+ m_using_unique_constraint= true;
}
if (param->group_length >= MAX_BLOB_WIDTH)
- using_unique_constraint= true;
- if (group)
- distinct=0; // Can't use distinct
+ m_using_unique_constraint= true;
+ if (m_group)
+ m_distinct= 0; // Can't use distinct
}
- field_count=param->field_count+param->func_count+param->sum_func_count;
- hidden_field_count=param->hidden_field_count;
+ m_alloced_field_count= param->field_count+param->func_count+param->sum_func_count;
+ DBUG_ASSERT(m_alloced_field_count);
+ const uint field_count= m_alloced_field_count;
/*
When loose index scan is employed as access method, it already
@@ -18199,48 +18264,44 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
if (param->precomputed_group_by)
copy_func_count+= param->sum_func_count;
- init_sql_alloc(&own_root, "tmp_table", TABLE_ALLOC_BLOCK_SIZE, 0,
+ init_sql_alloc(key_memory_TABLE, &own_root, TABLE_ALLOC_BLOCK_SIZE, 0,
MYF(MY_THREAD_SPECIFIC));
if (!multi_alloc_root(&own_root,
&table, sizeof(*table),
&share, sizeof(*share),
&reg_field, sizeof(Field*) * (field_count+1),
- &default_field, sizeof(Field*) * (field_count),
+ &m_default_field, sizeof(Field*) * (field_count),
&blob_field, sizeof(uint)*(field_count+1),
- &from_field, sizeof(Field*)*field_count,
- &copy_func, sizeof(*copy_func)*(copy_func_count+1),
+ &m_from_field, sizeof(Field*)*field_count,
+ &param->items_to_copy,
+ sizeof(param->items_to_copy[0])*(copy_func_count+1),
&param->keyinfo, sizeof(*param->keyinfo),
- &key_part_info,
- sizeof(*key_part_info)*(param->group_parts+1),
+ &m_key_part_info,
+ sizeof(*m_key_part_info)*(param->group_parts+1),
&param->start_recinfo,
sizeof(*param->recinfo)*(field_count*2+4),
&tmpname, (uint) strlen(path)+1,
- &group_buff, (group && ! using_unique_constraint ?
+ &m_group_buff, (m_group && ! m_using_unique_constraint ?
param->group_length : 0),
- &bitmaps, bitmap_buffer_size(field_count)*6,
+ &m_bitmaps, bitmap_buffer_size(field_count)*6,
NullS))
{
- if (temp_pool_slot != MY_BIT_NONE)
- bitmap_lock_clear_bit(&temp_pool, temp_pool_slot);
DBUG_RETURN(NULL); /* purecov: inspected */
}
/* Copy_field belongs to TMP_TABLE_PARAM, allocate it in THD mem_root */
- if (!(param->copy_field= copy= new (thd->mem_root) Copy_field[field_count]))
+ if (!(param->copy_field= new (thd->mem_root) Copy_field[field_count]))
{
- if (temp_pool_slot != MY_BIT_NONE)
- bitmap_lock_clear_bit(&temp_pool, temp_pool_slot);
free_root(&own_root, MYF(0)); /* purecov: inspected */
DBUG_RETURN(NULL); /* purecov: inspected */
}
- param->items_to_copy= copy_func;
strmov(tmpname, path);
/* make table according to fields */
bzero((char*) table,sizeof(*table));
- bzero((char*) reg_field,sizeof(Field*)*(field_count+1));
- bzero((char*) default_field, sizeof(Field*) * (field_count));
- bzero((char*) from_field,sizeof(Field*)*field_count);
+ bzero((char*) reg_field, sizeof(Field*) * (field_count+1));
+ bzero((char*) m_default_field, sizeof(Field*) * (field_count));
+ bzero((char*) m_from_field, sizeof(Field*) * field_count);
table->mem_root= own_root;
mem_root_save= thd->mem_root;
@@ -18251,7 +18312,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
table->reginfo.lock_type=TL_WRITE; /* Will be updated */
table->map=1;
- table->temp_pool_slot = temp_pool_slot;
+ table->temp_pool_slot= m_temp_pool_slot;
table->copy_blobs= 1;
table->in_use= thd;
table->no_rows_with_nulls= param->force_not_null_cols;
@@ -18266,17 +18327,60 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
if (param->schema_table)
share->db= INFORMATION_SCHEMA_NAME;
- /* Calculate which type of fields we will store in the temporary table */
-
- reclength= string_total_length= 0;
- blob_count= string_count= null_count= hidden_null_count= group_null_items= 0;
param->using_outer_summary_function= 0;
+ thd->mem_root= mem_root_save;
+ DBUG_RETURN(table);
+}
+
+
+bool Create_tmp_table::add_fields(THD *thd,
+ TABLE *table,
+ TMP_TABLE_PARAM *param,
+ List<Item> &fields)
+{
+ DBUG_ENTER("Create_tmp_table::add_fields");
+ DBUG_ASSERT(table);
+ DBUG_ASSERT(table->field);
+ DBUG_ASSERT(table->s->blob_field);
+ DBUG_ASSERT(table->s->reclength == 0);
+ DBUG_ASSERT(table->s->fields == 0);
+ DBUG_ASSERT(table->s->blob_fields == 0);
+
+ const bool not_all_columns= !(m_select_options & TMP_TABLE_ALL_COLUMNS);
+ bool distinct_record_structure= m_distinct;
+ uint fieldnr= 0;
+ TABLE_SHARE *share= table->s;
+ Item **copy_func= param->items_to_copy;
+
+ MEM_ROOT *mem_root_save= thd->mem_root;
+ thd->mem_root= &table->mem_root;
List_iterator_fast<Item> li(fields);
Item *item;
- Field **tmp_from_field=from_field;
+ Field **tmp_from_field= m_from_field;
+ while (!m_with_cycle && (item= li++))
+ if (item->common_flags & IS_IN_WITH_CYCLE)
+ {
+ m_with_cycle= true;
+ /*
+ Following distinct_record_structure is (m_distinct || m_with_cycle)
+
+ Note: distinct_record_structure can be true even if m_distinct is
+ false, for example for incr_table in recursive CTE
+ (see select_union_recursive::create_result_table)
+ */
+ distinct_record_structure= true;
+ }
+ li.rewind();
+ uint uneven_delta= 0;
while ((item=li++))
{
+ current_counter= (((param->hidden_field_count < (fieldnr + 1)) &&
+ distinct_record_structure &&
+ (!m_with_cycle ||
+ (item->common_flags & IS_IN_WITH_CYCLE)))?
+ distinct :
+ other);
Item::Type type= item->type();
if (type == Item::COPY_STR_ITEM)
{
@@ -18292,98 +18396,85 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
if ((item->real_type() == Item::SUBSELECT_ITEM) ||
(item->used_tables() & ~OUTER_REF_TABLE_BIT))
{
- /*
- Mark that the we have ignored an item that refers to a summary
- function. We need to know this if someone is going to use
- DISTINCT on the result.
- */
- param->using_outer_summary_function=1;
- continue;
+ /*
+ Mark that the we have ignored an item that refers to a summary
+ function. We need to know this if someone is going to use
+ DISTINCT on the result.
+ */
+ param->using_outer_summary_function=1;
+ continue;
}
}
- if (item->const_item() && (int) hidden_field_count <= 0)
+ if (item->const_item() &&
+ param->hidden_field_count < (fieldnr + 1))
continue; // We don't have to store this
}
- if (type == Item::SUM_FUNC_ITEM && !group && !save_sum_fields)
+ if (type == Item::SUM_FUNC_ITEM && !m_group && !m_save_sum_fields)
{ /* Can't calc group yet */
Item_sum *sum_item= (Item_sum *) item;
sum_item->result_field=0;
- for (i=0 ; i < sum_item->get_arg_count() ; i++)
+ for (uint i= 0 ; i < sum_item->get_arg_count() ; i++)
{
- Item *arg= sum_item->get_arg(i);
- if (!arg->const_item())
- {
+ Item *arg= sum_item->get_arg(i);
+ if (!arg->const_item())
+ {
Item *tmp_item;
Field *new_field=
create_tmp_field(table, arg, &copy_func,
- tmp_from_field, &default_field[fieldnr],
- group != 0,not_all_columns,
- distinct, false);
- if (!new_field)
- goto err; // Should be OOM
- DBUG_ASSERT(!new_field->field_name.str || strlen(new_field->field_name.str) == new_field->field_name.length);
- tmp_from_field++;
- reclength+=new_field->pack_length();
- if (new_field->flags & BLOB_FLAG)
- {
- *blob_field++= fieldnr;
- blob_count++;
- }
- if (new_field->type() == MYSQL_TYPE_BIT)
- total_uneven_bit_length+= new_field->field_length & 7;
- *(reg_field++)= new_field;
- if (new_field->real_type() == MYSQL_TYPE_STRING ||
- new_field->real_type() == MYSQL_TYPE_VARCHAR)
- {
- string_count++;
- string_total_length+= new_field->pack_length();
- }
+ tmp_from_field, &m_default_field[fieldnr],
+ m_group != 0, not_all_columns,
+ distinct_record_structure , false);
+ if (!new_field)
+ goto err; // Should be OOM
+ tmp_from_field++;
+
thd->mem_root= mem_root_save;
if (!(tmp_item= new (thd->mem_root)
Item_temptable_field(thd, new_field)))
goto err;
arg= sum_item->set_arg(i, thd, tmp_item);
thd->mem_root= &table->mem_root;
- if (param->force_not_null_cols)
- {
- new_field->flags|= NOT_NULL_FLAG;
- new_field->null_ptr= NULL;
- }
- if (!(new_field->flags & NOT_NULL_FLAG))
+
+ uneven_delta= m_uneven_bit_length;
+ add_field(table, new_field, fieldnr++, param->force_not_null_cols);
+ uneven_delta= m_uneven_bit_length - uneven_delta;
+ m_field_count[current_counter]++;
+
+ if (!(new_field->flags & NOT_NULL_FLAG))
{
- null_count++;
/*
new_field->maybe_null() is still false, it will be
changed below. But we have to setup Item_field correctly
*/
arg->maybe_null=1;
}
- new_field->field_index= fieldnr++;
- }
+ if (current_counter == distinct)
+ new_field->flags|= FIELD_PART_OF_TMP_UNIQUE;
+ }
}
}
else
{
/*
- The last parameter to create_tmp_field_ex() is a bit tricky:
+ The last parameter to create_tmp_field_ex() is a bit tricky:
- We need to set it to 0 in union, to get fill_record() to modify the
- temporary table.
- We need to set it to 1 on multi-table-update and in select to
- write rows to the temporary table.
- We here distinguish between UNION and multi-table-updates by the fact
- that in the later case group is set to the row pointer.
+ We need to set it to 0 in union, to get fill_record() to modify the
+ temporary table.
+ We need to set it to 1 on multi-table-update and in select to
+ write rows to the temporary table.
+ We here distinguish between UNION and multi-table-updates by the fact
+ that in the later case group is set to the row pointer.
The test for item->marker == 4 is ensure we don't create a group-by
key over a bit field as heap tables can't handle that.
*/
- Field *new_field= (param->schema_table) ?
- item->create_field_for_schema(thd, table) :
+ DBUG_ASSERT(!param->schema_table);
+ Field *new_field=
create_tmp_field(table, item, &copy_func,
- tmp_from_field, &default_field[fieldnr],
- group != 0,
- !force_copy_fields &&
- (not_all_columns || group !=0),
+ tmp_from_field, &m_default_field[fieldnr],
+ m_group != 0,
+ !param->force_copy_fields &&
+ (not_all_columns || m_group !=0),
/*
If item->marker == 4 then we force create_tmp_field
to create a 64-bit longs for BIT fields because HEAP
@@ -18392,14 +18483,13 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
to be usable in this case too.
*/
item->marker == 4 || param->bit_fields_as_long,
- force_copy_fields);
+ param->force_copy_fields);
if (!new_field)
{
- if (unlikely(thd->is_fatal_error))
- goto err; // Got OOM
- continue; // Some kind of const item
+ if (unlikely(thd->is_fatal_error))
+ goto err; // Got OOM
+ continue; // Some kind of const item
}
- DBUG_ASSERT(!new_field->field_name.str || strlen(new_field->field_name.str) == new_field->field_name.length);
if (type == Item::SUM_FUNC_ITEM)
{
Item_sum *agg_item= (Item_sum *) item;
@@ -18426,82 +18516,83 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
agg_item->result_field= new_field;
}
tmp_from_field++;
- if (param->force_not_null_cols)
- {
- new_field->flags|= NOT_NULL_FLAG;
- new_field->null_ptr= NULL;
- }
- reclength+=new_field->pack_length();
- if (!(new_field->flags & NOT_NULL_FLAG))
- null_count++;
- if (new_field->type() == MYSQL_TYPE_BIT)
- total_uneven_bit_length+= new_field->field_length & 7;
- if (new_field->flags & BLOB_FLAG)
- {
- *blob_field++= fieldnr;
- blob_count++;
- }
- if (new_field->real_type() == MYSQL_TYPE_STRING ||
- new_field->real_type() == MYSQL_TYPE_VARCHAR)
- {
- string_count++;
- string_total_length+= new_field->pack_length();
- }
+ uneven_delta= m_uneven_bit_length;
+ add_field(table, new_field, fieldnr++, param->force_not_null_cols);
+ uneven_delta= m_uneven_bit_length - uneven_delta;
+ m_field_count[current_counter]++;
if (item->marker == 4 && item->maybe_null)
{
- group_null_items++;
- new_field->flags|= GROUP_FLAG;
+ m_group_null_items++;
+ new_field->flags|= GROUP_FLAG;
}
- new_field->field_index= fieldnr++;
- *(reg_field++)= new_field;
- }
- if (!--hidden_field_count)
- {
- /*
- This was the last hidden field; Remember how many hidden fields could
- have null
- */
- hidden_null_count=null_count;
- /*
- We need to update hidden_field_count as we may have stored group
- functions with constant arguments
- */
- param->hidden_field_count= fieldnr;
- null_count= 0;
- /*
- On last hidden field we store uneven bit length in
- hidden_uneven_bit_length and proceed calculation of
- uneven bits for visible fields into
- total_uneven_bit_length variable.
- */
- hidden_uneven_bit_length= total_uneven_bit_length;
- total_uneven_bit_length= 0;
+ if (current_counter == distinct)
+ new_field->flags|= FIELD_PART_OF_TMP_UNIQUE;
}
+ m_uneven_bit[current_counter]+= uneven_delta;
}
- DBUG_ASSERT(fieldnr == (uint) (reg_field - table->field));
- DBUG_ASSERT(field_count >= (uint) (reg_field - table->field));
- field_count= fieldnr;
- *reg_field= 0;
- *blob_field= 0; // End marker
- share->fields= field_count;
+ DBUG_ASSERT(fieldnr == m_field_count[other] + m_field_count[distinct]);
+ DBUG_ASSERT(m_blob_count == m_blobs_count[other] + m_blobs_count[distinct]);
+ share->fields= fieldnr;
+ share->blob_fields= m_blob_count;
+ table->field[fieldnr]= 0; // End marker
+ share->blob_field[m_blob_count]= 0; // End marker
+ copy_func[0]= 0; // End marker
+ param->func_count= (uint) (copy_func - param->items_to_copy);
share->column_bitmap_size= bitmap_buffer_size(share->fields);
+ thd->mem_root= mem_root_save;
+ DBUG_RETURN(false);
+
+err:
+ thd->mem_root= mem_root_save;
+ DBUG_RETURN(true);
+}
+
+
+bool Create_tmp_table::finalize(THD *thd,
+ TABLE *table,
+ TMP_TABLE_PARAM *param,
+ bool do_not_open, bool keep_row_order)
+{
+ DBUG_ENTER("Create_tmp_table::finalize");
+ DBUG_ASSERT(table);
+
+ uint null_pack_length[2];
+ uint null_pack_base[2];
+ uint null_counter[2]= {0, 0};
+
+ uint whole_null_pack_length;
+
+ bool use_packed_rows= false;
+ uchar *pos;
+ uchar *null_flags;
+ KEY *keyinfo;
+ TMP_ENGINE_COLUMNDEF *recinfo;
+ TABLE_SHARE *share= table->s;
+ Copy_field *copy= param->copy_field;
+
+ MEM_ROOT *mem_root_save= thd->mem_root;
+ thd->mem_root= &table->mem_root;
+
+ DBUG_ASSERT(m_alloced_field_count >= share->fields);
+ DBUG_ASSERT(m_alloced_field_count >= share->blob_fields);
+
/* If result table is small; use a heap */
/* future: storage engine selection can be made dynamic? */
- if (blob_count || using_unique_constraint
- || (thd->variables.big_tables && !(select_options & SELECT_SMALL_RESULT))
- || (select_options & TMP_TABLE_FORCE_MYISAM)
+ if (share->blob_fields || m_using_unique_constraint
+ || (thd->variables.big_tables && !(m_select_options & SELECT_SMALL_RESULT))
+ || (m_select_options & TMP_TABLE_FORCE_MYISAM)
|| thd->variables.tmp_memory_table_size == 0)
{
share->db_plugin= ha_lock_engine(0, TMP_ENGINE_HTON);
table->file= get_new_handler(share, &table->mem_root,
share->db_type());
- if (group &&
+ if (m_group &&
(param->group_parts > table->file->max_key_parts() ||
param->group_length > table->file->max_key_length()))
- using_unique_constraint= true;
+ m_using_unique_constraint= true;
}
else
{
@@ -18518,35 +18609,38 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
goto err;
}
- if (!using_unique_constraint)
- reclength+= group_null_items; // null flag is stored separately
+ if (!m_using_unique_constraint)
+ share->reclength+= m_group_null_items; // null flag is stored separately
- share->blob_fields= blob_count;
- if (blob_count == 0)
+ if (share->blob_fields == 0)
{
/* We need to ensure that first byte is not 0 for the delete link */
- if (param->hidden_field_count)
- hidden_null_count++;
+ if (m_field_count[other])
+ m_null_count[other]++;
else
- null_count++;
- }
- hidden_null_pack_length= (hidden_null_count + 7 +
- hidden_uneven_bit_length) / 8;
- null_pack_length= (hidden_null_pack_length +
- (null_count + total_uneven_bit_length + 7) / 8);
- reclength+=null_pack_length;
- if (!reclength)
- reclength=1; // Dummy select
+ m_null_count[distinct]++;
+ }
+
+ null_pack_length[other]= (m_null_count[other] + 7 +
+ m_uneven_bit[other]) / 8;
+ null_pack_base[other]= 0;
+ null_pack_length[distinct]= (m_null_count[distinct] + 7 +
+ m_uneven_bit[distinct]) / 8;
+ null_pack_base[distinct]= null_pack_length[other];
+ whole_null_pack_length= null_pack_length[other] +
+ null_pack_length[distinct];
+ share->reclength+= whole_null_pack_length;
+ if (!share->reclength)
+ share->reclength= 1; // Dummy select
/* Use packed rows if there is blobs or a lot of space to gain */
- if (blob_count ||
- (string_total_length >= STRING_TOTAL_LENGTH_TO_PACK_ROWS &&
- (reclength / string_total_length <= RATIO_TO_PACK_ROWS ||
- string_total_length / string_count >= AVG_STRING_LENGTH_TO_PACK_ROWS)))
+ if (share->blob_fields ||
+ (string_total_length() >= STRING_TOTAL_LENGTH_TO_PACK_ROWS &&
+ (share->reclength / string_total_length() <= RATIO_TO_PACK_ROWS ||
+ string_total_length() / string_count() >= AVG_STRING_LENGTH_TO_PACK_ROWS)))
use_packed_rows= 1;
- share->reclength= reclength;
{
- uint alloc_length=ALIGN_SIZE(reclength+MI_UNIQUE_HASH_LENGTH+1);
+ uint alloc_length= ALIGN_SIZE(share->reclength + MI_UNIQUE_HASH_LENGTH+1);
share->rec_buff_length= alloc_length;
if (!(table->record[0]= (uchar*)
alloc_root(&table->mem_root, alloc_length*3)))
@@ -18554,50 +18648,58 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
table->record[1]= table->record[0]+alloc_length;
share->default_values= table->record[1]+alloc_length;
}
- copy_func[0]=0; // End marker
- param->func_count= (uint)(copy_func - param->items_to_copy);
- setup_tmp_table_column_bitmaps(table, bitmaps);
+ setup_tmp_table_column_bitmaps(table, m_bitmaps);
recinfo=param->start_recinfo;
null_flags=(uchar*) table->record[0];
- pos=table->record[0]+ null_pack_length;
- if (null_pack_length)
+ pos=table->record[0]+ whole_null_pack_length;
+ if (whole_null_pack_length)
{
bzero((uchar*) recinfo,sizeof(*recinfo));
recinfo->type=FIELD_NORMAL;
- recinfo->length=null_pack_length;
+ recinfo->length= whole_null_pack_length;
recinfo++;
- bfill(null_flags,null_pack_length,255); // Set null fields
+ bfill(null_flags, whole_null_pack_length, 255); // Set null fields
table->null_flags= (uchar*) table->record[0];
- share->null_fields= null_count+ hidden_null_count;
- share->null_bytes= share->null_bytes_for_compare= null_pack_length;
+ share->null_fields= m_null_count[other] + m_null_count[distinct];
+ share->null_bytes= share->null_bytes_for_compare= whole_null_pack_length;
+ }
+
+ if (share->blob_fields == 0)
+ {
+ null_counter[(m_field_count[other] ? other : distinct)]++;
}
- null_count= (blob_count == 0) ? 1 : 0;
- hidden_field_count=param->hidden_field_count;
- for (i=0,reg_field=table->field; i < field_count; i++,reg_field++,recinfo++)
+ for (uint i= 0; i < share->fields; i++, recinfo++)
{
- Field *field= *reg_field;
+ Field *field= table->field[i];
uint length;
bzero((uchar*) recinfo,sizeof(*recinfo));
+ current_counter= ((field->flags & FIELD_PART_OF_TMP_UNIQUE) ?
+ distinct :
+ other);
+
if (!(field->flags & NOT_NULL_FLAG))
{
- recinfo->null_bit= (uint8)1 << (null_count & 7);
- recinfo->null_pos= null_count/8;
- field->move_field(pos,null_flags+null_count/8,
- (uint8)1 << (null_count & 7));
- null_count++;
+
+ recinfo->null_bit= (uint8)1 << (null_counter[current_counter] & 7);
+ recinfo->null_pos= (null_pack_base[current_counter] +
+ null_counter[current_counter]/8);
+ field->move_field(pos, null_flags + recinfo->null_pos, recinfo->null_bit);
+ null_counter[current_counter]++;
}
else
field->move_field(pos,(uchar*) 0,0);
if (field->type() == MYSQL_TYPE_BIT)
{
/* We have to reserve place for extra bits among null bits */
- ((Field_bit*) field)->set_bit_ptr(null_flags + null_count / 8,
- null_count & 7);
- null_count+= (field->field_length & 7);
+ ((Field_bit*) field)->set_bit_ptr(null_flags +
+ null_pack_base[current_counter] +
+ null_counter[current_counter]/8,
+ null_counter[current_counter] & 7);
+ null_counter[current_counter]+= (field->field_length & 7);
}
field->reset();
@@ -18605,14 +18707,14 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
Test if there is a default field value. The test for ->ptr is to skip
'offset' fields generated by initialize_tables
*/
- if (default_field[i] && default_field[i]->ptr)
+ if (m_default_field[i] && m_default_field[i]->ptr)
{
/*
default_field[i] is set only in the cases when 'field' can
inherit the default value that is defined for the field referred
by the Item_field object from which 'field' has been created.
*/
- const Field *orig_field= default_field[i];
+ const Field *orig_field= m_default_field[i];
/* Get the value from default_values */
if (orig_field->is_null_in_record(orig_field->table->s->default_values))
field->set_null();
@@ -18625,9 +18727,9 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
}
}
- if (from_field[i])
+ if (m_from_field[i])
{ /* Not a table Item */
- copy->set(field,from_field[i],save_sum_fields);
+ copy->set(field, m_from_field[i], m_save_sum_fields);
copy++;
}
length=field->pack_length();
@@ -18635,25 +18737,13 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
/* Make entry for create table */
recinfo->length=length;
- if (field->flags & BLOB_FLAG)
- recinfo->type= FIELD_BLOB;
- else if (use_packed_rows &&
- field->real_type() == MYSQL_TYPE_STRING &&
- length >= MIN_STRING_LENGTH_TO_PACK_ROWS)
- recinfo->type= FIELD_SKIP_ENDSPACE;
- else if (field->real_type() == MYSQL_TYPE_VARCHAR)
- recinfo->type= FIELD_VARCHAR;
- else
- recinfo->type= FIELD_NORMAL;
-
- if (!--hidden_field_count)
- null_count=(null_count+7) & ~7; // move to next byte
+ recinfo->type= field->tmp_engine_column_type(use_packed_rows);
// fix table name in field entry
field->set_table_name(&table->alias);
}
- param->copy_field_end=copy;
+ param->copy_field_end= copy;
param->recinfo= recinfo; // Pointer to after last field
store_record(table,s->default_values); // Make empty default record
@@ -18663,29 +18753,29 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
share->max_rows= (ha_rows) (((share->db_type() == heap_hton) ?
MY_MIN(thd->variables.tmp_memory_table_size,
thd->variables.max_heap_table_size) :
- thd->variables.tmp_memory_table_size) /
+ thd->variables.tmp_disk_table_size) /
share->reclength);
set_if_bigger(share->max_rows,1); // For dummy start options
/*
Push the LIMIT clause to the temporary table creation, so that we
materialize only up to 'rows_limit' records instead of all result records.
*/
- set_if_smaller(share->max_rows, rows_limit);
- param->end_write_records= rows_limit;
+ set_if_smaller(share->max_rows, m_rows_limit);
+ param->end_write_records= m_rows_limit;
keyinfo= param->keyinfo;
- if (group)
+ if (m_group)
{
DBUG_PRINT("info",("Creating group key in temporary table"));
- table->group=group; /* Table is grouped by key */
- param->group_buff=group_buff;
+ table->group= m_group; /* Table is grouped by key */
+ param->group_buff= m_group_buff;
share->keys=1;
- share->uniques= MY_TEST(using_unique_constraint);
+ share->uniques= MY_TEST(m_using_unique_constraint);
table->key_info= table->s->key_info= keyinfo;
table->keys_in_use_for_query.set_bit(0);
share->keys_in_use.set_bit(0);
- keyinfo->key_part=key_part_info;
+ keyinfo->key_part= m_key_part_info;
keyinfo->flags=HA_NOSAME | HA_BINARY_PACK_KEY | HA_PACK_KEY;
keyinfo->ext_key_flags= keyinfo->flags;
keyinfo->usable_key_parts=keyinfo->user_defined_key_parts= param->group_parts;
@@ -18697,29 +18787,29 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
keyinfo->algorithm= HA_KEY_ALG_UNDEF;
keyinfo->is_statistics_from_stat_tables= FALSE;
keyinfo->name= group_key;
- ORDER *cur_group= group;
- for (; cur_group ; cur_group= cur_group->next, key_part_info++)
+ ORDER *cur_group= m_group;
+ for (; cur_group ; cur_group= cur_group->next, m_key_part_info++)
{
Field *field=(*cur_group->item)->get_tmp_table_field();
DBUG_ASSERT(field->table == table);
bool maybe_null=(*cur_group->item)->maybe_null;
- key_part_info->null_bit=0;
- key_part_info->field= field;
- key_part_info->fieldnr= field->field_index + 1;
- if (cur_group == group)
+ m_key_part_info->null_bit=0;
+ m_key_part_info->field= field;
+ m_key_part_info->fieldnr= field->field_index + 1;
+ if (cur_group == m_group)
field->key_start.set_bit(0);
- key_part_info->offset= field->offset(table->record[0]);
- key_part_info->length= (uint16) field->key_length();
- key_part_info->type= (uint8) field->key_type();
- key_part_info->key_type =
- ((ha_base_keytype) key_part_info->type == HA_KEYTYPE_TEXT ||
- (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT1 ||
- (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT2) ?
+ m_key_part_info->offset= field->offset(table->record[0]);
+ m_key_part_info->length= (uint16) field->key_length();
+ m_key_part_info->type= (uint8) field->key_type();
+ m_key_part_info->key_type =
+ ((ha_base_keytype) m_key_part_info->type == HA_KEYTYPE_TEXT ||
+ (ha_base_keytype) m_key_part_info->type == HA_KEYTYPE_VARTEXT1 ||
+ (ha_base_keytype) m_key_part_info->type == HA_KEYTYPE_VARTEXT2) ?
0 : FIELDFLAG_BINARY;
- key_part_info->key_part_flag= 0;
- if (!using_unique_constraint)
+ m_key_part_info->key_part_flag= 0;
+ if (!m_using_unique_constraint)
{
- cur_group->buff=(char*) group_buff;
+ cur_group->buff=(char*) m_group_buff;
if (maybe_null && !field->null_bit)
{
@@ -18734,9 +18824,9 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
}
if (!(cur_group->field= field->new_key_field(thd->mem_root,table,
- group_buff +
+ m_group_buff +
MY_TEST(maybe_null),
- key_part_info->length,
+ m_key_part_info->length,
field->null_ptr,
field->null_bit)))
goto err; /* purecov: inspected */
@@ -18750,26 +18840,29 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
The NULL flag is updated in 'end_update()' and 'end_write()'
*/
keyinfo->flags|= HA_NULL_ARE_EQUAL; // def. that NULL == NULL
- key_part_info->null_bit=field->null_bit;
- key_part_info->null_offset= (uint) (field->null_ptr -
+ m_key_part_info->null_bit=field->null_bit;
+ m_key_part_info->null_offset= (uint) (field->null_ptr -
(uchar*) table->record[0]);
cur_group->buff++; // Pointer to field data
- group_buff++; // Skipp null flag
+ m_group_buff++; // Skipp null flag
}
- group_buff+= cur_group->field->pack_length();
+ m_group_buff+= cur_group->field->pack_length();
}
- keyinfo->key_length+= key_part_info->length;
+ keyinfo->key_length+= m_key_part_info->length;
}
/*
Ensure we didn't overrun the group buffer. The < is only true when
some maybe_null fields was changed to be not null fields.
*/
- DBUG_ASSERT(using_unique_constraint ||
- group_buff <= param->group_buff + param->group_length);
+ DBUG_ASSERT(m_using_unique_constraint ||
+ m_group_buff <= param->group_buff + param->group_length);
}
- if (distinct && field_count != param->hidden_field_count)
+ if (m_distinct && (share->fields != param->hidden_field_count ||
+ m_with_cycle))
{
+ uint i;
+ Field **reg_field;
/*
Create an unique key or an unique constraint over all columns
that should be in the result. In the temporary table, there are
@@ -18778,7 +18871,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
*/
DBUG_PRINT("info",("hidden_field_count: %d", param->hidden_field_count));
- if (blob_count)
+ if (m_blobs_count[distinct])
{
/*
Special mode for index creation in MyISAM used to support unique
@@ -18787,23 +18880,21 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
*/
share->uniques= 1;
}
- null_pack_length-=hidden_null_pack_length;
- keyinfo->user_defined_key_parts=
- ((field_count-param->hidden_field_count)+
- (share->uniques ? MY_TEST(null_pack_length) : 0));
+ keyinfo->user_defined_key_parts= m_field_count[distinct] +
+ (share->uniques ? MY_TEST(null_pack_length[distinct]) : 0);
keyinfo->ext_key_parts= keyinfo->user_defined_key_parts;
keyinfo->usable_key_parts= keyinfo->user_defined_key_parts;
table->distinct= 1;
share->keys= 1;
- if (!(key_part_info= (KEY_PART_INFO*)
+ if (!(m_key_part_info= (KEY_PART_INFO*)
alloc_root(&table->mem_root,
keyinfo->user_defined_key_parts * sizeof(KEY_PART_INFO))))
goto err;
- bzero((void*) key_part_info, keyinfo->user_defined_key_parts * sizeof(KEY_PART_INFO));
+ bzero((void*) m_key_part_info, keyinfo->user_defined_key_parts * sizeof(KEY_PART_INFO));
table->keys_in_use_for_query.set_bit(0);
share->keys_in_use.set_bit(0);
table->key_info= table->s->key_info= keyinfo;
- keyinfo->key_part=key_part_info;
+ keyinfo->key_part= m_key_part_info;
keyinfo->flags=HA_NOSAME | HA_NULL_ARE_EQUAL | HA_BINARY_PACK_KEY | HA_PACK_KEY;
keyinfo->ext_key_flags= keyinfo->flags;
keyinfo->key_length= 0; // Will compute the sum of the parts below.
@@ -18833,41 +18924,43 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
blobs can distinguish NULL from 0. This extra field is not needed
when we do not use UNIQUE indexes for blobs.
*/
- if (null_pack_length && share->uniques)
+ if (null_pack_length[distinct] && share->uniques)
{
- key_part_info->null_bit=0;
- key_part_info->offset=hidden_null_pack_length;
- key_part_info->length=null_pack_length;
- key_part_info->field= new Field_string(table->record[0],
- (uint32) key_part_info->length,
+ m_key_part_info->null_bit=0;
+ m_key_part_info->offset= null_pack_base[distinct];
+ m_key_part_info->length= null_pack_length[distinct];
+ m_key_part_info->field= new Field_string(table->record[0],
+ (uint32) m_key_part_info->length,
(uchar*) 0,
(uint) 0,
Field::NONE,
&null_clex_str, &my_charset_bin);
- if (!key_part_info->field)
+ if (!m_key_part_info->field)
goto err;
- key_part_info->field->init(table);
- key_part_info->key_type=FIELDFLAG_BINARY;
- key_part_info->type= HA_KEYTYPE_BINARY;
- key_part_info->fieldnr= key_part_info->field->field_index + 1;
- key_part_info++;
+ m_key_part_info->field->init(table);
+ m_key_part_info->key_type=FIELDFLAG_BINARY;
+ m_key_part_info->type= HA_KEYTYPE_BINARY;
+ m_key_part_info->fieldnr= m_key_part_info->field->field_index + 1;
+ m_key_part_info++;
}
/* Create a distinct key over the columns we are going to return */
- for (i=param->hidden_field_count, reg_field=table->field + i ;
- i < field_count;
- i++, reg_field++, key_part_info++)
+ for (i= param->hidden_field_count, reg_field= table->field + i ;
+ i < share->fields;
+ i++, reg_field++)
{
- key_part_info->field= *reg_field;
+ if (!((*reg_field)->flags & FIELD_PART_OF_TMP_UNIQUE))
+ continue;
+ m_key_part_info->field= *reg_field;
(*reg_field)->flags |= PART_KEY_FLAG;
- if (key_part_info == keyinfo->key_part)
+ if (m_key_part_info == keyinfo->key_part)
(*reg_field)->key_start.set_bit(0);
- key_part_info->null_bit= (*reg_field)->null_bit;
- key_part_info->null_offset= (uint) ((*reg_field)->null_ptr -
+ m_key_part_info->null_bit= (*reg_field)->null_bit;
+ m_key_part_info->null_offset= (uint) ((*reg_field)->null_ptr -
(uchar*) table->record[0]);
- key_part_info->offset= (*reg_field)->offset(table->record[0]);
- key_part_info->length= (uint16) (*reg_field)->pack_length();
- key_part_info->fieldnr= (*reg_field)->field_index + 1;
+ m_key_part_info->offset= (*reg_field)->offset(table->record[0]);
+ m_key_part_info->length= (uint16) (*reg_field)->pack_length();
+ m_key_part_info->fieldnr= (*reg_field)->field_index + 1;
/* TODO:
The below method of computing the key format length of the
key part is a copy/paste from opt_range.cc, and table.cc.
@@ -18876,34 +18969,25 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
methods is supposed to compute the same length. If so, it
might be reused.
*/
- key_part_info->store_length= key_part_info->length;
+ m_key_part_info->store_length= m_key_part_info->length;
if ((*reg_field)->real_maybe_null())
{
- key_part_info->store_length+= HA_KEY_NULL_LENGTH;
- key_part_info->key_part_flag |= HA_NULL_PART;
- }
- if ((*reg_field)->type() == MYSQL_TYPE_BLOB ||
- (*reg_field)->real_type() == MYSQL_TYPE_VARCHAR ||
- (*reg_field)->type() == MYSQL_TYPE_GEOMETRY)
- {
- if ((*reg_field)->type() == MYSQL_TYPE_BLOB ||
- (*reg_field)->type() == MYSQL_TYPE_GEOMETRY)
- key_part_info->key_part_flag|= HA_BLOB_PART;
- else
- key_part_info->key_part_flag|= HA_VAR_LENGTH_PART;
-
- key_part_info->store_length+=HA_KEY_BLOB_LENGTH;
+ m_key_part_info->store_length+= HA_KEY_NULL_LENGTH;
+ m_key_part_info->key_part_flag |= HA_NULL_PART;
}
+ m_key_part_info->key_part_flag|= (*reg_field)->key_part_flag();
+ m_key_part_info->store_length+= (*reg_field)->key_part_length_bytes();
+ keyinfo->key_length+= m_key_part_info->store_length;
- keyinfo->key_length+= key_part_info->store_length;
-
- key_part_info->type= (uint8) (*reg_field)->key_type();
- key_part_info->key_type =
- ((ha_base_keytype) key_part_info->type == HA_KEYTYPE_TEXT ||
- (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT1 ||
- (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT2) ?
+ m_key_part_info->type= (uint8) (*reg_field)->key_type();
+ m_key_part_info->key_type =
+ ((ha_base_keytype) m_key_part_info->type == HA_KEYTYPE_TEXT ||
+ (ha_base_keytype) m_key_part_info->type == HA_KEYTYPE_VARTEXT1 ||
+ (ha_base_keytype) m_key_part_info->type == HA_KEYTYPE_VARTEXT2) ?
0 : FIELDFLAG_BINARY;
+
+ m_key_part_info++;
}
}
@@ -18917,7 +19001,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
if (!do_not_open)
{
if (instantiate_tmp_table(table, param->keyinfo, param->start_recinfo,
- &param->recinfo, select_options))
+ &param->recinfo, m_select_options))
goto err;
}
@@ -18926,14 +19010,119 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
thd->mem_root= mem_root_save;
- DBUG_RETURN(table);
+ DBUG_RETURN(false);
err:
thd->mem_root= mem_root_save;
- free_tmp_table(thd,table); /* purecov: inspected */
- if (temp_pool_slot != MY_BIT_NONE)
- bitmap_lock_clear_bit(&temp_pool, temp_pool_slot);
- DBUG_RETURN(NULL); /* purecov: inspected */
+ DBUG_RETURN(true); /* purecov: inspected */
+}
+
+
+bool Create_tmp_table::add_schema_fields(THD *thd, TABLE *table,
+ TMP_TABLE_PARAM *param,
+ const ST_SCHEMA_TABLE &schema_table,
+ const MY_BITMAP &bitmap)
+{
+ DBUG_ENTER("Create_tmp_table::add_schema_fields");
+ DBUG_ASSERT(table);
+ DBUG_ASSERT(table->field);
+ DBUG_ASSERT(table->s->blob_field);
+ DBUG_ASSERT(table->s->reclength == 0);
+ DBUG_ASSERT(table->s->fields == 0);
+ DBUG_ASSERT(table->s->blob_fields == 0);
+
+ TABLE_SHARE *share= table->s;
+ ST_FIELD_INFO *defs= schema_table.fields_info;
+ uint fieldnr;
+ MEM_ROOT *mem_root_save= thd->mem_root;
+ thd->mem_root= &table->mem_root;
+
+ for (fieldnr= 0; !defs[fieldnr].end_marker(); fieldnr++)
+ {
+ const ST_FIELD_INFO &def= defs[fieldnr];
+ bool visible= bitmap_is_set(&bitmap, fieldnr);
+ Record_addr addr(def.nullable());
+ const Type_handler *h= def.type_handler();
+ Field *field= h->make_schema_field(&table->mem_root, table,
+ addr, def, visible);
+ if (!field)
+ {
+ thd->mem_root= mem_root_save;
+ DBUG_RETURN(true); // EOM
+ }
+ field->init(table);
+ switch (def.def()) {
+ case DEFAULT_NONE:
+ field->flags|= NO_DEFAULT_VALUE_FLAG;
+ break;
+ case DEFAULT_TYPE_IMPLICIT:
+ break;
+ default:
+ DBUG_ASSERT(0);
+ break;
+ }
+ add_field(table, field, fieldnr, param->force_not_null_cols);
+ }
+
+ share->fields= fieldnr;
+ share->blob_fields= m_blob_count;
+ table->field[fieldnr]= 0; // End marker
+ share->blob_field[m_blob_count]= 0; // End marker
+ param->func_count= 0;
+ share->column_bitmap_size= bitmap_buffer_size(share->fields);
+
+ thd->mem_root= mem_root_save;
+ DBUG_RETURN(false);
+}
+
+
+void Create_tmp_table::cleanup_on_failure(THD *thd, TABLE *table)
+{
+ if (table)
+ free_tmp_table(thd, table);
+ if (m_temp_pool_slot != MY_BIT_NONE)
+ bitmap_lock_clear_bit(&temp_pool, m_temp_pool_slot);
+}
+
+
+TABLE *create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
+ ORDER *group, bool distinct, bool save_sum_fields,
+ ulonglong select_options, ha_rows rows_limit,
+ const LEX_CSTRING *table_alias, bool do_not_open,
+ bool keep_row_order)
+{
+ TABLE *table;
+ Create_tmp_table maker(param, group,
+ distinct, save_sum_fields, select_options, rows_limit);
+ if (!(table= maker.start(thd, param, table_alias)) ||
+ maker.add_fields(thd, table, param, fields) ||
+ maker.finalize(thd, table, param, do_not_open, keep_row_order))
+ {
+ maker.cleanup_on_failure(thd, table);
+ return NULL;
+ }
+ return table;
+}
+
+
+TABLE *create_tmp_table_for_schema(THD *thd, TMP_TABLE_PARAM *param,
+ const ST_SCHEMA_TABLE &schema_table,
+ const MY_BITMAP &bitmap,
+ longlong select_options,
+ const LEX_CSTRING &table_alias,
+ bool keep_row_order)
+{
+ TABLE *table;
+ Create_tmp_table maker(param, (ORDER *) NULL, false, false,
+ select_options, HA_POS_ERROR);
+ if (!(table= maker.start(thd, param, &table_alias)) ||
+ maker.add_schema_fields(thd, table, param, schema_table, bitmap) ||
+ maker.finalize(thd, table, param, false, keep_row_order))
+ {
+ maker.cleanup_on_failure(thd, table);
+ return NULL;
+ }
+ return table;
}
@@ -19051,10 +19240,8 @@ bool Virtual_tmp_table::sp_find_field_by_name(uint *idx,
for (uint i= 0; (f= field[i]); i++)
{
// Use the same comparison style with sp_context::find_variable()
- if (!my_strnncoll(system_charset_info,
- (const uchar *) f->field_name.str,
- f->field_name.length,
- (const uchar *) name.str, name.length))
+ if (!system_charset_info->strnncoll(f->field_name.str, f->field_name.length,
+ name.str, name.length))
{
*idx= i;
return false;
@@ -19809,8 +19996,7 @@ do_select(JOIN *join, Procedure *procedure)
if (join->pushdown_query->store_data_in_temp_table)
{
- JOIN_TAB *last_tab= join->join_tab + join->table_count -
- join->exec_join_tab_cnt();
+ JOIN_TAB *last_tab= join->join_tab + join->exec_join_tab_cnt();
last_tab->next_select= end_send;
enum_nested_loop_state state= last_tab->aggr->end_send();
@@ -19845,7 +20031,7 @@ do_select(JOIN *join, Procedure *procedure)
// HAVING will be checked by end_select
error= (*end_select)(join, 0, 0);
if (error >= NESTED_LOOP_OK)
- error= (*end_select)(join, 0, 1);
+ error= (*end_select)(join, 0, 1);
/*
If we don't go through evaluate_join_record(), do the counting
@@ -19861,7 +20047,8 @@ do_select(JOIN *join, Procedure *procedure)
{
List<Item> *columns_list= (procedure ? &join->procedure_fields_list :
join->fields);
- rc= join->result->send_data(*columns_list) > 0;
+ rc= join->result->send_data_with_check(*columns_list,
+ join->unit, 0) > 0;
}
}
/*
@@ -20343,6 +20530,10 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
if (join_tab->loosescan_match_tab)
join_tab->loosescan_match_tab->found_match= FALSE;
+ const bool pfs_batch_update= join_tab->pfs_batch_update(join);
+ if (pfs_batch_update)
+ join_tab->table->file->start_psi_batch_mode();
+
if (rc != NESTED_LOOP_NO_MORE_ROWS)
{
error= (*join_tab->read_first_record)(join_tab);
@@ -20394,6 +20585,9 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
join_tab->last_inner && !join_tab->found)
rc= evaluate_null_complemented_join_record(join, join_tab);
+ if (pfs_batch_update)
+ join_tab->table->file->end_psi_batch_mode();
+
if (rc == NESTED_LOOP_NO_MORE_ROWS)
rc= NESTED_LOOP_OK;
DBUG_RETURN(rc);
@@ -21537,7 +21731,9 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
int error;
/* result < 0 if row was not accepted and should not be counted */
- if (unlikely((error= join->result->send_data(*fields))))
+ if (unlikely((error= join->result->send_data_with_check(*fields,
+ join->unit,
+ join->send_records))))
{
if (error > 0)
DBUG_RETURN(NESTED_LOOP_ERROR);
@@ -21547,7 +21743,7 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
}
++join->send_records;
- if (join->send_records >= join->unit->select_limit_cnt &&
+ if (join->send_records >= join->unit->lim.get_select_limit() &&
!join->do_send_rows)
{
/*
@@ -21565,7 +21761,7 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT);
}
}
- if (join->send_records >= join->unit->select_limit_cnt &&
+ if (join->send_records >= join->unit->lim.get_select_limit() &&
join->do_send_rows)
{
if (join->select_options & OPTION_FOUND_ROWS)
@@ -21685,7 +21881,9 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
if (join->do_send_rows)
{
- error=join->result->send_data(*fields);
+ error= join->result->send_data_with_check(*fields,
+ join->unit,
+ join->send_records);
if (unlikely(error < 0))
{
/* Duplicate row, don't count */
@@ -21706,13 +21904,13 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */
if (end_of_records)
DBUG_RETURN(NESTED_LOOP_OK);
- if (join->send_records >= join->unit->select_limit_cnt &&
+ if (join->send_records >= join->unit->lim.get_select_limit() &&
join->do_send_rows)
{
if (!(join->select_options & OPTION_FOUND_ROWS))
DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT); // Abort nicely
join->do_send_rows=0;
- join->unit->select_limit_cnt = HA_POS_ERROR;
+ join->unit->lim.set_unlimited();
}
else if (join->send_records >= join->fetch_limit)
{
@@ -21797,7 +21995,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (!(join->select_options & OPTION_FOUND_ROWS))
DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT);
join->do_send_rows=0;
- join->unit->select_limit_cnt = HA_POS_ERROR;
+ join->unit->lim.set_unlimited();
}
}
}
@@ -23159,8 +23357,9 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
(tab->join->select_options &
OPTION_FOUND_ROWS) ?
HA_POS_ERROR :
- tab->join->unit->select_limit_cnt,TRUE,
- TRUE, FALSE, FALSE) <= 0;
+ tab->join->unit->
+ lim.get_select_limit(),
+ TRUE, TRUE, FALSE, FALSE) <= 0;
if (res)
{
select->cond= save_cond;
@@ -23261,7 +23460,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
select->test_quick_select(join->thd, tmp_map, 0,
join->select_options & OPTION_FOUND_ROWS ?
HA_POS_ERROR :
- join->unit->select_limit_cnt,
+ join->unit->lim.get_select_limit(),
TRUE, FALSE, FALSE, FALSE);
if (cond_saved)
@@ -23707,7 +23906,7 @@ JOIN_TAB::remove_duplicates()
if (!field_count && !(join->select_options & OPTION_FOUND_ROWS) && !having)
{ // only const items with no OPTION_FOUND_ROWS
- join->unit->select_limit_cnt= 1; // Only send first row
+ join->unit->lim.set_single_row(); // Only send first row
DBUG_RETURN(false);
}
@@ -23844,21 +24043,21 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
Field **ptr;
DBUG_ENTER("remove_dup_with_hash_index");
- if (unlikely(!my_multi_malloc(MYF(MY_WME),
- &key_buffer,
- (uint) ((key_length + extra_length) *
- (long) file->stats.records),
- &field_lengths,
- (uint) (field_count*sizeof(*field_lengths)),
- NullS)))
+ if (!my_multi_malloc(key_memory_hash_index_key_buffer, MYF(MY_WME),
+ &key_buffer,
+ (uint) ((key_length + extra_length) *
+ (long) file->stats.records),
+ &field_lengths,
+ (uint) (field_count*sizeof(*field_lengths)),
+ NullS))
DBUG_RETURN(1);
for (ptr= first_field, field_length=field_lengths ; *ptr ; ptr++)
(*field_length++)= (*ptr)->sort_length();
- if (unlikely(my_hash_init(&hash, &my_charset_bin,
- (uint) file->stats.records, 0,
- key_length, (my_hash_get_key) 0, 0, 0)))
+ if (my_hash_init(key_memory_hash_index_key_buffer, &hash, &my_charset_bin,
+ (uint) file->stats.records, 0, key_length,
+ (my_hash_get_key) 0, 0, 0))
{
my_free(key_buffer);
DBUG_RETURN(1);
@@ -23894,7 +24093,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
field_length=field_lengths;
for (ptr= first_field ; *ptr ; ptr++)
{
- (*ptr)->make_sort_key(key_pos, *field_length);
+ (*ptr)->make_sort_key_part(key_pos, *field_length);
key_pos+= (*ptr)->maybe_null() + *field_length++;
}
/* Check if it exists before */
@@ -25519,7 +25718,7 @@ void free_underlaid_joins(THD *thd, SELECT_LEX *select)
****************************************************************************/
/**
- Replace occurences of group by fields in an expression by ref items.
+ Replace occurrences of group by fields in an expression by ref items.
The function replaces occurrences of group by fields in expr
by ref objects for these fields unless they are under aggregate
@@ -25580,8 +25779,9 @@ static bool change_group_ref(THD *thd, Item_func *expr, ORDER *group_list,
{
Item *new_item;
if (!(new_item= new (thd->mem_root) Item_ref(thd, context,
- group_tmp->item, 0,
- &item->name)))
+ group_tmp->item,
+ null_clex_str,
+ item->name)))
return 1; // fatal_error is set
thd->change_item_tree(arg, new_item);
arg_changed= TRUE;
@@ -25906,8 +26106,9 @@ int JOIN::rollup_send_data(uint idx)
copy_ref_ptr_array(ref_ptrs, rollup.ref_pointer_arrays[i]);
if ((!having || having->val_int()))
{
- if (send_records < unit->select_limit_cnt && do_send_rows &&
- (res= result->send_data(rollup.fields[i])) > 0)
+ if (send_records < unit->lim.get_select_limit() && do_send_rows &&
+ (res= result->send_data_with_check(rollup.fields[i],
+ unit, send_records)) > 0)
return 1;
if (!res)
send_records++;
@@ -26139,8 +26340,10 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
/* Enable the table access time tracker only for "ANALYZE stmt" */
if (thd->lex->analyze_stmt)
+ {
table->file->set_time_tracker(&eta->op_tracker);
-
+ eta->op_tracker.my_gap_tracker = &eta->extra_time_tracker;
+ }
/* No need to save id and select_type here, they are kept in Explain_select */
/* table */
@@ -26320,16 +26523,14 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
table_list->schema_table->i_s_requested_object & OPTIMIZE_I_S_TABLE)
{
IS_table_read_plan *is_table_read_plan= table_list->is_table_read_plan;
- const char *tmp_buff;
- int f_idx;
StringBuffer<64> key_name_buf;
if (is_table_read_plan->trivial_show_command ||
is_table_read_plan->has_db_lookup_value())
{
/* The "key" has the name of the column referring to the database */
- f_idx= table_list->schema_table->idx_field1;
- tmp_buff= table_list->schema_table->fields_info[f_idx].field_name;
- key_name_buf.append(tmp_buff, strlen(tmp_buff), cs);
+ int f_idx= table_list->schema_table->idx_field1;
+ LEX_CSTRING tmp= table_list->schema_table->fields_info[f_idx].name();
+ key_name_buf.append(tmp, cs);
}
if (is_table_read_plan->trivial_show_command ||
is_table_read_plan->has_table_lookup_value())
@@ -26338,9 +26539,9 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
is_table_read_plan->has_db_lookup_value())
key_name_buf.append(',');
- f_idx= table_list->schema_table->idx_field2;
- tmp_buff= table_list->schema_table->fields_info[f_idx].field_name;
- key_name_buf.append(tmp_buff, strlen(tmp_buff), cs);
+ int f_idx= table_list->schema_table->idx_field2;
+ LEX_CSTRING tmp= table_list->schema_table->fields_info[f_idx].name();
+ key_name_buf.append(tmp, cs);
}
if (key_name_buf.length())
@@ -26813,21 +27014,8 @@ int JOIN::save_explain_data_intern(Explain_query *output,
tmp_unit;
tmp_unit= tmp_unit->next_unit())
{
- /*
- Display subqueries only if
- (1) they are not parts of ON clauses that were eliminated by table
- elimination.
- (2) they are not merged derived tables
- (3) they are not hanging CTEs (they are needed for execution)
- */
- if (!(tmp_unit->item && tmp_unit->item->eliminated) && // (1)
- (!tmp_unit->derived ||
- tmp_unit->derived->is_materialized_derived()) && // (2)
- !(tmp_unit->with_element &&
- (!tmp_unit->derived || !tmp_unit->derived->derived_result))) // (3)
- {
+ if (tmp_unit->explainable())
explain->add_child(tmp_unit->first_select()->select_number);
- }
}
if (select_lex->is_top_level_node())
@@ -26881,16 +27069,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
DBUG_ASSERT(ref == unit->item);
}
- /*
- Save plans for child subqueries, when
- (1) they are not parts of eliminated WHERE/ON clauses.
- (2) they are not VIEWs that were "merged for INSERT".
- (3) they are not hanging CTEs (they are needed for execution)
- */
- if (!(unit->item && unit->item->eliminated) && // (1)
- !(unit->derived && unit->derived->merged_for_insert) && // (2)
- !(unit->with_element &&
- (!unit->derived || !unit->derived->derived_result))) // (3)
+ if (unit->explainable())
{
if (mysql_explain_union(thd, unit, result))
DBUG_VOID_RETURN;
@@ -26932,15 +27111,11 @@ bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result)
{
thd->lex->current_select= first;
unit->set_limit(unit->global_parameters());
- res= mysql_select(thd,
- first->table_list.first,
- first->with_wild, first->item_list,
+ res= mysql_select(thd, first->table_list.first, first->item_list,
first->where,
first->order_list.elements + first->group_list.elements,
- first->order_list.first,
- first->group_list.first,
- first->having,
- thd->lex->proc_list.first,
+ first->order_list.first, first->group_list.first,
+ first->having, thd->lex->proc_list.first,
first->options | thd->variables.option_bits | SELECT_DESCRIBE,
result, unit, first);
}
@@ -27147,7 +27322,7 @@ Index_hint::print(THD *thd, String *str)
str->append (STRING_WITH_LEN(" ("));
if (key_name.length)
{
- if (thd && !my_strnncoll(system_charset_info,
+ if (thd && !system_charset_info->strnncoll(
(const uchar *)key_name.str, key_name.length,
(const uchar *)primary_key_name,
strlen(primary_key_name)))
@@ -27397,7 +27572,7 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
else
str->append(',');
- if (is_subquery_function() && item->is_autogenerated_name)
+ if (is_subquery_function() && item->is_autogenerated_name())
{
/*
Do not print auto-generated aliases in subqueries. It has no purpose
@@ -27693,8 +27868,8 @@ JOIN::reoptimize(Item *added_where, table_map join_tables,
reset_query_plan();
if (!keyuse.buffer &&
- my_init_dynamic_array(&keyuse, sizeof(KEYUSE), 20, 64,
- MYF(MY_THREAD_SPECIFIC)))
+ my_init_dynamic_array(thd->mem_root->m_psi_key, &keyuse, sizeof(KEYUSE),
+ 20, 64, MYF(MY_THREAD_SPECIFIC)))
{
delete_dynamic(&added_keyuse);
return REOPT_ERROR;
@@ -28786,5 +28961,278 @@ select_handler *SELECT_LEX::find_select_handler(THD *thd)
/**
+ @brief
+ Construct not null conditions for provingly not nullable fields
+
+ @details
+ For each non-constant joined table the function creates a conjunction
+ of IS NOT NULL predicates containing a predicate for each field used
+ in the WHERE clause or an OR expression such that
+ - is declared as nullable
+ - for which it can be proved that it is null-rejected
+ - is a part of some index.
+ This conjunction could be anded with either the WHERE condition or with
+ an ON expression and the modified join query would produce the same
+ result set as the original one.
+ If a conjunction of IS NOT NULL predicates is constructed for an inner
+ table of an outer join OJ that is not an inner table of embedded outer
+ joins then it is to be anded with the ON expression of OJ.
+ The constructed conjunctions of IS NOT NULL predicates are attached
+ to the corresponding tables. They are used for range analysis complementary
+ to other sargable range conditions.
+
+ @note
+ Let f be a field of the joined table t. In the context of the upper
+ paragraph field f is called null-rejected if any the following holds:
+
+ - t is a table of a top inner join and a conjunctive formula that rejects
+ rows with null values for f can be extracted from the WHERE condition
+
+ - t is an outer table of a top outer join operation and a conjunctive
+ formula over the outer tables of the outer join that rejects rows with
+ null values for f can be extracted from the WHERE condition
+
+ - t is an outer table of a non-top outer join operation and a conjunctive
+ formula over the outer tables of the outer join that rejects rows with
+ null values for f can be extracted from the ON expression of the
+ embedding outer join
+
+ - the joined table is an inner table of an outer join operation and
+ a conjunctive formula over inner tables of the outer join that rejects
+ rows with null values for f can be extracted from the ON expression of
+ the outer join operation.
+
+ It is assumed above that all inner join nests have been eliminated and
+ that all possible conversions of outer joins into inner joins have been
+ already done.
+*/
+
+void JOIN::make_notnull_conds_for_range_scans()
+{
+ DBUG_ENTER("JOIN::make_notnull_conds_for_range_scans");
+
+
+ if (impossible_where ||
+ !optimizer_flag(thd, OPTIMIZER_SWITCH_NOT_NULL_RANGE_SCAN))
+ {
+ /* Complementary range analysis is not needed */
+ DBUG_VOID_RETURN;
+ }
+
+ if (conds && build_notnull_conds_for_range_scans(this, conds,
+ conds->used_tables()))
+ {
+ Item *false_cond= new (thd->mem_root) Item_int(thd, (longlong) 0, 1);
+ if (false_cond)
+ {
+ /*
+ Found an IS NULL conjunctive predicate for a null-rejected field
+ in the WHERE clause
+ */
+ conds= false_cond;
+ cond_equal= 0;
+ impossible_where= true;
+ }
+ DBUG_VOID_RETURN;
+ }
+
+ List_iterator<TABLE_LIST> li(*join_list);
+ TABLE_LIST *tbl;
+ while ((tbl= li++))
+ {
+ if (tbl->on_expr)
+ {
+ if (tbl->nested_join)
+ {
+ build_notnull_conds_for_inner_nest_of_outer_join(this, tbl);
+ }
+ else if (build_notnull_conds_for_range_scans(this, tbl->on_expr,
+ tbl->table->map))
+ {
+ /*
+ Found an IS NULL conjunctive predicate for a null-rejected field
+ of the inner table of an outer join with ON expression tbl->on_expr
+ */
+ Item *false_cond= new (thd->mem_root) Item_int(thd, (longlong) 0, 1);
+ if (false_cond)
+ tbl->on_expr= false_cond;
+ }
+ }
+ }
+ DBUG_VOID_RETURN;
+}
+
+
+/**
+ @brief
+ Build not null conditions for range scans of given join tables
+
+ @param join the join for whose tables not null conditions are to be built
+ @param cond the condition from which not null predicates are to be inferred
+ @param allowed the bit map of join tables to be taken into account
+
+ @details
+ For each join table t from the 'allowed' set of tables the function finds
+ all fields whose null-rejectedness can be inferred from null-rejectedness
+ of the condition cond. For each found field f from table t such that it
+ participates at least in one index on table t a NOT NULL predicate is
+ constructed and a conjunction of all such predicates is attached to t.
+ If when looking for null-rejecting fields of t it is discovered one of its
+ fields has to be null-rejected and there is an IS NULL conjunctive top level
+ predicate for this field then the function immediately returns true.
+ The function uses the bitmap TABLE::tmp_set to mark found null-rejected
+ fields of table t.
+
+ @note
+ Currently only top level conjuncts without disjunctive sub-formulas
+ are taken into account when looking for null-rejected fields.
+
+ @retval
+ true if a contradiction is inferred
+ false otherwise
+*/
+
+static
+bool build_notnull_conds_for_range_scans(JOIN *join, Item *cond,
+ table_map allowed)
+{
+ THD *thd= join->thd;
+
+ DBUG_ENTER("build_notnull_conds_for_range_scans");
+
+ for (JOIN_TAB *s= join->join_tab + join->const_tables ;
+ s < join->join_tab + join->table_count ; s++)
+ {
+ /* Clear all needed bitmaps to mark found fields */
+ if (allowed & s->table->map)
+ bitmap_clear_all(&s->table->tmp_set);
+ }
+
+ /*
+ Find all null-rejected fields assuming that cond is null-rejected and
+ only formulas over tables from 'allowed' are to be taken into account
+ */
+ if (cond->find_not_null_fields(allowed))
+ DBUG_RETURN(true);
+
+ /*
+ For each table t from 'allowed' build a conjunction of NOT NULL predicates
+ constructed for all found fields if they are included in some indexes.
+ If the construction of the conjunction succeeds attach the formula to
+ t->table->notnull_cond. The condition will be used to look for complementary
+ range scans.
+ */
+ for (JOIN_TAB *s= join->join_tab + join->const_tables ;
+ s < join->join_tab + join->table_count ; s++)
+ {
+ TABLE *tab= s->table;
+ List<Item> notnull_list;
+ Item *notnull_cond= 0;
+
+ if (!(allowed & tab->map))
+ continue;
+
+ for (Field** field_ptr= tab->field; *field_ptr; field_ptr++)
+ {
+ Field *field= *field_ptr;
+ if (field->part_of_key.is_clear_all())
+ continue;
+ if (!bitmap_is_set(&tab->tmp_set, field->field_index))
+ continue;
+ Item_field *field_item= new (thd->mem_root) Item_field(thd, field);
+ if (!field_item)
+ continue;
+ Item *isnotnull_item=
+ new (thd->mem_root) Item_func_isnotnull(thd, field_item);
+ if (!isnotnull_item)
+ continue;
+ if (notnull_list.push_back(isnotnull_item, thd->mem_root))
+ continue;
+ s->const_keys.merge(field->part_of_key);
+ }
+
+ switch (notnull_list.elements) {
+ case 0:
+ break;
+ case 1:
+ notnull_cond= notnull_list.head();
+ break;
+ default:
+ notnull_cond=
+ new (thd->mem_root) Item_cond_and(thd, notnull_list);
+ }
+ if (notnull_cond && !notnull_cond->fix_fields(thd, 0))
+ {
+ tab->notnull_cond= notnull_cond;
+ }
+ }
+ DBUG_RETURN(false);
+}
+
+
+/**
+ @brief
+ Build not null conditions for inner nest tables of an outer join
+
+ @param join the join for whose table nest not null conditions are to be built
+ @param nest_tbl the nest of the inner tables of an outer join
+
+ @details
+ The function assumes that nest_tbl is the nest of the inner tables of an
+ outer join and so an ON expression for this outer join is attached to
+ nest_tbl.
+ The function selects the tables of the nest_tbl that are not inner tables of
+ embedded outer joins and then it calls build_notnull_conds_for_range_scans()
+ for nest_tbl->on_expr and the bitmap for the selected tables. This call
+ finds all fields belonging to the selected tables whose null-rejectedness
+ can be inferred from the null-rejectedness of nest_tbl->on_expr. After this
+ the function recursively finds all null_rejected fields for the remaining
+ tables from the nest of nest_tbl.
+*/
+
+static
+void build_notnull_conds_for_inner_nest_of_outer_join(JOIN *join,
+ TABLE_LIST *nest_tbl)
+{
+ TABLE_LIST *tbl;
+ table_map used_tables= 0;
+ THD *thd= join->thd;
+ List_iterator<TABLE_LIST> li(nest_tbl->nested_join->join_list);
+
+ while ((tbl= li++))
+ {
+ if (!tbl->on_expr)
+ used_tables|= tbl->table->map;
+ }
+ if (used_tables &&
+ build_notnull_conds_for_range_scans(join, nest_tbl->on_expr, used_tables))
+ {
+ Item *false_cond= new (thd->mem_root) Item_int(thd, (longlong) 0, 1);
+ if (false_cond)
+ nest_tbl->on_expr= false_cond;
+ }
+
+ li.rewind();
+ while ((tbl= li++))
+ {
+ if (tbl->on_expr)
+ {
+ if (tbl->nested_join)
+ {
+ build_notnull_conds_for_inner_nest_of_outer_join(join, tbl);
+ }
+ else if (build_notnull_conds_for_range_scans(join, tbl->on_expr,
+ tbl->table->map))
+ {
+ Item *false_cond= new (thd->mem_root) Item_int(thd, (longlong) 0, 1);
+ if (false_cond)
+ tbl->on_expr= false_cond;
+ }
+ }
+ }
+}
+
+
+/**
@} (end of group Query_Optimizer)
*/