Diffstat (limited to 'sql/opt_range.cc')
 -rw-r--r--  sql/opt_range.cc | 2097
 1 files changed, 1765 insertions, 332 deletions
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 079501f309b..7f58e0359c4 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -23,16 +23,42 @@
*/
/*
- Classes in this file are used in the following way:
- 1. For a selection condition a tree of SEL_IMERGE/SEL_TREE/SEL_ARG objects
- is created. #of rows in table and index statistics are ignored at this
- step.
- 2. Created SEL_TREE and index stats data are used to construct a
- TABLE_READ_PLAN-derived object (TRP_*). Several 'candidate' table read
- plans may be created.
- 3. The least expensive table read plan is used to create a tree of
- QUICK_SELECT_I-derived objects which are later used for row retrieval.
- QUICK_RANGEs are also created in this step.
+ This file contains:
+
+ RangeAnalysisModule
+    A module that accepts a condition and an index (or partitioning)
+    description, and builds lists of intervals (in index/partitioning space),
+    such that all possible records that match the condition are contained
+    within the intervals.
+    The entry point for the range analysis module is the get_mm_tree() function.
+
+    The lists are returned in the form of a complicated structure of
+    interlinked SEL_TREE/SEL_IMERGE/SEL_ARG objects.
+ See check_quick_keys, find_used_partitions for examples of how to walk
+ this structure.
+ All direct "users" of this module are located within this file, too.
+
+
+ PartitionPruningModule
+    A module that accepts a partitioned table and a condition, and finds
+    which partitions we will need to use in query execution. Search down
+    for "PartitionPruningModule" for a description.
+    The module has a single entry point, the prune_partitions() function.
+
+
+ Range/index_merge/groupby-minmax optimizer module
+    A module that accepts a table and a condition, and returns
+ - a QUICK_*_SELECT object that can be used to retrieve rows that match
+ the specified condition, or a "no records will match the condition"
+ statement.
+
+ The module entry points are
+ test_quick_select()
+ get_quick_select_for_ref()
+
+
+ Record retrieval code for range/index_merge/groupby-min-max.
+ Implementations of QUICK_*_SELECT classes.
*/
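
[Editorial sketch, not part of the patch: a simplified, hypothetical caller
 showing how the modules above chain together for one query over a
 partitioned table; process_record() is a placeholder.]

    prune_partitions(thd, table, where_cond);         /* PartitionPruningModule */
    select->test_quick_select(thd, keys_to_use, ...); /* pick cheapest read plan */
    QUICK_SELECT_I *quick= select->quick;             /* built from the best TRP_* */
    quick->init();
    quick->reset();
    while (!quick->get_next())                        /* record retrieval code */
      process_record();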
#ifdef USE_PRAGMA_IMPLEMENTATION
@@ -386,6 +412,48 @@ public:
return parent->left == this ? &parent->left : &parent->right;
}
SEL_ARG *clone_tree();
+
+
+ /*
+ Check if this SEL_ARG object represents a single-point interval
+
+ SYNOPSIS
+ is_singlepoint()
+
+ DESCRIPTION
+ Check if this SEL_ARG object (not tree) represents a single-point
+ interval, i.e. if it represents a "keypart = const" or
+ "keypart IS NULL".
+
+ RETURN
+ TRUE This SEL_ARG object represents a singlepoint interval
+ FALSE Otherwise
+ */
+
+ bool is_singlepoint()
+ {
+ /*
+ Check for NEAR_MIN ("strictly less") and NO_MIN_RANGE (-inf < field)
+ flags, and the same for right edge.
+ */
+ if (min_flag || max_flag)
+ return FALSE;
+ byte *min_val= (byte *)min_value;
+ byte *max_val= (byte *)max_value;
+
+ if (maybe_null)
+ {
+ /* First byte is a NULL value indicator */
+ if (*min_val != *max_val)
+ return FALSE;
+
+ if (*min_val)
+ return TRUE; /* This is "x IS NULL" */
+ min_val++;
+ max_val++;
+ }
+ return !field->key_cmp(min_val, max_val);
+ }
};
class SEL_IMERGE;
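
[Editorial sketch: how is_singlepoint() is typically consulted, mirroring its
 use in find_used_partitions() further down; the first()/next walk over one
 keypart level is assumed from SEL_ARG's interval-list interface.]

    for (SEL_ARG *arg= root->first(); arg; arg= arg->next)
    {
      if (arg->is_singlepoint())
        ;  /* "keypart = const" or "keypart IS NULL": the constant can be
              used directly, e.g. to compute a partition id */
      else
        ;  /* a real range: needs interval analysis */
    }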
@@ -394,6 +462,11 @@ class SEL_IMERGE;
class SEL_TREE :public Sql_alloc
{
public:
+ /*
+ Starting an effort to document this field:
+ (for some i, keys[i]->type == SEL_ARG::IMPOSSIBLE) =>
+ (type == SEL_TREE::IMPOSSIBLE)
+ */
enum Type { IMPOSSIBLE, ALWAYS, MAYBE, KEY, KEY_SMALLER } type;
SEL_TREE(enum Type type_arg) :type(type_arg) {}
SEL_TREE() :type(KEY)
@@ -401,6 +474,12 @@ public:
keys_map.clear_all();
bzero((char*) keys,sizeof(keys));
}
+ /*
+ Note: there may exist SEL_TREE objects with sel_tree->type=KEY and
+ keys[i]=0 for all i. (SergeyP: it is not clear whether there is any
+ merit in range analyzer functions (e.g. get_mm_parts) returning a
+ pointer to such SEL_TREE instead of NULL)
+ */
SEL_ARG *keys[MAX_KEY];
key_map keys_map; /* bitmask of non-NULL elements in keys */
@@ -419,25 +498,54 @@ public:
/* Note that #records for each key scan is stored in table->quick_rows */
};
+class RANGE_OPT_PARAM
+{
+public:
+ THD *thd; /* Current thread handle */
+ TABLE *table; /* Table being analyzed */
+ COND *cond; /* Used inside get_mm_tree(). */
+ table_map prev_tables;
+ table_map read_tables;
+ table_map current_table; /* Bit of the table being analyzed */
+
+ /* Array of parts of all keys for which range analysis is performed */
+ KEY_PART *key_parts;
+ KEY_PART *key_parts_end;
+ MEM_ROOT *mem_root; /* Memory that will be freed when range analysis completes */
+ MEM_ROOT *old_root; /* Memory that will last until the query end */
+ /*
+ Number of indexes used in range analysis (in SEL_TREE::keys, only the
+ first #keys elements are not empty)
+ */
+ uint keys;
+
+ /*
+ If true, the index descriptions describe real indexes (and it is ok to
+ call field->optimize_range(real_keynr[...], ...)).
+ Otherwise the index descriptions describe fake indexes.
+ */
+ bool using_real_indexes;
+
+ bool remove_jump_scans;
+
+ /*
+ used_key_no -> table_key_no translation table. Only makes sense if
+ using_real_indexes==TRUE
+ */
+ uint real_keynr[MAX_KEY];
+};
-typedef struct st_qsel_param {
- THD *thd;
- TABLE *table;
- KEY_PART *key_parts,*key_parts_end;
+class PARAM : public RANGE_OPT_PARAM
+{
+public:
KEY_PART *key[MAX_KEY]; /* First key parts of keys used in the query */
- MEM_ROOT *mem_root, *old_root;
- table_map prev_tables,read_tables,current_table;
- uint baseflag, max_key_part, range_count;
+ longlong baseflag;
+ uint max_key_part, range_count;
- uint keys; /* number of keys used in the query */
-
- /* used_key_no -> table_key_no translation table */
- uint real_keynr[MAX_KEY];
char min_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH],
max_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH];
bool quick; // Don't calculate possible keys
- COND *cond;
uint fields_bitmap_size;
MY_BITMAP needed_fields; /* bitmask of fields needed by the query */
@@ -452,7 +560,7 @@ typedef struct st_qsel_param {
bool is_ror_scan;
/* Number of ranges in the last checked tree->key */
uint n_ranges;
-} PARAM;
+};
class TABLE_READ_PLAN;
class TRP_RANGE;
@@ -463,16 +571,17 @@ class TABLE_READ_PLAN;
struct st_ror_scan_info;
-static SEL_TREE * get_mm_parts(PARAM *param,COND *cond_func,Field *field,
+static SEL_TREE * get_mm_parts(RANGE_OPT_PARAM *param,COND *cond_func,Field *field,
Item_func::Functype type,Item *value,
Item_result cmp_type);
-static SEL_ARG *get_mm_leaf(PARAM *param,COND *cond_func,Field *field,
+static SEL_ARG *get_mm_leaf(RANGE_OPT_PARAM *param,COND *cond_func,Field *field,
KEY_PART *key_part,
Item_func::Functype type,Item *value);
-static SEL_TREE *get_mm_tree(PARAM *param,COND *cond);
+static SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param,COND *cond);
static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts);
-static ha_rows check_quick_select(PARAM *param,uint index,SEL_ARG *key_tree);
+static ha_rows check_quick_select(PARAM *param,uint index,SEL_ARG *key_tree,
+ bool update_tbl_stats);
static ha_rows check_quick_keys(PARAM *param,uint index,SEL_ARG *key_tree,
char *min_key,uint min_key_flag,
char *max_key, uint max_key_flag);
@@ -482,6 +591,7 @@ QUICK_RANGE_SELECT *get_quick_select(PARAM *param,uint index,
MEM_ROOT *alloc = NULL);
static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
bool index_read_must_be_used,
+ bool update_tbl_stats,
double read_time);
static
TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
@@ -512,8 +622,8 @@ static void print_rowid(byte* val, int len);
static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg);
#endif
-static SEL_TREE *tree_and(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2);
-static SEL_TREE *tree_or(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2);
+static SEL_TREE *tree_and(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2);
+static SEL_TREE *tree_or(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2);
static SEL_ARG *sel_add(SEL_ARG *key1,SEL_ARG *key2);
static SEL_ARG *key_or(SEL_ARG *key1,SEL_ARG *key2);
static SEL_ARG *key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag);
@@ -526,7 +636,7 @@ static bool eq_tree(SEL_ARG* a,SEL_ARG *b);
static SEL_ARG null_element(SEL_ARG::IMPOSSIBLE);
static bool null_part_in_key(KEY_PART *key_part, const char *key,
uint length);
-bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2, PARAM* param);
+bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2, RANGE_OPT_PARAM* param);
/*
@@ -558,9 +668,9 @@ public:
trees_next(trees),
trees_end(trees + PREALLOCED_TREES)
{}
- int or_sel_tree(PARAM *param, SEL_TREE *tree);
- int or_sel_tree_with_checks(PARAM *param, SEL_TREE *new_tree);
- int or_sel_imerge_with_checks(PARAM *param, SEL_IMERGE* imerge);
+ int or_sel_tree(RANGE_OPT_PARAM *param, SEL_TREE *tree);
+ int or_sel_tree_with_checks(RANGE_OPT_PARAM *param, SEL_TREE *new_tree);
+ int or_sel_imerge_with_checks(RANGE_OPT_PARAM *param, SEL_IMERGE* imerge);
};
@@ -576,7 +686,7 @@ public:
-1 - Out of memory.
*/
-int SEL_IMERGE::or_sel_tree(PARAM *param, SEL_TREE *tree)
+int SEL_IMERGE::or_sel_tree(RANGE_OPT_PARAM *param, SEL_TREE *tree)
{
if (trees_next == trees_end)
{
@@ -627,7 +737,7 @@ int SEL_IMERGE::or_sel_tree(PARAM *param, SEL_TREE *tree)
-1 An error occurred.
*/
-int SEL_IMERGE::or_sel_tree_with_checks(PARAM *param, SEL_TREE *new_tree)
+int SEL_IMERGE::or_sel_tree_with_checks(RANGE_OPT_PARAM *param, SEL_TREE *new_tree)
{
for (SEL_TREE** tree = trees;
tree != trees_next;
@@ -661,7 +771,7 @@ int SEL_IMERGE::or_sel_tree_with_checks(PARAM *param, SEL_TREE *new_tree)
-1 - An error occurred
*/
-int SEL_IMERGE::or_sel_imerge_with_checks(PARAM *param, SEL_IMERGE* imerge)
+int SEL_IMERGE::or_sel_imerge_with_checks(RANGE_OPT_PARAM *param, SEL_IMERGE* imerge)
{
for (SEL_TREE** tree= imerge->trees;
tree != imerge->trees_next;
@@ -707,7 +817,7 @@ inline void imerge_list_and_list(List<SEL_IMERGE> *im1, List<SEL_IMERGE> *im2)
other Error, both passed lists are unusable
*/
-int imerge_list_or_list(PARAM *param,
+int imerge_list_or_list(RANGE_OPT_PARAM *param,
List<SEL_IMERGE> *im1,
List<SEL_IMERGE> *im2)
{
@@ -727,7 +837,7 @@ int imerge_list_or_list(PARAM *param,
other Error
*/
-int imerge_list_or_tree(PARAM *param,
+int imerge_list_or_tree(RANGE_OPT_PARAM *param,
List<SEL_IMERGE> *im1,
SEL_TREE *tree)
{
@@ -822,6 +932,10 @@ QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(THD *thd, TABLE *table, uint key_nr,
bool no_alloc, MEM_ROOT *parent_alloc)
:dont_free(0),error(0),free_file(0),in_range(0),cur_range(NULL),range(0)
{
+ my_bitmap_map *bitmap;
+ DBUG_ENTER("QUICK_RANGE_SELECT::QUICK_RANGE_SELECT");
+
+ in_ror_merged_scan= 0;
sorted= 0;
index= key_nr;
head= table;
@@ -845,6 +959,19 @@ QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(THD *thd, TABLE *table, uint key_nr,
bzero((char*) &alloc,sizeof(alloc));
file= head->file;
record= head->record[0];
+ save_read_set= head->read_set;
+ save_write_set= head->write_set;
+
+ /* Allocate a bitmap for used columns */
+ if (!(bitmap= (my_bitmap_map*) my_malloc(head->s->column_bitmap_size,
+ MYF(MY_WME))))
+ {
+ column_bitmap.bitmap= 0;
+ error= 1;
+ }
+ else
+ bitmap_init(&column_bitmap, bitmap, head->s->fields, FALSE);
+ DBUG_VOID_RETURN;
}
@@ -854,7 +981,7 @@ int QUICK_RANGE_SELECT::init()
if (file->inited != handler::NONE)
file->ha_index_or_rnd_end();
- DBUG_RETURN(error= file->ha_index_init(index));
+ DBUG_RETURN(error= file->ha_index_init(index, 1));
}
@@ -883,18 +1010,18 @@ QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT()
{
DBUG_PRINT("info", ("Freeing separate handler 0x%lx (free: %d)", (long) file,
free_file));
- file->reset();
- file->external_lock(current_thd, F_UNLCK);
+ file->ha_external_lock(current_thd, F_UNLCK);
file->close();
+ delete file;
}
}
delete_dynamic(&ranges); /* ranges are allocated in alloc */
free_root(&alloc,MYF(0));
+ my_free((char*) column_bitmap.bitmap, MYF(MY_ALLOW_ZERO_PTR));
}
- if (multi_range)
- my_free((char*) multi_range, MYF(0));
- if (multi_range_buff)
- my_free((char*) multi_range_buff, MYF(0));
+ head->column_bitmaps_set(save_read_set, save_write_set);
+ x_free(multi_range);
+ x_free(multi_range_buff);
DBUG_VOID_RETURN;
}
@@ -1014,23 +1141,20 @@ int QUICK_ROR_INTERSECT_SELECT::init()
int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
{
- handler *save_file= file;
+ handler *save_file= file, *org_file;
+ THD *thd;
DBUG_ENTER("QUICK_RANGE_SELECT::init_ror_merged_scan");
+ in_ror_merged_scan= 1;
if (reuse_handler)
{
- DBUG_PRINT("info", ("Reusing handler %p", file));
- if (!head->no_keyread)
- {
- head->key_read= 1;
- file->extra(HA_EXTRA_KEYREAD);
- }
- if (file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) ||
- init() || reset())
+ DBUG_PRINT("info", ("Reusing handler 0x%lx", (long) file));
+ if (init() || reset())
{
DBUG_RETURN(1);
}
- DBUG_RETURN(0);
+ head->column_bitmaps_set(&column_bitmap, &column_bitmap);
+ goto end;
}
/* Create a separate handler object for this quick select */
@@ -1040,31 +1164,52 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
DBUG_RETURN(0);
}
- THD *thd= current_thd;
+ thd= head->in_use;
if (!(file= head->file->clone(thd->mem_root)))
{
/* Caller will free the memory */
goto failure;
}
- if (file->external_lock(thd, F_RDLCK))
+
+ head->column_bitmaps_set(&column_bitmap, &column_bitmap);
+
+ if (file->ha_external_lock(thd, F_RDLCK))
goto failure;
- if (!head->no_keyread)
- {
- head->key_read= 1;
- file->extra(HA_EXTRA_KEYREAD);
- }
- if (file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) ||
- init() || reset())
+
+ if (init() || reset())
{
- file->external_lock(thd, F_UNLCK);
+ file->ha_external_lock(thd, F_UNLCK);
file->close();
goto failure;
}
free_file= TRUE;
last_rowid= file->ref;
+
+end:
+ /*
+ We are only going to read key fields and call position() on 'file'.
+ The following sets head->tmp_set to only use this key, and then updates
+ head->read_set and head->write_set to use this bitmap.
+ The new bitmap is stored in 'column_bitmap', which is used in ::get_next().
+ */
+ org_file= head->file;
+ head->file= file;
+ /* We don't have to set 'head->keyread' here as the 'file' is unique */
+ if (!head->no_keyread)
+ {
+ head->key_read= 1;
+ head->mark_columns_used_by_index(index);
+ }
+ head->prepare_for_position();
+ head->file= org_file;
+ bitmap_copy(&column_bitmap, head->read_set);
+ head->column_bitmaps_set(&column_bitmap, &column_bitmap);
+
DBUG_RETURN(0);
failure:
+ head->column_bitmaps_set(save_read_set, save_write_set);
+ delete file;
file= save_file;
DBUG_RETURN(1);
}
@@ -1770,33 +1915,27 @@ public:
static int fill_used_fields_bitmap(PARAM *param)
{
TABLE *table= param->table;
- param->fields_bitmap_size= (table->s->fields/8 + 1);
- uchar *tmp;
+ my_bitmap_map *tmp;
uint pk;
param->tmp_covered_fields.bitmap= 0;
- if (!(tmp= (uchar*)alloc_root(param->mem_root,param->fields_bitmap_size)) ||
- bitmap_init(&param->needed_fields, tmp, param->fields_bitmap_size*8,
- FALSE))
+ param->fields_bitmap_size= table->s->column_bitmap_size;
+ if (!(tmp= (my_bitmap_map*) alloc_root(param->mem_root,
+ param->fields_bitmap_size)) ||
+ bitmap_init(&param->needed_fields, tmp, table->s->fields, FALSE))
return 1;
- bitmap_clear_all(&param->needed_fields);
- for (uint i= 0; i < table->s->fields; i++)
- {
- if (param->thd->query_id == table->field[i]->query_id)
- bitmap_set_bit(&param->needed_fields, i+1);
- }
+ bitmap_copy(&param->needed_fields, table->read_set);
+ bitmap_union(&param->needed_fields, table->write_set);
pk= param->table->s->primary_key;
- if (param->table->file->primary_key_is_clustered() && pk != MAX_KEY)
+ if (pk != MAX_KEY && param->table->file->primary_key_is_clustered())
{
/* The table uses clustered PK and it is not internally generated */
KEY_PART_INFO *key_part= param->table->key_info[pk].key_part;
KEY_PART_INFO *key_part_end= key_part +
param->table->key_info[pk].key_parts;
for (;key_part != key_part_end; ++key_part)
- {
- bitmap_clear_bit(&param->needed_fields, key_part->fieldnr);
- }
+ bitmap_clear_bit(&param->needed_fields, key_part->fieldnr-1);
}
return 0;
}
@@ -1821,16 +1960,46 @@ static int fill_used_fields_bitmap(PARAM *param)
quick - Parameter to use when reading records.
In the table struct the following information is updated:
- quick_keys - Which keys can be used
- quick_rows - How many rows the key matches
+ quick_keys - Which keys can be used
+ quick_rows - How many rows the key matches
+ quick_condition_rows - E(# rows that will satisfy the table condition)
+
+ IMPLEMENTATION
+ quick_condition_rows value is obtained as follows:
+
+ It is a minimum of E(#output rows) for all considered table access
+ methods (range and index_merge accesses over various indexes).
+
+ The obtained value is not a true E(#rows that satisfy table condition)
+ but rather a pessimistic estimate. To obtain a true E(#...) one would
+ need to combine estimates of various access methods, taking into account
+ correlations between sets of rows they will return.
+
+    For example, if the values of tbl.key1 and tbl.key2 are independent (a
+    reasonable assumption if we have no information about their correlation),
+    then the correct estimate will be:
+
+ E(#rows("tbl.key1 < c1 AND tbl.key2 < c2")) =
+ = E(#rows(tbl.key1 < c1)) / total_rows(tbl) * E(#rows(tbl.key2 < c2)
+
+ which is smaller than
+
+ MIN(E(#rows(tbl.key1 < c1), E(#rows(tbl.key2 < c2)))
+
+ which is currently produced.
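+
+    A worked instance of the formulas above (editorial): with
+    total_rows(tbl)=1000, E(#rows(tbl.key1 < c1))=100 and
+    E(#rows(tbl.key2 < c2))=200, the independence-based estimate is
+    100/1000 * 200 = 20 rows, while the pessimistic MIN() estimate gives
+    100 rows.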
TODO
- Check if this function really needs to modify keys_to_use, and change the
- code to pass it by reference if it doesn't.
+   * Change the value returned in quick_condition_rows from a pessimistic
+     estimate to a true E(#rows that satisfy table condition).
+     (we can re-use some of the E(#rows) calculation code from
+     index_merge/intersection for this)
+
+ * Check if this function really needs to modify keys_to_use, and change the
+ code to pass it by reference if it doesn't.
- In addition to force_quick_range other means can be (an usually are) used
- to make this function prefer range over full table scan. Figure out if
- force_quick_range is really needed.
+   * In addition to force_quick_range, other means can be (and usually are)
+     used to make this function prefer range over full table scan. Figure
+     out if force_quick_range is really needed.
RETURN
-1 if impossible select (i.e. certainly no rows will be selected)
@@ -1848,7 +2017,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
DBUG_PRINT("enter",("keys_to_use: %lu prev_tables: %lu const_tables: %lu",
(ulong) keys_to_use.to_ulonglong(), (ulong) prev_tables,
(ulong) const_tables));
- DBUG_PRINT("info", ("records: %lu", (ulong) head->file->records));
+ DBUG_PRINT("info", ("records: %lu", (ulong) head->file->stats.records));
delete quick;
quick=0;
needed_reg.clear_all();
@@ -1858,7 +2027,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
DBUG_RETURN(0); /* purecov: inspected */
if (keys_to_use.is_clear_all())
DBUG_RETURN(0);
- records= head->file->records;
+ records= head->file->stats.records;
if (!records)
records++; /* purecov: inspected */
scan_time= (double) records / TIME_FOR_COMPARE + 1;
@@ -1883,7 +2052,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
/* set up parameter that is passed to all functions */
param.thd= thd;
- param.baseflag=head->file->table_flags();
+ param.baseflag= head->file->ha_table_flags();
param.prev_tables=prev_tables | const_tables;
param.read_tables=read_tables;
param.current_table= head->map;
@@ -1893,6 +2062,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
param.old_root= thd->mem_root;
param.needed_reg= &needed_reg;
param.imerge_cost_buff_size= 0;
+ param.using_real_indexes= TRUE;
+ param.remove_jump_scans= TRUE;
thd->no_errors=1; // Don't warn about NULL
init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
@@ -1934,6 +2105,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
key_parts->null_bit= key_part_info->null_bit;
key_parts->image_type =
(key_info->flags & HA_SPATIAL) ? Field::itMBR : Field::itRAW;
+ /* Only HA_PART_KEY_SEG is used */
key_parts->flag= (uint8) key_part_info->key_part_flag;
}
param.real_keynr[param.keys++]=idx;
@@ -1967,9 +2139,12 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
read_time= (double) HA_POS_ERROR;
goto free_mem;
}
- if (tree->type != SEL_TREE::KEY &&
- tree->type != SEL_TREE::KEY_SMALLER)
- goto free_mem;
+ /*
+ If the tree can't be used for range scans, proceed anyway, as we
+ can construct a group-min-max quick select
+ */
+ if (tree->type != SEL_TREE::KEY && tree->type != SEL_TREE::KEY_SMALLER)
+ tree= NULL;
}
}
@@ -1978,10 +2153,15 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
Notice that it can be constructed no matter if there is a range tree.
*/
group_trp= get_best_group_min_max(&param, tree);
- if (group_trp && group_trp->read_cost < best_read_time)
+ if (group_trp)
{
- best_trp= group_trp;
- best_read_time= best_trp->read_cost;
+ param.table->quick_condition_rows= min(group_trp->records,
+ head->file->stats.records);
+ if (group_trp->read_cost < best_read_time)
+ {
+ best_trp= group_trp;
+ best_read_time= best_trp->read_cost;
+ }
}
if (tree)
@@ -1997,7 +2177,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
bool can_build_covering= FALSE;
/* Get best 'range' plan and prepare data for making other plans */
- if ((range_trp= get_key_scans_params(&param, tree, FALSE,
+ if ((range_trp= get_key_scans_params(&param, tree, FALSE, TRUE,
best_read_time)))
{
best_trp= range_trp;
@@ -2040,13 +2220,15 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
SEL_IMERGE *imerge;
TABLE_READ_PLAN *best_conj_trp= NULL, *new_conj_trp;
LINT_INIT(new_conj_trp); /* no empty index_merge lists possible */
-
DBUG_PRINT("info",("No range reads possible,"
" trying to construct index_merge"));
List_iterator_fast<SEL_IMERGE> it(tree->merges);
while ((imerge= it++))
{
new_conj_trp= get_best_disjunct_quick(&param, imerge, best_read_time);
+ if (new_conj_trp)
+ set_if_smaller(param.table->quick_condition_rows,
+ new_conj_trp->records);
if (!best_conj_trp || (new_conj_trp && new_conj_trp->read_cost <
best_conj_trp->read_cost))
best_conj_trp= new_conj_trp;
@@ -2084,6 +2266,1111 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
DBUG_RETURN(records ? test(quick) : -1);
}
+/****************************************************************************
+ * Partition pruning module
+ ****************************************************************************/
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+
+/*
+ PartitionPruningModule
+
+ This part of the code does partition pruning. Partition pruning solves the
+ following problem: given a query over partitioned tables, find partitions
+ that we will not need to access (i.e. partitions that we can assume to be
+ empty) when executing the query.
+ The set of partitions to prune doesn't depend on which query execution
+ plan will be used to execute the query.
+
+ HOW IT WORKS
+
+  The partition pruning module makes use of the RangeAnalysisModule. The following
+ examples show how the problem of partition pruning can be reduced to the
+ range analysis problem:
+
+ EXAMPLE 1
+ Consider a query:
+
+ SELECT * FROM t1 WHERE (t1.a < 5 OR t1.a = 10) AND t1.a > 3 AND t1.b='z'
+
+ where table t1 is partitioned using PARTITION BY RANGE(t1.a). An apparent
+ way to find the used (i.e. not pruned away) partitions is as follows:
+
+ 1. analyze the WHERE clause and extract the list of intervals over t1.a
+ for the above query we will get this list: {(3 < t1.a < 5), (t1.a=10)}
+
+ 2. for each interval I
+ {
+ find partitions that have non-empty intersection with I;
+ mark them as used;
+ }
+
+ EXAMPLE 2
+ Suppose the table is partitioned by HASH(part_func(t1.a, t1.b)). Then
+ we need to:
+
+ 1. Analyze the WHERE clause and get a list of intervals over (t1.a, t1.b).
+ The list of intervals we'll obtain will look like this:
+ ((t1.a, t1.b) = (1,'foo')),
+ ((t1.a, t1.b) = (2,'bar')),
+       ((t1.a, t1.b) > (10,'zz'))
+
+ 2. for each interval I
+ {
+ if (the interval has form "(t1.a, t1.b) = (const1, const2)" )
+ {
+ calculate HASH(part_func(t1.a, t1.b));
+ find which partition has records with this hash value and mark
+ it as used;
+ }
+ else
+ {
+ mark all partitions as used;
+ break;
+ }
+ }
+
+  For both examples, step #1 is exactly what the RangeAnalysisModule can be
+  used to do, if it is provided with an appropriate index description
+  (an array of KEY_PART structures).
+  In example #1 we need to provide it with a description of index(t1.a);
+  in example #2, with a description of index(t1.a, t1.b).
+
+  These index descriptions are further called "partitioning index
+  descriptions". Note that it doesn't matter if such indexes really exist,
+  as the range analysis module only uses the description.
+
+  Putting it all together, the partitioning module works as follows:
+
+ prune_partitions() {
+ call create_partition_index_description();
+
+ call get_mm_tree(); // invoke the RangeAnalysisModule
+
+ // analyze the obtained interval list and get used partitions
+ call find_used_partitions();
+ }
+
+*/
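+
+/*
+  A concrete instance of EXAMPLE 1 above (editorial sketch; the partition
+  layout is hypothetical):
+
+    CREATE TABLE t1 (a INT, b CHAR(1))
+    PARTITION BY RANGE (a)
+      (PARTITION p0 VALUES LESS THAN (5),
+       PARTITION p1 VALUES LESS THAN (10),
+       PARTITION p2 VALUES LESS THAN (15));
+
+  The WHERE clause from EXAMPLE 1 yields the intervals
+  {(3 < t1.a < 5), (t1.a=10)}. The first interval intersects only p0
+  (rows with a < 5), and t1.a=10 falls into p2 (rows with 10 <= a < 15),
+  so p0 and p2 are marked as used and p1 is pruned away.
+*/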
+
+struct st_part_prune_param;
+struct st_part_opt_info;
+
+typedef void (*mark_full_part_func)(partition_info*, uint32);
+
+/*
+ Partition pruning operation context
+*/
+typedef struct st_part_prune_param
+{
+ RANGE_OPT_PARAM range_param; /* Range analyzer parameters */
+
+ /***************************************************************
+ Following fields are filled in based solely on partitioning
+ definition and not modified after that:
+ **************************************************************/
+ partition_info *part_info; /* Copy of table->part_info */
+ /* Function to get partition id from partitioning fields only */
+ get_part_id_func get_top_partition_id_func;
+ /* Function to mark a partition as used (w/all subpartitions if they exist)*/
+ mark_full_part_func mark_full_partition_used;
+
+ /* Partitioning 'index' description, array of key parts */
+ KEY_PART *key;
+
+ /*
+ Number of fields in partitioning 'index' definition created for
+ partitioning (0 if partitioning 'index' doesn't include partitioning
+ fields)
+ */
+ uint part_fields;
+ uint subpart_fields; /* Same as above for subpartitioning */
+
+ /*
+ Number of the last partitioning field keypart in the index, or -1 if
+ partitioning index definition doesn't include partitioning fields.
+ */
+ int last_part_partno;
+  int last_subpart_partno; /* Same as above for subpartitioning */
+
+ /*
+ is_part_keypart[i] == test(keypart #i in partitioning index is a member
+ used in partitioning)
+ Used to maintain current values of cur_part_fields and cur_subpart_fields
+ */
+ my_bool *is_part_keypart;
+ /* Same as above for subpartitioning */
+ my_bool *is_subpart_keypart;
+
+ /***************************************************************
+ Following fields form find_used_partitions() recursion context:
+ **************************************************************/
+ SEL_ARG **arg_stack; /* "Stack" of SEL_ARGs */
+ SEL_ARG **arg_stack_end; /* Top of the stack */
+ /* Number of partitioning fields for which we have a SEL_ARG* in arg_stack */
+ uint cur_part_fields;
+ /* Same as cur_part_fields, but for subpartitioning */
+ uint cur_subpart_fields;
+
+ /* Iterator to be used to obtain the "current" set of used partitions */
+ PARTITION_ITERATOR part_iter;
+
+ /* Initialized bitmap of no_subparts size */
+ MY_BITMAP subparts_bitmap;
+} PART_PRUNE_PARAM;
+
+static bool create_partition_index_description(PART_PRUNE_PARAM *prune_par);
+static int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree);
+static int find_used_partitions_imerge(PART_PRUNE_PARAM *ppar,
+ SEL_IMERGE *imerge);
+static int find_used_partitions_imerge_list(PART_PRUNE_PARAM *ppar,
+ List<SEL_IMERGE> &merges);
+static void mark_all_partitions_as_used(partition_info *part_info);
+static uint32 part_num_to_part_id_range(PART_PRUNE_PARAM* prune_par,
+ uint32 num);
+
+#ifndef DBUG_OFF
+static void print_partitioning_index(KEY_PART *parts, KEY_PART *parts_end);
+static void dbug_print_field(Field *field);
+static void dbug_print_segment_range(SEL_ARG *arg, KEY_PART *part);
+static void dbug_print_singlepoint_range(SEL_ARG **start, uint num);
+#endif
+
+
+/*
+ Perform partition pruning for a given table and condition.
+
+ SYNOPSIS
+ prune_partitions()
+ thd Thread handle
+ table Table to perform partition pruning for
+ pprune_cond Condition to use for partition pruning
+
+ DESCRIPTION
+ This function assumes that all partitions are marked as unused when it
+ is invoked. The function analyzes the condition, finds partitions that
+ need to be used to retrieve the records that match the condition, and
+    marks them as used by setting the appropriate bits in
+    part_info->used_partitions. In the worst case all partitions are marked
+    as used.
+
+ NOTE
+    This function returns promptly if called for a non-partitioned table.
+
+ RETURN
+ TRUE We've inferred that no partitions need to be used (i.e. no table
+ records will satisfy pprune_cond)
+ FALSE Otherwise
+*/
+
+bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
+{
+ bool retval= FALSE;
+ partition_info *part_info = table->part_info;
+ DBUG_ENTER("prune_partitions");
+
+ if (!part_info)
+ DBUG_RETURN(FALSE); /* not a partitioned table */
+
+ if (!pprune_cond)
+ {
+ mark_all_partitions_as_used(part_info);
+ DBUG_RETURN(FALSE);
+ }
+
+ PART_PRUNE_PARAM prune_param;
+ MEM_ROOT alloc;
+ RANGE_OPT_PARAM *range_par= &prune_param.range_param;
+ my_bitmap_map *old_read_set, *old_write_set;
+
+ prune_param.part_info= part_info;
+ init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0);
+ range_par->mem_root= &alloc;
+ range_par->old_root= thd->mem_root;
+
+ if (create_partition_index_description(&prune_param))
+ {
+ mark_all_partitions_as_used(part_info);
+ free_root(&alloc,MYF(0)); // Return memory & allocator
+ DBUG_RETURN(FALSE);
+ }
+
+ old_write_set= dbug_tmp_use_all_columns(table, table->write_set);
+ old_read_set= dbug_tmp_use_all_columns(table, table->read_set);
+ range_par->thd= thd;
+ range_par->table= table;
+ /* range_par->cond doesn't need initialization */
+ range_par->prev_tables= range_par->read_tables= 0;
+ range_par->current_table= table->map;
+
+ range_par->keys= 1; // one index
+ range_par->using_real_indexes= FALSE;
+ range_par->remove_jump_scans= FALSE;
+ range_par->real_keynr[0]= 0;
+
+ thd->no_errors=1; // Don't warn about NULL
+ thd->mem_root=&alloc;
+
+ bitmap_clear_all(&part_info->used_partitions);
+
+ prune_param.key= prune_param.range_param.key_parts;
+ SEL_TREE *tree;
+ int res;
+
+ tree= get_mm_tree(range_par, pprune_cond);
+ if (!tree)
+ goto all_used;
+
+ if (tree->type == SEL_TREE::IMPOSSIBLE)
+ {
+ retval= TRUE;
+ goto end;
+ }
+
+ if (tree->type != SEL_TREE::KEY && tree->type != SEL_TREE::KEY_SMALLER)
+ goto all_used;
+
+ if (tree->merges.is_empty())
+ {
+ /* Range analysis has produced a single list of intervals. */
+ prune_param.arg_stack_end= prune_param.arg_stack;
+ prune_param.cur_part_fields= 0;
+ prune_param.cur_subpart_fields= 0;
+ init_all_partitions_iterator(part_info, &prune_param.part_iter);
+ if (!tree->keys[0] || (-1 == (res= find_used_partitions(&prune_param,
+ tree->keys[0]))))
+ goto all_used;
+ }
+ else
+ {
+ if (tree->merges.elements == 1)
+ {
+ /*
+ Range analysis has produced a "merge" of several intervals lists, a
+ SEL_TREE that represents an expression in form
+ sel_imerge = (tree1 OR tree2 OR ... OR treeN)
+ that cannot be reduced to one tree. This can only happen when
+ partitioning index has several keyparts and the condition is OR of
+ conditions that refer to different key parts. For example, we'll get
+ here for "partitioning_field=const1 OR subpartitioning_field=const2"
+ */
+ if (-1 == (res= find_used_partitions_imerge(&prune_param,
+ tree->merges.head())))
+ goto all_used;
+ }
+ else
+ {
+ /*
+        Range analysis has produced a list of several imerges, i.e. a
+        structure that represents a condition of the form
+          imerge_list= (sel_imerge1 AND sel_imerge2 AND ... AND sel_imergeN)
+        This is produced for complicated WHERE clauses that the range analyzer
+        can't really analyze properly.
+ */
+ if (-1 == (res= find_used_partitions_imerge_list(&prune_param,
+ tree->merges)))
+ goto all_used;
+ }
+ }
+
+ /*
+ res == 0 => no used partitions => retval=TRUE
+ res == 1 => some used partitions => retval=FALSE
+ res == -1 - we jump over this line to all_used:
+ */
+ retval= test(!res);
+ goto end;
+
+all_used:
+ retval= FALSE; // some partitions are used
+ mark_all_partitions_as_used(prune_param.part_info);
+end:
+ dbug_tmp_restore_column_map(table->write_set, old_write_set);
+ dbug_tmp_restore_column_map(table->read_set, old_read_set);
+ thd->no_errors=0;
+ thd->mem_root= range_par->old_root;
+ free_root(&alloc,MYF(0)); // Return memory & allocator
+ DBUG_RETURN(retval);
+}
+
+
+/*
+ Store field key image to table record
+
+ SYNOPSIS
+ store_key_image_to_rec()
+ field Field which key image should be stored
+ ptr Field value in key format
+ len Length of the value, in bytes
+
+ DESCRIPTION
+    Copy the field value from its key image to the table record. The source
+    is the value in key image format, occupying len bytes in the buffer
+    pointed to by ptr. The destination is the table record, in "field value
+    in table record" format.
+*/
+
+void store_key_image_to_rec(Field *field, char *ptr, uint len)
+{
+ /* Do the same as print_key() does */
+ my_bitmap_map *old_map;
+
+ if (field->real_maybe_null())
+ {
+ if (*ptr)
+ {
+ field->set_null();
+ return;
+ }
+ field->set_notnull();
+ ptr++;
+ }
+ old_map= dbug_tmp_use_all_columns(field->table,
+ field->table->write_set);
+ field->set_key_image(ptr, len);
+ dbug_tmp_restore_column_map(field->table->write_set, old_map);
+}
+
+
+/*
+ For SEL_ARG* array, store sel_arg->min values into table record buffer
+
+ SYNOPSIS
+ store_selargs_to_rec()
+ ppar Partition pruning context
+ start Array of SEL_ARG* for which the minimum values should be stored
+ num Number of elements in the array
+
+ DESCRIPTION
+ For each SEL_ARG* interval in the specified array, store the left edge
+ field value (sel_arg->min, key image format) into the table record.
+*/
+
+static void store_selargs_to_rec(PART_PRUNE_PARAM *ppar, SEL_ARG **start,
+ int num)
+{
+ KEY_PART *parts= ppar->range_param.key_parts;
+ for (SEL_ARG **end= start + num; start != end; start++)
+ {
+ SEL_ARG *sel_arg= (*start);
+ store_key_image_to_rec(sel_arg->field, sel_arg->min_value,
+ parts[sel_arg->part].length);
+ }
+}
+
+
+/* Mark a partition as used in the case when there are no subpartitions */
+static void mark_full_partition_used_no_parts(partition_info* part_info,
+ uint32 part_id)
+{
+ DBUG_ENTER("mark_full_partition_used_no_parts");
+ DBUG_PRINT("enter", ("Mark partition %u as used", part_id));
+ bitmap_set_bit(&part_info->used_partitions, part_id);
+ DBUG_VOID_RETURN;
+}
+
+
+/* Mark a partition as used in the case when there are subpartitions */
+static void mark_full_partition_used_with_parts(partition_info *part_info,
+ uint32 part_id)
+{
+ uint32 start= part_id * part_info->no_subparts;
+ uint32 end= start + part_info->no_subparts;
+ DBUG_ENTER("mark_full_partition_used_with_parts");
+
+ for (; start != end; start++)
+ {
+ DBUG_PRINT("info", ("1:Mark subpartition %u as used", start));
+ bitmap_set_bit(&part_info->used_partitions, start);
+ }
+ DBUG_VOID_RETURN;
+}
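+
+/*
+  Editorial note: used_partitions is laid out linearly, one bit per
+  subpartition, at index part_id * no_subparts + subpart_id. E.g. with
+  no_subparts=3, partition #2 owns bits 6, 7 and 8 - exactly the
+  [start, end) range the loop above walks.
+*/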
+
+/*
+ Find the set of used partitions for List<SEL_IMERGE>
+ SYNOPSIS
+ find_used_partitions_imerge_list
+ ppar Partition pruning context.
+      merges   List of SEL_IMERGEs to perform pruning for.
+
+ DESCRIPTION
+ List<SEL_IMERGE> represents "imerge1 AND imerge2 AND ...".
+ The set of used partitions is an intersection of used partitions sets
+ for imerge_{i}.
+ We accumulate this intersection in a separate bitmap.
+
+ RETURN
+ See find_used_partitions()
+*/
+
+static int find_used_partitions_imerge_list(PART_PRUNE_PARAM *ppar,
+ List<SEL_IMERGE> &merges)
+{
+ MY_BITMAP all_merges;
+ uint bitmap_bytes;
+ my_bitmap_map *bitmap_buf;
+ uint n_bits= ppar->part_info->used_partitions.n_bits;
+ bitmap_bytes= bitmap_buffer_size(n_bits);
+ if (!(bitmap_buf= (my_bitmap_map*) alloc_root(ppar->range_param.mem_root,
+ bitmap_bytes)))
+ {
+ /*
+      Fallback: process just the first SEL_IMERGE. This can leave us with more
+      partitions marked as used than actually needed.
+ */
+ return find_used_partitions_imerge(ppar, merges.head());
+ }
+ bitmap_init(&all_merges, bitmap_buf, n_bits, FALSE);
+ bitmap_set_prefix(&all_merges, n_bits);
+
+ List_iterator<SEL_IMERGE> it(merges);
+ SEL_IMERGE *imerge;
+ while ((imerge=it++))
+ {
+ int res= find_used_partitions_imerge(ppar, imerge);
+ if (!res)
+ {
+ /* no used partitions on one ANDed imerge => no used partitions at all */
+ return 0;
+ }
+
+ if (res != -1)
+ bitmap_intersect(&all_merges, &ppar->part_info->used_partitions);
+
+ if (bitmap_is_clear_all(&all_merges))
+ return 0;
+
+ bitmap_clear_all(&ppar->part_info->used_partitions);
+ }
+ memcpy(ppar->part_info->used_partitions.bitmap, all_merges.bitmap,
+ bitmap_bytes);
+ return 1;
+}
+
+
+/*
+ Find the set of used partitions for SEL_IMERGE structure
+ SYNOPSIS
+ find_used_partitions_imerge()
+ ppar Partition pruning context.
+      imerge   SEL_IMERGE to perform pruning for.
+
+ DESCRIPTION
+ SEL_IMERGE represents "tree1 OR tree2 OR ...". The implementation is
+ trivial - just use mark used partitions for each tree and bail out early
+ if for some tree_{i} all partitions are used.
+
+ RETURN
+ See find_used_partitions().
+*/
+
+static
+int find_used_partitions_imerge(PART_PRUNE_PARAM *ppar, SEL_IMERGE *imerge)
+{
+ int res= 0;
+ for (SEL_TREE **ptree= imerge->trees; ptree < imerge->trees_next; ptree++)
+ {
+ ppar->arg_stack_end= ppar->arg_stack;
+ ppar->cur_part_fields= 0;
+ ppar->cur_subpart_fields= 0;
+ init_all_partitions_iterator(ppar->part_info, &ppar->part_iter);
+ SEL_ARG *key_tree= (*ptree)->keys[0];
+ if (!key_tree || (-1 == (res |= find_used_partitions(ppar, key_tree))))
+ return -1;
+ }
+ return res;
+}
+
+
+/*
+ Collect partitioning ranges for the SEL_ARG tree and mark partitions as used
+
+ SYNOPSIS
+ find_used_partitions()
+ ppar Partition pruning context.
+ key_tree SEL_ARG range tree to perform pruning for
+
+ DESCRIPTION
+ This function
+ * recursively walks the SEL_ARG* tree collecting partitioning "intervals"
+ * finds the partitions one needs to use to get rows in these intervals
+ * marks these partitions as used.
+    The next section describes the process in greater detail.
+
+ IMPLEMENTATION
+ TYPES OF RESTRICTIONS THAT WE CAN OBTAIN PARTITIONS FOR
+ We can find out which [sub]partitions to use if we obtain restrictions on
+ [sub]partitioning fields in the following form:
+ 1. "partition_field1=const1 AND ... AND partition_fieldN=constN"
+ 1.1 Same as (1) but for subpartition fields
+
+ If partitioning supports interval analysis (i.e. partitioning is a
+ function of a single table field, and partition_info::
+ get_part_iter_for_interval != NULL), then we can also use condition in
+ this form:
+ 2. "const1 <=? partition_field <=? const2"
+ 2.1 Same as (2) but for subpartition_field
+
+ INFERRING THE RESTRICTIONS FROM SEL_ARG TREE
+
+ The below is an example of what SEL_ARG tree may represent:
+
+ (start)
+ | $
+ | Partitioning keyparts $ subpartitioning keyparts
+ | $
+ | ... ... $
+ | | | $
+ | +---------+ +---------+ $ +-----------+ +-----------+
+ \-| par1=c1 |--| par2=c2 |-----| subpar1=c3|--| subpar2=c5|
+ +---------+ +---------+ $ +-----------+ +-----------+
+ | $ | |
+ | $ | +-----------+
+ | $ | | subpar2=c6|
+ | $ | +-----------+
+ | $ |
+ | $ +-----------+ +-----------+
+ | $ | subpar1=c4|--| subpar2=c8|
+ | $ +-----------+ +-----------+
+ | $
+ | $
+ +---------+ $ +------------+ +------------+
+ | par1=c2 |------------------| subpar1=c10|--| subpar2=c12|
+ +---------+ $ +------------+ +------------+
+ | $
+ ... $
+
+ The up-down connections are connections via SEL_ARG::left and
+ SEL_ARG::right. A horizontal connection to the right is the
+ SEL_ARG::next_key_part connection.
+
+ find_used_partitions() traverses the entire tree via recursion on
+ * SEL_ARG::next_key_part (from left to right on the picture)
+ * SEL_ARG::left|right (up/down on the pic). Left-right recursion is
+ performed for each depth level.
+
+ Recursion descent on SEL_ARG::next_key_part is used to accumulate (in
+ ppar->arg_stack) constraints on partitioning and subpartitioning fields.
+ For the example in the above picture, one of stack states is:
+ in find_used_partitions(key_tree = "subpar2=c5") (***)
+ in find_used_partitions(key_tree = "subpar1=c3")
+ in find_used_partitions(key_tree = "par2=c2") (**)
+ in find_used_partitions(key_tree = "par1=c1")
+ in prune_partitions(...)
+ We apply partitioning limits as soon as possible, e.g. when we reach the
+ depth (**), we find which partition(s) correspond to "par1=c1 AND par2=c2",
+ and save them in ppar->part_iter.
+ When we reach the depth (***), we find which subpartition(s) correspond to
+ "subpar1=c3 AND subpar2=c5", and then mark appropriate subpartitions in
+ appropriate subpartitions as used.
+
+ It is possible that constraints on some partitioning fields are missing.
+ For the above example, consider this stack state:
+ in find_used_partitions(key_tree = "subpar2=c12") (***)
+ in find_used_partitions(key_tree = "subpar1=c10")
+ in find_used_partitions(key_tree = "par1=c2")
+ in prune_partitions(...)
+    Here we don't have constraints for all partitioning fields. Since we've
+    never set ppar->part_iter to contain the used set of partitions, we use
+    its default "all partitions" value. We get the subpartition id for
+    "subpar1=c10 AND subpar2=c12", and mark that subpartition as used in
+    every partition.
+
+ The inverse is also possible: we may get constraints on partitioning
+ fields, but not constraints on subpartitioning fields. In that case,
+ calls to find_used_partitions() with depth below (**) will return -1,
+    and we will mark the entire partition as used.
+
+ TODO
+ Replace recursion on SEL_ARG::left and SEL_ARG::right with a loop
+
+ RETURN
+ 1 OK, one or more [sub]partitions are marked as used.
+ 0 The passed condition doesn't match any partitions
+ -1 Couldn't infer any partition pruning "intervals" from the passed
+       SEL_ARG* tree (which means that all partitions should be marked as
+       used). Marking partitions as used is the responsibility of the caller.
+*/
+
+static
+int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
+{
+ int res, left_res=0, right_res=0;
+ int partno= (int)key_tree->part;
+ bool pushed= FALSE;
+ bool set_full_part_if_bad_ret= FALSE;
+
+ if (key_tree->left != &null_element)
+ {
+ if (-1 == (left_res= find_used_partitions(ppar,key_tree->left)))
+ return -1;
+ }
+
+ if (key_tree->type == SEL_ARG::KEY_RANGE)
+ {
+ if (partno == 0 && (NULL != ppar->part_info->get_part_iter_for_interval))
+ {
+ /*
+ Partitioning is done by RANGE|INTERVAL(monotonic_expr(fieldX)), and
+ we got "const1 CMP fieldX CMP const2" interval <-- psergey-todo: change
+ */
+ DBUG_EXECUTE("info", dbug_print_segment_range(key_tree,
+ ppar->range_param.
+ key_parts););
+ res= ppar->part_info->
+ get_part_iter_for_interval(ppar->part_info,
+ FALSE,
+ key_tree->min_value,
+ key_tree->max_value,
+ key_tree->min_flag | key_tree->max_flag,
+ &ppar->part_iter);
+ if (!res)
+ goto go_right; /* res==0 --> no satisfying partitions */
+ if (res == -1)
+ {
+ //get a full range iterator
+ init_all_partitions_iterator(ppar->part_info, &ppar->part_iter);
+ }
+ /*
+ Save our intent to mark full partition as used if we will not be able
+ to obtain further limits on subpartitions
+ */
+ set_full_part_if_bad_ret= TRUE;
+ goto process_next_key_part;
+ }
+
+ if (partno == ppar->last_subpart_partno &&
+ (NULL != ppar->part_info->get_subpart_iter_for_interval))
+ {
+ PARTITION_ITERATOR subpart_iter;
+ DBUG_EXECUTE("info", dbug_print_segment_range(key_tree,
+ ppar->range_param.
+ key_parts););
+ res= ppar->part_info->
+ get_subpart_iter_for_interval(ppar->part_info,
+ TRUE,
+ key_tree->min_value,
+ key_tree->max_value,
+ key_tree->min_flag | key_tree->max_flag,
+ &subpart_iter);
+ DBUG_ASSERT(res); /* We can't get "no satisfying subpartitions" */
+ if (res == -1)
+ return -1; /* all subpartitions satisfy */
+
+ uint32 subpart_id;
+ bitmap_clear_all(&ppar->subparts_bitmap);
+ while ((subpart_id= subpart_iter.get_next(&subpart_iter)) !=
+ NOT_A_PARTITION_ID)
+ bitmap_set_bit(&ppar->subparts_bitmap, subpart_id);
+
+      /* Mark each satisfying subpartition as used within each used partition. */
+ uint32 part_id;
+ while ((part_id= ppar->part_iter.get_next(&ppar->part_iter)) !=
+ NOT_A_PARTITION_ID)
+ {
+ for (uint i= 0; i < ppar->part_info->no_subparts; i++)
+ if (bitmap_is_set(&ppar->subparts_bitmap, i))
+ bitmap_set_bit(&ppar->part_info->used_partitions,
+ part_id * ppar->part_info->no_subparts + i);
+ }
+ goto go_right;
+ }
+
+ if (key_tree->is_singlepoint())
+ {
+ pushed= TRUE;
+ ppar->cur_part_fields+= ppar->is_part_keypart[partno];
+ ppar->cur_subpart_fields+= ppar->is_subpart_keypart[partno];
+ *(ppar->arg_stack_end++) = key_tree;
+
+ if (partno == ppar->last_part_partno &&
+ ppar->cur_part_fields == ppar->part_fields)
+ {
+ /*
+ Ok, we've got "fieldN<=>constN"-type SEL_ARGs for all partitioning
+ fields. Save all constN constants into table record buffer.
+ */
+ store_selargs_to_rec(ppar, ppar->arg_stack, ppar->part_fields);
+ DBUG_EXECUTE("info", dbug_print_singlepoint_range(ppar->arg_stack,
+ ppar->part_fields););
+ uint32 part_id;
+ longlong func_value;
+ /* Find in which partition the {const1, ...,constN} tuple goes */
+ if (ppar->get_top_partition_id_func(ppar->part_info, &part_id,
+ &func_value))
+ {
+ res= 0; /* No satisfying partitions */
+ goto pop_and_go_right;
+ }
+        /* Remember the limit we got - a single partition #part_id */
+ init_single_partition_iterator(part_id, &ppar->part_iter);
+
+ /*
+ If there are no subpartitions/we fail to get any limit for them,
+ then we'll mark full partition as used.
+ */
+ set_full_part_if_bad_ret= TRUE;
+ goto process_next_key_part;
+ }
+
+ if (partno == ppar->last_subpart_partno &&
+ ppar->cur_subpart_fields == ppar->subpart_fields)
+ {
+ /*
+ Ok, we've got "fieldN<=>constN"-type SEL_ARGs for all subpartitioning
+ fields. Save all constN constants into table record buffer.
+ */
+ store_selargs_to_rec(ppar, ppar->arg_stack_end - ppar->subpart_fields,
+ ppar->subpart_fields);
+ DBUG_EXECUTE("info", dbug_print_singlepoint_range(ppar->arg_stack_end-
+ ppar->subpart_fields,
+ ppar->subpart_fields););
+ /* Find the subpartition (it's HASH/KEY so we always have one) */
+ partition_info *part_info= ppar->part_info;
+ uint32 subpart_id= part_info->get_subpartition_id(part_info);
+
+        /* Mark this subpartition as used within each used partition. */
+ uint32 part_id;
+ while ((part_id= ppar->part_iter.get_next(&ppar->part_iter)) !=
+ NOT_A_PARTITION_ID)
+ {
+ bitmap_set_bit(&part_info->used_partitions,
+ part_id * part_info->no_subparts + subpart_id);
+ }
+ res= 1; /* Some partitions were marked as used */
+ goto pop_and_go_right;
+ }
+ }
+ else
+ {
+ /*
+        Can't handle a condition on the current key part. If we're deep enough
+        to be processing subpartitioning key parts, this means we'll not be
+        able to infer any suitable condition, so bail out.
+ */
+ if (partno >= ppar->last_part_partno)
+ return -1;
+ }
+ }
+
+process_next_key_part:
+ if (key_tree->next_key_part)
+ res= find_used_partitions(ppar, key_tree->next_key_part);
+ else
+ res= -1;
+
+ if (set_full_part_if_bad_ret)
+ {
+ if (res == -1)
+ {
+ /* Got "full range" for subpartitioning fields */
+ uint32 part_id;
+ bool found= FALSE;
+ while ((part_id= ppar->part_iter.get_next(&ppar->part_iter)) !=
+ NOT_A_PARTITION_ID)
+ {
+ ppar->mark_full_partition_used(ppar->part_info, part_id);
+ found= TRUE;
+ }
+ res= test(found);
+ }
+ /*
+ Restore the "used partitions iterator" to the default setting that
+ specifies iteration over all partitions.
+ */
+ init_all_partitions_iterator(ppar->part_info, &ppar->part_iter);
+ }
+
+ if (pushed)
+ {
+pop_and_go_right:
+ /* Pop this key part info off the "stack" */
+ ppar->arg_stack_end--;
+ ppar->cur_part_fields-= ppar->is_part_keypart[partno];
+ ppar->cur_subpart_fields-= ppar->is_subpart_keypart[partno];
+ }
+
+ if (res == -1)
+ return -1;
+go_right:
+ if (key_tree->right != &null_element)
+ {
+ if (-1 == (right_res= find_used_partitions(ppar,key_tree->right)))
+ return -1;
+ }
+ return (left_res || right_res || res);
+}
+
+
+static void mark_all_partitions_as_used(partition_info *part_info)
+{
+ bitmap_set_all(&part_info->used_partitions);
+}
+
+
+/*
+ Check if field types allow to construct partitioning index description
+
+ SYNOPSIS
+ fields_ok_for_partition_index()
+ pfield NULL-terminated array of pointers to fields.
+
+ DESCRIPTION
+    For an array of fields, check if we can use all of the fields to create
+    a partitioning index description.
+
+    We can't process GEOMETRY fields - for these fields, singlepoint
+    intervals can't be generated, and non-singlepoint intervals are "special"
+    kinds of intervals to which our processing logic can't be applied.
+
+    It is not known if we could process ENUM fields, so they are disabled to
+    be on the safe side.
+
+ RETURN
+ TRUE Yes, fields can be used in partitioning index
+ FALSE Otherwise
+*/
+
+static bool fields_ok_for_partition_index(Field **pfield)
+{
+ if (!pfield)
+ return FALSE;
+ for (; (*pfield); pfield++)
+ {
+ enum_field_types ftype= (*pfield)->real_type();
+ if (ftype == MYSQL_TYPE_ENUM || ftype == MYSQL_TYPE_GEOMETRY)
+ return FALSE;
+ }
+ return TRUE;
+}
+
+
+/*
+ Create partition index description and fill related info in the context
+ struct
+
+ SYNOPSIS
+ create_partition_index_description()
+ prune_par INOUT Partition pruning context
+
+ DESCRIPTION
+ Create partition index description. Partition index description is:
+
+ part_index(used_fields_list(part_expr), used_fields_list(subpart_expr))
+
+    If partitioning/sub-partitioning uses BLOB or Geometry fields, then the
+    corresponding fields_list(...) is not included in the index description
+    and we don't perform partition pruning for partitions/subpartitions.
+
+ RETURN
+ TRUE Out of memory or can't do partition pruning at all
+ FALSE OK
+*/
+
+static bool create_partition_index_description(PART_PRUNE_PARAM *ppar)
+{
+ RANGE_OPT_PARAM *range_par= &(ppar->range_param);
+ partition_info *part_info= ppar->part_info;
+ uint used_part_fields, used_subpart_fields;
+
+ used_part_fields= fields_ok_for_partition_index(part_info->part_field_array) ?
+ part_info->no_part_fields : 0;
+ used_subpart_fields=
+ fields_ok_for_partition_index(part_info->subpart_field_array)?
+ part_info->no_subpart_fields : 0;
+
+ uint total_parts= used_part_fields + used_subpart_fields;
+
+ ppar->part_fields= used_part_fields;
+ ppar->last_part_partno= (int)used_part_fields - 1;
+
+ ppar->subpart_fields= used_subpart_fields;
+ ppar->last_subpart_partno=
+ used_subpart_fields?(int)(used_part_fields + used_subpart_fields - 1): -1;
+
+ if (part_info->is_sub_partitioned())
+ {
+ ppar->mark_full_partition_used= mark_full_partition_used_with_parts;
+ ppar->get_top_partition_id_func= part_info->get_part_partition_id;
+ }
+ else
+ {
+ ppar->mark_full_partition_used= mark_full_partition_used_no_parts;
+ ppar->get_top_partition_id_func= part_info->get_partition_id;
+ }
+
+ KEY_PART *key_part;
+ MEM_ROOT *alloc= range_par->mem_root;
+ if (!total_parts ||
+ !(key_part= (KEY_PART*)alloc_root(alloc, sizeof(KEY_PART)*
+ total_parts)) ||
+ !(ppar->arg_stack= (SEL_ARG**)alloc_root(alloc, sizeof(SEL_ARG*)*
+ total_parts)) ||
+ !(ppar->is_part_keypart= (my_bool*)alloc_root(alloc, sizeof(my_bool)*
+ total_parts)) ||
+ !(ppar->is_subpart_keypart= (my_bool*)alloc_root(alloc, sizeof(my_bool)*
+ total_parts)))
+ return TRUE;
+
+ if (ppar->subpart_fields)
+ {
+ my_bitmap_map *buf;
+ uint32 bufsize= bitmap_buffer_size(ppar->part_info->no_subparts);
+ if (!(buf= (my_bitmap_map*) alloc_root(alloc, bufsize)))
+ return TRUE;
+ bitmap_init(&ppar->subparts_bitmap, buf, ppar->part_info->no_subparts,
+ FALSE);
+ }
+ range_par->key_parts= key_part;
+ Field **field= (ppar->part_fields)? part_info->part_field_array :
+ part_info->subpart_field_array;
+ bool in_subpart_fields= FALSE;
+ for (uint part= 0; part < total_parts; part++, key_part++)
+ {
+ key_part->key= 0;
+ key_part->part= part;
+ key_part->length= (uint16) (*field)->pack_length_in_rec();
+ /*
+ psergey-todo: check yet again if this is correct for tricky field types,
+ e.g. see "Fix a fatal error in decimal key handling" in open_binary_frm()
+ */
+ key_part->store_length= (uint16) (*field)->pack_length();
+ if ((*field)->real_maybe_null())
+ key_part->store_length+= HA_KEY_NULL_LENGTH;
+ if ((*field)->type() == MYSQL_TYPE_BLOB ||
+ (*field)->real_type() == MYSQL_TYPE_VARCHAR)
+ key_part->store_length+= HA_KEY_BLOB_LENGTH;
+
+ key_part->field= (*field);
+ key_part->image_type = Field::itRAW;
+ /*
+      We set the keypart flag to 0 here, as only HA_PART_KEY_SEG is checked
+      in the RangeAnalysisModule.
+ */
+ key_part->flag= 0;
+ /* We don't set key_parts->null_bit as it will not be used */
+
+ ppar->is_part_keypart[part]= !in_subpart_fields;
+ ppar->is_subpart_keypart[part]= in_subpart_fields;
+
+ /*
+      Check if this was the last field in this array; in this case we
+      switch to subpartitioning fields. (This will only happen if there
+      are subpartitioning fields to cater for.)
+ */
+ if (!*(++field))
+ {
+ field= part_info->subpart_field_array;
+ in_subpart_fields= TRUE;
+ }
+ }
+ range_par->key_parts_end= key_part;
+
+ DBUG_EXECUTE("info", print_partitioning_index(range_par->key_parts,
+ range_par->key_parts_end););
+ return FALSE;
+}
+
+
+#ifndef DBUG_OFF
+
+static void print_partitioning_index(KEY_PART *parts, KEY_PART *parts_end)
+{
+ DBUG_ENTER("print_partitioning_index");
+ DBUG_LOCK_FILE;
+ fprintf(DBUG_FILE, "partitioning INDEX(");
+ for (KEY_PART *p=parts; p != parts_end; p++)
+ {
+ fprintf(DBUG_FILE, "%s%s", p==parts?"":" ,", p->field->field_name);
+ }
+ fputs(");\n", DBUG_FILE);
+ DBUG_UNLOCK_FILE;
+ DBUG_VOID_RETURN;
+}
+
+/* Print a field value into the debug trace, in a NULL-aware way. */
+static void dbug_print_field(Field *field)
+{
+ if (field->is_real_null())
+ fprintf(DBUG_FILE, "NULL");
+ else
+ {
+ char buf[256];
+ String str(buf, sizeof(buf), &my_charset_bin);
+ str.length(0);
+ String *pstr;
+ pstr= field->val_str(&str);
+ fprintf(DBUG_FILE, "'%s'", pstr->c_ptr_safe());
+ }
+}
+
+
+/* Print a "c1 < keypartX < c2" - type interval into debug trace. */
+static void dbug_print_segment_range(SEL_ARG *arg, KEY_PART *part)
+{
+ DBUG_ENTER("dbug_print_segment_range");
+ DBUG_LOCK_FILE;
+ if (!(arg->min_flag & NO_MIN_RANGE))
+ {
+ store_key_image_to_rec(part->field, (char*)(arg->min_value), part->length);
+ dbug_print_field(part->field);
+ if (arg->min_flag & NEAR_MIN)
+ fputs(" < ", DBUG_FILE);
+ else
+ fputs(" <= ", DBUG_FILE);
+ }
+
+ fprintf(DBUG_FILE, "%s", part->field->field_name);
+
+ if (!(arg->max_flag & NO_MAX_RANGE))
+ {
+ if (arg->max_flag & NEAR_MAX)
+ fputs(" < ", DBUG_FILE);
+ else
+ fputs(" <= ", DBUG_FILE);
+ store_key_image_to_rec(part->field, (char*)(arg->max_value), part->length);
+ dbug_print_field(part->field);
+ }
+ fputs("\n", DBUG_FILE);
+ DBUG_UNLOCK_FILE;
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Print a singlepoint multi-keypart range interval to debug trace
+
+ SYNOPSIS
+ dbug_print_singlepoint_range()
+ start Array of SEL_ARG* ptrs representing conditions on key parts
+ num Number of elements in the array.
+
+ DESCRIPTION
+ This function prints a "keypartN=constN AND ... AND keypartK=constK"-type
+ interval to debug trace.
+*/
+
+static void dbug_print_singlepoint_range(SEL_ARG **start, uint num)
+{
+ DBUG_ENTER("dbug_print_singlepoint_range");
+ DBUG_LOCK_FILE;
+ SEL_ARG **end= start + num;
+
+ for (SEL_ARG **arg= start; arg != end; arg++)
+ {
+ Field *field= (*arg)->field;
+ fprintf(DBUG_FILE, "%s%s=", (arg==start)?"":", ", field->field_name);
+ dbug_print_field(field);
+ }
+ fputs("\n", DBUG_FILE);
+ DBUG_UNLOCK_FILE;
+ DBUG_VOID_RETURN;
+}
+#endif
+
+/****************************************************************************
+ * Partition pruning code ends
+ ****************************************************************************/
+#endif
+
/*
Get cost of 'sweep' full records retrieval.
@@ -2107,7 +3394,8 @@ double get_sweep_read_cost(const PARAM *param, ha_rows records)
else
{
double n_blocks=
- ceil(ulonglong2double(param->table->file->data_file_length) / IO_SIZE);
+ ceil(ulonglong2double(param->table->file->stats.data_file_length) /
+ IO_SIZE);
double busy_blocks=
n_blocks * (1.0 - pow(1.0 - 1.0/n_blocks, rows2double(records)));
if (busy_blocks < 1.0)
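
[Editorial note: busy_blocks above is the standard estimate of how many
 distinct blocks are touched when `records` rows are fetched in random
 order. A worked instance: with n_blocks=1000 and records=500,
 busy_blocks = 1000 * (1 - 0.999^500), which is about 1000 * (1 - 0.606),
 i.e. roughly 394 blocks.]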
@@ -2247,7 +3535,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
{
DBUG_EXECUTE("info", print_sel_tree(param, *ptree, &(*ptree)->keys_map,
"tree in SEL_IMERGE"););
- if (!(*cur_child= get_key_scans_params(param, *ptree, TRUE, read_time)))
+ if (!(*cur_child= get_key_scans_params(param, *ptree, TRUE, FALSE, read_time)))
{
/*
One of index scans in this index_merge is more expensive than entire
@@ -2276,7 +3564,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
DBUG_PRINT("info", ("index_merge scans cost %g", imerge_cost));
if (imerge_too_expensive || (imerge_cost > read_time) ||
- (non_cpk_scan_records+cpk_scan_records >= param->table->file->records) &&
+ (non_cpk_scan_records+cpk_scan_records >= param->table->file->stats.records) &&
read_time != DBL_MAX)
{
/*
@@ -2334,7 +3622,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
imerge_trp->read_cost= imerge_cost;
imerge_trp->records= non_cpk_scan_records + cpk_scan_records;
imerge_trp->records= min(imerge_trp->records,
- param->table->file->records);
+ param->table->file->stats.records);
imerge_trp->range_scans= range_scans;
imerge_trp->range_scans_end= range_scans + n_child_scans;
read_time= imerge_cost;
@@ -2395,7 +3683,7 @@ skip_to_ror_scan:
((TRP_ROR_INTERSECT*)(*cur_roru_plan))->index_scan_costs;
roru_total_records += (*cur_roru_plan)->records;
roru_intersect_part *= (*cur_roru_plan)->records /
- param->table->file->records;
+ param->table->file->stats.records;
}
/*
@@ -2405,7 +3693,7 @@ skip_to_ror_scan:
in disjunction do not share key parts.
*/
roru_total_records -= (ha_rows)(roru_intersect_part*
- param->table->file->records);
+ param->table->file->stats.records);
/* ok, got a ROR read plan for each of the disjuncts
Calculate cost:
cost(index_union_scan(scan_1, ... scan_n)) =
@@ -2466,7 +3754,7 @@ static double get_index_only_read_time(const PARAM* param, ha_rows records,
int keynr)
{
double read_time;
- uint keys_per_block= (param->table->file->block_size/2/
+ uint keys_per_block= (param->table->file->stats.block_size/2/
(param->table->key_info[keynr].key_length+
param->table->file->ref_length) + 1);
read_time=((double) (records+keys_per_block-1)/
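
As a rough worked example with assumed numbers (not from this file): for
stats.block_size= 16384, key_length= 8 and ref_length= 6, keys_per_block=
16384/2/(8+6) + 1 = 586, so an index-only scan of 10000 entries is costed
at (10000 + 585) / 586, i.e. roughly 18 block reads.
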
@@ -2518,7 +3806,7 @@ static
ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg)
{
ROR_SCAN_INFO *ror_scan;
- uchar *bitmap_buf;
+ my_bitmap_map *bitmap_buf;
uint keynr;
DBUG_ENTER("make_ror_scan");
@@ -2533,12 +3821,12 @@ ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg)
ror_scan->sel_arg= sel_arg;
ror_scan->records= param->table->quick_rows[keynr];
- if (!(bitmap_buf= (uchar*)alloc_root(param->mem_root,
- param->fields_bitmap_size)))
+ if (!(bitmap_buf= (my_bitmap_map*) alloc_root(param->mem_root,
+ param->fields_bitmap_size)))
DBUG_RETURN(NULL);
if (bitmap_init(&ror_scan->covered_fields, bitmap_buf,
- param->fields_bitmap_size*8, FALSE))
+ param->table->s->fields, FALSE))
DBUG_RETURN(NULL);
bitmap_clear_all(&ror_scan->covered_fields);
@@ -2547,8 +3835,8 @@ ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg)
param->table->key_info[keynr].key_parts;
for (;key_part != key_part_end; ++key_part)
{
- if (bitmap_is_set(&param->needed_fields, key_part->fieldnr))
- bitmap_set_bit(&ror_scan->covered_fields, key_part->fieldnr);
+ if (bitmap_is_set(&param->needed_fields, key_part->fieldnr-1))
+ bitmap_set_bit(&ror_scan->covered_fields, key_part->fieldnr-1);
}
ror_scan->index_read_cost=
get_index_only_read_time(param, param->table->quick_rows[ror_scan->keynr],
@@ -2648,20 +3936,21 @@ static
ROR_INTERSECT_INFO* ror_intersect_init(const PARAM *param)
{
ROR_INTERSECT_INFO *info;
- uchar* buf;
+ my_bitmap_map* buf;
if (!(info= (ROR_INTERSECT_INFO*)alloc_root(param->mem_root,
sizeof(ROR_INTERSECT_INFO))))
return NULL;
info->param= param;
- if (!(buf= (uchar*)alloc_root(param->mem_root, param->fields_bitmap_size)))
+ if (!(buf= (my_bitmap_map*) alloc_root(param->mem_root,
+ param->fields_bitmap_size)))
return NULL;
- if (bitmap_init(&info->covered_fields, buf, param->fields_bitmap_size*8,
+ if (bitmap_init(&info->covered_fields, buf, param->table->s->fields,
FALSE))
return NULL;
info->is_covering= FALSE;
info->index_scan_costs= 0.0;
info->index_records= 0;
- info->out_rows= param->table->file->records;
+ info->out_rows= param->table->file->stats.records;
bitmap_clear_all(&info->covered_fields);
return info;
}
@@ -2670,7 +3959,7 @@ void ror_intersect_cpy(ROR_INTERSECT_INFO *dst, const ROR_INTERSECT_INFO *src)
{
dst->param= src->param;
memcpy(dst->covered_fields.bitmap, src->covered_fields.bitmap,
- src->covered_fields.bitmap_size);
+ no_bytes_in_map(&src->covered_fields));
dst->out_rows= src->out_rows;
dst->is_covering= src->is_covering;
dst->index_records= src->index_records;
@@ -2780,14 +4069,14 @@ static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info,
SEL_ARG *sel_arg, *tuple_arg= NULL;
bool cur_covered;
bool prev_covered= test(bitmap_is_set(&info->covered_fields,
- key_part->fieldnr));
+ key_part->fieldnr-1));
key_range min_range;
key_range max_range;
min_range.key= (byte*) key_val;
min_range.flag= HA_READ_KEY_EXACT;
max_range.key= (byte*) key_val;
max_range.flag= HA_READ_AFTER_KEY;
- ha_rows prev_records= info->param->table->file->records;
+ ha_rows prev_records= info->param->table->file->stats.records;
DBUG_ENTER("ror_intersect_selectivity");
for (sel_arg= scan->sel_arg; sel_arg;
@@ -2795,7 +4084,7 @@ static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info,
{
DBUG_PRINT("info",("sel_arg step"));
cur_covered= test(bitmap_is_set(&info->covered_fields,
- key_part[sel_arg->part].fieldnr));
+ key_part[sel_arg->part].fieldnr-1));
if (cur_covered != prev_covered)
{
/* create (part1val, ..., part{n-1}val) tuple. */
@@ -2924,15 +4213,15 @@ static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
}
info->total_cost= info->index_scan_costs;
- DBUG_PRINT("info", ("info->total_cost= %g", info->total_cost));
+ DBUG_PRINT("info", ("info->total_cost: %g", info->total_cost));
if (!info->is_covering)
{
info->total_cost +=
get_sweep_read_cost(info->param, double2rows(info->out_rows));
DBUG_PRINT("info", ("info->total_cost= %g", info->total_cost));
}
- DBUG_PRINT("info", ("New out_rows= %g", info->out_rows));
- DBUG_PRINT("info", ("New cost= %g, %scovering", info->total_cost,
+ DBUG_PRINT("info", ("New out_rows: %g", info->out_rows));
+ DBUG_PRINT("info", ("New cost: %g, %scovering", info->total_cost,
info->is_covering?"" : "non-"));
DBUG_RETURN(TRUE);
}
@@ -3011,7 +4300,7 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
double min_cost= DBL_MAX;
DBUG_ENTER("get_best_ror_intersect");
- if ((tree->n_ror_scans < 2) || !param->table->file->records)
+ if ((tree->n_ror_scans < 2) || !param->table->file->stats.records)
DBUG_RETURN(NULL);
/*
@@ -3147,6 +4436,7 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
ha_rows best_rows = double2rows(intersect_best->out_rows);
if (!best_rows)
best_rows= 1;
+ set_if_smaller(param->table->quick_condition_rows, best_rows);
trp->records= best_rows;
trp->index_scan_costs= intersect_best->index_scan_costs;
trp->cpk_scan= cpk_scan_used? cpk_scan: NULL;
@@ -3180,7 +4470,8 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
F=set of all fields to cover
S={}
- do {
+ do
+ {
Order I by (#covered fields in F desc,
#components asc,
number of first not covered component asc);
@@ -3198,7 +4489,6 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
ROR_SCAN_INFO **ror_scan_mark;
ROR_SCAN_INFO **ror_scans_end= tree->ror_scans_end;
DBUG_ENTER("get_best_covering_ror_intersect");
- uint nbits= param->fields_bitmap_size*8;
for (ROR_SCAN_INFO **scan= tree->ror_scans; scan != ror_scans_end; ++scan)
(*scan)->key_components=
@@ -3214,10 +4504,11 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
MY_BITMAP *covered_fields= &param->tmp_covered_fields;
if (!covered_fields->bitmap)
- covered_fields->bitmap= (uchar*)alloc_root(param->mem_root,
+ covered_fields->bitmap= (my_bitmap_map*)alloc_root(param->mem_root,
param->fields_bitmap_size);
if (!covered_fields->bitmap ||
- bitmap_init(covered_fields, covered_fields->bitmap, nbits, FALSE))
+ bitmap_init(covered_fields, covered_fields->bitmap,
+ param->table->s->fields, FALSE))
DBUG_RETURN(0);
bitmap_clear_all(covered_fields);
@@ -3229,7 +4520,8 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
DBUG_EXECUTE("info", print_ror_scans_arr(param->table,
"building covering ROR-I",
ror_scan_mark, ror_scans_end););
- do {
+ do
+ {
/*
Update changed sorting info:
#covered fields,
@@ -3299,6 +4591,7 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
trp->read_cost= total_cost;
trp->records= records;
trp->cpk_scan= NULL;
+ set_if_smaller(param->table->quick_condition_rows, records);
DBUG_PRINT("info",
("Returning covering ROR-intersect plan: cost %g, records %lu",
@@ -3323,7 +4616,8 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
*/
static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
- bool index_read_must_be_used,
+ bool index_read_must_be_used,
+ bool update_tbl_stats,
double read_time)
{
int idx;
@@ -3358,7 +4652,7 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
bool read_index_only= index_read_must_be_used ? TRUE :
(bool) param->table->used_keys.is_set(keynr);
- found_records= check_quick_select(param, idx, *key);
+ found_records= check_quick_select(param, idx, *key, update_tbl_stats);
if (param->is_ror_scan)
{
tree->n_ror_scans++;
@@ -3465,7 +4759,8 @@ QUICK_SELECT_I *TRP_ROR_INTERSECT::make_quick(PARAM *param,
if ((quick_intrsect=
new QUICK_ROR_INTERSECT_SELECT(param->thd, param->table,
- retrieve_full_rows? (!is_covering):FALSE,
+ (retrieve_full_rows? (!is_covering) :
+ FALSE),
parent_alloc)))
{
DBUG_EXECUTE("info", print_ror_scans_arr(param->table,
@@ -3544,7 +4839,7 @@ QUICK_SELECT_I *TRP_ROR_UNION::make_quick(PARAM *param,
0 on error
*/
-static SEL_TREE *get_ne_mm_tree(PARAM *param, Item_func *cond_func,
+static SEL_TREE *get_ne_mm_tree(RANGE_OPT_PARAM *param, Item_func *cond_func,
Field *field,
Item *lt_value, Item *gt_value,
Item_result cmp_type)
@@ -3579,7 +4874,7 @@ static SEL_TREE *get_ne_mm_tree(PARAM *param, Item_func *cond_func,
Pointer to the tree built tree
*/
-static SEL_TREE *get_func_mm_tree(PARAM *param, Item_func *cond_func,
+static SEL_TREE *get_func_mm_tree(RANGE_OPT_PARAM *param, Item_func *cond_func,
Field *field, Item *value,
Item_result cmp_type, bool inv)
{
@@ -3628,9 +4923,17 @@ static SEL_TREE *get_func_mm_tree(PARAM *param, Item_func *cond_func,
{
Item_func_in *func=(Item_func_in*) cond_func;
+ /*
+        The array for IN() is constructed only when all values have the same
+        result type. No tree will be built for values with different result
+        types, so we check this here to avoid unnecessary work.
+ */
+ if (!func->array)
+ break;
+
if (inv)
{
- if (func->array && func->cmp_type != ROW_RESULT)
+ if (func->array->result_type() != ROW_RESULT)
{
/*
We get here for conditions in form "t.key NOT IN (c1, c2, ...)",
@@ -3864,7 +5167,8 @@ static SEL_TREE *get_func_mm_tree(PARAM *param, Item_func *cond_func,
Pointer to the tree representing the built conjunction of SEL_TREEs
*/
-static SEL_TREE *get_full_func_mm_tree(PARAM *param, Item_func *cond_func,
+static SEL_TREE *get_full_func_mm_tree(RANGE_OPT_PARAM *param,
+ Item_func *cond_func,
Item_field *field_item, Item *value,
bool inv)
{
@@ -3907,7 +5211,7 @@ static SEL_TREE *get_full_func_mm_tree(PARAM *param, Item_func *cond_func,
/* make a select tree of all keys in condition */
-static SEL_TREE *get_mm_tree(PARAM *param,COND *cond)
+static SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param,COND *cond)
{
SEL_TREE *tree=0;
SEL_TREE *ftree= 0;
@@ -4078,7 +5382,7 @@ static SEL_TREE *get_mm_tree(PARAM *param,COND *cond)
static SEL_TREE *
-get_mm_parts(PARAM *param, COND *cond_func, Field *field,
+get_mm_parts(RANGE_OPT_PARAM *param, COND *cond_func, Field *field,
Item_func::Functype type,
Item *value, Item_result cmp_type)
{
@@ -4122,14 +5426,14 @@ get_mm_parts(PARAM *param, COND *cond_func, Field *field,
tree->keys_map.set_bit(key_part->key);
}
}
-
+
DBUG_RETURN(tree);
}
static SEL_ARG *
-get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part,
- Item_func::Functype type,Item *value)
+get_mm_leaf(RANGE_OPT_PARAM *param, COND *conf_func, Field *field,
+ KEY_PART *key_part, Item_func::Functype type,Item *value)
{
uint maybe_null=(uint) field->real_maybe_null();
bool optimize_range;
@@ -4188,8 +5492,11 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part,
!(conf_func->compare_collation()->state & MY_CS_BINSORT))
goto end;
- optimize_range= field->optimize_range(param->real_keynr[key_part->key],
- key_part->part);
+ if (param->using_real_indexes)
+ optimize_range= field->optimize_range(param->real_keynr[key_part->key],
+ key_part->part);
+ else
+ optimize_range= TRUE;
if (type == Item_func::LIKE_FUNC)
{
@@ -4286,8 +5593,8 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part,
/* For comparison purposes allow invalid dates like 2000-01-32 */
orig_sql_mode= field->table->in_use->variables.sql_mode;
if (value->real_item()->type() == Item::STRING_ITEM &&
- (field->type() == FIELD_TYPE_DATE ||
- field->type() == FIELD_TYPE_DATETIME))
+ (field->type() == MYSQL_TYPE_DATE ||
+ field->type() == MYSQL_TYPE_DATETIME))
field->table->in_use->variables.sql_mode|= MODE_INVALID_DATES;
err= value->save_in_field_no_warnings(field, 1);
if (err > 0 && field->cmp_type() != value->result_type())
@@ -4464,7 +5771,7 @@ sel_add(SEL_ARG *key1,SEL_ARG *key2)
static SEL_TREE *
-tree_and(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
+tree_and(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
{
DBUG_ENTER("tree_and");
if (!tree1)
@@ -4534,7 +5841,8 @@ tree_and(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
using index_merge.
*/
-bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2, PARAM* param)
+bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2,
+ RANGE_OPT_PARAM* param)
{
key_map common_keys= tree1->keys_map;
DBUG_ENTER("sel_trees_can_be_ored");
@@ -4560,8 +5868,84 @@ bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2, PARAM* param)
DBUG_RETURN(FALSE);
}
+
+/*
+  Remove the trees that are not suitable for record retrieval.
+
+  SYNOPSIS
+    remove_nonrange_trees()
+      param      Range analysis parameter
+      tree       Tree to be processed, tree->type is KEY or KEY_SMALLER
+
+ DESCRIPTION
+ This function walks through tree->keys[] and removes the SEL_ARG* trees
+ that are not "maybe" trees (*) and cannot be used to construct quick range
+ selects.
+ (*) - have type MAYBE or MAYBE_KEY. Perhaps we should remove trees of
+ these types here as well.
+
+ A SEL_ARG* tree cannot be used to construct quick select if it has
+ tree->part != 0. (e.g. it could represent "keypart2 < const").
+
+ WHY THIS FUNCTION IS NEEDED
+
+ Normally we allow construction of SEL_TREE objects that have SEL_ARG
+  trees that do not allow quick range select construction. For example, for
+  "keypart1=1 AND keypart2=2" the execution will proceed as follows:
+ tree1= SEL_TREE { SEL_ARG{keypart1=1} }
+ tree2= SEL_TREE { SEL_ARG{keypart2=2} } -- can't make quick range select
+ from this
+ call tree_and(tree1, tree2) -- this joins SEL_ARGs into a usable SEL_ARG
+ tree.
+
+ There is an exception though: when we construct index_merge SEL_TREE,
+  any SEL_ARG* tree that cannot be used to construct a quick range select can
+  be removed, because the current range analysis code doesn't provide any way
+  for such a tree to be later combined with another tree.
+ Consider an example: we should not construct
+ st1 = SEL_TREE {
+ merges = SEL_IMERGE {
+ SEL_TREE(t.key1part1 = 1),
+ SEL_TREE(t.key2part2 = 2) -- (*)
+ }
+ };
+ because
+ - (*) cannot be used to construct quick range select,
+ - There is no execution path that would cause (*) to be converted to
+ a tree that could be used.
+
+ The latter is easy to verify: first, notice that the only way to convert
+ (*) into a usable tree is to call tree_and(something, (*)).
+
+  Second, look at what the tree_and/tree_or functions would do when passed a
+  SEL_TREE that has the structure of st1, and conclude that
+  tree_and(something, (*)) will not be called.
+
+ RETURN
+ 0 Ok, some suitable trees left
+ 1 No tree->keys[] left.
+*/
+
+static bool remove_nonrange_trees(RANGE_OPT_PARAM *param, SEL_TREE *tree)
+{
+ bool res= FALSE;
+ for (uint i=0; i < param->keys; i++)
+ {
+ if (tree->keys[i])
+ {
+ if (tree->keys[i]->part)
+ {
+ tree->keys[i]= NULL;
+ tree->keys_map.clear_bit(i);
+ }
+ else
+ res= TRUE;
+ }
+ }
+ return !res;
+}
+
+
static SEL_TREE *
-tree_or(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
+tree_or(RANGE_OPT_PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
{
DBUG_ENTER("tree_or");
if (!tree1 || !tree2)
@@ -4603,6 +5987,13 @@ tree_or(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
/* ok, two trees have KEY type but cannot be used without index merge */
if (tree1->merges.is_empty() && tree2->merges.is_empty())
{
+ if (param->remove_jump_scans)
+ {
+ bool no_trees= remove_nonrange_trees(param, tree1);
+ no_trees= no_trees || remove_nonrange_trees(param, tree2);
+ if (no_trees)
+ DBUG_RETURN(new SEL_TREE(SEL_TREE::ALWAYS));
+ }
SEL_IMERGE *merge;
/* both trees are "range" trees, produce new index merge structure */
if (!(result= new SEL_TREE()) || !(merge= new SEL_IMERGE()) ||
@@ -4625,7 +6016,9 @@ tree_or(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2)
/* one tree is index merge tree and another is range tree */
if (tree1->merges.is_empty())
swap_variables(SEL_TREE*, tree1, tree2);
-
+
+ if (param->remove_jump_scans && remove_nonrange_trees(param, tree2))
+ DBUG_RETURN(new SEL_TREE(SEL_TREE::ALWAYS));
/* add tree2 to tree1->merges, checking if it collapses to ALWAYS */
if (imerge_list_or_tree(param, &tree1->merges, tree2))
result= new SEL_TREE(SEL_TREE::ALWAYS);
@@ -5119,7 +6512,8 @@ SEL_ARG *
SEL_ARG::insert(SEL_ARG *key)
{
SEL_ARG *element,**par,*last_element;
- LINT_INIT(par); LINT_INIT(last_element);
+ LINT_INIT(par);
+ LINT_INIT(last_element);
for (element= this; element != &null_element ; )
{
@@ -5594,9 +6988,12 @@ void SEL_ARG::test_use_count(SEL_ARG *root)
SYNOPSIS
check_quick_select
param Parameter from test_quick_select
- idx Number of index to use in PARAM::key SEL_TREE::key
- tree Transformed selection condition, tree->key[idx] holds intervals
- tree to be used for scanning.
+ idx Number of index to use in tree->keys
+ tree Transformed selection condition, tree->keys[idx]
+ holds the range tree to be used for scanning.
+ update_tbl_stats If true, update table->quick_keys with information
+ about range scan we've evaluated.
+
NOTES
param->is_ror_scan is set to reflect if the key scan is a ROR (see
is_key_scan_ror function for more info)
@@ -5610,7 +7007,7 @@ void SEL_ARG::test_use_count(SEL_ARG *root)
*/
static ha_rows
-check_quick_select(PARAM *param,uint idx,SEL_ARG *tree)
+check_quick_select(PARAM *param,uint idx,SEL_ARG *tree, bool update_tbl_stats)
{
ha_rows records;
bool cpk_scan;
@@ -5651,10 +7048,19 @@ check_quick_select(PARAM *param,uint idx,SEL_ARG *tree)
records=check_quick_keys(param,idx,tree,param->min_key,0,param->max_key,0);
if (records != HA_POS_ERROR)
{
- param->table->quick_keys.set_bit(key);
+ if (update_tbl_stats)
+ {
+ param->table->quick_keys.set_bit(key);
+ param->table->quick_key_parts[key]=param->max_key_part+1;
+ param->table->quick_n_ranges[key]= param->n_ranges;
+ param->table->quick_condition_rows=
+ min(param->table->quick_condition_rows, records);
+ }
+ /*
+ Need to save quick_rows in any case as it is used when calculating
+ cost of ROR intersection:
+ */
param->table->quick_rows[key]=records;
- param->table->quick_key_parts[key]=param->max_key_part+1;
- param->table->quick_n_ranges[key]= param->n_ranges;
if (cpk_scan)
param->is_ror_scan= TRUE;
}
@@ -6141,36 +7547,36 @@ static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length)
}
-bool QUICK_SELECT_I::is_keys_used(List<Item> *fields)
+bool QUICK_SELECT_I::is_keys_used(const MY_BITMAP *fields)
{
- return is_key_used(head, index, *fields);
+ return is_key_used(head, index, fields);
}
-bool QUICK_INDEX_MERGE_SELECT::is_keys_used(List<Item> *fields)
+bool QUICK_INDEX_MERGE_SELECT::is_keys_used(const MY_BITMAP *fields)
{
QUICK_RANGE_SELECT *quick;
List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects);
while ((quick= it++))
{
- if (is_key_used(head, quick->index, *fields))
+ if (is_key_used(head, quick->index, fields))
return 1;
}
return 0;
}
-bool QUICK_ROR_INTERSECT_SELECT::is_keys_used(List<Item> *fields)
+bool QUICK_ROR_INTERSECT_SELECT::is_keys_used(const MY_BITMAP *fields)
{
QUICK_RANGE_SELECT *quick;
List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects);
while ((quick= it++))
{
- if (is_key_used(head, quick->index, *fields))
+ if (is_key_used(head, quick->index, fields))
return 1;
}
return 0;
}
-bool QUICK_ROR_UNION_SELECT::is_keys_used(List<Item> *fields)
+bool QUICK_ROR_UNION_SELECT::is_keys_used(const MY_BITMAP *fields)
{
QUICK_SELECT_I *quick;
List_iterator_fast<QUICK_SELECT_I> it(quick_selects);
@@ -6229,7 +7635,7 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
goto err;
quick->records= records;
- if (cp_buffer_from_ref(thd,ref) && thd->is_fatal_error ||
+ if (cp_buffer_from_ref(thd, table, ref) && thd->is_fatal_error ||
!(range= new(alloc) QUICK_RANGE()))
goto err; // out of memory
@@ -6293,10 +7699,9 @@ err:
rowids into Unique, get the sorted sequence and destroy the Unique.
If table has a clustered primary key that covers all rows (TRUE for bdb
- and innodb currently) and one of the index_merge scans is a scan on PK,
- then
- rows that will be retrieved by PK scan are not put into Unique and
- primary key scan is not performed here, it is performed later separately.
+ and innodb currently) and one of the index_merge scans is a scan on PK,
+    then rows that will be retrieved by the PK scan are not put into Unique,
+    and the primary key scan is not performed here but later, separately.
RETURN
0 OK
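
A toy model of the merge phase, with std::set standing in for the Unique
class (an illustrative assumption, not the server's actual API):

    #include <set>
    #include <vector>

    /* Gather rowids from several index scans, deduplicate and sort them
       (the job the Unique object does above), and hand them back in
       rowid order for the final sweep-read of the table. */
    static std::vector<long> merge_scan_rowids(
        const std::vector<std::vector<long> > &scan_rowids)
    {
      std::set<long> unique_rowids;          /* sorted, duplicate-free */
      for (size_t i= 0; i < scan_rowids.size(); i++)
        unique_rowids.insert(scan_rowids[i].begin(), scan_rowids[i].end());
      return std::vector<long>(unique_rowids.begin(), unique_rowids.end());
    }

The real code additionally keeps rowids that fall into the clustered PK
scan's ranges out of the container, since those rows are fetched by the
separate primary key scan described above.
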
@@ -6309,21 +7714,11 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
QUICK_RANGE_SELECT* cur_quick;
int result;
Unique *unique;
- DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::prepare_unique");
+ handler *file= head->file;
+ DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::read_keys_and_merge");
- /* We're going to just read rowids. */
- if (head->file->extra(HA_EXTRA_KEYREAD))
- DBUG_RETURN(1);
-
- /*
- Make innodb retrieve all PK member fields, so
- * ha_innobase::position (which uses them) call works.
- * We can filter out rows that will be retrieved by clustered PK.
- (This also creates a deficiency - it is possible that we will retrieve
- parts of key that are not used by current query at all.)
- */
- if (head->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY))
- DBUG_RETURN(1);
+ file->extra(HA_EXTRA_KEYREAD);
+ head->prepare_for_position();
cur_quick_it.rewind();
cur_quick= cur_quick_it++;
@@ -6336,8 +7731,8 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
if (cur_quick->init() || cur_quick->reset())
DBUG_RETURN(1);
- unique= new Unique(refpos_order_cmp, (void *)head->file,
- head->file->ref_length,
+ unique= new Unique(refpos_order_cmp, (void *)file,
+ file->ref_length,
thd->variables.sortbuff_size);
if (!unique)
DBUG_RETURN(1);
@@ -6380,15 +7775,15 @@ int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge()
}
+ DBUG_PRINT("info", ("ok"));
/* ok, all row ids are in Unique */
result= unique->get(head);
delete unique;
doing_pk_scan= FALSE;
+ /* index_merge currently doesn't support "using index" at all */
+ file->extra(HA_EXTRA_NO_KEYREAD);
/* start table scan */
init_read_record(&read_record, thd, head, (SQL_SELECT*) 0, 1, 1);
- /* index_merge currently doesn't support "using index" at all */
- head->file->extra(HA_EXTRA_NO_KEYREAD);
-
DBUG_RETURN(result);
}
@@ -6410,9 +7805,7 @@ int QUICK_INDEX_MERGE_SELECT::get_next()
if (doing_pk_scan)
DBUG_RETURN(pk_quick_select->get_next());
- result= read_record.read_record(&read_record);
-
- if (result == -1)
+ if ((result= read_record.read_record(&read_record)) == -1)
{
result= HA_ERR_END_OF_FILE;
end_read_record(&read_record);
@@ -6420,7 +7813,8 @@ int QUICK_INDEX_MERGE_SELECT::get_next()
if (pk_quick_select)
{
doing_pk_scan= TRUE;
- if ((result= pk_quick_select->init()) || (result= pk_quick_select->reset()))
+ if ((result= pk_quick_select->init()) ||
+ (result= pk_quick_select->reset()))
DBUG_RETURN(result);
DBUG_RETURN(pk_quick_select->get_next());
}
@@ -6458,64 +7852,65 @@ int QUICK_ROR_INTERSECT_SELECT::get_next()
uint last_rowid_count=0;
DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::get_next");
- /* Get a rowid for first quick and save it as a 'candidate' */
- quick= quick_it++;
- if (cpk_quick)
+ do
{
- do {
- error= quick->get_next();
- }while (!error && !cpk_quick->row_in_ranges());
- }
- else
+ /* Get a rowid for first quick and save it as a 'candidate' */
+ quick= quick_it++;
error= quick->get_next();
-
- if (error)
- DBUG_RETURN(error);
-
- quick->file->position(quick->record);
- memcpy(last_rowid, quick->file->ref, head->file->ref_length);
- last_rowid_count= 1;
-
- while (last_rowid_count < quick_selects.elements)
- {
- if (!(quick= quick_it++))
+ if (cpk_quick)
{
- quick_it.rewind();
- quick= quick_it++;
+ while (!error && !cpk_quick->row_in_ranges())
+ error= quick->get_next();
}
+ if (error)
+ DBUG_RETURN(error);
- do {
- if ((error= quick->get_next()))
- DBUG_RETURN(error);
- quick->file->position(quick->record);
- cmp= head->file->cmp_ref(quick->file->ref, last_rowid);
- } while (cmp < 0);
+ quick->file->position(quick->record);
+ memcpy(last_rowid, quick->file->ref, head->file->ref_length);
+ last_rowid_count= 1;
- /* Ok, current select 'caught up' and returned ref >= cur_ref */
- if (cmp > 0)
+ while (last_rowid_count < quick_selects.elements)
{
- /* Found a row with ref > cur_ref. Make it a new 'candidate' */
- if (cpk_quick)
+ if (!(quick= quick_it++))
+ {
+ quick_it.rewind();
+ quick= quick_it++;
+ }
+
+ do
{
- while (!cpk_quick->row_in_ranges())
+ if ((error= quick->get_next()))
+ DBUG_RETURN(error);
+ quick->file->position(quick->record);
+ cmp= head->file->cmp_ref(quick->file->ref, last_rowid);
+ } while (cmp < 0);
+
+ /* Ok, current select 'caught up' and returned ref >= cur_ref */
+ if (cmp > 0)
+ {
+ /* Found a row with ref > cur_ref. Make it a new 'candidate' */
+ if (cpk_quick)
{
- if ((error= quick->get_next()))
- DBUG_RETURN(error);
+ while (!cpk_quick->row_in_ranges())
+ {
+ if ((error= quick->get_next()))
+ DBUG_RETURN(error);
+ }
}
+ memcpy(last_rowid, quick->file->ref, head->file->ref_length);
+ last_rowid_count= 1;
+ }
+ else
+ {
+ /* current 'candidate' row confirmed by this select */
+ last_rowid_count++;
}
- memcpy(last_rowid, quick->file->ref, head->file->ref_length);
- last_rowid_count= 1;
- }
- else
- {
- /* current 'candidate' row confirmed by this select */
- last_rowid_count++;
}
- }
- /* We get here iff we got the same row ref in all scans. */
- if (need_to_fetch_row)
- error= head->file->rnd_pos(head->record[0], last_rowid);
+ /* We get here if we got the same row ref in all scans. */
+ if (need_to_fetch_row)
+ error= head->file->rnd_pos(head->record[0], last_rowid);
+ } while (error == HA_ERR_RECORD_DELETED);
DBUG_RETURN(error);
}
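
The restructured loop above synchronizes several ordered rowid streams on a
shared 'candidate'. A condensed sketch of the same idea over plain sorted
vectors (an illustration under simplifying assumptions, not the class's
actual interface): every stream must catch up to the current candidate, and
a stream that overshoots supplies the new candidate.

    #include <cstddef>
    #include <vector>

    /* Intersect k ascending, duplicate-free rowid streams. */
    static std::vector<long> intersect_rowids(
        const std::vector<std::vector<long> > &streams)
    {
      std::vector<long> out;
      const size_t n= streams.size();
      if (n == 0 || streams[0].empty())
        return out;
      if (n == 1)
        return streams[0];                   /* trivial intersection */

      std::vector<size_t> pos(n, 0);
      long candidate= streams[0][pos[0]++];  /* first 'candidate' rowid */
      size_t matched= 1;                     /* streams agreeing on it */
      size_t i= 1;
      for (;;)
      {
        const std::vector<long> &s= streams[i];
        size_t &p= pos[i];
        while (p < s.size() && s[p] < candidate)
          p++;                               /* catch up to the candidate */
        if (p == s.size())
          return out;                        /* one stream ran dry: done */
        if (s[p] == candidate)
          matched++;                         /* candidate confirmed */
        else
        {
          candidate= s[p];                   /* overshoot: new candidate */
          matched= 1;
        }
        p++;
        if (matched == n)                    /* all streams returned it */
        {
          out.push_back(candidate);
          if (pos[0] == streams[0].size())
            return out;
          candidate= streams[0][pos[0]++];
          matched= 1;
          i= 1;
        }
        else
          i= (i + 1) % n;
      }
    }

The HA_ERR_RECORD_DELETED retry and the cpk_quick range filtering in the
real implementation have no counterpart here; the sketch shows only the
rowid synchronization.
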
@@ -6544,44 +7939,48 @@ int QUICK_ROR_UNION_SELECT::get_next()
do
{
- if (!queue.elements)
- DBUG_RETURN(HA_ERR_END_OF_FILE);
- /* Ok, we have a queue with >= 1 scans */
+ do
+ {
+ if (!queue.elements)
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+ /* Ok, we have a queue with >= 1 scans */
- quick= (QUICK_SELECT_I*)queue_top(&queue);
- memcpy(cur_rowid, quick->last_rowid, rowid_length);
+ quick= (QUICK_SELECT_I*)queue_top(&queue);
+ memcpy(cur_rowid, quick->last_rowid, rowid_length);
- /* put into queue rowid from the same stream as top element */
- if ((error= quick->get_next()))
- {
- if (error != HA_ERR_END_OF_FILE)
- DBUG_RETURN(error);
- queue_remove(&queue, 0);
- }
- else
- {
- quick->save_last_pos();
- queue_replaced(&queue);
- }
+ /* put into queue rowid from the same stream as top element */
+ if ((error= quick->get_next()))
+ {
+ if (error != HA_ERR_END_OF_FILE)
+ DBUG_RETURN(error);
+ queue_remove(&queue, 0);
+ }
+ else
+ {
+ quick->save_last_pos();
+ queue_replaced(&queue);
+ }
- if (!have_prev_rowid)
- {
- /* No rows have been returned yet */
- dup_row= FALSE;
- have_prev_rowid= TRUE;
- }
- else
- dup_row= !head->file->cmp_ref(cur_rowid, prev_rowid);
- }while (dup_row);
+ if (!have_prev_rowid)
+ {
+ /* No rows have been returned yet */
+ dup_row= FALSE;
+ have_prev_rowid= TRUE;
+ }
+ else
+ dup_row= !head->file->cmp_ref(cur_rowid, prev_rowid);
+ } while (dup_row);
- tmp= cur_rowid;
- cur_rowid= prev_rowid;
- prev_rowid= tmp;
+ tmp= cur_rowid;
+ cur_rowid= prev_rowid;
+ prev_rowid= tmp;
- error= head->file->rnd_pos(quick->record, prev_rowid);
+ error= head->file->rnd_pos(quick->record, prev_rowid);
+ } while (error == HA_ERR_RECORD_DELETED);
DBUG_RETURN(error);
}
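
The union counterpart can be sketched the same way, with a min-priority
queue over plain sorted vectors standing in for the handler and queue
machinery (again an illustration, not the actual API): pop the globally
smallest rowid, emit it unless it duplicates the previous one, and refill
the queue from the stream it came from.

    #include <functional>
    #include <queue>
    #include <utility>
    #include <vector>

    /* Merge k ascending rowid streams into one duplicate-free stream. */
    static std::vector<long> union_rowids(
        const std::vector<std::vector<long> > &streams)
    {
      typedef std::pair<long, size_t> Head;  /* (rowid, stream index) */
      std::priority_queue<Head, std::vector<Head>,
                          std::greater<Head> > queue;
      std::vector<size_t> pos(streams.size(), 0);

      for (size_t i= 0; i < streams.size(); i++)
        if (!streams[i].empty())
          queue.push(Head(streams[i][0], i));

      std::vector<long> out;
      while (!queue.empty())
      {
        Head top= queue.top();               /* globally smallest rowid */
        queue.pop();
        if (out.empty() || out.back() != top.first)
          out.push_back(top.first);          /* emit unless duplicate */
        size_t i= top.second;
        if (++pos[i] < streams[i].size())    /* refill from same stream */
          queue.push(Head(streams[i][pos[i]], i));
      }
      return out;
    }
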
+
int QUICK_RANGE_SELECT::reset()
{
uint mrange_bufsiz;
@@ -6592,7 +7991,7 @@ int QUICK_RANGE_SELECT::reset()
in_range= FALSE;
cur_range= (QUICK_RANGE**) ranges.buffer;
- if (file->inited == handler::NONE && (error= file->ha_index_init(index)))
+ if (file->inited == handler::NONE && (error= file->ha_index_init(index,1)))
DBUG_RETURN(error);
/* Do not allocate the buffers twice. */
@@ -6621,7 +8020,7 @@ int QUICK_RANGE_SELECT::reset()
}
/* Allocate the handler buffer if necessary. */
- if (file->table_flags() & HA_NEED_READ_RANGE_BUFFER)
+ if (file->ha_table_flags() & HA_NEED_READ_RANGE_BUFFER)
{
mrange_bufsiz= min(multi_range_bufsiz,
(QUICK_SELECT_I::records + 1)* head->s->reclength);
@@ -6647,6 +8046,14 @@ int QUICK_RANGE_SELECT::reset()
multi_range_buff->buffer= mrange_buff;
multi_range_buff->buffer_end= mrange_buff + mrange_bufsiz;
multi_range_buff->end_of_used_area= mrange_buff;
+#ifdef HAVE_purify
+ /*
+    We need this until ndb uses the buffer efficiently.
+    (Currently ndb stores the complete row here, instead of only the used
+    fields, which gives us valgrind warnings in compare_record[].)
+ */
+ bzero((char*) mrange_buff, mrange_bufsiz);
+#endif
}
DBUG_RETURN(0);
}
@@ -6678,6 +8085,15 @@ int QUICK_RANGE_SELECT::get_next()
(cur_range >= (QUICK_RANGE**) ranges.buffer) &&
(cur_range <= (QUICK_RANGE**) ranges.buffer + ranges.elements));
+ if (in_ror_merged_scan)
+ {
+ /*
+ We don't need to signal the bitmap change as the bitmap is always the
+ same for this head->file
+ */
+ head->column_bitmaps_set_no_signal(&column_bitmap, &column_bitmap);
+ }
+
for (;;)
{
if (in_range)
@@ -6685,10 +8101,7 @@ int QUICK_RANGE_SELECT::get_next()
/* We did already start to read this key. */
result= file->read_multi_range_next(&mrange);
if (result != HA_ERR_END_OF_FILE)
- {
- in_range= ! result;
- DBUG_RETURN(result);
- }
+ goto end;
}
uint count= min(multi_range_length, ranges.elements -
@@ -6697,6 +8110,8 @@ int QUICK_RANGE_SELECT::get_next()
{
/* Ranges have already been used up before. None is left for read. */
in_range= FALSE;
+ if (in_ror_merged_scan)
+ head->column_bitmaps_set_no_signal(save_read_set, save_write_set);
DBUG_RETURN(HA_ERR_END_OF_FILE);
}
KEY_MULTI_RANGE *mrange_slot, *mrange_end;
@@ -6728,12 +8143,18 @@ int QUICK_RANGE_SELECT::get_next()
result= file->read_multi_range_first(&mrange, multi_range, count,
sorted, multi_range_buff);
if (result != HA_ERR_END_OF_FILE)
- {
- in_range= ! result;
- DBUG_RETURN(result);
- }
+ goto end;
in_range= FALSE; /* No matching rows; go to next set of ranges. */
}
+
+end:
+ in_range= ! result;
+ if (in_ror_merged_scan)
+ {
+ /* Restore bitmaps set on entry */
+ head->column_bitmaps_set_no_signal(save_read_set, save_write_set);
+ }
+ DBUG_RETURN(result);
}
/*
@@ -6850,7 +8271,7 @@ int QUICK_RANGE_SELECT_GEOM::get_next()
(byte*) range->min_key,
range->min_length,
(ha_rkey_function)(range->flag ^ GEOM_FLAG));
- if (result != HA_ERR_KEY_NOT_FOUND)
+ if (result != HA_ERR_KEY_NOT_FOUND && result != HA_ERR_END_OF_FILE)
DBUG_RETURN(result);
range=0; // Not found, to next range
}
@@ -6909,7 +8330,7 @@ bool QUICK_RANGE_SELECT::row_in_ranges()
QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUICK_RANGE_SELECT *q,
uint used_key_parts)
- : QUICK_RANGE_SELECT(*q), rev_it(rev_ranges)
+ :QUICK_RANGE_SELECT(*q), rev_it(rev_ranges)
{
QUICK_RANGE *r;
@@ -6993,7 +8414,7 @@ int QUICK_SELECT_DESC::get_next()
}
if (result)
{
- if (result != HA_ERR_KEY_NOT_FOUND)
+ if (result != HA_ERR_KEY_NOT_FOUND && result != HA_ERR_END_OF_FILE)
DBUG_RETURN(result);
range=0; // Not found, to next range
continue;
@@ -7385,9 +8806,10 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
groups, and thus can be applied after the grouping.
GA4. There are no expressions among G_i, just direct column references.
NGA1.If in the index I there is a gap between the last GROUP attribute G_k,
- and the MIN/MAX attribute C, then NGA must consist of exactly the index
- attributes that constitute the gap. As a result there is a permutation
- of NGA that coincides with the gap in the index <B_1, ..., B_m>.
+ and the MIN/MAX attribute C, then NGA must consist of exactly the
+ index attributes that constitute the gap. As a result there is a
+ permutation of NGA that coincides with the gap in the index
+ <B_1, ..., B_m>.
NGA2.If BA <> {}, then the WHERE clause must contain a conjunction EQ of
equality conditions for all NG_i of the form (NG_i = const) or
(const = NG_i), such that each NG_i is referenced in exactly one
@@ -7395,9 +8817,10 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
gap in the index.
WA1. There are no other attributes in the WHERE clause except the ones
referenced in predicates RNG, PA, PC, EQ defined above. Therefore
- WA is subset of (GA union NGA union C) for GA,NGA,C that pass the above
- tests. By transitivity then it also follows that each WA_i participates
- in the index I (if this was already tested for GA, NGA and C).
+ WA is subset of (GA union NGA union C) for GA,NGA,C that pass the
+ above tests. By transitivity then it also follows that each WA_i
+ participates in the index I (if this was already tested for GA, NGA
+ and C).
C) Overall query form:
SELECT EXPR([A_1,...,A_k], [B_1,...,B_m], [MIN(C)], [MAX(C)])
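
As a concrete illustration (an assumed example, not taken from this file):
given an index on (a, b, c), the query SELECT a, MIN(c) FROM t WHERE b = 5
GROUP BY a matches this form; the gap attribute b between the GROUP
attribute a and the MIN/MAX attribute c is bound by a single equality
(NGA1/NGA2), and the WHERE clause references no attributes outside GA, NGA
and C (WA1).
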
@@ -7459,12 +8882,12 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
TABLE *table= param->table;
bool have_min= FALSE; /* TRUE if there is a MIN function. */
bool have_max= FALSE; /* TRUE if there is a MAX function. */
- Item_field *min_max_arg_item= NULL;/* The argument of all MIN/MAX functions.*/
+ Item_field *min_max_arg_item= NULL; // The argument of all MIN/MAX functions
KEY_PART_INFO *min_max_arg_part= NULL; /* The corresponding keypart. */
uint group_prefix_len= 0; /* Length (in bytes) of the key prefix. */
KEY *index_info= NULL; /* The index chosen for data access. */
uint index= 0; /* The id of the chosen index. */
- uint group_key_parts= 0; /* Number of index key parts in the group prefix. */
+ uint group_key_parts= 0; // Number of index key parts in the group prefix.
uint used_key_parts= 0; /* Number of index key parts used for access. */
byte key_infix[MAX_KEY_LENGTH]; /* Constants from equality predicates.*/
uint key_infix_len= 0; /* Length of key_infix. */
@@ -7582,28 +9005,19 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
we check that all query fields are indeed covered by 'cur_index'.
*/
if (pk < MAX_KEY && cur_index != pk &&
- (table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
+ (table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
{
/* For each table field */
for (uint i= 0; i < table->s->fields; i++)
{
Field *cur_field= table->field[i];
/*
- If the field is used in the current query, check that the
- field is covered by some keypart of the current index.
+          If the field is used in the current query, ensure that it is
+          part of 'cur_index'.
*/
- if (thd->query_id == cur_field->query_id)
- {
- KEY_PART_INFO *key_part= cur_index_info->key_part;
- KEY_PART_INFO *key_part_end= key_part + cur_index_info->key_parts;
- for (;;)
- {
- if (key_part->field == cur_field)
- break;
- if (++key_part == key_part_end)
- goto next_index; // Field was not part of key
- }
- }
+ if (bitmap_is_set(table->read_set, cur_field->field_index) &&
+ !cur_field->part_of_key_not_clustered.is_set(cur_index))
+ goto next_index; // Field was not part of key
}
}
@@ -7757,7 +9171,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
key_part_range[1]= last_part;
/* Check if cur_part is referenced in the WHERE clause. */
- if (join->conds->walk(&Item::find_item_in_field_list_processor,
+ if (join->conds->walk(&Item::find_item_in_field_list_processor, 0,
(byte*) key_part_range))
goto next_index;
}
@@ -7771,7 +9185,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
{
for (cur_part= first_non_infix_part; cur_part != last_part; cur_part++)
{
- if (cur_part->field->query_id == thd->query_id)
+ if (bitmap_is_set(table->read_set, cur_part->field->field_index))
goto next_index;
}
}
@@ -7789,7 +9203,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
&cur_param_idx);
/* Check if this range tree can be used for prefix retrieval. */
cur_quick_prefix_records= check_quick_select(param, cur_param_idx,
- cur_index_tree);
+ cur_index_tree, TRUE);
}
cost_group_min_max(table, cur_index_info, used_key_parts,
cur_group_key_parts, tree, cur_index_tree,
@@ -8068,10 +9482,10 @@ get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree,
uint field_length= cur_part->store_length;
if ((cur_range->maybe_null &&
- cur_range->min_value[0] && cur_range->max_value[0])
- ||
- (memcmp(cur_range->min_value, cur_range->max_value, field_length) == 0))
- { /* cur_range specifies 'IS NULL' or an equality condition. */
+ cur_range->min_value[0] && cur_range->max_value[0]) ||
+ !memcmp(cur_range->min_value, cur_range->max_value, field_length))
+ {
+ /* cur_range specifies 'IS NULL' or an equality condition. */
memcpy(key_ptr, cur_range->min_value, field_length);
key_ptr+= field_length;
*key_infix_len+= field_length;
@@ -8235,8 +9649,8 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
double cpu_cost= 0; /* TODO: CPU cost of index_read calls? */
DBUG_ENTER("cost_group_min_max");
- table_records= table->file->records;
- keys_per_block= (table->file->block_size / 2 /
+ table_records= table->file->stats.records;
+ keys_per_block= (table->file->stats.block_size / 2 /
(index_info->key_length + table->file->ref_length)
+ 1);
num_blocks= (table_records / keys_per_block) + 1;
@@ -8739,7 +10153,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::reset(void)
DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::reset");
file->extra(HA_EXTRA_KEYREAD); /* We need only the key attributes */
- result= file->ha_index_init(index);
+ result= file->ha_index_init(index, 1);
result= file->index_last(record);
if (result == HA_ERR_END_OF_FILE)
DBUG_RETURN(0);
@@ -8815,7 +10229,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next()
DBUG_ASSERT(is_last_prefix <= 0);
if (result == HA_ERR_KEY_NOT_FOUND)
continue;
- else if (result)
+ if (result)
break;
if (have_min)
@@ -8845,10 +10259,11 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next()
HA_READ_KEY_EXACT);
result= have_min ? min_res : have_max ? max_res : result;
- }
- while (result == HA_ERR_KEY_NOT_FOUND && is_last_prefix != 0);
+ } while ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) &&
+ is_last_prefix != 0);
if (result == 0)
+ {
/*
Partially mimic the behavior of end_select_send. Copy the
field data from Item_field::field into Item_field::result_field
@@ -8856,6 +10271,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next()
other fields in non-ANSI SQL mode).
*/
copy_fields(&join->tmp_table_param);
+ }
else if (result == HA_ERR_KEY_NOT_FOUND)
result= HA_ERR_END_OF_FILE;
@@ -8882,6 +10298,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::get_next()
RETURN
0 on success
HA_ERR_KEY_NOT_FOUND if no MIN key was found that fulfills all conditions.
+ HA_ERR_END_OF_FILE - "" -
other if some error occurred
*/
@@ -8935,7 +10352,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min()
if (key_cmp(index_info->key_part, group_prefix, real_prefix_len))
key_restore(record, tmp_record, index_info, 0);
}
- else if (result == HA_ERR_KEY_NOT_FOUND)
+ else if (result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE)
result= 0; /* There is a result in any case. */
}
}
@@ -8960,6 +10377,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min()
RETURN
0 on success
HA_ERR_KEY_NOT_FOUND if no MAX key was found that fulfills all conditions.
+ HA_ERR_END_OF_FILE - "" -
other if some error occurred
*/
@@ -9060,6 +10478,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_prefix()
0 on success
HA_ERR_KEY_NOT_FOUND if there is no key with the given prefix in any of
the ranges
+ HA_ERR_END_OF_FILE - "" -
other if some error
*/
@@ -9104,11 +10523,12 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range()
result= file->index_read(record, group_prefix, search_prefix_len,
find_flag);
- if ((result == HA_ERR_KEY_NOT_FOUND) &&
- (cur_range->flag & (EQ_RANGE | NULL_RANGE)))
- continue; /* Check the next range. */
- else if (result)
+ if (result)
{
+ if ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) &&
+ (cur_range->flag & (EQ_RANGE | NULL_RANGE)))
+ continue; /* Check the next range. */
+
/*
In all other cases (HA_ERR_*, HA_READ_KEY_EXACT with NO_MIN_RANGE,
HA_READ_AFTER_KEY, HA_READ_KEY_OR_NEXT) if the lookup failed for this
@@ -9135,7 +10555,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range()
/* Check if record belongs to the current group. */
if (key_cmp(index_info->key_part, group_prefix, real_prefix_len))
{
- result = HA_ERR_KEY_NOT_FOUND;
+ result= HA_ERR_KEY_NOT_FOUND;
continue;
}
@@ -9153,7 +10573,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range()
if (!((cur_range->flag & NEAR_MAX) && (cmp_res == -1) ||
(cmp_res <= 0)))
{
- result = HA_ERR_KEY_NOT_FOUND;
+ result= HA_ERR_KEY_NOT_FOUND;
continue;
}
}
@@ -9192,6 +10612,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range()
0 on success
HA_ERR_KEY_NOT_FOUND if there is no key with the given prefix in any of
the ranges
+ HA_ERR_END_OF_FILE - "" -
other if some error
*/
@@ -9237,10 +10658,12 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_max_in_range()
result= file->index_read(record, group_prefix, search_prefix_len,
find_flag);
- if ((result == HA_ERR_KEY_NOT_FOUND) && (cur_range->flag & EQ_RANGE))
- continue; /* Check the next range. */
if (result)
{
+ if ((result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE) &&
+ (cur_range->flag & EQ_RANGE))
+ continue; /* Check the next range. */
+
/*
+        If no key was found with this upper bound, there certainly are no keys
in the ranges to the left.
@@ -9377,8 +10800,6 @@ static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map,
int idx;
char buff[1024];
DBUG_ENTER("print_sel_tree");
- if (! _db_on_)
- DBUG_VOID_RETURN;
String tmp(buff,sizeof(buff),&my_charset_bin);
tmp.length(0);
@@ -9397,7 +10818,7 @@ static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map,
if (!tmp.length())
tmp.append(STRING_WITH_LEN("(empty)"));
- DBUG_PRINT("info", ("SEL_TREE %p (%s) scans:%s", tree, msg, tmp.ptr()));
+ DBUG_PRINT("info", ("SEL_TREE: 0x%lx (%s) scans: %s", (long) tree, msg, tmp.ptr()));
DBUG_VOID_RETURN;
}
@@ -9407,9 +10828,7 @@ static void print_ror_scans_arr(TABLE *table, const char *msg,
struct st_ror_scan_info **start,
struct st_ror_scan_info **end)
{
- DBUG_ENTER("print_ror_scans");
- if (! _db_on_)
- DBUG_VOID_RETURN;
+ DBUG_ENTER("print_ror_scans_arr");
char buff[1024];
String tmp(buff,sizeof(buff),&my_charset_bin);
@@ -9440,6 +10859,10 @@ print_key(KEY_PART *key_part,const char *key,uint used_length)
const char *key_end= key+used_length;
String tmp(buff,sizeof(buff),&my_charset_bin);
uint store_length;
+ TABLE *table= key_part->field->table;
+ my_bitmap_map *old_write_set, *old_read_set;
+ old_write_set= dbug_tmp_use_all_columns(table, table->write_set);
+ old_read_set= dbug_tmp_use_all_columns(table, table->read_set);
for (; key < key_end; key+=store_length, key_part++)
{
@@ -9465,18 +10888,28 @@ print_key(KEY_PART *key_part,const char *key,uint used_length)
if (key+store_length < key_end)
fputc('/',DBUG_FILE);
}
+ dbug_tmp_restore_column_map(table->write_set, old_write_set);
+ dbug_tmp_restore_column_map(table->read_set, old_read_set);
}
static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg)
{
char buf[MAX_KEY/8+1];
+ TABLE *table;
+ my_bitmap_map *old_read_map, *old_write_map;
DBUG_ENTER("print_quick");
- if (! _db_on_ || !quick)
+ if (!quick)
DBUG_VOID_RETURN;
DBUG_LOCK_FILE;
+ table= quick->head;
+ old_read_map= dbug_tmp_use_all_columns(table, table->read_set);
+ old_write_map= dbug_tmp_use_all_columns(table, table->write_set);
quick->dbug_dump(0, TRUE);
+ dbug_tmp_restore_column_map(table->read_set, old_read_map);
+ dbug_tmp_restore_column_map(table->write_set, old_write_map);
+
fprintf(DBUG_FILE,"other_keys: 0x%s:\n", needed_reg->print(buf));
DBUG_UNLOCK_FILE;
@@ -9627,7 +11060,7 @@ void QUICK_GROUP_MIN_MAX_SELECT::dbug_dump(int indent, bool verbose)
#endif /* NOT_USED */
/*****************************************************************************
-** Instantiate templates
+** Instantiate templates
*****************************************************************************/
#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION