Diffstat (limited to 'sql')
-rw-r--r--  sql/opt_range.cc  67
-rw-r--r--  sql/uniques.cc    34
2 files changed, 61 insertions, 40 deletions
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 5a0c4066ae0..fa27ef43463 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -274,7 +274,11 @@ class SEL_TREE :public Sql_alloc
public:
enum Type { IMPOSSIBLE, ALWAYS, MAYBE, KEY, KEY_SMALLER } type;
SEL_TREE(enum Type type_arg) :type(type_arg) {}
- SEL_TREE() :type(KEY) { keys_map.clear_all(); bzero((char*) keys,sizeof(keys));}
+ SEL_TREE() :type(KEY)
+ {
+ keys_map.clear_all();
+ bzero((char*) keys,sizeof(keys));
+ }
SEL_ARG *keys[MAX_KEY];
key_map keys_map; /* bitmask of non-NULL elements in keys */
List<SEL_IMERGE> merges; /* possible ways to read rows using index_merge */
@@ -306,7 +310,8 @@ static ha_rows check_quick_keys(PARAM *param,uint index,SEL_ARG *key_tree,
char *max_key, uint max_key_flag);
QUICK_RANGE_SELECT *get_quick_select(PARAM *param,uint index,
- SEL_ARG *key_tree, MEM_ROOT *alloc = NULL);
+ SEL_ARG *key_tree,
+ MEM_ROOT *alloc = NULL);
static int get_quick_select_params(SEL_TREE *tree, PARAM *param,
key_map& needed_reg,
bool index_read_can_be_used,
@@ -338,7 +343,8 @@ bool get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key,
static bool eq_tree(SEL_ARG* a,SEL_ARG *b);
static SEL_ARG null_element(SEL_ARG::IMPOSSIBLE);
-static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length);
+static bool null_part_in_key(KEY_PART *key_part, const char *key,
+ uint length);
bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2, PARAM* param);
@@ -620,7 +626,7 @@ QUICK_SELECT_I::QUICK_SELECT_I()
QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(THD *thd, TABLE *table, uint key_nr,
bool no_alloc, MEM_ROOT *parent_alloc)
- :dont_free(0),error(0),range(0),cur_range(NULL)
+ :dont_free(0),error(0),cur_range(NULL),range(0)
{
index= key_nr;
head= table;
@@ -654,9 +660,10 @@ QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT()
}
-QUICK_INDEX_MERGE_SELECT::QUICK_INDEX_MERGE_SELECT(THD *thd_param, TABLE *table)
- :cur_quick_it(quick_selects), thd(thd_param), unique(NULL),
- pk_quick_select(NULL)
+QUICK_INDEX_MERGE_SELECT::QUICK_INDEX_MERGE_SELECT(THD *thd_param,
+ TABLE *table)
+ :cur_quick_it(quick_selects),pk_quick_select(NULL),unique(NULL),
+ thd(thd_param)
{
index= MAX_KEY;
head= table;
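
Both constructor changes above only reorder the initializer lists, presumably to match the members' declaration order: C++ initializes members in declaration order no matter how the list is written (this is what gcc's -Wreorder warns about), so a mismatched list can silently read a not-yet-initialized member. A minimal sketch of the pitfall; the struct is invented for illustration, not taken from the server:

#include <cstdio>

struct S
{
  int a;                    /* declared first, so initialized first */
  int b;
  /* The list names b first, but a is still initialized first and reads b
     before b's own initializer has run, so a gets an indeterminate value. */
  S() : b(1), a(b + 1) {}
};

int main()
{
  S s;
  std::printf("a=%d b=%d\n", s.a, s.b);   /* a is unlikely to be 2 */
  return 0;
}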
@@ -1231,7 +1238,7 @@ static int get_index_merge_params(PARAM *param, key_map& needed_reg,
double tree_read_time;
ha_rows tree_records;
bool pk_is_clustered= param->table->file->primary_key_is_clustered();
- bool have_cpk_scan;
+ bool have_cpk_scan= false;
ha_rows records_for_unique= 0;
ha_rows cpk_records= 0;
@@ -1295,8 +1302,9 @@ static int get_index_merge_params(PARAM *param, key_map& needed_reg,
imerge_cost += get_index_only_read_time(param, tree_records,keynr);
records_for_unique += tree_records;
}
- }
-
+ }
+ DBUG_PRINT("info",("index_merge cost of index reads: %g", imerge_cost));
+
if (imerge_too_expensive)
DBUG_RETURN(1);
@@ -1329,33 +1337,37 @@ static int get_index_merge_params(PARAM *param, key_map& needed_reg,
else
{
double n_blocks=
- ceil((double)(longlong)param->table->file->data_file_length / IO_SIZE);
- /* Don't ceil the following as it is an estimate */
+ ceil((double)(longlong)param->table->file->data_file_length / IO_SIZE);
double busy_blocks=
n_blocks * (1.0 - pow(1.0 - 1.0/n_blocks, records_for_unique));
JOIN *join= param->thd->lex->select_lex.join;
if (!join || join->tables == 1)
- {
imerge_cost += busy_blocks*(DISK_SEEK_BASE_COST +
DISK_SEEK_PROP_COST*n_blocks/busy_blocks);
- }
else
{
/*
It can be a join where the source table is not the last table, so
assume that disk seeks are random here.
- (TODO it is possible to determine if this *is* a last table in 'index
- checked for each record'-type join)
+ (TODO: it is possible to determine if this is the last table in an
+ 'index checked for each record' join)
*/
imerge_cost += busy_blocks;
}
}
+ DBUG_PRINT("info",("index_merge cost with rowid-to-row scan: %g", imerge_cost));
/* PHASE 3: Add Unique operations cost */
- imerge_cost += Unique::get_use_cost(param->mem_root, records_for_unique,
- param->table->file->ref_length,
- param->thd->variables.sortbuff_size);
+ double unique_cost=
+ Unique::get_use_cost(param->mem_root, records_for_unique,
+ param->table->file->ref_length,
+ param->thd->variables.sortbuff_size);
+ if (unique_cost < 0.0)
+ DBUG_RETURN(1);
+
+ imerge_cost += unique_cost;
+ DBUG_PRINT("info",("index_merge total cost: %g", imerge_cost));
if (imerge_cost < *read_time)
{
*read_time= imerge_cost;
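
The busy_blocks expression in the hunk above is the classic estimate of how many distinct blocks k uniformly scattered row fetches touch: n * (1 - (1 - 1/n)^k); the hunk also starts treating a negative Unique::get_use_cost() result as out-of-memory. A standalone sketch of the rowid-to-row phase of the cost model; the two seek-cost constants are made-up stand-ins for DISK_SEEK_BASE_COST and DISK_SEEK_PROP_COST, not the server's values:

#include <cmath>
#include <cstdio>

/* Expected number of distinct blocks hit by `rows` uniform fetches. */
static double busy_blocks(double n_blocks, double rows)
{
  return n_blocks * (1.0 - std::pow(1.0 - 1.0/n_blocks, rows));
}

int main()
{
  const double kSeekBase= 1.0;    /* stand-in for DISK_SEEK_BASE_COST */
  const double kSeekProp= 0.01;   /* stand-in for DISK_SEEK_PROP_COST */
  double n_blocks= 10000.0;       /* data_file_length / IO_SIZE */
  double rows= 5000.0;            /* records_for_unique */

  double busy= busy_blocks(n_blocks, rows);
  /* Single-table case: seeks shorten as a larger fraction of blocks is hit. */
  double cost= busy * (kSeekBase + kSeekProp * n_blocks / busy);
  std::printf("busy blocks: %.1f  rowid-to-row cost: %.1f\n", busy, cost);
  return 0;
}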
@@ -1382,7 +1394,8 @@ static int get_index_merge_params(PARAM *param, key_map& needed_reg,
key blocks are half full (normally things are much better).
*/
-inline double get_index_only_read_time(PARAM* param, ha_rows records, int keynr)
+inline double get_index_only_read_time(PARAM* param, ha_rows records,
+ int keynr)
{
double read_time;
uint keys_per_block= (param->table->file->block_size/2/
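
The doc comment above describes the whole estimate: assume the scan walks the full key range and that key blocks are half full. A rough standalone restatement of get_index_only_read_time() under those assumptions (the rest of the function lies outside the hunk); the block, key and rowid sizes here are illustrative, not read from a real handler:

#include <cmath>
#include <cstdio>

static double index_only_read_time(unsigned block_size, unsigned key_length,
                                   unsigned ref_length, double records)
{
  /* Assume key blocks are only half full: block_size/2 usable bytes,
     each (key, rowid) entry taking key_length + ref_length bytes. */
  unsigned keys_per_block= block_size / 2 / (key_length + ref_length) + 1;
  /* One block read per keys_per_block matching entries, rounded up. */
  return std::ceil(records / keys_per_block);
}

int main()
{
  /* e.g. 16K blocks, 20-byte key, 6-byte rowid, 100000 matching rows */
  std::printf("%.0f block reads\n",
              index_only_read_time(16384, 20, 6, 100000.0));
  return 0;
}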
@@ -1445,18 +1458,13 @@ static int get_quick_select_params(SEL_TREE *tree, PARAM *param,
read_index_only &&
(param->table->file->index_flags(keynr) & HA_KEY_READ_ONLY))
{
- /*
- We can resolve this by only reading through this key.
- Assume that we will read trough the whole key range
- and that all key blocks are half full (normally things are
- much better).
- */
+ /* We can resolve this by only reading through this key. */
found_read_time=get_index_only_read_time(param, found_records, keynr);
}
else
found_read_time= (param->table->file->read_time(keynr,
- param->range_count,
- found_records)+
+ param->range_count,
+ found_records)+
(double) found_records / TIME_FOR_COMPARE);
if (*read_time > found_read_time && found_records != HA_POS_ERROR)
{
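
For a key that cannot be read index-only, the hunk above charges the handler's own read_time() for the ranges plus one comparison per fetched row; whichever candidate comes out cheaper becomes the chosen range scan. A toy sketch of that comparison; read_time() here is a crude stand-in for handler::read_time(), and the TIME_FOR_COMPARE value is invented:

#include <cstdio>

/* Crude stand-in for handler::read_time(): one seek per range plus one
   block fetch per ~100 rows. Not the real handler formula. */
static double read_time(unsigned ranges, double rows)
{
  return ranges + rows / 100.0;
}

int main()
{
  const double kTimeForCompare= 5.0;   /* stand-in for TIME_FOR_COMPARE */
  double best_read_time= 1e100;        /* *read_time on entry */
  double found_records= 40000.0;
  unsigned range_count= 3;

  double found_read_time= read_time(range_count, found_records) +
                          found_records / kTimeForCompare;
  if (best_read_time > found_read_time)
    best_read_time= found_read_time;   /* this key is the new best candidate */
  std::printf("best read time: %.1f\n", best_read_time);
  return 0;
}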
@@ -3893,7 +3901,8 @@ static void print_quick_sel_imerge(QUICK_INDEX_MERGE_SELECT *quick,
DBUG_VOID_RETURN;
}
-void print_quick_sel_range(QUICK_RANGE_SELECT *quick,const key_map *needed_reg)
+void print_quick_sel_range(QUICK_RANGE_SELECT *quick,
+ const key_map *needed_reg)
{
QUICK_RANGE *range;
char buf[MAX_KEY/8+1];
diff --git a/sql/uniques.cc b/sql/uniques.cc
index 48233f29bee..8fc8efff5e6 100644
--- a/sql/uniques.cc
+++ b/sql/uniques.cc
@@ -76,11 +76,13 @@ Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg,
#define M_PI 3.14159265358979323846
#endif
-#define M_E (exp(1))
+#ifndef M_E
+#define M_E (exp((double)1.0))
+#endif
inline double log2_n_fact(double x)
{
- return (2 * ( ((x)+1) * log(((x)+1)/M_E) + log(2*M_PI*((x)+1))/2 ) / log(2));
+ return (2 * (((x)+1)*log(((x)+1)/M_E) + log(2*M_PI*((x)+1))/2 ) / log(2));
}
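
log2_n_fact(x) is Stirling's approximation for twice the base-2 logarithm of (x+1)!, apparently used as an estimate of the comparisons spent inserting that many elements into the in-memory tree. A standalone check of the approximation against the exact sum; only <cmath> is used, nothing from the server's headers:

#include <cmath>
#include <cstdio>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
#ifndef M_E
#define M_E 2.7182818284590452354
#endif

static double log2_n_fact(double x)
{
  return (2 * (((x)+1)*log(((x)+1)/M_E) + log(2*M_PI*((x)+1))/2) / log(2.0));
}

int main()
{
  double x= 1000.0, exact= 0.0;
  for (double i= 2.0; i <= x + 1; i+= 1.0)   /* exact 2*log2((x+1)!) */
    exact+= 2 * log(i) / log(2.0);
  std::printf("stirling: %.1f  exact: %.1f\n", log2_n_fact(x), exact);
  return 0;
}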
/*
@@ -127,8 +129,8 @@ static double get_merge_buffers_cost(uint* buff_sizes, uint elem_size,
actions.
RETURN
- >=0 Cost of merge in disk seeks.
- <0 Out of memory.
+ >=0 Cost of merge in disk seeks.
+ <0 Out of memory.
*/
static double get_merge_many_buffs_cost(MEM_ROOT *alloc,
uint maxbuffer, uint max_n_elems,
@@ -174,7 +176,8 @@ static double get_merge_many_buffs_cost(MEM_ROOT *alloc,
key_size using max_in_memory_size memory.
RETURN
- Use cost as # of disk seeks.
+ >=0 Cost in disk seeks.
+ <0 Out of memory.
NOTES
cost(using_unique) =
@@ -229,19 +232,28 @@ double Unique::get_use_cost(MEM_ROOT *alloc, uint nkeys, uint key_size,
result+= n_full_trees * log2_n_fact(max_elements_in_tree);
result /= TIME_FOR_COMPARE_ROWID;
- /* Calculate cost of merging */
+ DBUG_PRINT("info",("unique trees sizes: %u=%u*%lu + %lu", nkeys,
+ n_full_trees, n_full_trees?max_elements_in_tree:0,
+ last_tree_elems));
+
if (!n_full_trees)
return result;
- /* There is more then one tree and merging is necessary. */
- /* Add cost of writing all trees to disk. */
+ /*
There is more than one tree and merging is necessary.
+ First, add cost of writing all trees to disk.
+ */
result += n_full_trees * ceil(key_size*max_elements_in_tree / IO_SIZE);
result += ceil(key_size*last_tree_elems / IO_SIZE);
/* Cost of merge */
- result += get_merge_many_buffs_cost(alloc, n_full_trees,
- max_elements_in_tree,
- last_tree_elems, key_size);
+ double merge_cost= get_merge_many_buffs_cost(alloc, n_full_trees,
+ max_elements_in_tree,
+ last_tree_elems, key_size);
+ if (merge_cost < 0.0)
+ return merge_cost;
+
+ result += merge_cost;
/*
Add cost of reading the resulting sequence, assuming there were no
duplicate elements.
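
Putting the pieces of Unique::get_use_cost() together: comparisons to fill the in-memory trees, writing each tree to disk, the merge, and a final read of the merged result. A simplified, self-contained restatement; TIME_FOR_COMPARE_ROWID, IO_SIZE and the tree capacity are made-up placeholder values, and the merge term (get_merge_many_buffs_cost in the patch, which can now signal out-of-memory with a negative return) is stubbed out:

#include <cmath>
#include <cstdio>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

static double log2_n_fact(double x)
{
  return 2 * (((x)+1)*std::log(((x)+1)/std::exp(1.0)) +
              std::log(2*M_PI*((x)+1))/2) / std::log(2.0);
}

int main()
{
  const double kTimeForCompareRowid= 30;   /* placeholder constant */
  const double kIoSize= 4096;              /* placeholder for IO_SIZE */
  double nkeys= 1000000, key_size= 6;
  double max_elements_in_tree= 38000;      /* sort-buffer capacity, made up */

  double n_full_trees= std::floor(nkeys / max_elements_in_tree);
  double last_tree_elems= nkeys - n_full_trees * max_elements_in_tree;

  /* 1) comparisons spent inserting into the in-memory trees */
  double cost= (log2_n_fact(last_tree_elems) +
                n_full_trees * log2_n_fact(max_elements_in_tree)) /
               kTimeForCompareRowid;
  /* 2) writing every tree out to disk, in IO_SIZE units */
  cost+= n_full_trees * std::ceil(key_size*max_elements_in_tree / kIoSize);
  cost+= std::ceil(key_size*last_tree_elems / kIoSize);
  /* 3) the merge itself would be added here (stubbed out in this sketch) */
  /* 4) reading the merged sequence back, assuming no duplicates */
  cost+= std::ceil(key_size*nkeys / kIoSize);
  std::printf("approximate Unique cost: %.0f disk seeks\n", cost);
  return 0;
}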