author     unknown <Sinisa@sinisa.nasamreza.org>	2003-07-02 00:26:23 +0300
committer  unknown <Sinisa@sinisa.nasamreza.org>	2003-07-02 00:26:23 +0300
commit     65d7980556227be9371d4fc2592cbe5c1140689c (patch)
tree       32b1530dab96b7ee94ea6a553b1107f6dc0ea29a /sql/sql_select.cc
parent     12d879c912543fffa77cecfda799bc20ddd35df5 (diff)
parent     49938821c26bb67de5f41fc6937401c6415bf641 (diff)
download   mariadb-git-65d7980556227be9371d4fc2592cbe5c1140689c.tar.gz
merge changes
mysql-test/r/insert_select.result:
  Auto merged
mysql-test/t/insert_select.test:
  Auto merged
sql/sql_select.cc:
  Auto merged
Diffstat (limited to 'sql/sql_select.cc')
-rw-r--r--	sql/sql_select.cc	43
1 file changed, 31 insertions(+), 12 deletions(-)
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index eb29946a3d7..5b5972be384 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -1277,14 +1277,14 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
/*
Set a max range of how many seeks we can expect when using keys
- This was (s->read_time*5), but this was too low with small rows
+ This can't be too high as otherwise we are likely to use a
+ table scan.
*/
- s->worst_seeks= (double) s->found_records / 5;
+ s->worst_seeks= min((double) s->found_records / 10,
+ (double) s->read_time*3);
if (s->worst_seeks < 2.0) // Fix for small tables
s->worst_seeks=2.0;
- /* if (s->type == JT_EQ_REF)
- continue; */
if (s->const_keys)
{
ha_rows records;
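
A standalone sketch of the new cap, with illustrative names (estimate_worst_seeks,
plain-double found_records and read_time are stand-ins, not the optimizer's
interface): the seek estimate is bounded by both a tenth of the matching rows and
three table-scan units, and floored at 2.0 for small tables.

#include <algorithm>

// Hypothetical helper mirroring the heuristic in the hunk above.
static double estimate_worst_seeks(double found_records, double read_time)
{
  double worst_seeks= std::min(found_records / 10.0, read_time * 3.0);
  if (worst_seeks < 2.0)                      // Fix for small tables
    worst_seeks= 2.0;
  return worst_seeks;
}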
@@ -1890,6 +1890,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
best=best_time=records=DBL_MAX;
KEYUSE *best_key=0;
uint best_max_key_part=0;
+ my_bool found_constrain= 0;
if (s->keyuse)
{ /* Use key if possible */
@@ -1970,6 +1971,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
}
else
{
+ found_constrain= 1;
/*
Check if we found full key
*/
@@ -2006,16 +2008,18 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
records=2.0; // Can't be as good as a unique
}
}
+ /* Limit the number of matched rows */
+ tmp= records;
+ set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key);
if (table->used_keys & ((key_map) 1 << key))
{
/* we can use only index tree */
uint keys_per_block= table->file->block_size/2/
(keyinfo->key_length+table->file->ref_length)+1;
- tmp=(record_count*(records+keys_per_block-1)/
- keys_per_block);
+ tmp=record_count*(tmp+keys_per_block-1)/keys_per_block;
}
else
- tmp=record_count*min(records,s->worst_seeks);
+ tmp=record_count*min(tmp,s->worst_seeks);
}
}
else
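
A rough sketch of how the clamped row estimate feeds the ref-access cost in the
hunk above. All names here (ref_access_cost, covering_index, max_seeks_for_key
passed as a plain argument) are illustrative stand-ins rather than the real
optimizer interface.

#include <algorithm>

static double ref_access_cost(double record_count,      // rows from earlier tables
                              double records,           // rows matched per lookup
                              double max_seeks_for_key, // session cap on key seeks
                              double worst_seeks,
                              bool covering_index,      // key covers all used columns
                              unsigned block_size,
                              unsigned key_length,
                              unsigned ref_length)
{
  /* Limit the number of matched rows */
  double tmp= std::min(records, max_seeks_for_key);

  if (covering_index)
  {
    /* Only the index tree is read; cost is counted in index blocks */
    unsigned keys_per_block= block_size / 2 / (key_length + ref_length) + 1;
    return record_count * (tmp + keys_per_block - 1) / keys_per_block;
  }
  /* Otherwise each matched row may cost a disk seek, capped by worst_seeks */
  return record_count * std::min(tmp, worst_seeks);
}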
@@ -2045,7 +2049,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
{
/*
Assume that the first key part matches 1% of the file
- and that the hole key matches 10 (dupplicates) or 1
+ and that the whole key matches 10 (duplicates) or 1
(unique) records.
Assume also that more key matches proportionally more
records
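
The comment above only states an assumption; one hedged way to turn it into a
formula (guess_matching_rows is an invented helper, not code from this file) is a
linear interpolation in the number of used key parts, from 1% of the table at one
key part down to 10 duplicates (or 1 for a unique key) at the full key:

// Illustrative only: all names and parameters here are hypothetical.
static double guess_matching_rows(double table_rows, unsigned used_parts,
                                  unsigned key_parts, bool unique_key)
{
  double a= table_rows / 100.0;                 // first key part: 1% of the file
  double b= unique_key ? 1.0 : 10.0;            // whole key
  if (key_parts <= 1 || used_parts >= key_parts)
    return b;
  double x= (double) used_parts;
  double c= (double) key_parts;
  return (x * (b - a) + a * c - b) / (c - 1.0); // gives a at x=1, b at x=c
}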
@@ -2077,6 +2081,8 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
records=(ulong) tmp;
}
}
+ /* Limit the number of matched rows */
+ set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key);
if (table->used_keys & ((key_map) 1 << key))
{
/* we can use only index tree */
@@ -2119,20 +2125,31 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
s->table->used_keys && best_key) &&
!(s->table->force_index && best_key))
{ // Check full join
+ ha_rows rnd_records= s->found_records;
if (s->on_expr)
{
- tmp=rows2double(s->found_records); // Can't use read cache
+ tmp=rows2double(rnd_records); // Can't use read cache
}
else
{
tmp=(double) s->read_time;
- /* Calculate time to read through cache */
+ /* Calculate time to read previous rows through cache */
tmp*=(1.0+floor((double) cache_record_length(join,idx)*
record_count /
(double) thd->variables.join_buff_size));
}
+
+ /*
+ If there is a restriction on the table, assume that 25% of the
+ rows can be skipped on the next part.
+ This is to force tables that this table depends on to be read
+ before this table.
+ */
+ if (found_constrain)
+ rnd_records-= rnd_records/4;
+
if (best == DBL_MAX ||
- (tmp + record_count/(double) TIME_FOR_COMPARE*s->found_records <
+ (tmp + record_count/(double) TIME_FOR_COMPARE*rnd_records <
best + record_count/(double) TIME_FOR_COMPARE*records))
{
/*
@@ -2140,7 +2157,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
will ensure that this will be used
*/
best=tmp;
- records= rows2double(s->found_records);
+ records= rows2double(rnd_records);
best_key=0;
}
}
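
A condensed sketch of the scan-vs-best decision in the hunk above, with
illustrative parameter names (scan_beats_current_best, has_on_expr, and
time_for_compare standing in for the TIME_FOR_COMPARE constant). The 25%
reduction only applies when a usable restriction (found_constrain) was seen
while probing keys.

#include <float.h>
#include <math.h>

static bool scan_beats_current_best(double record_count, double read_time,
                                    double found_records, bool has_on_expr,
                                    bool found_constrain,
                                    double cache_record_length,
                                    double join_buff_size,
                                    double best, double best_records,
                                    double time_for_compare)
{
  double rnd_records= found_records;
  double tmp;
  if (has_on_expr)
    tmp= found_records;                         /* Can't use read cache */
  else
  {
    tmp= read_time;
    /* Calculate time to read previous rows through cache */
    tmp*= 1.0 + floor(cache_record_length * record_count / join_buff_size);
  }
  /* With a restriction on the table, assume 25% of the rows are filtered out */
  if (found_constrain)
    rnd_records-= rnd_records / 4;

  return best == DBL_MAX ||
         tmp + record_count / time_for_compare * rnd_records <
         best + record_count / time_for_compare * best_records;
}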
@@ -5947,6 +5964,8 @@ create_sort_index(JOIN_TAB *tab, ORDER *order, ha_rows filesort_limit,
/*
We have a ref on a const; Change this to a range that filesort
can use.
+ For impossible ranges (like when doing a lookup on NULL on a NOT NULL
+ field), quick will contain an empty record set.
*/
if (!(select->quick=get_ft_or_quick_select_for_ref(table, tab)))
goto err;