Diffstat (limited to 'sql/sql_select.cc')
-rw-r--r--  sql/sql_select.cc  53
1 file changed, 31 insertions(+), 22 deletions(-)
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 90302897a49..86a4168867b 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -51,6 +51,7 @@
#include "opt_subselect.h"
#include "log_slow.h"
#include "sql_derived.h"
+#include "sql_statistics.h"
#include "debug_sync.h" // DEBUG_SYNC
#include <m_ctype.h>
@@ -3316,6 +3317,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
table_vector[i]=s->table=table=tables->table;
table->pos_in_table_list= tables;
error= tables->fetch_number_of_rows();
+ set_statistics_for_table(join->thd, table);
#ifdef WITH_PARTITION_STORAGE_ENGINE
const bool no_partitions_used= table->no_partitions_used;
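The new call reads the engine-independent (persistent) statistics for the table right after the handler's own row-count estimate has been fetched, so everything the optimizer consults afterwards can come from one place. set_statistics_for_table() is declared in sql_statistics.h, which is why the include was added above. A rough sketch of its assumed effect; use_stat_tables_enabled(), persistent_stats_were_read() and rows_from_stat_tables() are hypothetical placeholder helpers, not real MariaDB functions:

  void set_statistics_for_table(THD *thd, TABLE *table)
  {
    /* Assumption: prefer persistent statistics when they were read. */
    bool use_stat_tables= use_stat_tables_enabled(thd, table);        /* hypothetical */
    table->used_stat_records=
      (use_stat_tables && persistent_stats_were_read(table)) ?        /* hypothetical */
      rows_from_stat_tables(table) :                                   /* hypothetical */
      table->file->stats.records;
    /* Mark each key so KEY::actual_rec_per_key() knows which source to use. */
    for (uint i= 0; i < table->s->keys; i++)
      table->key_info[i].is_statistics_from_stat_tables=
        use_stat_tables && table->key_info[i].read_stats != NULL;
  }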
@@ -3347,8 +3349,8 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
s->dependent= tables->dep_tables;
if (tables->schema_table)
- table->file->stats.records= 2;
- table->quick_condition_rows= table->file->stats.records;
+ table->file->stats.records= table->used_stat_records= 2;
+ table->quick_condition_rows= table->stat_records();
s->on_expr_ref= &tables->on_expr;
if (*s->on_expr_ref)
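From here on the patch systematically replaces direct reads of table->file->stats.records (the storage engine's live estimate) with table->stat_records(). Presumably this is a thin accessor over the used_stat_records value that set_statistics_for_table() fills in, roughly:

  /* Sketch of the assumed accessor; in the real tree it would be a
     small inline member of TABLE. */
  ha_rows TABLE::stat_records()
  {
    return used_stat_records;  /* engine estimate or persistent stats, whichever was chosen */
  }

The schema-table special case above keeps both values in sync (stats.records and used_stat_records are both forced to 2), so the rest of the optimizer sees the same number regardless of which path it reads.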
@@ -3581,7 +3583,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
// All dep. must be constants
if (s->dependent & ~(found_const_table_map))
continue;
- if (table->file->stats.records <= 1L &&
+ if (table->stat_records() <= 1L &&
(table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT) &&
!table->pos_in_table_list->embedding &&
!((outer_join & table->map) &&
@@ -5550,7 +5552,7 @@ best_access_path(JOIN *join,
else
{
uint key_parts= table->actual_n_key_parts(keyinfo);
- if (!(records=keyinfo->rec_per_key[key_parts-1]))
+ if (!(records= keyinfo->actual_rec_per_key(key_parts-1)))
{ /* Prefer longer keys */
records=
((double) s->records / (double) rec *
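Both rec_per_key hunks in best_access_path (here and just below) switch from the raw engine array keyinfo->rec_per_key[] to keyinfo->actual_rec_per_key(). The assumed behaviour, consistent with the is_statistics_from_stat_tables flag this patch sets elsewhere, is to return the persistent per-prefix average group size when it is available and fall back to the engine numbers otherwise; a sketch, with Index_statistics::get_avg_frequency() named here as an assumption:

  double KEY::actual_rec_per_key(uint i)
  {
    /* Persistent index statistics (read_stats) win when this key was
       flagged by set_statistics_for_table(); otherwise use the
       engine-maintained rec_per_key[] entry. */
    return is_statistics_from_stat_tables ?
           read_stats->get_avg_frequency(i) :      /* assumed accessor */
           (double) rec_per_key[i];
  }

Note the double return type: persistent statistics can carry fractional averages, which is why the callers keep the result in a double.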
@@ -5650,7 +5652,7 @@ best_access_path(JOIN *join,
else
{
/* Check if we have statistic about the distribution */
- if ((records= keyinfo->rec_per_key[max_key_part-1]))
+ if ((records= keyinfo->actual_rec_per_key(max_key_part-1)))
{
/*
Fix for the case where the index statistics is too
@@ -7467,10 +7469,10 @@ JOIN_TAB *next_breadth_first_tab(JOIN *join, enum enum_exec_or_opt tabs_kind,
}
-JOIN_TAB *first_top_level_tab(JOIN *join, enum enum_with_const_tables with_const)
+JOIN_TAB *first_top_level_tab(JOIN *join, enum enum_with_const_tables const_tbls)
{
JOIN_TAB *tab= join->join_tab;
- if (with_const == WITH_CONST_TABLES)
+ if (const_tbls == WITHOUT_CONST_TABLES)
{
if (join->const_tables == join->table_count)
return NULL;
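This hunk is a behavioural fix rather than a statistics change: the old code ran the skip logic when the caller asked for the tabs WITH const tables, i.e. the test appears to have been inverted. With the renamed parameter the skip now happens only for WITHOUT_CONST_TABLES. The tail of the function is outside the hunk; under that reading it presumably ends by stepping over the leading const JOIN_TABs, roughly:

  JOIN_TAB *first_top_level_tab(JOIN *join, enum enum_with_const_tables const_tbls)
  {
    JOIN_TAB *tab= join->join_tab;
    if (const_tbls == WITHOUT_CONST_TABLES)
    {
      if (join->const_tables == join->table_count)
        return NULL;               /* every table is const: nothing left to walk */
      tab+= join->const_tables;    /* assumed tail, not shown in the hunk:
                                      skip the const tables at the front */
    }
    return tab;
  }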
@@ -7866,6 +7868,7 @@ static bool create_hj_key_for_table(JOIN *join, JOIN_TAB *join_tab,
keyinfo->key_length=0;
keyinfo->algorithm= HA_KEY_ALG_UNDEF;
keyinfo->flags= HA_GENERATED_KEY;
+ keyinfo->is_statistics_from_stat_tables= FALSE;
keyinfo->name= (char *) "$hj";
keyinfo->rec_per_key= (ulong*) thd->calloc(sizeof(ulong)*key_parts);
if (!keyinfo->rec_per_key)
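The generated hash-join key ("$hj") can never have rows in the persistent statistics tables, so its is_statistics_from_stat_tables flag is cleared explicitly; that keeps actual_rec_per_key() on the rec_per_key[] branch (all zeros from the calloc() above, i.e. "no statistics"). The same initialization pattern recurs for the tmp-table group_key and distinct_key further down. For reference, the statistics-related KEY members the patch now sets on such on-the-fly keys, with their assumed meaning:

  /* Illustration only: these members live on KEY; grouping them into a
     throwaway struct here is just for readability. */
  struct key_stats_fields_sketch
  {
    bool              is_statistics_from_stat_tables; /* use read_stats, not rec_per_key[] */
    ulong            *rec_per_key;                    /* engine-supplied per-prefix estimates */
    Index_statistics *read_stats;                     /* persistent stats loaded from the stat tables */
    Index_statistics *collected_stats;                /* stats being gathered by ANALYZE */
  };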
@@ -10439,7 +10442,7 @@ double JOIN_TAB::scan_time()
}
else
{
- found_records= records= table->file->stats.records;
+ found_records= records= table->stat_records();
read_time= table->file->scan_time();
/*
table->quick_condition_rows has already been set to
@@ -10450,7 +10453,7 @@ double JOIN_TAB::scan_time()
}
else
{
- found_records= records=table->file->stats.records;
+ found_records= records=table->stat_records();
read_time= found_records ? (double)found_records: 10.0;// TODO:fix this stub
res= read_time;
}
@@ -10491,7 +10494,7 @@ ha_rows JOIN_TAB::get_examined_rows()
handler->info(HA_STATUS_VARIABLE) has been called in
make_join_statistics()
*/
- examined_rows= table->file->stats.records;
+ examined_rows= table->stat_records();
}
}
}
@@ -14851,8 +14854,11 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
keyinfo->usable_key_parts=keyinfo->key_parts= param->group_parts;
keyinfo->ext_key_parts= keyinfo->key_parts;
keyinfo->key_length=0;
- keyinfo->rec_per_key=0;
+ keyinfo->rec_per_key=NULL;
+ keyinfo->read_stats= NULL;
+ keyinfo->collected_stats= NULL;
keyinfo->algorithm= HA_KEY_ALG_UNDEF;
+ keyinfo->is_statistics_from_stat_tables= FALSE;
keyinfo->name= (char*) "group_key";
ORDER *cur_group= group;
for (; cur_group ; cur_group= cur_group->next, key_part_info++)
@@ -14965,7 +14971,10 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
keyinfo->key_length= 0; // Will compute the sum of the parts below.
keyinfo->name= (char*) "distinct_key";
keyinfo->algorithm= HA_KEY_ALG_UNDEF;
+ keyinfo->is_statistics_from_stat_tables= FALSE;
keyinfo->rec_per_key=0;
+ keyinfo->read_stats= NULL;
+ keyinfo->collected_stats= NULL;
/*
Create an extra field to hold NULL bits so that unique indexes on
@@ -18764,7 +18773,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
uint saved_best_key_parts= 0;
int best_key_direction= 0;
JOIN *join= tab->join;
- ha_rows table_records= table->file->stats.records;
+ ha_rows table_records= table->stat_records();
test_if_cheaper_ordering(tab, order, table, usable_keys,
ref_key, select_limit,
@@ -18879,7 +18888,7 @@ check_reverse_order:
{
tab->ref.key= -1;
tab->ref.key_parts= 0;
- if (select_limit < table->file->stats.records)
+ if (select_limit < table->stat_records())
tab->limit= select_limit;
table->disable_keyread();
}
@@ -23070,7 +23079,7 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
int best_key= -1;
bool is_best_covering= FALSE;
double fanout= 1;
- ha_rows table_records= table->file->stats.records;
+ ha_rows table_records= table->stat_records();
bool group= join && join->group && order == join->group_list;
ha_rows ref_key_quick_rows= HA_POS_ERROR;
const bool has_limit= (select_limit_arg != HA_POS_ERROR);
@@ -23162,7 +23171,7 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
if (used_key_parts > used_index_parts)
used_pk_parts= used_key_parts-used_index_parts;
rec_per_key= used_key_parts ?
- keyinfo->rec_per_key[used_key_parts-1] : 1;
+ keyinfo->actual_rec_per_key(used_key_parts-1) : 1;
/* Take into account the selectivity of the used pk prefix */
if (used_pk_parts)
{
@@ -23177,8 +23186,8 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
rec_per_key= 1;
if (rec_per_key > 1)
{
- rec_per_key*= pkinfo->rec_per_key[used_pk_parts-1];
- rec_per_key/= pkinfo->rec_per_key[0];
+ rec_per_key*= pkinfo->actual_rec_per_key(used_pk_parts-1);
+ rec_per_key/= pkinfo->actual_rec_per_key(0);
/*
The value of rec_per_key for the extended key has
to be adjusted accordingly if some components of
@@ -23192,9 +23201,9 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
We presume here that for any index rec_per_key[i] != 0
if rec_per_key[0] != 0.
*/
- DBUG_ASSERT(pkinfo->rec_per_key[i]);
- rec_per_key*= pkinfo->rec_per_key[i-1];
- rec_per_key/= pkinfo->rec_per_key[i];
+ DBUG_ASSERT(pkinfo->actual_rec_per_key(i));
+ rec_per_key*= pkinfo->actual_rec_per_key(i-1);
+ rec_per_key/= pkinfo->actual_rec_per_key(i);
}
}
}
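The two adjacent hunks above adjust the extended-key estimate: first the whole used primary-key suffix is credited (multiply by the pk rec_per_key of the full used prefix, divide by that of the first pk column), then, per the comment in the hunk, a primary-key column that is also a component of the secondary key has its contribution taken back out so its selectivity is not counted twice. A worked example with invented statistics makes the arithmetic concrete:

  /* Invented numbers, only to illustrate the two adjustments above.      */
  double rec_per_key= 100.0;              /* rows per distinct secondary-key prefix          */
  double pk_rpk[3]= { 50.0, 10.0, 1.0 };  /* pkinfo->actual_rec_per_key(0..2), used_pk_parts == 3 */

  /* First hunk: credit the whole pk suffix appended to the key.          */
  rec_per_key*= pk_rpk[2];                /* actual_rec_per_key(used_pk_parts-1)             */
  rec_per_key/= pk_rpk[0];                /* actual_rec_per_key(0):  100 * 1 / 50 = 2        */

  /* Second hunk, for pk part i == 1 already covered by the secondary
     key: undo the reduction attributed to that column.                   */
  rec_per_key*= pk_rpk[0];                /* actual_rec_per_key(i-1)                          */
  rec_per_key/= pk_rpk[1];                /* actual_rec_per_key(i):  2 * 50 / 10 = 10        */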
@@ -23239,7 +23248,7 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
select_limit= (ha_rows) (select_limit *
(double) table_records /
table->quick_condition_rows);
- rec_per_key= keyinfo->rec_per_key[keyinfo->key_parts-1];
+ rec_per_key= keyinfo->actual_rec_per_key(keyinfo->key_parts-1);
set_if_bigger(rec_per_key, 1);
/*
Here we take into account the fact that rows are
@@ -23380,7 +23389,7 @@ uint get_index_for_order(ORDER *order, TABLE *table, SQL_SELECT *select,
Update quick_condition_rows since single table UPDATE/DELETE procedures
don't call make_join_statistics() and leave this variable uninitialized.
*/
- table->quick_condition_rows= table->file->stats.records;
+ table->quick_condition_rows= table->stat_records();
int key, direction;
if (test_if_cheaper_ordering(NULL, order, table,