Diffstat (limited to 'sql/sql_select.h')
-rw-r--r--  sql/sql_select.h  97
1 file changed, 49 insertions(+), 48 deletions(-)
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 9574c93cd7d..dd364e441cb 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -700,8 +700,6 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
bool end_of_records);
-struct st_position;
-
class Semi_join_strategy_picker
{
public:
@@ -712,7 +710,7 @@ public:
Update internal state after another table has been added to the join
prefix
*/
- virtual void set_from_prev(struct st_position *prev) = 0;
+ virtual void set_from_prev(POSITION *prev) = 0;
virtual bool check_qep(JOIN *join,
uint idx,
@@ -722,7 +720,7 @@ public:
double *read_time,
table_map *handled_fanout,
sj_strategy_enum *strategy,
- struct st_position *loose_scan_pos) = 0;
+ POSITION *loose_scan_pos) = 0;
virtual void mark_used() = 0;
@@ -753,7 +751,7 @@ public:
first_dupsweedout_table= MAX_TABLES;
is_used= FALSE;
}
- void set_from_prev(struct st_position *prev);
+ void set_from_prev(POSITION *prev);
bool check_qep(JOIN *join,
uint idx,
@@ -763,7 +761,7 @@ public:
double *read_time,
table_map *handled_fanout,
sj_strategy_enum *strategy,
- struct st_position *loose_scan_pos);
+ POSITION *loose_scan_pos);
void mark_used() { is_used= TRUE; }
friend void fix_semijoin_strategies_for_picked_join_order(JOIN *join);
@@ -799,7 +797,7 @@ public:
is_used= FALSE;
}
- void set_from_prev(struct st_position *prev);
+ void set_from_prev(POSITION *prev);
bool check_qep(JOIN *join,
uint idx,
table_map remaining_tables,
@@ -808,7 +806,7 @@ public:
double *read_time,
table_map *handled_fanout,
sj_strategy_enum *strategy,
- struct st_position *loose_scan_pos);
+ POSITION *loose_scan_pos);
void mark_used() { is_used= TRUE; }
friend void fix_semijoin_strategies_for_picked_join_order(JOIN *join);
@@ -817,6 +815,7 @@ public:
class LooseScan_picker : public Semi_join_strategy_picker
{
+public:
/* The first (i.e. driving) table we're doing loose scan for */
uint first_loosescan_table;
/*
@@ -835,14 +834,13 @@ class LooseScan_picker : public Semi_join_strategy_picker
uint loosescan_parts; /* Number of keyparts to be kept distinct */
bool is_used;
-public:
void set_empty()
{
first_loosescan_table= MAX_TABLES;
is_used= FALSE;
}
- void set_from_prev(struct st_position *prev);
+ void set_from_prev(POSITION *prev);
bool check_qep(JOIN *join,
uint idx,
table_map remaining_tables,
@@ -851,19 +849,19 @@ public:
double *read_time,
table_map *handled_fanout,
sj_strategy_enum *strategy,
- struct st_position *loose_scan_pos);
+ POSITION *loose_scan_pos);
void mark_used() { is_used= TRUE; }
friend class Loose_scan_opt;
friend void best_access_path(JOIN *join,
JOIN_TAB *s,
table_map remaining_tables,
- const struct st_position *join_positions,
+ const POSITION *join_positions,
uint idx,
bool disable_jbuf,
double record_count,
- struct st_position *pos,
- struct st_position *loose_scan_pos);
+ POSITION *pos,
+ POSITION *loose_scan_pos);
friend bool get_best_combination(JOIN *join);
friend int setup_semijoin_loosescan(JOIN *join);
friend void fix_semijoin_strategies_for_picked_join_order(JOIN *join);
@@ -890,7 +888,7 @@ public:
sjm_scan_last_inner= 0;
is_used= FALSE;
}
- void set_from_prev(struct st_position *prev);
+ void set_from_prev(POSITION *prev);
bool check_qep(JOIN *join,
uint idx,
table_map remaining_tables,
@@ -899,7 +897,7 @@ public:
double *read_time,
table_map *handled_fanout,
sj_strategy_enum *strategy,
- struct st_position *loose_scan_pos);
+ POSITION *loose_scan_pos);
void mark_used() { is_used= TRUE; }
friend void fix_semijoin_strategies_for_picked_join_order(JOIN *join);
@@ -914,8 +912,9 @@ class Rowid_filter;
Information about a position of table within a join order. Used in join
optimization.
*/
-typedef struct st_position
+class POSITION
{
+public:
/* The table that's put into join order */
JOIN_TAB *table;
@@ -927,7 +926,7 @@ typedef struct st_position
double records_read;
/* The selectivity of the pushed down conditions */
- double cond_selectivity;
+ double cond_selectivity;
/*
Cost accessing the table in course of the entire complete join execution,
@@ -936,8 +935,6 @@ typedef struct st_position
*/
double read_time;
- /* Cumulative cost and record count for the join prefix */
- Cost_estimate prefix_cost;
double prefix_record_count;
/*
@@ -946,29 +943,46 @@ typedef struct st_position
*/
KEYUSE *key;
+ /* Info on splitting plan used at this position */
+ SplM_plan_info *spl_plan;
+
+ /* Cost info for the range filter used at this position */
+ Range_rowid_filter_cost_info *range_rowid_filter_info;
+
/* If ref-based access is used: bitmap of tables this table depends on */
table_map ref_depend_map;
-
+
/*
- TRUE <=> join buffering will be used. At the moment this is based on
- *very* imprecise guesses made in best_access_path().
+ Bitmap of semi-join inner tables that are in the join prefix and for
+ which there's no provision for how to eliminate semi-join duplicates
+ they produce.
*/
- bool use_join_buffer;
-
+ table_map dups_producing_tables;
+
+ table_map inner_tables_handled_with_other_sjs;
+
+ Duplicate_weedout_picker dups_weedout_picker;
+ Firstmatch_picker firstmatch_picker;
+ LooseScan_picker loosescan_picker;
+ Sj_materialization_picker sjmat_picker;
+
+ /* Cumulative cost and record count for the join prefix */
+ Cost_estimate prefix_cost;
+
/*
Current optimization state: Semi-join strategy to be used for this
and preceding join tables.
-
+
Join optimizer sets this for the *last* join_tab in the
- duplicate-generating range. That is, in order to interpret this field,
+ duplicate-generating range. That is, in order to interpret this field,
one needs to traverse join->[best_]positions array from right to left.
When you see a join table with sj_strategy!= SJ_OPT_NONE, some other
- field (depending on the strategy) tells how many preceding positions
+ field (depending on the strategy) tells how many preceding positions
this applies to. The values of covered_preceding_positions->sj_strategy
must be ignored.
*/
enum sj_strategy_enum sj_strategy;
-
+
/*
Valid only after fix_semijoin_strategies_for_picked_join_order() call:
if sj_strategy!=SJ_OPT_NONE, this is the number of subsequent tables that
@@ -977,26 +991,12 @@ typedef struct st_position
uint n_sj_tables;
/*
- Bitmap of semi-join inner tables that are in the join prefix and for
- which there's no provision for how to eliminate semi-join duplicates
- they produce.
+ TRUE <=> join buffering will be used. At the moment this is based on
+ *very* imprecise guesses made in best_access_path().
*/
- table_map dups_producing_tables;
-
- table_map inner_tables_handled_with_other_sjs;
-
- Duplicate_weedout_picker dups_weedout_picker;
- Firstmatch_picker firstmatch_picker;
- LooseScan_picker loosescan_picker;
- Sj_materialization_picker sjmat_picker;
-
- /* Info on splitting plan used at this position */
- SplM_plan_info *spl_plan;
-
- /* Cost info for the range filter used at this position */
- Range_rowid_filter_cost_info *range_rowid_filter_info;
-
-} POSITION;
+ bool use_join_buffer;
+ POSITION();
+};
typedef Bounds_checked_array<Item_null_result*> Item_null_array;
@@ -1592,6 +1592,7 @@ public:
fields_list= fields_arg;
non_agg_fields.empty();
bzero((char*) &keyuse,sizeof(keyuse));
+ having_value= Item::COND_UNDEF;
tmp_table_param.init();
tmp_table_param.end_write_records= HA_POS_ERROR;
rollup.state= ROLLUP::STATE_NONE;
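
The hunks above replace the old `typedef struct st_position { ... } POSITION;` with a `class POSITION`, drop the `struct st_position` forward declarations in favour of the plain type name `POSITION`, move the picker objects and prefix cost into the class body, and declare a `POSITION()` constructor. A minimal sketch of the resulting layering, assuming the usual C++ incomplete-type rules; all identifiers other than `POSITION` and `Semi_join_strategy_picker` are illustrative placeholders, not the real contents of sql_select.h:

```cpp
// Sketch only: a forward declaration of POSITION is enough for the picker
// classes to take POSITION* parameters, even though the class body is
// defined further down in the header.
class POSITION;

class Semi_join_strategy_picker
{
public:
  // Matches the patched signature: POSITION* instead of struct st_position*.
  virtual void set_from_prev(POSITION *prev) = 0;
  virtual ~Semi_join_strategy_picker() {}
};

class Weedout_picker_sketch : public Semi_join_strategy_picker
{
public:
  // Empty body here; the real definitions live in a .cc file where
  // POSITION is complete.
  void set_from_prev(POSITION *prev) override {}
};

// POSITION itself is defined after the pickers because it now holds them
// by value, which requires their complete definitions.
class POSITION
{
public:
  Weedout_picker_sketch dups_weedout_picker;
  bool use_join_buffer;
  // The patch only declares POSITION(); the member initialization shown
  // here is an illustrative stand-in for whatever the real body does.
  POSITION() : use_join_buffer(false) {}
};
```

The last hunk, which sets `having_value= Item::COND_UNDEF;` in the JOIN constructor path, follows the same theme as the new `POSITION()` constructor: giving previously uninitialized optimizer state an explicit default.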