Diffstat (limited to 'sql/sql_select.cc')
-rw-r--r--  sql/sql_select.cc  |  38
1 file changed, 23 insertions(+), 15 deletions(-)
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index be2aeb52618..9bc47569f1a 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -97,10 +97,6 @@ static int sort_keyuse(KEYUSE *a,KEYUSE *b);
static bool are_tables_local(JOIN_TAB *jtab, table_map used_tables);
static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse,
bool allow_full_scan, table_map used_tables);
-void best_access_path(JOIN *join, JOIN_TAB *s,
- table_map remaining_tables, uint idx,
- bool disable_jbuf, double record_count,
- POSITION *pos, POSITION *loose_scan_pos);
static void optimize_straight_join(JOIN *join, table_map join_tables);
static bool greedy_search(JOIN *join, table_map remaining_tables,
uint depth, uint prune_level,
@@ -4573,6 +4569,13 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
{
if (choose_plan(join, all_table_map & ~join->const_table_map))
goto error;
+
+#ifdef HAVE_valgrind
+ // JOIN::positions holds the current query plan. We've already
+ // made the plan choice, so we should only use JOIN::best_positions
+ for (uint k=join->const_tables; k < join->table_count; k++)
+ MEM_UNDEFINED(&join->positions[k], sizeof(join->positions[k]));
+#endif
}
else
{
@@ -6288,6 +6291,7 @@ void
best_access_path(JOIN *join,
JOIN_TAB *s,
table_map remaining_tables,
+ const POSITION *join_positions,
uint idx,
bool disable_jbuf,
double record_count,
@@ -6391,7 +6395,7 @@ best_access_path(JOIN *join,
if (!(keyuse->used_tables & ~join->const_table_map))
const_part|= keyuse->keypart_map;

- double tmp2= prev_record_reads(join->positions, idx,
+ double tmp2= prev_record_reads(join_positions, idx,
(found_ref | keyuse->used_tables));
if (tmp2 < best_prev_record_reads)
{
@@ -6432,7 +6436,7 @@ best_access_path(JOIN *join,
Really, there should be records=0.0 (yes!)
but 1.0 would be probably safer
*/
- tmp= prev_record_reads(join->positions, idx, found_ref);
+ tmp= prev_record_reads(join_positions, idx, found_ref);
records= 1.0;
}
else
@@ -6448,7 +6452,7 @@ best_access_path(JOIN *join,
if ((key_flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME ||
MY_TEST(key_flags & HA_EXT_NOSAME))
{
- tmp = prev_record_reads(join->positions, idx, found_ref);
+ tmp = prev_record_reads(join_positions, idx, found_ref);
records=1.0;
}
else
@@ -6692,7 +6696,8 @@ best_access_path(JOIN *join,
}
tmp += s->startup_cost;
- loose_scan_opt.check_ref_access_part2(key, start_key, records, tmp);
+ loose_scan_opt.check_ref_access_part2(key, start_key, records, tmp,
+ found_ref);
} /* not ft_key */
if (tmp + 0.0001 < best_time - records/(double) TIME_FOR_COMPARE)
{
@@ -7364,7 +7369,8 @@ optimize_straight_join(JOIN *join, table_map join_tables)
for (JOIN_TAB **pos= join->best_ref + idx ; (s= *pos) ; pos++)
{
/* Find the best access method from 's' to the current partial plan */
- best_access_path(join, s, join_tables, idx, disable_jbuf, record_count,
+ best_access_path(join, s, join_tables, join->positions, idx,
+ disable_jbuf, record_count,
join->positions + idx, &loose_scan_pos);

/* compute the cost of the new plan extended with 's' */
@@ -8270,8 +8276,9 @@ best_extension_by_limited_search(JOIN *join,
/* Find the best access method from 's' to the current partial plan */
POSITION loose_scan_pos;
- best_access_path(join, s, remaining_tables, idx, disable_jbuf,
- record_count, join->positions + idx, &loose_scan_pos);
+ best_access_path(join, s, remaining_tables, join->positions, idx,
+ disable_jbuf, record_count, join->positions + idx,
+ &loose_scan_pos);

/* Compute the cost of extending the plan with 's', avoid overflow */
if (position->records_read < DBL_MAX / record_count)
@@ -8654,11 +8661,11 @@ cache_record_length(JOIN *join,uint idx)
*/

double
-prev_record_reads(POSITION *positions, uint idx, table_map found_ref)
+prev_record_reads(const POSITION *positions, uint idx, table_map found_ref)
{
double found=1.0;
- POSITION *pos_end= positions - 1;
- for (POSITION *pos= positions + idx - 1; pos != pos_end; pos--)
+ const POSITION *pos_end= positions - 1;
+ for (const POSITION *pos= positions + idx - 1; pos != pos_end; pos--)
{
if (pos->table->table->map & found_ref)
{
@@ -15560,7 +15567,8 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
if ((i == first_tab && first_alt) || join->positions[i].use_join_buffer)
{
/* Find the best access method that would not use join buffering */
- best_access_path(join, rs, reopt_remaining_tables, i,
+ best_access_path(join, rs, reopt_remaining_tables,
+ join->positions, i,
TRUE, rec_count,
&pos, &loose_scan_pos);
}
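
A note on the HAVE_valgrind hunk in make_join_statistics: MEM_UNDEFINED is MariaDB's wrapper around Valgrind's "make memory undefined" client request (VALGRIND_MAKE_MEM_UNDEFINED from <valgrind/memcheck.h>), so poisoning the tail of JOIN::positions once the plan has been chosen makes memcheck report any later read of the stale working array. Below is a minimal standalone sketch of that poisoning technique, assuming only the stock Valgrind macro; the Position struct, the array names and the main() driver are made up for illustration and are not MariaDB code.

    // Sketch: poison data that is logically dead so Valgrind flags later reads.
    // Build: g++ -g poison.cc && valgrind ./a.out
    // (needs the valgrind development headers; outside valgrind the client
    // request is a no-op and the program just runs)
    #include <cstdio>
    #include <valgrind/memcheck.h>

    struct Position            // stand-in for a planner POSITION entry (hypothetical)
    {
      double records_read;
      double read_time;
    };

    int main()
    {
      Position positions[4]= { {10, 1}, {20, 2}, {30, 3}, {40, 4} };
      Position best_positions[4];

      // "Choose the plan": copy the working array into the final one.
      for (unsigned i= 0; i < 4; i++)
        best_positions[i]= positions[i];

      // From here on only best_positions should be consulted; poison the
      // working array so memcheck reports any stale read.
      for (unsigned i= 0; i < 4; i++)
        VALGRIND_MAKE_MEM_UNDEFINED(&positions[i], sizeof(positions[i]));

      // Correct access: defined memory, no report.
      printf("best: %g\n", best_positions[2].records_read);

      // Deliberate bug: branching on poisoned memory makes Valgrind emit
      // "Conditional jump or move depends on uninitialised value(s)".
      if (positions[2].records_read > 0)
        printf("stale read\n");
      return 0;
    }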
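A second note, on the signature change itself: best_access_path and prev_record_reads now take the plan prefix as an explicit const POSITION * argument instead of reaching into join->positions, so every call site states which array it is costing against and the costing code cannot modify it. The sketch below shows that parameter-threading pattern under invented names (Planner, Pos and fan_out are illustrative, not MariaDB identifiers).

    // Sketch: cost helpers receive the plan prefix they should read as an
    // explicit const pointer rather than reaching into the owning object,
    // so callers can hand them either the working array or the saved best
    // plan, and the compiler rejects accidental writes.
    #include <cstdio>

    struct Pos { double records_read; };

    struct Planner
    {
      Pos positions[8];        // working prefix, mutated during search
      Pos best_positions[8];   // saved best plan
    };

    // Before: double fan_out(Planner *p, unsigned idx);  // implicitly p->positions
    // After:  the array is an argument, and it is const.
    static double fan_out(const Pos *plan, unsigned idx)
    {
      double f= 1.0;
      for (const Pos *p= plan + idx; p != plan; )   // walk the prefix backwards
        f*= (--p)->records_read;
      // plan[0].records_read= 0;   // would not compile: *plan is const
      return f;
    }

    int main()
    {
      Planner pl= { { {2}, {3}, {5} }, { {2}, {3}, {4} } };
      printf("%g %g\n", fan_out(pl.positions, 3),        // 30: working plan
                        fan_out(pl.best_positions, 3));  // 24: saved plan
      return 0;
    }

In this commit every caller still passes join->positions; the explicit parameter mainly documents the dependency, and together with the const qualifier on prev_record_reads it keeps the costing path read-only.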