summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorunknown <timour@mysql.com>2005-11-30 12:52:12 +0200
committerunknown <timour@mysql.com>2005-11-30 12:52:12 +0200
commite3f575522824ee79ade046cf96f4f3679d68c1a3 (patch)
tree93c8fe49567155c829f0e7241c60faa6b980c051
parent35d40868fd0f3d6a90e7190ebc63ee3183860e9f (diff)
downloadmariadb-git-e3f575522824ee79ade046cf96f4f3679d68c1a3.tar.gz
Fix for BUG#14920 Ordering aggregated result sets corrupts resultset.
The cause of the bug was the use of end_write_group instead of end_write in the case when ORDER BY required a temporary table, which didn't take into account the fact that loose index scan already computes the result of MIN/MAX aggregate functions (and performs grouping). The solution is to call end_write instead of end_write_group and to add the MIN/MAX functions to the list of regular functions so that their values are inserted into the temporary table. mysql-test/r/group_min_max.result: Test for BUG#14920 mysql-test/t/group_min_max.test: Test for BUG#14920 sql/sql_class.cc: Added new member to TMP_TABLE_PARAM. sql/sql_class.h: Added new member to TMP_TABLE_PARAM. sql/sql_select.cc: Enable result rows generated by loose index scan to be written into a temporary table. The change is necessary because loose index scan already computes the result of GROUP BY and the MIN/MAX aggregate functions. This is realized by three changes: - create_tmp_table allocates space for aggregate functions in the list of regular functions, - use end_write instead of end_write_group, - copy the pointers to the MIN/MAX aggregate functions to the list of regular functions TMP_TABLE_PARAM::items_to_copy. sql/sql_select.h: New parameter to create_tmp_table.
-rw-r--r--mysql-test/r/group_min_max.result31
-rw-r--r--mysql-test/t/group_min_max.test13
-rw-r--r--sql/sql_class.cc1
-rw-r--r--sql/sql_class.h9
-rw-r--r--sql/sql_select.cc65
-rw-r--r--sql/sql_select.h6
6 files changed, 113 insertions, 12 deletions
diff --git a/mysql-test/r/group_min_max.result b/mysql-test/r/group_min_max.result
index 038d0c75f74..636fb719c81 100644
--- a/mysql-test/r/group_min_max.result
+++ b/mysql-test/r/group_min_max.result
@@ -2002,3 +2002,34 @@ a count(a)
1 1
NULL 1
drop table t1;
+create table t1 (c1 int not null,c2 int not null, primary key(c1,c2));
+insert into t1 (c1,c2) values
+(10,1),(10,2),(10,3),(20,4),(20,5),(20,6),(30,7),(30,8),(30,9);
+select distinct c1, c2 from t1 order by c2;
+c1 c2
+10 1
+10 2
+10 3
+20 4
+20 5
+20 6
+30 7
+30 8
+30 9
+select c1,min(c2) as c2 from t1 group by c1 order by c2;
+c1 c2
+10 1
+20 4
+30 7
+select c1,c2 from t1 group by c1,c2 order by c2;
+c1 c2
+10 1
+10 2
+10 3
+20 4
+20 5
+20 6
+30 7
+30 8
+30 9
+drop table t1;
diff --git a/mysql-test/t/group_min_max.test b/mysql-test/t/group_min_max.test
index 3d751f4a571..f6be6170976 100644
--- a/mysql-test/t/group_min_max.test
+++ b/mysql-test/t/group_min_max.test
@@ -693,3 +693,16 @@ create table t1(a int, key(a)) engine=innodb;
insert into t1 values(1);
select a, count(a) from t1 group by a with rollup;
drop table t1;
+
+#
+# Bug #14920 Ordering aggregated result sets with composite primary keys
+# corrupts resultset
+#
+
+create table t1 (c1 int not null,c2 int not null, primary key(c1,c2));
+insert into t1 (c1,c2) values
+(10,1),(10,2),(10,3),(20,4),(20,5),(20,6),(30,7),(30,8),(30,9);
+select distinct c1, c2 from t1 order by c2;
+select c1,min(c2) as c2 from t1 group by c1 order by c2;
+select c1,c2 from t1 group by c1,c2 order by c2;
+drop table t1;
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index c52addf3995..4b3d6486ef7 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1802,6 +1802,7 @@ void TMP_TABLE_PARAM::init()
group_parts= group_length= group_null_parts= 0;
quick_group= 1;
table_charset= 0;
+ precomputed_group_by= 0;
}
diff --git a/sql/sql_class.h b/sql/sql_class.h
index ed6f5732ca8..c5f661cebd5 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -1819,11 +1819,18 @@ public:
uint convert_blob_length;
CHARSET_INFO *table_charset;
bool schema_table;
+ /*
+ True if GROUP BY and its aggregate functions are already computed
+ by a table access method (e.g. by loose index scan). In this case
+ query execution should not perform aggregation and should treat
+ aggregate functions as normal functions.
+ */
+ bool precomputed_group_by;
TMP_TABLE_PARAM()
:copy_field(0), group_parts(0),
group_length(0), group_null_parts(0), convert_blob_length(0),
- schema_table(0)
+ schema_table(0), precomputed_group_by(0)
{}
~TMP_TABLE_PARAM()
{
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 9c7df461a3e..adebf9b4a52 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -1007,6 +1007,20 @@ JOIN::optimize()
}
having= 0;
+ /*
+ The loose index scan access method guarantees that all grouping or
+ duplicate row elimination (for distinct) is already performed
+ during data retrieval, and that all MIN/MAX functions are already
+ computed for each group. Thus all MIN/MAX functions should be
+ treated as regular functions, and there is no need to perform
+ grouping in the main execution loop.
+ Notice that currently loose index scan is applicable only for
+ single table queries, thus it is sufficient to test only the first
+ join_tab element of the plan for its access method.
+ */
+ if (join_tab->is_using_loose_index_scan())
+ tmp_table_param.precomputed_group_by= TRUE;
+
/* Create a tmp table if distinct or if the sort is too complicated */
if (need_tmp)
{
@@ -1410,6 +1424,15 @@ JOIN::exec()
else
{
/* group data to new table */
+
+ /*
+ If the access method is loose index scan then all MIN/MAX
+ functions are precomputed, and should be treated as regular
+ functions. See extended comment in JOIN::optimize.
+ */
+ if (curr_join->join_tab->is_using_loose_index_scan())
+ curr_join->tmp_table_param.precomputed_group_by= TRUE;
+
if (!(curr_tmp_table=
exec_tmp_table2= create_tmp_table(thd,
&curr_join->tmp_table_param,
@@ -8279,6 +8302,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
MEM_ROOT *mem_root_save, own_root;
TABLE *table;
uint i,field_count,null_count,null_pack_length;
+ uint copy_func_count= param->func_count;
uint hidden_null_count, hidden_null_pack_length, hidden_field_count;
uint blob_count,group_null_items, string_count;
uint temp_pool_slot=MY_BIT_NONE;
@@ -8342,6 +8366,16 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
field_count=param->field_count+param->func_count+param->sum_func_count;
hidden_field_count=param->hidden_field_count;
+ /*
+ When loose index scan is employed as access method, it already
+ computes all groups and the result of all aggregate functions. We
+ make space for the items of the aggregate function in the list of
+ functions TMP_TABLE_PARAM::items_to_copy, so that the values of
+ these items are stored in the temporary table.
+ */
+ if (param->precomputed_group_by)
+ copy_func_count+= param->sum_func_count;
+
init_sql_alloc(&own_root, TABLE_ALLOC_BLOCK_SIZE, 0);
if (!multi_alloc_root(&own_root,
@@ -8349,7 +8383,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
&reg_field, sizeof(Field*) * (field_count+1),
&blob_field, sizeof(uint)*(field_count+1),
&from_field, sizeof(Field*)*field_count,
- &copy_func, sizeof(*copy_func)*(param->func_count+1),
+ &copy_func, sizeof(*copy_func)*(copy_func_count+1),
&param->keyinfo, sizeof(*param->keyinfo),
&key_part_info,
sizeof(*key_part_info)*(param->group_parts+1),
@@ -9241,11 +9275,13 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
Next_select_func setup_end_select_func(JOIN *join)
{
TABLE *table= join->tmp_table;
+ TMP_TABLE_PARAM *tmp_tbl= &join->tmp_table_param;
Next_select_func end_select;
+
/* Set up select_end */
if (table)
{
- if (table->group && join->tmp_table_param.sum_func_count)
+ if (table->group && tmp_tbl->sum_func_count)
{
if (table->s->keys)
{
@@ -9258,7 +9294,7 @@ Next_select_func setup_end_select_func(JOIN *join)
end_select=end_unique_update;
}
}
- else if (join->sort_and_group)
+ else if (join->sort_and_group && !tmp_tbl->precomputed_group_by)
{
DBUG_PRINT("info",("Using end_write_group"));
end_select=end_write_group;
@@ -9267,19 +9303,27 @@ Next_select_func setup_end_select_func(JOIN *join)
{
DBUG_PRINT("info",("Using end_write"));
end_select=end_write;
+ if (tmp_tbl->precomputed_group_by)
+ {
+ /*
+ A preceding call to create_tmp_table in the case when loose
+ index scan is used guarantees that
+ TMP_TABLE_PARAM::items_to_copy has enough space for the group
+ by functions. It is OK here to use memcpy since we copy
+ Item_sum pointers into an array of Item pointers.
+ */
+ memcpy(tmp_tbl->items_to_copy + tmp_tbl->func_count,
+ join->sum_funcs,
+ sizeof(Item*)*tmp_tbl->sum_func_count);
+ tmp_tbl->items_to_copy[tmp_tbl->func_count+tmp_tbl->sum_func_count]= 0;
+ }
}
}
else
{
- /* Test if data is accessed via QUICK_GROUP_MIN_MAX_SELECT. */
- bool is_using_quick_group_min_max_select=
- (join->join_tab->select && join->join_tab->select->quick &&
- (join->join_tab->select->quick->get_type() ==
- QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX));
-
if ((join->sort_and_group ||
(join->procedure && join->procedure->flags & PROC_GROUP)) &&
- !is_using_quick_group_min_max_select)
+ !tmp_tbl->precomputed_group_by)
end_select= end_send_group;
else
end_select= end_send;
@@ -10553,7 +10597,6 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
copy_fields(&join->tmp_table_param);
copy_funcs(join->tmp_table_param.items_to_copy);
-
#ifdef TO_BE_DELETED
if (!table->uniques) // If not unique handling
{
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 2f53c9a3b35..b3abd59f2b4 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -140,6 +140,12 @@ typedef struct st_join_table {
nested_join_map embedding_map;
void cleanup();
+ inline bool is_using_loose_index_scan()
+ {
+ return (select && select->quick &&
+ (select->quick->get_type() ==
+ QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX));
+ }
} JOIN_TAB;
enum_nested_loop_state sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool