author     unknown <gkodinov@mysql.com>    2006-06-19 13:22:42 +0300
committer  unknown <gkodinov@mysql.com>    2006-06-19 13:22:42 +0300
commit     124cb126fa65fc1099256be5e269277bde815703 (patch)
tree       106aa086fe749f7dd8a500936751d54dcad4ef40  /mysql-test/t/insert_select.test
parent     6297d1ab38d1b2eb56c965c3cb682ac20b97296b (diff)
* Bug #9676: INSERT INTO x SELECT .. FROM x LIMIT 1; slows down with big tables

Currently INSERT ... SELECT ... LIMIT ... uses a temporary table to store the
result of SELECT ... LIMIT ... and then uses that table as the source for the
INSERT. The problem is that in some cases the LIMIT clause is skipped during
that step and the whole SELECT result set is materialized regardless of the
LIMIT. This fix restricts filling of the temporary table to only as many rows
as will actually be used, by propagating the LIMIT value.

mysql-test/r/insert_select.result:
  * Bug #9676: INSERT INTO x SELECT .. FROM x LIMIT 1; slows down with big tables
  - a test demonstrating the code path
mysql-test/t/insert_select.test:
  * Bug #9676: INSERT INTO x SELECT .. FROM x LIMIT 1; slows down with big tables
  - a test demonstrating the code path
sql/sql_select.cc:
  * Bug #9676: INSERT INTO x SELECT .. FROM x LIMIT 1; slows down with big tables
  - pass through the real LIMIT value when the temporary table is created for buffering results
  - set the counter for all the cases where the temporary table is not used for grouping
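For illustration only (not part of the patch), the following sketch shows the shape of the affected statement; the table name big_t and its contents are hypothetical:

  -- Hypothetical table, assumed to already hold a large number of rows.
  CREATE TABLE big_t (id INT PRIMARY KEY);
  -- Before this fix, the SELECT side could be materialized in full into the
  -- buffering temporary table before LIMIT 1 was applied; with the fix the
  -- LIMIT value is propagated, so at most one row is buffered.
  INSERT INTO big_t SELECT id + 1000000 FROM big_t LIMIT 1;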
Diffstat (limited to 'mysql-test/t/insert_select.test')
-rw-r--r--  mysql-test/t/insert_select.test  13
1 file changed, 13 insertions, 0 deletions
diff --git a/mysql-test/t/insert_select.test b/mysql-test/t/insert_select.test
index 48acdf1cbc5..fcea489fcff 100644
--- a/mysql-test/t/insert_select.test
+++ b/mysql-test/t/insert_select.test
@@ -226,4 +226,17 @@ insert into t1(x,y) select x,z from t2 on duplicate key update x=values(z);
 insert into t1(x,y) select x,z from t2 on duplicate key update x=values(t2.x);
 drop table t1,t2;
 
+#
+# Bug #9676: INSERT INTO x SELECT .. FROM x LIMIT 1; slows down with big
+# tables
+#
+
+# Note: not an exhaustive test: just a check of the code path.
+CREATE TABLE t1 (a int PRIMARY KEY);
+INSERT INTO t1 values (1), (2);
+
+INSERT INTO t1 SELECT a + 2 FROM t1 LIMIT 1;
+
+DROP TABLE t1;
+
 # End of 4.1 tests