34 files changed, 1882 insertions, 2320 deletions
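The result files in this change all exercise MySQL Cluster (NDB) disk data objects: logfile groups, tablespaces, and tables created with STORAGE DISK. A minimal sketch of that DDL cycle, using the same object names, paths, and sizes that appear in the tests below (and assuming a running NDB cluster whose data nodes can create the undo and data files at those relative paths):

-- create the undo log and the tablespace that will hold disk columns
CREATE LOGFILE GROUP lg
  ADD UNDOFILE './lg_group/undofile.dat'
  INITIAL_SIZE 16M
  UNDO_BUFFER_SIZE = 1M
  ENGINE=NDB;
CREATE TABLESPACE ts
  ADD DATAFILE './table_space/datafile.dat'
  USE LOGFILE GROUP lg
  INITIAL_SIZE 12M
  ENGINE NDB;
-- a disk-stored table; non-indexed columns live in the tablespace
CREATE TABLE test.t1 (a INT NOT NULL PRIMARY KEY, b INT)
  TABLESPACE ts STORAGE DISK ENGINE=NDB;
-- ... test workload runs here ...
-- teardown must drop objects in the reverse order of creation
DROP TABLE test.t1;
ALTER TABLESPACE ts DROP DATAFILE './table_space/datafile.dat' ENGINE NDB;
DROP TABLESPACE ts ENGINE NDB;
DROP LOGFILE GROUP lg ENGINE=NDB;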
diff --git a/mysql-test/r/ndb_dd_advance.result b/mysql-test/r/ndb_dd_advance.result deleted file mode 100644 index 64c30aab9ab..00000000000 --- a/mysql-test/r/ndb_dd_advance.result +++ /dev/null @@ -1,1088 +0,0 @@ -DROP TABLE IF EXISTS test.t1; -DROP TABLE IF EXISTS test.t2; -**** Test Setup Section **** -CREATE LOGFILE GROUP log_group1 -ADD UNDOFILE './log_group1/undofile.dat' -INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE table_space1 -ADD DATAFILE './table_space1/datafile.dat' -USE LOGFILE GROUP log_group1 -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE test.t1 -(pk1 INT NOT NULL PRIMARY KEY, b INT NOT NULL, c INT NOT NULL) -TABLESPACE table_space1 STORAGE DISK -ENGINE=NDB; -CREATE TABLE test.t2 -(pk2 INT NOT NULL PRIMARY KEY, b2 INT NOT NULL, c2 INT NOT NULL) -ENGINE=NDB; - -**** Data load for first test **** -INSERT INTO test.t1 VALUES -(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), -(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), -(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), -(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), -(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), -(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), -(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), -(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), -(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45), -(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50), -(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55), -(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60), -(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65), -(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70), -(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75); -INSERT INTO test.t2 VALUES -(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), -(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), -(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), -(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), -(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), -(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), -(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), -(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), -(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45), -(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50), -(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55), -(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60), -(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65), -(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70), -(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75); - -*** Test 1 Section Begins *** -SELECT COUNT(*) FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4); -COUNT(*) -1 -SELECT * FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4); -pk2 b2 c2 pk1 b c -4 4 4 4 4 4 -SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 WHERE b IN (4); -COUNT(*) -1 -SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2 WHERE pk1 IN (75); -COUNT(*) -1 -SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b; -b c -1 1 -2 2 -3 3 -4 4 -5 5 -6 6 -7 7 -8 8 -9 9 -10 10 -11 11 -12 12 -13 13 -14 14 -15 15 -16 16 -17 17 -18 18 -19 19 -20 20 -21 21 -22 22 -23 23 -24 24 -25 25 -26 26 -27 27 -28 28 -29 29 -30 30 -31 31 -32 32 -33 33 -34 34 -35 35 -36 36 -37 37 -38 38 -39 39 -40 40 -41 41 -42 42 -43 43 -44 44 -45 45 -46 46 -47 47 -48 48 -49 49 -50 50 -51 51 -52 52 -53 53 -54 54 -55 55 -56 56 -57 57 -58 58 -59 59 -60 60 -61 61 -62 62 -63 63 -64 64 -65 65 -66 66 -67 67 -68 68 -69 69 -70 70 -71 71 -72 72 -73 73 -74 74 -75 75 - -*** Setup 
for test 2 **** -DELETE FROM test.t1; -INSERT INTO test.t1 VALUES -(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), -(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), -(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), -(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), -(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), -(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), -(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), -(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), -(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45); - -**** Test Section 2 **** -SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b; -b c -1 1 -2 2 -3 3 -4 4 -5 5 -6 6 -7 7 -8 8 -9 9 -10 10 -11 11 -12 12 -13 13 -14 14 -15 15 -16 16 -17 17 -18 18 -19 19 -20 20 -21 21 -22 22 -23 23 -24 24 -25 25 -26 26 -27 27 -28 28 -29 29 -30 30 -31 31 -32 32 -33 33 -34 34 -35 35 -36 36 -37 37 -38 38 -39 39 -40 40 -41 41 -42 42 -43 43 -44 44 -45 45 -SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2; -COUNT(*) -45 -SELECT COUNT(*) FROM test.t1 RIGHT JOIN test.t2 ON b=b2; -COUNT(*) -75 -SHOW CREATE TABLE test.t2; -Table Create Table -t2 CREATE TABLE `t2` ( - `pk2` int(11) NOT NULL, - `b2` int(11) NOT NULL, - `c2` int(11) NOT NULL, - PRIMARY KEY (`pk2`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `pk1` int(11) NOT NULL, - `b` int(11) NOT NULL, - `c` int(11) NOT NULL, - PRIMARY KEY (`pk1`) -) /*!50100 TABLESPACE table_space1 STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 -ALTER TABLE test.t2 TABLESPACE table_space1 STORAGE DISK -ENGINE=NDB; -SHOW CREATE TABLE test.t2; -Table Create Table -t2 CREATE TABLE `t2` ( - `pk2` int(11) NOT NULL, - `b2` int(11) NOT NULL, - `c2` int(11) NOT NULL, - PRIMARY KEY (`pk2`) -) /*!50100 TABLESPACE table_space1 STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 -ALTER TABLE test.t1 ENGINE=NDBCLUSTER; -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `pk1` int(11) NOT NULL, - `b` int(11) NOT NULL, - `c` int(11) NOT NULL, - PRIMARY KEY (`pk1`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 - -DROP TABLE test.t1; -DROP TABLE test.t2; -*** Setup for Test Section 3 *** -CREATE TABLE test.t1 ( -usr_id INT unsigned NOT NULL, -uniq_id INT unsigned NOT NULL AUTO_INCREMENT, -start_num INT unsigned NOT NULL DEFAULT 1, -increment INT unsigned NOT NULL DEFAULT 1, -PRIMARY KEY (uniq_id), -INDEX usr_uniq_idx (usr_id, uniq_id), -INDEX uniq_usr_idx (uniq_id, usr_id)) -TABLESPACE table_space1 STORAGE DISK -ENGINE=NDB; -CREATE TABLE test.t2 ( -id INT unsigned NOT NULL DEFAULT 0, -usr2_id INT unsigned NOT NULL DEFAULT 0, -max INT unsigned NOT NULL DEFAULT 0, -c_amount INT unsigned NOT NULL DEFAULT 0, -d_max INT unsigned NOT NULL DEFAULT 0, -d_num INT unsigned NOT NULL DEFAULT 0, -orig_time INT unsigned NOT NULL DEFAULT 0, -c_time INT unsigned NOT NULL DEFAULT 0, -active ENUM ("no","yes") NOT NULL, -PRIMARY KEY (id,usr2_id), -INDEX id_idx (id), -INDEX usr2_idx (usr2_id)) -ENGINE=NDB; -INSERT INTO test.t1 VALUES (3,NULL,0,50),(3,NULL,0,200),(3,NULL,0,25),(3,NULL,0,84676),(3,NULL,0,235),(3,NULL,0,10),(3,NULL,0,3098),(3,NULL,0,2947),(3,NULL,0,8987),(3,NULL,0,8347654),(3,NULL,0,20398),(3,NULL,0,8976),(3,NULL,0,500),(3,NULL,0,198); - -**** Test Section 3 **** -SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment, -test.t2.usr2_id,test.t2.c_amount,test.t2.max -FROM test.t1 -LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id -WHERE test.t1.uniq_id = 4 -ORDER BY test.t2.c_amount; -usr_id 
uniq_id increment usr2_id c_amount max -3 4 84676 NULL NULL NULL -INSERT INTO test.t2 VALUES (2,3,3000,6000,0,0,746584,837484,'yes'); -INSERT INTO test.t2 VALUES (4,3,3000,6000,0,0,746584,837484,'yes'); -INSERT INTO test.t2 VALUES (7,3,1000,2000,0,0,746294,937484,'yes'); -SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment, -test.t2.usr2_id,test.t2.c_amount,test.t2.max -FROM test.t1 -LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id -WHERE test.t1.uniq_id = 4 -ORDER BY test.t2.c_amount; -usr_id uniq_id increment usr2_id c_amount max -3 4 84676 3 6000 3000 - -DROP TABLE test.t1; -DROP TABLE test.t2; -ALTER TABLESPACE table_space1 -DROP DATAFILE './table_space1/datafile.dat' -ENGINE = NDB; -DROP TABLESPACE table_space1 -ENGINE = NDB; -DROP LOGFILE GROUP log_group1 -ENGINE =NDB; -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLESPACE ts2 -ADD DATAFILE './table_space2/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE t1 (a int NOT NULL PRIMARY KEY, b int) -TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -CREATE TABLE t2 (a int NOT NULL PRIMARY KEY, b int) -ENGINE=NDB; -SHOW CREATE TABLE t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a` int(11) NOT NULL, - `b` int(11) DEFAULT NULL, - PRIMARY KEY (`a`) -) /*!50100 TABLESPACE ts1 STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 -SHOW CREATE TABLE t2; -Table Create Table -t2 CREATE TABLE `t2` ( - `a` int(11) NOT NULL, - `b` int(11) DEFAULT NULL, - PRIMARY KEY (`a`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 -INSERT INTO t1 VALUES (1,1); -INSERT INTO t1 VALUES (2,2); -SELECT * FROM t1 order by a; -a b -1 1 -2 2 -INSERT INTO t2(a,b) SELECT * FROM t1; -SELECT * FROM t2 order by a; -a b -1 1 -2 2 -TRUNCATE t1; -TRUNCATE t2; -INSERT INTO t2 VALUES (3,3); -INSERT INTO t2 VALUES (4,4); -INSERT INTO t1(a,b) SELECT * FROM t2; -SELECT * FROM t1 order by a; -a b -3 3 -4 4 -DROP TABLE t1, t2; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE NDB; -DROP TABLESPACE ts1 ENGINE NDB; -ALTER TABLESPACE ts2 -DROP DATAFILE './table_space2/datafile.dat' - ENGINE NDB; -DROP TABLESPACE ts2 ENGINE NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; -CREATE LOGFILE GROUP lg -ADD UNDOFILE './undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts -ADD DATAFILE './datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE test.t ( -a smallint NOT NULL, -b int NOT NULL, -c bigint NOT NULL, -d char(10), -e TEXT, -f VARCHAR(255), -PRIMARY KEY(a) -) TABLESPACE ts STORAGE DISK ENGINE=NDB; -ALTER TABLE test.t ADD INDEX (d), ADD INDEX (f); -SHOW CREATE TABLE test.t; -Table Create Table -t CREATE TABLE `t` ( - `a` smallint(6) NOT NULL, - `b` int(11) NOT NULL, - `c` bigint(20) NOT NULL, - `d` char(10) DEFAULT NULL, - `e` text, - `f` varchar(255) DEFAULT NULL, - PRIMARY KEY (`a`), - KEY `d` (`d`), - KEY `f` (`f`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 -SELECT * FROM test.t order by a; -a b c d e f -1 2 3 aaa1 bbb1 ccccc1 -2 3 4 aaa2 bbb2 ccccc2 -3 4 5 aaa3 bbb3 ccccc3 -4 5 6 aaa4 bbb4 ccccc4 -5 6 7 aaa5 bbb5 ccccc5 -6 7 8 aaa6 bbb6 ccccc6 -7 8 9 aaa7 bbb7 ccccc7 -8 9 10 aaa8 bbb8 ccccc8 -9 10 11 aaa9 bbb9 ccccc9 -10 11 12 aaa10 bbb10 ccccc10 -11 12 13 aaa11 bbb11 ccccc11 -12 13 14 aaa12 bbb12 ccccc12 -13 14 15 aaa13 bbb13 ccccc13 -14 15 16 aaa14 bbb14 ccccc14 -15 16 
17 aaa15 bbb15 ccccc15 -16 17 18 aaa16 bbb16 ccccc16 -17 18 19 aaa17 bbb17 ccccc17 -18 19 20 aaa18 bbb18 ccccc18 -19 20 21 aaa19 bbb19 ccccc19 -20 21 22 aaa20 bbb20 ccccc20 -21 22 23 aaa21 bbb21 ccccc21 -22 23 24 aaa22 bbb22 ccccc22 -23 24 25 aaa23 bbb23 ccccc23 -24 25 26 aaa24 bbb24 ccccc24 -25 26 27 aaa25 bbb25 ccccc25 -26 27 28 aaa26 bbb26 ccccc26 -27 28 29 aaa27 bbb27 ccccc27 -28 29 30 aaa28 bbb28 ccccc28 -29 30 31 aaa29 bbb29 ccccc29 -30 31 32 aaa30 bbb30 ccccc30 -31 32 33 aaa31 bbb31 ccccc31 -32 33 34 aaa32 bbb32 ccccc32 -33 34 35 aaa33 bbb33 ccccc33 -34 35 36 aaa34 bbb34 ccccc34 -35 36 37 aaa35 bbb35 ccccc35 -36 37 38 aaa36 bbb36 ccccc36 -37 38 39 aaa37 bbb37 ccccc37 -38 39 40 aaa38 bbb38 ccccc38 -39 40 41 aaa39 bbb39 ccccc39 -40 41 42 aaa40 bbb40 ccccc40 -41 42 43 aaa41 bbb41 ccccc41 -42 43 44 aaa42 bbb42 ccccc42 -43 44 45 aaa43 bbb43 ccccc43 -44 45 46 aaa44 bbb44 ccccc44 -45 46 47 aaa45 bbb45 ccccc45 -46 47 48 aaa46 bbb46 ccccc46 -47 48 49 aaa47 bbb47 ccccc47 -48 49 50 aaa48 bbb48 ccccc48 -49 50 51 aaa49 bbb49 ccccc49 -50 51 52 aaa50 bbb50 ccccc50 -51 52 53 aaa51 bbb51 ccccc51 -52 53 54 aaa52 bbb52 ccccc52 -53 54 55 aaa53 bbb53 ccccc53 -54 55 56 aaa54 bbb54 ccccc54 -55 56 57 aaa55 bbb55 ccccc55 -56 57 58 aaa56 bbb56 ccccc56 -57 58 59 aaa57 bbb57 ccccc57 -58 59 60 aaa58 bbb58 ccccc58 -59 60 61 aaa59 bbb59 ccccc59 -60 61 62 aaa60 bbb60 ccccc60 -61 62 63 aaa61 bbb61 ccccc61 -62 63 64 aaa62 bbb62 ccccc62 -63 64 65 aaa63 bbb63 ccccc63 -64 65 66 aaa64 bbb64 ccccc64 -65 66 67 aaa65 bbb65 ccccc65 -66 67 68 aaa66 bbb66 ccccc66 -67 68 69 aaa67 bbb67 ccccc67 -68 69 70 aaa68 bbb68 ccccc68 -69 70 71 aaa69 bbb69 ccccc69 -70 71 72 aaa70 bbb70 ccccc70 -71 72 73 aaa71 bbb71 ccccc71 -72 73 74 aaa72 bbb72 ccccc72 -73 74 75 aaa73 bbb73 ccccc73 -74 75 76 aaa74 bbb74 ccccc74 -75 76 77 aaa75 bbb75 ccccc75 -76 77 78 aaa76 bbb76 ccccc76 -77 78 79 aaa77 bbb77 ccccc77 -78 79 80 aaa78 bbb78 ccccc78 -79 80 81 aaa79 bbb79 ccccc79 -80 81 82 aaa80 bbb80 ccccc80 -81 82 83 aaa81 bbb81 ccccc81 -82 83 84 aaa82 bbb82 ccccc82 -83 84 85 aaa83 bbb83 ccccc83 -84 85 86 aaa84 bbb84 ccccc84 -85 86 87 aaa85 bbb85 ccccc85 -86 87 88 aaa86 bbb86 ccccc86 -87 88 89 aaa87 bbb87 ccccc87 -88 89 90 aaa88 bbb88 ccccc88 -89 90 91 aaa89 bbb89 ccccc89 -90 91 92 aaa90 bbb90 ccccc90 -91 92 93 aaa91 bbb91 ccccc91 -92 93 94 aaa92 bbb92 ccccc92 -93 94 95 aaa93 bbb93 ccccc93 -94 95 96 aaa94 bbb94 ccccc94 -95 96 97 aaa95 bbb95 ccccc95 -96 97 98 aaa96 bbb96 ccccc96 -97 98 99 aaa97 bbb97 ccccc97 -98 99 100 aaa98 bbb98 ccccc98 -99 100 101 aaa99 bbb99 ccccc99 -100 101 102 aaa100 bbb100 ccccc100 -DROP TABLE test.t; -USE test; -show tables; -Tables_in_test -t -SELECT * FROM test.t order by a; -a b c d e f -1 2 3 aaa1 bbb1 ccccc1 -2 3 4 aaa2 bbb2 ccccc2 -3 4 5 aaa3 bbb3 ccccc3 -4 5 6 aaa4 bbb4 ccccc4 -5 6 7 aaa5 bbb5 ccccc5 -6 7 8 aaa6 bbb6 ccccc6 -7 8 9 aaa7 bbb7 ccccc7 -8 9 10 aaa8 bbb8 ccccc8 -9 10 11 aaa9 bbb9 ccccc9 -10 11 12 aaa10 bbb10 ccccc10 -11 12 13 aaa11 bbb11 ccccc11 -12 13 14 aaa12 bbb12 ccccc12 -13 14 15 aaa13 bbb13 ccccc13 -14 15 16 aaa14 bbb14 ccccc14 -15 16 17 aaa15 bbb15 ccccc15 -16 17 18 aaa16 bbb16 ccccc16 -17 18 19 aaa17 bbb17 ccccc17 -18 19 20 aaa18 bbb18 ccccc18 -19 20 21 aaa19 bbb19 ccccc19 -20 21 22 aaa20 bbb20 ccccc20 -21 22 23 aaa21 bbb21 ccccc21 -22 23 24 aaa22 bbb22 ccccc22 -23 24 25 aaa23 bbb23 ccccc23 -24 25 26 aaa24 bbb24 ccccc24 -25 26 27 aaa25 bbb25 ccccc25 -26 27 28 aaa26 bbb26 ccccc26 -27 28 29 aaa27 bbb27 ccccc27 -28 29 30 aaa28 bbb28 ccccc28 -29 30 31 aaa29 bbb29 ccccc29 -30 31 32 aaa30 bbb30 ccccc30 -31 32 33 
aaa31 bbb31 ccccc31 -32 33 34 aaa32 bbb32 ccccc32 -33 34 35 aaa33 bbb33 ccccc33 -34 35 36 aaa34 bbb34 ccccc34 -35 36 37 aaa35 bbb35 ccccc35 -36 37 38 aaa36 bbb36 ccccc36 -37 38 39 aaa37 bbb37 ccccc37 -38 39 40 aaa38 bbb38 ccccc38 -39 40 41 aaa39 bbb39 ccccc39 -40 41 42 aaa40 bbb40 ccccc40 -41 42 43 aaa41 bbb41 ccccc41 -42 43 44 aaa42 bbb42 ccccc42 -43 44 45 aaa43 bbb43 ccccc43 -44 45 46 aaa44 bbb44 ccccc44 -45 46 47 aaa45 bbb45 ccccc45 -46 47 48 aaa46 bbb46 ccccc46 -47 48 49 aaa47 bbb47 ccccc47 -48 49 50 aaa48 bbb48 ccccc48 -49 50 51 aaa49 bbb49 ccccc49 -50 51 52 aaa50 bbb50 ccccc50 -51 52 53 aaa51 bbb51 ccccc51 -52 53 54 aaa52 bbb52 ccccc52 -53 54 55 aaa53 bbb53 ccccc53 -54 55 56 aaa54 bbb54 ccccc54 -55 56 57 aaa55 bbb55 ccccc55 -56 57 58 aaa56 bbb56 ccccc56 -57 58 59 aaa57 bbb57 ccccc57 -58 59 60 aaa58 bbb58 ccccc58 -59 60 61 aaa59 bbb59 ccccc59 -60 61 62 aaa60 bbb60 ccccc60 -61 62 63 aaa61 bbb61 ccccc61 -62 63 64 aaa62 bbb62 ccccc62 -63 64 65 aaa63 bbb63 ccccc63 -64 65 66 aaa64 bbb64 ccccc64 -65 66 67 aaa65 bbb65 ccccc65 -66 67 68 aaa66 bbb66 ccccc66 -67 68 69 aaa67 bbb67 ccccc67 -68 69 70 aaa68 bbb68 ccccc68 -69 70 71 aaa69 bbb69 ccccc69 -70 71 72 aaa70 bbb70 ccccc70 -71 72 73 aaa71 bbb71 ccccc71 -72 73 74 aaa72 bbb72 ccccc72 -73 74 75 aaa73 bbb73 ccccc73 -74 75 76 aaa74 bbb74 ccccc74 -75 76 77 aaa75 bbb75 ccccc75 -76 77 78 aaa76 bbb76 ccccc76 -77 78 79 aaa77 bbb77 ccccc77 -78 79 80 aaa78 bbb78 ccccc78 -79 80 81 aaa79 bbb79 ccccc79 -80 81 82 aaa80 bbb80 ccccc80 -81 82 83 aaa81 bbb81 ccccc81 -82 83 84 aaa82 bbb82 ccccc82 -83 84 85 aaa83 bbb83 ccccc83 -84 85 86 aaa84 bbb84 ccccc84 -85 86 87 aaa85 bbb85 ccccc85 -86 87 88 aaa86 bbb86 ccccc86 -87 88 89 aaa87 bbb87 ccccc87 -88 89 90 aaa88 bbb88 ccccc88 -89 90 91 aaa89 bbb89 ccccc89 -90 91 92 aaa90 bbb90 ccccc90 -91 92 93 aaa91 bbb91 ccccc91 -92 93 94 aaa92 bbb92 ccccc92 -93 94 95 aaa93 bbb93 ccccc93 -94 95 96 aaa94 bbb94 ccccc94 -95 96 97 aaa95 bbb95 ccccc95 -96 97 98 aaa96 bbb96 ccccc96 -97 98 99 aaa97 bbb97 ccccc97 -98 99 100 aaa98 bbb98 ccccc98 -99 100 101 aaa99 bbb99 ccccc99 -100 101 102 aaa100 bbb100 ccccc100 -DROP TABLE test.t; -ALTER TABLESPACE ts -DROP DATAFILE './datafile.dat' - ENGINE NDB; -DROP TABLESPACE ts ENGINE NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; -DROP table IF EXISTS test.t1; -Warnings: -Note 1051 Unknown table 't1' -DROP table IF EXISTS test.t2; -Warnings: -Note 1051 Unknown table 't2' -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLESPACE ts2 -ADD DATAFILE './table_space2/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE test.t1 ( -a1 smallint NOT NULL, -a2 int NOT NULL, -a3 bigint NOT NULL, -a4 char(10), -a5 decimal(5,1), -a6 time, -a7 date, -a8 datetime, -a9 VARCHAR(255), -a10 blob, -PRIMARY KEY(a1) -) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a8); -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` smallint(6) NOT NULL, - `a2` int(11) NOT NULL, - `a3` bigint(20) NOT NULL, - `a4` char(10) DEFAULT NULL, - `a5` decimal(5,1) DEFAULT NULL, - `a6` time DEFAULT NULL, - `a7` date DEFAULT NULL, - `a8` datetime DEFAULT NULL, - `a9` varchar(255) DEFAULT NULL, - `a10` blob, - PRIMARY KEY (`a1`), - KEY `a2` (`a2`), - KEY `a3` (`a3`), - KEY `a8` (`a8`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 -CREATE TABLE 
test.t2 ( -b1 smallint NOT NULL, -b2 int NOT NULL, -b3 bigint NOT NULL, -b4 char(10), -b5 decimal(5,1), -b6 time, -b7 date, -b8 datetime, -b9 VARCHAR(255), -b10 blob, -PRIMARY KEY(b1) -) ENGINE=NDB; -ALTER TABLE test.t2 ADD INDEX (b2), ADD INDEX (b3), ADD INDEX (b8); -SHOW CREATE TABLE test.t2; -Table Create Table -t2 CREATE TABLE `t2` ( - `b1` smallint(6) NOT NULL, - `b2` int(11) NOT NULL, - `b3` bigint(20) NOT NULL, - `b4` char(10) DEFAULT NULL, - `b5` decimal(5,1) DEFAULT NULL, - `b6` time DEFAULT NULL, - `b7` date DEFAULT NULL, - `b8` datetime DEFAULT NULL, - `b9` varchar(255) DEFAULT NULL, - `b10` blob, - PRIMARY KEY (`b1`), - KEY `b2` (`b2`), - KEY `b3` (`b3`), - KEY `b8` (`b8`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 -SELECT * FROM test.t1 order by a1; -a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 -1 2 2000000001 aaa1 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data -2 3 2000000002 aaa2 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data -3 4 2000000003 aaa3 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb3 binary data -4 5 2000000004 aaa4 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data -5 6 2000000005 aaa5 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data -6 7 2000000006 aaa6 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data -7 8 2000000007 aaa7 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data -8 9 2000000008 aaa8 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data -9 10 2000000009 aaa9 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data -10 11 2000000010 aaa10 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data -11 12 2000000011 aaa11 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data -12 13 2000000012 aaa12 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data -13 14 2000000013 aaa13 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data -14 15 2000000014 aaa14 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data -15 16 2000000015 aaa15 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data -16 17 2000000016 aaa16 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data -17 18 2000000017 aaa17 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data -18 19 2000000018 aaa18 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data -19 20 2000000019 aaa19 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb19 binary data -20 21 2000000020 aaa20 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data -SELECT * FROM test.t2 order by b1; -b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 -3 4 3000000001 aaa1 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data -4 5 3000000002 aaa2 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data -5 6 3000000003 aaa3 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb3 binary data -6 7 3000000004 aaa4 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data -7 8 3000000005 aaa5 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data -8 9 3000000006 aaa6 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data -9 10 3000000007 aaa7 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data -10 11 3000000008 aaa8 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data -11 12 3000000009 aaa9 35.2 
04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data -12 13 3000000010 aaa10 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data -13 14 3000000011 aaa11 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data -14 15 3000000012 aaa12 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data -15 16 3000000013 aaa13 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data -16 17 3000000014 aaa14 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data -17 18 3000000015 aaa15 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data -18 19 3000000016 aaa16 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data -19 20 3000000017 aaa17 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data -20 21 3000000018 aaa18 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data -21 22 3000000019 aaa19 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb19 binary data -22 23 3000000020 aaa20 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data -SELECT COUNT(a1), a1, COUNT(a1)*a1 FROM test.t1 GROUP BY a1; -COUNT(a1) a1 COUNT(a1)*a1 -1 1 1 -1 2 2 -1 3 3 -1 4 4 -1 5 5 -1 6 6 -1 7 7 -1 8 8 -1 9 9 -1 10 10 -1 11 11 -1 12 12 -1 13 13 -1 14 14 -1 15 15 -1 16 16 -1 17 17 -1 18 18 -1 19 19 -1 20 20 -SELECT COUNT(a2), (a2+1), COUNT(a2)*(a2+0) FROM test.t1 GROUP BY a2; -COUNT(a2) (a2+1) COUNT(a2)*(a2+0) -1 3 2 -1 4 3 -1 5 4 -1 6 5 -1 7 6 -1 8 7 -1 9 8 -1 10 9 -1 11 10 -1 12 11 -1 13 12 -1 14 13 -1 15 14 -1 16 15 -1 17 16 -1 18 17 -1 19 18 -1 20 19 -1 21 20 -1 22 21 -DROP TABLE test.t1; -DROP TABLE test.t2; -create table test.t1 (a int not null,b char(5), c text) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -Warnings: -Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK' -insert into test.t1 (a) values (1),(2),(3),(4),(1),(2),(3),(4); -select distinct a from test.t1 group by b,a having a > 2 order by a desc; -a -4 -3 -select distinct a,c from test.t1 group by b,c,a having a > 2 order by a desc; -a c -4 NULL -3 NULL -select distinct a from test.t1 group by b,a having a > 2 order by a asc; -a -3 -4 -select distinct a,c from test.t1 group by b,c,a having a > 2 order by a asc; -a c -3 NULL -4 NULL -drop table test.t1; -create table test.t1 (a char(1), key(a)) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -insert into test.t1 values('1'),('1'),('1'),('2'),('2'),('3'),('3'); -select * from test.t1 where a >= '1' order by a; -a -1 -1 -1 -2 -2 -3 -3 -select distinct a from test.t1 order by a desc; -a -3 -2 -1 -select distinct a from test.t1 where a >= '1' order by a desc; -a -3 -2 -1 -select distinct a from test.t1 where a >= '1' order by a asc; -a -1 -2 -3 -drop table test.t1; -CREATE TABLE test.t1 (email varchar(50), infoID BIGINT, dateentered DATETIME) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -CREATE TABLE test.t2 (infoID BIGINT, shipcode varchar(10)) ENGINE=NDB; -INSERT INTO test.t1 (email, infoID, dateentered) VALUES -('test1@testdomain.com', 1, '2002-07-30 22:56:38'), -('test1@testdomain.com', 1, '2002-07-27 22:58:16'), -('test2@testdomain.com', 1, '2002-06-19 15:22:19'), -('test2@testdomain.com', 2, '2002-06-18 14:23:47'), -('test3@testdomain.com', 1, '2002-05-19 22:17:32'); -INSERT INTO test.t2(infoID, shipcode) VALUES -(1, 'Z001'), -(2, 'R002'); -SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE t1.infoID=t2.infoID order by email, shipcode; -email 
shipcode -test1@testdomain.com Z001 -test2@testdomain.com R002 -test2@testdomain.com Z001 -test3@testdomain.com Z001 -SELECT DISTINCTROW email FROM test.t1 ORDER BY dateentered DESC; -email -test1@testdomain.com -test2@testdomain.com -test3@testdomain.com -SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE test.t1.infoID=test.t2.infoID ORDER BY dateentered DESC; -email shipcode -test1@testdomain.com Z001 -test2@testdomain.com Z001 -test2@testdomain.com R002 -test3@testdomain.com Z001 -drop table test.t1,test.t2; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE NDB; -DROP TABLESPACE ts1 ENGINE NDB; -ALTER TABLESPACE ts2 -DROP DATAFILE './table_space2/datafile.dat' - ENGINE NDB; -DROP TABLESPACE ts2 ENGINE NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; -DROP TABLE IF EXISTS test.t; -Warnings: -Note 1051 Unknown table 't' -create table test.t (f1 varchar(50) primary key, f2 text,f3 int) engine=NDB; -insert into test.t (f1,f2,f3)VALUES("111111","aaaaaa",1); -insert into test.t (f1,f2,f3)VALUES("222222","bbbbbb",2); -select * from test.t order by f1; -f1 f2 f3 -111111 aaaaaa 1 -222222 bbbbbb 2 -select f1,f2 from test.t order by f2; -f1 f2 -111111 aaaaaa -222222 bbbbbb -select f2 from test.t order by f2; -f2 -aaaaaa -bbbbbb -select f1,f2 from test.t order by f1; -f1 f2 -111111 aaaaaa -222222 bbbbbb -drop table test.t; -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts -ADD DATAFILE './table_space/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=InnoDB; -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) DEFAULT NULL, - `a2` blob, - `a3` text -) ENGINE=InnoDB DEFAULT CHARSET=latin1 -ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; -Warnings: -Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK' -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) DEFAULT NULL, - `a2` blob, - `a3` text -) /*!50100 TABLESPACE ts STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 -DROP TABLE test.t1; -CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=MyISAM; -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) DEFAULT NULL, - `a2` blob, - `a3` text -) ENGINE=MyISAM DEFAULT CHARSET=latin1 -ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; -Warnings: -Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK' -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) DEFAULT NULL, - `a2` blob, - `a3` text -) /*!50100 TABLESPACE ts STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 -DROP TABLE test.t1; -CREATE TABLE test.t1 (a1 INT PRIMARY KEY, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB; -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) NOT NULL, - `a2` blob, - `a3` text, - PRIMARY KEY (`a1`) -) /*!50100 TABLESPACE ts STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 -ALTER TABLE test.t1 ENGINE=InnoDB; -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) NOT NULL, - `a2` blob, - `a3` text, - PRIMARY KEY (`a1`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1 -DROP TABLE test.t1; -CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) TABLESPACE 
ts STORAGE DISK ENGINE=NDB; -Warnings: -Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK' -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) DEFAULT NULL, - `a2` blob, - `a3` text -) /*!50100 TABLESPACE ts STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 -ALTER TABLE test.t1 ENGINE=MyISAM; -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) DEFAULT NULL, - `a2` blob, - `a3` text -) ENGINE=MyISAM DEFAULT CHARSET=latin1 -DROP TABLE test.t1; -CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB; -ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB; -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) NOT NULL, - `a2` float DEFAULT NULL, - `a3` double DEFAULT NULL, - `a4` bit(1) DEFAULT NULL, - `a5` tinyint(4) DEFAULT NULL, - `a6` bigint(20) DEFAULT NULL, - `a7` date DEFAULT NULL, - `a8` time DEFAULT NULL, - `a9` datetime DEFAULT NULL, - `a10` tinytext, - `a11` mediumtext, - `a12` longtext, - `a13` text, - `a14` blob, - PRIMARY KEY (`a1`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 -ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a5), ADD INDEX (a6), -ADD INDEX (a7), ADD INDEX (a8); -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) NOT NULL, - `a2` float DEFAULT NULL, - `a3` double DEFAULT NULL, - `a4` bit(1) DEFAULT NULL, - `a5` tinyint(4) DEFAULT NULL, - `a6` bigint(20) DEFAULT NULL, - `a7` date DEFAULT NULL, - `a8` time DEFAULT NULL, - `a9` datetime DEFAULT NULL, - `a10` tinytext, - `a11` mediumtext, - `a12` longtext, - `a13` text, - `a14` blob, - PRIMARY KEY (`a1`), - KEY `a2` (`a2`), - KEY `a3` (`a3`), - KEY `a5` (`a5`), - KEY `a6` (`a6`), - KEY `a7` (`a7`), - KEY `a8` (`a8`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 -DROP TABLE test.t1; -CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB; -ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB; -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) NOT NULL, - `a2` float DEFAULT NULL, - `a3` double DEFAULT NULL, - `a4` bit(1) DEFAULT NULL, - `a5` tinyint(4) DEFAULT NULL, - `a6` bigint(20) DEFAULT NULL, - `a7` date DEFAULT NULL, - `a8` time DEFAULT NULL, - `a9` datetime DEFAULT NULL, - `a10` tinytext, - `a11` mediumtext, - `a12` longtext, - `a13` text, - `a14` blob, - PRIMARY KEY (`a1`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 -ALTER TABLE test.t1 DROP a14; -ALTER TABLE test.t1 DROP a13; -ALTER TABLE test.t1 DROP a12; -ALTER TABLE test.t1 DROP a11; -ALTER TABLE test.t1 DROP a10; -ALTER TABLE test.t1 DROP a9; -ALTER TABLE test.t1 DROP a8; -ALTER TABLE test.t1 DROP a7; -ALTER TABLE test.t1 DROP a6; -ALTER TABLE test.t1 DROP PRIMARY KEY; -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) NOT NULL, - `a2` float DEFAULT NULL, - `a3` double DEFAULT NULL, - `a4` bit(1) DEFAULT NULL, - `a5` tinyint(4) DEFAULT NULL -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 -DROP TABLE test.t1; -ALTER TABLESPACE ts -DROP DATAFILE './table_space/datafile.dat' - ENGINE NDB; 
-DROP TABLESPACE ts ENGINE NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; diff --git a/mysql-test/r/ndb_dd_alter.result b/mysql-test/r/ndb_dd_alter.result new file mode 100644 index 00000000000..a9505747a94 --- /dev/null +++ b/mysql-test/r/ndb_dd_alter.result @@ -0,0 +1,441 @@ +DROP TABLE IF EXISTS test.t1; +DROP TABLE IF EXISTS test.t2; +**** Test Setup Section **** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts +ADD DATAFILE './table_space/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 ( +a1 smallint NOT NULL, +a2 int NOT NULL, +a3 bigint NOT NULL, +a4 char(10), +a5 decimal(5,1), +a6 time, +a7 date, +a8 datetime, +a9 VARCHAR(255), +a10 blob, +PRIMARY KEY(a1) +) ENGINE=InnoDB; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` smallint(6) NOT NULL, + `a2` int(11) NOT NULL, + `a3` bigint(20) NOT NULL, + `a4` char(10) DEFAULT NULL, + `a5` decimal(5,1) DEFAULT NULL, + `a6` time DEFAULT NULL, + `a7` date DEFAULT NULL, + `a8` datetime DEFAULT NULL, + `a9` varchar(255) DEFAULT NULL, + `a10` blob, + PRIMARY KEY (`a1`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +SELECT * FROM test.t1 ORDER BY a1; +a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 +1 2 2000000001 aaa1 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data +2 3 2000000002 aaa2 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data +3 4 2000000003 aaa3 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb3 binary data +4 5 2000000004 aaa4 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data +5 6 2000000005 aaa5 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data +6 7 2000000006 aaa6 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data +7 8 2000000007 aaa7 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data +8 9 2000000008 aaa8 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data +9 10 2000000009 aaa9 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data +10 11 2000000010 aaa10 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data +11 12 2000000011 aaa11 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data +12 13 2000000012 aaa12 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data +13 14 2000000013 aaa13 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data +14 15 2000000014 aaa14 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data +15 16 2000000015 aaa15 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data +16 17 2000000016 aaa16 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data +17 18 2000000017 aaa17 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data +18 19 2000000018 aaa18 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data +19 20 2000000019 aaa19 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb19 binary data +20 21 2000000020 aaa20 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data +ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` smallint(6) NOT NULL, + `a2` int(11) NOT NULL, + `a3` bigint(20) NOT NULL, + `a4` char(10) DEFAULT NULL, + `a5` decimal(5,1) DEFAULT NULL, + `a6` time DEFAULT NULL, + `a7` date DEFAULT NULL, + `a8` datetime DEFAULT 
NULL, + `a9` varchar(255) DEFAULT NULL, + `a10` blob, + PRIMARY KEY (`a1`) +) /*!50100 TABLESPACE ts STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 +SELECT * FROM test.t1 ORDER BY a1; +a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 +1 2 2000000001 aaa1 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data +2 3 2000000002 aaa2 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data +3 4 2000000003 aaa3 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb3 binary data +4 5 2000000004 aaa4 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data +5 6 2000000005 aaa5 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data +6 7 2000000006 aaa6 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data +7 8 2000000007 aaa7 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data +8 9 2000000008 aaa8 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data +9 10 2000000009 aaa9 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data +10 11 2000000010 aaa10 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data +11 12 2000000011 aaa11 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data +12 13 2000000012 aaa12 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data +13 14 2000000013 aaa13 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data +14 15 2000000014 aaa14 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data +15 16 2000000015 aaa15 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data +16 17 2000000016 aaa16 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data +17 18 2000000017 aaa17 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data +18 19 2000000018 aaa18 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data +19 20 2000000019 aaa19 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb19 binary data +20 21 2000000020 aaa20 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data +DROP TABLE test.t1; +CREATE TABLE test.t1 ( +a1 smallint NOT NULL, +a2 int NOT NULL, +a3 bigint NOT NULL, +a4 char(10), +a5 decimal(5,1), +a6 time, +a7 date, +a8 datetime, +a9 VARCHAR(255), +a10 blob, +PRIMARY KEY(a1) +) ENGINE=MyISAM; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` smallint(6) NOT NULL, + `a2` int(11) NOT NULL, + `a3` bigint(20) NOT NULL, + `a4` char(10) DEFAULT NULL, + `a5` decimal(5,1) DEFAULT NULL, + `a6` time DEFAULT NULL, + `a7` date DEFAULT NULL, + `a8` datetime DEFAULT NULL, + `a9` varchar(255) DEFAULT NULL, + `a10` blob, + PRIMARY KEY (`a1`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +SELECT * FROM test.t1 ORDER BY a1; +a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 +1 2 2000000001 aaa1 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data +2 3 2000000002 aaa2 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data +3 4 2000000003 aaa3 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb3 binary data +4 5 2000000004 aaa4 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data +5 6 2000000005 aaa5 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data +6 7 2000000006 aaa6 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data +7 8 2000000007 aaa7 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data +8 9 2000000008 aaa8 34.2 04:03:02 2006-01-01 1971-05-28 
16:55:03 bbbbbbbbbbbbb8 binary data +9 10 2000000009 aaa9 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data +10 11 2000000010 aaa10 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data +11 12 2000000011 aaa11 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data +12 13 2000000012 aaa12 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data +13 14 2000000013 aaa13 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data +14 15 2000000014 aaa14 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data +15 16 2000000015 aaa15 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data +16 17 2000000016 aaa16 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data +17 18 2000000017 aaa17 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data +18 19 2000000018 aaa18 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data +19 20 2000000019 aaa19 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb19 binary data +20 21 2000000020 aaa20 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data +ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` smallint(6) NOT NULL, + `a2` int(11) NOT NULL, + `a3` bigint(20) NOT NULL, + `a4` char(10) DEFAULT NULL, + `a5` decimal(5,1) DEFAULT NULL, + `a6` time DEFAULT NULL, + `a7` date DEFAULT NULL, + `a8` datetime DEFAULT NULL, + `a9` varchar(255) DEFAULT NULL, + `a10` blob, + PRIMARY KEY (`a1`) +) /*!50100 TABLESPACE ts STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 +SELECT * FROM test.t1 ORDER BY a1; +a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 +1 2 2000000001 aaa1 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data +2 3 2000000002 aaa2 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data +3 4 2000000003 aaa3 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb3 binary data +4 5 2000000004 aaa4 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data +5 6 2000000005 aaa5 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data +6 7 2000000006 aaa6 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data +7 8 2000000007 aaa7 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data +8 9 2000000008 aaa8 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data +9 10 2000000009 aaa9 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data +10 11 2000000010 aaa10 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data +11 12 2000000011 aaa11 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data +12 13 2000000012 aaa12 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data +13 14 2000000013 aaa13 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data +14 15 2000000014 aaa14 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data +15 16 2000000015 aaa15 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data +16 17 2000000016 aaa16 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data +17 18 2000000017 aaa17 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data +18 19 2000000018 aaa18 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data +19 20 2000000019 aaa19 34.2 04:03:02 2006-01-01 1971-05-28 
16:55:03 bbbbbbbbbbbbb19 binary data +20 21 2000000020 aaa20 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data +ALTER TABLE test.t1 ENGINE=InnoDB; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` smallint(6) NOT NULL, + `a2` int(11) NOT NULL, + `a3` bigint(20) NOT NULL, + `a4` char(10) DEFAULT NULL, + `a5` decimal(5,1) DEFAULT NULL, + `a6` time DEFAULT NULL, + `a7` date DEFAULT NULL, + `a8` datetime DEFAULT NULL, + `a9` varchar(255) DEFAULT NULL, + `a10` blob, + PRIMARY KEY (`a1`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +SELECT * FROM test.t1 ORDER BY a1; +a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 +1 2 2000000001 aaa1 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data +2 3 2000000002 aaa2 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data +3 4 2000000003 aaa3 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb3 binary data +4 5 2000000004 aaa4 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data +5 6 2000000005 aaa5 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data +6 7 2000000006 aaa6 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data +7 8 2000000007 aaa7 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data +8 9 2000000008 aaa8 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data +9 10 2000000009 aaa9 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data +10 11 2000000010 aaa10 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data +11 12 2000000011 aaa11 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data +12 13 2000000012 aaa12 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data +13 14 2000000013 aaa13 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data +14 15 2000000014 aaa14 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data +15 16 2000000015 aaa15 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data +16 17 2000000016 aaa16 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data +17 18 2000000017 aaa17 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data +18 19 2000000018 aaa18 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data +19 20 2000000019 aaa19 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb19 binary data +20 21 2000000020 aaa20 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data +ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` smallint(6) NOT NULL, + `a2` int(11) NOT NULL, + `a3` bigint(20) NOT NULL, + `a4` char(10) DEFAULT NULL, + `a5` decimal(5,1) DEFAULT NULL, + `a6` time DEFAULT NULL, + `a7` date DEFAULT NULL, + `a8` datetime DEFAULT NULL, + `a9` varchar(255) DEFAULT NULL, + `a10` blob, + PRIMARY KEY (`a1`) +) /*!50100 TABLESPACE ts STORAGE DISK */ ENGINE=ndbcluster DEFAULT CHARSET=latin1 +ALTER TABLE test.t1 ENGINE=MyISAM; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` smallint(6) NOT NULL, + `a2` int(11) NOT NULL, + `a3` bigint(20) NOT NULL, + `a4` char(10) DEFAULT NULL, + `a5` decimal(5,1) DEFAULT NULL, + `a6` time DEFAULT NULL, + `a7` date DEFAULT NULL, + `a8` datetime DEFAULT NULL, + `a9` varchar(255) DEFAULT NULL, + `a10` blob, + PRIMARY KEY (`a1`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE test.t1; +CREATE TABLE test.t1 
(a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB; +SELECT * FROM test.t1 ORDER BY a1; +a1 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE; +SELECT * FROM test.t1 ORDER BY a1; +a1 a2 a3 +1 2.2345 20000001 +2 3.2345 20000002 +3 4.2345 20000003 +4 5.2345 20000004 +5 6.2345 20000005 +6 7.2345 20000006 +7 8.2345 20000007 +8 9.2345 20000008 +9 10.2345 20000009 +10 11.2345 20000010 +11 12.2345 20000011 +12 13.2345 20000012 +13 14.2345 20000013 +14 15.2345 20000014 +15 16.2345 20000015 +16 17.2345 20000016 +17 18.2345 20000017 +18 19.2345 20000018 +19 20.2345 20000019 +20 21.2345 20000020 +ALTER TABLE test.t1 ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME; +SELECT a1,a2,a3,hex(a4), a5,a6,a7,a8 FROM test.t1 ORDER BY a1; +a1 a2 a3 hex(a4) a5 a6 a7 a8 +1 2.2345 20000001 0 1 23457 2006-01-01 07:04:00 +2 3.2345 20000002 0 1 23458 2006-01-01 07:04:00 +3 4.2345 20000003 0 1 23459 2006-01-01 07:04:00 +4 5.2345 20000004 0 1 23460 2006-01-01 07:04:00 +5 6.2345 20000005 0 1 23461 2006-01-01 07:04:00 +6 7.2345 20000006 0 1 23462 2006-01-01 07:04:00 +7 8.2345 20000007 0 1 23463 2006-01-01 07:04:00 +8 9.2345 20000008 0 1 23464 2006-01-01 07:04:00 +9 10.2345 20000009 0 1 23465 2006-01-01 07:04:00 +10 11.2345 20000010 0 1 23466 2006-01-01 07:04:00 +11 12.2345 20000011 0 1 23467 2006-01-01 07:04:00 +12 13.2345 20000012 0 1 23468 2006-01-01 07:04:00 +13 14.2345 20000013 0 1 23469 2006-01-01 07:04:00 +14 15.2345 20000014 0 1 23470 2006-01-01 07:04:00 +15 16.2345 20000015 0 1 23471 2006-01-01 07:04:00 +16 17.2345 20000016 0 1 23472 2006-01-01 07:04:00 +17 18.2345 20000017 0 1 23473 2006-01-01 07:04:00 +18 19.2345 20000018 0 1 23474 2006-01-01 07:04:00 +19 20.2345 20000019 0 1 23475 2006-01-01 07:04:00 +20 21.2345 20000020 0 1 23476 2006-01-01 07:04:00 +ALTER TABLE test.t1 ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB; +SELECT a1, a2,a3,hex(a4),a5,a6,a7,a8,a9,a10,a11,a12,a13 FROM test.t1 ORDER BY a1; +a1 a2 a3 hex(a4) a5 a6 a7 a8 a9 a10 a11 a12 a13 +1 2.2345 20000001 0 1 23457 2006-01-01 07:04:00 1971-05-28 16:55:03 abc abcdefg LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL Text Field +2 3.2345 20000002 0 1 23458 2006-01-01 07:04:00 1971-05-28 16:55:03 abc abcdefg LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL Text Field +3 4.2345 20000003 0 1 23459 2006-01-01 07:04:00 1971-05-28 16:55:03 abc abcdefg LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL Text Field +4 5.2345 20000004 0 1 23460 2006-01-01 07:04:00 1971-05-28 16:55:03 abc abcdefg LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL Text Field +5 6.2345 20000005 0 1 23461 2006-01-01 07:04:00 1971-05-28 16:55:03 abc abcdefg LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL Text Field +6 7.2345 20000006 0 1 23462 2006-01-01 07:04:00 1971-05-28 16:55:03 abc abcdefg LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL Text Field +7 8.2345 20000007 0 1 23463 2006-01-01 07:04:00 1971-05-28 16:55:03 abc abcdefg LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL Text Field +8 9.2345 20000008 0 1 23464 2006-01-01 07:04:00 1971-05-28 16:55:03 abc abcdefg LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL Text Field +9 10.2345 20000009 0 1 23465 2006-01-01 07:04:00 1971-05-28 16:55:03 abc abcdefg LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL Text Field +10 11.2345 20000010 0 1 23466 2006-01-01 07:04:00 1971-05-28 16:55:03 abc abcdefg LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL Text Field +11 12.2345 20000011 0 1 23467 2006-01-01 07:04:00 1971-05-28 16:55:03 abc abcdefg LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL Text Field +12 13.2345 20000012 0 1 23468 2006-01-01 07:04:00 
1971-05-28 16:55:03 abc abcdefg LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL Text Field +13 14.2345 20000013 0 1 23469 2006-01-01 07:04:00 1971-05-28 16:55:03 abc abcdefg LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL Text Field +14 15.2345 20000014 0 1 23470 2006-01-01 07:04:00 1971-05-28 16:55:03 abc abcdefg LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL Text Field +15 16.2345 20000015 0 1 23471 2006-01-01 07:04:00 1971-05-28 16:55:03 abc abcdefg LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL Text Field +16 17.2345 20000016 0 1 23472 2006-01-01 07:04:00 1971-05-28 16:55:03 abc abcdefg LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL Text Field +17 18.2345 20000017 0 1 23473 2006-01-01 07:04:00 1971-05-28 16:55:03 abc abcdefg LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL Text Field +18 19.2345 20000018 0 1 23474 2006-01-01 07:04:00 1971-05-28 16:55:03 abc abcdefg LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL Text Field +19 20.2345 20000019 0 1 23475 2006-01-01 07:04:00 1971-05-28 16:55:03 abc abcdefg LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL Text Field +20 21.2345 20000020 0 1 23476 2006-01-01 07:04:00 1971-05-28 16:55:03 abc abcdefg LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL Text Field +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) NOT NULL, + `a2` float DEFAULT NULL, + `a3` double DEFAULT NULL, + `a4` bit(1) DEFAULT NULL, + `a5` tinyint(4) DEFAULT NULL, + `a6` bigint(20) DEFAULT NULL, + `a7` date DEFAULT NULL, + `a8` time DEFAULT NULL, + `a9` datetime DEFAULT NULL, + `a10` tinytext, + `a11` mediumtext, + `a12` longtext, + `a13` text, + `a14` blob, + PRIMARY KEY (`a1`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +ALTER TABLE test.t1 ADD INDEX a2_i (a2), ADD INDEX a3_i (a3); +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) NOT NULL, + `a2` float DEFAULT NULL, + `a3` double DEFAULT NULL, + `a4` bit(1) DEFAULT NULL, + `a5` tinyint(4) DEFAULT NULL, + `a6` bigint(20) DEFAULT NULL, + `a7` date DEFAULT NULL, + `a8` time DEFAULT NULL, + `a9` datetime DEFAULT NULL, + `a10` tinytext, + `a11` mediumtext, + `a12` longtext, + `a13` text, + `a14` blob, + PRIMARY KEY (`a1`), + KEY `a2_i` (`a2`), + KEY `a3_i` (`a3`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +ALTER TABLE test.t1 DROP INDEX a2_i; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) NOT NULL, + `a2` float DEFAULT NULL, + `a3` double DEFAULT NULL, + `a4` bit(1) DEFAULT NULL, + `a5` tinyint(4) DEFAULT NULL, + `a6` bigint(20) DEFAULT NULL, + `a7` date DEFAULT NULL, + `a8` time DEFAULT NULL, + `a9` datetime DEFAULT NULL, + `a10` tinytext, + `a11` mediumtext, + `a12` longtext, + `a13` text, + `a14` blob, + PRIMARY KEY (`a1`), + KEY `a3_i` (`a3`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +ALTER TABLE test.t1 DROP a14; +ALTER TABLE test.t1 DROP a13; +ALTER TABLE test.t1 DROP a12; +ALTER TABLE test.t1 DROP a11; +ALTER TABLE test.t1 DROP a10; +ALTER TABLE test.t1 DROP a9; +ALTER TABLE test.t1 DROP a8; +ALTER TABLE test.t1 DROP a7; +ALTER TABLE test.t1 DROP a6; +ALTER TABLE test.t1 DROP PRIMARY KEY; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) NOT NULL, + `a2` float DEFAULT NULL, + `a3` double DEFAULT NULL, + `a4` bit(1) DEFAULT NULL, + `a5` tinyint(4) DEFAULT NULL, + KEY `a3_i` (`a3`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +DROP TABLE test.t1; +ALTER TABLESPACE ts +DROP DATAFILE './table_space/datafile.dat' + ENGINE NDB; +DROP TABLESPACE ts ENGINE NDB; +DROP LOGFILE GROUP lg ENGINE=NDB; diff --git a/mysql-test/r/ndb_dd_basic.result b/mysql-test/r/ndb_dd_basic.result index 
5a7e70e796d..83cb12ce964 100644 --- a/mysql-test/r/ndb_dd_basic.result +++ b/mysql-test/r/ndb_dd_basic.result @@ -422,6 +422,47 @@ SELECT COUNT(*) FROM t1; COUNT(*) 0 DROP TABLE t1; +CREATE TABLE t1 ( +a int NOT NULL, +b varchar(4000), -- must use 2 pages undo +PRIMARY KEY using hash (a) +) +TABLESPACE ts1 STORAGE DISK ENGINE=NDBCLUSTER; +set autocommit = 0; +insert into t1 values(0,'x'); +insert into t1 values(1,'x'); +insert into t1 values(2,'x'); +insert into t1 values(3,'x'); +insert into t1 values(4,'x'); +insert into t1 values(5,'x'); +insert into t1 values(6,'x'); +insert into t1 values(7,'x'); +insert into t1 values(8,'x'); +delete from t1 where a = 0; +commit; +set autocommit = 1; +drop table t1; +create table test.t1 (f1 varchar(50) primary key, f2 text,f3 int) +TABLESPACE ts1 STORAGE DISK engine=NDB; +insert into test.t1 (f1,f2,f3)VALUES("111111","aaaaaa",1); +insert into test.t1 (f1,f2,f3)VALUES("222222","bbbbbb",2); +select * from test.t1 order by f1; +f1 f2 f3 +111111 aaaaaa 1 +222222 bbbbbb 2 +select f1,f2 from test.t1 order by f2; +f1 f2 +111111 aaaaaa +222222 bbbbbb +select f2 from test.t1 order by f2; +f2 +aaaaaa +bbbbbb +select f1,f2 from test.t1 order by f1; +f1 f2 +111111 aaaaaa +222222 bbbbbb +drop table test.t1; ALTER TABLESPACE ts1 DROP DATAFILE 'datafile.dat' ENGINE = NDB; diff --git a/mysql-test/r/ndb_dd_disk2memory.result b/mysql-test/r/ndb_dd_disk2memory.result index 9084ddc3e16..9da506bf743 100644 --- a/mysql-test/r/ndb_dd_disk2memory.result +++ b/mysql-test/r/ndb_dd_disk2memory.result @@ -299,10 +299,207 @@ usr_id uniq_id increment usr2_id c_amount max DROP TABLE test.t1; DROP TABLE test.t2; -ALTER TABLESPACE table_space1 -DROP DATAFILE './table_space1/datafile.dat' -ENGINE = NDB; -DROP TABLESPACE table_space1 -ENGINE = NDB; -DROP LOGFILE GROUP log_group1 -ENGINE =NDB; +CREATE TABLE test.t1 ( +a1 smallint NOT NULL, +a2 int NOT NULL, +a3 bigint NOT NULL, +a4 char(10), +a5 decimal(5,1), +a6 time, +a7 date, +a8 datetime, +a9 VARCHAR(255), +a10 blob, +PRIMARY KEY(a1) +) TABLESPACE table_space1 STORAGE DISK ENGINE=NDB; +CREATE TABLE test.t2 ( +b1 smallint NOT NULL, +b2 int NOT NULL, +b3 bigint NOT NULL, +b4 char(10), +b5 decimal(5,1), +b6 time, +b7 date, +b8 datetime, +b9 VARCHAR(255), +b10 blob, +PRIMARY KEY(b1) +) ENGINE=NDB; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` smallint(6) NOT NULL, + `a2` int(11) NOT NULL, + `a3` bigint(20) NOT NULL, + `a4` char(10) DEFAULT NULL, + `a5` decimal(5,1) DEFAULT NULL, + `a6` time DEFAULT NULL, + `a7` date DEFAULT NULL, + `a8` datetime DEFAULT NULL, + `a9` varchar(255) DEFAULT NULL, + `a10` blob, + PRIMARY KEY (`a1`), + KEY `a2` (`a2`), + KEY `a3` (`a3`), + KEY `a8` (`a8`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +Table Create Table +t2 CREATE TABLE `t2` ( + `b1` smallint(6) NOT NULL, + `b2` int(11) NOT NULL, + `b3` bigint(20) NOT NULL, + `b4` char(10) DEFAULT NULL, + `b5` decimal(5,1) DEFAULT NULL, + `b6` time DEFAULT NULL, + `b7` date DEFAULT NULL, + `b8` datetime DEFAULT NULL, + `b9` varchar(255) DEFAULT NULL, + `b10` blob, + PRIMARY KEY (`b1`), + KEY `b2` (`b2`), + KEY `b3` (`b3`), + KEY `b8` (`b8`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 +1 2 2000000001 aaa1 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data +2 3 2000000002 aaa2 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data +3 4 2000000003 aaa3 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb3 binary data +4 5 2000000004 aaa4 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 
bbbbbbbbbbbbb4 binary data +5 6 2000000005 aaa5 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data +6 7 2000000006 aaa6 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data +7 8 2000000007 aaa7 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data +8 9 2000000008 aaa8 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data +9 10 2000000009 aaa9 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data +10 11 2000000010 aaa10 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data +11 12 2000000011 aaa11 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data +12 13 2000000012 aaa12 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data +13 14 2000000013 aaa13 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data +14 15 2000000014 aaa14 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data +15 16 2000000015 aaa15 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data +16 17 2000000016 aaa16 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data +17 18 2000000017 aaa17 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data +18 19 2000000018 aaa18 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data +19 20 2000000019 aaa19 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb19 binary data +20 21 2000000020 aaa20 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data +b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 +3 4 3000000001 aaa1 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data +4 5 3000000002 aaa2 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data +5 6 3000000003 aaa3 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb3 binary data +6 7 3000000004 aaa4 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data +7 8 3000000005 aaa5 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data +8 9 3000000006 aaa6 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data +9 10 3000000007 aaa7 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data +10 11 3000000008 aaa8 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data +11 12 3000000009 aaa9 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data +12 13 3000000010 aaa10 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data +13 14 3000000011 aaa11 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data +14 15 3000000012 aaa12 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data +15 16 3000000013 aaa13 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data +16 17 3000000014 aaa14 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data +17 18 3000000015 aaa15 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data +18 19 3000000016 aaa16 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data +19 20 3000000017 aaa17 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data +20 21 3000000018 aaa18 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data +21 22 3000000019 aaa19 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb19 binary data +22 23 3000000020 aaa20 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data +COUNT(a1) a1 COUNT(a1)*a1 +1 1 1 +1 2 2 +1 3 3 +1 4 
4 +1 5 5 +1 6 6 +1 7 7 +1 8 8 +1 9 9 +1 10 10 +1 11 11 +1 12 12 +1 13 13 +1 14 14 +1 15 15 +1 16 16 +1 17 17 +1 18 18 +1 19 19 +1 20 20 +COUNT(a2) (a2+1) COUNT(a2)*(a2+0) +1 3 2 +1 4 3 +1 5 4 +1 6 5 +1 7 6 +1 8 7 +1 9 8 +1 10 9 +1 11 10 +1 12 11 +1 13 12 +1 14 13 +1 15 14 +1 16 15 +1 17 16 +1 18 17 +1 19 18 +1 20 19 +1 21 20 +1 22 21 +a +8 +7 +6 +5 +a c +8 Clavin +7 Serge +6 Jeb +5 Stewart +4 Martin +3 Pekka +a +2 +3 +4 +5 +6 +7 +8 +a c +4 Martin +5 Stewart +6 Jeb +7 Serge +8 Clavin +a +1 +1 +1 +2 +2 +3 +3 +a +3 +2 +1 +a +3 +2 +1 +a +1 +2 +3 +email shipcode +test1@testdomain.com Z001 +test2@testdomain.com R002 +test2@testdomain.com Z001 +test3@testdomain.com Z001 +email +test1@testdomain.com +test2@testdomain.com +test3@testdomain.com +email shipcode +test1@testdomain.com Z001 +test2@testdomain.com Z001 +test2@testdomain.com R002 +test3@testdomain.com Z001 diff --git a/mysql-test/r/ndb_dd_advance2.result b/mysql-test/r/ndb_dd_sql_features.result index 86dc8df3b15..d1aba2f182e 100644 --- a/mysql-test/r/ndb_dd_advance2.result +++ b/mysql-test/r/ndb_dd_sql_features.result @@ -2,7 +2,8 @@ DROP TABLE IF EXISTS test.t1; DROP TABLE IF EXISTS test.t2; DROP TABLE IF EXISTS test.t3; ***** -**** Copy data from table in one table space to table in different table space +**** Copy data from table in one table space to table +**** in different table space ***** CREATE LOGFILE GROUP lg ADD UNDOFILE './lg_group/undofile.dat' @@ -12,12 +13,12 @@ ENGINE=NDB; CREATE TABLESPACE ts1 ADD DATAFILE './table_space1/datafile.dat' USE LOGFILE GROUP lg -INITIAL_SIZE 12M +INITIAL_SIZE 25M ENGINE NDB; CREATE TABLESPACE ts2 ADD DATAFILE './table_space2/datafile.dat' USE LOGFILE GROUP lg -INITIAL_SIZE 12M +INITIAL_SIZE 20M ENGINE NDB; CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; @@ -124,29 +125,9 @@ FROM test.t2 where a1=2; a1 length(a2) substr(a2,480,2) length(a3) substr(a3,1+2*900,3) 2 500 bb 2256 b1b DROP TABLE test.t1, test.t2; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE NDB; -DROP TABLESPACE ts1 ENGINE NDB; -ALTER TABLESPACE ts2 -DROP DATAFILE './table_space2/datafile.dat' - ENGINE NDB; -DROP TABLESPACE ts2 ENGINE NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; ***** **** Insert, Update, Delete from NDB table with BLOB fields ***** -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; set @vc1 = repeat('a', 200); set @vc2 = repeat('b', 500); set @vc3 = repeat('c', 1000); @@ -157,10 +138,12 @@ set @bb2 = repeat('2', 5000); set @bb3 = repeat('3', 10000); set @bb4 = repeat('4', 40000); set @bb5 = repeat('5', 50000); -select length(@vc1),length(@vc2),length(@vc3),length(@vc4),length(@vc5) from dual; +select length(@vc1),length(@vc2),length(@vc3),length(@vc4),length(@vc5) +from dual; length(@vc1) length(@vc2) length(@vc3) length(@vc4) length(@vc5) 200 500 1000 4000 5000 -select length(@bb1),length(@bb2),length(@bb3),length(@bb4),length(@bb5) from dual; +select length(@bb1),length(@bb2),length(@bb3),length(@bb4),length(@bb5) +from dual; length(@bb1) length(@bb2) length(@bb3) length(@bb4) length(@bb5) 2000 5000 10000 40000 50000 CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) @@ -216,25 +199,9 @@ SELECT count(*) from test.t1; count(*) 0 DROP TABLE test.t1; -ALTER TABLESPACE ts1 -DROP DATAFILE 
'./table_space1/datafile.dat' - ENGINE NDB; -DROP TABLESPACE ts1 ENGINE NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; ***** **** Create Stored procedures that use disk based tables ***** -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB// CREATE PROCEDURE test.sp1() @@ -257,25 +224,9 @@ DELETE FROM test.t1; DROP PROCEDURE test.sp1; DROP PROCEDURE test.sp2; DROP TABLE test.t1; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; -DROP TABLESPACE ts1 ENGINE=NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; ***** ***** Create function that operate on disk based tables ***** -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; CREATE FUNCTION test.fn1(n INT) RETURNS INT @@ -301,25 +252,9 @@ DELETE FROM test.t1; DROP FUNCTION test.fn1; DROP FUNCTION test.fn2; DROP TABLE test.t1; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; -DROP TABLESPACE ts1 ENGINE=NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; ***** ***** Create triggers that operate on disk based tables ***** -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; CREATE TRIGGER test.trg1 BEFORE INSERT ON test.t1 FOR EACH ROW @@ -340,25 +275,9 @@ a1 a2 a3 DELETE FROM test.t1; DROP TRIGGER test.trg1; DROP TABLE test.t1; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; -DROP TABLESPACE ts1 ENGINE=NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; ***** ***** Create, update views that operate on disk based tables ***** -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; CREATE VIEW test.v1 AS SELECT * FROM test.t1; @@ -394,26 +313,12 @@ a1 a2 a3 DROP VIEW test.v1; DELETE FROM test.t1; DROP TABLE test.t1; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; -DROP TABLESPACE ts1 ENGINE=NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; ***** ***** Create and use disk based table that use auto inc ***** -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, +a2 VARCHAR(256), a3 BLOB) 
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB; SELECT * FROM test.t1 ORDER BY a1; a1 a2 a3 1 aaaaa10 bbbbb10 @@ -428,27 +333,13 @@ a1 a2 a3 10 aaaaa1 bbbbb1 DELETE FROM test.t1; DROP TABLE test.t1; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; -DROP TABLESPACE ts1 ENGINE=NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; ***** ***** Create test that use transaction (commit, rollback) ***** -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; SET AUTOCOMMIT=0; -CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, +a2 VARCHAR(256), a3 BLOB) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1"); COMMIT; SELECT * FROM test.t1 ORDER BY a1; @@ -462,7 +353,9 @@ a1 a2 a3 DELETE FROM test.t1; DROP TABLE test.t1; SET AUTOCOMMIT=1; -CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, +a2 VARCHAR(256), a3 BLOB) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; START TRANSACTION; INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1"); COMMIT; @@ -477,27 +370,13 @@ a1 a2 a3 1 aaaaa1 bbbbb1 DELETE FROM test.t1; DROP TABLE test.t1; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; -DROP TABLESPACE ts1 ENGINE=NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; ***** ***** Create test that uses locks ***** -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; drop table if exists test.t1; -CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, +a2 VARCHAR(256), a3 BLOB) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; LOCK TABLES test.t1 write; INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1"); INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2"); @@ -520,12 +399,6 @@ a1 a2 a3 4 aaaaa3 bbbbb3 DELETE FROM test.t1; DROP TABLE test.t1; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; -DROP TABLESPACE ts1 ENGINE=NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; ***** ***** Create large disk base table, do random queries, check cache hits ***** @@ -562,16 +435,6 @@ length(@x0) length(@b1) length(@d1) select length(@x0),length(@b2),length(@d2) from dual; length(@x0) length(@b2) length(@d2) 256 20000 30000 -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(1000), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; INSERT INTO test.t1 values(1,@vc1,@d1); @@ -599,26 +462,12 @@ a1 length(a2) substr(a2,1+2*900,2) length(a3) substr(a3,1+3*900,3) 2 200 3000 dd1 DELETE FROM test.t1; DROP TABLE test.t1; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; -DROP 
TABLESPACE ts1 ENGINE=NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; ***** -***** Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), TRUNCATE +***** Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), +***** USER(), TRUNCATE ***** -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB, a4 DATE, a5 CHAR(250)) +CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), +a3 BLOB, a4 DATE, a5 CHAR(250)) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; SELECT COUNT(*) from test.t1; COUNT(*) @@ -742,5 +591,9 @@ ALTER TABLESPACE ts1 DROP DATAFILE './table_space1/datafile.dat' ENGINE=NDB; DROP TABLESPACE ts1 ENGINE=NDB; +ALTER TABLESPACE ts2 +DROP DATAFILE './table_space2/datafile.dat' + ENGINE=NDB; +DROP TABLESPACE ts2 ENGINE=NDB; DROP LOGFILE GROUP lg ENGINE=NDB; diff --git a/mysql-test/t/ndb_dd_advance.test b/mysql-test/t/ndb_dd_advance.test deleted file mode 100755 index e882ec794c1..00000000000 --- a/mysql-test/t/ndb_dd_advance.test +++ /dev/null @@ -1,630 +0,0 @@ -############################################################## -# Author: JBM -# Date: 2006-01-12 -# Purpose: To test using ndb memory and disk tables together. -############################################################## - -############################################################## -# Author: Nikolay -# Date: 2006-05-12 -# Purpose: To test using ndb memory and disk tables together. -# -# Select from disk into memory table -# Select from disk into memory table -# Create test that loads data, use mysql dump to dump data, drop table, -# create table and load from mysql dump. 
-# Use group by asc and dec; Use having; Use order by -# ALTER Tests (Meta data testing): -# ALTER from InnoDB to Cluster Disk Data -# ALTER from MyISAM to Cluster Disk Data -# ALTER from Cluster Disk Data to InnoDB -# ALTER from Cluster Disk Data to MyISAM -# ALTER DD Tables and add columns -# ALTER DD Tables and add Indexes -# ALTER DD Tables and drop columns -# -############################################################## - --- source include/have_ndb.inc --- source include/not_embedded.inc - ---disable_warnings -DROP TABLE IF EXISTS test.t1; -DROP TABLE IF EXISTS test.t2; ---enable_warnings - -############ Test Setup Section ############# --- echo **** Test Setup Section **** - -CREATE LOGFILE GROUP log_group1 -ADD UNDOFILE './log_group1/undofile.dat' -INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; - -CREATE TABLESPACE table_space1 -ADD DATAFILE './table_space1/datafile.dat' -USE LOGFILE GROUP log_group1 -INITIAL_SIZE 12M -ENGINE NDB; - - -CREATE TABLE test.t1 -(pk1 INT NOT NULL PRIMARY KEY, b INT NOT NULL, c INT NOT NULL) -TABLESPACE table_space1 STORAGE DISK -ENGINE=NDB; - -CREATE TABLE test.t2 -(pk2 INT NOT NULL PRIMARY KEY, b2 INT NOT NULL, c2 INT NOT NULL) -ENGINE=NDB; - ---echo -##################### Data load for first test #################### ---echo **** Data load for first test **** - -INSERT INTO test.t1 VALUES -(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), -(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), -(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), -(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), -(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), -(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), -(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), -(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), -(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45), -(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50), -(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55), -(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60), -(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65), -(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70), -(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75); - - -INSERT INTO test.t2 VALUES -(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), -(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), -(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), -(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), -(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), -(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), -(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), -(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), -(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45), -(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50), -(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55), -(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60), -(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65), -(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70), -(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75); - ---echo -##################### Test 1 Section Begins ############### ---echo *** Test 1 Section Begins *** -SELECT COUNT(*) FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4); -SELECT * FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4); -SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 WHERE b IN (4); -SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2 WHERE pk1 IN (75); -SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b; ---echo -####################### Test 
1 Section End ################ - -##################### Setup for test 2 #################### ---echo *** Setup for test 2 **** -DELETE FROM test.t1; -INSERT INTO test.t1 VALUES -(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), -(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), -(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), -(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), -(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), -(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), -(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), -(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), -(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45); ---echo -############################# Test Section 2 ############### ---echo **** Test Section 2 **** -SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b; -SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2; -SELECT COUNT(*) FROM test.t1 RIGHT JOIN test.t2 ON b=b2; -SHOW CREATE TABLE test.t2; -SHOW CREATE TABLE test.t1; -ALTER TABLE test.t2 TABLESPACE table_space1 STORAGE DISK -ENGINE=NDB; -SHOW CREATE TABLE test.t2; -ALTER TABLE test.t1 ENGINE=NDBCLUSTER; -SHOW CREATE TABLE test.t1; ---echo -######################### End Test Section 2 ################# -DROP TABLE test.t1; -DROP TABLE test.t2; -##################### Setup for Test Section 3 ############### ---echo *** Setup for Test Section 3 *** -CREATE TABLE test.t1 ( - usr_id INT unsigned NOT NULL, - uniq_id INT unsigned NOT NULL AUTO_INCREMENT, - start_num INT unsigned NOT NULL DEFAULT 1, - increment INT unsigned NOT NULL DEFAULT 1, - PRIMARY KEY (uniq_id), - INDEX usr_uniq_idx (usr_id, uniq_id), - INDEX uniq_usr_idx (uniq_id, usr_id)) -TABLESPACE table_space1 STORAGE DISK -ENGINE=NDB; - - -CREATE TABLE test.t2 ( - id INT unsigned NOT NULL DEFAULT 0, - usr2_id INT unsigned NOT NULL DEFAULT 0, - max INT unsigned NOT NULL DEFAULT 0, - c_amount INT unsigned NOT NULL DEFAULT 0, - d_max INT unsigned NOT NULL DEFAULT 0, - d_num INT unsigned NOT NULL DEFAULT 0, - orig_time INT unsigned NOT NULL DEFAULT 0, - c_time INT unsigned NOT NULL DEFAULT 0, - active ENUM ("no","yes") NOT NULL, - PRIMARY KEY (id,usr2_id), - INDEX id_idx (id), - INDEX usr2_idx (usr2_id)) -ENGINE=NDB; - -INSERT INTO test.t1 VALUES (3,NULL,0,50),(3,NULL,0,200),(3,NULL,0,25),(3,NULL,0,84676),(3,NULL,0,235),(3,NULL,0,10),(3,NULL,0,3098),(3,NULL,0,2947),(3,NULL,0,8987),(3,NULL,0,8347654),(3,NULL,0,20398),(3,NULL,0,8976),(3,NULL,0,500),(3,NULL,0,198); - ---echo -###################### Test Section 3 ###################### ---echo **** Test Section 3 **** -SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment, -test.t2.usr2_id,test.t2.c_amount,test.t2.max -FROM test.t1 -LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id -WHERE test.t1.uniq_id = 4 -ORDER BY test.t2.c_amount; - -INSERT INTO test.t2 VALUES (2,3,3000,6000,0,0,746584,837484,'yes'); -INSERT INTO test.t2 VALUES (4,3,3000,6000,0,0,746584,837484,'yes'); -INSERT INTO test.t2 VALUES (7,3,1000,2000,0,0,746294,937484,'yes'); - -SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment, -test.t2.usr2_id,test.t2.c_amount,test.t2.max -FROM test.t1 -LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id -WHERE test.t1.uniq_id = 4 -ORDER BY test.t2.c_amount; ---echo -####################### End Section 3 ######################### -DROP TABLE test.t1; -DROP TABLE test.t2; -ALTER TABLESPACE table_space1 -DROP DATAFILE './table_space1/datafile.dat' -ENGINE = NDB; - -DROP TABLESPACE table_space1 -ENGINE = NDB; - -DROP LOGFILE GROUP log_group1 -ENGINE =NDB; - 
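The sections above all follow the same disk-data lifecycle and always tear it down in the reverse order of creation (drop tables, then datafiles, then tablespaces, then the logfile group). A minimal sketch of that pattern, using illustrative object names and file paths that are not taken from this test, looks like:

CREATE LOGFILE GROUP lg_demo
  ADD UNDOFILE 'undo_demo.dat'        # undo log shared by all disk-data tables
  INITIAL_SIZE 16M
  UNDO_BUFFER_SIZE = 1M
  ENGINE=NDB;

CREATE TABLESPACE ts_demo
  ADD DATAFILE 'data_demo.dat'        # datafile holding the on-disk columns
  USE LOGFILE GROUP lg_demo
  INITIAL_SIZE 12M
  ENGINE NDB;

CREATE TABLE demo_t (pk INT NOT NULL PRIMARY KEY, payload VARCHAR(256))
  TABLESPACE ts_demo STORAGE DISK ENGINE=NDB;

# cleanup in reverse order: table, datafile, tablespace, logfile group
DROP TABLE demo_t;
ALTER TABLESPACE ts_demo DROP DATAFILE 'data_demo.dat' ENGINE=NDB;
DROP TABLESPACE ts_demo ENGINE=NDB;
DROP LOGFILE GROUP lg_demo ENGINE=NDB;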
-####################### Section 4 ######################### - - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - - CREATE TABLESPACE ts2 - ADD DATAFILE './table_space2/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - - -### Select from disk into memory table ### - - CREATE TABLE t1 (a int NOT NULL PRIMARY KEY, b int) - TABLESPACE ts1 STORAGE DISK ENGINE=NDB; - CREATE TABLE t2 (a int NOT NULL PRIMARY KEY, b int) - ENGINE=NDB; - - SHOW CREATE TABLE t1; - SHOW CREATE TABLE t2; - - INSERT INTO t1 VALUES (1,1); - INSERT INTO t1 VALUES (2,2); - SELECT * FROM t1 order by a; - INSERT INTO t2(a,b) SELECT * FROM t1; - SELECT * FROM t2 order by a; - -### Select from disk into memory table ### - - TRUNCATE t1; - TRUNCATE t2; - INSERT INTO t2 VALUES (3,3); - INSERT INTO t2 VALUES (4,4); - INSERT INTO t1(a,b) SELECT * FROM t2; - SELECT * FROM t1 order by a; - - DROP TABLE t1, t2; - - ALTER TABLESPACE ts1 - DROP DATAFILE './table_space1/datafile.dat' - ENGINE NDB; - - DROP TABLESPACE ts1 ENGINE NDB; - - ALTER TABLESPACE ts2 - DROP DATAFILE './table_space2/datafile.dat' - ENGINE NDB; - - DROP TABLESPACE ts2 ENGINE NDB; - - DROP LOGFILE GROUP lg - ENGINE=NDB; - -#### Create test that loads data, use mysql dump to dump data, drop table, -#### create table and load from mysql dump. - -# DROP DATABASE IF EXISTS test; - - CREATE LOGFILE GROUP lg - ADD UNDOFILE './undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts - ADD DATAFILE './datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - -# CREATE DATABASE test; - -CREATE TABLE test.t ( - a smallint NOT NULL, - b int NOT NULL, - c bigint NOT NULL, - d char(10), - e TEXT, - f VARCHAR(255), - PRIMARY KEY(a) -) TABLESPACE ts STORAGE DISK ENGINE=NDB; - - ALTER TABLE test.t ADD INDEX (d), ADD INDEX (f); - SHOW CREATE TABLE test.t; - -# insert records into tables - - let $1=100; - disable_query_log; - while ($1) - { - eval insert into test.t values($1, $1+1, $1+2, "aaa$1", "bbb$1", "ccccc$1"); - dec $1; - } - enable_query_log; - - SELECT * FROM test.t order by a; ---exec $MYSQL_DUMP --skip-comments --databases test > $MYSQLTEST_VARDIR/tmp/t_dump.sql -DROP TABLE test.t; ---exec $MYSQL test < $MYSQLTEST_VARDIR/tmp/t_dump.sql -USE test; -show tables; - -SELECT * FROM test.t order by a; - - DROP TABLE test.t; -# DROP DATABASE test; - - ALTER TABLESPACE ts - DROP DATAFILE './datafile.dat' - ENGINE NDB; - - DROP TABLESPACE ts ENGINE NDB; - - DROP LOGFILE GROUP lg - ENGINE=NDB; - -#### BUG 18856 test case comented out -##### Use "SELECT * INTO OUTFILE" to dump data and "LOAD DATA INFILE" to load ##### data back to the data file. 
- -# CREATE LOGFILE GROUP lg -# ADD UNDOFILE './undofile.dat' -# INITIAL_SIZE 16M -# UNDO_BUFFER_SIZE = 1M -# ENGINE=NDB; - -# CREATE TABLESPACE ts -# ADD DATAFILE './datafile.dat' -# USE LOGFILE GROUP lg -# INITIAL_SIZE 12M -# ENGINE NDB; - -#CREATE DATABASE test; - -#CREATE TABLE test.t ( -# a smallint NOT NULL, -# b int NOT NULL, -# c bigint NOT NULL, -# d char(10), -# e TEXT, -# f VARCHAR(255), -# PRIMARY KEY(a) -#) TABLESPACE ts STORAGE DISK ENGINE=NDB; - -# ALTER TABLE test.t ADD INDEX (d), ADD INDEX (f); -# SHOW CREATE TABLE test.t; - -# insert records into tables - -# let $1=100; -# disable_query_log; -# while ($1) -# { -# eval insert into test.t values($1, $1+1, $1+2, "aaa$1", "bbb$1", "ccccc$1"); -# dec $1; -# } -# enable_query_log; - -# SELECT * FROM test.t order by a; - -# SELECT * INTO OUTFILE 't_backup' FROM test.t; -# TRUNCATE test.t; - -#'TRUNCATE test.t' failed: 1205: Lock wait timeout exceeded; try restarting #transaction. TABLESPACE ts STORAGE DISK ENGINE=NDB; - -# SELECT count(*) FROM test.t; -# LOAD DATA INFILE 't_backup' INTO TABLE test.t; - -# SELECT * FROM test.t order by a; - -# DROP TABLE test.t; -# DROP DATABASE test; - -# ALTER TABLESPACE ts -# DROP DATAFILE './datafile.dat' -# ENGINE NDB; -# DROP TABLESPACE ts ENGINE NDB; -# DROP LOGFILE GROUP lg -# ENGINE=NDB; - -#### Use group by asc and dec; Use having; Use order by. #### - -# DROP DATABASE IF EXISTS test; - DROP table IF EXISTS test.t1; - DROP table IF EXISTS test.t2; - - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - - CREATE TABLESPACE ts2 - ADD DATAFILE './table_space2/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - -# CREATE DATABASE test; - -CREATE TABLE test.t1 ( - a1 smallint NOT NULL, - a2 int NOT NULL, - a3 bigint NOT NULL, - a4 char(10), - a5 decimal(5,1), - a6 time, - a7 date, - a8 datetime, - a9 VARCHAR(255), - a10 blob, - PRIMARY KEY(a1) -) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; - - ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a8); - SHOW CREATE TABLE test.t1; - -CREATE TABLE test.t2 ( - b1 smallint NOT NULL, - b2 int NOT NULL, - b3 bigint NOT NULL, - b4 char(10), - b5 decimal(5,1), - b6 time, - b7 date, - b8 datetime, - b9 VARCHAR(255), - b10 blob, - PRIMARY KEY(b1) -) ENGINE=NDB; - - ALTER TABLE test.t2 ADD INDEX (b2), ADD INDEX (b3), ADD INDEX (b8); - SHOW CREATE TABLE test.t2; - -let $1=20; -disable_query_log; -while ($1) -{ - eval insert into test.t1 values($1, $1+1, $1+2000000000, "aaa$1", 34.2, '4:3:2', '2006-1-1', '1971-5-28 16:55:03', "bbbbbbbbbbbbb$1", "binary data"); - eval insert into test.t2 values($1+2, $1+3, $1+3000000000, "aaa$1", 35.2, '4:3:2', '2006-1-1', '1971-5-28 16:55:03', "bbbbbbbbbbbbb$1", "binary data"); - dec $1; -} -enable_query_log; - -SELECT * FROM test.t1 order by a1; -SELECT * FROM test.t2 order by b1; -SELECT COUNT(a1), a1, COUNT(a1)*a1 FROM test.t1 GROUP BY a1; -SELECT COUNT(a2), (a2+1), COUNT(a2)*(a2+0) FROM test.t1 GROUP BY a2; - -DROP TABLE test.t1; -DROP TABLE test.t2; - -create table test.t1 (a int not null,b char(5), c text) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; - -insert into test.t1 (a) values (1),(2),(3),(4),(1),(2),(3),(4); -select distinct a from test.t1 group by b,a having a > 2 order by a desc; -select distinct a,c from test.t1 group by b,c,a having a > 2 order by a desc; -select distinct a from test.t1 
group by b,a having a > 2 order by a asc; -select distinct a,c from test.t1 group by b,c,a having a > 2 order by a asc; -drop table test.t1; - -create table test.t1 (a char(1), key(a)) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -insert into test.t1 values('1'),('1'),('1'),('2'),('2'),('3'),('3'); -select * from test.t1 where a >= '1' order by a; -select distinct a from test.t1 order by a desc; -select distinct a from test.t1 where a >= '1' order by a desc; -select distinct a from test.t1 where a >= '1' order by a asc; -drop table test.t1; - -CREATE TABLE test.t1 (email varchar(50), infoID BIGINT, dateentered DATETIME) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -CREATE TABLE test.t2 (infoID BIGINT, shipcode varchar(10)) ENGINE=NDB; - -INSERT INTO test.t1 (email, infoID, dateentered) VALUES - ('test1@testdomain.com', 1, '2002-07-30 22:56:38'), - ('test1@testdomain.com', 1, '2002-07-27 22:58:16'), - ('test2@testdomain.com', 1, '2002-06-19 15:22:19'), - ('test2@testdomain.com', 2, '2002-06-18 14:23:47'), - ('test3@testdomain.com', 1, '2002-05-19 22:17:32'); - -INSERT INTO test.t2(infoID, shipcode) VALUES - (1, 'Z001'), - (2, 'R002'); - -SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE t1.infoID=t2.infoID order by email, shipcode; -SELECT DISTINCTROW email FROM test.t1 ORDER BY dateentered DESC; -SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE test.t1.infoID=test.t2.infoID ORDER BY dateentered DESC; -drop table test.t1,test.t2; - - ALTER TABLESPACE ts1 - DROP DATAFILE './table_space1/datafile.dat' - ENGINE NDB; - DROP TABLESPACE ts1 ENGINE NDB; - ALTER TABLESPACE ts2 - DROP DATAFILE './table_space2/datafile.dat' - ENGINE NDB; - DROP TABLESPACE ts2 ENGINE NDB; - DROP LOGFILE GROUP lg - ENGINE=NDB; -#################################################################### - - -#### Customer posted order by test case - -DROP TABLE IF EXISTS test.t; -create table test.t (f1 varchar(50) primary key, f2 text,f3 int) engine=NDB; -insert into test.t (f1,f2,f3)VALUES("111111","aaaaaa",1); -insert into test.t (f1,f2,f3)VALUES("222222","bbbbbb",2); -select * from test.t order by f1; -select f1,f2 from test.t order by f2; -select f2 from test.t order by f2; -select f1,f2 from test.t order by f1; -drop table test.t; - -################## ALTER Tests (Meta data testing) #################### - - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts - ADD DATAFILE './table_space/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - -#### Try to ALTER from InnoDB to Cluster Disk Data - -CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=InnoDB; -SHOW CREATE TABLE test.t1; -ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; -SHOW CREATE TABLE test.t1; -DROP TABLE test.t1; - -#### Try to ALTER from MyISAM to Cluster Disk Data - -CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=MyISAM; -SHOW CREATE TABLE test.t1; -ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; -SHOW CREATE TABLE test.t1; -DROP TABLE test.t1; - -#### Try to ALTER from Cluster Disk Data to InnoDB - -CREATE TABLE test.t1 (a1 INT PRIMARY KEY, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB; -SHOW CREATE TABLE test.t1; -ALTER TABLE test.t1 ENGINE=InnoDB; -SHOW CREATE TABLE test.t1; -DROP TABLE test.t1; - -#### Try to ALTER from Cluster Disk Data to MyISAM - -CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB; -SHOW CREATE TABLE test.t1; -ALTER TABLE 
test.t1 ENGINE=MyISAM; -SHOW CREATE TABLE test.t1; -DROP TABLE test.t1; - -#### Try to ALTER DD Tables and add columns - -CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB; - -ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB; - -SHOW CREATE TABLE test.t1; - -#### Try to ALTER DD Tables and add Indexes - -ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a5), ADD INDEX (a6), -ADD INDEX (a7), ADD INDEX (a8); - -SHOW CREATE TABLE test.t1; - -DROP TABLE test.t1; - -#### Try to ALTER DD Tables and drop columns - -CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB; - -ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB; - -SHOW CREATE TABLE test.t1; - -ALTER TABLE test.t1 DROP a14; -ALTER TABLE test.t1 DROP a13; -ALTER TABLE test.t1 DROP a12; -ALTER TABLE test.t1 DROP a11; -ALTER TABLE test.t1 DROP a10; -ALTER TABLE test.t1 DROP a9; -ALTER TABLE test.t1 DROP a8; -ALTER TABLE test.t1 DROP a7; -ALTER TABLE test.t1 DROP a6; -ALTER TABLE test.t1 DROP PRIMARY KEY; - -SHOW CREATE TABLE test.t1; - -DROP TABLE test.t1; - - ALTER TABLESPACE ts - DROP DATAFILE './table_space/datafile.dat' - ENGINE NDB; - DROP TABLESPACE ts ENGINE NDB; - DROP LOGFILE GROUP lg - ENGINE=NDB; - -####################### End section 4 ######################### -#End 5.1 test case - diff --git a/mysql-test/t/ndb_dd_alter.test b/mysql-test/t/ndb_dd_alter.test new file mode 100644 index 00000000000..4eb76fc1ad6 --- /dev/null +++ b/mysql-test/t/ndb_dd_alter.test @@ -0,0 +1,247 @@ +############################################################## +# Author: JBM +# Date: 2006-01-12 +# Purpose: To test using ndb memory and disk tables together. +############################################################## + +############################################################## +# Author: Nikolay +# Date: 2006-05-12 +# Purpose: To test using ndb memory and disk tables together. +# +# Select from disk into memory table +# Select from disk into memory table +# Create test that loads data, use mysql dump to dump data, drop table, +# create table and load from mysql dump. +# Use group by asc and dec; Use having; Use order by +# ALTER Tests (Meta data testing): +# ALTER from InnoDB to Cluster Disk Data +# ALTER from MyISAM to Cluster Disk Data +# ALTER from Cluster Disk Data to InnoDB +# ALTER from Cluster Disk Data to MyISAM +# ALTER DD Tables and add columns +# ALTER DD Tables and add Indexes +# ALTER DD Tables and drop columns +# +############################################################## +# Author: Jonathan +# Date 2006-08-28 +# Purpose: To take out some of the test that are already +# Covered by other tests. Per Jonas +# The new purpose of this test is testing "Alter" +# Statements. Therefore the name is changed to +# ndb_dd_alter.test +# Removed tests include: +# Select from disk into memory table +# Select from disk into memory table +# Create test that loads data, use mysql dump to dump data, drop table, +# create table and load from mysql dump. 
+# Use group by asc and dec; Use having; Use order by +############################################################## + +-- source include/have_ndb.inc +-- source include/have_innodb.inc +-- source include/not_embedded.inc + +--disable_warnings +DROP TABLE IF EXISTS test.t1; +DROP TABLE IF EXISTS test.t2; +--enable_warnings + +############ Test Setup Section ############# +-- echo **** Test Setup Section **** +################## ALTER Tests (Meta data testing) #################### + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts + ADD DATAFILE './table_space/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + +#### Try to ALTER from InnoDB to Cluster Disk Data + +CREATE TABLE test.t1 ( + a1 smallint NOT NULL, + a2 int NOT NULL, + a3 bigint NOT NULL, + a4 char(10), + a5 decimal(5,1), + a6 time, + a7 date, + a8 datetime, + a9 VARCHAR(255), + a10 blob, + PRIMARY KEY(a1) +) ENGINE=InnoDB; + +let $1=20; +disable_query_log; +while ($1) +{ + eval insert into test.t1 values($1, $1+1, $1+2000000000, "aaa$1", 34.2, '4:3:2', '2006-1-1', '1971-5-28 16:55:03', "bbbbbbbbbbbbb$1", "binary data"); + dec $1; +} +enable_query_log; + +SHOW CREATE TABLE test.t1; +SELECT * FROM test.t1 ORDER BY a1; +ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +SELECT * FROM test.t1 ORDER BY a1; +DROP TABLE test.t1; + +#### Try to ALTER from MyISAM to Cluster Disk Data + +CREATE TABLE test.t1 ( + a1 smallint NOT NULL, + a2 int NOT NULL, + a3 bigint NOT NULL, + a4 char(10), + a5 decimal(5,1), + a6 time, + a7 date, + a8 datetime, + a9 VARCHAR(255), + a10 blob, + PRIMARY KEY(a1) +) ENGINE=MyISAM; + +let $1=20; +disable_query_log; +while ($1) +{ + eval insert into test.t1 values($1, $1+1, $1+2000000000, "aaa$1", 34.2, '4:3:2', '2006-1-1', '1971-5-28 16:55:03', "bbbbbbbbbbbbb$1", "binary data"); + dec $1; +} +enable_query_log; + +SHOW CREATE TABLE test.t1; +SELECT * FROM test.t1 ORDER BY a1; +ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +SELECT * FROM test.t1 ORDER BY a1; + +#### Try to ALTER from Cluster Disk Data to InnoDB + +ALTER TABLE test.t1 ENGINE=InnoDB; +SHOW CREATE TABLE test.t1; +SELECT * FROM test.t1 ORDER BY a1; +ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; + +#### Try to ALTER from Cluster Disk Data to MyISAM + +SHOW CREATE TABLE test.t1; +ALTER TABLE test.t1 ENGINE=MyISAM; +SHOW CREATE TABLE test.t1; +DROP TABLE test.t1; + +#### Try to ALTER DD Tables and add columns + +CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB; + +let $1=20; +disable_query_log; +while ($1) +{ + eval insert into test.t1 values($1); + dec $1; +} +enable_query_log; + +SELECT * FROM test.t1 ORDER BY a1; + +ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE; + +let $1=20; +disable_query_log; +while ($1) +{ + eval update test.t1 set a2 = $1+1.2345, a3 = $1+20000000.00 where a1 = $1; + dec $1; +} +enable_query_log; + +SELECT * FROM test.t1 ORDER BY a1; + +ALTER TABLE test.t1 ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME; + +let $1=20; +disable_query_log; +while ($1) +{ + eval update test.t1 set a4 = 0, a5 = 1, a6 = $1+23456, a7 = '2006-1-1', + a8 = '07:04:00' where a1 = $1; + dec $1; +} +enable_query_log; + +SELECT a1,a2,a3,hex(a4), a5,a6,a7,a8 FROM test.t1 ORDER BY a1; + +ALTER TABLE test.t1 ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD 
a14 BLOB; + +disable_query_log; +set @d2 = 'dd2'; +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +enable_query_log; + +let $1=20; +disable_query_log; +while ($1) +{ + eval update test.t1 set a9 = '1971-5-28 16:55:03', a10 = 'abc', a11 = 'abcdefg', + a12 = 'LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL', a13 = 'Text Field', + a14 = @d2 where a1 = $1; + dec $1; +} +enable_query_log; + +SELECT a1, a2,a3,hex(a4),a5,a6,a7,a8,a9,a10,a11,a12,a13 FROM test.t1 ORDER BY a1; +SHOW CREATE TABLE test.t1; + +#### Try to ALTER DD Tables and add Indexes + +ALTER TABLE test.t1 ADD INDEX a2_i (a2), ADD INDEX a3_i (a3); + +SHOW CREATE TABLE test.t1; + +ALTER TABLE test.t1 DROP INDEX a2_i; + +SHOW CREATE TABLE test.t1; + + +#### Try to ALTER DD Tables and drop columns + + +ALTER TABLE test.t1 DROP a14; +ALTER TABLE test.t1 DROP a13; +ALTER TABLE test.t1 DROP a12; +ALTER TABLE test.t1 DROP a11; +ALTER TABLE test.t1 DROP a10; +ALTER TABLE test.t1 DROP a9; +ALTER TABLE test.t1 DROP a8; +ALTER TABLE test.t1 DROP a7; +ALTER TABLE test.t1 DROP a6; +ALTER TABLE test.t1 DROP PRIMARY KEY; + +SHOW CREATE TABLE test.t1; + +DROP TABLE test.t1; + + ALTER TABLESPACE ts + DROP DATAFILE './table_space/datafile.dat' + ENGINE NDB; + DROP TABLESPACE ts ENGINE NDB; + DROP LOGFILE GROUP lg ENGINE=NDB; + +####################### End section 4 ######################### +#End 5.1 test case + diff --git a/mysql-test/t/ndb_dd_basic.test b/mysql-test/t/ndb_dd_basic.test index 9df2cfb0371..5d43d7997b0 100644 --- a/mysql-test/t/ndb_dd_basic.test +++ b/mysql-test/t/ndb_dd_basic.test @@ -345,6 +345,46 @@ DELETE FROM t1 WHERE a=2; SELECT COUNT(*) FROM t1; DROP TABLE t1; +# bug#20612 INS-DEL bug (not pgman bug) +# found via disk data assert but is not pgman or disk data related + +CREATE TABLE t1 ( + a int NOT NULL, + b varchar(4000), -- must use 2 pages undo + PRIMARY KEY using hash (a) +) +TABLESPACE ts1 STORAGE DISK ENGINE=NDBCLUSTER; + +set autocommit = 0; +insert into t1 values(0,'x'); +insert into t1 values(1,'x'); +insert into t1 values(2,'x'); +insert into t1 values(3,'x'); +insert into t1 values(4,'x'); +insert into t1 values(5,'x'); +insert into t1 values(6,'x'); +insert into t1 values(7,'x'); +insert into t1 values(8,'x'); +delete from t1 where a = 0; +commit; +set autocommit = 1; + +drop table t1; +############################# +# Customer posted order by test case +# Org in ndb_dd_advance.test +############################# + +create table test.t1 (f1 varchar(50) primary key, f2 text,f3 int) +TABLESPACE ts1 STORAGE DISK engine=NDB; +insert into test.t1 (f1,f2,f3)VALUES("111111","aaaaaa",1); +insert into test.t1 (f1,f2,f3)VALUES("222222","bbbbbb",2); +select * from test.t1 order by f1; +select f1,f2 from test.t1 order by f2; +select f2 from test.t1 order by f2; +select f1,f2 from test.t1 order by f1; +drop table test.t1; + ################### # Test Cleanup ################### diff --git a/mysql-test/t/ndb_dd_disk2memory.test b/mysql-test/t/ndb_dd_disk2memory.test index 354a0c9bfb5..0f819b54fb2 100644 --- a/mysql-test/t/ndb_dd_disk2memory.test +++ b/mysql-test/t/ndb_dd_disk2memory.test @@ -172,6 +172,112 @@ ORDER BY test.t2.c_amount; ####################### End Section 3 ######################### DROP TABLE test.t1; DROP TABLE test.t2; + +########## Test that use to be in ndb_dd_advance.test ######### +########## ndb_dd_advance.test is now 
ndb_dd_alter.test ####### +#### Use group by asc and dec; Use having; Use order by. #### + +CREATE TABLE test.t1 ( + a1 smallint NOT NULL, + a2 int NOT NULL, + a3 bigint NOT NULL, + a4 char(10), + a5 decimal(5,1), + a6 time, + a7 date, + a8 datetime, + a9 VARCHAR(255), + a10 blob, + PRIMARY KEY(a1) +) TABLESPACE table_space1 STORAGE DISK ENGINE=NDB; + +CREATE TABLE test.t2 ( + b1 smallint NOT NULL, + b2 int NOT NULL, + b3 bigint NOT NULL, + b4 char(10), + b5 decimal(5,1), + b6 time, + b7 date, + b8 datetime, + b9 VARCHAR(255), + b10 blob, + PRIMARY KEY(b1) +) ENGINE=NDB; + +### ADD Some DATA to tables ###### + + +let $1=20; +disable_query_log; +while ($1) +{ + eval insert into test.t1 values($1, $1+1, $1+2000000000, "aaa$1", 34.2, '4:3:2', '2006-1-1', '1971-5-28 16:55:03', "bbbbbbbbbbbbb$1", "binary data"); + eval insert into test.t2 values($1+2, $1+3, $1+3000000000, "aaa$1", 35.2, '4:3:2', '2006-1-1', '1971-5-28 16:55:03', "bbbbbbbbbbbbb$1", "binary data"); + dec $1; +} + +ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a8); +ALTER TABLE test.t2 ADD INDEX (b2), ADD INDEX (b3), ADD INDEX (b8); +SHOW CREATE TABLE test.t1; +SHOW CREATE TABLE test.t2; +SELECT * FROM test.t1 order by a1; +SELECT * FROM test.t2 order by b1; +SELECT COUNT(a1), a1, COUNT(a1)*a1 FROM test.t1 GROUP BY a1; +SELECT COUNT(a2), (a2+1), COUNT(a2)*(a2+0) FROM test.t1 GROUP BY a2; + +DROP TABLE test.t1; +DROP TABLE test.t2; + +CREATE TABLE test.t1 (a INT NOT NULL, +b CHAR(5), c TEXT, d INT NOT NULL PRIMARY KEY) TABLESPACE +table_space1 STORAGE DISK ENGINE=NDB; + +insert into test.t1 (a,b,c,d) values (1,'a','Jonas',1),(2,'b','Tomas',2), +(3,'c','Pekka',3),(4,'d','Martin',4),(5,'e','Stewart',5),(6,'f','Jeb',6), +(7,'g','Serge',7),(8,'h','Clavin',8); +select distinct a from test.t1 group by b,a having a > 4 order by a desc; +select distinct a,c from test.t1 group by b,c,a having a > 2 order by a desc; +select distinct a from test.t1 group by b,a having a > 1 order by a asc; +select distinct a,c from test.t1 group by b,c,a having a > 3 order by a asc; +drop table test.t1; + +create table test.t1 (a char(1), key(a)) TABLESPACE table_space1 +STORAGE DISK ENGINE=NDB; +insert into test.t1 values('1'),('1'),('1'),('2'),('2'),('3'),('3'); +select * from test.t1 where a >= '1' order by a; +select distinct a from test.t1 order by a desc; +select distinct a from test.t1 where a >= '1' order by a desc; +select distinct a from test.t1 where a >= '1' order by a asc; +drop table test.t1; + +CREATE TABLE test.t1 (email varchar(50), infoID BIGINT, dateentered DATETIME) +TABLESPACE table_space1 STORAGE DISK ENGINE=NDB; +CREATE TABLE test.t2 (infoID BIGINT, shipcode varchar(10)) ENGINE=NDB; + +INSERT INTO test.t1 (email, infoID, dateentered) VALUES + ('test1@testdomain.com', 1, '2002-07-30 22:56:38'), + ('test1@testdomain.com', 1, '2002-07-27 22:58:16'), + ('test2@testdomain.com', 1, '2002-06-19 15:22:19'), + ('test2@testdomain.com', 2, '2002-06-18 14:23:47'), + ('test3@testdomain.com', 1, '2002-05-19 22:17:32'); + +INSERT INTO test.t2(infoID, shipcode) VALUES + (1, 'Z001'), + (2, 'R002'); + +SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 +WHERE t1.infoID=t2.infoID order by email, shipcode; +SELECT DISTINCTROW email FROM test.t1 ORDER BY dateentered DESC; +SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 +WHERE test.t1.infoID=test.t2.infoID ORDER BY dateentered DESC; + +DROP TABLE test.t1,test.t2; + +################# +# Test Cleanup +################# + ALTER TABLESPACE table_space1 DROP DATAFILE 
'./table_space1/datafile.dat' ENGINE = NDB; diff --git a/mysql-test/t/ndb_dd_dump.test b/mysql-test/t/ndb_dd_dump.test index 3f16f7fb28b..38ceafb7d80 100644 --- a/mysql-test/t/ndb_dd_dump.test +++ b/mysql-test/t/ndb_dd_dump.test @@ -221,6 +221,48 @@ DROP TABLE t1; DROP TABLE t2; DROP TABLE t3; +#### BUG 18856 test case comented out +##### Use "SELECT * INTO OUTFILE" to dump data and "LOAD DATA INFILE" to load ##### data back to the data file. + +#CREATE TABLE test.t ( +# a smallint NOT NULL, +# b int NOT NULL, +# c bigint NOT NULL, +# d char(10), +# e TEXT, +# f VARCHAR(255), +# PRIMARY KEY(a) +#) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + +# ALTER TABLE test.t ADD INDEX (d), ADD INDEX (f); +# SHOW CREATE TABLE test.t; + +# insert records into tables + +# let $1=100; +# disable_query_log; +# while ($1) +# { +# eval insert into test.t values($1, $1+1, $1+2, "aaa$1", "bbb$1", "ccccc$1"); +# dec $1; +# } +# enable_query_log; + +# SELECT * FROM test.t order by a; + +# SELECT * INTO OUTFILE 't_backup' FROM test.t; +# TRUNCATE test.t; + +#'TRUNCATE test.t' failed: 1205: Lock wait timeout exceeded; try restarting #transaction. TABLESPACE ts STORAGE DISK ENGINE=NDB; + +# SELECT count(*) FROM test.t; +# LOAD DATA INFILE 't_backup' INTO TABLE test.t; + +# SELECT * FROM test.t order by a; + +# DROP TABLE test.t; + + ALTER TABLESPACE ts1 DROP DATAFILE 'datafile_ts1_01.dat' ENGINE = NDB; diff --git a/mysql-test/t/ndb_dd_advance2.test b/mysql-test/t/ndb_dd_sql_features.test index 7b7a15ef01a..6c27ecf397b 100755..100644 --- a/mysql-test/t/ndb_dd_advance2.test +++ b/mysql-test/t/ndb_dd_sql_features.test @@ -10,13 +10,21 @@ # Try to create FK constraints on disk based tables. # Create and use disk based table that use auto inc. # Create test that use transaction (commit, rollback) -# Create large disk base table, do random queries, check cache hits, do same -# query 10 times check cache hits. -# Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), ... other built in # SQL functions +# Create large disk base table, do random queries, check cache hits, +# do same query 10 times check cache hits. +# Create test that uses COUNT(), SUM(), MAX(), MIN(), +# NOW(), USER(), ... other built in # SQL functions # Create test that uses locks. # Create test using truncate. ############################################################## - +# Author: Jonathan +# Date: 2006-08-28 +# Change: Remove all the create and drop LFG and TS except +# The first create and the last drop to make test +# run faster. +# In addition, renamed test to be to a name that +# gives a better idea of what the test is about +########################################################### -- source include/have_ndb.inc --disable_warnings @@ -25,9 +33,11 @@ DROP TABLE IF EXISTS test.t2; DROP TABLE IF EXISTS test.t3; --enable_warnings -#### Copy data from table in one table space to table in different table space. #### +#### Copy data from table in one table space to table in ### +#### different table space. 
#### --echo ***** ---echo **** Copy data from table in one table space to table in different table space +--echo **** Copy data from table in one table space to table +--echo **** in different table space --echo ***** CREATE LOGFILE GROUP lg @@ -39,13 +49,13 @@ DROP TABLE IF EXISTS test.t3; CREATE TABLESPACE ts1 ADD DATAFILE './table_space1/datafile.dat' USE LOGFILE GROUP lg - INITIAL_SIZE 12M + INITIAL_SIZE 25M ENGINE NDB; CREATE TABLESPACE ts2 ADD DATAFILE './table_space2/datafile.dat' USE LOGFILE GROUP lg - INITIAL_SIZE 12M + INITIAL_SIZE 20M ENGINE NDB; CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) @@ -130,37 +140,11 @@ select length(@x0),length(@b2),length(@d2) from dual; DROP TABLE test.t1, test.t2; - - ALTER TABLESPACE ts1 - DROP DATAFILE './table_space1/datafile.dat' - ENGINE NDB; - DROP TABLESPACE ts1 ENGINE NDB; - - ALTER TABLESPACE ts2 - DROP DATAFILE './table_space2/datafile.dat' - ENGINE NDB; - DROP TABLESPACE ts2 ENGINE NDB; - - DROP LOGFILE GROUP lg - ENGINE=NDB; - #### Insert, Update, Delete from NDB table with BLOB fields #### --echo ***** --echo **** Insert, Update, Delete from NDB table with BLOB fields --echo ***** - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - set @vc1 = repeat('a', 200); set @vc2 = repeat('b', 500); set @vc3 = repeat('c', 1000); @@ -173,8 +157,10 @@ set @bb3 = repeat('3', 10000); set @bb4 = repeat('4', 40000); set @bb5 = repeat('5', 50000); -select length(@vc1),length(@vc2),length(@vc3),length(@vc4),length(@vc5) from dual; -select length(@bb1),length(@bb2),length(@bb3),length(@bb4),length(@bb5) from dual; +select length(@vc1),length(@vc2),length(@vc3),length(@vc4),length(@vc5) +from dual; +select length(@bb1),length(@bb2),length(@bb3),length(@bb4),length(@bb5) +from dual; CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; @@ -220,31 +206,11 @@ select length(@bb1),length(@bb2),length(@bb3),length(@bb4),length(@bb5) from dua DROP TABLE test.t1; - ALTER TABLESPACE ts1 - DROP DATAFILE './table_space1/datafile.dat' - ENGINE NDB; - DROP TABLESPACE ts1 ENGINE NDB; - - DROP LOGFILE GROUP lg - ENGINE=NDB; - ##### Create Stored procedures that use disk based tables ##### --echo ***** --echo **** Create Stored procedures that use disk based tables --echo ***** - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - delimiter //; CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB// @@ -272,31 +238,12 @@ delimiter ;// DROP PROCEDURE test.sp2; DROP TABLE test.t1; - ALTER TABLESPACE ts1 - DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; - DROP TABLESPACE ts1 ENGINE=NDB; - - DROP LOGFILE GROUP lg - ENGINE=NDB; #### Create function that operate on disk based tables #### --echo ***** --echo ***** Create function that operate on disk based tables --echo ***** - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; 
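With the per-section CREATE/DROP blocks removed, every remaining section of this test reuses the lg logfile group and ts1 tablespace created once at the top of the file. One hedged way to confirm that those files are actually registered with the cluster, assuming a 5.1-style server where the ndbcluster engine populates INFORMATION_SCHEMA.FILES, is:

SELECT FILE_NAME, FILE_TYPE, TABLESPACE_NAME, LOGFILE_GROUP_NAME,
       FREE_EXTENTS, TOTAL_EXTENTS
FROM INFORMATION_SCHEMA.FILES
WHERE ENGINE = 'ndbcluster';        # lists the datafiles and undofiles known to NDB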
- CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; let $1=100; @@ -335,31 +282,11 @@ delimiter //; DROP FUNCTION test.fn2; DROP TABLE test.t1; - ALTER TABLESPACE ts1 - DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; - DROP TABLESPACE ts1 ENGINE=NDB; - - DROP LOGFILE GROUP lg - ENGINE=NDB; - #### Create triggers that operate on disk based tables #### --echo ***** --echo ***** Create triggers that operate on disk based tables --echo ***** - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; @@ -382,30 +309,11 @@ delimiter //; DROP TRIGGER test.trg1; DROP TABLE test.t1; - ALTER TABLESPACE ts1 - DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; - DROP TABLESPACE ts1 ENGINE=NDB; - - DROP LOGFILE GROUP lg - ENGINE=NDB; - #### Create, update views that operate on disk based tables #### --echo ***** --echo ***** Create, update views that operate on disk based tables --echo ***** - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; @@ -428,32 +336,14 @@ delimiter //; DELETE FROM test.t1; DROP TABLE test.t1; - ALTER TABLESPACE ts1 - DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; - DROP TABLESPACE ts1 ENGINE=NDB; - - DROP LOGFILE GROUP lg - ENGINE=NDB; - #### Create and use disk based table that use auto inc #### --echo ***** --echo ***** Create and use disk based table that use auto inc --echo ***** - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - - CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, + a2 VARCHAR(256), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; let $1=10; disable_query_log; while ($1) @@ -466,33 +356,16 @@ delimiter //; DELETE FROM test.t1; DROP TABLE test.t1; - ALTER TABLESPACE ts1 - DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; - DROP TABLESPACE ts1 ENGINE=NDB; - - DROP LOGFILE GROUP lg - ENGINE=NDB; #### Create test that use transaction (commit, rollback) #### --echo ***** --echo ***** Create test that use transaction (commit, rollback) --echo ***** - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - SET AUTOCOMMIT=0; - CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, + a2 VARCHAR(256), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; INSERT INTO 
test.t1 VALUES(NULL, "aaaaa1", "bbbbb1"); COMMIT; @@ -507,7 +380,9 @@ delimiter //; # Now do the same thing with START TRANSACTION without using AUTOCOMMIT. - CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, + a2 VARCHAR(256), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; START TRANSACTION; INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1"); @@ -522,14 +397,6 @@ delimiter //; DELETE FROM test.t1; DROP TABLE test.t1; - ALTER TABLESPACE ts1 - DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; - DROP TABLESPACE ts1 ENGINE=NDB; - - DROP LOGFILE GROUP lg - ENGINE=NDB; - #### Create test that uses locks #### --echo ***** --echo ***** Create test that uses locks @@ -538,22 +405,12 @@ delimiter //; connect (con1,localhost,root,,); connect (con2,localhost,root,,); - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - # connection con1; --disable_warnings drop table if exists test.t1; - CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, + a2 VARCHAR(256), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; --enable_warnings LOCK TABLES test.t1 write; @@ -576,14 +433,6 @@ delimiter //; #connection defualt; - ALTER TABLESPACE ts1 - DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; - DROP TABLESPACE ts1 ENGINE=NDB; - - DROP LOGFILE GROUP lg - ENGINE=NDB; - #### Create large disk base table, do random queries, check cache hits #### --echo ***** --echo ***** Create large disk base table, do random queries, check cache hits @@ -626,18 +475,6 @@ set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); select length(@x0),length(@b1),length(@d1) from dual; select length(@x0),length(@b2),length(@d2) from dual; - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(1000), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; @@ -664,38 +501,22 @@ select length(@x0),length(@b2),length(@d2) from dual; DELETE FROM test.t1; DROP TABLE test.t1; - ALTER TABLESPACE ts1 - DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; - DROP TABLESPACE ts1 ENGINE=NDB; - - DROP LOGFILE GROUP lg - ENGINE=NDB; - -#### Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), TRUNCATE #### +#### Create test that uses COUNT(), SUM(), MAX(), #### +##### MIN(), NOW(), USER(), TRUNCATE #### --echo ***** ---echo ***** Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), TRUNCATE +--echo ***** Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), +--echo ***** USER(), TRUNCATE --echo ***** - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - - CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB, a4 DATE, a5 CHAR(250)) + 
CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), + a3 BLOB, a4 DATE, a5 CHAR(250)) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; let $1=100; disable_query_log; while ($1) { - eval insert into test.t1 values($1, "aaaaaaaaaaaaaaaa$1", "bbbbbbbbbbbbbbbbbb$1", '2006-06-20' , USER()); + eval insert into test.t1 values($1, "aaaaaaaaaaaaaaaa$1", + "bbbbbbbbbbbbbbbbbb$1", '2006-06-20' , USER()); dec $1; } enable_query_log; @@ -716,6 +537,11 @@ select length(@x0),length(@b2),length(@d2) from dual; ENGINE=NDB; DROP TABLESPACE ts1 ENGINE=NDB; + ALTER TABLESPACE ts2 + DROP DATAFILE './table_space2/datafile.dat' + ENGINE=NDB; + DROP TABLESPACE ts2 ENGINE=NDB; + DROP LOGFILE GROUP lg ENGINE=NDB; diff --git a/mysql-test/t/ndb_multi.test b/mysql-test/t/ndb_multi.test index 28c6fd4d3d7..75558ba24fe 100644 --- a/mysql-test/t/ndb_multi.test +++ b/mysql-test/t/ndb_multi.test @@ -1,7 +1,6 @@ -- source include/have_ndb.inc -- source include/have_multi_ndb.inc -- source include/not_embedded.inc --- source include/have_binlog_format_statement.inc --disable_warnings connection server2; diff --git a/storage/ndb/include/kernel/signaldata/CreateIndx.hpp b/storage/ndb/include/kernel/signaldata/CreateIndx.hpp index 8a321d4a657..cd4084d2248 100644 --- a/storage/ndb/include/kernel/signaldata/CreateIndx.hpp +++ b/storage/ndb/include/kernel/signaldata/CreateIndx.hpp @@ -208,7 +208,10 @@ public: NotUnique = 4251, AllocationError = 4252, CreateIndexTableFailed = 4253, - DuplicateAttributes = 4258 + DuplicateAttributes = 4258, + TableIsTemporary = 776, + TableIsNotTemporary = 777, + NoLoggingTemporaryIndex = 778 }; CreateIndxConf m_conf; diff --git a/storage/ndb/include/kernel/signaldata/CreateTable.hpp b/storage/ndb/include/kernel/signaldata/CreateTable.hpp index e5e78bddd49..b731742f75c 100644 --- a/storage/ndb/include/kernel/signaldata/CreateTable.hpp +++ b/storage/ndb/include/kernel/signaldata/CreateTable.hpp @@ -97,7 +97,8 @@ public: VarsizeBitfieldNotSupported = 757, NotATablespace = 758, InvalidTablespaceVersion = 759, - OutOfStringBuffer = 773 + OutOfStringBuffer = 773, + NoLoggingTemporaryTable = 778 }; private: diff --git a/storage/ndb/include/kernel/signaldata/DiAddTab.hpp b/storage/ndb/include/kernel/signaldata/DiAddTab.hpp index 47456f11842..dc3f976bb73 100644 --- a/storage/ndb/include/kernel/signaldata/DiAddTab.hpp +++ b/storage/ndb/include/kernel/signaldata/DiAddTab.hpp @@ -30,7 +30,7 @@ class DiAddTabReq { */ friend class Dbdih; public: - STATIC_CONST( SignalLength = 9 ); + STATIC_CONST( SignalLength = 10 ); SECTION( FRAGMENTATION = 0 ); SECTION( TS_RANGE = 0 ); @@ -40,10 +40,11 @@ private: Uint32 fragType; Uint32 kValue; Uint32 noOfReplicas; //Currently not used - Uint32 storedTable; + Uint32 loggedTable; Uint32 tableType; Uint32 schemaVersion; Uint32 primaryTableId; + Uint32 temporaryTable; }; class DiAddTabRef { diff --git a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp index 1382b09eabf..86186929394 100644 --- a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp +++ b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp @@ -118,6 +118,8 @@ public: FrmLen = 26, FrmData = 27, + TableTemporaryFlag = 28, //Default not Temporary + FragmentCount = 128, // No of fragments in table (!fragment replicas) FragmentDataLen = 129, FragmentData = 130, // CREATE_FRAGMENTATION reply @@ -278,7 +280,7 @@ public: // Object store for translating from/to API enum ObjectStore { StoreUndefined = 0, - StoreTemporary = 1, + StoreNotLogged = 1, 
StorePermanent = 2 }; @@ -297,6 +299,7 @@ public: char PrimaryTable[MAX_TAB_NAME_SIZE]; // Only used when "index" Uint32 PrimaryTableId; Uint32 TableLoggedFlag; + Uint32 TableTemporaryFlag; Uint32 NoOfKeyAttr; Uint32 NoOfAttributes; Uint32 NoOfNullable; diff --git a/storage/ndb/include/kernel/signaldata/ListTables.hpp b/storage/ndb/include/kernel/signaldata/ListTables.hpp index 7fbfab1294c..4c60e04ec75 100644 --- a/storage/ndb/include/kernel/signaldata/ListTables.hpp +++ b/storage/ndb/include/kernel/signaldata/ListTables.hpp @@ -39,10 +39,16 @@ public: BitmaskImpl::setField(1, &data, 12, 8, val); } static Uint32 getTableStore(Uint32 data) { - return BitmaskImpl::getField(1, &data, 20, 4); + return BitmaskImpl::getField(1, &data, 20, 3); } static void setTableStore(Uint32& data, Uint32 val) { - BitmaskImpl::setField(1, &data, 20, 4, val); + BitmaskImpl::setField(1, &data, 20, 3, val); + } + static Uint32 getTableTemp(Uint32 data) { + return BitmaskImpl::getField(1, &data, 23, 1); + } + static void setTableTemp(Uint32& data, Uint32 val) { + BitmaskImpl::setField(1, &data, 23, 1, val); } static Uint32 getTableState(Uint32 data) { return BitmaskImpl::getField(1, &data, 24, 4); @@ -161,6 +167,12 @@ public: void setTableState(unsigned pos, Uint32 val) { ListTablesData::setTableState(tableData[pos], val); } + static Uint32 getTableTemp(Uint32 data) { + return ListTablesData::getTableTemp(data); + } + void setTableTemp(unsigned pos, Uint32 val) { + ListTablesData::setTableTemp(tableData[pos], val); + } }; #endif diff --git a/storage/ndb/include/ndb_constants.h b/storage/ndb/include/ndb_constants.h index 0626fcb7bf3..750587507d4 100644 --- a/storage/ndb/include/ndb_constants.h +++ b/storage/ndb/include/ndb_constants.h @@ -84,5 +84,11 @@ #define NDB_STORAGETYPE_MEMORY 0 #define NDB_STORAGETYPE_DISK 1 - + +/* + * Table temporary status. + */ +#define NDB_TEMP_TAB_PERMANENT 0 +#define NDB_TEMP_TAB_TEMPORARY 1 + #endif diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp index 35b0d927bda..9e73bc00712 100644 --- a/storage/ndb/include/ndbapi/NdbDictionary.hpp +++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp @@ -136,7 +136,7 @@ public: */ enum Store { StoreUndefined = 0, ///< Undefined - StoreTemporary = 1, ///< Object or data deleted on system restart + StoreNotLogged = 1, ///< Object or data deleted on system restart StorePermanent = 2 ///< Permanent. 
logged to disk }; @@ -917,6 +917,9 @@ public: int createTableInDb(Ndb*, bool existingEqualIsOk = true) const ; int getReplicaCount() const ; + + bool getTemporary(); + void setTemporary(bool); #endif private: @@ -1104,6 +1107,9 @@ public: #ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED void setStoredIndex(bool x) { setLogging(x); } bool getStoredIndex() const { return getLogging(); } + + bool getTemporary(); + void setTemporary(bool); #endif /** @} *******************************************************************/ @@ -1564,7 +1570,8 @@ public: unsigned id; ///< Id of object Object::Type type; ///< Type of object Object::State state; ///< State of object - Object::Store store; ///< How object is stored + Object::Store store; ///< How object is logged + Uint32 temp; ///< Temporary status of object char * database; ///< In what database the object resides char * schema; ///< What schema the object is defined in char * name; ///< Name of object @@ -1573,6 +1580,7 @@ public: type(Object::TypeUndefined), state(Object::StateUndefined), store(Object::StoreUndefined), + temp(NDB_TEMP_TAB_PERMANENT), database(0), schema(0), name(0) { diff --git a/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp b/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp index 2ed97892488..62f372025ff 100644 --- a/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp +++ b/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp @@ -26,6 +26,7 @@ DictTabInfo::TableMapping[] = { DTIMAPS(Table, PrimaryTable, PrimaryTable, 0, MAX_TAB_NAME_SIZE), DTIMAP(Table, PrimaryTableId, PrimaryTableId), DTIMAP2(Table, TableLoggedFlag, TableLoggedFlag, 0, 1), + DTIMAP2(Table, TableTemporaryFlag, TableTemporaryFlag, 0, 1), DTIMAP2(Table, TableKValue, TableKValue, 6, 6), DTIMAP2(Table, MinLoadFactor, MinLoadFactor, 0, 90), DTIMAP2(Table, MaxLoadFactor, MaxLoadFactor, 25, 110), @@ -122,6 +123,7 @@ DictTabInfo::Table::init(){ memset(PrimaryTable, 0, sizeof(PrimaryTable));//PrimaryTable[0] = 0; // Only used when "index" PrimaryTableId = RNIL; TableLoggedFlag = 1; + TableTemporaryFlag = 0; NoOfKeyAttr = 0; NoOfAttributes = 0; NoOfNullable = 0; diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 45eed73455c..150dc75f90c 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -448,6 +448,8 @@ Dbdict::packTableIntoPages(SimpleProperties::Writer & w, !!(tablePtr.p->m_bits & TableRecord::TR_RowGCI)); w.add(DictTabInfo::RowChecksumFlag, !!(tablePtr.p->m_bits & TableRecord::TR_RowChecksum)); + w.add(DictTabInfo::TableTemporaryFlag, + !!(tablePtr.p->m_bits & TableRecord::TR_Temporary)); w.add(DictTabInfo::MinLoadFactor, tablePtr.p->minLoadFactor); w.add(DictTabInfo::MaxLoadFactor, tablePtr.p->maxLoadFactor); @@ -1086,8 +1088,8 @@ void Dbdict::closeReadTableConf(Signal* signal, /* ---------------------------------------------------------------- */ void Dbdict::updateSchemaState(Signal* signal, Uint32 tableId, - SchemaFile::TableEntry* te, Callback* callback){ - + SchemaFile::TableEntry* te, Callback* callback, + bool savetodisk){ jam(); ndbrequire(tableId < c_tableRecordPool.getSize()); XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; @@ -1130,6 +1132,12 @@ Dbdict::updateSchemaState(Signal* signal, Uint32 tableId, jam(); ok = true; break; + case SchemaFile::TEMPORARY_TABLE_COMMITTED: + jam(); + ndbrequire(oldState == SchemaFile::ADD_STARTED || + oldState == 
SchemaFile::TEMPORARY_TABLE_COMMITTED); + ok = true; + break; case SchemaFile::INIT: jam(); ok = true; @@ -1140,16 +1148,23 @@ Dbdict::updateSchemaState(Signal* signal, Uint32 tableId, * tableEntry = * te; computeChecksum(xsf, tableId / NDB_SF_PAGE_ENTRIES); - ndbrequire(c_writeSchemaRecord.inUse == false); - c_writeSchemaRecord.inUse = true; - - c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage; - c_writeSchemaRecord.newFile = false; - c_writeSchemaRecord.firstPage = tableId / NDB_SF_PAGE_ENTRIES; - c_writeSchemaRecord.noOfPages = 1; - c_writeSchemaRecord.m_callback = * callback; - - startWriteSchemaFile(signal); + if (savetodisk) + { + ndbrequire(c_writeSchemaRecord.inUse == false); + c_writeSchemaRecord.inUse = true; + + c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage; + c_writeSchemaRecord.newFile = false; + c_writeSchemaRecord.firstPage = tableId / NDB_SF_PAGE_ENTRIES; + c_writeSchemaRecord.noOfPages = 1; + c_writeSchemaRecord.m_callback = * callback; + + startWriteSchemaFile(signal); + } + else + { + execute(signal, *callback, 0); + } } void Dbdict::startWriteSchemaFile(Signal* signal) @@ -2722,6 +2737,13 @@ void Dbdict::checkSchemaStatus(Signal* signal) newEntry->m_tableState = SchemaFile::INIT; restartDropTab(signal, tableId); return; + + case SchemaFile::TEMPORARY_TABLE_COMMITTED: + // Temporary table is never written to disk, so just set to INIT. + jam(); + ok = true; + newEntry->m_tableState = SchemaFile::INIT; + break; }//switch ndbrequire(ok); break; @@ -2752,6 +2774,11 @@ void Dbdict::checkSchemaStatus(Signal* signal) newEntry->m_tableState = SchemaFile::INIT; restartDropTab(signal, tableId); return; + case SchemaFile::TEMPORARY_TABLE_COMMITTED: + jam(); + ok = true; + newEntry->m_tableState = SchemaFile::INIT; + break; } ndbrequire(ok); break; @@ -2812,6 +2839,17 @@ void Dbdict::checkSchemaStatus(Signal* signal) restartCreateTab(signal, tableId, oldEntry, newEntry, false); return; }//if + case SchemaFile::TEMPORARY_TABLE_COMMITTED: + jam(); + ok = true; + // For NR, we must re-create the table. + // For SR, we do nothing as the table was never saved to disk. 
+ if(!c_systemRestart) + { + restartCreateTab(signal, tableId, oldEntry, newEntry, false); + return; + } + break; } ndbrequire(ok); break; @@ -2839,6 +2877,11 @@ void Dbdict::checkSchemaStatus(Signal* signal) newEntry->m_tableState = SchemaFile::INIT; restartDropTab(signal, tableId); return; + case SchemaFile::TEMPORARY_TABLE_COMMITTED: + jam(); + ok = true; + newEntry->m_tableState = SchemaFile::INIT; + break; } ndbrequire(ok); break; @@ -2855,6 +2898,15 @@ void Dbdict::checkSchemaStatus(Signal* signal) jam(); case SchemaFile::DROP_TABLE_COMMITTED: jam(); + case SchemaFile::TEMPORARY_TABLE_COMMITTED: + jam(); + ok = true; + if(!c_systemRestart) + { + restartCreateTab(signal, tableId, oldEntry, newEntry, false); + return; + } + break; case SchemaFile::TABLE_ADD_COMMITTED: jam(); ok = true; @@ -2885,6 +2937,37 @@ void Dbdict::checkSchemaStatus(Signal* signal) ndbrequire(ok); break; } + case SchemaFile::TEMPORARY_TABLE_COMMITTED: { + jam(); + bool ok = false; + switch(oldSchemaState){ + case SchemaFile::INIT: + jam(); + case SchemaFile::DROP_TABLE_COMMITTED: + jam(); + case SchemaFile::ADD_STARTED: + jam(); + case SchemaFile::TABLE_ADD_COMMITTED: + jam(); + case SchemaFile::DROP_TABLE_STARTED: + jam(); + case SchemaFile::ALTER_TABLE_COMMITTED: + jam(); + case SchemaFile::TEMPORARY_TABLE_COMMITTED: + jam(); + ok = true; + if(!c_systemRestart) + { + restartCreateTab(signal, tableId, oldEntry, newEntry, false); + return; + } else { + newEntry->m_tableState = SchemaFile::INIT; + } + break; + } + ndbrequire(ok); + break; + } } } @@ -3119,6 +3202,8 @@ Dbdict::execGET_TABINFO_CONF(Signal* signal){ handleTabInfoInit(r, &parseRecord); ndbrequire(parseRecord.errorCode == 0); + // save to disk + ndbrequire(tableId < c_tableRecordPool.getSize()); XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0]; SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tableId); @@ -4205,6 +4290,7 @@ Dbdict::execALTER_TAB_REQ(Signal * signal) SegmentedSectionPtr tabInfoPtr; signal->getSection(tabInfoPtr, AlterTabReq::DICT_TAB_INFO); alterTabPtr.p->m_tabInfoPtrI = tabInfoPtr.i; + bool savetodisk = !(tablePtr.p->m_bits & TableRecord::TR_Temporary); signal->header.m_noOfSections = 0; @@ -4216,7 +4302,10 @@ Dbdict::execALTER_TAB_REQ(Signal * signal) SchemaFile::TableEntry tabEntry; tabEntry.m_tableVersion = tableVersion; tabEntry.m_tableType = tablePtr.p->tableType; - tabEntry.m_tableState = SchemaFile::ALTER_TABLE_COMMITTED; + if (savetodisk) + tabEntry.m_tableState = SchemaFile::ALTER_TABLE_COMMITTED; + else + tabEntry.m_tableState = SchemaFile::TEMPORARY_TABLE_COMMITTED; tabEntry.m_gcp = gci; tabEntry.m_info_words = tabInfoPtr.sz; memset(tabEntry.m_unused, 0, sizeof(tabEntry.m_unused)); @@ -4226,7 +4315,7 @@ Dbdict::execALTER_TAB_REQ(Signal * signal) callback.m_callbackFunction = safe_cast(&Dbdict::alterTab_writeSchemaConf); - updateSchemaState(signal, tableId, &tabEntry, &callback); + updateSchemaState(signal, tableId, &tabEntry, &callback, savetodisk); break; } case(AlterTabReq::AlterTableRevert): { @@ -4703,9 +4792,19 @@ Dbdict::alterTab_writeSchemaConf(Signal* signal, callback.m_callbackFunction = safe_cast(&Dbdict::alterTab_writeTableConf); - SegmentedSectionPtr tabInfoPtr; - getSection(tabInfoPtr, alterTabPtr.p->m_tabInfoPtrI); - writeTableFile(signal, tableId, tabInfoPtr, &callback); + TableRecordPtr tablePtr; + c_tableRecordPool.getPtr(tablePtr, tableId); + bool savetodisk = !(tablePtr.p->m_bits & TableRecord::TR_Temporary); + if (savetodisk) + { + SegmentedSectionPtr tabInfoPtr; + 
getSection(tabInfoPtr, alterTabPtr.p->m_tabInfoPtrI); + writeTableFile(signal, tableId, tabInfoPtr, &callback); + } + else + { + execute(signal, callback, 0); + } } void @@ -5136,7 +5235,8 @@ Dbdict::createTab_prepare(Signal* signal, CreateTabReq * req){ callback.m_callbackFunction = safe_cast(&Dbdict::createTab_writeSchemaConf1); - updateSchemaState(signal, tableId, &tabEntry, &callback); + bool savetodisk = !(tabPtr.p->m_bits & TableRecord::TR_Temporary); + updateSchemaState(signal, tableId, &tabEntry, &callback, savetodisk); } void getSection(SegmentedSectionPtr & ptr, Uint32 i); @@ -5155,9 +5255,19 @@ Dbdict::createTab_writeSchemaConf1(Signal* signal, callback.m_callbackFunction = safe_cast(&Dbdict::createTab_writeTableConf); - SegmentedSectionPtr tabInfoPtr; - getSection(tabInfoPtr, createTabPtr.p->m_tabInfoPtrI); - writeTableFile(signal, createTabPtr.p->m_tablePtrI, tabInfoPtr, &callback); + TableRecordPtr tabPtr; + c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI); + bool savetodisk = !(tabPtr.p->m_bits & TableRecord::TR_Temporary); + if (savetodisk) + { + SegmentedSectionPtr tabInfoPtr; + getSection(tabInfoPtr, createTabPtr.p->m_tabInfoPtrI); + writeTableFile(signal, createTabPtr.p->m_tablePtrI, tabInfoPtr, &callback); + } + else + { + execute(signal, callback, 0); + } #if 0 createTabPtr.p->m_tabInfoPtrI = RNIL; signal->setSection(tabInfoPtr, 0); @@ -5203,10 +5313,11 @@ Dbdict::createTab_dih(Signal* signal, req->fragType = tabPtr.p->fragmentType; req->kValue = tabPtr.p->kValue; req->noOfReplicas = 0; - req->storedTable = !!(tabPtr.p->m_bits & TableRecord::TR_Logged); + req->loggedTable = !!(tabPtr.p->m_bits & TableRecord::TR_Logged); req->tableType = tabPtr.p->tableType; req->schemaVersion = tabPtr.p->tableVersion; req->primaryTableId = tabPtr.p->primaryTableId; + req->temporaryTable = !!(tabPtr.p->m_bits & TableRecord::TR_Temporary); /* Behöver fiska upp fragDataPtr från table object istället @@ -5618,11 +5729,16 @@ Dbdict::createTab_commit(Signal * signal, CreateTabReq * req){ TableRecordPtr tabPtr; c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI); - + bool savetodisk = !(tabPtr.p->m_bits & TableRecord::TR_Temporary); + SchemaFile::TableEntry tabEntry; tabEntry.m_tableVersion = tabPtr.p->tableVersion; tabEntry.m_tableType = tabPtr.p->tableType; - tabEntry.m_tableState = SchemaFile::TABLE_ADD_COMMITTED; + if (savetodisk) + tabEntry.m_tableState = SchemaFile::TABLE_ADD_COMMITTED; + else + tabEntry.m_tableState = SchemaFile::TEMPORARY_TABLE_COMMITTED; + tabEntry.m_gcp = tabPtr.p->gciTableCreated; tabEntry.m_info_words = tabPtr.p->packedSize; memset(tabEntry.m_unused, 0, sizeof(tabEntry.m_unused)); @@ -5632,7 +5748,7 @@ Dbdict::createTab_commit(Signal * signal, CreateTabReq * req){ callback.m_callbackFunction = safe_cast(&Dbdict::createTab_writeSchemaConf2); - updateSchemaState(signal, tabPtr.i, &tabEntry, &callback); + updateSchemaState(signal, tabPtr.i, &tabEntry, &callback, savetodisk); } void @@ -5967,6 +6083,10 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, #endif } + // Disallow logging of a temporary table. + tabRequire(!(c_tableDesc.TableTemporaryFlag && c_tableDesc.TableLoggedFlag), + CreateTableRef::NoLoggingTemporaryTable); + tablePtr.p->noOfAttributes = c_tableDesc.NoOfAttributes; tablePtr.p->m_bits |= (c_tableDesc.TableLoggedFlag ? TableRecord::TR_Logged : 0); @@ -5974,6 +6094,8 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, (c_tableDesc.RowChecksumFlag ? 
TableRecord::TR_RowChecksum : 0); tablePtr.p->m_bits |= (c_tableDesc.RowGCIFlag ? TableRecord::TR_RowGCI : 0); + tablePtr.p->m_bits |= + (c_tableDesc.TableTemporaryFlag ? TableRecord::TR_Temporary : 0); tablePtr.p->minLoadFactor = c_tableDesc.MinLoadFactor; tablePtr.p->maxLoadFactor = c_tableDesc.MaxLoadFactor; tablePtr.p->fragmentType = (DictTabInfo::FragmentType)c_tableDesc.FragmentType; @@ -6775,21 +6897,31 @@ Dbdict::execPREP_DROP_TAB_REQ(Signal* signal){ SchemaFile::TableState tabState = (SchemaFile::TableState)tableEntry->m_tableState; ndbrequire(tabState == SchemaFile::TABLE_ADD_COMMITTED || - tabState == SchemaFile::ALTER_TABLE_COMMITTED); + tabState == SchemaFile::ALTER_TABLE_COMMITTED || + tabState == SchemaFile::TEMPORARY_TABLE_COMMITTED); tableEntry->m_tableState = SchemaFile::DROP_TABLE_STARTED; computeChecksum(xsf, tablePtr.i / NDB_SF_PAGE_ENTRIES); - - ndbrequire(c_writeSchemaRecord.inUse == false); - c_writeSchemaRecord.inUse = true; - - c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage; - c_writeSchemaRecord.newFile = false; - c_writeSchemaRecord.firstPage = tablePtr.i / NDB_SF_PAGE_ENTRIES; - c_writeSchemaRecord.noOfPages = 1; - c_writeSchemaRecord.m_callback.m_callbackData = dropTabPtr.p->key; - c_writeSchemaRecord.m_callback.m_callbackFunction = - safe_cast(&Dbdict::prepDropTab_writeSchemaConf); - startWriteSchemaFile(signal); + + bool savetodisk = !(tablePtr.p->m_bits & TableRecord::TR_Temporary); + Callback callback; + callback.m_callbackData = dropTabPtr.p->key; + callback.m_callbackFunction = safe_cast(&Dbdict::prepDropTab_writeSchemaConf); + if (savetodisk) + { + ndbrequire(c_writeSchemaRecord.inUse == false); + c_writeSchemaRecord.inUse = true; + + c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage; + c_writeSchemaRecord.newFile = false; + c_writeSchemaRecord.firstPage = tablePtr.i / NDB_SF_PAGE_ENTRIES; + c_writeSchemaRecord.noOfPages = 1; + c_writeSchemaRecord.m_callback = callback; + startWriteSchemaFile(signal); + } + else + { + execute(signal, callback, 0); + } } void @@ -6960,17 +7092,28 @@ Dbdict::dropTab_complete(Signal* signal, ndbrequire(tabState == SchemaFile::DROP_TABLE_STARTED); tableEntry->m_tableState = SchemaFile::DROP_TABLE_COMMITTED; computeChecksum(xsf, tableId / NDB_SF_PAGE_ENTRIES); - - ndbrequire(c_writeSchemaRecord.inUse == false); - c_writeSchemaRecord.inUse = true; - c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage; - c_writeSchemaRecord.firstPage = tableId / NDB_SF_PAGE_ENTRIES; - c_writeSchemaRecord.noOfPages = 1; - c_writeSchemaRecord.m_callback.m_callbackData = dropTabPtr.p->key; - c_writeSchemaRecord.m_callback.m_callbackFunction = - safe_cast(&Dbdict::dropTab_writeSchemaConf); - startWriteSchemaFile(signal); + TableRecordPtr tablePtr; + c_tableRecordPool.getPtr(tablePtr, tableId); + bool savetodisk = !(tablePtr.p->m_bits & TableRecord::TR_Temporary); + Callback callback; + callback.m_callbackData = dropTabPtr.p->key; + callback.m_callbackFunction = safe_cast(&Dbdict::dropTab_writeSchemaConf); + if (savetodisk) + { + ndbrequire(c_writeSchemaRecord.inUse == false); + c_writeSchemaRecord.inUse = true; + + c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage; + c_writeSchemaRecord.firstPage = tableId / NDB_SF_PAGE_ENTRIES; + c_writeSchemaRecord.noOfPages = 1; + c_writeSchemaRecord.m_callback = callback; + startWriteSchemaFile(signal); + } + else + { + execute(signal, callback, 0); + } } void @@ -7260,9 +7403,12 @@ void Dbdict::execGET_TABINFOREQ(Signal* signal) sendGET_TABINFOREF(signal, req, 
GetTabInfoRef::TableNotDefined); return; }//if - + + // If istable/index, allow ADD_STARTED (not to ref) + if (objEntry->m_tableState != SchemaFile::TABLE_ADD_COMMITTED && - objEntry->m_tableState != SchemaFile::ALTER_TABLE_COMMITTED){ + objEntry->m_tableState != SchemaFile::ALTER_TABLE_COMMITTED && + objEntry->m_tableState != SchemaFile::TEMPORARY_TABLE_COMMITTED){ jam(); sendGET_TABINFOREF(signal, req, GetTabInfoRef::TableNotDefined); return; @@ -7281,6 +7427,8 @@ void Dbdict::execGET_TABINFOREQ(Signal* signal) sendGET_TABINFOREF(signal, req, GetTabInfoRef::TableNotDefined); return; } + ndbrequire(objEntry->m_tableState == SchemaFile::TEMPORARY_TABLE_COMMITTED || + !(tabPtr.p->m_bits & TableRecord::TR_Temporary)); } c_retrieveRecord.busyState = true; @@ -7463,12 +7611,18 @@ Dbdict::execLIST_TABLES_REQ(Signal* signal) break; } } - // store + // Logging status if (! (tablePtr.p->m_bits & TableRecord::TR_Logged)) { - conf->setTableStore(pos, DictTabInfo::StoreTemporary); + conf->setTableStore(pos, DictTabInfo::StoreNotLogged); } else { conf->setTableStore(pos, DictTabInfo::StorePermanent); } + // Temporary status + if (tablePtr.p->m_bits & TableRecord::TR_Temporary) { + conf->setTableTemp(pos, NDB_TEMP_TAB_TEMPORARY); + } else { + conf->setTableTemp(pos, NDB_TEMP_TAB_PERMANENT); + } pos++; } if(DictTabInfo::isTrigger(type)){ @@ -7489,7 +7643,7 @@ Dbdict::execLIST_TABLES_REQ(Signal* signal) conf->setTableState(pos, DictTabInfo::StateBroken); break; } - conf->setTableStore(pos, DictTabInfo::StoreTemporary); + conf->setTableStore(pos, DictTabInfo::StoreNotLogged); pos++; } if (DictTabInfo::isFilegroup(type)){ @@ -7666,7 +7820,8 @@ Dbdict::execCREATE_INDX_REQ(Signal* signal) return; } memcpy(opPtr.p->m_indexName, c_tableDesc.TableName, MAX_TAB_NAME_SIZE); - opPtr.p->m_storedIndex = c_tableDesc.TableLoggedFlag; + opPtr.p->m_loggedIndex = c_tableDesc.TableLoggedFlag; + opPtr.p->m_temporaryIndex = c_tableDesc.TableTemporaryFlag; releaseSections(signal); // master expects to hear from all if (opPtr.p->m_isMaster) @@ -7828,6 +7983,34 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr) opPtr.p->m_errorLine = __LINE__; return; } + + // Check that the temporary status of index is compatible with table. + if (!opPtr.p->m_temporaryIndex && + tablePtr.p->m_bits & TableRecord::TR_Temporary) + { + jam(); + opPtr.p->m_errorCode= CreateIndxRef::TableIsTemporary; + opPtr.p->m_errorLine= __LINE__; + return; + } + if (opPtr.p->m_temporaryIndex && + !(tablePtr.p->m_bits & TableRecord::TR_Temporary)) + { + // This could be implemented later, but mysqld does currently not detect + // that the index disappears after SR, and it appears not too useful. + jam(); + opPtr.p->m_errorCode= CreateIndxRef::TableIsNotTemporary; + opPtr.p->m_errorLine= __LINE__; + return; + } + if (opPtr.p->m_temporaryIndex && opPtr.p->m_loggedIndex) + { + jam(); + opPtr.p->m_errorCode= CreateIndxRef::NoLoggingTemporaryIndex; + opPtr.p->m_errorLine= __LINE__; + return; + } + // compute index table record TableRecord indexRec; TableRecordPtr indexPtr; @@ -7836,16 +8019,20 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr) initialiseTableRecord(indexPtr); indexPtr.p->m_bits = TableRecord::TR_RowChecksum; if (req->getIndexType() == DictTabInfo::UniqueHashIndex) { - indexPtr.p->m_bits |= (opPtr.p->m_storedIndex ? TableRecord::TR_Logged:0); + indexPtr.p->m_bits |= (opPtr.p->m_loggedIndex ? TableRecord::TR_Logged:0); + indexPtr.p->m_bits |= + (opPtr.p->m_temporaryIndex ? 
TableRecord::TR_Temporary : 0); indexPtr.p->fragmentType = DictTabInfo::DistrKeyUniqueHashIndex; } else if (req->getIndexType() == DictTabInfo::OrderedIndex) { // first version will not supported logging - if (opPtr.p->m_storedIndex) { + if (opPtr.p->m_loggedIndex) { jam(); opPtr.p->m_errorCode = CreateIndxRef::InvalidIndexType; opPtr.p->m_errorLine = __LINE__; return; } + indexPtr.p->m_bits |= + (opPtr.p->m_temporaryIndex ? TableRecord::TR_Temporary : 0); indexPtr.p->fragmentType = DictTabInfo::DistrKeyOrderedIndex; } else { jam(); @@ -7930,6 +8117,7 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr) // write index table w.add(DictTabInfo::TableName, opPtr.p->m_indexName); w.add(DictTabInfo::TableLoggedFlag, !!(indexPtr.p->m_bits & TableRecord::TR_Logged)); + w.add(DictTabInfo::TableTemporaryFlag, !!(indexPtr.p->m_bits & TableRecord::TR_Temporary)); w.add(DictTabInfo::FragmentTypeVal, indexPtr.p->fragmentType); w.add(DictTabInfo::TableTypeVal, indexPtr.p->tableType); Rope name(c_rope_pool, tablePtr.p->tableName); diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp index b196d0a503b..046518aeddc 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp @@ -238,7 +238,8 @@ public: { TR_Logged = 0x1, TR_RowGCI = 0x2, - TR_RowChecksum = 0x4 + TR_RowChecksum = 0x4, + TR_Temporary = 0x8 }; Uint16 m_bits; @@ -1238,7 +1239,8 @@ private: CreateIndxReq m_request; AttributeList m_attrList; char m_indexName[MAX_TAB_NAME_SIZE]; - bool m_storedIndex; + bool m_loggedIndex; + bool m_temporaryIndex; // coordinator DICT Uint32 m_coordinatorRef; bool m_isMaster; @@ -2194,7 +2196,8 @@ private: // Read/Write Schema and Table files /* ------------------------------------------------------------ */ void updateSchemaState(Signal* signal, Uint32 tableId, - SchemaFile::TableEntry*, Callback*); + SchemaFile::TableEntry*, Callback*, + bool savetodisk = 1); void startWriteSchemaFile(Signal* signal); void openSchemaFile(Signal* signal, Uint32 fileNo, diff --git a/storage/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp b/storage/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp index 0226991a073..9fd9748f0e2 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp +++ b/storage/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp @@ -54,7 +54,8 @@ struct SchemaFile { TABLE_ADD_COMMITTED = 2, DROP_TABLE_STARTED = 3, DROP_TABLE_COMMITTED = 4, - ALTER_TABLE_COMMITTED = 5 + ALTER_TABLE_COMMITTED = 5, + TEMPORARY_TABLE_COMMITTED = 6 }; // entry size 32 bytes diff --git a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp index 9d9ea6af2f5..d0b97c0eb81 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp +++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp @@ -469,11 +469,17 @@ public: NORMAL_HASH = 2, USER_DEFINED = 3 }; + enum Storage { + ST_NOLOGGING = 0, // Table is not logged, but survives SR + ST_NORMAL = 1, // Normal table, logged and durable + ST_TEMPORARY = 2 // Table is lost after SR, not logged + }; CopyStatus tabCopyStatus; UpdateState tabUpdateState; TabLcpStatus tabLcpStatus; TabStatus tabStatus; Method method; + Storage tabStorage; Uint32 pageRef[8]; //----------------------------------------------------------------------------- @@ -506,7 +512,6 @@ public: Uint8 kvalue; Uint8 noOfBackups; Uint8 noPages; - Uint8 storedTable; /* 0 IF THE TABLE IS A TEMPORARY TABLE */ Uint16 tableType; Uint16 primaryTableId; }; diff --git 
a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index 43850f297c6..4f20f7bdd22 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -5507,7 +5507,7 @@ void Dbdih::removeNodeFromTable(Signal* signal, //const Uint32 lcpId = SYSFILE->latestLCP_ID; const bool lcpOngoingFlag = (tabPtr.p->tabLcpStatus== TabRecord::TLS_ACTIVE); - const bool temporary = !tabPtr.p->storedTable; + const bool unlogged = (tabPtr.p->tabStorage != TabRecord::ST_NORMAL); FragmentstorePtr fragPtr; for(Uint32 fragNo = 0; fragNo < tabPtr.p->totalfragments; fragNo++){ @@ -5528,7 +5528,7 @@ void Dbdih::removeNodeFromTable(Signal* signal, jam(); found = true; noOfRemovedReplicas++; - removeNodeFromStored(nodeId, fragPtr, replicaPtr, temporary); + removeNodeFromStored(nodeId, fragPtr, replicaPtr, unlogged); if(replicaPtr.p->lcpOngoingFlag){ jam(); /** @@ -6796,7 +6796,12 @@ void Dbdih::execDIADDTABREQ(Signal* signal) /* BUT THEY DO NOT HAVE ANY INFORMATION ABOUT ANY TABLE*/ /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ tabPtr.p->tabStatus = TabRecord::TS_CREATING; - tabPtr.p->storedTable = req->storedTable; + if(req->loggedTable) + tabPtr.p->tabStorage= TabRecord::ST_NORMAL; + else if(req->temporaryTable) + tabPtr.p->tabStorage= TabRecord::ST_TEMPORARY; + else + tabPtr.p->tabStorage= TabRecord::ST_NOLOGGING; tabPtr.p->kvalue = req->kValue; switch ((DictTabInfo::FragmentType)fragType) @@ -6961,7 +6966,7 @@ Dbdih::sendAddFragreq(Signal* signal, ConnectRecordPtr connectPtr, ndbrequire(replicaPtr.p->procNode == getOwnNodeId()); Uint32 requestInfo = 0; - if(!tabPtr.p->storedTable){ + if(tabPtr.p->tabStorage != TabRecord::ST_NORMAL){ requestInfo |= LqhFragReq::TemporaryTable; } @@ -8391,9 +8396,9 @@ void Dbdih::initLcpLab(Signal* signal, Uint32 senderRef, Uint32 tableId) continue; } - if (tabPtr.p->storedTable == 0) { + if (tabPtr.p->tabStorage != TabRecord::ST_NORMAL) { /** - * Temporary table + * Table is not logged */ jam(); tabPtr.p->tabLcpStatus = TabRecord::TLS_COMPLETED; @@ -8881,10 +8886,10 @@ void Dbdih::readPagesIntoTableLab(Signal* signal, Uint32 tableId) rf.rwfTabPtr.p->kvalue = readPageWord(&rf); rf.rwfTabPtr.p->mask = readPageWord(&rf); rf.rwfTabPtr.p->method = (TabRecord::Method)readPageWord(&rf); - /* ---------------------------------- */ - /* Type of table, 2 = temporary table */ - /* ---------------------------------- */ - rf.rwfTabPtr.p->storedTable = readPageWord(&rf); + /* ------------- */ + /* Type of table */ + /* ------------- */ + rf.rwfTabPtr.p->tabStorage = (TabRecord::Storage)(readPageWord(&rf)); Uint32 noOfFrags = rf.rwfTabPtr.p->totalfragments; ndbrequire(noOfFrags > 0); @@ -8975,7 +8980,7 @@ void Dbdih::packTableIntoPagesLab(Signal* signal, Uint32 tableId) writePageWord(&wf, tabPtr.p->kvalue); writePageWord(&wf, tabPtr.p->mask); writePageWord(&wf, tabPtr.p->method); - writePageWord(&wf, tabPtr.p->storedTable); + writePageWord(&wf, tabPtr.p->tabStorage); signal->theData[0] = DihContinueB::ZPACK_FRAG_INTO_PAGES; signal->theData[1] = tabPtr.i; @@ -9180,7 +9185,7 @@ void Dbdih::startFragment(Signal* signal, Uint32 tableId, Uint32 fragId) continue; } - if(tabPtr.p->storedTable == 0){ + if(tabPtr.p->tabStorage != TabRecord::ST_NORMAL){ jam(); TloopCount++; tableId++; @@ -9805,7 +9810,7 @@ void Dbdih::calculateKeepGciLab(Signal* signal, Uint32 tableId, Uint32 fragId) }//if ptrCheckGuard(tabPtr, ctabFileSize, tabRecord); if (tabPtr.p->tabStatus != TabRecord::TS_ACTIVE || - 
tabPtr.p->storedTable == 0) { + tabPtr.p->tabStorage != TabRecord::ST_NORMAL) { if (TloopCount > 100) { jam(); signal->theData[0] = DihContinueB::ZCALCULATE_KEEP_GCI; @@ -10723,6 +10728,14 @@ void Dbdih::allNodesLcpCompletedLab(Signal* signal) /* ------------------------------------------------------------------------- */ void Dbdih::tableUpdateLab(Signal* signal, TabRecordPtr tabPtr) { FileRecordPtr filePtr; + if(tabPtr.p->tabStorage == TabRecord::ST_TEMPORARY) { + // For temporary tables we do not write to disk. Mark both copies 0 and 1 + // as done, and go straight to the after-close code. + filePtr.i = tabPtr.p->tabFile[1]; + ptrCheckGuard(filePtr, cfileFileSize, fileRecord); + tableCloseLab(signal, filePtr); + return; + } filePtr.i = tabPtr.p->tabFile[0]; ptrCheckGuard(filePtr, cfileFileSize, fileRecord); createFileRw(signal, filePtr); @@ -11758,7 +11771,7 @@ void Dbdih::initTable(TabRecordPtr tabPtr) tabPtr.p->kvalue = 0; tabPtr.p->hashpointer = (Uint32)-1; tabPtr.p->mask = 0; - tabPtr.p->storedTable = 1; + tabPtr.p->tabStorage = TabRecord::ST_NORMAL; tabPtr.p->tabErrorCode = 0; tabPtr.p->schemaVersion = (Uint32)-1; tabPtr.p->tabRemoveNode = RNIL; diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp index dc1b194eed2..3118164badd 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp @@ -1469,7 +1469,15 @@ int Dbtup::handleDeleteReq(Signal* signal, { Operationrec* prevOp= req_struct->prevOpPtr.p; regOperPtr->tupVersion= prevOp->tupVersion; - regOperPtr->m_copy_tuple_location= prevOp->m_copy_tuple_location; + // make copy since previous op is committed before this one + const Uint32* org = c_undo_buffer.get_ptr(&prevOp->m_copy_tuple_location); + Uint32* dst = c_undo_buffer.alloc_copy_tuple( + ®OperPtr->m_copy_tuple_location, regTabPtr->total_rec_size); + if (dst == 0) { + terrorCode = ZMEM_NOMEM_ERROR; + goto error; + } + memcpy(dst, org, regTabPtr->total_rec_size << 2); } else { diff --git a/storage/ndb/src/ndbapi/NdbDictionary.cpp b/storage/ndb/src/ndbapi/NdbDictionary.cpp index 4948095f970..269994e01f4 100644 --- a/storage/ndb/src/ndbapi/NdbDictionary.cpp +++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp @@ -632,6 +632,16 @@ NdbDictionary::Table::getReplicaCount() const { return m_impl.m_replicaCount; } +bool +NdbDictionary::Table::getTemporary() { + return m_impl.m_temporary; +} + +void +NdbDictionary::Table::setTemporary(bool val) { + m_impl.m_temporary = val; +} + int NdbDictionary::Table::createTableInDb(Ndb* pNdb, bool equalOk) const { const NdbDictionary::Table * pTab = @@ -808,6 +818,16 @@ NdbDictionary::Index::setLogging(bool val){ m_impl.m_logging = val; } +bool +NdbDictionary::Index::getTemporary(){ + return m_impl.m_temporary; +} + +void +NdbDictionary::Index::setTemporary(bool val){ + m_impl.m_temporary = val; +} + bool NdbDictionary::Index::getLogging() const { return m_impl.m_logging; diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 6c8a447f627..4e621e69c47 100644 --- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -452,6 +452,7 @@ NdbTableImpl::init(){ m_primaryTable.clear(); m_default_no_part_flag = 1; m_logging= true; + m_temporary = false; m_row_gci = true; m_row_checksum = true; m_kvalue= 6; @@ -571,6 +572,12 @@ NdbTableImpl::equal(const NdbTableImpl& obj) const DBUG_RETURN(false); } + if(m_temporary != 
obj.m_temporary) + { + DBUG_PRINT("info",("m_temporary %d != %d",m_temporary,obj.m_temporary)); + DBUG_RETURN(false); + } + if(m_row_gci != obj.m_row_gci) { DBUG_PRINT("info",("m_row_gci %d != %d",m_row_gci,obj.m_row_gci)); @@ -711,6 +718,7 @@ NdbTableImpl::assign(const NdbTableImpl& org) m_max_rows = org.m_max_rows; m_default_no_part_flag = org.m_default_no_part_flag; m_logging = org.m_logging; + m_temporary = org.m_temporary; m_row_gci = org.m_row_gci; m_row_checksum = org.m_row_checksum; m_kvalue = org.m_kvalue; @@ -1080,6 +1088,7 @@ void NdbIndexImpl::init() m_id= RNIL; m_type= NdbDictionary::Object::TypeUndefined; m_logging= true; + m_temporary= false; m_table= NULL; } @@ -1951,7 +1960,7 @@ objectStateMapping[] = { static const ApiKernelMapping objectStoreMapping[] = { - { DictTabInfo::StoreTemporary, NdbDictionary::Object::StoreTemporary }, + { DictTabInfo::StoreNotLogged, NdbDictionary::Object::StoreNotLogged }, { DictTabInfo::StorePermanent, NdbDictionary::Object::StorePermanent }, { -1, -1 } }; @@ -2030,6 +2039,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, impl->m_default_no_part_flag = tableDesc->DefaultNoPartFlag; impl->m_linear_flag = tableDesc->LinearHashFlag; impl->m_logging = tableDesc->TableLoggedFlag; + impl->m_temporary = tableDesc->TableTemporaryFlag; impl->m_row_gci = tableDesc->RowGCIFlag; impl->m_row_checksum = tableDesc->RowChecksumFlag; impl->m_kvalue = tableDesc->TableKValue; @@ -2472,6 +2482,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, tmpTab->FragmentCount= impl.m_fragmentCount; tmpTab->TableLoggedFlag = impl.m_logging; + tmpTab->TableTemporaryFlag = impl.m_temporary; tmpTab->RowGCIFlag = impl.m_row_gci; tmpTab->RowChecksumFlag = impl.m_row_checksum; tmpTab->TableKValue = impl.m_kvalue; @@ -2990,6 +3001,7 @@ NdbDictInterface::create_index_obj_from_table(NdbIndexImpl** dst, idx->m_tableName.assign(prim->m_externalName); NdbDictionary::Object::Type type = idx->m_type = tab->m_indexType; idx->m_logging = tab->m_logging; + idx->m_temporary = tab->m_temporary; // skip last attribute (NDB$PK or NDB$TNODE) const Uint32 distKeys = prim->m_noOfDistributionKeys; @@ -3081,6 +3093,7 @@ NdbDictInterface::createIndex(Ndb & ndb, ndb.internalize_index_name(&table, impl.getName())); w.add(DictTabInfo::TableName, internalName.c_str()); w.add(DictTabInfo::TableLoggedFlag, impl.m_logging); + w.add(DictTabInfo::TableTemporaryFlag, impl.m_temporary); NdbApiSignal tSignal(m_reference); tSignal.theReceiversBlockNumber = DBDICT; @@ -4064,6 +4077,7 @@ NdbDictInterface::listObjects(NdbDictionary::Dictionary::List& list, getApiConstant(ListTablesConf::getTableState(d), objectStateMapping, 0); element.store = (NdbDictionary::Object::Store) getApiConstant(ListTablesConf::getTableStore(d), objectStoreMapping, 0); + element.temp = ListTablesConf::getTableTemp(d); // table or index name Uint32 n = (data[pos++] + 3) >> 2; BaseString databaseName; diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp index 35db103aa9f..4e8ef471014 100644 --- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp +++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp @@ -198,6 +198,7 @@ public: Uint32 m_default_no_part_flag; bool m_linear_flag; bool m_logging; + bool m_temporary; bool m_row_gci; bool m_row_checksum; int m_kvalue; @@ -273,6 +274,7 @@ public: Vector<int> m_key_ids; bool m_logging; + bool m_temporary; NdbTableImpl * m_table; diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp index 
9f2a5844476..08b98cf7b48 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp @@ -65,6 +65,39 @@ print_std(const SubTableData * sdata, LinearSectionPtr ptr[3]) } #endif +// EventBufData + +Uint32 +EventBufData::get_blob_part_no() const +{ + assert(ptr[0].sz > 2); + Uint32 pos = AttributeHeader(ptr[0].p[0]).getDataSize() + + AttributeHeader(ptr[0].p[1]).getDataSize(); + Uint32 no = ptr[1].p[pos]; + return no; +} + +void +EventBufData::add_part_size(Uint32 & full_count, Uint32 & full_sz) const +{ + Uint32 tmp_count = 0; + Uint32 tmp_sz = 0; + const EventBufData* data2 = m_next_blob; + while (data2 != 0) { + tmp_count++; + tmp_sz += data2->sz; + const EventBufData* data3 = data2->m_next; + while (data3 != 0) { + tmp_count++; + tmp_sz += data3->sz; + data3 = data3->m_next; + } + data2 = data2->m_next_blob; + } + full_count += tmp_count; + full_sz += tmp_sz; +} + /* * Class NdbEventOperationImpl * @@ -1162,11 +1195,12 @@ NdbEventBuffer::nextEvent() // set NdbEventOperation data op->m_data_item= data; - // remove item from m_available_data - m_available_data.remove_first(); + // remove item from m_available_data and return size + Uint32 full_count, full_sz; + m_available_data.remove_first(full_count, full_sz); // add it to used list - m_used_data.append_used_data(data); + m_used_data.append_used_data(data, full_count, full_sz); #ifdef VM_TRACE op->m_data_done_count++; @@ -1840,7 +1874,7 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op, op->m_has_error = 2; DBUG_RETURN_EVENT(-1); } - if (unlikely(copy_data(sdata, ptr, data))) + if (unlikely(copy_data(sdata, ptr, data, NULL))) { op->m_has_error = 3; DBUG_RETURN_EVENT(-1); @@ -1872,7 +1906,7 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op, } } // link blob event under main event - add_blob_data(main_data, data); + add_blob_data(bucket, main_data, data); } if (use_hash) { @@ -1886,7 +1920,7 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op, else { // event with same op, PK found, merge into old buffer - if (unlikely(merge_data(sdata, ptr, data))) + if (unlikely(merge_data(sdata, ptr, data, & bucket->m_data.m_sz))) { op->m_has_error = 3; DBUG_RETURN_EVENT(-1); @@ -1909,6 +1943,9 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op, } } } +#ifdef NDB_EVENT_VERIFY_SIZE + verify_size(bucket->m_data); +#endif DBUG_RETURN_EVENT(0); } @@ -1962,8 +1999,21 @@ NdbEventBuffer::alloc_data() } // remove data from free list - m_free_data = data->m_next; + if (data->m_next_blob == 0) + m_free_data = data->m_next; + else { + EventBufData* data2 = data->m_next_blob; + if (data2->m_next == 0) { + data->m_next_blob = data2->m_next_blob; + data = data2; + } else { + EventBufData* data3 = data2->m_next; + data2->m_next = data3->m_next; + data = data3; + } + } data->m_next = 0; + data->m_next_blob = 0; #ifdef VM_TRACE m_free_data_count--; assert(m_free_data_sz >= data->sz); @@ -1975,7 +2025,9 @@ NdbEventBuffer::alloc_data() // allocate initial or bigger memory area in EventBufData // takes sizes from given ptr and sets up data->ptr int -NdbEventBuffer::alloc_mem(EventBufData* data, LinearSectionPtr ptr[3]) +NdbEventBuffer::alloc_mem(EventBufData* data, + LinearSectionPtr ptr[3], + Uint32 * change_sz) { DBUG_ENTER("NdbEventBuffer::alloc_mem"); DBUG_PRINT("info", ("ptr sz %u + %u + %u", ptr[0].sz, ptr[1].sz, ptr[2].sz)); @@ -1988,6 +2040,8 @@ NdbEventBuffer::alloc_mem(EventBufData* data, LinearSectionPtr ptr[3]) if (data->sz < alloc_size) { + Uint32 add_sz = alloc_size - data->sz; + 
NdbMem_Free((char*)data->memory); assert(m_total_alloc >= data->sz); m_total_alloc -= data->sz; @@ -1999,6 +2053,9 @@ NdbEventBuffer::alloc_mem(EventBufData* data, LinearSectionPtr ptr[3]) DBUG_RETURN(-1); data->sz = alloc_size; m_total_alloc += data->sz; + + if (change_sz != NULL) + *change_sz += add_sz; } Uint32* memptr = data->memory; @@ -2014,14 +2071,30 @@ NdbEventBuffer::alloc_mem(EventBufData* data, LinearSectionPtr ptr[3]) DBUG_RETURN(0); } +void +NdbEventBuffer::dealloc_mem(EventBufData* data, + Uint32 * change_sz) +{ + NdbMem_Free((char*)data->memory); + assert(m_total_alloc >= data->sz); + m_total_alloc -= data->sz; + if (change_sz != NULL) { + assert(*change_sz >= data->sz); + *change_sz -= data->sz; + } + data->memory = 0; + data->sz = 0; +} + int NdbEventBuffer::copy_data(const SubTableData * const sdata, LinearSectionPtr ptr[3], - EventBufData* data) + EventBufData* data, + Uint32 * change_sz) { DBUG_ENTER_EVENT("NdbEventBuffer::copy_data"); - if (alloc_mem(data, ptr) != 0) + if (alloc_mem(data, ptr, change_sz) != 0) DBUG_RETURN_EVENT(-1); memcpy(data->sdata, sdata, sizeof(SubTableData)); int i; @@ -2093,7 +2166,8 @@ copy_attr(AttributeHeader ah, int NdbEventBuffer::merge_data(const SubTableData * const sdata, LinearSectionPtr ptr2[3], - EventBufData* data) + EventBufData* data, + Uint32 * change_sz) { DBUG_ENTER_EVENT("NdbEventBuffer::merge_data"); @@ -2102,7 +2176,7 @@ NdbEventBuffer::merge_data(const SubTableData * const sdata, int t1 = SubTableData::getOperation(data->sdata->requestInfo); int t2 = SubTableData::getOperation(sdata->requestInfo); if (t1 == Ev_t::enum_NUL) - DBUG_RETURN_EVENT(copy_data(sdata, ptr2, data)); + DBUG_RETURN_EVENT(copy_data(sdata, ptr2, data, change_sz)); Ev_t* tp = 0; int i; @@ -2142,6 +2216,8 @@ NdbEventBuffer::merge_data(const SubTableData * const sdata, DBUG_RETURN_EVENT(0); } + // TODO: use old data items, avoid malloc/free on each merge + // save old data EventBufData olddata = *data; data->memory = 0; @@ -2158,7 +2234,7 @@ NdbEventBuffer::merge_data(const SubTableData * const sdata, { if (loop == 1) { - if (alloc_mem(data, ptr) != 0) + if (alloc_mem(data, ptr, change_sz) != 0) { result = -1; goto end; @@ -2277,11 +2353,7 @@ NdbEventBuffer::merge_data(const SubTableData * const sdata, } end: - // free old data - NdbMem_Free((char*)olddata.memory); - assert(m_total_alloc >= olddata.sz); - m_total_alloc -= olddata.sz; - + dealloc_mem(&olddata, change_sz); DBUG_RETURN_EVENT(result); } @@ -2357,7 +2429,7 @@ NdbEventBuffer::get_main_data(Gci_container* bucket, SubTableData sdata = *blob_data->sdata; sdata.tableId = main_op->m_eventImpl->m_tableImpl->m_id; SubTableData::setOperation(sdata.requestInfo, NdbDictionary::Event::_TE_NUL); - if (copy_data(&sdata, ptr, main_data) != 0) + if (copy_data(&sdata, ptr, main_data, NULL) != 0) DBUG_RETURN_EVENT(-1); hpos.data = main_data; @@ -2365,7 +2437,8 @@ NdbEventBuffer::get_main_data(Gci_container* bucket, } void -NdbEventBuffer::add_blob_data(EventBufData* main_data, +NdbEventBuffer::add_blob_data(Gci_container* bucket, + EventBufData* main_data, EventBufData* blob_data) { DBUG_ENTER_EVENT("NdbEventBuffer::add_blob_data"); @@ -2389,6 +2462,9 @@ NdbEventBuffer::add_blob_data(EventBufData* main_data, blob_data->m_next = head->m_next; head->m_next = blob_data; } + // adjust data list size + bucket->m_data.m_count += 1; + bucket->m_data.m_sz += blob_data->sz; DBUG_VOID_RETURN_EVENT; } @@ -2424,6 +2500,9 @@ NdbEventBuffer::move_data() void NdbEventBuffer::free_list(EventBufData_list &list) { +#ifdef 
NDB_EVENT_VERIFY_SIZE + verify_size(list); +#endif // return list to m_free_data list.m_tail->m_next= m_free_data; m_free_data= list.m_head; @@ -2432,38 +2511,15 @@ NdbEventBuffer::free_list(EventBufData_list &list) #endif m_free_data_sz+= list.m_sz; - // free blobs XXX unacceptable performance, fix later - { - EventBufData* data = list.m_head; - while (1) { - while (data->m_next_blob != NULL) { - EventBufData* blob_head = data->m_next_blob; - data->m_next_blob = blob_head->m_next_blob; - blob_head->m_next_blob = NULL; - while (blob_head != NULL) { - EventBufData* blob_part = blob_head; - blob_head = blob_head->m_next; - blob_part->m_next = m_free_data; - m_free_data = blob_part; -#ifdef VM_TRACE - m_free_data_count++; -#endif - m_free_data_sz += blob_part->sz; - } - } - if (data == list.m_tail) - break; - data = data->m_next; - } - } - - // list returned to m_free_data list.m_head = list.m_tail = NULL; list.m_count = list.m_sz = 0; } void EventBufData_list::append_list(EventBufData_list *list, Uint64 gci) { +#ifdef NDB_EVENT_VERIFY_SIZE + NdbEventBuffer::verify_size(*list); +#endif move_gci_ops(list, gci); if (m_tail) @@ -2702,6 +2758,29 @@ send_report: #endif } +#ifdef VM_TRACE +void +NdbEventBuffer::verify_size(const EventBufData* data, Uint32 count, Uint32 sz) +{ + Uint32 tmp_count = 0; + Uint32 tmp_sz = 0; + while (data != 0) { + Uint32 full_count, full_sz; + data->get_full_size(full_count, full_sz); + tmp_count += full_count; + tmp_sz += full_sz; + data = data->m_next; + } + assert(tmp_count == count); + assert(tmp_sz == sz); +} +void +NdbEventBuffer::verify_size(const EventBufData_list & list) +{ + verify_size(list.m_head, list.m_count, list.m_sz); +} +#endif + // hash table routines // could optimize the all-fixed case diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp index 0d6618a7365..eaf89540dfd 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp @@ -40,6 +40,12 @@ #define DBUG_DUMP_EVENT(A,B,C) #endif +#undef NDB_EVENT_VERIFY_SIZE +#ifdef VM_TRACE +// not much effect on performance, leave on +#define NDB_EVENT_VERIFY_SIZE +#endif + class NdbEventOperationImpl; struct EventBufData @@ -54,9 +60,13 @@ struct EventBufData /* * Blobs are stored in blob list (m_next_blob) where each entry - * is list of parts (m_next) in part number order. + * is list of parts (m_next). TODO order by part number + * + * Processed data (m_used_data, m_free_data) keeps the old blob + * list intact. It is reconsumed when new data items are needed. * - * TODO order by part no and link for fast read and free_list + * Data item lists keep track of item count and sum(sz) and + * these include both main items and blob parts. */ EventBufData *m_next; // Next wrt to global order or Next blob part @@ -66,14 +76,22 @@ struct EventBufData Uint32 m_pkhash; // PK hash (without op) for fast compare EventBufData() {} + // Get blob part number from blob data - Uint32 get_blob_part_no() { - assert(ptr[0].sz > 2); - Uint32 pos = AttributeHeader(ptr[0].p[0]).getDataSize() + - AttributeHeader(ptr[0].p[1]).getDataSize(); - Uint32 no = ptr[1].p[pos]; - return no; + Uint32 get_blob_part_no() const; + + /* + * Main item does not include summary of parts (space / performance + * tradeoff). The summary is needed when moving single data item. + * It is not needed when moving entire list. 
+ */ + void get_full_size(Uint32 & full_count, Uint32 & full_sz) const { + full_count = 1; + full_sz = sz; + if (m_next_blob != 0) + add_part_size(full_count, full_sz); } + void add_part_size(Uint32 & full_count, Uint32 & full_sz) const; }; class EventBufData_list @@ -82,19 +100,22 @@ public: EventBufData_list(); ~EventBufData_list(); - void remove_first(); - // append data and insert data into Gci_op list with add_gci_op - void append_data(EventBufData *data); + // remove first and return its size + void remove_first(Uint32 & full_count, Uint32 & full_sz); + // for remove+append avoid double call to get_full_size() + void append_used_data(EventBufData *data, Uint32 full_count, Uint32 full_sz); // append data and insert data but ignore Gci_op list void append_used_data(EventBufData *data); + // append data and insert data into Gci_op list with add_gci_op + void append_data(EventBufData *data); // append list to another, will call move_gci_ops void append_list(EventBufData_list *list, Uint64 gci); int is_empty(); EventBufData *m_head, *m_tail; - unsigned m_count; - unsigned m_sz; + Uint32 m_count; + Uint32 m_sz; /* distinct ops per gci (assume no hash needed) @@ -193,33 +214,47 @@ int EventBufData_list::is_empty() } inline -void EventBufData_list::remove_first() +void EventBufData_list::remove_first(Uint32 & full_count, Uint32 & full_sz) { - m_count--; - m_sz-= m_head->sz; - m_head= m_head->m_next; + m_head->get_full_size(full_count, full_sz); +#ifdef VM_TRACE + assert(m_count >= full_count); + assert(m_sz >= full_sz); +#endif + m_count -= full_count; + m_sz -= full_sz; + m_head = m_head->m_next; if (m_head == 0) - m_tail= 0; + m_tail = 0; } inline -void EventBufData_list::append_used_data(EventBufData *data) +void EventBufData_list::append_used_data(EventBufData *data, Uint32 full_count, Uint32 full_sz) { - data->m_next= 0; + data->m_next = 0; if (m_tail) - m_tail->m_next= data; + m_tail->m_next = data; else { #ifdef VM_TRACE + assert(m_head == 0); assert(m_count == 0); assert(m_sz == 0); #endif - m_head= data; + m_head = data; } - m_tail= data; + m_tail = data; - m_count++; - m_sz+= data->sz; + m_count += full_count; + m_sz += full_sz; +} + +inline +void EventBufData_list::append_used_data(EventBufData *data) +{ + Uint32 full_count, full_sz; + data->get_full_size(full_count, full_sz); + append_used_data(data, full_count, full_sz); } inline @@ -442,17 +477,24 @@ public: // routines to copy/merge events EventBufData* alloc_data(); - int alloc_mem(EventBufData* data, LinearSectionPtr ptr[3]); + int alloc_mem(EventBufData* data, + LinearSectionPtr ptr[3], + Uint32 * change_sz); + void dealloc_mem(EventBufData* data, + Uint32 * change_sz); int copy_data(const SubTableData * const sdata, LinearSectionPtr ptr[3], - EventBufData* data); + EventBufData* data, + Uint32 * change_sz); int merge_data(const SubTableData * const sdata, LinearSectionPtr ptr[3], - EventBufData* data); + EventBufData* data, + Uint32 * change_sz); int get_main_data(Gci_container* bucket, EventBufData_hash::Pos& hpos, EventBufData* blob_data); - void add_blob_data(EventBufData* main_data, + void add_blob_data(Gci_container* bucket, + EventBufData* main_data, EventBufData* blob_data); void free_list(EventBufData_list &list); @@ -478,9 +520,9 @@ public: Gci_container m_complete_data; EventBufData *m_free_data; #ifdef VM_TRACE - unsigned m_free_data_count; + Uint32 m_free_data_count; #endif - unsigned m_free_data_sz; + Uint32 m_free_data_sz; // user thread EventBufData_list m_available_data; @@ -493,6 +535,12 @@ public: 
diff --git a/storage/ndb/src/ndbapi/ndberror.c b/storage/ndb/src/ndbapi/ndberror.c
index c3874cbf8eb..460c4fa443b 100644
--- a/storage/ndb/src/ndbapi/ndberror.c
+++ b/storage/ndb/src/ndbapi/ndberror.c
@@ -421,6 +421,9 @@ ErrorBundle ErrorCodes[] = {
   { 773,  DMEC, SE, "Out of string memory, please modify StringMemory config parameter" },
   { 775,  DMEC, SE, "Create file is not supported when Diskless=1" },
+  { 776,  DMEC, AE, "Index created on temporary table must itself be temporary" },
+  { 777,  DMEC, AE, "Cannot create a temporary index on a non-temporary table" },
+  { 778,  DMEC, AE, "A temporary table or index must be specified as not logging" },
 
   /**
    * FunctionNotImplemented
@@ -611,7 +614,7 @@ ErrorBundle ErrorCodes[] = {
   { 4272, DMEC, AE, "Table definition has undefined column" },
   { 4273, DMEC, IE, "No blob table in dict cache" },
   { 4274, DMEC, IE, "Corrupted main table PK in blob operation" },
-  { 4275, DMEC, AE, "The blob method is incompatible with operation type or lock mode" }
+  { 4275, DMEC, AE, "The blob method is incompatible with operation type or lock mode" },
 };
 
 static
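
Codes 776-778 are application errors (AE) reported around temporary tables and indexes. How a client surfaces them is up to the application; a hedged illustration of a purely local lookup that mirrors the new entries (the helper name is made up and is not part of the NDB API):

    // Hedged illustration only: a client-side lookup mirroring the three new
    // ndberror.c entries above.
    #include <cstdio>

    static const char* temp_table_error_text(int code) {
      switch (code) {
      case 776: return "Index created on temporary table must itself be temporary";
      case 777: return "Cannot create a temporary index on a non-temporary table";
      case 778: return "A temporary table or index must be specified as not logging";
      default:  return 0;
      }
    }

    int main() {
      const int codes[] = { 776, 777, 778, 780 };  // 780 stands for "anything else"
      for (unsigned i = 0; i < sizeof(codes) / sizeof(codes[0]); i++) {
        const char* txt = temp_table_error_text(codes[i]);
        std::printf("%d: %s\n", codes[i], txt ? txt : "(not a temporary-table error)");
      }
      return 0;
    }
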
diff --git a/storage/ndb/test/ndbapi/test_event_merge.cpp b/storage/ndb/test/ndbapi/test_event_merge.cpp
index 6936e860065..879e35e4577 100644
--- a/storage/ndb/test/ndbapi/test_event_merge.cpp
+++ b/storage/ndb/test/ndbapi/test_event_merge.cpp
@@ -161,6 +161,7 @@ static void
 errdb()
 {
   uint any = 0;
+  // g_ncc return no error...
   if (g_ndb != 0) {
     const NdbError& e = g_ndb->getNdbError();
     if (e.code != 0)
@@ -359,9 +360,9 @@ createtable(Tab& t)
 }
 
 static int
-createtable()
+createtables()
 {
-  ll1("createtable");
+  ll1("createtables");
   for (uint i = 0; i < maxtab(); i++)
     chkrc(createtable(tab(i)) == 0);
   return 0;
@@ -381,9 +382,9 @@ droptable(Tab& t)
 }
 
 static int
-droptable()
+droptables()
 {
-  ll1("droptable");
+  ll1("droptables");
   for (uint i = 0; i < maxtab(); i++)
     chkrc(droptable(tab(i)) == 0);
   return 0;
@@ -419,9 +420,9 @@ createevent(Tab& t)
 }
 
 static int
-createevent()
+createevents()
 {
-  ll1("createevent");
+  ll1("createevents");
   for (uint i = 0; i < maxtab(); i++)
     chkrc(createevent(tab(i)) == 0);
   return 0;
@@ -439,11 +440,14 @@ dropevent(Tab& t, bool force = false)
 }
 
 static int
-dropevent(bool force = false)
+dropevents(bool force = false)
 {
-  ll1("dropevent");
-  for (uint i = 0; i < maxtab(); i++)
+  ll1("dropevents");
+  for (uint i = 0; i < maxtab(); i++) {
+    if (force && g_tablst[i] == 0)
+      continue;
     chkrc(dropevent(tab(i), force) == 0 || force);
+  }
   return 0;
 }
 
@@ -1173,8 +1177,11 @@ static int
 dropeventops(bool force = false)
 {
   ll1("dropeventops");
-  for (uint i = 0; i < maxrun(); i++)
+  for (uint i = 0; i < maxrun(); i++) {
+    if (force && g_runlst[i] == 0)
+      continue;
     chkrc(dropeventop(run(i), force) == 0 || force);
+  }
   return 0;
 }
 
@@ -2139,8 +2146,8 @@ runtest()
 {
   setseed(-1);
   initrun();
-  chkrc(createtable() == 0);
-  chkrc(createevent() == 0);
+  chkrc(createtables() == 0);
+  chkrc(createevents() == 0);
   for (g_loop = 0; g_opts.loop == 0 || g_loop < g_opts.loop; g_loop++) {
     ll0("=== loop " << g_loop << " ===");
     setseed(g_loop);
@@ -2164,8 +2171,8 @@ runtest()
     // time erases everything..
     chkrc(waitgci(1) == 0);
   }
-  chkrc(dropevent() == 0);
-  chkrc(droptable() == 0);
+  chkrc(dropevents() == 0);
+  chkrc(droptables() == 0);
   resetmem();
   deleteops();
   return 0;
@@ -2287,6 +2294,16 @@ checkopts()
   return 0;
 }
 
+static int
+doconnect()
+{
+  g_ncc = new Ndb_cluster_connection();
+  chkdb(g_ncc->connect(30) == 0);
+  g_ndb = new Ndb(g_ncc, "TEST_DB");
+  chkdb(g_ndb->init() == 0 && g_ndb->waitUntilReady(30) == 0);
+  return 0;
+}
+
 int
 main(int argc, char** argv)
 {
@@ -2302,19 +2319,13 @@ main(int argc, char** argv)
   ret = handle_options(&argc, &argv, my_long_options, ndb_std_get_one_option);
   if (ret != 0 || argc != 0 || checkopts() != 0)
     return NDBT_ProgramExit(NDBT_WRONGARGS);
-  g_ncc = new Ndb_cluster_connection();
-  if (g_ncc->connect(30) == 0) {
-    g_ndb = new Ndb(g_ncc, "TEST_DB");
-    if (g_ndb->init() == 0 && g_ndb->waitUntilReady(30) == 0) {
-      if (runtest() == 0) {
-        delete g_ndb;
-        delete g_ncc;
-        return NDBT_ProgramExit(NDBT_OK);
-      }
-    }
+  if (doconnect() == 0 && runtest() == 0) {
+    delete g_ndb;
+    delete g_ncc;
+    return NDBT_ProgramExit(NDBT_OK);
   }
   dropeventops(true);
-  dropevent(true);
+  dropevents(true);
   delete g_ndb;
   delete g_ncc;
   return NDBT_ProgramExit(NDBT_FAILED);
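
doconnect() collects the usual NDB API bootstrap behind the test's chkdb() macros: create the cluster connection, connect, then bring up an Ndb object bound to a database and wait for it to become ready. The same sequence in a plain standalone form, with simplified error handling instead of chkdb(), might look like this (assumes the NDB API headers and a running cluster; "TEST_DB" is the database used above):

    // Standalone sketch of the bootstrap sequence doconnect() factors out.
    #include <NdbApi.hpp>
    #include <cstdio>

    int main() {
      ndb_init();
      Ndb_cluster_connection* ncc = new Ndb_cluster_connection();
      if (ncc->connect(30) != 0) {
        std::fprintf(stderr, "cluster connect failed\n");
        delete ncc;
        ndb_end(0);
        return 1;
      }
      Ndb* ndb = new Ndb(ncc, "TEST_DB");
      if (ndb->init() != 0 || ndb->waitUntilReady(30) != 0) {
        std::fprintf(stderr, "Ndb init failed: %d\n", ndb->getNdbError().code);
        delete ndb;
        delete ncc;
        ndb_end(0);
        return 1;
      }
      // ... run the actual work here, as runtest() does above ...
      delete ndb;
      delete ncc;
      ndb_end(0);
      return 0;
    }
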
diff --git a/storage/ndb/tools/listTables.cpp b/storage/ndb/tools/listTables.cpp
index a221156280d..0e32d802d2d 100644
--- a/storage/ndb/tools/listTables.cpp
+++ b/storage/ndb/tools/listTables.cpp
@@ -32,6 +32,7 @@ static Ndb* ndb = 0;
 static const NdbDictionary::Dictionary * dic = 0;
 static int _unqualified = 0;
 static int _parsable = 0;
+static int show_temp_status = 0;
 
 static void
 fatal(char const* fmt, ...)
@@ -80,9 +81,19 @@ list(const char * tabname,
     if (!_parsable) {
       if (ndb->usingFullyQualifiedNames())
-        ndbout_c("%-5s %-20s %-8s %-7s %-12s %-8s %s", "id", "type", "state", "logging", "database", "schema", "name");
+      {
+        if (show_temp_status)
+          ndbout_c("%-5s %-20s %-8s %-7s %-4s %-12s %-8s %s", "id", "type", "state", "logging", "temp", "database", "schema", "name");
+        else
+          ndbout_c("%-5s %-20s %-8s %-7s %-12s %-8s %s", "id", "type", "state", "logging", "database", "schema", "name");
+      }
       else
-        ndbout_c("%-5s %-20s %-8s %-7s %s", "id", "type", "state", "logging", "name");
+      {
+        if (show_temp_status)
+          ndbout_c("%-5s %-20s %-8s %-7s %-4s %s", "id", "type", "state", "logging", "temp", "name");
+        else
+          ndbout_c("%-5s %-20s %-8s %-7s %s", "id", "type", "state", "logging", "name");
+      }
     }
     for (unsigned i = 0; i < list.count; i++) {
       NdbDictionary::Dictionary::List::Element& elt = list.elements[i];
@@ -162,30 +173,69 @@ list(const char * tabname,
       strcpy(store, "-");
     else {
       switch (elt.store) {
-      case NdbDictionary::Object::StoreTemporary:
+      case NdbDictionary::Object::StoreNotLogged:
         strcpy(store, "No");
         break;
       case NdbDictionary::Object::StorePermanent:
         strcpy(store, "Yes");
         break;
       default:
-        sprintf(state, "%d", (int)elt.store);
+        sprintf(store, "%d", (int)elt.store);
         break;
       }
     }
+    char temp[100];
+    if (show_temp_status)
+    {
+      if (! isTable)
+        strcpy(temp, "-");
+      else {
+        switch (elt.temp) {
+        case NDB_TEMP_TAB_PERMANENT:
+          strcpy(temp, "No");
+          break;
+        case NDB_TEMP_TAB_TEMPORARY:
+          strcpy(temp, "Yes");
+          break;
+        default:
+          sprintf(temp, "%d", (int)elt.temp);
+          break;
+        }
+      }
+    }
     if (ndb->usingFullyQualifiedNames())
     {
       if (_parsable)
-        ndbout_c("%d\t'%s'\t'%s'\t'%s'\t'%s'\t'%s'\t'%s'", elt.id, type, state, store, (elt.database)?elt.database:"", (elt.schema)?elt.schema:"", elt.name);
+      {
+        if (show_temp_status)
+          ndbout_c("%d\t'%s'\t'%s'\t'%s'\t'%s'\t'%s'\t'%s'\t'%s'", elt.id, type, state, store, temp, (elt.database)?elt.database:"", (elt.schema)?elt.schema:"", elt.name);
+        else
+          ndbout_c("%d\t'%s'\t'%s'\t'%s'\t'%s'\t'%s'\t'%s'", elt.id, type, state, store, (elt.database)?elt.database:"", (elt.schema)?elt.schema:"", elt.name);
+      }
       else
-        ndbout_c("%-5d %-20s %-8s %-7s %-12s %-8s %s", elt.id, type, state, store, (elt.database)?elt.database:"", (elt.schema)?elt.schema:"", elt.name);
+      {
+        if (show_temp_status)
+          ndbout_c("%-5d %-20s %-8s %-7s %-4s %-12s %-8s %s", elt.id, type, state, store, temp, (elt.database)?elt.database:"", (elt.schema)?elt.schema:"", elt.name);
+        else
+          ndbout_c("%-5d %-20s %-8s %-7s %-12s %-8s %s", elt.id, type, state, store, (elt.database)?elt.database:"", (elt.schema)?elt.schema:"", elt.name);
+      }
     }
     else
     {
      if (_parsable)
-        ndbout_c("%d\t'%s'\t'%s'\t'%s'\t'%s'", elt.id, type, state, store, elt.name);
+      {
+        if (show_temp_status)
+          ndbout_c("%d\t'%s'\t'%s'\t'%s'\t'%s'\t'%s'", elt.id, type, state, store, temp, elt.name);
+        else
+          ndbout_c("%d\t'%s'\t'%s'\t'%s'\t'%s'", elt.id, type, state, store, elt.name);
+      }
      else
-        ndbout_c("%-5d %-20s %-8s %-7s %s", elt.id, type, state, store, elt.name);
+      {
+        if (show_temp_status)
+          ndbout_c("%-5d %-20s %-8s %-7s %-4s %s", elt.id, type, state, store, temp, elt.name);
+        else
+          ndbout_c("%-5d %-20s %-8s %-7s %s", elt.id, type, state, store, elt.name);
+      }
     }
   }
   if (_parsable)
@@ -197,6 +247,10 @@ NDB_STD_OPTS_VARS;
 static const char* _dbname = "TEST_DB";
 static int _loops;
 static int _type;
+enum options_ndb_show_tables
+{
+  OPT_SHOW_TMP_STATUS=256
+};
 static struct my_option my_long_options[] =
 {
   NDB_STD_OPTS("ndb_show_tables"),
@@ -215,6 +269,9 @@ static struct my_option my_long_options[] =
   { "parsable", 'p', "Return output suitable for mysql LOAD DATA INFILE",
     (gptr*) &_parsable, (gptr*) &_parsable, 0,
     GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+  { "show-temp-status", OPT_SHOW_TMP_STATUS, "Show table temporary flag",
+    (gptr*) &show_temp_status, (gptr*) &show_temp_status, 0,
+    GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
   { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
 };
 
 static void
 usage()
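
With the new --show-temp-status option, ndb_show_tables emits one extra narrow "temp" column (Yes/No, or "-" for non-table objects) in both the fixed-width and the parsable output. The header side of that reduces to a small formatting decision, sketched here with printf standing in for ndbout_c and format strings taken from the patch:

    // Sketch of the header-layout decision added to ndb_show_tables: an extra
    // fixed-width "temp" column, printed only when --show-temp-status is given.
    #include <cstdio>

    static void print_header(bool show_temp_status) {
      if (show_temp_status)
        std::printf("%-5s %-20s %-8s %-7s %-4s %s\n",
                    "id", "type", "state", "logging", "temp", "name");
      else
        std::printf("%-5s %-20s %-8s %-7s %s\n",
                    "id", "type", "state", "logging", "name");
    }

    int main() {
      print_header(false);  // default layout
      print_header(true);   // layout with --show-temp-status
      return 0;
    }
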