-rw-r--r--  mysql-test/suite/ndb/r/ndb_auto_increment.result  445
-rw-r--r--  mysql-test/suite/ndb/r/ndb_charset.result  8
-rw-r--r--  mysql-test/suite/ndb/r/ndb_index_unique.result  14
-rw-r--r--  mysql-test/suite/ndb/r/ndb_insert.result  169
-rw-r--r--  mysql-test/suite/ndb/r/ndb_update.result  6
-rw-r--r--  mysql-test/suite/ndb/t/ndb_auto_increment.test  293
-rw-r--r--  mysql-test/suite/ndb/t/ndb_insert.test  138
-rw-r--r--  mysql-test/suite/rpl_ndb/r/rpl_row_basic_7ndb.result  4
-rw-r--r--  sql/ha_ndbcluster.cc  109
-rw-r--r--  sql/ha_ndbcluster.h  3
-rw-r--r--  sql/mysqld.cc  9
-rw-r--r--  storage/ndb/include/kernel/signaldata/TcKeyRef.hpp  3
-rw-r--r--  storage/ndb/include/kernel/signaldata/TcRollbackRep.hpp  3
-rw-r--r--  storage/ndb/include/ndbapi/NdbDictionary.hpp  2
-rw-r--r--  storage/ndb/src/kernel/blocks/backup/Backup.cpp  5
-rw-r--r--  storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp  57
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp  5
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp  57
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp  5
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp  1
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp  25
-rw-r--r--  storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp  3
-rw-r--r--  storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp  26
-rw-r--r--  storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp  362
-rw-r--r--  storage/ndb/src/kernel/blocks/suma/Suma.cpp  6
-rw-r--r--  storage/ndb/src/mgmsrv/MgmtSrvr.cpp  82
-rw-r--r--  storage/ndb/src/ndbapi/NdbOperationExec.cpp  7
-rw-r--r--  storage/ndb/src/ndbapi/NdbTransaction.cpp  12
-rw-r--r--  storage/ndb/src/ndbapi/ndberror.c  4
-rw-r--r--  storage/ndb/test/ndbapi/testNodeRestart.cpp  2
30 files changed, 1243 insertions, 622 deletions
diff --git a/mysql-test/suite/ndb/r/ndb_auto_increment.result b/mysql-test/suite/ndb/r/ndb_auto_increment.result
new file mode 100644
index 00000000000..5740ed38242
--- /dev/null
+++ b/mysql-test/suite/ndb/r/ndb_auto_increment.result
@@ -0,0 +1,445 @@
+DROP TABLE IF EXISTS t1,t2;
+DROP TABLE IF EXISTS t1;
+set @old_auto_increment_offset = @@session.auto_increment_offset;
+set @old_auto_increment_increment = @@session.auto_increment_increment;
+set @old_ndb_autoincrement_prefetch_sz = @@session.ndb_autoincrement_prefetch_sz;
+flush status;
+create table t1 (a int not null auto_increment primary key) engine ndb;
+insert into t1 values (NULL);
+select * from t1 order by a;
+a
+1
+update t1 set a = 5 where a = 1;
+insert into t1 values (NULL);
+select * from t1 order by a;
+a
+5
+6
+insert into t1 values (7);
+insert into t1 values (NULL);
+select * from t1 order by a;
+a
+5
+6
+7
+8
+insert into t1 values (2);
+insert into t1 values (NULL);
+select * from t1 order by a;
+a
+2
+5
+6
+7
+8
+9
+update t1 set a = 4 where a = 2;
+insert into t1 values (NULL);
+select * from t1 order by a;
+a
+4
+5
+6
+7
+8
+9
+10
+delete from t1 where a = 10;
+insert into t1 values (NULL);
+select * from t1 order by a;
+a
+4
+5
+6
+7
+8
+9
+11
+replace t1 values (NULL);
+select * from t1 order by a;
+a
+4
+5
+6
+7
+8
+9
+11
+12
+replace t1 values (15);
+select * from t1 order by a;
+a
+4
+5
+6
+7
+8
+9
+11
+12
+15
+replace into t1 values (NULL);
+select * from t1 order by a;
+a
+4
+5
+6
+7
+8
+9
+11
+12
+15
+16
+replace t1 values (15);
+select * from t1 order by a;
+a
+4
+5
+6
+7
+8
+9
+11
+12
+15
+16
+insert ignore into t1 values (NULL);
+select * from t1 order by a;
+a
+4
+5
+6
+7
+8
+9
+11
+12
+15
+16
+17
+insert ignore into t1 values (15), (NULL);
+select * from t1 order by a;
+a
+4
+5
+6
+7
+8
+9
+11
+12
+15
+16
+17
+18
+insert into t1 values (15)
+on duplicate key update a = 20;
+insert into t1 values (NULL);
+select * from t1 order by a;
+a
+4
+5
+6
+7
+8
+9
+11
+12
+16
+17
+18
+20
+21
+insert into t1 values (NULL) on duplicate key update a = 30;
+select * from t1 order by a;
+a
+4
+5
+6
+7
+8
+9
+11
+12
+16
+17
+18
+20
+21
+22
+insert into t1 values (30) on duplicate key update a = 40;
+select * from t1 order by a;
+a
+4
+5
+6
+7
+8
+9
+11
+12
+16
+17
+18
+20
+21
+22
+30
+insert ignore into t1 values(600),(NULL),(NULL),(610),(NULL);
+select * from t1 order by a;
+a
+4
+5
+6
+7
+8
+9
+11
+12
+16
+17
+18
+20
+21
+22
+30
+600
+601
+602
+610
+611
+drop table t1;
+create table t1 (a int not null primary key,
+b int not null unique auto_increment) engine ndb;
+insert into t1 values (1, NULL);
+insert into t1 values (3, NULL);
+update t1 set b = 3 where a = 3;
+insert into t1 values (4, NULL);
+select * from t1 order by a;
+a b
+1 1
+3 3
+4 4
+drop table t1;
+CREATE TABLE t1 (
+pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+b INT NOT NULL,
+c INT NOT NULL UNIQUE
+) ENGINE=NDBCLUSTER;
+CREATE TABLE t2 (
+pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+b INT NOT NULL,
+c INT NOT NULL UNIQUE
+) ENGINE=MYISAM;
+SET @@session.auto_increment_increment=10;
+INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
+INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
+SELECT * FROM t1 ORDER BY pk;
+pk b c
+1 1 0
+11 2 1
+21 3 2
+SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
+COUNT(t1.pk)
+3
+TRUNCATE t1;
+TRUNCATE t2;
+SET @@session.auto_increment_offset=5;
+INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
+INSERT INTO t1 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
+INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
+INSERT INTO t2 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
+SELECT * FROM t1 ORDER BY pk;
+pk b c
+5 1 0
+15 2 1
+25 3 2
+27 4 3
+35 5 4
+99 6 5
+105 7 6
+SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
+COUNT(t1.pk)
+7
+TRUNCATE t1;
+TRUNCATE t2;
+SET @@session.auto_increment_increment=2;
+INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
+INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
+SELECT * FROM t1 ORDER BY pk;
+pk b c
+1 1 0
+3 2 1
+5 3 2
+SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
+COUNT(t1.pk)
+3
+DROP TABLE t1, t2;
+CREATE TABLE t1 (
+pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+b INT NOT NULL,
+c INT NOT NULL UNIQUE
+) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;
+CREATE TABLE t2 (
+pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+b INT NOT NULL,
+c INT NOT NULL UNIQUE
+) ENGINE=MYISAM AUTO_INCREMENT = 7;
+SET @@session.auto_increment_offset=1;
+SET @@session.auto_increment_increment=1;
+INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
+INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
+SELECT * FROM t1 ORDER BY pk;
+pk b c
+7 1 0
+8 2 1
+9 3 2
+SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
+COUNT(t1.pk)
+3
+DROP TABLE t1, t2;
+CREATE TABLE t1 (
+pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+b INT NOT NULL,
+c INT NOT NULL UNIQUE
+) ENGINE=NDBCLUSTER AUTO_INCREMENT = 3;
+CREATE TABLE t2 (
+pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+b INT NOT NULL,
+c INT NOT NULL UNIQUE
+) ENGINE=MYISAM AUTO_INCREMENT = 3;
+SET @@session.auto_increment_offset=5;
+SET @@session.auto_increment_increment=10;
+INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
+INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
+SELECT * FROM t1 ORDER BY pk;
+pk b c
+5 1 0
+15 2 1
+25 3 2
+SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
+COUNT(t1.pk)
+3
+DROP TABLE t1, t2;
+CREATE TABLE t1 (
+pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+b INT NOT NULL,
+c INT NOT NULL UNIQUE
+) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;
+CREATE TABLE t2 (
+pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+b INT NOT NULL,
+c INT NOT NULL UNIQUE
+) ENGINE=MYISAM AUTO_INCREMENT = 7;
+SET @@session.auto_increment_offset=5;
+SET @@session.auto_increment_increment=10;
+INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
+INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
+SELECT * FROM t1 ORDER BY pk;
+pk b c
+15 1 0
+25 2 1
+35 3 2
+SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
+COUNT(t1.pk)
+3
+DROP TABLE t1, t2;
+CREATE TABLE t1 (
+pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+b INT NOT NULL,
+c INT NOT NULL UNIQUE
+) ENGINE=NDBCLUSTER AUTO_INCREMENT = 5;
+CREATE TABLE t2 (
+pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+b INT NOT NULL,
+c INT NOT NULL UNIQUE
+) ENGINE=MYISAM AUTO_INCREMENT = 5;
+SET @@session.auto_increment_offset=5;
+SET @@session.auto_increment_increment=10;
+INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
+INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
+SELECT * FROM t1 ORDER BY pk;
+pk b c
+5 1 0
+15 2 1
+25 3 2
+SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
+COUNT(t1.pk)
+3
+DROP TABLE t1, t2;
+CREATE TABLE t1 (
+pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+b INT NOT NULL,
+c INT NOT NULL UNIQUE
+) ENGINE=NDBCLUSTER AUTO_INCREMENT = 100;
+CREATE TABLE t2 (
+pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+b INT NOT NULL,
+c INT NOT NULL UNIQUE
+) ENGINE=MYISAM AUTO_INCREMENT = 100;
+SET @@session.auto_increment_offset=5;
+SET @@session.auto_increment_increment=10;
+INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
+INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
+SELECT * FROM t1 ORDER BY pk;
+pk b c
+105 1 0
+115 2 1
+125 3 2
+SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
+COUNT(t1.pk)
+3
+DROP TABLE t1, t2;
+SET @@session.auto_increment_offset=1;
+SET @@session.auto_increment_increment=1;
+set ndb_autoincrement_prefetch_sz = 32;
+drop table if exists t1;
+SET @@session.auto_increment_offset=1;
+SET @@session.auto_increment_increment=1;
+set ndb_autoincrement_prefetch_sz = 32;
+create table t1 (a int not null auto_increment primary key) engine ndb;
+insert into t1 values (NULL);
+insert into t1 values (NULL);
+select * from t1 order by a;
+a
+1
+33
+insert into t1 values (20);
+insert into t1 values (NULL);
+select * from t1 order by a;
+a
+1
+20
+33
+34
+insert into t1 values (35);
+insert into t1 values (NULL);
+insert into t1 values (NULL);
+ERROR 23000: Duplicate entry '35' for key 'PRIMARY'
+select * from t1 order by a;
+a
+1
+20
+21
+33
+34
+35
+insert into t1 values (100);
+insert into t1 values (NULL);
+insert into t1 values (NULL);
+select * from t1 order by a;
+a
+1
+20
+21
+22
+33
+34
+35
+100
+101
+set auto_increment_offset = @old_auto_increment_offset;
+set auto_increment_increment = @old_auto_increment_increment;
+set ndb_autoincrement_prefetch_sz = @old_ndb_autoincrement_prefetch_sz;
+drop table t1;
diff --git a/mysql-test/suite/ndb/r/ndb_charset.result b/mysql-test/suite/ndb/r/ndb_charset.result
index 1c65a380039..e84c906dd7e 100644
--- a/mysql-test/suite/ndb/r/ndb_charset.result
+++ b/mysql-test/suite/ndb/r/ndb_charset.result
@@ -112,9 +112,9 @@ unique key(a)
) engine=ndb;
insert into t1 values(1, 'aAa');
insert into t1 values(2, 'aaa');
-ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+ERROR 23000: Duplicate entry 'aaa' for key 'a'
insert into t1 values(3, 'AAA');
-ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+ERROR 23000: Duplicate entry 'AAA' for key 'a'
select * from t1 order by p;
p a
1 aAa
@@ -138,9 +138,9 @@ unique key(a)
) engine=ndb;
insert into t1 values (1,'A'),(2,'b '),(3,'C '),(4,'d '),(5,'E'),(6,'f');
insert into t1 values(99,'b');
-ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+ERROR 23000: Duplicate entry 'b' for key 'a'
insert into t1 values(99,'a ');
-ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+ERROR 23000: Duplicate entry 'a ' for key 'a'
select a,length(a) from t1 order by a;
a length(a)
A 1
diff --git a/mysql-test/suite/ndb/r/ndb_index_unique.result b/mysql-test/suite/ndb/r/ndb_index_unique.result
index cc63ce69760..bfc0c5a2e56 100644
--- a/mysql-test/suite/ndb/r/ndb_index_unique.result
+++ b/mysql-test/suite/ndb/r/ndb_index_unique.result
@@ -22,7 +22,7 @@ select * from t1 where b = 4 order by a;
a b c
3 4 6
insert into t1 values(8, 2, 3);
-ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+ERROR 23000: Duplicate entry '2' for key 'ib'
select * from t1 order by a;
a b c
1 2 3
@@ -93,7 +93,7 @@ a b c
1 1 1
4 4 NULL
insert into t1 values(5,1,1);
-ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+ERROR 23000: Duplicate entry '1-1' for key 'bc'
drop table t1;
CREATE TABLE t2 (
a int unsigned NOT NULL PRIMARY KEY,
@@ -116,7 +116,7 @@ select * from t2 where b = 4 order by a;
a b c
3 4 6
insert into t2 values(8, 2, 3);
-ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+ERROR 23000: Duplicate entry '2-3' for key 'b'
select * from t2 order by a;
a b c
1 2 3
@@ -139,7 +139,7 @@ a b c
8 2 3
create unique index bi using hash on t2(b);
insert into t2 values(9, 3, 1);
-ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+ERROR 23000: Duplicate entry '3' for key 'bi'
alter table t2 drop index bi;
insert into t2 values(9, 3, 1);
select * from t2 order by a;
@@ -229,7 +229,7 @@ pk a
3 NULL
4 4
insert into t1 values (5,0);
-ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+ERROR 23000: Duplicate entry '0' for key 'a'
select * from t1 order by pk;
pk a
-1 NULL
@@ -262,7 +262,7 @@ pk a b c
0 NULL 18 NULL
1 3 19 abc
insert into t2 values(2,3,19,'abc');
-ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+ERROR 23000: Duplicate entry '3-abc' for key 'si'
select * from t2 order by pk;
pk a b c
-1 1 17 NULL
@@ -682,7 +682,7 @@ create table t1 (a int primary key, b varchar(1000) not null, unique key (b))
engine=ndb charset=utf8;
insert into t1 values (1, repeat(_utf8 0xe288ab6474, 200));
insert into t1 values (2, repeat(_utf8 0xe288ab6474, 200));
-ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+ERROR 23000: Duplicate entry '∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫dt∫d' for key 'b'
select a, sha1(b) from t1;
a sha1(b)
1 08f5d02c8b8bc244f275bdfc22c42c5cab0d9d7d
diff --git a/mysql-test/suite/ndb/r/ndb_insert.result b/mysql-test/suite/ndb/r/ndb_insert.result
index 51b346bbf7b..7551dc71823 100644
--- a/mysql-test/suite/ndb/r/ndb_insert.result
+++ b/mysql-test/suite/ndb/r/ndb_insert.result
@@ -657,172 +657,3 @@ a b
2 NULL
3 NULL
drop table t1;
-CREATE TABLE t1 (
-pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
-b INT NOT NULL,
-c INT NOT NULL UNIQUE
-) ENGINE=NDBCLUSTER;
-CREATE TABLE t2 (
-pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
-b INT NOT NULL,
-c INT NOT NULL UNIQUE
-) ENGINE=MYISAM;
-SET @@session.auto_increment_increment=10;
-INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
-INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
-SELECT * FROM t1 ORDER BY pk;
-pk b c
-1 1 0
-11 2 1
-21 3 2
-SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
-COUNT(t1.pk)
-3
-TRUNCATE t1;
-TRUNCATE t2;
-SET @@session.auto_increment_offset=5;
-INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
-INSERT INTO t1 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
-INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
-INSERT INTO t2 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
-SELECT * FROM t1 ORDER BY pk;
-pk b c
-5 1 0
-15 2 1
-25 3 2
-27 4 3
-35 5 4
-99 6 5
-105 7 6
-SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
-COUNT(t1.pk)
-7
-TRUNCATE t1;
-TRUNCATE t2;
-SET @@session.auto_increment_increment=2;
-INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
-INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
-SELECT * FROM t1 ORDER BY pk;
-pk b c
-1 1 0
-3 2 1
-5 3 2
-SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
-COUNT(t1.pk)
-3
-DROP TABLE t1, t2;
-CREATE TABLE t1 (
-pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
-b INT NOT NULL,
-c INT NOT NULL UNIQUE
-) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;
-CREATE TABLE t2 (
-pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
-b INT NOT NULL,
-c INT NOT NULL UNIQUE
-) ENGINE=MYISAM AUTO_INCREMENT = 7;
-SET @@session.auto_increment_offset=1;
-SET @@session.auto_increment_increment=1;
-INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
-INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
-SELECT * FROM t1 ORDER BY pk;
-pk b c
-7 1 0
-8 2 1
-9 3 2
-SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
-COUNT(t1.pk)
-3
-DROP TABLE t1, t2;
-CREATE TABLE t1 (
-pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
-b INT NOT NULL,
-c INT NOT NULL UNIQUE
-) ENGINE=NDBCLUSTER AUTO_INCREMENT = 3;
-CREATE TABLE t2 (
-pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
-b INT NOT NULL,
-c INT NOT NULL UNIQUE
-) ENGINE=MYISAM AUTO_INCREMENT = 3;
-SET @@session.auto_increment_offset=5;
-SET @@session.auto_increment_increment=10;
-INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
-INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
-SELECT * FROM t1 ORDER BY pk;
-pk b c
-5 1 0
-15 2 1
-25 3 2
-SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
-COUNT(t1.pk)
-3
-DROP TABLE t1, t2;
-CREATE TABLE t1 (
-pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
-b INT NOT NULL,
-c INT NOT NULL UNIQUE
-) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;
-CREATE TABLE t2 (
-pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
-b INT NOT NULL,
-c INT NOT NULL UNIQUE
-) ENGINE=MYISAM AUTO_INCREMENT = 7;
-SET @@session.auto_increment_offset=5;
-SET @@session.auto_increment_increment=10;
-INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
-INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
-SELECT * FROM t1 ORDER BY pk;
-pk b c
-15 1 0
-25 2 1
-35 3 2
-SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
-COUNT(t1.pk)
-3
-DROP TABLE t1, t2;
-CREATE TABLE t1 (
-pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
-b INT NOT NULL,
-c INT NOT NULL UNIQUE
-) ENGINE=NDBCLUSTER AUTO_INCREMENT = 5;
-CREATE TABLE t2 (
-pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
-b INT NOT NULL,
-c INT NOT NULL UNIQUE
-) ENGINE=MYISAM AUTO_INCREMENT = 5;
-SET @@session.auto_increment_offset=5;
-SET @@session.auto_increment_increment=10;
-INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
-INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
-SELECT * FROM t1 ORDER BY pk;
-pk b c
-5 1 0
-15 2 1
-25 3 2
-SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
-COUNT(t1.pk)
-3
-DROP TABLE t1, t2;
-CREATE TABLE t1 (
-pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
-b INT NOT NULL,
-c INT NOT NULL UNIQUE
-) ENGINE=NDBCLUSTER AUTO_INCREMENT = 100;
-CREATE TABLE t2 (
-pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
-b INT NOT NULL,
-c INT NOT NULL UNIQUE
-) ENGINE=MYISAM AUTO_INCREMENT = 100;
-SET @@session.auto_increment_offset=5;
-SET @@session.auto_increment_increment=10;
-INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
-INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
-SELECT * FROM t1 ORDER BY pk;
-pk b c
-105 1 0
-115 2 1
-125 3 2
-SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
-COUNT(t1.pk)
-3
-DROP TABLE t1, t2;
diff --git a/mysql-test/suite/ndb/r/ndb_update.result b/mysql-test/suite/ndb/r/ndb_update.result
index ed51daee5cb..fa083587956 100644
--- a/mysql-test/suite/ndb/r/ndb_update.result
+++ b/mysql-test/suite/ndb/r/ndb_update.result
@@ -28,7 +28,7 @@ pk1 b c
2 2 2
4 1 1
UPDATE t1 set pk1 = 1, c = 2 where pk1 = 4;
-ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+ERROR 23000: Duplicate entry '2' for key 'c'
UPDATE IGNORE t1 set pk1 = 1, c = 2 where pk1 = 4;
select * from t1 order by pk1;
pk1 b c
@@ -62,9 +62,9 @@ INSERT INTO t3 VALUES (2, 2);
UPDATE t1 SET a = 1;
UPDATE t1 SET a = 1 ORDER BY a;
UPDATE t2 SET a = 1;
-ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+ERROR 23000: Duplicate entry '1-2' for key 'a'
UPDATE t2 SET a = 1 ORDER BY a;
-ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+ERROR 23000: Duplicate entry '1-2' for key 'a'
UPDATE t3 SET a = 1;
ERROR 23000: Duplicate entry '1-2' for key 'PRIMARY'
UPDATE t3 SET a = 1 ORDER BY a;
diff --git a/mysql-test/suite/ndb/t/ndb_auto_increment.test b/mysql-test/suite/ndb/t/ndb_auto_increment.test
new file mode 100644
index 00000000000..14e7ae7ca7b
--- /dev/null
+++ b/mysql-test/suite/ndb/t/ndb_auto_increment.test
@@ -0,0 +1,293 @@
+-- source include/have_multi_ndb.inc
+-- source include/not_embedded.inc
+
+--disable_warnings
+connection server1;
+DROP TABLE IF EXISTS t1,t2;
+connection server2;
+DROP TABLE IF EXISTS t1;
+connection server1;
+--enable_warnings
+
+set @old_auto_increment_offset = @@session.auto_increment_offset;
+set @old_auto_increment_increment = @@session.auto_increment_increment;
+set @old_ndb_autoincrement_prefetch_sz = @@session.ndb_autoincrement_prefetch_sz;
+
+flush status;
+
+create table t1 (a int not null auto_increment primary key) engine ndb;
+
+# Step 1: Verify simple insert
+insert into t1 values (NULL);
+select * from t1 order by a;
+
+# Step 2: Verify simple update with higher than highest value causes
+# next insert to use updated_value + 1
+update t1 set a = 5 where a = 1;
+insert into t1 values (NULL);
+select * from t1 order by a;
+
+# Step 3: Verify insert that inserts higher than highest value causes
+# next insert to use inserted_value + 1
+insert into t1 values (7);
+insert into t1 values (NULL);
+select * from t1 order by a;
+
+# Step 4: Verify that insert into hole, lower than highest value doesn't
+# affect next insert
+insert into t1 values (2);
+insert into t1 values (NULL);
+select * from t1 order by a;
+
+# Step 5: Verify that update into hole, lower than highest value doesn't
+# affect next insert
+update t1 set a = 4 where a = 2;
+insert into t1 values (NULL);
+select * from t1 order by a;
+
+# Step 6: Verify that delete of highest value doesn't cause the next
+# insert to reuse this value
+delete from t1 where a = 10;
+insert into t1 values (NULL);
+select * from t1 order by a;
+
+# Step 7: Verify that REPLACE has the same effect as INSERT
+replace t1 values (NULL);
+select * from t1 order by a;
+replace t1 values (15);
+select * from t1 order by a;
+replace into t1 values (NULL);
+select * from t1 order by a;
+
+# Step 8: Verify that REPLACE has the same effect as UPDATE
+replace t1 values (15);
+select * from t1 order by a;
+
+# Step 9: Verify that IGNORE doesn't affect auto_increment
+insert ignore into t1 values (NULL);
+select * from t1 order by a;
+insert ignore into t1 values (15), (NULL);
+select * from t1 order by a;
+
+# Step 10: Verify that on duplicate key as UPDATE behaves as an
+# UPDATE
+insert into t1 values (15)
+on duplicate key update a = 20;
+insert into t1 values (NULL);
+select * from t1 order by a;
+
+# Step 11: Verify that on duplicate key as INSERT behaves as INSERT
+insert into t1 values (NULL) on duplicate key update a = 30;
+select * from t1 order by a;
+insert into t1 values (30) on duplicate key update a = 40;
+select * from t1 order by a;
+
+#Step 12: Verify INSERT IGNORE (bug#32055)
+insert ignore into t1 values(600),(NULL),(NULL),(610),(NULL);
+select * from t1 order by a;
+drop table t1;
+
+#Step 13: Verify auto_increment of unique key
+create table t1 (a int not null primary key,
+ b int not null unique auto_increment) engine ndb;
+insert into t1 values (1, NULL);
+insert into t1 values (3, NULL);
+update t1 set b = 3 where a = 3;
+insert into t1 values (4, NULL);
+select * from t1 order by a;
+drop table t1;
+
+#Step 14: Verify that auto_increment_increment and auto_increment_offset
+# work as expected
+
+CREATE TABLE t1 (
+ pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+ b INT NOT NULL,
+ c INT NOT NULL UNIQUE
+) ENGINE=NDBCLUSTER;
+
+CREATE TABLE t2 (
+ pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+ b INT NOT NULL,
+ c INT NOT NULL UNIQUE
+) ENGINE=MYISAM;
+
+SET @@session.auto_increment_increment=10;
+INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
+INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
+SELECT * FROM t1 ORDER BY pk;
+SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
+TRUNCATE t1;
+TRUNCATE t2;
+SET @@session.auto_increment_offset=5;
+INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
+INSERT INTO t1 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
+INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
+INSERT INTO t2 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
+SELECT * FROM t1 ORDER BY pk;
+SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
+TRUNCATE t1;
+TRUNCATE t2;
+SET @@session.auto_increment_increment=2;
+INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
+INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
+SELECT * FROM t1 ORDER BY pk;
+SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
+DROP TABLE t1, t2;
+
+CREATE TABLE t1 (
+ pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+ b INT NOT NULL,
+ c INT NOT NULL UNIQUE
+) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;
+
+CREATE TABLE t2 (
+ pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+ b INT NOT NULL,
+ c INT NOT NULL UNIQUE
+) ENGINE=MYISAM AUTO_INCREMENT = 7;
+
+SET @@session.auto_increment_offset=1;
+SET @@session.auto_increment_increment=1;
+INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
+INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
+SELECT * FROM t1 ORDER BY pk;
+SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
+DROP TABLE t1, t2;
+
+CREATE TABLE t1 (
+ pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+ b INT NOT NULL,
+ c INT NOT NULL UNIQUE
+) ENGINE=NDBCLUSTER AUTO_INCREMENT = 3;
+
+CREATE TABLE t2 (
+ pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+ b INT NOT NULL,
+ c INT NOT NULL UNIQUE
+) ENGINE=MYISAM AUTO_INCREMENT = 3;
+
+SET @@session.auto_increment_offset=5;
+SET @@session.auto_increment_increment=10;
+INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
+INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
+SELECT * FROM t1 ORDER BY pk;
+SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
+DROP TABLE t1, t2;
+
+CREATE TABLE t1 (
+ pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+ b INT NOT NULL,
+ c INT NOT NULL UNIQUE
+) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;
+
+CREATE TABLE t2 (
+ pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+ b INT NOT NULL,
+ c INT NOT NULL UNIQUE
+) ENGINE=MYISAM AUTO_INCREMENT = 7;
+
+SET @@session.auto_increment_offset=5;
+SET @@session.auto_increment_increment=10;
+INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
+INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
+SELECT * FROM t1 ORDER BY pk;
+SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
+DROP TABLE t1, t2;
+
+CREATE TABLE t1 (
+ pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+ b INT NOT NULL,
+ c INT NOT NULL UNIQUE
+) ENGINE=NDBCLUSTER AUTO_INCREMENT = 5;
+
+CREATE TABLE t2 (
+ pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+ b INT NOT NULL,
+ c INT NOT NULL UNIQUE
+) ENGINE=MYISAM AUTO_INCREMENT = 5;
+
+SET @@session.auto_increment_offset=5;
+SET @@session.auto_increment_increment=10;
+INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
+INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
+SELECT * FROM t1 ORDER BY pk;
+SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
+DROP TABLE t1, t2;
+
+CREATE TABLE t1 (
+ pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+ b INT NOT NULL,
+ c INT NOT NULL UNIQUE
+) ENGINE=NDBCLUSTER AUTO_INCREMENT = 100;
+
+CREATE TABLE t2 (
+ pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
+ b INT NOT NULL,
+ c INT NOT NULL UNIQUE
+) ENGINE=MYISAM AUTO_INCREMENT = 100;
+
+SET @@session.auto_increment_offset=5;
+SET @@session.auto_increment_increment=10;
+INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
+INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
+SELECT * FROM t1 ORDER BY pk;
+SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
+DROP TABLE t1, t2;
+
+#Step 15: Now verify that behaviour on multiple MySQL Servers behaves
+# properly. Start by dropping table and recreating it to start
+# counters and id caches from zero again.
+--disable_warnings
+connection server2;
+SET @@session.auto_increment_offset=1;
+SET @@session.auto_increment_increment=1;
+set ndb_autoincrement_prefetch_sz = 32;
+drop table if exists t1;
+connection server1;
+SET @@session.auto_increment_offset=1;
+SET @@session.auto_increment_increment=1;
+set ndb_autoincrement_prefetch_sz = 32;
+--enable_warnings
+
+
+create table t1 (a int not null auto_increment primary key) engine ndb;
+# Basic test, ensure that the second server gets a new range.
+#Generate record with key = 1
+insert into t1 values (NULL);
+connection server2;
+#Generate record with key = 33
+insert into t1 values (NULL);
+connection server1;
+select * from t1 order by a;
+
+#This insert should not affect the range of the second server
+insert into t1 values (20);
+connection server2;
+insert into t1 values (NULL);
+select * from t1 order by a;
+
+connection server1;
+#This insert should remove cached values but also skip values already
+#taken by server2; given that there is no method of communicating with
+#the other server, it should also cause a conflict
+connection server1;
+
+insert into t1 values (35);
+insert into t1 values (NULL);
+connection server2;
+--error ER_DUP_ENTRY
+insert into t1 values (NULL);
+select * from t1 order by a;
+
+insert into t1 values (100);
+insert into t1 values (NULL);
+connection server1;
+insert into t1 values (NULL);
+select * from t1 order by a;
+
+set auto_increment_offset = @old_auto_increment_offset;
+set auto_increment_increment = @old_auto_increment_increment;
+set ndb_autoincrement_prefetch_sz = @old_ndb_autoincrement_prefetch_sz;
+
+drop table t1;
diff --git a/mysql-test/suite/ndb/t/ndb_insert.test b/mysql-test/suite/ndb/t/ndb_insert.test
index 5b74cc9202c..d659f8357f2 100644
--- a/mysql-test/suite/ndb/t/ndb_insert.test
+++ b/mysql-test/suite/ndb/t/ndb_insert.test
@@ -638,142 +638,4 @@ create table t1(a int primary key, b int, unique key(b)) engine=ndb;
insert ignore into t1 values (1,0), (2,0), (2,null), (3,null);
select * from t1 order by a;
drop table t1;
-
-# Bug#26342 auto_increment_increment AND auto_increment_offset REALLY REALLY anger NDB cluster
-
-CREATE TABLE t1 (
- pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
- b INT NOT NULL,
- c INT NOT NULL UNIQUE
-) ENGINE=NDBCLUSTER;
-
-CREATE TABLE t2 (
- pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
- b INT NOT NULL,
- c INT NOT NULL UNIQUE
-) ENGINE=MYISAM;
-
-SET @@session.auto_increment_increment=10;
-INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
-INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
-SELECT * FROM t1 ORDER BY pk;
-SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
-TRUNCATE t1;
-TRUNCATE t2;
-SET @@session.auto_increment_offset=5;
-INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
-INSERT INTO t1 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
-INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
-INSERT INTO t2 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
-SELECT * FROM t1 ORDER BY pk;
-SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
-TRUNCATE t1;
-TRUNCATE t2;
-SET @@session.auto_increment_increment=2;
-INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
-INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
-SELECT * FROM t1 ORDER BY pk;
-SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
-DROP TABLE t1, t2;
-
-CREATE TABLE t1 (
- pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
- b INT NOT NULL,
- c INT NOT NULL UNIQUE
-) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;
-
-CREATE TABLE t2 (
- pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
- b INT NOT NULL,
- c INT NOT NULL UNIQUE
-) ENGINE=MYISAM AUTO_INCREMENT = 7;
-
-SET @@session.auto_increment_offset=1;
-SET @@session.auto_increment_increment=1;
-INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
-INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
-SELECT * FROM t1 ORDER BY pk;
-SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
-DROP TABLE t1, t2;
-
-CREATE TABLE t1 (
- pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
- b INT NOT NULL,
- c INT NOT NULL UNIQUE
-) ENGINE=NDBCLUSTER AUTO_INCREMENT = 3;
-
-CREATE TABLE t2 (
- pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
- b INT NOT NULL,
- c INT NOT NULL UNIQUE
-) ENGINE=MYISAM AUTO_INCREMENT = 3;
-
-SET @@session.auto_increment_offset=5;
-SET @@session.auto_increment_increment=10;
-INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
-INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
-SELECT * FROM t1 ORDER BY pk;
-SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
-DROP TABLE t1, t2;
-
-CREATE TABLE t1 (
- pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
- b INT NOT NULL,
- c INT NOT NULL UNIQUE
-) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;
-
-CREATE TABLE t2 (
- pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
- b INT NOT NULL,
- c INT NOT NULL UNIQUE
-) ENGINE=MYISAM AUTO_INCREMENT = 7;
-
-SET @@session.auto_increment_offset=5;
-SET @@session.auto_increment_increment=10;
-INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
-INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
-SELECT * FROM t1 ORDER BY pk;
-SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
-DROP TABLE t1, t2;
-
-CREATE TABLE t1 (
- pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
- b INT NOT NULL,
- c INT NOT NULL UNIQUE
-) ENGINE=NDBCLUSTER AUTO_INCREMENT = 5;
-
-CREATE TABLE t2 (
- pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
- b INT NOT NULL,
- c INT NOT NULL UNIQUE
-) ENGINE=MYISAM AUTO_INCREMENT = 5;
-
-SET @@session.auto_increment_offset=5;
-SET @@session.auto_increment_increment=10;
-INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
-INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
-SELECT * FROM t1 ORDER BY pk;
-SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
-DROP TABLE t1, t2;
-
-CREATE TABLE t1 (
- pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
- b INT NOT NULL,
- c INT NOT NULL UNIQUE
-) ENGINE=NDBCLUSTER AUTO_INCREMENT = 100;
-
-CREATE TABLE t2 (
- pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
- b INT NOT NULL,
- c INT NOT NULL UNIQUE
-) ENGINE=MYISAM AUTO_INCREMENT = 100;
-
-SET @@session.auto_increment_offset=5;
-SET @@session.auto_increment_increment=10;
-INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
-INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
-SELECT * FROM t1 ORDER BY pk;
-SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
-DROP TABLE t1, t2;
-
# End of 4.1 tests
diff --git a/mysql-test/suite/rpl_ndb/r/rpl_row_basic_7ndb.result b/mysql-test/suite/rpl_ndb/r/rpl_row_basic_7ndb.result
index abd5bad8e49..f176d4eb52a 100644
--- a/mysql-test/suite/rpl_ndb/r/rpl_row_basic_7ndb.result
+++ b/mysql-test/suite/rpl_ndb/r/rpl_row_basic_7ndb.result
@@ -389,9 +389,9 @@ INSERT INTO t8 VALUES (99,99,99);
INSERT INTO t8 VALUES (99,22,33);
ERROR 23000: Duplicate entry '99' for key 'PRIMARY'
INSERT INTO t8 VALUES (11,99,33);
-ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+ERROR 23000: Duplicate entry '99' for key 'b'
INSERT INTO t8 VALUES (11,22,99);
-ERROR 23000: Duplicate entry '' for key '*UNKNOWN*'
+ERROR 23000: Duplicate entry '99' for key 'c'
SELECT * FROM t8 ORDER BY a;
a b c
99 99 99
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index adc78f26938..b9d7e846d84 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -588,6 +588,24 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
err.code, res));
if (res == HA_ERR_FOUND_DUPP_KEY)
{
+ char *error_data= err.details;
+ uint dupkey= MAX_KEY;
+
+ for (uint i= 0; i < MAX_KEY; i++)
+ {
+ if (m_index[i].type == UNIQUE_INDEX ||
+ m_index[i].type == UNIQUE_ORDERED_INDEX)
+ {
+ const NDBINDEX *unique_index=
+ (const NDBINDEX *) m_index[i].unique_index;
+ if (unique_index &&
+ (char *) unique_index->getObjectId() == error_data)
+ {
+ dupkey= i;
+ break;
+ }
+ }
+ }
if (m_rows_to_insert == 1)
{
/*
@@ -595,7 +613,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans)
violations here, so we need to return MAX_KEY for non-primary
to signal that key is unknown
*/
- m_dupkey= err.code == 630 ? table_share->primary_key : MAX_KEY;
+ m_dupkey= err.code == 630 ? table_share->primary_key : dupkey;
}
else
{
@@ -2704,6 +2722,29 @@ int ha_ndbcluster::full_table_scan(uchar *buf)
DBUG_RETURN(next_result(buf));
}
+int
+ha_ndbcluster::set_auto_inc(Field *field)
+{
+ DBUG_ENTER("ha_ndbcluster::set_auto_inc");
+ Ndb *ndb= get_ndb();
+ bool read_bit= bitmap_is_set(table->read_set, field->field_index);
+ bitmap_set_bit(table->read_set, field->field_index);
+ Uint64 next_val= (Uint64) field->val_int() + 1;
+ if (!read_bit)
+ bitmap_clear_bit(table->read_set, field->field_index);
+#ifndef DBUG_OFF
+ char buff[22];
+ DBUG_PRINT("info",
+ ("Trying to set next auto increment value to %s",
+ llstr(next_val, buff)));
+#endif
+ Ndb_tuple_id_range_guard g(m_share);
+ if (ndb->setAutoIncrementValue(m_table, g.range, next_val, TRUE)
+ == -1)
+ ERR_RETURN(ndb->getNdbError());
+ DBUG_RETURN(0);
+}
+
/*
Insert one record into NDB
*/
@@ -2910,18 +2951,11 @@ int ha_ndbcluster::write_row(uchar *record)
}
if ((has_auto_increment) && (m_skip_auto_increment))
{
- Ndb *ndb= get_ndb();
- Uint64 next_val= (Uint64) table->next_number_field->val_int() + 1;
-#ifndef DBUG_OFF
- char buff[22];
- DBUG_PRINT("info",
- ("Trying to set next auto increment value to %s",
- llstr(next_val, buff)));
-#endif
- Ndb_tuple_id_range_guard g(m_share);
- if (ndb->setAutoIncrementValue(m_table, g.range, next_val, TRUE)
- == -1)
- ERR_RETURN(ndb->getNdbError());
+ int ret_val;
+ if ((ret_val= set_auto_inc(table->next_number_field)))
+ {
+ DBUG_RETURN(ret_val);
+ }
}
m_skip_auto_increment= TRUE;
@@ -3046,6 +3080,17 @@ int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data)
// Insert new row
DBUG_PRINT("info", ("delete succeded"));
m_primary_key_update= TRUE;
+ /*
+ If we are updating a primary key with auto_increment
+ then we need to update the auto_increment counter
+ */
+ if (table->found_next_number_field &&
+ bitmap_is_set(table->write_set,
+ table->found_next_number_field->field_index) &&
+ (error= set_auto_inc(table->found_next_number_field)))
+ {
+ DBUG_RETURN(error);
+ }
insert_res= write_row(new_data);
m_primary_key_update= FALSE;
if (insert_res)
@@ -3068,7 +3113,17 @@ int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data)
DBUG_PRINT("info", ("delete+insert succeeded"));
DBUG_RETURN(0);
}
-
+ /*
+ If we are updating a unique key with auto_increment
+ then we need to update the auto_increment counter
+ */
+ if (table->found_next_number_field &&
+ bitmap_is_set(table->write_set,
+ table->found_next_number_field->field_index) &&
+ (error= set_auto_inc(table->found_next_number_field)))
+ {
+ DBUG_RETURN(error);
+ }
if (cursor)
{
/*
@@ -4478,9 +4533,11 @@ int ha_ndbcluster::init_handler_for_statement(THD *thd, Thd_ndb *thd_ndb)
// store thread specific data first to set the right context
m_force_send= thd->variables.ndb_force_send;
m_ha_not_exact_count= !thd->variables.ndb_use_exact_count;
- m_autoincrement_prefetch=
- (ha_rows) thd->variables.ndb_autoincrement_prefetch_sz;
-
+ m_autoincrement_prefetch=
+ (thd->variables.ndb_autoincrement_prefetch_sz >
+ NDB_DEFAULT_AUTO_PREFETCH) ?
+ (ha_rows) thd->variables.ndb_autoincrement_prefetch_sz
+ : (ha_rows) NDB_DEFAULT_AUTO_PREFETCH;
m_active_trans= thd_ndb->trans;
DBUG_ASSERT(m_active_trans);
// Start of transaction
@@ -6163,8 +6220,9 @@ void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment,
ulonglong *first_value,
ulonglong *nb_reserved_values)
{
- int cache_size;
+ uint cache_size;
Uint64 auto_value;
+ THD *thd= current_thd;
DBUG_ENTER("get_auto_increment");
DBUG_PRINT("enter", ("m_tabname: %s", m_tabname));
Ndb *ndb= get_ndb();
@@ -6174,11 +6232,14 @@ void ha_ndbcluster::get_auto_increment(ulonglong offset, ulonglong increment,
/* We guessed too low */
m_rows_to_insert+= m_autoincrement_prefetch;
}
- cache_size=
- (int) ((m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ?
- m_rows_to_insert - m_rows_inserted :
- ((m_rows_to_insert > m_autoincrement_prefetch) ?
- m_rows_to_insert : m_autoincrement_prefetch));
+ uint remaining= m_rows_to_insert - m_rows_inserted;
+ uint min_prefetch=
+ (remaining < thd->variables.ndb_autoincrement_prefetch_sz) ?
+ thd->variables.ndb_autoincrement_prefetch_sz
+ : remaining;
+ cache_size= ((remaining < m_autoincrement_prefetch) ?
+ min_prefetch
+ : remaining);
uint retries= NDB_AUTO_INCREMENT_RETRIES;
int retry_sleep= 30; /* 30 milliseconds, transaction */
for (;;)
@@ -6265,7 +6326,7 @@ ha_ndbcluster::ha_ndbcluster(handlerton *hton, TABLE_SHARE *table_arg):
m_dupkey((uint) -1),
m_ha_not_exact_count(FALSE),
m_force_send(TRUE),
- m_autoincrement_prefetch((ha_rows) 32),
+ m_autoincrement_prefetch((ha_rows) NDB_DEFAULT_AUTO_PREFETCH),
m_transaction_on(TRUE),
m_cond(NULL),
m_multi_cursor(NULL)
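
A note on the rewritten prefetch arithmetic in get_auto_increment() above: the rule is easier to follow with concrete numbers. The stand-alone C++ sketch below is illustrative only (the helper name, the sample values and the printf driver are not part of the patch), but it reproduces the same logic: a statement that still needs fewer rows than the handler's prefetch size reserves at least the session's ndb_autoincrement_prefetch_sz worth of ids, while a larger statement reserves exactly the rows it still needs.

    #include <cstdio>

    // Stand-alone sketch of the cache_size rule from ha_ndbcluster::get_auto_increment().
    // The helper name and the sample values below are illustrative, not part of the patch.
    static unsigned auto_inc_cache_size(unsigned remaining,   // m_rows_to_insert - m_rows_inserted
                                        unsigned session_sz,  // @@ndb_autoincrement_prefetch_sz
                                        unsigned handler_sz)  // m_autoincrement_prefetch (floored at 32)
    {
      // Small statements reserve at least the session prefetch size.
      unsigned min_prefetch = (remaining < session_sz) ? session_sz : remaining;
      // Statements that still need handler_sz rows or more reserve exactly that many.
      return (remaining < handler_sz) ? min_prefetch : remaining;
    }

    int main()
    {
      std::printf("%u\n", auto_inc_cache_size(5, 32, 32));   // small insert -> 32
      std::printf("%u\n", auto_inc_cache_size(100, 32, 32)); // bulk insert  -> 100
      return 0;
    }

Combined with the NDB_DEFAULT_AUTO_PREFETCH floor of 32 applied in init_handler_for_statement(), small statements still fetch ids a block at a time while multi-row statements can reserve their whole range in one call.
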
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index fd337303853..cc79402fe92 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -31,6 +31,8 @@
#include <ndbapi_limits.h>
#define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8
+#define NDB_DEFAULT_AUTO_PREFETCH 32
+
class Ndb; // Forward declaration
class NdbOperation; // Forward declaration
@@ -446,6 +448,7 @@ private:
uint errcode);
int peek_indexed_rows(const uchar *record, NDB_WRITE_OP write_op);
int fetch_next(NdbScanOperation* op);
+ int set_auto_inc(Field *field);
int next_result(uchar *buf);
int define_read_attrs(uchar* buf, NdbOperation* op);
int filtered_scan(const uchar *key, uint key_len,
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index fad2e5dcd22..7fd625e496b 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -5534,13 +5534,8 @@ master-ssl",
{"ndb-autoincrement-prefetch-sz", OPT_NDB_AUTOINCREMENT_PREFETCH_SZ,
"Specify number of autoincrement values that are prefetched.",
(uchar**) &global_system_variables.ndb_autoincrement_prefetch_sz,
- (uchar**) &global_system_variables.ndb_autoincrement_prefetch_sz,
- 0, GET_ULONG, REQUIRED_ARG, 32, 1, 256, 0, 0, 0},
- {"ndb-distribution", OPT_NDB_DISTRIBUTION,
- "Default distribution for new tables in ndb",
- (uchar**) &opt_ndb_distribution,
- (uchar**) &opt_ndb_distribution,
- 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ (uchar**) &max_system_variables.ndb_autoincrement_prefetch_sz,
+ 0, GET_ULONG, REQUIRED_ARG, 1, 1, 256, 0, 0, 0},
{"ndb-force-send", OPT_NDB_FORCE_SEND,
"Force send of buffers to ndb immediately without waiting for "
"other threads.",
diff --git a/storage/ndb/include/kernel/signaldata/TcKeyRef.hpp b/storage/ndb/include/kernel/signaldata/TcKeyRef.hpp
index 2846ce3854f..56f6cdae29d 100644
--- a/storage/ndb/include/kernel/signaldata/TcKeyRef.hpp
+++ b/storage/ndb/include/kernel/signaldata/TcKeyRef.hpp
@@ -40,12 +40,13 @@ class TcKeyRef {
friend bool printTCKEYREF(FILE *, const Uint32 *, Uint32, Uint16);
public:
- STATIC_CONST( SignalLength = 4 );
+ STATIC_CONST( SignalLength = 5 );
private:
Uint32 connectPtr;
Uint32 transId[2];
Uint32 errorCode;
+ Uint32 errorData;
};
#endif
diff --git a/storage/ndb/include/kernel/signaldata/TcRollbackRep.hpp b/storage/ndb/include/kernel/signaldata/TcRollbackRep.hpp
index 3b5e2f3d3cb..609756605d5 100644
--- a/storage/ndb/include/kernel/signaldata/TcRollbackRep.hpp
+++ b/storage/ndb/include/kernel/signaldata/TcRollbackRep.hpp
@@ -38,12 +38,13 @@ class TcRollbackRep {
friend bool printTCROLBACKREP(FILE *, const Uint32 *, Uint32, Uint16);
public:
- STATIC_CONST( SignalLength = 4 );
+ STATIC_CONST( SignalLength = 5 );
private:
Uint32 connectPtr;
Uint32 transId[2];
Uint32 returnCode;
+ Uint32 errorData;
};
#endif
diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp
index 58882e139fd..0e782ba9214 100644
--- a/storage/ndb/include/ndbapi/NdbDictionary.hpp
+++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp
@@ -1020,7 +1020,7 @@ public:
* Get the name of the table being indexed
*/
const char * getTable() const;
-
+
/**
* Get the number of columns in the index
*/
diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.cpp b/storage/ndb/src/kernel/blocks/backup/Backup.cpp
index 64e2c41aa69..45501bf50d5 100644
--- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp
@@ -1026,8 +1026,9 @@ Backup::execINCL_NODEREQ(Signal* signal)
break;
}//if
}//for
- signal->theData[0] = reference();
- sendSignal(senderRef, GSN_INCL_NODECONF, signal, 1, JBB);
+ signal->theData[0] = inclNode;
+ signal->theData[1] = reference();
+ sendSignal(senderRef, GSN_INCL_NODECONF, signal, 2, JBB);
}
/*****************************************************************************
diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
index 3406176d7a8..6c869435bfa 100644
--- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
@@ -421,9 +421,10 @@ void Cmvmi::execCLOSE_COMREQ(Signal* signal)
// Uint32 noOfNodes = closeCom->noOfNodes;
jamEntry();
- for (unsigned i = 0; i < MAX_NODES; i++){
- if(NodeBitmask::get(closeCom->theNodes, i)){
-
+ for (unsigned i = 0; i < MAX_NODES; i++)
+ {
+ if(NodeBitmask::get(closeCom->theNodes, i))
+ {
jam();
//-----------------------------------------------------
@@ -437,7 +438,9 @@ void Cmvmi::execCLOSE_COMREQ(Signal* signal)
globalTransporterRegistry.do_disconnect(i);
}
}
- if (failNo != 0) {
+
+ if (failNo != 0)
+ {
jam();
signal->theData[0] = userRef;
signal->theData[1] = failNo;
@@ -456,13 +459,21 @@ void Cmvmi::execOPEN_COMREQ(Signal* signal)
jamEntry();
const Uint32 len = signal->getLength();
- if(len == 2){
-
+ if(len == 2)
+ {
#ifdef ERROR_INSERT
if (! ((ERROR_INSERTED(9000) || ERROR_INSERTED(9002))
&& c_error_9000_nodes_mask.get(tStartingNode)))
#endif
{
+ if (globalData.theStartLevel != NodeState::SL_STARTED &&
+ (getNodeInfo(tStartingNode).m_type != NodeInfo::DB &&
+ getNodeInfo(tStartingNode).m_type != NodeInfo::MGM))
+ {
+ jam();
+ goto done;
+ }
+
globalTransporterRegistry.do_connect(tStartingNode);
globalTransporterRegistry.setIOState(tStartingNode, HaltIO);
@@ -475,9 +486,11 @@ void Cmvmi::execOPEN_COMREQ(Signal* signal)
//-----------------------------------------------------
}
} else {
- for(unsigned int i = 1; i < MAX_NODES; i++ ) {
+ for(unsigned int i = 1; i < MAX_NODES; i++ )
+ {
jam();
- if (i != getOwnNodeId() && getNodeInfo(i).m_type == tData2){
+ if (i != getOwnNodeId() && getNodeInfo(i).m_type == tData2)
+ {
jam();
#ifdef ERROR_INSERT
@@ -496,6 +509,7 @@ void Cmvmi::execOPEN_COMREQ(Signal* signal)
}
}
+done:
if (userRef != 0) {
jam();
signal->theData[0] = tStartingNode;
@@ -536,24 +550,10 @@ void Cmvmi::execDISCONNECT_REP(Signal *signal)
setNodeInfo(hostId).m_connectCount++;
const NodeInfo::NodeType type = getNodeInfo(hostId).getType();
ndbrequire(type != NodeInfo::INVALID);
-
- if(type == NodeInfo::DB || globalData.theStartLevel == NodeState::SL_STARTED){
- jam();
- DisconnectRep * const rep = (DisconnectRep *)&signal->theData[0];
- rep->nodeId = hostId;
- rep->err = errNo;
- sendSignal(QMGR_REF, GSN_DISCONNECT_REP, signal,
- DisconnectRep::SignalLength, JBA);
- } else if((globalData.theStartLevel == NodeState::SL_CMVMI ||
- globalData.theStartLevel == NodeState::SL_STARTING)
- && type == NodeInfo::MGM) {
- /**
- * Someone disconnected during cmvmi period
- */
- jam();
- globalTransporterRegistry.do_connect(hostId);
- }
+ sendSignal(QMGR_REF, GSN_DISCONNECT_REP, signal,
+ DisconnectRep::SignalLength, JBA);
+
cancelSubscription(hostId);
signal->theData[0] = NDB_LE_Disconnected;
@@ -587,6 +587,8 @@ void Cmvmi::execCONNECT_REP(Signal *signal){
*/
if(type == NodeInfo::MGM){
jam();
+ signal->theData[0] = hostId;
+ sendSignal(QMGR_REF, GSN_CONNECT_REP, signal, 1, JBA);
} else {
/**
* Dont allow api nodes to connect
@@ -802,6 +804,8 @@ Cmvmi::execSTART_ORD(Signal* signal) {
}
}
}
+
+ EXECUTE_DIRECT(QMGR, GSN_START_ORD, signal, 1);
return ;
}
@@ -829,9 +833,6 @@ Cmvmi::execSTART_ORD(Signal* signal) {
*
* Do Restart
*/
-
- globalScheduler.clear();
- globalTimeQueue.clear();
// Disconnect all nodes as part of the system restart.
// We need to ensure that we are starting up
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index 569958a6aa9..7ced078144a 100644
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -3825,8 +3825,9 @@ void Dbdict::execINCL_NODEREQ(Signal* signal)
c_nodes.getPtr(nodePtr);
ndbrequire(nodePtr.p->nodeState == NodeRecord::NDB_NODE_DEAD);
nodePtr.p->nodeState = NodeRecord::NDB_NODE_ALIVE;
- signal->theData[0] = reference();
- sendSignal(retRef, GSN_INCL_NODECONF, signal, 1, JBB);
+ signal->theData[0] = nodePtr.i;
+ signal->theData[1] = reference();
+ sendSignal(retRef, GSN_INCL_NODECONF, signal, 2, JBB);
c_aliveNodes.set(nodePtr.i);
}//execINCL_NODEREQ()
diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index 28378c41f25..bbacb300089 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -2135,12 +2135,9 @@ void Dbdih::gcpBlockedLab(Signal* signal)
/*---------------------------------------------------------------------------*/
void Dbdih::execINCL_NODECONF(Signal* signal)
{
- Uint32 TsendNodeId;
- Uint32 TstartNode_or_blockref;
-
jamEntry();
- TstartNode_or_blockref = signal->theData[0];
- TsendNodeId = signal->theData[1];
+ Uint32 TstartNode = signal->theData[0];
+ Uint32 TsendNodeId_or_blockref = signal->theData[1];
Uint32 blocklist[6];
blocklist[0] = clocallqhblockref;
@@ -2152,9 +2149,21 @@ void Dbdih::execINCL_NODECONF(Signal* signal)
for (Uint32 i = 0; blocklist[i] != 0; i++)
{
- if (TstartNode_or_blockref == blocklist[i])
+ if (TsendNodeId_or_blockref == blocklist[i])
{
jam();
+
+ if (TstartNode != c_nodeStartSlave.nodeId)
+ {
+ jam();
+ warningEvent("Recevied INCL_NODECONF for %u from %s"
+ " while %u is starting",
+ TstartNode,
+ getBlockName(refToBlock(TsendNodeId_or_blockref)),
+ c_nodeStartSlave.nodeId);
+ return;
+ }
+
if (getNodeStatus(c_nodeStartSlave.nodeId) == NodeRecord::ALIVE &&
blocklist[i+1] != 0)
{
@@ -2182,10 +2191,21 @@ void Dbdih::execINCL_NODECONF(Signal* signal)
}
}
}
+
+ if (c_nodeStartMaster.startNode != TstartNode)
+ {
+ jam();
+ warningEvent("Recevied INCL_NODECONF for %u from %u"
+ " while %u is starting",
+ TstartNode,
+ TsendNodeId_or_blockref,
+ c_nodeStartMaster.startNode);
+ return;
+ }
ndbrequire(cmasterdihref = reference());
- receiveLoopMacro(INCL_NODEREQ, TsendNodeId);
-
+ receiveLoopMacro(INCL_NODEREQ, TsendNodeId_or_blockref);
+
CRASH_INSERTION(7128);
/*-------------------------------------------------------------------------*/
// Now that we have included the starting node in the node lists in the
@@ -6159,7 +6179,7 @@ void Dbdih::execMASTER_LCPREQ(Signal* signal)
jam();
ndbout_c("resending GSN_MASTER_LCPREQ");
sendSignalWithDelay(reference(), GSN_MASTER_LCPREQ, signal,
- signal->getLength(), 50);
+ 50, signal->getLength());
return;
}
Uint32 failedNodeId = req->failedNodeId;
@@ -10678,6 +10698,12 @@ void Dbdih::execLCP_FRAG_REP(Signal* signal)
Uint32 started = lcpReport->maxGciStarted;
Uint32 completed = lcpReport->maxGciCompleted;
+ if (started > c_lcpState.lcpStopGcp)
+ {
+ jam();
+ c_lcpState.lcpStopGcp = started;
+ }
+
if(tableDone){
jam();
@@ -11218,7 +11244,12 @@ void Dbdih::allNodesLcpCompletedLab(Signal* signal)
signal->theData[0] = NDB_LE_LocalCheckpointCompleted; //Event type
signal->theData[1] = SYSFILE->latestLCP_ID;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
- c_lcpState.lcpStopGcp = c_newest_restorable_gci;
+
+ if (c_newest_restorable_gci > c_lcpState.lcpStopGcp)
+ {
+ jam();
+ c_lcpState.lcpStopGcp = c_newest_restorable_gci;
+ }
/**
* Start checking for next LCP
@@ -12088,13 +12119,12 @@ void Dbdih::findMinGci(ReplicaRecordPtr fmgReplicaPtr,
lcpNo = fmgReplicaPtr.p->nextLcp;
do {
ndbrequire(lcpNo < MAX_LCP_STORED);
- if (fmgReplicaPtr.p->lcpStatus[lcpNo] == ZVALID &&
- fmgReplicaPtr.p->maxGciStarted[lcpNo] < c_newest_restorable_gci)
+ if (fmgReplicaPtr.p->lcpStatus[lcpNo] == ZVALID)
{
jam();
keepGci = fmgReplicaPtr.p->maxGciCompleted[lcpNo];
oldestRestorableGci = fmgReplicaPtr.p->maxGciStarted[lcpNo];
- ndbrequire(((int)oldestRestorableGci) >= 0);
+ ndbassert(fmgReplicaPtr.p->maxGciStarted[lcpNo] <c_newest_restorable_gci);
return;
} else {
jam();
@@ -12996,6 +13026,7 @@ void Dbdih::newCrashedReplica(Uint32 nodeId, ReplicaRecordPtr ncrReplicaPtr)
void Dbdih::nodeResetStart()
{
jam();
+ c_nodeStartSlave.nodeId = 0;
c_nodeStartMaster.startNode = RNIL;
c_nodeStartMaster.failNr = cfailurenr;
c_nodeStartMaster.activeState = false;
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index 6efa1b1b116..83d38595c1f 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -494,8 +494,9 @@ void Dblqh::execINCL_NODEREQ(Signal* signal)
cnodeStatus[i] = ZNODE_UP;
}//if
}//for
- signal->theData[0] = cownref;
- sendSignal(retRef, GSN_INCL_NODECONF, signal, 1, JBB);
+ signal->theData[0] = nodeId;
+ signal->theData[1] = cownref;
+ sendSignal(retRef, GSN_INCL_NODECONF, signal, 2, JBB);
return;
}//Dblqh::execINCL_NODEREQ()
diff --git a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
index 3d5e52a525d..4c5c6420d56 100644
--- a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
@@ -734,6 +734,7 @@ public:
// Index op return context
UintR indexOp;
UintR clientData;
+ Uint32 errorData;
UintR attrInfoLen;
UintR accumulatingIndexOp;
diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
index ce20059e663..e54f163b2e4 100644
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -310,9 +310,11 @@ void Dbtc::execINCL_NODEREQ(Signal* signal)
hostptr.i = signal->theData[1];
ptrCheckGuard(hostptr, chostFilesize, hostRecord);
hostptr.p->hostStatus = HS_ALIVE;
- signal->theData[0] = cownref;
c_alive_nodes.set(hostptr.i);
+ signal->theData[0] = hostptr.i;
+ signal->theData[1] = cownref;
+
if (ERROR_INSERTED(8039))
{
CLEAR_ERROR_INSERT_VALUE;
@@ -321,11 +323,11 @@ void Dbtc::execINCL_NODEREQ(Signal* signal)
sendSignal(numberToRef(CMVMI, hostptr.i),
GSN_NDB_TAMPER, signal, 1, JBB);
signal->theData[0] = save;
- sendSignalWithDelay(tblockref, GSN_INCL_NODECONF, signal, 5000, 1);
+ sendSignalWithDelay(tblockref, GSN_INCL_NODECONF, signal, 5000, 2);
return;
}
- sendSignal(tblockref, GSN_INCL_NODECONF, signal, 1, JBB);
+ sendSignal(tblockref, GSN_INCL_NODECONF, signal, 2, JBB);
}
void Dbtc::execREAD_NODESREF(Signal* signal)
@@ -5117,6 +5119,7 @@ void Dbtc::releaseDirtyWrite(Signal* signal)
void Dbtc::execLQHKEYREF(Signal* signal)
{
const LqhKeyRef * const lqhKeyRef = (LqhKeyRef *)signal->getDataPtr();
+ Uint32 indexId = 0;
jamEntry();
UintR compare_transid1, compare_transid2;
@@ -5168,6 +5171,9 @@ void Dbtc::execLQHKEYREF(Signal* signal)
ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord);
// The operation executed an index trigger
+ TcIndexData* indexData = c_theIndexes.getPtr(currentIndexId);
+ indexId = indexData->indexId;
+ regApiPtr->errorData = indexId;
const Uint32 opType = regTcPtr->operation;
if (errCode == ZALREADYEXIST)
errCode = terrorCode = ZNOTUNIQUE;
@@ -5180,7 +5186,6 @@ void Dbtc::execLQHKEYREF(Signal* signal)
} else {
jam();
/** ZDELETE && NOT_FOUND */
- TcIndexData* indexData = c_theIndexes.getPtr(currentIndexId);
if(indexData->indexState == IS_BUILDING && state != CS_ABORTING){
jam();
/**
@@ -5265,12 +5270,14 @@ void Dbtc::execLQHKEYREF(Signal* signal)
jam();
regApiPtr->lqhkeyreqrec--; // Compensate for extra during read
tcKeyRef->connectPtr = indexOp;
+ tcKeyRef->errorData = indexId;
EXECUTE_DIRECT(DBTC, GSN_TCKEYREF, signal, TcKeyRef::SignalLength);
apiConnectptr.i = save;
apiConnectptr.p = regApiPtr;
} else {
jam();
tcKeyRef->connectPtr = clientData;
+ tcKeyRef->errorData = indexId;
sendSignal(regApiPtr->ndbapiBlockref,
GSN_TCKEYREF, signal, TcKeyRef::SignalLength, JBB);
}//if
@@ -10571,6 +10578,7 @@ void Dbtc::releaseAbortResources(Signal* signal)
tcRollbackRep->transId[0] = apiConnectptr.p->transid[0];
tcRollbackRep->transId[1] = apiConnectptr.p->transid[1];
tcRollbackRep->returnCode = apiConnectptr.p->returncode;
+ tcRollbackRep->errorData = apiConnectptr.p->errorData;
sendSignal(blockRef, GSN_TCROLLBACKREP, signal,
TcRollbackRep::SignalLength, JBB);
}
@@ -11995,6 +12003,7 @@ void Dbtc::execTCKEYCONF(Signal* signal)
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
+ tcIndxRef->errorData = 0;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB);
return;
@@ -12014,6 +12023,7 @@ void Dbtc::execTCKEYCONF(Signal* signal)
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
+ tcIndxRef->errorData = 0;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB);
return;
@@ -12097,6 +12107,7 @@ void Dbtc::execTCKEYREF(Signal* signal)
tcIndxRef->transId[0] = tcKeyRef->transId[0];
tcIndxRef->transId[1] = tcKeyRef->transId[1];
tcIndxRef->errorCode = tcKeyRef->errorCode;
+ tcIndxRef->errorData = 0;
releaseIndexOperation(regApiPtr, indexOp);
@@ -12174,6 +12185,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4000;
+ tcIndxRef->errorData = 0;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB);
return;
@@ -12189,6 +12201,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
+ tcIndxRef->errorData = 0;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB);
return;
@@ -12217,6 +12230,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
+ tcIndxRef->errorData = 0;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB);
*/
@@ -12242,6 +12256,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
+ tcIndxRef->errorData = regApiPtr->errorData;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB);
return;
@@ -12295,6 +12310,7 @@ void Dbtc::readIndexTable(Signal* signal,
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4000;
+ // tcIndxRef->errorData = ??; unclear where to find the indexId at this point
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB);
return;
@@ -12441,6 +12457,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
+ tcIndxRef->errorData = 0;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB);
return;
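
The Dbtc hunks above put the id of the index involved in a failed index-maintenance operation (for example a unique-key violation) into TcKeyRef::errorData and, via the api connect record's errorData, into TcRollbackRep::errorData; the NdbOperationExec.cpp and NdbTransaction.cpp hunks further down copy that extra word into NdbError::details. Below is a minimal sketch of how an application might surface it after a failed execute(); the cast reflects the assumption, visible in those hunks, that details is still a char* carrying a numeric dictionary object id.

#include <NdbApi.hpp>
#include <cstddef>
#include <cstdio>

// Print the basic error plus, when present, the extra index-object id that
// this patch forwards from the kernel (sketch only, not part of the patch).
static void report_ndb_error(const NdbError& err)
{
  if (err.details != 0)
    printf("NDB error %d (%s), related index object id: %lu\n",
           err.code, err.message, (unsigned long)(size_t)err.details);
  else
    printf("NDB error %d (%s)\n", err.code, err.message);
}

// Typical call site, e.g. after NdbTransaction::execute() returns -1:
//   report_ndb_error(trans->getNdbError());
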
diff --git a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
index 8d51b24ec6a..6a76ce5217a 100644
--- a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
@@ -265,6 +265,8 @@ private:
void execALLOC_NODEID_CONF(Signal *);
void execALLOC_NODEID_REF(Signal *);
void completeAllocNodeIdReq(Signal *);
+
+ void execSTART_ORD(Signal*);
// Arbitration signals
void execARBIT_CFG(Signal* signal);
@@ -281,6 +283,7 @@ private:
void check_readnodes_reply(Signal* signal, Uint32 nodeId, Uint32 gsn);
Uint32 check_startup(Signal* signal);
+ void api_failed(Signal* signal, Uint32 aFailedNode);
void node_failed(Signal* signal, Uint16 aFailedNode);
void checkStartInterface(Signal* signal);
void failReport(Signal* signal,
diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
index f9950072ab4..2f03bd56694 100644
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
@@ -31,10 +31,6 @@ void Qmgr::initData()
cnoCommitFailedNodes = 0;
c_maxDynamicId = 0;
c_clusterNodes.clear();
-
- Uint32 hbDBAPI = 500;
- setHbApiDelay(hbDBAPI);
- c_connectedNodes.set(getOwnNodeId());
c_stopReq.senderRef = 0;
/**
@@ -43,6 +39,27 @@ void Qmgr::initData()
ndbrequire((Uint32)NodeInfo::DB == 0);
ndbrequire((Uint32)NodeInfo::API == 1);
ndbrequire((Uint32)NodeInfo::MGM == 2);
+
+ NodeRecPtr nodePtr;
+ nodePtr.i = getOwnNodeId();
+ ptrAss(nodePtr, nodeRec);
+ nodePtr.p->blockRef = reference();
+
+ c_connectedNodes.set(getOwnNodeId());
+ setNodeInfo(getOwnNodeId()).m_version = NDB_VERSION;
+
+
+ /**
+ * Timeouts
+ */
+ const ndb_mgm_configuration_iterator * p =
+ m_ctx.m_config.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ Uint32 hbDBAPI = 1500;
+ ndb_mgm_get_int_parameter(p, CFG_DB_API_HEARTBEAT_INTERVAL, &hbDBAPI);
+
+ setHbApiDelay(hbDBAPI);
}//Qmgr::initData()
void Qmgr::initRecords()
@@ -113,6 +130,7 @@ Qmgr::Qmgr(Block_context& ctx)
addRecSignal(GSN_DIH_RESTARTREF, &Qmgr::execDIH_RESTARTREF);
addRecSignal(GSN_DIH_RESTARTCONF, &Qmgr::execDIH_RESTARTCONF);
addRecSignal(GSN_NODE_VERSION_REP, &Qmgr::execNODE_VERSION_REP);
+ addRecSignal(GSN_START_ORD, &Qmgr::execSTART_ORD);
initData();
}//Qmgr::Qmgr()
diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
index 1fba4d62e17..23e7829481e 100644
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
@@ -238,6 +238,38 @@ Qmgr::execREAD_CONFIG_REQ(Signal* signal)
ReadConfigConf::SignalLength, JBB);
}
+void
+Qmgr::execSTART_ORD(Signal* signal)
+{
+ /**
+ * Start timer handling
+ */
+ signal->theData[0] = ZTIMER_HANDLING;
+ sendSignal(QMGR_REF, GSN_CONTINUEB, signal, 1, JBB);
+
+ NodeRecPtr nodePtr;
+ for (nodePtr.i = 1; nodePtr.i < MAX_NODES; nodePtr.i++)
+ {
+ ptrAss(nodePtr, nodeRec);
+ nodePtr.p->ndynamicId = 0;
+ if(getNodeInfo(nodePtr.i).m_type == NodeInfo::DB)
+ {
+ nodePtr.p->phase = ZINIT;
+ c_definedNodes.set(nodePtr.i);
+ } else {
+ nodePtr.p->phase = ZAPI_INACTIVE;
+ }
+
+ setNodeInfo(nodePtr.i).m_heartbeat_cnt= 0;
+ nodePtr.p->sendPrepFailReqStatus = Q_NOT_ACTIVE;
+ nodePtr.p->sendCommitFailReqStatus = Q_NOT_ACTIVE;
+ nodePtr.p->sendPresToStatus = Q_NOT_ACTIVE;
+ nodePtr.p->failState = NORMAL;
+ nodePtr.p->rcv[0] = 0;
+ nodePtr.p->rcv[1] = 0;
+ }//for
+}
+
/*
4.2 ADD NODE MODULE*/
/*##########################################################################*/
@@ -298,8 +330,6 @@ void Qmgr::startphase1(Signal* signal)
nodePtr.i = getOwnNodeId();
ptrAss(nodePtr, nodeRec);
nodePtr.p->phase = ZSTARTING;
- nodePtr.p->blockRef = reference();
- c_connectedNodes.set(nodePtr.i);
signal->theData[0] = reference();
sendSignal(DBDIH_REF, GSN_DIH_RESTARTREQ, signal, 1, JBB);
@@ -371,11 +401,14 @@ void Qmgr::execCONNECT_REP(Signal* signal)
case ZFAIL_CLOSING:
jam();
return;
- case ZINIT:
- ndbrequire(false);
case ZAPI_ACTIVE:
case ZAPI_INACTIVE:
return;
+ case ZINIT:
+ ndbrequire(getNodeInfo(nodeId).m_type == NodeInfo::MGM);
+ break;
+ default:
+ ndbrequire(false);
}
if (getNodeInfo(nodeId).getType() != NodeInfo::DB)
@@ -1212,12 +1245,6 @@ void Qmgr::execCM_REGREF(Signal* signal)
{
jam();
electionWon(signal);
-
- /**
- * Start timer handling
- */
- signal->theData[0] = ZTIMER_HANDLING;
- sendSignal(QMGR_REF, GSN_CONTINUEB, signal, 10, JBB);
}
return;
@@ -1855,12 +1882,6 @@ Qmgr::joinedCluster(Signal* signal, NodeRecPtr nodePtr){
sendSttorryLab(signal);
- /**
- * Start timer handling
- */
- signal->theData[0] = ZTIMER_HANDLING;
- sendSignal(QMGR_REF, GSN_CONTINUEB, signal, 10, JBB);
-
sendCmAckAdd(signal, getOwnNodeId(), CmAdd::CommitNew);
}
@@ -2094,25 +2115,6 @@ void Qmgr::findNeighbours(Signal* signal)
/*---------------------------------------------------------------------------*/
void Qmgr::initData(Signal* signal)
{
- NodeRecPtr nodePtr;
- for (nodePtr.i = 1; nodePtr.i < MAX_NODES; nodePtr.i++) {
- ptrAss(nodePtr, nodeRec);
- nodePtr.p->ndynamicId = 0;
- if(getNodeInfo(nodePtr.i).m_type == NodeInfo::DB){
- nodePtr.p->phase = ZINIT;
- c_definedNodes.set(nodePtr.i);
- } else {
- nodePtr.p->phase = ZAPI_INACTIVE;
- }
-
- setNodeInfo(nodePtr.i).m_heartbeat_cnt= 0;
- nodePtr.p->sendPrepFailReqStatus = Q_NOT_ACTIVE;
- nodePtr.p->sendCommitFailReqStatus = Q_NOT_ACTIVE;
- nodePtr.p->sendPresToStatus = Q_NOT_ACTIVE;
- nodePtr.p->failState = NORMAL;
- nodePtr.p->rcv[0] = 0;
- nodePtr.p->rcv[1] = 0;
- }//for
cfailureNr = 1;
ccommitFailureNr = 1;
cprepareFailureNr = 1;
@@ -2146,13 +2148,11 @@ void Qmgr::initData(Signal* signal)
ndbrequire(p != 0);
Uint32 hbDBDB = 1500;
- Uint32 hbDBAPI = 1500;
Uint32 arbitTimeout = 1000;
c_restartPartialTimeout = 30000;
c_restartPartionedTimeout = 60000;
c_restartFailureTimeout = ~0;
ndb_mgm_get_int_parameter(p, CFG_DB_HEARTBEAT_INTERVAL, &hbDBDB);
- ndb_mgm_get_int_parameter(p, CFG_DB_API_HEARTBEAT_INTERVAL, &hbDBAPI);
ndb_mgm_get_int_parameter(p, CFG_DB_ARBIT_TIMEOUT, &arbitTimeout);
ndb_mgm_get_int_parameter(p, CFG_DB_START_PARTIAL_TIMEOUT,
&c_restartPartialTimeout);
@@ -2177,7 +2177,6 @@ void Qmgr::initData(Signal* signal)
}
setHbDelay(hbDBDB);
- setHbApiDelay(hbDBAPI);
setArbitTimeout(arbitTimeout);
arbitRec.state = ARBIT_NULL; // start state for all nodes
@@ -2204,7 +2203,6 @@ void Qmgr::initData(Signal* signal)
execARBIT_CFG(signal);
}
- setNodeInfo(getOwnNodeId()).m_version = NDB_VERSION;
}//Qmgr::initData()
@@ -2237,20 +2235,22 @@ void Qmgr::timerHandlingLab(Signal* signal)
hb_check_timer.reset();
}
}
-
+
if (interface_check_timer.check(TcurrentTime)) {
jam();
interface_check_timer.reset();
checkStartInterface(signal);
}
+ if (hb_api_timer.check(TcurrentTime))
+ {
+ jam();
+ hb_api_timer.reset();
+ apiHbHandlingLab(signal);
+ }
+
if (cactivateApiCheck != 0) {
jam();
- if (hb_api_timer.check(TcurrentTime)) {
- jam();
- hb_api_timer.reset();
- apiHbHandlingLab(signal);
- }//if
if (clatestTransactionCheck == 0) {
//-------------------------------------------------------------
// Initialise the Transaction check timer.
@@ -2367,18 +2367,21 @@ void Qmgr::apiHbHandlingLab(Signal* signal)
if(type == NodeInfo::INVALID)
continue;
- if (TnodePtr.p->phase == ZAPI_ACTIVE){
+ if (c_connectedNodes.get(nodeId))
+ {
jam();
setNodeInfo(TnodePtr.i).m_heartbeat_cnt++;
- if(getNodeInfo(TnodePtr.i).m_heartbeat_cnt > 2){
+ if(getNodeInfo(TnodePtr.i).m_heartbeat_cnt > 2)
+ {
signal->theData[0] = NDB_LE_MissedHeartbeat;
signal->theData[1] = nodeId;
signal->theData[2] = getNodeInfo(TnodePtr.i).m_heartbeat_cnt - 1;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
}
- if (getNodeInfo(TnodePtr.i).m_heartbeat_cnt > 4) {
+ if (getNodeInfo(TnodePtr.i).m_heartbeat_cnt > 4)
+ {
jam();
/*------------------------------------------------------------------*/
/* THE API NODE HAS NOT SENT ANY HEARTBEAT FOR THREE SECONDS.
@@ -2390,8 +2393,8 @@ void Qmgr::apiHbHandlingLab(Signal* signal)
signal->theData[0] = NDB_LE_DeadDueToHeartbeat;
signal->theData[1] = nodeId;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
-
- node_failed(signal, nodeId);
+
+ api_failed(signal, nodeId);
}//if
}//if
}//for
@@ -2480,26 +2483,6 @@ void Qmgr::sendApiFailReq(Signal* signal, Uint16 failedNodeNo)
sendSignal(DBTC_REF, GSN_API_FAILREQ, signal, 2, JBA);
sendSignal(DBDICT_REF, GSN_API_FAILREQ, signal, 2, JBA);
sendSignal(SUMA_REF, GSN_API_FAILREQ, signal, 2, JBA);
-
- /**-------------------------------------------------------------------------
- * THE OTHER NODE WAS AN API NODE. THE COMMUNICATION LINK IS ALREADY
- * BROKEN AND THUS NO ACTION IS NEEDED TO BREAK THE CONNECTION.
- * WE ONLY NEED TO SET PARAMETERS TO ENABLE A NEW CONNECTION IN A FEW
- * SECONDS.
- *-------------------------------------------------------------------------*/
- setNodeInfo(failedNodePtr.i).m_heartbeat_cnt= 0;
- setNodeInfo(failedNodePtr.i).m_version = 0;
- recompute_version_info(getNodeInfo(failedNodePtr.i).m_type);
-
- CloseComReqConf * const closeCom = (CloseComReqConf *)&signal->theData[0];
-
- closeCom->xxxBlockRef = reference();
- closeCom->failNo = 0;
- closeCom->noOfNodes = 1;
- NodeBitmask::clear(closeCom->theNodes);
- NodeBitmask::set(closeCom->theNodes, failedNodePtr.i);
- sendSignal(CMVMI_REF, GSN_CLOSE_COMREQ, signal,
- CloseComReqConf::SignalLength, JBA);
}//Qmgr::sendApiFailReq()
void Qmgr::execAPI_FAILREQ(Signal* signal)
@@ -2512,20 +2495,7 @@ void Qmgr::execAPI_FAILREQ(Signal* signal)
ndbrequire(getNodeInfo(failedNodePtr.i).getType() != NodeInfo::DB);
- // ignore if api not active
- if (failedNodePtr.p->phase != ZAPI_ACTIVE)
- {
- jam();
- // But send to SUMA anyway...
- sendSignal(SUMA_REF, GSN_API_FAILREQ, signal, 2, JBA);
- return;
- }
-
- signal->theData[0] = NDB_LE_Disconnected;
- signal->theData[1] = failedNodePtr.i;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
-
- node_failed(signal, failedNodePtr.i);
+ api_failed(signal, signal->theData[0]);
}
void Qmgr::execAPI_FAILCONF(Signal* signal)
@@ -2649,6 +2619,13 @@ void Qmgr::execDISCONNECT_REP(Signal* signal)
ndbrequire(false);
}
+ if (getNodeInfo(nodeId).getType() != NodeInfo::DB)
+ {
+ jam();
+ api_failed(signal, nodeId);
+ return;
+ }
+
switch(nodePtr.p->phase){
case ZRUNNING:
jam();
@@ -2685,66 +2662,109 @@ void Qmgr::node_failed(Signal* signal, Uint16 aFailedNode)
failedNodePtr.i = aFailedNode;
ptrCheckGuard(failedNodePtr, MAX_NODES, nodeRec);
- if (getNodeInfo(failedNodePtr.i).getType() == NodeInfo::DB){
+ ndbrequire(getNodeInfo(failedNodePtr.i).getType() == NodeInfo::DB);
+
+ /**---------------------------------------------------------------------
+ * THE OTHER NODE IS AN NDB NODE, WE HANDLE IT AS IF A HEARTBEAT
+ * FAILURE WAS DISCOVERED.
+ *---------------------------------------------------------------------*/
+ switch(failedNodePtr.p->phase){
+ case ZRUNNING:
jam();
- /**---------------------------------------------------------------------
- * THE OTHER NODE IS AN NDB NODE, WE HANDLE IT AS IF A HEARTBEAT
- * FAILURE WAS DISCOVERED.
- *---------------------------------------------------------------------*/
- switch(failedNodePtr.p->phase){
- case ZRUNNING:
- jam();
- failReportLab(signal, aFailedNode, FailRep::ZLINK_FAILURE);
- return;
- case ZFAIL_CLOSING:
- jam();
- return;
- case ZSTARTING:
- c_start.reset();
- // Fall-through
- default:
- jam();
- /*---------------------------------------------------------------------*/
- // The other node is still not in the cluster but disconnected.
- // We must restart communication in three seconds.
- /*---------------------------------------------------------------------*/
- failedNodePtr.p->failState = NORMAL;
- failedNodePtr.p->phase = ZFAIL_CLOSING;
- setNodeInfo(failedNodePtr.i).m_heartbeat_cnt= 0;
-
- CloseComReqConf * const closeCom =
- (CloseComReqConf *)&signal->theData[0];
-
- closeCom->xxxBlockRef = reference();
- closeCom->failNo = 0;
- closeCom->noOfNodes = 1;
- NodeBitmask::clear(closeCom->theNodes);
- NodeBitmask::set(closeCom->theNodes, failedNodePtr.i);
- sendSignal(CMVMI_REF, GSN_CLOSE_COMREQ, signal,
- CloseComReqConf::SignalLength, JBA);
- }//if
+ failReportLab(signal, aFailedNode, FailRep::ZLINK_FAILURE);
return;
- }
-
- /**
- * API code
- */
- jam();
- if (failedNodePtr.p->phase != ZFAIL_CLOSING){
+ case ZFAIL_CLOSING:
+ jam();
+ return;
+ case ZSTARTING:
+ c_start.reset();
+ // Fall-through
+ default:
jam();
- //-------------------------------------------------------------------------
- // The API was active and has now failed. We need to initiate API failure
- // handling. If the API had already failed then we can ignore this
- // discovery.
- //-------------------------------------------------------------------------
+ /*---------------------------------------------------------------------*/
+ // The other node is still not in the cluster but disconnected.
+ // We must restart communication in three seconds.
+ /*---------------------------------------------------------------------*/
+ failedNodePtr.p->failState = NORMAL;
failedNodePtr.p->phase = ZFAIL_CLOSING;
-
- sendApiFailReq(signal, aFailedNode);
- arbitRec.code = ArbitCode::ApiFail;
- handleArbitApiFail(signal, aFailedNode);
+ setNodeInfo(failedNodePtr.i).m_heartbeat_cnt= 0;
+
+ CloseComReqConf * const closeCom =
+ (CloseComReqConf *)&signal->theData[0];
+
+ closeCom->xxxBlockRef = reference();
+ closeCom->failNo = 0;
+ closeCom->noOfNodes = 1;
+ NodeBitmask::clear(closeCom->theNodes);
+ NodeBitmask::set(closeCom->theNodes, failedNodePtr.i);
+ sendSignal(CMVMI_REF, GSN_CLOSE_COMREQ, signal,
+ CloseComReqConf::SignalLength, JBA);
}//if
return;
-}//Qmgr::node_failed()
+}
+
+void
+Qmgr::api_failed(Signal* signal, Uint32 nodeId)
+{
+ NodeRecPtr failedNodePtr;
+ /**------------------------------------------------------------------------
+ * A COMMUNICATION LINK HAS BEEN DISCONNECTED. WE MUST TAKE SOME ACTION
+ * DUE TO THIS.
+ *-----------------------------------------------------------------------*/
+ failedNodePtr.i = nodeId;
+ ptrCheckGuard(failedNodePtr, MAX_NODES, nodeRec);
+
+ if (failedNodePtr.p->phase == ZFAIL_CLOSING)
+ {
+ /**
+ * Failure handling already in progress
+ */
+ jam();
+ return;
+ }
+
+ if (failedNodePtr.p->phase == ZAPI_ACTIVE)
+ {
+ jam();
+ sendApiFailReq(signal, nodeId);
+ arbitRec.code = ArbitCode::ApiFail;
+ handleArbitApiFail(signal, nodeId);
+ }
+ else
+ {
+ /**
+ * Always inform SUMA
+ */
+ jam();
+ signal->theData[0] = nodeId;
+ signal->theData[1] = QMGR_REF;
+ sendSignal(SUMA_REF, GSN_API_FAILREQ, signal, 2, JBA);
+ failedNodePtr.p->failState = NORMAL;
+ }
+
+ failedNodePtr.p->phase = ZFAIL_CLOSING;
+ setNodeInfo(failedNodePtr.i).m_heartbeat_cnt= 0;
+ setNodeInfo(failedNodePtr.i).m_version = 0;
+ recompute_version_info(getNodeInfo(failedNodePtr.i).m_type);
+
+ CloseComReqConf * const closeCom = (CloseComReqConf *)&signal->theData[0];
+ closeCom->xxxBlockRef = reference();
+ closeCom->failNo = 0;
+ closeCom->noOfNodes = 1;
+ NodeBitmask::clear(closeCom->theNodes);
+ NodeBitmask::set(closeCom->theNodes, failedNodePtr.i);
+ sendSignal(CMVMI_REF, GSN_CLOSE_COMREQ, signal,
+ CloseComReqConf::SignalLength, JBA);
+
+ if (getNodeInfo(failedNodePtr.i).getType() == NodeInfo::MGM)
+ {
+ /**
+ * Allow MGM to reconnect "directly"
+ */
+ jam();
+ setNodeInfo(failedNodePtr.i).m_heartbeat_cnt = 3;
+ }
+}
/**--------------------------------------------------------------------------
* AN API NODE IS REGISTERING. IF FOR THE FIRST TIME WE WILL ENABLE
@@ -4963,43 +4983,39 @@ Qmgr::execDUMP_STATE_ORD(Signal* signal)
c_start.m_president_candidate_gci);
infoEvent("ctoStatus = %d\n", ctoStatus);
for(Uint32 i = 1; i<MAX_NDB_NODES; i++){
- if(getNodeInfo(i).getType() == NodeInfo::DB){
- NodeRecPtr nodePtr;
- nodePtr.i = i;
- ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRec);
- char buf[100];
- switch(nodePtr.p->phase){
- case ZINIT:
- sprintf(buf, "Node %d: ZINIT(%d)", i, nodePtr.p->phase);
- break;
- case ZSTARTING:
- sprintf(buf, "Node %d: ZSTARTING(%d)", i, nodePtr.p->phase);
- break;
- case ZRUNNING:
- sprintf(buf, "Node %d: ZRUNNING(%d)", i, nodePtr.p->phase);
- break;
- case ZPREPARE_FAIL:
- sprintf(buf, "Node %d: ZPREPARE_FAIL(%d)", i, nodePtr.p->phase);
- break;
- case ZFAIL_CLOSING:
- sprintf(buf, "Node %d: ZFAIL_CLOSING(%d)", i, nodePtr.p->phase);
- break;
- case ZAPI_INACTIVE:
- sprintf(buf, "Node %d: ZAPI_INACTIVE(%d)", i, nodePtr.p->phase);
- break;
- case ZAPI_ACTIVE:
- sprintf(buf, "Node %d: ZAPI_ACTIVE(%d)", i, nodePtr.p->phase);
- break;
- default:
- sprintf(buf, "Node %d: <UNKNOWN>(%d)", i, nodePtr.p->phase);
- break;
- }
- infoEvent(buf);
+ NodeRecPtr nodePtr;
+ nodePtr.i = i;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRec);
+ char buf[100];
+ switch(nodePtr.p->phase){
+ case ZINIT:
+ sprintf(buf, "Node %d: ZINIT(%d)", i, nodePtr.p->phase);
+ break;
+ case ZSTARTING:
+ sprintf(buf, "Node %d: ZSTARTING(%d)", i, nodePtr.p->phase);
+ break;
+ case ZRUNNING:
+ sprintf(buf, "Node %d: ZRUNNING(%d)", i, nodePtr.p->phase);
+ break;
+ case ZPREPARE_FAIL:
+ sprintf(buf, "Node %d: ZPREPARE_FAIL(%d)", i, nodePtr.p->phase);
+ break;
+ case ZFAIL_CLOSING:
+ sprintf(buf, "Node %d: ZFAIL_CLOSING(%d)", i, nodePtr.p->phase);
+ break;
+ case ZAPI_INACTIVE:
+ sprintf(buf, "Node %d: ZAPI_INACTIVE(%d)", i, nodePtr.p->phase);
+ break;
+ case ZAPI_ACTIVE:
+ sprintf(buf, "Node %d: ZAPI_ACTIVE(%d)", i, nodePtr.p->phase);
+ break;
+ default:
+ sprintf(buf, "Node %d: <UNKNOWN>(%d)", i, nodePtr.p->phase);
+ break;
}
+ infoEvent(buf);
}
- default:
- ;
- }//switch
+ }
#ifdef ERROR_INSERT
if (signal->theData[0] == 935 && signal->getLength() == 2)
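
Taken together, the Qmgr changes move API liveness checking off the cactivateApiCheck flag: timer handling now starts from execSTART_ORD(), apiHbHandlingLab() runs whenever hb_api_timer fires, the per-node check is keyed on c_connectedNodes rather than on the ZAPI_ACTIVE phase, and a timed-out node is handed to the new api_failed() instead of node_failed(). The following is a stand-alone model of just the escalation thresholds, not kernel code.

#include <cstdio>

enum HbAction { HB_OK, HB_REPORT_MISSED, HB_DECLARE_FAILED };

// Mirrors the two comparisons in apiHbHandlingLab(): the counter counts
// API-heartbeat intervals without a sign of life from the node.
static HbAction check_api_heartbeat(unsigned missed_intervals)
{
  if (missed_intervals > 4)    // "m_heartbeat_cnt > 4" -> api_failed()
    return HB_DECLARE_FAILED;
  if (missed_intervals > 2)    // "m_heartbeat_cnt > 2" -> NDB_LE_MissedHeartbeat event
    return HB_REPORT_MISSED;
  return HB_OK;
}

int main()
{
  for (unsigned n = 0; n <= 5; n++)
    printf("missed=%u -> %d\n", n, (int)check_api_heartbeat(n));
  return 0;
}
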
diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
index a4e886b14b6..113b63a19d3 100644
--- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
@@ -821,8 +821,9 @@ Suma::execINCL_NODEREQ(Signal* signal){
ndbrequire(!c_alive_nodes.get(nodeId));
c_alive_nodes.set(nodeId);
- signal->theData[0] = reference();
- sendSignal(senderRef, GSN_INCL_NODECONF, signal, 1, JBB);
+ signal->theData[0] = nodeId;
+ signal->theData[1] = reference();
+ sendSignal(senderRef, GSN_INCL_NODECONF, signal, 2, JBB);
}
void
@@ -2450,6 +2451,7 @@ Suma::execSUB_START_REQ(Signal* signal){
{
jam();
+ c_subscriberPool.release(subbPtr);
sendSubStartRef(signal, SubStartRef::PartiallyConnected);
return;
}
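
Both the Dbtc and Suma hunks widen INCL_NODECONF from one word to two: word 0 now carries the id of the node that was included and word 1 the confirming block's reference. Below is a hypothetical descriptor, not present in the tree, shown only to document the new layout.

#include <cstdint>

typedef std::uint32_t Uint32;   // stand-in for the NDB typedef

// Hypothetical signal-data layout for the two-word INCL_NODECONF
// (the real code just writes signal->theData[0..1] directly).
struct InclNodeConf {
  static const Uint32 SignalLength = 2;
  Uint32 inclNodeId;   // theData[0]: node that has just been included
  Uint32 senderRef;    // theData[1]: reference of the confirming block
};
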
diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
index ee5bb5103d8..a00c68007a7 100644
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
@@ -119,7 +119,11 @@ operator<<(NdbOut& out, const LogLevel & ll)
void
MgmtSrvr::logLevelThreadRun()
{
- while (!_isStopThread) {
+ while (!_isStopThread)
+ {
+ Vector<NodeId> failed_started_nodes;
+ Vector<EventSubscribeReq> failed_log_level_requests;
+
/**
* Handle started nodes
*/
@@ -144,14 +148,15 @@ MgmtSrvr::logLevelThreadRun()
m_started_nodes.unlock();
if (setEventReportingLevelImpl(node, req))
- {
- ndbout_c("setEventReportingLevelImpl(%d): failed", node);
- }
-
- SetLogLevelOrd ord;
- ord = m_nodeLogLevel[node];
- setNodeLogLevelImpl(node, ord);
-
+ {
+ failed_started_nodes.push_back(node);
+ }
+ else
+ {
+ SetLogLevelOrd ord;
+ ord = m_nodeLogLevel[node];
+ setNodeLogLevelImpl(node, ord);
+ }
m_started_nodes.lock();
}
}
@@ -166,17 +171,20 @@ MgmtSrvr::logLevelThreadRun()
if(req.blockRef == 0)
{
- req.blockRef = _ownReference;
- if (setEventReportingLevelImpl(0, req))
- {
- ndbout_c("setEventReportingLevelImpl: failed 2!");
- }
+ req.blockRef = _ownReference;
+ if (setEventReportingLevelImpl(0, req))
+ {
+ failed_log_level_requests.push_back(req);
+ }
}
else
{
SetLogLevelOrd ord;
ord = req;
- setNodeLogLevelImpl(req.blockRef, ord);
+ if (setNodeLogLevelImpl(req.blockRef, ord))
+ {
+ failed_log_level_requests.push_back(req);
+ }
}
m_log_level_requests.lock();
}
@@ -185,7 +193,28 @@ MgmtSrvr::logLevelThreadRun()
if(!ERROR_INSERTED(10000))
m_event_listner.check_listeners();
- NdbSleep_MilliSleep(_logLevelThreadSleep);
+ Uint32 sleeptime = _logLevelThreadSleep;
+ if (failed_started_nodes.size())
+ {
+ m_started_nodes.lock();
+ for (Uint32 i = 0; i<failed_started_nodes.size(); i++)
+ m_started_nodes.push_back(failed_started_nodes[i], false);
+ m_started_nodes.unlock();
+ failed_started_nodes.clear();
+ sleeptime = 100;
+ }
+
+ if (failed_log_level_requests.size())
+ {
+ m_log_level_requests.lock();
+ for (Uint32 i = 0; i<failed_log_level_requests.size(); i++)
+ m_log_level_requests.push_back(failed_log_level_requests[i], false);
+ m_log_level_requests.unlock();
+ failed_log_level_requests.clear();
+ sleeptime = 100;
+ }
+
+ NdbSleep_MilliSleep(sleeptime);
}
}
@@ -1535,7 +1564,6 @@ MgmtSrvr::setEventReportingLevelImpl(int nodeId_arg,
{
SignalSender ss(theFacade);
NdbNodeBitmask nodes;
- int retries = 30;
nodes.clear();
while (1)
{
@@ -1572,18 +1600,8 @@ MgmtSrvr::setEventReportingLevelImpl(int nodeId_arg,
continue;
}
// api_reg_conf not received yet, need to retry
- break;
- }
- }
- if (nodeId <= max)
- {
- if (--retries)
- {
- ss.unlock();
- NdbSleep_MilliSleep(100);
- continue;
+ return SEND_OR_RECEIVE_FAILED;
}
- return SEND_OR_RECEIVE_FAILED;
}
if (nodeId_arg == 0)
@@ -1607,6 +1625,10 @@ MgmtSrvr::setEventReportingLevelImpl(int nodeId_arg,
continue; // node is not connected, skip
if (ss.sendSignal(nodeId, &ssig) == SEND_OK)
nodes.set(nodeId);
+ else if (max == nodeId)
+ {
+ return SEND_OR_RECEIVE_FAILED;
+ }
}
break;
}
@@ -2988,8 +3010,8 @@ int MgmtSrvr::connect_to_self(void)
return 0;
}
-
-
+template class Vector<unsigned short>;
template class MutexVector<unsigned short>;
template class MutexVector<Ndb_mgmd_event_service::Event_listener>;
+template class Vector<EventSubscribeReq>;
template class MutexVector<EventSubscribeReq>;
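
The logLevelThreadRun() changes stop dropping work when a node is slow to answer: instead of retrying inside setEventReportingLevelImpl() (the removed retries counter), failed started-node notifications and log-level requests are pushed back onto their MutexVectors and the thread sleeps only 100 ms before the next pass. Here is a generic sketch of that requeue-and-retry loop using std:: containers; the 500 ms base sleep is only a stand-in for _logLevelThreadSleep.

#include <chrono>
#include <deque>
#include <mutex>
#include <thread>
#include <vector>

// Drain the queue, run each item, requeue the failures and shorten the sleep
// so they are retried soon (sketch of the pattern, not the mgmd code itself).
template <class Work, class TryOne>
void worker_loop(std::deque<Work>& queue, std::mutex& lock,
                 TryOne try_one, const bool& stop)
{
  while (!stop)
  {
    std::vector<Work> batch;
    std::vector<Work> failed;
    {
      std::lock_guard<std::mutex> g(lock);
      batch.assign(queue.begin(), queue.end());
      queue.clear();
    }
    for (const Work& w : batch)
      if (!try_one(w))                        // e.g. setEventReportingLevelImpl() failed
        failed.push_back(w);

    std::chrono::milliseconds sleeptime(500); // stand-in for _logLevelThreadSleep
    if (!failed.empty())
    {
      std::lock_guard<std::mutex> g(lock);
      for (const Work& w : failed)
        queue.push_back(w);                   // retried on the next, shortened pass
      sleeptime = std::chrono::milliseconds(100);
    }
    std::this_thread::sleep_for(sleeptime);
  }
}
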
diff --git a/storage/ndb/src/ndbapi/NdbOperationExec.cpp b/storage/ndb/src/ndbapi/NdbOperationExec.cpp
index 27672e0458c..cd1ac44d82c 100644
--- a/storage/ndb/src/ndbapi/NdbOperationExec.cpp
+++ b/storage/ndb/src/ndbapi/NdbOperationExec.cpp
@@ -24,6 +24,7 @@
#include "Interpreter.hpp"
#include <AttributeHeader.hpp>
#include <signaldata/TcKeyReq.hpp>
+#include <signaldata/TcKeyRef.hpp>
#include <signaldata/KeyInfo.hpp>
#include <signaldata/AttrInfo.hpp>
#include <signaldata/ScanTab.hpp>
@@ -545,6 +546,12 @@ NdbOperation::receiveTCKEYREF( NdbApiSignal* aSignal)
}//if
setErrorCode(aSignal->readData(4));
+ if (aSignal->getLength() == TcKeyRef::SignalLength)
+ {
+ // Signal may contain additional error data
+ theError.details = (char *) aSignal->readData(5);
+ }
+
theStatus = Finished;
theReceiver.m_received_result_length = ~0;
diff --git a/storage/ndb/src/ndbapi/NdbTransaction.cpp b/storage/ndb/src/ndbapi/NdbTransaction.cpp
index bc59df722aa..17c7188bff2 100644
--- a/storage/ndb/src/ndbapi/NdbTransaction.cpp
+++ b/storage/ndb/src/ndbapi/NdbTransaction.cpp
@@ -30,6 +30,7 @@
#include <signaldata/TcCommit.hpp>
#include <signaldata/TcKeyFailConf.hpp>
#include <signaldata/TcHbRep.hpp>
+#include <signaldata/TcRollbackRep.hpp>
/*****************************************************************************
NdbTransaction( Ndb* aNdb );
@@ -1729,6 +1730,8 @@ Remark: Handles the reception of the ROLLBACKREP signal.
int
NdbTransaction::receiveTCROLLBACKREP( NdbApiSignal* aSignal)
{
+ DBUG_ENTER("NdbTransaction::receiveTCROLLBACKREP");
+
/****************************************************************************
Check that we are expecting signals from this transaction and that it doesn't
belong to a transaction already completed. Simply ignore messages from other
@@ -1736,6 +1739,11 @@ transactions.
****************************************************************************/
if(checkState_TransId(aSignal->getDataPtr() + 1)){
theError.code = aSignal->readData(4);// Override any previous errors
+ if (aSignal->getLength() == TcRollbackRep::SignalLength)
+ {
+ // Signal may contain additional error data
+ theError.details = (char *) aSignal->readData(5);
+ }
/**********************************************************************/
/* A serious error has occured. This could be due to deadlock or */
@@ -1747,14 +1755,14 @@ transactions.
theCompletionStatus = CompletedFailure;
theCommitStatus = Aborted;
theReturnStatus = ReturnFailure;
- return 0;
+ DBUG_RETURN(0);
} else {
#ifdef NDB_NO_DROPPED_SIGNAL
abort();
#endif
}
- return -1;
+ DBUG_RETURN(-1);
}//NdbTransaction::receiveTCROLLBACKREP()
/*******************************************************************************
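
Both API-side hunks guard the new read with a length check so that shorter TCKEYREF/TCROLLBACKREP signals from older data nodes are still handled. The snippet below is a generic illustration of that optional-trailing-word pattern, using hypothetical names rather than the NdbApiSignal interface.

#include <cstdint>

typedef std::uint32_t Uint32;

// Hypothetical fixed-size signal view: readData(i) returns word i-1,
// mirroring the 1-based convention used in the hunks above.
struct SignalView {
  const Uint32* data;
  Uint32 length;
  Uint32 readData(Uint32 i) const { return data[i - 1]; }
};

// Read the optional 5th word only when the sender actually included it.
static Uint32 optional_error_data(const SignalView& sig, Uint32 fullLength)
{
  return (sig.length == fullLength) ? sig.readData(5) : 0;
}
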
diff --git a/storage/ndb/src/ndbapi/ndberror.c b/storage/ndb/src/ndbapi/ndberror.c
index 0ad2faff76a..e7f946118f2 100644
--- a/storage/ndb/src/ndbapi/ndberror.c
+++ b/storage/ndb/src/ndbapi/ndberror.c
@@ -169,7 +169,7 @@ ErrorBundle ErrorCodes[] = {
{ 219, DMEC, TR, "219" },
{ 233, DMEC, TR,
"Out of operation records in transaction coordinator (increase MaxNoOfConcurrentOperations)" },
- { 275, DMEC, TR, "275" },
+ { 275, DMEC, TR, "Out of transaction records for complete phase (increase MaxNoOfConcurrentTransactions)" },
{ 279, DMEC, TR, "Out of transaction markers in transaction coordinator" },
{ 414, DMEC, TR, "414" },
{ 418, DMEC, TR, "Out of transaction buffers in LQH" },
@@ -766,8 +766,6 @@ ndberror_update(ndberror_struct * error){
if(!found){
error->status = ST_U;
}
-
- error->details = 0;
}
#if CHECK_ERRORCODES
diff --git a/storage/ndb/test/ndbapi/testNodeRestart.cpp b/storage/ndb/test/ndbapi/testNodeRestart.cpp
index 2a5febb7ae9..751134c43c5 100644
--- a/storage/ndb/test/ndbapi/testNodeRestart.cpp
+++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp
@@ -1590,6 +1590,8 @@ runBug27466(NDBT_Context* ctx, NDBT_Step* step)
node2 = res.getDbNodeId(rand() % res.getNumDbNodes());
}
+ ndbout_c("nodes %u %u", node1, node2);
+
if (res.restartOneDbNode(node1, false, true, true))
return NDBT_FAILED;