-rwxr-xr-x  BUILD/compile-ndb-autotest                     |  19
-rw-r--r--  ndb/include/kernel/signaldata/CreateIndx.hpp   |   1
-rw-r--r--  ndb/include/kernel/signaldata/DropIndx.hpp     |   1
-rw-r--r--  ndb/include/ndbapi/NdbOperation.hpp            |   2
-rw-r--r--  ndb/src/kernel/blocks/ERROR_codes.txt          |   3
-rw-r--r--  ndb/src/kernel/blocks/backup/Backup.cpp        |  64
-rw-r--r--  ndb/src/kernel/blocks/dbdict/Dbdict.cpp        |  44
-rw-r--r--  ndb/src/kernel/blocks/dbdict/Dbdict.hpp        |   3
-rw-r--r--  ndb/src/kernel/blocks/dbdih/DbdihMain.cpp      |  21
-rw-r--r--  ndb/src/kernel/blocks/dblqh/DblqhMain.cpp      |   5
-rw-r--r--  ndb/src/kernel/blocks/dbtc/Dbtc.hpp            |   4
-rw-r--r--  ndb/src/kernel/blocks/dbtc/DbtcMain.cpp        | 159
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp |  10
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp    |  30
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp   |   4
-rw-r--r--  ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp  |  14
-rw-r--r--  ndb/src/kernel/vm/DLHashTable2.hpp             |   2
-rw-r--r--  ndb/src/mgmapi/mgmapi.cpp                      |  11
-rw-r--r--  ndb/src/mgmapi/ndb_logevent.cpp                |   7
-rw-r--r--  ndb/src/mgmclient/CommandInterpreter.cpp       |  31
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvr.cpp                    |  11
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvr.hpp                    |   2
-rw-r--r--  ndb/src/mgmsrv/Services.cpp                    |   1
-rw-r--r--  ndb/src/ndbapi/ClusterMgr.cpp                  |  98
-rw-r--r--  ndb/src/ndbapi/ClusterMgr.hpp                  |  10
-rw-r--r--  ndb/test/include/NDBT_Tables.hpp               |   2
-rw-r--r--  ndb/test/ndbapi/testDict.cpp                   |  78
-rw-r--r--  ndb/test/ndbapi/testIndex.cpp                  |  52
-rw-r--r--  ndb/test/ndbapi/testSystemRestart.cpp          |  47
-rw-r--r--  ndb/test/run-test/daily-basic-tests.txt        |   4
-rw-r--r--  ndb/test/src/NDBT_Tables.cpp                   |  11
-rw-r--r--  ndb/tools/ndb_config.cpp                       |   4
-rw-r--r--  sql/ha_ndbcluster.cc                           |  11
33 files changed, 640 insertions, 126 deletions
diff --git a/BUILD/compile-ndb-autotest b/BUILD/compile-ndb-autotest
new file mode 100755
index 00000000000..be28cc28346
--- /dev/null
+++ b/BUILD/compile-ndb-autotest
@@ -0,0 +1,19 @@
+#! /bin/sh
+
+path=`dirname $0`
+. "$path/SETUP.sh"
+
+extra_configs="$max_configs --with-ndb-test --with-ndb-ccflags='-DERROR_INSERT'"
+if [ "$full_debug" ]
+then
+ extra_flags="$debug_cflags"
+ c_warnings="$c_warnings $debug_extra_warnings"
+ cxx_warnings="$cxx_warnings $debug_extra_warnings"
+ extra_configs="$debug_configs $extra_configs"
+else
+ extra_flags="$fast_cflags"
+fi
+
+extra_flags="$extra_flags $max_cflags -g"
+
+. "$path/FINISH.sh"
diff --git a/ndb/include/kernel/signaldata/CreateIndx.hpp b/ndb/include/kernel/signaldata/CreateIndx.hpp
index a9dc653f349..4163583dbd2 100644
--- a/ndb/include/kernel/signaldata/CreateIndx.hpp
+++ b/ndb/include/kernel/signaldata/CreateIndx.hpp
@@ -192,6 +192,7 @@ public:
enum ErrorCode {
NoError = 0,
Busy = 701,
+ BusyWithNR = 711,
NotMaster = 702,
TriggerNotFound = 4238,
TriggerExists = 4239,
diff --git a/ndb/include/kernel/signaldata/DropIndx.hpp b/ndb/include/kernel/signaldata/DropIndx.hpp
index fd2ea7f0b7b..41ee50082f7 100644
--- a/ndb/include/kernel/signaldata/DropIndx.hpp
+++ b/ndb/include/kernel/signaldata/DropIndx.hpp
@@ -168,6 +168,7 @@ public:
NoError = 0,
InvalidIndexVersion = 241,
Busy = 701,
+ BusyWithNR = 711,
NotMaster = 702,
IndexNotFound = 4243,
BadRequestType = 4247,
diff --git a/ndb/include/ndbapi/NdbOperation.hpp b/ndb/include/ndbapi/NdbOperation.hpp
index 4db541f7fe4..dbc343d2238 100644
--- a/ndb/include/ndbapi/NdbOperation.hpp
+++ b/ndb/include/ndbapi/NdbOperation.hpp
@@ -477,7 +477,7 @@ public:
/**
* Interpreted program instruction:
- * Substract RegSource1 from RegSource2 and put the result in RegDest.
+ * Substract RegSource2 from RegSource1 and put the result in RegDest.
*
* @param RegSource1 First register.
* @param RegSource2 Second register.
diff --git a/ndb/src/kernel/blocks/ERROR_codes.txt b/ndb/src/kernel/blocks/ERROR_codes.txt
index 7fee2e92f2b..c8c9e82efc2 100644
--- a/ndb/src/kernel/blocks/ERROR_codes.txt
+++ b/ndb/src/kernel/blocks/ERROR_codes.txt
@@ -6,7 +6,7 @@ Next DBTUP 4014
Next DBLQH 5043
Next DBDICT 6007
Next DBDIH 7177
-Next DBTC 8037
+Next DBTC 8038
Next CMVMI 9000
Next BACKUP 10022
Next DBUTIL 11002
@@ -283,6 +283,7 @@ ABORT OF TCKEYREQ
8032: No free TC records any more
+8037 : Invalid schema version in TCINDXREQ
CMVMI
-----
diff --git a/ndb/src/kernel/blocks/backup/Backup.cpp b/ndb/src/kernel/blocks/backup/Backup.cpp
index 43c1de5e2b3..10318e5f52d 100644
--- a/ndb/src/kernel/blocks/backup/Backup.cpp
+++ b/ndb/src/kernel/blocks/backup/Backup.cpp
@@ -274,36 +274,48 @@ Backup::execCONTINUEB(Signal* signal)
BackupRecordPtr ptr;
c_backupPool.getPtr(ptr, ptr_I);
- TablePtr tabPtr;
- ptr.p->tables.getPtr(tabPtr, tabPtr_I);
- FragmentPtr fragPtr;
- tabPtr.p->fragments.getPtr(fragPtr, fragPtr_I);
- BackupFilePtr filePtr;
- ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr);
-
- const Uint32 sz = sizeof(BackupFormat::CtlFile::FragmentInfo) >> 2;
- Uint32 * dst;
- if (!filePtr.p->operation.dataBuffer.getWritePtr(&dst, sz))
+ if (tabPtr_I == RNIL)
{
- sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 4);
+ closeFiles(signal, ptr);
return;
}
+ jam();
+ TablePtr tabPtr;
+ ptr.p->tables.getPtr(tabPtr, tabPtr_I);
+ jam();
+ if(tabPtr.p->fragments.getSize())
+ {
+ FragmentPtr fragPtr;
+ tabPtr.p->fragments.getPtr(fragPtr, fragPtr_I);
- BackupFormat::CtlFile::FragmentInfo * fragInfo =
- (BackupFormat::CtlFile::FragmentInfo*)dst;
- fragInfo->SectionType = htonl(BackupFormat::FRAGMENT_INFO);
- fragInfo->SectionLength = htonl(sz);
- fragInfo->TableId = htonl(fragPtr.p->tableId);
- fragInfo->FragmentNo = htonl(fragPtr_I);
- fragInfo->NoOfRecordsLow = htonl(fragPtr.p->noOfRecords & 0xFFFFFFFF);
- fragInfo->NoOfRecordsHigh = htonl(fragPtr.p->noOfRecords >> 32);
- fragInfo->FilePosLow = htonl(0 & 0xFFFFFFFF);
- fragInfo->FilePosHigh = htonl(0 >> 32);
+ BackupFilePtr filePtr;
+ ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr);
- filePtr.p->operation.dataBuffer.updateWritePtr(sz);
+ const Uint32 sz = sizeof(BackupFormat::CtlFile::FragmentInfo) >> 2;
+ Uint32 * dst;
+ if (!filePtr.p->operation.dataBuffer.getWritePtr(&dst, sz))
+ {
+ sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 4);
+ return;
+ }
+
+ BackupFormat::CtlFile::FragmentInfo * fragInfo =
+ (BackupFormat::CtlFile::FragmentInfo*)dst;
+ fragInfo->SectionType = htonl(BackupFormat::FRAGMENT_INFO);
+ fragInfo->SectionLength = htonl(sz);
+ fragInfo->TableId = htonl(fragPtr.p->tableId);
+ fragInfo->FragmentNo = htonl(fragPtr_I);
+ fragInfo->NoOfRecordsLow = htonl(fragPtr.p->noOfRecords & 0xFFFFFFFF);
+ fragInfo->NoOfRecordsHigh = htonl(fragPtr.p->noOfRecords >> 32);
+ fragInfo->FilePosLow = htonl(0 & 0xFFFFFFFF);
+ fragInfo->FilePosHigh = htonl(0 >> 32);
+
+ filePtr.p->operation.dataBuffer.updateWritePtr(sz);
+
+ fragPtr_I++;
+ }
- fragPtr_I++;
if (fragPtr_I == tabPtr.p->fragments.getSize())
{
signal->theData[0] = tabPtr.p->tableId;
@@ -4243,6 +4255,12 @@ Backup::execSTOP_BACKUP_REQ(Signal* signal)
TablePtr tabPtr;
ptr.p->tables.first(tabPtr);
+ if (tabPtr.i == RNIL)
+ {
+ closeFiles(signal, ptr);
+ return;
+ }
+
signal->theData[0] = BackupContinueB::BACKUP_FRAGMENT_INFO;
signal->theData[1] = ptr.i;
signal->theData[2] = tabPtr.i;
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index efd519339f7..a79ddd05fae 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -6520,9 +6520,18 @@ Dbdict::execCREATE_INDX_REQ(Signal* signal)
}
if (signal->getLength() == CreateIndxReq::SignalLength) {
jam();
+ CreateIndxRef::ErrorCode tmperr = CreateIndxRef::NoError;
if (getOwnNodeId() != c_masterNodeId) {
jam();
-
+ tmperr = CreateIndxRef::NotMaster;
+ } else if (c_blockState == BS_NODE_RESTART) {
+ jam();
+ tmperr = CreateIndxRef::BusyWithNR;
+ } else if (c_blockState != BS_IDLE) {
+ jam();
+ tmperr = CreateIndxRef::Busy;
+ }
+ if (tmperr != CreateIndxRef::NoError) {
releaseSections(signal);
OpCreateIndex opBusy;
opPtr.p = &opBusy;
@@ -6530,13 +6539,12 @@ Dbdict::execCREATE_INDX_REQ(Signal* signal)
opPtr.p->m_isMaster = (senderRef == reference());
opPtr.p->key = 0;
opPtr.p->m_requestType = CreateIndxReq::RT_DICT_PREPARE;
- opPtr.p->m_errorCode = CreateIndxRef::NotMaster;
+ opPtr.p->m_errorCode = tmperr;
opPtr.p->m_errorLine = __LINE__;
opPtr.p->m_errorNode = c_masterNodeId;
createIndex_sendReply(signal, opPtr, true);
return;
}
-
// forward initial request plus operation key to all
req->setOpKey(++c_opRecordSequence);
NodeReceiverGroup rg(DBDICT, c_aliveNodes);
@@ -7082,10 +7090,19 @@ Dbdict::execDROP_INDX_REQ(Signal* signal)
jam();
if (signal->getLength() == DropIndxReq::SignalLength) {
jam();
+ DropIndxRef::ErrorCode tmperr = DropIndxRef::NoError;
if (getOwnNodeId() != c_masterNodeId) {
jam();
-
- err = DropIndxRef::NotMaster;
+ tmperr = DropIndxRef::NotMaster;
+ } else if (c_blockState == BS_NODE_RESTART) {
+ jam();
+ tmperr = DropIndxRef::BusyWithNR;
+ } else if (c_blockState != BS_IDLE) {
+ jam();
+ tmperr = DropIndxRef::Busy;
+ }
+ if (tmperr != DropIndxRef::NoError) {
+ err = tmperr;
goto error;
}
// forward initial request plus operation key to all
@@ -10130,6 +10147,17 @@ Dbdict::execDICT_LOCK_REQ(Signal* signal)
sendDictLockInfoEvent(lockPtr, "lock request by node");
}
+// only table and index ops are checked
+bool
+Dbdict::hasDictLockSchemaOp()
+{
+ return
+ ! c_opCreateTable.isEmpty() ||
+ ! c_opDropTable.isEmpty() ||
+ ! c_opCreateIndex.isEmpty() ||
+ ! c_opDropIndex.isEmpty();
+}
+
void
Dbdict::checkDictLockQueue(Signal* signal, bool poll)
{
@@ -10150,7 +10178,7 @@ Dbdict::checkDictLockQueue(Signal* signal, bool poll)
break;
}
- if (c_opRecordPool.getNoOfFree() != c_opRecordPool.getSize()) {
+ if (hasDictLockSchemaOp()) {
jam();
break;
}
@@ -10183,7 +10211,7 @@ Dbdict::execDICT_UNLOCK_ORD(Signal* signal)
if (lockPtr.p->locked) {
jam();
ndbrequire(c_blockState == lockPtr.p->lt->blockState);
- ndbrequire(c_opRecordPool.getNoOfFree() == c_opRecordPool.getSize());
+ ndbrequire(! hasDictLockSchemaOp());
ndbrequire(! c_dictLockQueue.hasPrev(lockPtr));
c_blockState = BS_IDLE;
@@ -10279,7 +10307,7 @@ Dbdict::removeStaleDictLocks(Signal* signal, const Uint32* theFailedNodes)
if (lockPtr.p->locked) {
jam();
ndbrequire(c_blockState == lockPtr.p->lt->blockState);
- ndbrequire(c_opRecordPool.getNoOfFree() == c_opRecordPool.getSize());
+ ndbrequire(! hasDictLockSchemaOp());
ndbrequire(! c_dictLockQueue.hasPrev(lockPtr));
c_blockState = BS_IDLE;
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
index ed8b7e3b822..82644826d5b 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
+++ b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
@@ -1650,6 +1650,9 @@ private:
void sendDictLockInfoEvent(Uint32 pollCount);
void sendDictLockInfoEvent(DictLockPtr lockPtr, const char* text);
+ // check if any schema op exists (conflicting with dict lock)
+ bool hasDictLockSchemaOp();
+
void checkDictLockQueue(Signal* signal, bool poll);
void sendDictLockConf(Signal* signal, DictLockPtr lockPtr);
void sendDictLockRef(Signal* signal, DictLockReq req, Uint32 errorCode);
diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index 491aa0849b9..1c1fdb41d51 100644
--- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -8252,11 +8252,21 @@ void Dbdih::openingTableErrorLab(Signal* signal, FileRecordPtr filePtr)
/* WE FAILED IN OPENING A FILE. IF THE FIRST FILE THEN TRY WITH THE */
/* DUPLICATE FILE, OTHERWISE WE REPORT AN ERROR IN THE SYSTEM RESTART. */
/* ---------------------------------------------------------------------- */
- ndbrequire(filePtr.i == tabPtr.p->tabFile[0]);
- filePtr.i = tabPtr.p->tabFile[1];
- ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
- openFileRw(signal, filePtr);
- filePtr.p->reqStatus = FileRecord::OPENING_TABLE;
+ if (filePtr.i == tabPtr.p->tabFile[0])
+ {
+ filePtr.i = tabPtr.p->tabFile[1];
+ ptrCheckGuard(filePtr, cfileFileSize, fileRecord);
+ openFileRw(signal, filePtr);
+ filePtr.p->reqStatus = FileRecord::OPENING_TABLE;
+ }
+ else
+ {
+ char buf[256];
+ BaseString::snprintf(buf, sizeof(buf),
+ "Error opening DIH schema files for table: %d",
+ tabPtr.i);
+ progError(__LINE__, NDBD_EXIT_AFS_NO_SUCH_FILE, buf);
+ }
}//Dbdih::openingTableErrorLab()
void Dbdih::readingTableLab(Signal* signal, FileRecordPtr filePtr)
@@ -8422,6 +8432,7 @@ Dbdih::resetReplicaSr(TabRecordPtr tabPtr){
}
replicaPtr.i = nextReplicaPtrI;
}//while
+ updateNodeInfo(fragPtr);
}
}
diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index 0ea49e47fc7..b5cfd4aae6d 100644
--- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -6470,6 +6470,7 @@ void Dblqh::execACC_ABORTCONF(Signal* signal)
* A NORMAL EVENT DURING CREATION OF A FRAGMENT. WE NOW NEED TO CONTINUE
* WITH NORMAL COMMIT PROCESSING.
* ---------------------------------------------------------------------- */
+ regTcPtr->totSendlenAi = regTcPtr->totReclenAi;
if (regTcPtr->currTupAiLen == regTcPtr->totReclenAi) {
jam();
regTcPtr->abortState = TcConnectionrec::ABORT_IDLE;
@@ -12579,19 +12580,17 @@ void Dblqh::lastWriteInFileLab(Signal* signal)
void Dblqh::writePageZeroLab(Signal* signal)
{
- if (false && logPartPtr.p->logPartState == LogPartRecord::FILE_CHANGE_PROBLEM)
+ if (logPartPtr.p->logPartState == LogPartRecord::FILE_CHANGE_PROBLEM)
{
if (logPartPtr.p->firstLogQueue == RNIL)
{
jam();
logPartPtr.p->logPartState = LogPartRecord::IDLE;
- ndbout_c("resetting logPartState to IDLE");
}
else
{
jam();
logPartPtr.p->logPartState = LogPartRecord::ACTIVE;
- ndbout_c("resetting logPartState to ACTIVE");
}
}
diff --git a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
index ac7fca9cf93..bf6ce7129ba 100644
--- a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
+++ b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
@@ -720,7 +720,7 @@ public:
// Index data
- bool isIndexOp; // Used to mark on-going TcKeyReq as indx table access
+ Uint8 isIndexOp; // Used to mark on-going TcKeyReq as indx table access
bool indexOpReturn;
UintR noIndexOp; // No outstanding index ops
@@ -808,7 +808,7 @@ public:
UintR savedState[LqhKeyConf::SignalLength];
// Index data
- bool isIndexOp; // Used to mark on-going TcKeyReq as index table access
+ Uint8 isIndexOp; // Used to mark on-going TcKeyReq as index table access
UintR indexOp;
UintR currentIndexId;
UintR attrInfoLen;
diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
index 71f3aff05d4..dda743616f4 100644
--- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -1775,8 +1775,7 @@ void Dbtc::execKEYINFO(Signal* signal)
apiConnectptr.i = signal->theData[0];
tmaxData = 20;
if (apiConnectptr.i >= capiConnectFilesize) {
- jam();
- warningHandlerLab(signal, __LINE__);
+ TCKEY_abort(signal, 18);
return;
}//if
ptrAss(apiConnectptr, apiConnectRecord);
@@ -1785,9 +1784,7 @@ void Dbtc::execKEYINFO(Signal* signal)
compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2];
compare_transid1 = compare_transid1 | compare_transid2;
if (compare_transid1 != 0) {
- jam();
- printState(signal, 10);
- sendSignalErrorRefuseLab(signal);
+ TCKEY_abort(signal, 19);
return;
}//if
switch (apiConnectptr.p->apiConnectstate) {
@@ -2531,7 +2528,7 @@ void Dbtc::execTCKEYREQ(Signal* signal)
Uint32 TstartFlag = tcKeyReq->getStartFlag(Treqinfo);
Uint32 TexecFlag = TcKeyReq::getExecuteFlag(Treqinfo);
- bool isIndexOp = regApiPtr->isIndexOp;
+ Uint8 isIndexOp = regApiPtr->isIndexOp;
bool isIndexOpReturn = regApiPtr->indexOpReturn;
regApiPtr->isIndexOp = false; // Reset marker
regApiPtr->m_exec_flag |= TexecFlag;
@@ -3277,7 +3274,7 @@ void Dbtc::sendlqhkeyreq(Signal* signal,
sig1 = regCachePtr->fragmentid + (regTcPtr->tcNodedata[1] << 16);
sig2 = regApiPtr->transid[0];
sig3 = regApiPtr->transid[1];
- sig4 = regApiPtr->ndbapiBlockref;
+ sig4 = (regTcPtr->isIndexOp == 2) ? reference() : regApiPtr->ndbapiBlockref;
sig5 = regTcPtr->clientData;
sig6 = regCachePtr->scanInfo;
@@ -8619,6 +8616,7 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
// left over from simple/dirty read
} else {
jam();
+ jamLine(transP->apiConnectstate);
errCode = ZSTATE_ERROR;
goto SCAN_TAB_error_no_state_change;
}
@@ -12036,14 +12034,18 @@ void Dbtc::readIndexTable(Signal* signal,
opType == ZREAD ? ZREAD : ZREAD_EX);
TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, 1); // Allways send one AttrInfo
TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, 0);
- BlockReference originalReceiver = regApiPtr->ndbapiBlockref;
- regApiPtr->ndbapiBlockref = reference(); // Send result to me
tcKeyReq->senderData = indexOp->indexOpId;
indexOp->indexOpState = IOS_INDEX_ACCESS;
regApiPtr->executingIndexOp = regApiPtr->accumulatingIndexOp;
regApiPtr->accumulatingIndexOp = RNIL;
- regApiPtr->isIndexOp = true;
+ regApiPtr->isIndexOp = 2;
+ if (ERROR_INSERTED(8037))
+ {
+ ndbout_c("shifting index version");
+ tcKeyReq->tableSchemaVersion = ~(Uint32)indexOp->tcIndxReq.tableSchemaVersion;
+ }
+
Uint32 remainingKey = indexOp->keyInfo.getSize();
bool moreKeyData = indexOp->keyInfo.first(keyIter);
// *********** KEYINFO in TCKEYREQ ***********
@@ -12062,21 +12064,13 @@ void Dbtc::readIndexTable(Signal* signal,
ndbassert(TcKeyReq::getDirtyFlag(tcKeyRequestInfo) == 0);
ndbassert(TcKeyReq::getSimpleFlag(tcKeyRequestInfo) == 0);
EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength);
-
- /**
- * "Fool" TC not to start commiting transaction since it always will
- * have one outstanding lqhkeyreq
- * This is later decreased when the index read is complete
- */
- regApiPtr->lqhkeyreqrec++;
+ jamEntry();
- /**
- * Remember ptr to index read operation
- * (used to set correct save point id on index operation later)
- */
- indexOp->indexReadTcConnect = regApiPtr->lastTcConnect;
+ if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING))
+ {
+ goto err;
+ }
- jamEntry();
// *********** KEYINFO ***********
if (moreKeyData) {
jam();
@@ -12096,6 +12090,10 @@ void Dbtc::readIndexTable(Signal* signal,
EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
KeyInfo::HeaderLength + KeyInfo::DataLength);
jamEntry();
+ if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING))
+ {
+ goto err;
+ }
dataPos = 0;
dataPtr = (Uint32 *) &keyInfo->keyData;
}
@@ -12106,10 +12104,32 @@ void Dbtc::readIndexTable(Signal* signal,
EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
KeyInfo::HeaderLength + dataPos);
jamEntry();
+ if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING))
+ {
+ goto err;
+ }
}
}
- regApiPtr->ndbapiBlockref = originalReceiver; // reset original receiver
+ /**
+ * "Fool" TC not to start commiting transaction since it always will
+ * have one outstanding lqhkeyreq
+ * This is later decreased when the index read is complete
+ */
+ regApiPtr->lqhkeyreqrec++;
+
+ /**
+ * Remember ptr to index read operation
+ * (used to set correct save point id on index operation later)
+ */
+ indexOp->indexReadTcConnect = regApiPtr->lastTcConnect;
+
+done:
+ return;
+
+err:
+ jam();
+ goto done;
}
/**
@@ -12160,7 +12180,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
tcKeyReq->transId2 = regApiPtr->transid[1];
tcKeyReq->senderData = tcIndxReq->senderData; // Needed for TRANSID_AI to API
indexOp->indexOpState = IOS_INDEX_OPERATION;
- regApiPtr->isIndexOp = true;
+ regApiPtr->isIndexOp = 1;
regApiPtr->executingIndexOp = indexOp->indexOpId;;
regApiPtr->noIndexOp++; // Increase count
@@ -12233,9 +12253,16 @@ void Dbtc::executeIndexOperation(Signal* signal,
const Uint32 currSavePointId = regApiPtr->currSavePointId;
regApiPtr->currSavePointId = tmp.p->savePointId;
EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength);
+ jamEntry();
+
+ if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING))
+ {
+ jam();
+ return;
+ }
+
regApiPtr->currSavePointId = currSavePointId;
- jamEntry();
// *********** KEYINFO ***********
if (moreKeyData) {
jam();
@@ -12256,6 +12283,13 @@ void Dbtc::executeIndexOperation(Signal* signal,
EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
KeyInfo::HeaderLength + KeyInfo::DataLength);
jamEntry();
+
+ if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING))
+ {
+ jam();
+ return;
+ }
+
dataPos = 0;
dataPtr = (Uint32 *) &keyInfo->keyData;
}
@@ -12266,6 +12300,12 @@ void Dbtc::executeIndexOperation(Signal* signal,
EXECUTE_DIRECT(DBTC, GSN_KEYINFO, signal,
KeyInfo::HeaderLength + dataPos);
jamEntry();
+
+ if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING))
+ {
+ jam();
+ return;
+ }
}
}
@@ -12295,6 +12335,13 @@ void Dbtc::executeIndexOperation(Signal* signal,
EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal,
AttrInfo::HeaderLength + AttrInfo::DataLength);
jamEntry();
+
+ if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING))
+ {
+ jam();
+ return;
+ }
+
attrInfoPos = 0;
dataPtr = (Uint32 *) &attrInfo->attrData;
}
@@ -12694,9 +12741,16 @@ void Dbtc::insertIntoIndexTable(Signal* signal,
const Uint32 currSavePointId = regApiPtr->currSavePointId;
regApiPtr->currSavePointId = opRecord->savePointId;
EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength);
+ jamEntry();
+
+ if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING))
+ {
+ jam();
+ return;
+ }
+
regApiPtr->currSavePointId = currSavePointId;
tcConnectptr.p->currentIndexId = indexData->indexId;
- jamEntry();
// *********** KEYINFO ***********
if (moreKeyData) {
@@ -12726,6 +12780,12 @@ void Dbtc::insertIntoIndexTable(Signal* signal,
KeyInfo::HeaderLength + KeyInfo::DataLength);
jamEntry();
#endif
+ if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING))
+ {
+ jam();
+ return;
+ }
+
dataPtr = (Uint32 *) &keyInfo->keyData;
dataPos = 0;
}
@@ -12761,6 +12821,13 @@ void Dbtc::insertIntoIndexTable(Signal* signal,
KeyInfo::HeaderLength + KeyInfo::DataLength);
jamEntry();
#endif
+
+ if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING))
+ {
+ jam();
+ return;
+ }
+
dataPtr = (Uint32 *) &keyInfo->keyData;
dataPos = 0;
}
@@ -12778,6 +12845,11 @@ void Dbtc::insertIntoIndexTable(Signal* signal,
KeyInfo::HeaderLength + dataPos);
jamEntry();
#endif
+ if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING))
+ {
+ jam();
+ return;
+ }
}
}
@@ -12813,6 +12885,12 @@ void Dbtc::insertIntoIndexTable(Signal* signal,
AttrInfo::HeaderLength + AttrInfo::DataLength);
jamEntry();
#endif
+ if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING))
+ {
+ jam();
+ return;
+ }
+
dataPtr = (Uint32 *) &attrInfo->attrData;
attrInfoPos = 0;
}
@@ -12849,6 +12927,12 @@ void Dbtc::insertIntoIndexTable(Signal* signal,
AttrInfo::HeaderLength + AttrInfo::DataLength);
jamEntry();
#endif
+ if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING))
+ {
+ jam();
+ return;
+ }
+
dataPtr = (Uint32 *) &attrInfo->attrData;
attrInfoPos = 0;
}
@@ -12994,9 +13078,16 @@ void Dbtc::deleteFromIndexTable(Signal* signal,
const Uint32 currSavePointId = regApiPtr->currSavePointId;
regApiPtr->currSavePointId = opRecord->savePointId;
EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength);
+ jamEntry();
+
+ if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING))
+ {
+ jam();
+ return;
+ }
+
regApiPtr->currSavePointId = currSavePointId;
tcConnectptr.p->currentIndexId = indexData->indexId;
- jamEntry();
// *********** KEYINFO ***********
if (moreKeyData) {
@@ -13027,6 +13118,12 @@ void Dbtc::deleteFromIndexTable(Signal* signal,
KeyInfo::HeaderLength + KeyInfo::DataLength);
jamEntry();
#endif
+ if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING))
+ {
+ jam();
+ return;
+ }
+
dataPtr = (Uint32 *) &keyInfo->keyData;
dataPos = 0;
}
@@ -13063,6 +13160,12 @@ void Dbtc::deleteFromIndexTable(Signal* signal,
KeyInfo::HeaderLength + KeyInfo::DataLength);
jamEntry();
#endif
+ if (unlikely(regApiPtr->apiConnectstate == CS_ABORTING))
+ {
+ jam();
+ return;
+ }
+
dataPtr = (Uint32 *) &keyInfo->keyData;
dataPos = 0;
}
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
index f83f21f14d8..13c0bad9c7a 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
@@ -1113,14 +1113,16 @@ Dbtup::updateStartLab(Signal* signal,
regOperPtr->pageOffset,
&cinBuffer[0],
regOperPtr->attrinbufLen);
- if (retValue == -1) {
- tupkeyErrorLab(signal);
- return -1;
- }//if
} else {
jam();
retValue = interpreterStartLab(signal, pagePtr, regOperPtr->pageOffset);
}//if
+
+ if (retValue == -1) {
+ tupkeyErrorLab(signal);
+ return -1;
+ }//if
+
ndbrequire(regOperPtr->tupVersion != ZNIL);
pagePtr->pageWord[regOperPtr->pageOffset + 1] = regOperPtr->tupVersion;
if (regTabPtr->checksumIndicator) {
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp b/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp
index 9722aa437c0..8a18fddae19 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupPagMan.cpp
@@ -184,24 +184,28 @@ void Dbtup::allocConsPages(Uint32 noOfPagesToAllocate,
/* PROPER AMOUNT OF PAGES WERE NOT FOUND. FIND AS MUCH AS */
/* POSSIBLE. */
/* ---------------------------------------------------------------- */
- for (Uint32 j = firstListToCheck; (Uint32)~j; j--) {
+ if (firstListToCheck)
+ {
ljam();
- if (cfreepageList[j] != RNIL) {
+ for (Uint32 j = firstListToCheck - 1; (Uint32)~j; j--) {
ljam();
+ if (cfreepageList[j] != RNIL) {
+ ljam();
/* ---------------------------------------------------------------- */
/* SOME AREA WAS FOUND, ALLOCATE ALL OF IT. */
/* ---------------------------------------------------------------- */
- allocPageRef = cfreepageList[j];
- removeCommonArea(allocPageRef, j);
- noOfPagesAllocated = 1 << j;
- findFreeLeftNeighbours(allocPageRef, noOfPagesAllocated,
- noOfPagesToAllocate);
- findFreeRightNeighbours(allocPageRef, noOfPagesAllocated,
- noOfPagesToAllocate);
-
- return;
- }//if
- }//for
+ allocPageRef = cfreepageList[j];
+ removeCommonArea(allocPageRef, j);
+ noOfPagesAllocated = 1 << j;
+ findFreeLeftNeighbours(allocPageRef, noOfPagesAllocated,
+ noOfPagesToAllocate);
+ findFreeRightNeighbours(allocPageRef, noOfPagesAllocated,
+ noOfPagesToAllocate);
+
+ return;
+ }//if
+ }//for
+ }
/* ---------------------------------------------------------------- */
/* NO FREE AREA AT ALL EXISTED. RETURN ZERO PAGES */
/* ---------------------------------------------------------------- */
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp b/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
index acdb73704cb..0bb7c8a1e41 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
@@ -397,12 +397,12 @@ void Dbtup::allocMoreFragPages(Fragrecord* const regFragPtr)
Uint32 noAllocPages = regFragPtr->noOfPagesToGrow >> 3; // 12.5%
noAllocPages += regFragPtr->noOfPagesToGrow >> 4; // 6.25%
noAllocPages += 2;
- regFragPtr->noOfPagesToGrow += noAllocPages;
/* -----------------------------------------------------------------*/
// We will grow by 18.75% plus two more additional pages to grow
// a little bit quicker in the beginning.
/* -----------------------------------------------------------------*/
- allocFragPages(regFragPtr, noAllocPages);
+ Uint32 allocated = allocFragPages(regFragPtr, noAllocPages);
+ regFragPtr->noOfPagesToGrow += allocated;
}//Dbtup::allocMoreFragPages()
Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr currPageRangePtr)
diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
index e6bb4d4f14f..fe6caf04d8c 100644
--- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
+++ b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
@@ -181,10 +181,9 @@ void Ndbcntr::execSYSTEM_ERROR(Signal* signal)
case SystemError::CopyFragRefError:
BaseString::snprintf(buf, sizeof(buf),
- "Node %d killed this node because "
- "it could not copy a fragment during node restart. "
- "Copy fragment error code: %u.",
- killingNode, data1);
+ "Killed by node %d as "
+ "copyfrag failed, error: %u",
+ killingNode, data1);
break;
default:
@@ -2043,6 +2042,11 @@ void Ndbcntr::execSET_VAR_REQ(Signal* signal) {
void Ndbcntr::updateNodeState(Signal* signal, const NodeState& newState) const{
NodeStateRep * const stateRep = (NodeStateRep *)&signal->theData[0];
+ if (newState.startLevel == NodeState::SL_STARTED)
+ {
+ CRASH_INSERTION(1000);
+ }
+
stateRep->nodeState = newState;
stateRep->nodeState.masterNodeId = cmasterNodeId;
stateRep->nodeState.setNodeGroup(c_nodeGroup);
@@ -2843,7 +2847,7 @@ void Ndbcntr::Missra::sendNextSTTOR(Signal* signal){
cntr.sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
}
}
-
+
signal->theData[0] = NDB_LE_NDBStartCompleted;
signal->theData[1] = NDB_VERSION;
cntr.sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
diff --git a/ndb/src/kernel/vm/DLHashTable2.hpp b/ndb/src/kernel/vm/DLHashTable2.hpp
index 6b166331631..1018b053e2a 100644
--- a/ndb/src/kernel/vm/DLHashTable2.hpp
+++ b/ndb/src/kernel/vm/DLHashTable2.hpp
@@ -147,6 +147,8 @@ public:
* @param iter - An "uninitialized" iterator
*/
bool next(Uint32 bucket, Iterator & iter) const;
+
+ inline bool isEmpty() const { Iterator iter; return ! first(iter); }
private:
Uint32 mask;
diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp
index 4428b158b6b..9bf19dda3a4 100644
--- a/ndb/src/mgmapi/mgmapi.cpp
+++ b/ndb/src/mgmapi/mgmapi.cpp
@@ -1389,7 +1389,7 @@ ndb_mgm_listen_event_internal(NdbMgmHandle handle, const int filter[],
MGM_END()
};
CHECK_HANDLE(handle, -1);
-
+
const char *hostname= ndb_mgm_get_connected_host(handle);
int port= ndb_mgm_get_connected_port(handle);
SocketClient s(hostname, port);
@@ -1411,19 +1411,20 @@ ndb_mgm_listen_event_internal(NdbMgmHandle handle, const int filter[],
}
args.put("filter", tmp.c_str());
}
-
+
int tmp = handle->socket;
handle->socket = sockfd;
-
+
const Properties *reply;
reply = ndb_mgm_call(handle, stat_reply, "listen event", &args);
-
+
handle->socket = tmp;
-
+
if(reply == NULL) {
close(sockfd);
CHECK_REPLY(reply, -1);
}
+ delete reply;
return sockfd;
}
diff --git a/ndb/src/mgmapi/ndb_logevent.cpp b/ndb/src/mgmapi/ndb_logevent.cpp
index a90d5658506..2472a434590 100644
--- a/ndb/src/mgmapi/ndb_logevent.cpp
+++ b/ndb/src/mgmapi/ndb_logevent.cpp
@@ -68,6 +68,13 @@ ndb_mgm_create_logevent_handle(NdbMgmHandle mh,
}
extern "C"
+int
+ndb_logevent_get_fd(const NdbLogEventHandle h)
+{
+ return h->socket;
+}
+
+extern "C"
void ndb_mgm_destroy_logevent_handle(NdbLogEventHandle * h)
{
if( !h )
diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/ndb/src/mgmclient/CommandInterpreter.cpp
index 58b98671b14..ba68f6e4f0a 100644
--- a/ndb/src/mgmclient/CommandInterpreter.cpp
+++ b/ndb/src/mgmclient/CommandInterpreter.cpp
@@ -173,8 +173,15 @@ private:
bool rep_connected;
#endif
struct NdbThread* m_event_thread;
+ NdbMutex *m_print_mutex;
};
+struct event_thread_param {
+ NdbMgmHandle *m;
+ NdbMutex **p;
+};
+
+NdbMutex* print_mutex;
/*
* Facade object for CommandInterpreter
@@ -395,6 +402,7 @@ CommandInterpreter::CommandInterpreter(const char *_host,int verbose)
m_connected= false;
m_event_thread= 0;
try_reconnect = 0;
+ m_print_mutex= NdbMutex_Create();
#ifdef HAVE_GLOBAL_REPLICATION
rep_host = NULL;
m_repserver = NULL;
@@ -408,6 +416,7 @@ CommandInterpreter::CommandInterpreter(const char *_host,int verbose)
CommandInterpreter::~CommandInterpreter()
{
disconnect();
+ NdbMutex_Destroy(m_print_mutex);
}
static bool
@@ -444,11 +453,13 @@ CommandInterpreter::printError()
static int do_event_thread;
static void*
-event_thread_run(void* m)
+event_thread_run(void* p)
{
DBUG_ENTER("event_thread_run");
- NdbMgmHandle handle= *(NdbMgmHandle*)m;
+ struct event_thread_param param= *(struct event_thread_param*)p;
+ NdbMgmHandle handle= *(param.m);
+ NdbMutex* printmutex= *(param.p);
int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP,
1, NDB_MGM_EVENT_CATEGORY_STARTUP,
@@ -466,7 +477,11 @@ event_thread_run(void* m)
{
const char ping_token[]= "<PING>";
if (memcmp(ping_token,tmp,sizeof(ping_token)-1))
- ndbout << tmp;
+ if(tmp && strlen(tmp))
+ {
+ Guard g(printmutex);
+ ndbout << tmp;
+ }
}
} while(do_event_thread);
NDB_CLOSE_SOCKET(fd);
@@ -519,8 +534,11 @@ CommandInterpreter::connect()
assert(m_event_thread == 0);
assert(do_event_thread == 0);
do_event_thread= 0;
+ struct event_thread_param p;
+ p.m= &m_mgmsrv2;
+ p.p= &m_print_mutex;
m_event_thread = NdbThread_Create(event_thread_run,
- (void**)&m_mgmsrv2,
+ (void**)&p,
32768,
"CommandInterpreted_event_thread",
NDB_THREAD_PRIO_LOW);
@@ -607,6 +625,7 @@ CommandInterpreter::execute(const char *_line, int _try_reconnect,
int result= execute_impl(_line);
if (error)
*error= m_error;
+
return result;
}
@@ -686,6 +705,7 @@ CommandInterpreter::execute_impl(const char *_line)
DBUG_RETURN(true);
if (strcasecmp(firstToken, "SHOW") == 0) {
+ Guard g(m_print_mutex);
executeShow(allAfterFirstToken);
DBUG_RETURN(true);
}
@@ -920,6 +940,7 @@ CommandInterpreter::executeForAll(const char * cmd, ExecuteFunction fun,
ndbout_c("Trying to start all nodes of system.");
ndbout_c("Use ALL STATUS to see the system start-up phases.");
} else {
+ Guard g(m_print_mutex);
struct ndb_mgm_cluster_state *cl= ndb_mgm_get_status(m_mgmsrv);
if(cl == 0){
ndbout_c("Unable get status from management server");
@@ -1224,6 +1245,7 @@ CommandInterpreter::executeShow(char* parameters)
if(it == 0){
ndbout_c("Unable to create config iterator");
+ ndb_mgm_destroy_configuration(conf);
return;
}
NdbAutoPtr<ndb_mgm_configuration_iterator> ptr(it);
@@ -1270,6 +1292,7 @@ CommandInterpreter::executeShow(char* parameters)
print_nodes(state, it, "ndb_mgmd", mgm_nodes, NDB_MGM_NODE_TYPE_MGM, 0);
print_nodes(state, it, "mysqld", api_nodes, NDB_MGM_NODE_TYPE_API, 0);
// ndbout << helpTextShow;
+ ndb_mgm_destroy_configuration(conf);
return;
} else if (strcasecmp(parameters, "PROPERTIES") == 0 ||
strcasecmp(parameters, "PROP") == 0) {
diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp
index 69c0286a1de..5fabb84adb7 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.cpp
@@ -77,7 +77,6 @@
}\
}
-extern int global_flag_send_heartbeat_now;
extern int g_no_nodeid_checks;
extern my_bool opt_core;
@@ -1455,6 +1454,12 @@ MgmtSrvr::exitSingleUser(int * stopCount, bool abort)
#include <ClusterMgr.hpp>
+void
+MgmtSrvr::updateStatus()
+{
+ theFacade->theClusterMgr->forceHB();
+}
+
int
MgmtSrvr::status(int nodeId,
ndb_mgm_node_status * _status,
@@ -2153,7 +2158,7 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId,
if (found_matching_type && !found_free_node) {
// we have a temporary error which might be due to that
// we have got the latest connect status from db-nodes. Force update.
- global_flag_send_heartbeat_now= 1;
+ updateStatus();
}
BaseString type_string, type_c_string;
@@ -2507,7 +2512,7 @@ MgmtSrvr::Allocated_resources::~Allocated_resources()
if (!m_reserved_nodes.isclear()) {
m_mgmsrv.m_reserved_nodes.bitANDC(m_reserved_nodes);
// node has been reserved, force update signal to ndb nodes
- global_flag_send_heartbeat_now= 1;
+ m_mgmsrv.updateStatus();
char tmp_str[128];
m_mgmsrv.m_reserved_nodes.getText(tmp_str);
diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp
index 187f225470a..17debb19f50 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.hpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.hpp
@@ -490,6 +490,8 @@ public:
void get_connected_nodes(NodeBitmask &connected_nodes) const;
SocketServer *get_socket_server() { return m_socket_server; }
+ void updateStatus();
+
//**************************************************************************
private:
//**************************************************************************
diff --git a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp
index 0524aba4c32..7f5b0e29442 100644
--- a/ndb/src/mgmsrv/Services.cpp
+++ b/ndb/src/mgmsrv/Services.cpp
@@ -982,6 +982,7 @@ printNodeStatus(OutputStream *output,
MgmtSrvr &mgmsrv,
enum ndb_mgm_node_type type) {
NodeId nodeId = 0;
+ mgmsrv.updateStatus();
while(mgmsrv.getNextNodeId(&nodeId, type)) {
enum ndb_mgm_node_status status;
Uint32 startPhase = 0,
diff --git a/ndb/src/ndbapi/ClusterMgr.cpp b/ndb/src/ndbapi/ClusterMgr.cpp
index fbff57d3168..475561af225 100644
--- a/ndb/src/ndbapi/ClusterMgr.cpp
+++ b/ndb/src/ndbapi/ClusterMgr.cpp
@@ -37,7 +37,7 @@
#include <mgmapi_configuration.hpp>
#include <mgmapi_config_parameters.h>
-int global_flag_send_heartbeat_now= 0;
+//#define DEBUG_REG
// Just a C wrapper for threadMain
extern "C"
@@ -67,6 +67,8 @@ ClusterMgr::ClusterMgr(TransporterFacade & _facade):
DBUG_ENTER("ClusterMgr::ClusterMgr");
ndbSetOwnVersion();
clusterMgrThreadMutex = NdbMutex_Create();
+ waitForHBCond= NdbCondition_Create();
+ waitingForHB= false;
noOfAliveNodes= 0;
noOfConnectedNodes= 0;
theClusterMgrThread= 0;
@@ -77,7 +79,8 @@ ClusterMgr::ClusterMgr(TransporterFacade & _facade):
ClusterMgr::~ClusterMgr()
{
DBUG_ENTER("ClusterMgr::~ClusterMgr");
- doStop();
+ doStop();
+ NdbCondition_Destroy(waitForHBCond);
NdbMutex_Destroy(clusterMgrThreadMutex);
DBUG_VOID_RETURN;
}
@@ -164,6 +167,70 @@ ClusterMgr::doStop( ){
}
void
+ClusterMgr::forceHB()
+{
+ theFacade.lock_mutex();
+
+ if(waitingForHB)
+ {
+ NdbCondition_WaitTimeout(waitForHBCond, theFacade.theMutexPtr, 1000);
+ theFacade.unlock_mutex();
+ return;
+ }
+
+ waitingForHB= true;
+
+ NodeBitmask ndb_nodes;
+ ndb_nodes.clear();
+ waitForHBFromNodes.clear();
+ for(Uint32 i = 0; i < MAX_NODES; i++)
+ {
+ if(!theNodes[i].defined)
+ continue;
+ if(theNodes[i].m_info.m_type == NodeInfo::DB)
+ {
+ ndb_nodes.set(i);
+ const ClusterMgr::Node &node= getNodeInfo(i);
+ waitForHBFromNodes.bitOR(node.m_state.m_connected_nodes);
+ }
+ }
+ waitForHBFromNodes.bitAND(ndb_nodes);
+
+#ifdef DEBUG_REG
+ char buf[128];
+ ndbout << "Waiting for HB from " << waitForHBFromNodes.getText(buf) << endl;
+#endif
+ NdbApiSignal signal(numberToRef(API_CLUSTERMGR, theFacade.ownId()));
+
+ signal.theVerId_signalNumber = GSN_API_REGREQ;
+ signal.theReceiversBlockNumber = QMGR;
+ signal.theTrace = 0;
+ signal.theLength = ApiRegReq::SignalLength;
+
+ ApiRegReq * req = CAST_PTR(ApiRegReq, signal.getDataPtrSend());
+ req->ref = numberToRef(API_CLUSTERMGR, theFacade.ownId());
+ req->version = NDB_VERSION;
+
+ int nodeId= 0;
+ for(int i=0;
+ NodeBitmask::NotFound!=(nodeId= waitForHBFromNodes.find(i));
+ i= nodeId+1)
+ {
+#ifdef DEBUG_REG
+ ndbout << "FORCE HB to " << nodeId << endl;
+#endif
+ theFacade.sendSignalUnCond(&signal, nodeId);
+ }
+
+ NdbCondition_WaitTimeout(waitForHBCond, theFacade.theMutexPtr, 1000);
+ waitingForHB= false;
+#ifdef DEBUG_REG
+ ndbout << "Still waiting for HB from " << waitForHBFromNodes.getText(buf) << endl;
+#endif
+ theFacade.unlock_mutex();
+}
+
+void
ClusterMgr::threadMain( ){
NdbApiSignal signal(numberToRef(API_CLUSTERMGR, theFacade.ownId()));
@@ -184,9 +251,6 @@ ClusterMgr::threadMain( ){
/**
* Start of Secure area for use of Transporter
*/
- int send_heartbeat_now= global_flag_send_heartbeat_now;
- global_flag_send_heartbeat_now= 0;
-
theFacade.lock_mutex();
for (int i = 1; i < MAX_NODES; i++){
/**
@@ -209,8 +273,7 @@ ClusterMgr::threadMain( ){
}
theNode.hbCounter += timeSlept;
- if (theNode.hbCounter >= theNode.hbFrequency ||
- send_heartbeat_now) {
+ if (theNode.hbCounter >= theNode.hbFrequency) {
/**
* It is now time to send a new Heartbeat
*/
@@ -226,7 +289,7 @@ ClusterMgr::threadMain( ){
if (theNode.m_info.m_type == NodeInfo::REP) {
signal.theReceiversBlockNumber = API_CLUSTERMGR;
}
-#if 0
+#ifdef DEBUG_REG
ndbout_c("ClusterMgr: Sending API_REGREQ to node %d", (int)nodeId);
#endif
theFacade.sendSignalUnCond(&signal, nodeId);
@@ -278,7 +341,7 @@ ClusterMgr::execAPI_REGREQ(const Uint32 * theData){
const ApiRegReq * const apiRegReq = (ApiRegReq *)&theData[0];
const NodeId nodeId = refToNode(apiRegReq->ref);
-#if 0
+#ifdef DEBUG_REG
ndbout_c("ClusterMgr: Recd API_REGREQ from node %d", nodeId);
#endif
@@ -319,7 +382,7 @@ ClusterMgr::execAPI_REGCONF(const Uint32 * theData){
const ApiRegConf * const apiRegConf = (ApiRegConf *)&theData[0];
const NodeId nodeId = refToNode(apiRegConf->qmgrRef);
-#if 0
+#ifdef DEBUG_REG
ndbout_c("ClusterMgr: Recd API_REGCONF from node %d", nodeId);
#endif
@@ -351,6 +414,17 @@ ClusterMgr::execAPI_REGCONF(const Uint32 * theData){
if (node.m_info.m_type != NodeInfo::REP) {
node.hbFrequency = (apiRegConf->apiHeartbeatFrequency * 10) - 50;
}
+
+ if(waitingForHB)
+ {
+ waitForHBFromNodes.clear(nodeId);
+
+ if(waitForHBFromNodes.isclear())
+ {
+ waitingForHB= false;
+ NdbCondition_Broadcast(waitForHBCond);
+ }
+ }
}
void
@@ -379,6 +453,10 @@ ClusterMgr::execAPI_REGREF(const Uint32 * theData){
default:
break;
}
+
+ waitForHBFromNodes.clear(nodeId);
+ if(waitForHBFromNodes.isclear())
+ NdbCondition_Signal(waitForHBCond);
}
void
diff --git a/ndb/src/ndbapi/ClusterMgr.hpp b/ndb/src/ndbapi/ClusterMgr.hpp
index 1a1e622a889..d2bcc52f7e8 100644
--- a/ndb/src/ndbapi/ClusterMgr.hpp
+++ b/ndb/src/ndbapi/ClusterMgr.hpp
@@ -49,7 +49,9 @@ public:
void doStop();
void startThread();
-
+
+ void forceHB();
+
private:
void threadMain();
@@ -85,7 +87,11 @@ private:
Uint32 noOfConnectedNodes;
Node theNodes[MAX_NODES];
NdbThread* theClusterMgrThread;
-
+
+ NodeBitmask waitForHBFromNodes; // used in forcing HBs
+ NdbCondition* waitForHBCond;
+ bool waitingForHB;
+
/**
* Used for controlling start/stop of the thread
*/
diff --git a/ndb/test/include/NDBT_Tables.hpp b/ndb/test/include/NDBT_Tables.hpp
index fb0df8aa35b..a6973861af8 100644
--- a/ndb/test/include/NDBT_Tables.hpp
+++ b/ndb/test/include/NDBT_Tables.hpp
@@ -42,6 +42,8 @@ public:
static const NdbDictionary::Table* getTable(int _num);
static int getNumTables();
+ static const char** getIndexes(const char* table);
+
private:
static const NdbDictionary::Table* tableWithPkSize(const char* _nam, Uint32 pkSize);
};
diff --git a/ndb/test/ndbapi/testDict.cpp b/ndb/test/ndbapi/testDict.cpp
index b992d492ad6..ba05bbad7bb 100644
--- a/ndb/test/ndbapi/testDict.cpp
+++ b/ndb/test/ndbapi/testDict.cpp
@@ -1022,8 +1022,8 @@ int verifyTablesAreEqual(const NdbDictionary::Table* pTab, const NdbDictionary::
if (!pTab->equal(*pTab2)){
g_err << "equal failed" << endl;
- g_info << *pTab;
- g_info << *pTab2;
+ g_info << *(NDBT_Table*)pTab; // gcc-4.1.2
+ g_info << *(NDBT_Table*)pTab2;
return NDBT_FAILED;
}
return NDBT_OK;
@@ -1033,7 +1033,7 @@ int runGetPrimaryKey(NDBT_Context* ctx, NDBT_Step* step){
Ndb* pNdb = GETNDB(step);
const NdbDictionary::Table* pTab = ctx->getTab();
ndbout << "|- " << pTab->getName() << endl;
- g_info << *pTab;
+ g_info << *(NDBT_Table*)pTab;
// Try to create table in db
if (pTab->createTableInDb(pNdb) != 0){
return NDBT_FAILED;
@@ -1890,6 +1890,52 @@ runDictOps(NDBT_Context* ctx, NDBT_Step* step)
// replace by the Retrieved table
pTab = pTab2;
+ // create indexes
+ const char** indlist = NDBT_Tables::getIndexes(tabName);
+ uint indnum = 0;
+ while (*indlist != 0) {
+ uint count = 0;
+ try_create_index:
+ count++;
+ if (count == 1)
+ g_info << "2: create index " << indnum << " " << *indlist << endl;
+ NdbDictionary::Index ind;
+ char indName[200];
+ sprintf(indName, "%s_X%u", tabName, indnum);
+ ind.setName(indName);
+ ind.setTable(tabName);
+ if (strcmp(*indlist, "UNIQUE") == 0) {
+ ind.setType(NdbDictionary::Index::UniqueHashIndex);
+ ind.setLogging(pTab->getLogging());
+ } else if (strcmp(*indlist, "ORDERED") == 0) {
+ ind.setType(NdbDictionary::Index::OrderedIndex);
+ ind.setLogging(false);
+ } else {
+ assert(false);
+ }
+ const char** indtemp = indlist;
+ while (*++indtemp != 0) {
+ ind.addColumn(*indtemp);
+ }
+ if (pDic->createIndex(ind) != 0) {
+ const NdbError err = pDic->getNdbError();
+ if (count == 1)
+ g_err << "2: " << indName << ": create failed: " << err << endl;
+ if (err.code != 711) {
+ result = NDBT_FAILED;
+ break;
+ }
+ NdbSleep_MilliSleep(myRandom48(maxsleep));
+ goto try_create_index;
+ }
+ indlist = ++indtemp;
+ indnum++;
+ }
+ if (result == NDBT_FAILED)
+ break;
+
+ uint indcount = indnum;
+
int records = myRandom48(ctx->getNumRecords());
g_info << "2: load " << records << " records" << endl;
HugoTransactions hugoTrans(*pTab);
@@ -1901,6 +1947,32 @@ runDictOps(NDBT_Context* ctx, NDBT_Step* step)
}
NdbSleep_MilliSleep(myRandom48(maxsleep));
+ // drop indexes
+ indnum = 0;
+ while (indnum < indcount) {
+ uint count = 0;
+ try_drop_index:
+ count++;
+ if (count == 1)
+ g_info << "2: drop index " << indnum << endl;
+ char indName[200];
+ sprintf(indName, "%s_X%u", tabName, indnum);
+ if (pDic->dropIndex(indName, tabName) != 0) {
+ const NdbError err = pDic->getNdbError();
+ if (count == 1)
+ g_err << "2: " << indName << ": drop failed: " << err << endl;
+ if (err.code != 711) {
+ result = NDBT_FAILED;
+ break;
+ }
+ NdbSleep_MilliSleep(myRandom48(maxsleep));
+ goto try_drop_index;
+ }
+ indnum++;
+ }
+ if (result == NDBT_FAILED)
+ break;
+
g_info << "2: drop" << endl;
{
uint count = 0;
diff --git a/ndb/test/ndbapi/testIndex.cpp b/ndb/test/ndbapi/testIndex.cpp
index 5785db232c4..c25aae55897 100644
--- a/ndb/test/ndbapi/testIndex.cpp
+++ b/ndb/test/ndbapi/testIndex.cpp
@@ -1199,6 +1199,48 @@ int runLQHKEYREF(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_OK;
}
+int
+runBug21384(NDBT_Context* ctx, NDBT_Step* step)
+{
+ Ndb* pNdb = GETNDB(step);
+ HugoTransactions hugoTrans(*ctx->getTab());
+ NdbRestarter restarter;
+
+ int loops = ctx->getNumLoops();
+ const int rows = ctx->getNumRecords();
+ const int batchsize = ctx->getProperty("BatchSize", 50);
+
+ while (loops--)
+ {
+ if(restarter.insertErrorInAllNodes(8037) != 0)
+ {
+ g_err << "Failed to error insert(8037)" << endl;
+ return NDBT_FAILED;
+ }
+
+ if (hugoTrans.indexReadRecords(pNdb, pkIdxName, rows, batchsize) == 0)
+ {
+ g_err << "Index succeded (it should have failed" << endl;
+ return NDBT_FAILED;
+ }
+
+ if(restarter.insertErrorInAllNodes(0) != 0)
+ {
+ g_err << "Failed to error insert(0)" << endl;
+ return NDBT_FAILED;
+ }
+
+ if (hugoTrans.indexReadRecords(pNdb, pkIdxName, rows, batchsize) != 0){
+ g_err << "Index read failed" << endl;
+ return NDBT_FAILED;
+ }
+ }
+
+ return NDBT_OK;
+}
+
+
+
NDBT_TESTSUITE(testIndex);
TESTCASE("CreateAll",
"Test that we can create all various indexes on each table\n"
@@ -1512,6 +1554,16 @@ TESTCASE("UniqueNull",
FINALIZER(createPkIndex_Drop);
FINALIZER(runClearTable);
}
+TESTCASE("Bug21384",
+ "Test that unique indexes and nulls"){
+ TC_PROPERTY("LoggedIndexes", (unsigned)0);
+ INITIALIZER(runClearTable);
+ INITIALIZER(createPkIndex);
+ INITIALIZER(runLoadTable);
+ STEP(runBug21384);
+ FINALIZER(createPkIndex_Drop);
+ FINALIZER(runClearTable);
+}
NDBT_TESTSUITE_END(testIndex);
int main(int argc, const char** argv){
diff --git a/ndb/test/ndbapi/testSystemRestart.cpp b/ndb/test/ndbapi/testSystemRestart.cpp
index 30f7aca9b06..8a0100ff3e4 100644
--- a/ndb/test/ndbapi/testSystemRestart.cpp
+++ b/ndb/test/ndbapi/testSystemRestart.cpp
@@ -1121,6 +1121,46 @@ int runClearTable(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_OK;
}
+int
+runBug21536(NDBT_Context* ctx, NDBT_Step* step)
+{
+ NdbRestarter restarter;
+ const Uint32 nodeCount = restarter.getNumDbNodes();
+ if(nodeCount != 2){
+ g_info << "Bug21536 - 2 nodes to test" << endl;
+ return NDBT_OK;
+ }
+
+ int node1 = restarter.getDbNodeId(rand() % nodeCount);
+ int node2 = restarter.getRandomNodeSameNodeGroup(node1, rand());
+
+ if (node1 == -1 || node2 == -1)
+ return NDBT_OK;
+
+ int result = NDBT_OK;
+ do {
+ CHECK(restarter.restartOneDbNode(node1, false, true, true) == 0);
+ CHECK(restarter.waitNodesNoStart(&node1, 1) == 0);
+ CHECK(restarter.insertErrorInNode(node1, 1000) == 0);
+ int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
+ CHECK(restarter.dumpStateOneNode(node1, val2, 2) == 0);
+ CHECK(restarter.startNodes(&node1, 1) == 0);
+ restarter.waitNodesStartPhase(&node1, 1, 3, 120);
+ CHECK(restarter.waitNodesNoStart(&node1, 1) == 0);
+
+ CHECK(restarter.restartOneDbNode(node2, true, true, true) == 0);
+ CHECK(restarter.waitNodesNoStart(&node2, 1) == 0);
+ CHECK(restarter.startNodes(&node1, 1) == 0);
+ CHECK(restarter.waitNodesStarted(&node1, 1) == 0);
+ CHECK(restarter.startNodes(&node2, 1) == 0);
+ CHECK(restarter.waitClusterStarted() == 0);
+
+ } while(0);
+
+ g_info << "Bug21536 finished" << endl;
+
+ return result;
+}
NDBT_TESTSUITE(testSystemRestart);
TESTCASE("SR1",
@@ -1287,6 +1327,13 @@ TESTCASE("Bug18385",
STEP(runBug18385);
FINALIZER(runClearTable);
}
+TESTCASE("Bug21536",
+ "Perform partition system restart with other nodes with higher GCI"){
+ INITIALIZER(runWaitStarted);
+ INITIALIZER(runClearTable);
+ STEP(runBug21536);
+ FINALIZER(runClearTable);
+}
NDBT_TESTSUITE_END(testSystemRestart);
int main(int argc, const char** argv){
diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt
index cbb8a9a2574..a2edc568426 100644
--- a/ndb/test/run-test/daily-basic-tests.txt
+++ b/ndb/test/run-test/daily-basic-tests.txt
@@ -449,6 +449,10 @@ max-time: 1000
cmd: testNodeRestart
args: -n Bug20185 T1
+max-time: 1000
+cmd: testIndex
+args: -n Bug21384
+
# OLD FLEX
max-time: 500
cmd: flexBench
diff --git a/ndb/test/src/NDBT_Tables.cpp b/ndb/test/src/NDBT_Tables.cpp
index 5a5fecd85c1..d72dfcc5031 100644
--- a/ndb/test/src/NDBT_Tables.cpp
+++ b/ndb/test/src/NDBT_Tables.cpp
@@ -799,6 +799,17 @@ NDBT_Tables::getNumTables(){
return numTestTables;
}
+const char**
+NDBT_Tables::getIndexes(const char* table)
+{
+ Uint32 i = 0;
+ for (i = 0; indexes[i].m_table != 0; i++) {
+ if (strcmp(indexes[i].m_table, table) == 0)
+ return indexes[i].m_indexes;
+ }
+ return 0;
+}
+
int
NDBT_Tables::createAllTables(Ndb* pNdb, bool _temp, bool existsOk){
diff --git a/ndb/tools/ndb_config.cpp b/ndb/tools/ndb_config.cpp
index 27ab6a182bb..135bec7ef72 100644
--- a/ndb/tools/ndb_config.cpp
+++ b/ndb/tools/ndb_config.cpp
@@ -145,7 +145,7 @@ struct Match
struct HostMatch : public Match
{
- virtual int eval(NdbMgmHandle, const Iter&);
+ virtual int eval(const Iter&);
};
struct Apply
@@ -402,7 +402,7 @@ Match::eval(const Iter& iter)
}
int
-HostMatch::eval(NdbMgmHandle h, const Iter& iter)
+HostMatch::eval(const Iter& iter)
{
const char* valc;
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 5d6fe5f984f..a753e6e0be0 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -35,6 +35,7 @@
// options from from mysqld.cc
extern my_bool opt_ndb_optimized_node_selection;
extern const char *opt_ndbcluster_connectstring;
+extern ulong opt_ndb_cache_check_time;
// Default value for parallelism
static const int parallelism= 0;
@@ -4175,10 +4176,15 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
acc_row_size+= 4 + /*safety margin*/ 4;
#endif
ulonglong acc_fragment_size= 512*1024*1024;
+ /*
+ * if not --with-big-tables then max_rows is ulong
+ * the warning in this case is misleading though
+ */
+ ulonglong big_max_rows = (ulonglong)max_rows;
#if MYSQL_VERSION_ID >= 50100
- no_fragments= (max_rows*acc_row_size)/acc_fragment_size+1;
+ no_fragments= (big_max_rows*acc_row_size)/acc_fragment_size+1;
#else
- no_fragments= ((max_rows*acc_row_size)/acc_fragment_size+1
+ no_fragments= ((big_max_rows*acc_row_size)/acc_fragment_size+1
+1/*correct rounding*/)/2;
#endif
}
@@ -5238,6 +5244,7 @@ bool ndbcluster_init()
pthread_cond_init(&COND_ndb_util_thread, NULL);
+ ndb_cache_check_time = opt_ndb_cache_check_time;
// Create utility thread
pthread_t tmp;
if (pthread_create(&tmp, &connection_attrib, ndb_util_thread_func, 0))