Diffstat (limited to 'ndb')
-rw-r--r--  ndb/src/common/portlib/gcc.cpp                     |   2
-rw-r--r--  ndb/src/common/util/new.cpp                        |   2
-rw-r--r--  ndb/src/cw/cpcd/APIService.cpp                     |   1
-rw-r--r--  ndb/src/cw/cpcd/CPCD.hpp                           |   6
-rw-r--r--  ndb/src/cw/cpcd/Process.cpp                        |  14
-rw-r--r--  ndb/src/kernel/blocks/dbdict/Dbdict.cpp            |   2
-rw-r--r--  ndb/src/kernel/blocks/dbtc/Dbtc.hpp                |  30
-rw-r--r--  ndb/src/kernel/blocks/dbtc/DbtcInit.cpp            |   4
-rw-r--r--  ndb/src/kernel/blocks/dbtc/DbtcMain.cpp            |  88
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp        |   2
-rw-r--r--  ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp          |   4
-rw-r--r--  ndb/src/kernel/error/ErrorReporter.cpp             |  10
-rw-r--r--  ndb/src/mgmapi/mgmapi.cpp                          |   6
-rw-r--r--  ndb/src/ndbapi/ClusterMgr.cpp                      |  13
-rw-r--r--  ndb/src/ndbapi/ClusterMgr.hpp                      |  14
-rw-r--r--  ndb/src/ndbapi/Ndb.cpp                             |  11
-rw-r--r--  ndb/src/ndbapi/ndberror.c                          |   2
-rw-r--r--  ndb/test/include/CpcClient.hpp                     |   1
-rw-r--r--  ndb/test/ndbapi/testNodeRestart.cpp                |   2
-rw-r--r--  ndb/test/run-test/Makefile.am                      |   7
-rw-r--r--  ndb/test/run-test/conf-daily-basic-dl145a.txt      |  19
-rw-r--r--  ndb/test/run-test/conf-daily-basic-ndbmaster.txt   |  19
-rw-r--r--  ndb/test/run-test/conf-daily-basic-shark.txt       |  19
-rw-r--r--  ndb/test/run-test/conf-daily-devel-ndbmaster.txt   |  19
-rw-r--r--  ndb/test/run-test/conf-daily-sql-ndbmaster.txt     |  20
-rw-r--r--  ndb/test/run-test/main.cpp                         |  43
-rwxr-xr-x  ndb/test/run-test/make-config.sh                   | 526
-rwxr-xr-x  ndb/test/run-test/ndb-autotest.sh                  |  78
-rw-r--r--  ndb/test/run-test/run-test.hpp                     |   1
-rw-r--r--  ndb/test/src/CpcClient.cpp                         |   3
-rw-r--r--  ndb/test/src/NDBT_ResultRow.cpp                    |   8
31 files changed, 381 insertions, 595 deletions
diff --git a/ndb/src/common/portlib/gcc.cpp b/ndb/src/common/portlib/gcc.cpp
index 66aa4812dc6..4e49d787d3c 100644
--- a/ndb/src/common/portlib/gcc.cpp
+++ b/ndb/src/common/portlib/gcc.cpp
@@ -2,6 +2,6 @@
/**
* GCC linking problem...
*/
-#ifdef DEFINE_CXA_PURE_VIRTUAL
+#if 0
extern "C" { int __cxa_pure_virtual() { return 0;} }
#endif
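
Note on the hunk above: __cxa_pure_virtual is the Itanium C++ ABI hook the runtime calls when a pure virtual function is invoked through an object whose vtable still points at the abstract base (typically during construction or destruction); the disabled block supplied a no-op stub to work around a GCC linking problem. A minimal illustration of how the hook gets reached, assuming a GCC/Itanium-ABI toolchain — this is not part of the patch:

    struct Base {
      Base() { helper(); }        // runs before Derived's vtable is installed
      void helper() { init(); }   // virtual dispatch lands in the pure slot
      virtual void init() = 0;
      virtual ~Base() {}
    };
    struct Derived : Base {
      void init() {}              // never reached from Base's constructor
    };
    // Constructing a Derived typically terminates via __cxa_pure_virtual().
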
diff --git a/ndb/src/common/util/new.cpp b/ndb/src/common/util/new.cpp
index 901f74bf979..643800f1582 100644
--- a/ndb/src/common/util/new.cpp
+++ b/ndb/src/common/util/new.cpp
@@ -6,7 +6,7 @@ extern "C" {
void (* ndb_new_handler)() = 0;
}
-#ifdef USE_MYSYS_NEW
+#if 0
void *operator new (size_t sz)
{
diff --git a/ndb/src/cw/cpcd/APIService.cpp b/ndb/src/cw/cpcd/APIService.cpp
index 63d0aaafe86..b009f0c0fc4 100644
--- a/ndb/src/cw/cpcd/APIService.cpp
+++ b/ndb/src/cw/cpcd/APIService.cpp
@@ -122,6 +122,7 @@ ParserRow<CPCDAPISession> commands[] =
CPCD_ARG("stderr", String, Optional, "Redirection of stderr"),
CPCD_ARG("stdin", String, Optional, "Redirection of stderr"),
CPCD_ARG("ulimit", String, Optional, "ulimit"),
+ CPCD_ARG("shutdown", String, Optional, "shutdown options"),
CPCD_CMD("undefine process", &CPCDAPISession::undefineProcess, ""),
CPCD_CMD_ALIAS("undef", "undefine process", 0),
diff --git a/ndb/src/cw/cpcd/CPCD.hpp b/ndb/src/cw/cpcd/CPCD.hpp
index a5c0bef1dac..aecc43150c4 100644
--- a/ndb/src/cw/cpcd/CPCD.hpp
+++ b/ndb/src/cw/cpcd/CPCD.hpp
@@ -243,6 +243,12 @@ public:
* @desc Format c:unlimited d:0 ...
*/
BaseString m_ulimit;
+
+ /**
+ * @brief shutdown options
+ */
+ BaseString m_shutdown_options;
+
private:
class CPCD *m_cpcd;
void do_exec();
diff --git a/ndb/src/cw/cpcd/Process.cpp b/ndb/src/cw/cpcd/Process.cpp
index 2509f34e882..cfffec7d0ce 100644
--- a/ndb/src/cw/cpcd/Process.cpp
+++ b/ndb/src/cw/cpcd/Process.cpp
@@ -44,6 +44,8 @@ CPCD::Process::print(FILE * f){
fprintf(f, "stdout: %s\n", m_stdout.c_str() ? m_stdout.c_str() : "");
fprintf(f, "stderr: %s\n", m_stderr.c_str() ? m_stderr.c_str() : "");
fprintf(f, "ulimit: %s\n", m_ulimit.c_str() ? m_ulimit.c_str() : "");
+ fprintf(f, "shutdown: %s\n", m_shutdown_options.c_str() ?
+ m_shutdown_options.c_str() : "");
}
CPCD::Process::Process(const Properties & props, class CPCD *cpcd) {
@@ -64,6 +66,7 @@ CPCD::Process::Process(const Properties & props, class CPCD *cpcd) {
props.get("stdout", m_stdout);
props.get("stderr", m_stderr);
props.get("ulimit", m_ulimit);
+ props.get("shutdown", m_shutdown_options);
m_status = STOPPED;
if(strcasecmp(m_type.c_str(), "temporary") == 0){
@@ -220,8 +223,11 @@ set_ulimit(const BaseString & pair){
if(!(list[1].trim() == "unlimited")){
value = atoi(list[1].c_str());
}
-
+#if defined(__INTEL_COMPILER)
+ struct rlimit64 rlp;
+#else
struct rlimit rlp;
+#endif
#define _RLIMIT_FIX(x) { res = getrlimit(x,&rlp); if(!res){ rlp.rlim_cur = value; res = setrlimit(x, &rlp); }}
if(list[0].trim() == "c"){
@@ -451,7 +457,11 @@ CPCD::Process::stop() {
m_status = STOPPING;
errno = 0;
- int ret = kill(-m_pid, SIGTERM);
+ int signo= SIGTERM;
+ if(m_shutdown_options == "SIGKILL")
+ signo= SIGKILL;
+
+ int ret = kill(-m_pid, signo);
switch(ret) {
case 0:
logger.debug("Sent SIGTERM to pid %d", (int)-m_pid);
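
The cpcd changes above thread a new "shutdown" option from the API parser through CPCD::Process into stop(): when the option is "SIGKILL" the process group is killed hard instead of being sent SIGTERM. A minimal sketch of the signal selection, using a hypothetical helper name; the real code does this inline in stop():

    #include <signal.h>
    #include <string.h>

    /* Sketch only: choose the stop signal from the per-process "shutdown"
     * option, mirroring the logic added to CPCD::Process::stop(). */
    static int shutdown_signal(const char * shutdown_options)
    {
      if (shutdown_options != 0 && strcmp(shutdown_options, "SIGKILL") == 0)
        return SIGKILL;   /* forced, uncatchable termination */
      return SIGTERM;     /* default: let the process clean up */
    }

    /* usage: kill(-pid, shutdown_signal(opts)); */
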
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index 3d5340494ab..b9877dae46c 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -4061,12 +4061,14 @@ calcLHbits(Uint32 * lhPageBits, Uint32 * lhDistrBits,
tmp <<= 1;
distrBits++;
}//while
+#ifdef ndb_classical_lhdistrbits
if (tmp != totalFragments) {
tmp >>= 1;
if ((fid >= (totalFragments - tmp)) && (fid < (tmp - 1))) {
distrBits--;
}//if
}//if
+#endif
* lhPageBits = pageBits;
* lhDistrBits = distrBits;
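
For orientation, the loop immediately above the disabled branch derives the distribution bits from the fragment count. A simplified standalone sketch (an approximation of Dbdict::calcLHbits, not its real signature): distrBits is the smallest d such that (1 << d) >= totalFragments, and the now-#ifdef'ed "classical" branch reduced d by one for fragment ids that still fit in the smaller range.

    static unsigned distr_bits(unsigned totalFragments)
    {
      unsigned tmp = 1;
      unsigned distrBits = 0;
      while (tmp < totalFragments) {
        tmp <<= 1;
        distrBits++;
      }
      return distrBits;
    }
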
diff --git a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
index 2baa4400409..2983b02de67 100644
--- a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
+++ b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
@@ -585,34 +585,8 @@ public:
*/
ArrayPool<TcIndexOperation> c_theIndexOperationPool;
- /**
- * The list of index operations
- */
- ArrayList<TcIndexOperation> c_theIndexOperations;
-
UintR c_maxNumberOfIndexOperations;
- struct TcSeizedIndexOperation {
- /**
- * Next ptr (used in pool/list)
- */
- union {
- Uint32 nextPool;
- Uint32 nextList;
- };
- /**
- * Prev pointer (used in list)
- */
- Uint32 prevList;
- };
-
- /**
- * Pool of seized index operations
- */
- ArrayPool<TcSeizedIndexOperation> c_theSeizedIndexOperationPool;
-
- typedef Ptr<TcSeizedIndexOperation> TcSeizedIndexOperationPtr;
-
/************************** API CONNECT RECORD ***********************
* The API connect record contains the connection record to which the
* application connects.
@@ -650,7 +624,7 @@ public:
struct ApiConnectRecord {
ApiConnectRecord(ArrayPool<TcFiredTriggerData> & firedTriggerPool,
- ArrayPool<TcSeizedIndexOperation> & seizedIndexOpPool):
+ ArrayPool<TcIndexOperation> & seizedIndexOpPool):
theFiredTriggers(firedTriggerPool),
isIndexOp(false),
theSeizedIndexOperations(seizedIndexOpPool)
@@ -763,7 +737,7 @@ public:
UintR accumulatingIndexOp;
UintR executingIndexOp;
UintR tcIndxSendArray[6];
- ArrayList<TcSeizedIndexOperation> theSeizedIndexOperations;
+ ArrayList<TcIndexOperation> theSeizedIndexOperations;
};
typedef Ptr<ApiConnectRecord> ApiConnectRecordPtr;
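
The removal above collapses a two-level bookkeeping scheme: previously each ApiConnectRecord kept an ArrayList of small TcSeizedIndexOperation records whose only payload was the pool index of the real TcIndexOperation, held in a second pool. Now the per-transaction list links the TcIndexOperation records themselves from the shared c_theIndexOperationPool, so each operation costs one seize/release instead of two. A schematic of the linkage fields involved (illustrative struct, not the NDB class):

    struct IndexOpRecord {
      union {                  /* linkage shared between the pool free-list */
        unsigned nextPool;     /* and the per-transaction seized list       */
        unsigned nextList;
      };
      unsigned prevList;       /* back-pointer for the doubly linked list */
      unsigned indexOpId;      /* own slot in the pool, used at release   */
      /* ... buffered key/attr info and request state elided ... */
    };
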
diff --git a/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp b/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
index 59c8237f20a..f99b4bf15af 100644
--- a/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
+++ b/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
@@ -65,7 +65,6 @@ void Dbtc::initData()
c_theFiredTriggerPool.setSize(c_maxNumberOfFiredTriggers);
c_theIndexPool.setSize(c_maxNumberOfIndexes);
c_theIndexOperationPool.setSize(c_maxNumberOfIndexOperations);
- c_theSeizedIndexOperationPool.setSize(c_maxNumberOfIndexOperations);
c_theAttributeBufferPool.setSize(c_transactionBufferSpace);
c_firedTriggerHash.setSize((c_maxNumberOfFiredTriggers+10)/10);
}//Dbtc::initData()
@@ -85,7 +84,7 @@ void Dbtc::initRecords()
for(unsigned i = 0; i<capiConnectFilesize; i++) {
p = &apiConnectRecord[i];
new (p) ApiConnectRecord(c_theFiredTriggerPool,
- c_theSeizedIndexOperationPool);
+ c_theIndexOperationPool);
}
// Init all fired triggers
DLFifoList<TcFiredTriggerData> triggers(c_theFiredTriggerPool);
@@ -177,7 +176,6 @@ Dbtc::Dbtc(const class Configuration & conf):
c_maxNumberOfFiredTriggers(0),
c_theIndexes(c_theIndexPool),
c_maxNumberOfIndexes(0),
- c_theIndexOperations(c_theIndexOperationPool),
c_maxNumberOfIndexOperations(0),
m_commitAckMarkerHash(m_commitAckMarkerPool)
{
diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
index c8260223004..38d514047b8 100644
--- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -11368,18 +11368,18 @@ void Dbtc::execTCINDXREQ(Signal* signal)
jam();
// This is a newly started transaction, clean-up
releaseAllSeizedIndexOperations(regApiPtr);
+
+ regApiPtr->transid[0] = tcIndxReq->transId1;
+ regApiPtr->transid[1] = tcIndxReq->transId2;
}//if
- if (!seizeIndexOperation(regApiPtr, indexOpPtr)) {
+
+ if (ERROR_INSERTED(8036) || !seizeIndexOperation(regApiPtr, indexOpPtr)) {
jam();
// Failed to allocate index operation
- TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
-
- tcIndxRef->connectPtr = tcIndxReq->senderData;
- tcIndxRef->transId[0] = regApiPtr->transid[0];
- tcIndxRef->transId[1] = regApiPtr->transid[1];
- tcIndxRef->errorCode = 4000;
- sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
- TcKeyRef::SignalLength, JBB);
+ terrorCode = 288;
+ regApiPtr->m_exec_flag |= TcKeyReq::getExecuteFlag(tcIndxRequestInfo);
+ apiConnectptr = transPtr;
+ abortErrorLab(signal);
return;
}
TcIndexOperation* indexOp = indexOpPtr.p;
@@ -11514,15 +11514,17 @@ void Dbtc::execINDXKEYINFO(Signal* signal)
TcIndexOperationPtr indexOpPtr;
TcIndexOperation* indexOp;
- indexOpPtr.i = regApiPtr->accumulatingIndexOp;
- indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
- if (saveINDXKEYINFO(signal,
- indexOp,
- src,
- keyInfoLength)) {
- jam();
- // We have received all we need
- readIndexTable(signal, regApiPtr, indexOp);
+ if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
+ {
+ indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
+ if (saveINDXKEYINFO(signal,
+ indexOp,
+ src,
+ keyInfoLength)) {
+ jam();
+ // We have received all we need
+ readIndexTable(signal, regApiPtr, indexOp);
+ }
}
}
@@ -11545,15 +11547,17 @@ void Dbtc::execINDXATTRINFO(Signal* signal)
TcIndexOperationPtr indexOpPtr;
TcIndexOperation* indexOp;
- indexOpPtr.i = regApiPtr->accumulatingIndexOp;
- indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
- if (saveINDXATTRINFO(signal,
- indexOp,
- src,
- attrInfoLength)) {
- jam();
- // We have received all we need
- readIndexTable(signal, regApiPtr, indexOp);
+ if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
+ {
+ indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
+ if (saveINDXATTRINFO(signal,
+ indexOp,
+ src,
+ attrInfoLength)) {
+ jam();
+ // We have received all we need
+ readIndexTable(signal, regApiPtr, indexOp);
+ }
}
}
@@ -11578,7 +11582,7 @@ bool Dbtc::saveINDXKEYINFO(Signal* signal,
releaseIndexOperation(apiConnectptr.p, indexOp);
terrorCode = 4000;
abortErrorLab(signal);
- return true;
+ return false;
}
if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
jam();
@@ -11611,7 +11615,7 @@ bool Dbtc::saveINDXATTRINFO(Signal* signal,
releaseIndexOperation(apiConnectptr.p, indexOp);
terrorCode = 4000;
abortErrorLab(signal);
- return true;
+ return false;
}
if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
jam();
@@ -11671,7 +11675,7 @@ void Dbtc::execTCKEYCONF(Signal* signal)
jamEntry();
indexOpPtr.i = tcKeyConf->apiConnectPtr;
- TcIndexOperation* indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
+ TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
Uint32 confInfo = tcKeyConf->confInfo;
/**
@@ -11760,7 +11764,7 @@ void Dbtc::execTCKEYREF(Signal* signal)
jamEntry();
indexOpPtr.i = tcKeyRef->connectPtr;
- TcIndexOperation* indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
+ TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
indexOpPtr.p = indexOp;
if (!indexOp) {
jam();
@@ -11861,7 +11865,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
jamEntry();
TcIndexOperationPtr indexOpPtr;
indexOpPtr.i = transIdAI->connectPtr;
- TcIndexOperation* indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
+ TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
indexOpPtr.p = indexOp;
if (!indexOp) {
jam();
@@ -11969,7 +11973,7 @@ void Dbtc::execTCROLLBACKREP(Signal* signal)
jamEntry();
TcIndexOperationPtr indexOpPtr;
indexOpPtr.i = tcRollbackRep->connectPtr;
- TcIndexOperation* indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
+ TcIndexOperation* indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
indexOpPtr.p = indexOp;
tcRollbackRep = (TcRollbackRep *)signal->getDataPtrSend();
tcRollbackRep->connectPtr = indexOp->tcIndxReq.senderData;
@@ -12297,16 +12301,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
bool Dbtc::seizeIndexOperation(ApiConnectRecord* regApiPtr,
TcIndexOperationPtr& indexOpPtr)
{
- bool seizeOk;
-
- seizeOk = c_theIndexOperations.seize(indexOpPtr);
- if (seizeOk) {
- jam();
- TcSeizedIndexOperationPtr seizedIndexOpPtr;
- seizeOk &= regApiPtr->theSeizedIndexOperations.seizeId(seizedIndexOpPtr,
- indexOpPtr.i);
- }
- return seizeOk;
+ return regApiPtr->theSeizedIndexOperations.seize(indexOpPtr);
}
void Dbtc::releaseIndexOperation(ApiConnectRecord* regApiPtr,
@@ -12320,18 +12315,16 @@ void Dbtc::releaseIndexOperation(ApiConnectRecord* regApiPtr,
indexOp->expectedTransIdAI = 0;
indexOp->transIdAI.release();
regApiPtr->theSeizedIndexOperations.release(indexOp->indexOpId);
- c_theIndexOperations.release(indexOp->indexOpId);
}
void Dbtc::releaseAllSeizedIndexOperations(ApiConnectRecord* regApiPtr)
{
- TcSeizedIndexOperationPtr seizedIndexOpPtr;
+ TcIndexOperationPtr seizedIndexOpPtr;
regApiPtr->theSeizedIndexOperations.first(seizedIndexOpPtr);
while(seizedIndexOpPtr.i != RNIL) {
jam();
- TcIndexOperation* indexOp =
- c_theIndexOperations.getPtr(seizedIndexOpPtr.i);
+ TcIndexOperation* indexOp = seizedIndexOpPtr.p;
indexOp->indexOpState = IOS_NOOP;
indexOp->expectedKeyInfo = 0;
@@ -12340,7 +12333,6 @@ void Dbtc::releaseAllSeizedIndexOperations(ApiConnectRecord* regApiPtr)
indexOp->attrInfo.release();
indexOp->expectedTransIdAI = 0;
indexOp->transIdAI.release();
- c_theIndexOperations.release(seizedIndexOpPtr.i);
regApiPtr->theSeizedIndexOperations.next(seizedIndexOpPtr);
}
regApiPtr->theSeizedIndexOperations.release();
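
Two defensive fixes run through the hunks above: INDXKEYINFO/INDXATTRINFO signals that arrive after the accumulating index operation has been aborted are now dropped instead of dereferencing RNIL, and saveINDXKEYINFO/saveINDXATTRINFO return false after aborting so the caller never continues into readIndexTable() on a released operation. A compressed sketch of that control flow (illustrative names, not the block's real interfaces):

    struct IndexOp;                                  /* opaque in this sketch */
    static const unsigned RNIL_SKETCH = 0xffffff00;  /* stand-in for RNIL */

    void handle_indx_keyinfo(unsigned accumulatingIndexOp,
                             IndexOp * (*get_ptr)(unsigned),
                             bool (*save_keyinfo)(IndexOp *),  /* false on abort */
                             void (*read_index_table)(IndexOp *))
    {
      if (accumulatingIndexOp == RNIL_SKETCH)
        return;                         /* stale signal: operation was aborted */
      IndexOp * op = get_ptr(accumulatingIndexOp);
      if (save_keyinfo(op))             /* true only when all info received */
        read_index_table(op);
    }
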
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp b/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
index cbd56c3281f..470b98fd04c 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
@@ -443,6 +443,7 @@ Dbtup::commitRecord(Signal* signal,
saveAttributeMask.bitOR(befOpPtr.p->changeMask);
befOpPtr.p->changeMask.clear();
befOpPtr.p->changeMask.bitOR(attributeMask);
+ befOpPtr.p->gci = regOperPtr->gci;
operPtr.p = befOpPtr.p;
checkDetachedTriggers(signal,
@@ -480,6 +481,7 @@ Dbtup::commitRecord(Signal* signal,
befOpPtr.p->pageOffset = befOpPtr.p->pageOffsetC;
befOpPtr.p->fragPageId = befOpPtr.p->fragPageIdC;
befOpPtr.p->pageIndex = befOpPtr.p->pageIndexC;
+ befOpPtr.p->gci = regOperPtr->gci;
operPtr.p = befOpPtr.p;
checkDetachedTriggers(signal,
diff --git a/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp b/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
index f76440a462a..b3fc6e04d6c 100644
--- a/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
+++ b/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
@@ -799,7 +799,11 @@ AsyncFile::rmrfReq(Request * request, char * path, bool removePath){
request->error = errno;
return;
}
+#if defined(__INTEL_COMPILER)
+ struct dirent64 * dp;
+#else
struct dirent * dp;
+#endif
while ((dp = readdir(dirp)) != NULL){
if ((strcmp(".", dp->d_name) != 0) && (strcmp("..", dp->d_name) != 0)) {
BaseString::snprintf(path_add, (size_t)path_max_copy, "%s%s",
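
Apart from the compiler-specific dirent64 declaration, the surrounding rmrfReq loop is the standard opendir/readdir walk that skips the "." and ".." entries before recursing. A self-contained sketch of that pattern (plain POSIX, not the AsyncFile member itself):

    #include <dirent.h>
    #include <string.h>

    /* Visit every real entry in a directory, skipping "." and "..". */
    static void for_each_entry(const char * path, void (*visit)(const char *))
    {
      DIR * dirp = opendir(path);
      if (dirp == NULL)
        return;                       /* caller inspects errno */
      struct dirent * dp;
      while ((dp = readdir(dirp)) != NULL) {
        if (strcmp(".", dp->d_name) != 0 && strcmp("..", dp->d_name) != 0)
          visit(dp->d_name);          /* file or subdirectory to remove */
      }
      closedir(dirp);
    }
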
diff --git a/ndb/src/kernel/error/ErrorReporter.cpp b/ndb/src/kernel/error/ErrorReporter.cpp
index 35cd3f099d9..e4ead4ce34d 100644
--- a/ndb/src/kernel/error/ErrorReporter.cpp
+++ b/ndb/src/kernel/error/ErrorReporter.cpp
@@ -130,7 +130,7 @@ ErrorReporter::formatMessage(ErrorCategory type,
"Date/Time: %s\nType of error: %s\n"
"Message: %s\nFault ID: %d\nProblem data: %s"
"\nObject of reference: %s\nProgramName: %s\n"
- "ProcessID: %d\nTraceFile: %s\n***EOM***\n",
+ "ProcessID: %d\nTraceFile: %s\n%s\n***EOM***\n",
formatTimeStampString() ,
errorType[type],
lookupErrorMessage(faultID),
@@ -139,7 +139,8 @@ ErrorReporter::formatMessage(ErrorCategory type,
objRef,
my_progname,
processId,
- theNameOfTheTraceFile ? theNameOfTheTraceFile : "<no tracefile>");
+ theNameOfTheTraceFile ? theNameOfTheTraceFile : "<no tracefile>",
+ NDB_VERSION_STRING);
// Add trailing blanks to get a fixed lenght of the message
while (strlen(messptr) <= MESSAGE_LENGTH-3){
@@ -237,6 +238,11 @@ WriteMessage(ErrorCategory thrdType, int thrdMessageID,
// Create a new file, and skip the first 69 bytes,
// which are info about the current offset
stream = fopen(theErrorFileName, "w");
+ if(stream == NULL)
+ {
+ fprintf(stderr,"Unable to open error log file: %s\n", theErrorFileName);
+ return -1;
+ }
fprintf(stream, "%s%u%s", "Current byte-offset of file-pointer is: ", 69,
" \n\n\n");
diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp
index ab32de5b9ca..e5e00b78bfa 100644
--- a/ndb/src/mgmapi/mgmapi.cpp
+++ b/ndb/src/mgmapi/mgmapi.cpp
@@ -892,7 +892,10 @@ ndb_mgm_restart2(NdbMgmHandle handle, int no_of_nodes, const int * node_list,
args.put("initialstart", initial);
args.put("nostart", nostart);
const Properties *reply;
+ const int timeout = handle->read_timeout;
+ handle->read_timeout= 5*60*1000; // 5 minutes
reply = ndb_mgm_call(handle, restart_reply, "restart all", &args);
+ handle->read_timeout= timeout;
CHECK_REPLY(reply, -1);
BaseString result;
@@ -925,7 +928,10 @@ ndb_mgm_restart2(NdbMgmHandle handle, int no_of_nodes, const int * node_list,
args.put("nostart", nostart);
const Properties *reply;
+ const int timeout = handle->read_timeout;
+ handle->read_timeout= 5*60*1000; // 5 minutes
reply = ndb_mgm_call(handle, restart_reply, "restart node", &args);
+ handle->read_timeout= timeout;
if(reply != NULL) {
BaseString result;
reply->get("result", result);
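
Both restart paths now save the handle's read timeout, raise it to five minutes around the ndb_mgm_call, and restore it afterwards, since a cluster restart can legitimately take longer than the normal reply timeout. The same save/restore pattern can be expressed as a small RAII guard; a sketch under the assumption that the timeout is an int field reachable by reference (the patch does it inline instead):

    struct TimeoutScope {
      int & slot;          /* reference to the handle's read-timeout field */
      int   saved;
      TimeoutScope(int & timeout_field, int temporary_ms)
        : slot(timeout_field), saved(timeout_field) { slot = temporary_ms; }
      ~TimeoutScope() { slot = saved; }   /* restored on every exit path */
    };

    /* usage: { TimeoutScope t(read_timeout, 5*60*1000);  ...restart call...  } */
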
diff --git a/ndb/src/ndbapi/ClusterMgr.cpp b/ndb/src/ndbapi/ClusterMgr.cpp
index 71938e27037..42b3b01bca1 100644
--- a/ndb/src/ndbapi/ClusterMgr.cpp
+++ b/ndb/src/ndbapi/ClusterMgr.cpp
@@ -66,6 +66,7 @@ ClusterMgr::ClusterMgr(TransporterFacade & _facade):
{
ndbSetOwnVersion();
clusterMgrThreadMutex = NdbMutex_Create();
+ noOfAliveNodes= 0;
noOfConnectedNodes= 0;
theClusterMgrThread= 0;
}
@@ -336,9 +337,9 @@ ClusterMgr::execAPI_REGCONF(const Uint32 * theData){
node.m_state = apiRegConf->nodeState;
if (node.compatible && (node.m_state.startLevel == NodeState::SL_STARTED ||
node.m_state.startLevel == NodeState::SL_SINGLEUSER)){
- node.m_alive = true;
+ set_node_alive(node, true);
} else {
- node.m_alive = false;
+ set_node_alive(node, false);
}//if
node.hbSent = 0;
node.hbCounter = 0;
@@ -361,7 +362,7 @@ ClusterMgr::execAPI_REGREF(const Uint32 * theData){
assert(node.defined == true);
node.compatible = false;
- node.m_alive = false;
+ set_node_alive(node, false);
node.m_state = NodeState::SL_NOTHING;
node.m_info.m_version = ref->version;
@@ -446,7 +447,7 @@ ClusterMgr::reportNodeFailed(NodeId nodeId){
Node & theNode = theNodes[nodeId];
- theNode.m_alive = false;
+ set_node_alive(theNode, false);
theNode.m_info.m_connectCount ++;
if(theNode.connected)
@@ -462,8 +463,8 @@ ClusterMgr::reportNodeFailed(NodeId nodeId){
}
theNode.nfCompleteRep = false;
-
- if(noOfConnectedNodes == 0){
+ if(noOfAliveNodes == 0)
+ {
NFCompleteRep rep;
for(Uint32 i = 1; i<MAX_NODES; i++){
if(theNodes[i].defined && theNodes[i].nfCompleteRep == false){
diff --git a/ndb/src/ndbapi/ClusterMgr.hpp b/ndb/src/ndbapi/ClusterMgr.hpp
index cc3cf66c8aa..d75b820e9cb 100644
--- a/ndb/src/ndbapi/ClusterMgr.hpp
+++ b/ndb/src/ndbapi/ClusterMgr.hpp
@@ -80,6 +80,7 @@ public:
Uint32 getNoOfConnectedNodes() const;
private:
+ Uint32 noOfAliveNodes;
Uint32 noOfConnectedNodes;
Node theNodes[MAX_NODES];
NdbThread* theClusterMgrThread;
@@ -100,6 +101,19 @@ private:
void execAPI_REGREF (const Uint32 * theData);
void execNODE_FAILREP (const Uint32 * theData);
void execNF_COMPLETEREP(const Uint32 * theData);
+
+ inline void set_node_alive(Node& node, bool alive){
+ if(node.m_alive && !alive)
+ {
+ assert(noOfAliveNodes);
+ noOfAliveNodes--;
+ }
+ else if(!node.m_alive && alive)
+ {
+ noOfAliveNodes++;
+ }
+ node.m_alive = alive;
+ }
};
inline
diff --git a/ndb/src/ndbapi/Ndb.cpp b/ndb/src/ndbapi/Ndb.cpp
index c01ce823dc0..5efef1b0112 100644
--- a/ndb/src/ndbapi/Ndb.cpp
+++ b/ndb/src/ndbapi/Ndb.cpp
@@ -764,7 +764,7 @@ Ndb::getAutoIncrementValue(const char* aTableName, Uint32 cacheSize)
DBUG_RETURN(~(Uint64)0);
const NdbTableImpl *table= info->m_table_impl;
Uint64 tupleId = getTupleIdFromNdb(table->m_tableId, cacheSize);
- DBUG_PRINT("info", ("value %u", tupleId));
+ DBUG_PRINT("info", ("value %ul", (ulong) tupleId));
DBUG_RETURN(tupleId);
}
@@ -776,7 +776,7 @@ Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable, Uint32 cacheSize
DBUG_RETURN(~(Uint64)0);
const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
Uint64 tupleId = getTupleIdFromNdb(table->m_tableId, cacheSize);
- DBUG_PRINT("info", ("value %u", tupleId));
+ DBUG_PRINT("info", ("value %ul", (ulong) tupleId));
DBUG_RETURN(tupleId);
}
@@ -796,7 +796,8 @@ Ndb::getTupleIdFromNdb(Uint32 aTableId, Uint32 cacheSize)
if ( theFirstTupleId[aTableId] != theLastTupleId[aTableId] )
{
theFirstTupleId[aTableId]++;
- DBUG_PRINT("info", ("next cached value %u", theFirstTupleId[aTableId]));
+ DBUG_PRINT("info", ("next cached value %ul",
+ (ulong) theFirstTupleId[aTableId]));
DBUG_RETURN(theFirstTupleId[aTableId]);
}
else // theFirstTupleId == theLastTupleId
@@ -817,7 +818,7 @@ Ndb::readAutoIncrementValue(const char* aTableName)
DBUG_RETURN(~(Uint64)0);
}
Uint64 tupleId = readTupleIdFromNdb(table->m_tableId);
- DBUG_PRINT("info", ("value %u", tupleId));
+ DBUG_PRINT("info", ("value %ul", (ulong) tupleId));
DBUG_RETURN(tupleId);
}
@@ -829,7 +830,7 @@ Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable)
DBUG_RETURN(~(Uint64)0);
const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
Uint64 tupleId = readTupleIdFromNdb(table->m_tableId);
- DBUG_PRINT("info", ("value %u", tupleId));
+ DBUG_PRINT("info", ("value %ul", (ulong) tupleId));
DBUG_RETURN(tupleId);
}
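
The DBUG_PRINT changes above cast the Uint64 tuple id to ulong so the argument matches a printf-style conversion; note that "%ul" is parsed as "%u" followed by a literal 'l'. A 64-bit-clean alternative, shown purely as an illustration and not as what the patch does, casts to unsigned long long and prints with "%llu":

    #include <stdio.h>

    static void print_tuple_id(unsigned long long tupleId)
    {
      /* no truncation on platforms where long is 32 bits */
      printf("value %llu\n", tupleId);
    }
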
diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c
index 6bbd38c9bbb..be9e5084a47 100644
--- a/ndb/src/ndbapi/ndberror.c
+++ b/ndb/src/ndbapi/ndberror.c
@@ -173,7 +173,7 @@ ErrorBundle ErrorCodes[] = {
{ 4021, TR, "Out of Send Buffer space in NDB API" },
{ 4022, TR, "Out of Send Buffer space in NDB API" },
{ 4032, TR, "Out of Send Buffer space in NDB API" },
-
+ { 288, TR, "Out of index operations in transaction coordinator (increase MaxNoOfConcurrentIndexOperations)" },
/**
* InsufficientSpace
*/
diff --git a/ndb/test/include/CpcClient.hpp b/ndb/test/include/CpcClient.hpp
index 1655bc57b56..8d8e079d219 100644
--- a/ndb/test/include/CpcClient.hpp
+++ b/ndb/test/include/CpcClient.hpp
@@ -56,6 +56,7 @@ public:
BaseString m_stdout;
BaseString m_stderr;
BaseString m_ulimit;
+ BaseString m_shutdown_options;
};
private:
diff --git a/ndb/test/ndbapi/testNodeRestart.cpp b/ndb/test/ndbapi/testNodeRestart.cpp
index 1ce934a19ca..6ef3da2d760 100644
--- a/ndb/test/ndbapi/testNodeRestart.cpp
+++ b/ndb/test/ndbapi/testNodeRestart.cpp
@@ -359,7 +359,7 @@ int runLateCommit(NDBT_Context* ctx, NDBT_Step* step){
if(hugoOps.startTransaction(pNdb) != 0)
return NDBT_FAILED;
- if(hugoOps.pkUpdateRecord(pNdb, 1) != 0)
+ if(hugoOps.pkUpdateRecord(pNdb, 1, 128) != 0)
return NDBT_FAILED;
if(hugoOps.execute_NoCommit(pNdb) != 0)
diff --git a/ndb/test/run-test/Makefile.am b/ndb/test/run-test/Makefile.am
index 1067328dcc3..60d64a7697f 100644
--- a/ndb/test/run-test/Makefile.am
+++ b/ndb/test/run-test/Makefile.am
@@ -6,7 +6,12 @@ include $(top_srcdir)/ndb/config/type_util.mk.am
include $(top_srcdir)/ndb/config/type_mgmapiclient.mk.am
test_PROGRAMS = atrt
-test_DATA=daily-basic-tests.txt daily-devel-tests.txt 16node-tests.txt
+test_DATA=daily-basic-tests.txt daily-devel-tests.txt 16node-tests.txt \
+ conf-daily-basic-ndbmaster.txt \
+ conf-daily-basic-shark.txt \
+ conf-daily-devel-ndbmaster.txt \
+ conf-daily-sql-ndbmaster.txt \
+ conf-daily-basic-dl145a.txt
test_SCRIPTS=atrt-analyze-result.sh atrt-gather-result.sh atrt-setup.sh \
atrt-clear-result.sh make-config.sh make-index.sh make-html-reports.sh
diff --git a/ndb/test/run-test/conf-daily-basic-dl145a.txt b/ndb/test/run-test/conf-daily-basic-dl145a.txt
new file mode 100644
index 00000000000..d8cf8d34d82
--- /dev/null
+++ b/ndb/test/run-test/conf-daily-basic-dl145a.txt
@@ -0,0 +1,19 @@
+baseport: 14000
+basedir: /home/ndbdev/autotest/run
+mgm: CHOOSE_host1
+ndb: CHOOSE_host2 CHOOSE_host3
+api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
+-- cluster config
+[DB DEFAULT]
+NoOfReplicas: 2
+IndexMemory: 100M
+DataMemory: 300M
+BackupMemory: 64M
+MaxNoOfConcurrentScans: 100
+DataDir: .
+FileSystemPath: /home/ndbdev/autotest/run
+
+[MGM DEFAULT]
+PortNumber: 14000
+ArbitrationRank: 1
+DataDir: .
diff --git a/ndb/test/run-test/conf-daily-basic-ndbmaster.txt b/ndb/test/run-test/conf-daily-basic-ndbmaster.txt
new file mode 100644
index 00000000000..bcd809593f3
--- /dev/null
+++ b/ndb/test/run-test/conf-daily-basic-ndbmaster.txt
@@ -0,0 +1,19 @@
+baseport: 14000
+basedir: /space/autotest
+mgm: CHOOSE_host1
+ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
+api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
+-- cluster config
+[DB DEFAULT]
+NoOfReplicas: 2
+IndexMemory: 100M
+DataMemory: 300M
+BackupMemory: 64M
+MaxNoOfConcurrentScans: 100
+DataDir: .
+FileSystemPath: /space/autotest/run
+
+[MGM DEFAULT]
+PortNumber: 14000
+ArbitrationRank: 1
+DataDir: .
diff --git a/ndb/test/run-test/conf-daily-basic-shark.txt b/ndb/test/run-test/conf-daily-basic-shark.txt
new file mode 100644
index 00000000000..6d1f8b64f44
--- /dev/null
+++ b/ndb/test/run-test/conf-daily-basic-shark.txt
@@ -0,0 +1,19 @@
+baseport: 14000
+basedir: /space/autotest
+mgm: CHOOSE_host1
+ndb: CHOOSE_host1 CHOOSE_host1
+api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
+-- cluster config
+[DB DEFAULT]
+NoOfReplicas: 2
+IndexMemory: 100M
+DataMemory: 300M
+BackupMemory: 64M
+MaxNoOfConcurrentScans: 100
+DataDir: .
+FileSystemPath: /space/autotest/run
+
+[MGM DEFAULT]
+PortNumber: 14000
+ArbitrationRank: 1
+DataDir: .
diff --git a/ndb/test/run-test/conf-daily-devel-ndbmaster.txt b/ndb/test/run-test/conf-daily-devel-ndbmaster.txt
new file mode 100644
index 00000000000..8b340e6a39d
--- /dev/null
+++ b/ndb/test/run-test/conf-daily-devel-ndbmaster.txt
@@ -0,0 +1,19 @@
+baseport: 16000
+basedir: /space/autotest
+mgm: CHOOSE_host1
+ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
+api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
+-- cluster config
+[DB DEFAULT]
+NoOfReplicas: 2
+IndexMemory: 100M
+DataMemory: 300M
+BackupMemory: 64M
+MaxNoOfConcurrentScans: 100
+DataDir: .
+FileSystemPath: /space/autotest/run
+
+[MGM DEFAULT]
+PortNumber: 16000
+ArbitrationRank: 1
+DataDir: .
diff --git a/ndb/test/run-test/conf-daily-sql-ndbmaster.txt b/ndb/test/run-test/conf-daily-sql-ndbmaster.txt
new file mode 100644
index 00000000000..0d6a99f8d48
--- /dev/null
+++ b/ndb/test/run-test/conf-daily-sql-ndbmaster.txt
@@ -0,0 +1,20 @@
+baseport: 16000
+basedir: /space/autotest
+mgm: CHOOSE_host1
+ndb: CHOOSE_host2 CHOOSE_host3
+mysqld: CHOOSE_host1 CHOOSE_host4
+mysql: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4
+-- cluster config
+[DB DEFAULT]
+NoOfReplicas: 2
+IndexMemory: 100M
+DataMemory: 300M
+BackupMemory: 64M
+MaxNoOfConcurrentScans: 100
+DataDir: .
+FileSystemPath: /space/autotest/run
+
+[MGM DEFAULT]
+PortNumber: 16000
+ArbitrationRank: 1
+DataDir: .
diff --git a/ndb/test/run-test/main.cpp b/ndb/test/run-test/main.cpp
index 02c2cc862a3..0b0b7472a19 100644
--- a/ndb/test/run-test/main.cpp
+++ b/ndb/test/run-test/main.cpp
@@ -116,10 +116,7 @@ main(int argc, const char ** argv){
*/
if(restart){
g_logger.info("(Re)starting ndb processes");
- if(!stop_processes(g_config, atrt_process::NDB_MGM))
- goto end;
-
- if(!stop_processes(g_config, atrt_process::NDB_DB))
+ if(!stop_processes(g_config, ~0))
goto end;
if(!start_processes(g_config, atrt_process::NDB_MGM))
@@ -142,6 +139,9 @@ main(int argc, const char ** argv){
goto end;
started:
+ if(!start_processes(g_config, p_servers))
+ goto end;
+
g_logger.info("Ndb start completed");
}
@@ -158,9 +158,6 @@ main(int argc, const char ** argv){
if(!setup_test_case(g_config, test_case))
goto end;
- if(!start_processes(g_config, p_servers))
- goto end;
-
if(!start_processes(g_config, p_clients))
goto end;
@@ -201,9 +198,6 @@ main(int argc, const char ** argv){
if(!stop_processes(g_config, p_clients))
goto end;
- if(!stop_processes(g_config, p_servers))
- goto end;
-
if(!gather_result(g_config, &result))
goto end;
@@ -454,6 +448,7 @@ setup_config(atrt_config& config){
proc.m_proc.m_runas = proc.m_host->m_user;
proc.m_proc.m_ulimit = "c:unlimited";
proc.m_proc.m_env.assfmt("MYSQL_BASE_DIR=%s", dir.c_str());
+ proc.m_proc.m_shutdown_options = "";
proc.m_hostname = proc.m_host->m_hostname;
proc.m_ndb_mgm_port = g_default_base_port;
if(split1[0] == "mgm"){
@@ -476,21 +471,19 @@ setup_config(atrt_config& config){
proc.m_proc.m_path.assign(dir).append("/libexec/mysqld");
proc.m_proc.m_args = "--core-file --ndbcluster";
proc.m_proc.m_cwd.appfmt("%d.mysqld", index);
- if(mysql_port_offset > 0 || g_mysqld_use_base){
- // setup mysql specific stuff
- const char * basedir = proc.m_proc.m_cwd.c_str();
- proc.m_proc.m_args.appfmt("--datadir=%s", basedir);
- proc.m_proc.m_args.appfmt("--pid-file=%s/mysql.pid", basedir);
- proc.m_proc.m_args.appfmt("--socket=%s/mysql.sock", basedir);
- proc.m_proc.m_args.appfmt("--port=%d",
- g_default_base_port-(++mysql_port_offset));
- }
+ proc.m_proc.m_shutdown_options = "SIGKILL"; // not nice
} else if(split1[0] == "api"){
proc.m_type = atrt_process::NDB_API;
proc.m_proc.m_name.assfmt("%d-%s", index, "ndb_api");
proc.m_proc.m_path = "";
proc.m_proc.m_args = "";
proc.m_proc.m_cwd.appfmt("%d.ndb_api", index);
+ } else if(split1[0] == "mysql"){
+ proc.m_type = atrt_process::MYSQL_CLIENT;
+ proc.m_proc.m_name.assfmt("%d-%s", index, "mysql");
+ proc.m_proc.m_path = "";
+ proc.m_proc.m_args = "";
+ proc.m_proc.m_cwd.appfmt("%d.mysql", index);
} else {
g_logger.critical("%s:%d: Unhandled process type: %s",
g_process_config_filename, lineno,
@@ -913,6 +906,11 @@ read_test_case(FILE * file, atrt_testcase& tc, int& line){
tc.m_report= true;
else
tc.m_report= false;
+
+ if(p.get("run-all", &mt) && strcmp(mt, "yes") == 0)
+ tc.m_run_all= true;
+ else
+ tc.m_run_all= false;
return true;
}
@@ -928,16 +926,17 @@ setup_test_case(atrt_config& config, const atrt_testcase& tc){
size_t i = 0;
for(; i<config.m_processes.size(); i++){
atrt_process & proc = config.m_processes[i];
- if(proc.m_type == atrt_process::NDB_API){
+ if(proc.m_type == atrt_process::NDB_API || proc.m_type == atrt_process::MYSQL_CLIENT){
proc.m_proc.m_path.assfmt("%s/bin/%s", proc.m_host->m_base_dir.c_str(),
tc.m_command.c_str());
proc.m_proc.m_args.assign(tc.m_args);
- break;
+ if(!tc.m_run_all)
+ break;
}
}
for(i++; i<config.m_processes.size(); i++){
atrt_process & proc = config.m_processes[i];
- if(proc.m_type == atrt_process::NDB_API){
+ if(proc.m_type == atrt_process::NDB_API || proc.m_type == atrt_process::MYSQL_CLIENT){
proc.m_proc.m_path.assign("");
proc.m_proc.m_args.assign("");
}
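
The atrt changes above add a MYSQL_CLIENT process type, stop every process type on --restart, start the server processes once at cluster start instead of per test case, and honour a new per-test "run-all" flag: with it, the test command is installed on every api/mysql client process rather than only the first one found. A reduced sketch of that assignment loop (illustrative types, not the atrt structures):

    #include <string>
    #include <vector>

    struct ProcSketch { bool is_client; std::string path; };

    /* Give the test command to the first client, or to all of them when
     * run_all is set; any remaining clients are left idle. */
    static void assign_command(std::vector<ProcSketch> & procs,
                               const std::string & command, bool run_all)
    {
      bool assigned = false;
      for (size_t i = 0; i < procs.size(); i++) {
        if (!procs[i].is_client)
          continue;
        if (!assigned || run_all) {
          procs[i].path = command;
          assigned = true;
        } else {
          procs[i].path.clear();
        }
      }
    }
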
diff --git a/ndb/test/run-test/make-config.sh b/ndb/test/run-test/make-config.sh
index 5394b0654d4..1321ce4e9f0 100755
--- a/ndb/test/run-test/make-config.sh
+++ b/ndb/test/run-test/make-config.sh
@@ -1,465 +1,101 @@
#!/bin/sh
-# NAME
-# make-config.sh - Makes a config file for mgm server
-#
-# SYNOPSIS
-# make-config.sh [ -t <template> ] [-s] [ -m <machine conf> [ -d <directory> ]
-#
-# DESCRIPTION
-#
-# OPTIONS
-#
-# EXAMPLES
-#
-#
-# ENVIRONMENT
-# NDB_PROJ_HOME Home dir for ndb
-#
-# FILES
-# $NDB_PROJ_HOME/lib/funcs.sh general shell script functions
-#
-#
-# SEE ALSO
-#
-# DIAGNOSTICTS
-#
-# VERSION
-# 1.0
-# 1.1 021112 epesson: Adapted for new mgmt server in NDB 2.00
-#
-# AUTHOR
-# Jonas Oreland
-#
-# CHANGES
-# also generate ndbnet config
-#
-progname=`basename $0`
-synopsis="make-config.sh [ -t template ] [ -m <machine conf> ] [ -d <dst directory> ][-s] [<mgm host>]"
+baseport=""
+basedir=""
+proc_no=1
+node_id=1
-#: ${NDB_PROJ_HOME:?} # If undefined, exit with error message
+d_file=/tmp/d.$$
+dir_file=/tmp/dirs.$$
+config_file=/tmp/config.$$
+cluster_file=/tmp/cluster.$$
-#: ${NDB_LOCAL_BUILD_OPTIONS:=--} # If undef, set to --. Keeps getopts happy.
- # You may have to experiment a bit
- # to get quoting right (if you need it).
-
-
-#. $NDB_PROJ_HOME/lib/funcs.sh # Load some good stuff
-trace() {
- echo $* 1>&2
-}
-syndie() {
- trace $*
- exit 1
-}
-
-# defaults for options related variables
-#
-
-mgm_nodes=0
-ndb_nodes=0
-api_nodes=0
-uniq_id=$$.$$
-own_host=`hostname`
-dst_dir=""
-template=/dev/null
-machines=/dev/null
-verbose=yes
-
-# used if error when parsing the options environment variable
-#
-env_opterr="options environment variable: <<$options>>"
-
-# Option parsing, for the options variable as well as the command line.
-#
-# We want to be able to set options in an environment variable,
-# as well as on the command line. In order not to have to repeat
-# the same getopts information twice, we loop two times over the
-# getopts while loop. The first time, we process options from
-# the options environment variable, the second time we process
-# options from the command line.
-#
-# The things to change are the actual options and what they do.
-#
-add_node(){
- no=$1; shift
+add_procs(){
type=$1; shift
- echo $* | awk 'BEGIN{FS=":";}{h=$1; if(h=="localhost") h="'$own_host'";
- printf("%s_%d_host=%s\n", "'$type'", "'$no'", h);
- if(NF>1 && $2!="") printf("%s_%d_port=%d\n",
- "'$type'", "'$no'", $2);
- if(NF>2 && $3!="") printf("%s_%d_dir=%s\n",
- "'$type'", "'$no'", $3);
- }'
-}
-
-
-add_mgm_node(){
- mgm_nodes=`cat /tmp/mgm_nodes.$uniq_id | grep "_host=" | wc -l`
- mgm_nodes=`expr $mgm_nodes + 1`
- while [ $# -gt 0 ]
- do
- add_node ${mgm_nodes} mgm_node $1 >> /tmp/mgm_nodes.$uniq_id
- shift
- mgm_nodes=`expr $mgm_nodes + 1`
- done
-}
-
-add_ndb_node(){
- ndb_nodes=`cat /tmp/ndb_nodes.$uniq_id | grep "_host=" | wc -l`
- ndb_nodes=`expr $ndb_nodes + 1`
- while [ $# -gt 0 ]
- do
- add_node ${ndb_nodes} ndb_node $1 >> /tmp/ndb_nodes.$uniq_id
- shift
- ndb_nodes=`expr $ndb_nodes + 1`
- done
-}
-
-add_api_node(){
- api_nodes=`cat /tmp/api_nodes.$uniq_id | grep "_host=" |wc -l`
- api_nodes=`expr $api_nodes + 1`
- while [ $# -gt 0 ]
- do
- add_node ${api_nodes} api_node $1 >> /tmp/api_nodes.$uniq_id
- shift
- api_nodes=`expr $api_nodes + 1`
- done
+ while [ $# -ne 0 ]
+ do
+ add_proc $type $1
+ shift
+ done
}
-rm -rf /tmp/mgm_nodes.$uniq_id ; touch /tmp/mgm_nodes.$uniq_id
-rm -rf /tmp/ndb_nodes.$uniq_id ; touch /tmp/ndb_nodes.$uniq_id
-rm -rf /tmp/api_nodes.$uniq_id ; touch /tmp/api_nodes.$uniq_id
-
-for optstring in "$options" "" # 1. options variable 2. cmd line
-do
-
- while getopts d:m:t:n:o:a:b:p:s i $optstring # optstring empty => no arg => cmd line
- do
- case $i in
-
- q) verbose="";; # echo important things
- t) template=$OPTARG;; # Template
- d) dst_dir=$OPTARG;; # Destination directory
- m) machines=$OPTARG;; # Machine configuration
- s) mgm_start=yes;; # Make mgm start script
- \?) syndie $env_opterr;; # print synopsis and exit
-
+add_proc (){
+ dir=""
+ conf=""
+ case $type in
+ mgm)
+ dir="ndb_mgmd"
+ conf="[ndb_mgmd]\nId: $node_id\nHostName: $2\n"
+ node_id=`expr $node_id + 1`
+ ;;
+ api)
+ dir="ndb_api"
+ conf="[api]\nId: $node_id\nHostName: $2\n"
+ node_id=`expr $node_id + 1`
+ ;;
+ ndb)
+ dir="ndbd"
+ conf="[ndbd]\nId: $node_id\nHostName: $2\n"
+ node_id=`expr $node_id + 1`
+ ;;
+ mysqld)
+ dir="mysqld"
+ conf="[mysqld]\nId: $node_id\nHostName: $2\n"
+ node_id=`expr $node_id + 1`
+ ;;
+ mysql)
+ dir="mysql"
+ ;;
esac
- done
-
- [ -n "$optstring" ] && OPTIND=1 # Reset for round 2, cmdline options
-
- env_opterr= # Round 2 should not use the value
-
-done
-shift `expr $OPTIND - 1`
-
-if [ -z "$dst_dir" ]
-then
- verbose=
-fi
-
-skip(){
- no=$1; shift
- shift $no
- echo $*
+ dir="$proc_no.$dir"
+ proc_no=`expr $proc_no + 1`
+ echo -e $dir >> $dir_file
+ if [ "$conf" ]
+ then
+ echo -e $conf >> $config_file
+ fi
}
-# --- option parsing done ---
-grep "^ndb: " $machines | while read node
-do
- node=`skip 1 $node`
- add_ndb_node $node
-done
-
-grep "^api: " $machines | while read node
-do
- node=`skip 1 $node`
- add_api_node $node
-done
-grep "^mgm: " $machines | while read node
+cnf=/dev/null
+cat $1 | while read line
do
- node=`skip 1 $node`
- add_mgm_node $node
+ case $line in
+ baseport:*) baseport=`echo $line | sed 's/baseport[ ]*:[ ]*//g'`;;
+ basedir:*) basedir=`echo $line | sed 's/basedir[ ]*:[ ]*//g'`;;
+ mgm:*) add_procs mgm `echo $line | sed 's/mgm[ ]*:[ ]*//g'`;;
+ api:*) add_procs api `echo $line | sed 's/api[ ]*:[ ]*//g'`;;
+ ndb:*) add_procs ndb `echo $line | sed 's/ndb[ ]*:[ ]*//g'`;;
+ mysqld:*) add_procs mysqld `echo $line | sed 's/mysqld[ ]*:[ ]*//g'`;;
+ mysql:*) add_procs mysql `echo $line | sed 's/mysql[ ]*:[ ]*//g'`;;
+ "-- cluster config")
+ if [ "$cnf" = "/dev/null" ]
+ then
+ cnf=$cluster_file
+ else
+ cnf=/dev/null
+ fi
+ line="";;
+ *) echo $line >> $cnf; line="";;
+ esac
+ if [ "$line" ]
+ then
+ echo $line >> $d_file
+ fi
done
-tmp=`grep "^baseport: " $machines | tail -1 | cut -d ":" -f 2`
-if [ "$tmp" ]
-then
- baseport=`echo $tmp`
-else
- syndie "Unable to find baseport"
-fi
+cat $dir_file | xargs mkdir -p
-trim(){
- echo $*
-}
-tmp=`grep "^basedir: " $machines | tail -1 | cut -d ":" -f 2`
-if [ "$tmp" ]
-then
- basedir=`trim $tmp`
-fi
-
-# -- Load enviroment --
-ndb_nodes=`cat /tmp/ndb_nodes.$uniq_id | grep "_host=" | wc -l`
-api_nodes=`cat /tmp/api_nodes.$uniq_id | grep "_host=" | wc -l`
-mgm_nodes=`cat /tmp/mgm_nodes.$uniq_id | grep "_host=" | wc -l`
-. /tmp/ndb_nodes.$uniq_id
-. /tmp/api_nodes.$uniq_id
-. /tmp/mgm_nodes.$uniq_id
-rm -f /tmp/ndb_nodes.$uniq_id /tmp/api_nodes.$uniq_id /tmp/mgm_nodes.$uniq_id
-
-# -- Verify
-trace "Verifying arguments"
-
-if [ ! -r $template ]
-then
- syndie "Unable to read template file: $template"
-fi
-
-if [ $ndb_nodes -le 0 ]
-then
- syndie "No ndb nodes specified"
-fi
-
-if [ $api_nodes -le 0 ]
-then
- syndie "No api nodes specified"
-fi
-
-if [ $mgm_nodes -gt 1 ]
-then
- syndie "More than one mgm node specified"
-fi
-
-if [ $mgm_nodes -eq 0 ]
-then
- trace "No managment server specified using `hostname`"
- mgm_nodes=1
- mgm_node_1=`hostname`
-fi
-
-if [ -n "$dst_dir" ]
-then
- mkdir -p $dst_dir
- if [ ! -d $dst_dir ]
+if [ -f $cluster_file ]
then
- syndie "Unable to create dst dir: $dst_dir"
- fi
- DST=/tmp/$uniq_id
+ cat $cluster_file $config_file >> /tmp/config2.$$
+ mv /tmp/config2.$$ $config_file
fi
-# --- option verifying done ---
-
-# Find uniq computers
-i=1
-while [ $i -le $mgm_nodes ]
-do
- echo `eval echo "\$"mgm_node_${i}_host` >> /tmp/hosts.$uniq_id
- i=`expr $i + 1`
-done
-
-i=1
-while [ $i -le $ndb_nodes ]
-do
- echo `eval echo "\$"ndb_node_${i}_host` >> /tmp/hosts.$uniq_id
- i=`expr $i + 1`
-done
-
-i=1
-while [ $i -le $api_nodes ]
-do
- echo `eval echo "\$"api_node_${i}_host` >> /tmp/hosts.$uniq_id
- i=`expr $i + 1`
+for i in `find . -type d -name '*.ndb_mgmd'`
+ do
+ cp $config_file $i/config.ini
done
-sort -u -o /tmp/hosts.$uniq_id /tmp/hosts.$uniq_id
-
-get_computer_id(){
- grep -w -n $1 /tmp/hosts.$uniq_id | cut -d ":" -f 1
-}
-
-get_mgm_computer_id(){
- a=`eval echo "\$"mgm_node_${1}_host`
- get_computer_id $a
-}
-
-get_ndb_computer_id(){
- a=`eval echo "\$"ndb_node_${1}_host`
- get_computer_id $a
-}
-
-get_api_computer_id(){
- a=`eval echo "\$"api_node_${1}_host`
- get_computer_id $a
-}
-
-# -- Write config files --
-
-mgm_port=$baseport
-
-(
- i=1
- #echo "COMPUTERS"
- cat /tmp/hosts.$uniq_id | while read host
- do
- echo "[COMPUTER]"
- echo "Id: $i"
- echo "ByteOrder: Big"
- echo "HostName: $host"
- echo
- i=`expr $i + 1`
- done
-
- node_id=1
- echo
-
- # Mgm process
- echo
- echo "[MGM]"
- echo "Id: $node_id"
- echo "ExecuteOnComputer: `get_mgm_computer_id 1`"
- echo "PortNumber: $mgm_port"
- node_id=`expr $node_id + 1`
-
- # Ndb processes
- i=1
- ndb_nodes=`trim $ndb_nodes`
- while [ $i -le $ndb_nodes ]
- do
- echo
- echo "[DB]"
- echo "Id: $node_id"
- echo "ExecuteOnComputer: `get_ndb_computer_id $i`"
- echo "FileSystemPath: $basedir/run/node-${node_id}-fs"
- i=`expr $i + 1`
- node_id=`expr $node_id + 1`
- done
-
- # API processes
- i=1
- while [ $i -le $api_nodes ]
- do
- echo
- echo "[API]"
- echo "Id: $node_id"
- echo "ExecuteOnComputer: `get_api_computer_id $i`"
- i=`expr $i + 1`
- node_id=`expr $node_id + 1`
- done
-
- # Connections
- current_port=`expr $mgm_port + 1`
- echo
-
- # Connect Mgm with all ndb-nodes
- i=1
- while [ $i -le $ndb_nodes ]
- do
- echo
- echo "[TCP]"
- echo "NodeId1: 1"
- echo "NodeId2: `expr $i + 1`"
- echo "PortNumber: $current_port"
- i=`expr $i + 1`
- current_port=`expr $current_port + 1`
- done
-
- # Connect All ndb processes with all ndb processes
- i=1
- while [ $i -le $ndb_nodes ]
- do
- j=`expr $i + 1`
- while [ $j -le $ndb_nodes ]
- do
- echo
- echo "[TCP]"
- echo "NodeId1: `expr $i + 1`"
- echo "NodeId2: `expr $j + 1`"
- echo "PortNumber: $current_port"
- j=`expr $j + 1`
- current_port=`expr $current_port + 1`
- done
- i=`expr $i + 1`
- done
-
- # Connect all ndb-nodes with all api nodes
- i=1
- while [ $i -le $ndb_nodes ]
- do
- j=1
- while [ $j -le $api_nodes ]
- do
- echo
- echo "[TCP]"
- echo "NodeId1: `expr $i + 1`"
- echo "NodeId2: `expr $j + $ndb_nodes + 1`"
- echo "PortNumber: $current_port"
- j=`expr $j + 1`
- current_port=`expr $current_port + 1`
- done
- i=`expr $i + 1`
- done
- echo
-) > $DST
-
-trace "Init config file done"
-
-if [ -z "$dst_dir" ]
-then
- cat $DST
- rm -f $DST
- rm -f /tmp/hosts.$uniq_id
- exit 0
-fi
-
-###
-# Create Ndb.cfg files
-
-# nodeid=2;host=localhost:2200
-
-# Mgm node
-mkcfg(){
- mkdir -p $dst_dir/${2}.ndb_${1}
- (
- echo "OwnProcessId $2"
- echo "host://${mgm_node_1_host}:${mgm_port}"
- ) > $dst_dir/${2}.ndb_${1}/Ndb.cfg
- if [ $1 = "db" ]
- then
- mkdir $dst_dir/node-${2}-fs
- fi
-}
-
-mkcfg mgm 1
-cat $DST > $dst_dir/1.ndb_mgm/initconfig.txt
-
-trace "Creating Ndb.cfg for ndb nodes"
-
-current_node=2
-i=1
-while [ $i -le $ndb_nodes ]
-do
- mkcfg db ${current_node}
- i=`expr $i + 1`
- current_node=`expr $current_node + 1`
-done
-
-trace "Creating Ndb.cfg for api nodes"
-
-i=1
-while [ $i -le $api_nodes ]
-do
- mkcfg api ${current_node}
- i=`expr $i + 1`
- current_node=`expr $current_node + 1`
-done
-
-rm -f $DST
-rm -f /tmp/hosts.$uniq_id
-
-
-exit 0
-# vim: set sw=4:
+mv $d_file d.txt
+rm -f $config_file $dir_file $cluster_file
diff --git a/ndb/test/run-test/ndb-autotest.sh b/ndb/test/run-test/ndb-autotest.sh
index 397df97d52f..f1c83f079cd 100755
--- a/ndb/test/run-test/ndb-autotest.sh
+++ b/ndb/test/run-test/ndb-autotest.sh
@@ -1,7 +1,7 @@
#!/bin/sh
save_args=$*
-VERSION="ndb-autotest.sh version 1.0"
+VERSION="ndb-autotest.sh version 1.04"
DATE=`date '+%Y-%m-%d'`
export DATE
@@ -71,11 +71,18 @@ then
cd $dst_place
rm -rf $run_dir/*
aclocal; autoheader; autoconf; automake
- (cd innobase; aclocal; autoheader; autoconf; automake)
- (cd bdb/dist; sh s_all)
+ if [ -d storage ]
+ then
+ (cd storage/innobase; aclocal; autoheader; autoconf; automake)
+ (cd storage/bdb/dist; sh s_all)
+ else
+ (cd innobase; aclocal; autoheader; autoconf; automake)
+ (cd bdb/dist; sh s_all)
+ fi
eval $configure --prefix=$run_dir
make
make install
+ (cd $run_dir; ./bin/mysql_install_db)
fi
###
@@ -103,7 +110,9 @@ fi
test_dir=$run_dir/mysql-test/ndb
atrt=$test_dir/atrt
html=$test_dir/make-html-reports.sh
-PATH=$test_dir:$PATH
+mkconfig=$run_dir/mysql-test/ndb/make-config.sh
+
+PATH=$run_dir/bin:$test_dir:$PATH
export PATH
filter(){
@@ -125,20 +134,16 @@ hosts=`cat /tmp/hosts.$DATE`
if [ "$deploy" ]
then
- (cd / && tar cfz /tmp/build.$DATE.tgz $run_dir )
- for i in $hosts
- do
- ok=0
- scp /tmp/build.$DATE.tgz $i:/tmp/build.$DATE.$$.tgz && \
- ssh $i "rm -rf /space/autotest/*" && \
- ssh $i "cd / && tar xfz /tmp/build.$DATE.$$.tgz" && \
- ssh $i "rm /tmp/build.$DATE.$$.tgz" && ok=1
- if [ $ok -eq 0 ]
- then
- echo "$i failed during scp/ssh, excluding"
- echo $i >> /tmp/failed.$DATE
- fi
- done
+ for i in $hosts
+ do
+ rsync -a --delete --force --ignore-errors $run_dir/ $i:$run_dir
+ ok=$?
+ if [ $ok -ne 0 ]
+ then
+ echo "$i failed during rsync, excluding"
+ echo $i >> /tmp/failed.$DATE
+ fi
+ done
fi
rm -f /tmp/build.$DATE.tgz
@@ -170,6 +175,18 @@ choose(){
cat $TMP1
rm -f $TMP1
}
+
+choose_conf(){
+ host=`hostname -s`
+ if [ -f $test_dir/conf-$1-$host.txt ]
+ then
+ echo "$test_dir/conf-$1-$host.txt"
+ elif [ -f $test_dir/conf-$1.txt ]
+ then
+ echo "$test_dir/conf-$1.txt"
+ fi
+}
+
start(){
rm -rf report.txt result* log.txt
$atrt -v -v -r -R --log-file=log.txt --testcase-file=$test_dir/$2-tests.txt &
@@ -186,11 +203,17 @@ start(){
p2=`pwd`
cd ..
tar cfz /tmp/res.$$.tgz `basename $p2`/$DATE
- scp /tmp/res.$$.tgz $result_host:$result_path
- ssh $result_host "cd $result_path && tar xfz res.$$.tgz && rm -f res.$$.tgz"
+ scp /tmp/res.$$.tgz $result_host:$result_path/res.$DATE.`hostname -s`.$2.$$.tgz
rm -f /tmp/res.$$.tgz
}
+count_hosts(){
+ cnt=`grep "CHOOSE_host" $1 |
+ awk '{for(i=1; i<=NF;i++) if(match($i, "CHOOSE_host") > 0) print $i;}' |
+ sort | uniq | wc -l`
+ echo $cnt
+}
+
p=`pwd`
for dir in $RUN
do
@@ -199,10 +222,11 @@ do
run_dir=$base_dir/run-$dir-mysql-$clone-$target
res_dir=$base_dir/result-$dir-mysql-$clone-$target/$DATE
- mkdir -p $res_dir
- rm -rf $res_dir/*
+ mkdir -p $run_dir $res_dir
+ rm -rf $res_dir/* $run_dir/*
- count=`grep -c "COMPUTER" $run_dir/1.ndb_mgmd/initconfig.template`
+ conf=`choose_conf $dir`
+ count=`count_hosts $conf`
avail_hosts=`filter /tmp/filter_hosts.$$ $hosts`
avail=`echo $avail_hosts | wc -w`
if [ $count -gt $avail ]
@@ -212,12 +236,12 @@ do
break;
fi
- run_hosts=`echo $avail_hosts| awk '{for(i=1;i<='$count';i++)print $i;}'`
- choose $run_dir/d.template $run_hosts > $run_dir/d.txt
- choose $run_dir/1.ndb_mgmd/initconfig.template $run_hosts > $run_dir/1.ndb_mgmd/config.ini
+ run_hosts=`echo $avail_hosts|awk '{for(i=1;i<='$count';i++)print $i;}'`
echo $run_hosts >> /tmp/filter_hosts.$$
-
+
cd $run_dir
+ choose $conf $run_hosts > d.tmp
+ $mkconfig d.tmp
start $dir-mysql-$clone-$target $dir $res_dir &
done
cd $p
diff --git a/ndb/test/run-test/run-test.hpp b/ndb/test/run-test/run-test.hpp
index ff7f916d4ef..7011aec33d3 100644
--- a/ndb/test/run-test/run-test.hpp
+++ b/ndb/test/run-test/run-test.hpp
@@ -69,6 +69,7 @@ struct atrt_config {
struct atrt_testcase {
bool m_report;
+ bool m_run_all;
time_t m_max_time;
BaseString m_command;
BaseString m_args;
diff --git a/ndb/test/src/CpcClient.cpp b/ndb/test/src/CpcClient.cpp
index 1d1b4fcb977..4d06b4a7ff5 100644
--- a/ndb/test/src/CpcClient.cpp
+++ b/ndb/test/src/CpcClient.cpp
@@ -282,6 +282,7 @@ convert(const Properties & src, SimpleCpcClient::Process & dst){
b &= src.get("stdout", dst.m_stdout);
b &= src.get("stderr", dst.m_stderr);
b &= src.get("ulimit", dst.m_ulimit);
+ b &= src.get("shutdown", dst.m_shutdown_options);
return b;
}
@@ -305,6 +306,7 @@ convert(const SimpleCpcClient::Process & src, Properties & dst ){
b &= dst.put("stdout", src.m_stdout.c_str());
b &= dst.put("stderr", src.m_stderr.c_str());
b &= dst.put("ulimit", src.m_ulimit.c_str());
+ b &= dst.put("shutdown", src.m_shutdown_options.c_str());
return b;
}
@@ -372,6 +374,7 @@ SimpleCpcClient::list_processes(Vector<Process> &procs, Properties& reply) {
CPC_ARG("stdout",String, Mandatory, "Redirect stdout"),
CPC_ARG("stderr",String, Mandatory, "Redirect stderr"),
CPC_ARG("ulimit",String, Mandatory, "ulimit"),
+ CPC_ARG("shutdown",String, Mandatory, "shutdown"),
CPC_END()
};
diff --git a/ndb/test/src/NDBT_ResultRow.cpp b/ndb/test/src/NDBT_ResultRow.cpp
index 8e92a57d2e4..ab8d7b07ea1 100644
--- a/ndb/test/src/NDBT_ResultRow.cpp
+++ b/ndb/test/src/NDBT_ResultRow.cpp
@@ -116,8 +116,12 @@ BaseString NDBT_ResultRow::c_str() const {
NdbOut &
operator << (NdbOut& ndbout, const NDBT_ResultRow & res) {
- for(int i = 0; i<res.cols; i++)
- ndbout << *(res.data[i]) << "\t";
+ if (res.cols != 0)
+ {
+ ndbout << *(res.data[0]);
+ for(int i = 1; i<res.cols; i++)
+ ndbout << res.ad << *(res.data[i]);
+ }
return ndbout;
}
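
The operator<< change above prints the first column bare and prefixes each subsequent column with the row's separator (res.ad), instead of appending a tab after every column, which avoids a trailing separator on every row. The general pattern, as a standalone sketch:

    #include <iostream>
    #include <string>
    #include <vector>

    static void print_row(std::ostream & out,
                          const std::vector<std::string> & cols,
                          const char * sep)
    {
      if (cols.empty())
        return;
      out << cols[0];                   /* first column without a separator */
      for (size_t i = 1; i < cols.size(); i++)
        out << sep << cols[i];          /* separator only between columns */
    }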