Diffstat (limited to 'ndb/src/kernel/blocks')
-rw-r--r--  ndb/src/kernel/blocks/ERROR_codes.txt  13
-rw-r--r--  ndb/src/kernel/blocks/Makefile.am  2
-rw-r--r--  ndb/src/kernel/blocks/Makefile_old  28
-rw-r--r--  ndb/src/kernel/blocks/backup/Backup.cpp  138
-rw-r--r--  ndb/src/kernel/blocks/backup/Backup.hpp  41
-rw-r--r--  ndb/src/kernel/blocks/backup/BackupInit.cpp  8
-rw-r--r--  ndb/src/kernel/blocks/backup/Makefile.am  16
-rw-r--r--  ndb/src/kernel/blocks/backup/Makefile_old  18
-rw-r--r--  ndb/src/kernel/blocks/backup/restore/Makefile.am  16
-rw-r--r--  ndb/src/kernel/blocks/backup/restore/Restore.cpp  951
-rw-r--r--  ndb/src/kernel/blocks/backup/restore/Restore.hpp  373
-rw-r--r--  ndb/src/kernel/blocks/backup/restore/consumer.cpp  107
-rw-r--r--  ndb/src/kernel/blocks/backup/restore/consumer.hpp  36
-rw-r--r--  ndb/src/kernel/blocks/backup/restore/consumer_printer.cpp  55
-rw-r--r--  ndb/src/kernel/blocks/backup/restore/consumer_printer.hpp  50
-rw-r--r--  ndb/src/kernel/blocks/backup/restore/consumer_restore.cpp  672
-rw-r--r--  ndb/src/kernel/blocks/backup/restore/consumer_restore.hpp  92
-rw-r--r--  ndb/src/kernel/blocks/backup/restore/consumer_restorem.cpp  652
-rw-r--r--  ndb/src/kernel/blocks/backup/restore/main.cpp  378
-rw-r--r--  ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp  28
-rw-r--r--  ndb/src/kernel/blocks/cmvmi/Makefile.am  14
-rw-r--r--  ndb/src/kernel/blocks/cmvmi/Makefile_old  9
-rw-r--r--  ndb/src/kernel/blocks/dbacc/DbaccInit.cpp  2
-rw-r--r--  ndb/src/kernel/blocks/dbacc/DbaccMain.cpp  34
-rw-r--r--  ndb/src/kernel/blocks/dbacc/Makefile.am  14
-rw-r--r--  ndb/src/kernel/blocks/dbacc/Makefile_old  11
-rw-r--r--  ndb/src/kernel/blocks/dbdict/Dbdict.cpp  88
-rw-r--r--  ndb/src/kernel/blocks/dbdict/Dbdict.hpp  6
-rw-r--r--  ndb/src/kernel/blocks/dbdict/Makefile.am  14
-rw-r--r--  ndb/src/kernel/blocks/dbdict/Makefile_old  12
-rw-r--r--  ndb/src/kernel/blocks/dbdih/Dbdih.hpp  3
-rw-r--r--  ndb/src/kernel/blocks/dbdih/DbdihInit.cpp  3
-rw-r--r--  ndb/src/kernel/blocks/dbdih/DbdihMain.cpp  30
-rw-r--r--  ndb/src/kernel/blocks/dbdih/Makefile.am  14
-rw-r--r--  ndb/src/kernel/blocks/dbdih/Makefile_old  13
-rw-r--r--  ndb/src/kernel/blocks/dblqh/Dblqh.hpp  26
-rw-r--r--  ndb/src/kernel/blocks/dblqh/DblqhInit.cpp  3
-rw-r--r--  ndb/src/kernel/blocks/dblqh/DblqhMain.cpp  233
-rw-r--r--  ndb/src/kernel/blocks/dblqh/Makefile.am  14
-rw-r--r--  ndb/src/kernel/blocks/dblqh/Makefile_old  12
-rw-r--r--  ndb/src/kernel/blocks/dbtc/Dbtc.hpp  11
-rw-r--r--  ndb/src/kernel/blocks/dbtc/DbtcInit.cpp  3
-rw-r--r--  ndb/src/kernel/blocks/dbtc/DbtcMain.cpp  141
-rw-r--r--  ndb/src/kernel/blocks/dbtc/Makefile.am  14
-rw-r--r--  ndb/src/kernel/blocks/dbtc/Makefile_old  11
-rw-r--r--  ndb/src/kernel/blocks/dbtup/Dbtup.hpp  2
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp  1
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupGen.cpp  12
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp  65
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp  3
-rw-r--r--  ndb/src/kernel/blocks/dbtup/Makefile.am  14
-rw-r--r--  ndb/src/kernel/blocks/dbtup/Makefile_old  26
-rw-r--r--  ndb/src/kernel/blocks/dbtux/Dbtux.hpp  7
-rw-r--r--  ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp  13
-rw-r--r--  ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp  11
-rw-r--r--  ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp  88
-rw-r--r--  ndb/src/kernel/blocks/dbtux/Makefile.am  14
-rw-r--r--  ndb/src/kernel/blocks/dbtux/Makefile_old  17
-rw-r--r--  ndb/src/kernel/blocks/dbutil/DbUtil.cpp  2
-rw-r--r--  ndb/src/kernel/blocks/dbutil/Makefile.am  14
-rw-r--r--  ndb/src/kernel/blocks/dbutil/Makefile_old  8
-rw-r--r--  ndb/src/kernel/blocks/grep/Grep.cpp  36
-rw-r--r--  ndb/src/kernel/blocks/grep/Grep.hpp  8
-rw-r--r--  ndb/src/kernel/blocks/grep/GrepInit.cpp  3
-rw-r--r--  ndb/src/kernel/blocks/grep/Makefile.am  14
-rw-r--r--  ndb/src/kernel/blocks/grep/Makefile_old  9
-rw-r--r--  ndb/src/kernel/blocks/ndbcntr/Makefile.am  14
-rw-r--r--  ndb/src/kernel/blocks/ndbcntr/Makefile_old  12
-rw-r--r--  ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp  2
-rw-r--r--  ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp  87
-rw-r--r--  ndb/src/kernel/blocks/ndbfs/Makefile.am  14
-rw-r--r--  ndb/src/kernel/blocks/ndbfs/Makefile_old  14
-rw-r--r--  ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp  4
-rw-r--r--  ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp  4
-rw-r--r--  ndb/src/kernel/blocks/ndbfs/VoidFs.cpp  2
-rw-r--r--  ndb/src/kernel/blocks/qmgr/Makefile.am  14
-rw-r--r--  ndb/src/kernel/blocks/qmgr/Makefile_old  11
-rw-r--r--  ndb/src/kernel/blocks/qmgr/QmgrInit.cpp  2
-rw-r--r--  ndb/src/kernel/blocks/qmgr/QmgrMain.cpp  43
-rw-r--r--  ndb/src/kernel/blocks/suma/Makefile.am  14
-rw-r--r--  ndb/src/kernel/blocks/suma/Makefile_old  10
-rw-r--r--  ndb/src/kernel/blocks/suma/Suma.cpp  15
-rw-r--r--  ndb/src/kernel/blocks/suma/SumaInit.cpp  5
-rw-r--r--  ndb/src/kernel/blocks/trix/Makefile.am  14
-rw-r--r--  ndb/src/kernel/blocks/trix/Makefile_old  8
-rw-r--r--  ndb/src/kernel/blocks/trix/Trix.cpp  2
86 files changed, 990 insertions, 4077 deletions
diff --git a/ndb/src/kernel/blocks/ERROR_codes.txt b/ndb/src/kernel/blocks/ERROR_codes.txt
index 70f11c33cd7..5193d3eae9d 100644
--- a/ndb/src/kernel/blocks/ERROR_codes.txt
+++ b/ndb/src/kernel/blocks/ERROR_codes.txt
@@ -1,8 +1,8 @@
Next QMGR 1
Next NDBCNTR 1000
Next NDBFS 2000
-Next DBACC 3001
-Next DBTUP 4007
+Next DBACC 3002
+Next DBTUP 4013
Next DBLQH 5042
Next DBDICT 6006
Next DBDIH 7174
@@ -10,7 +10,7 @@ Next DBTC 8035
Next CMVMI 9000
Next BACKUP 10022
Next DBUTIL 11002
-Next DBTUX 12001
+Next DBTUX 12007
Next SUMA 13001
TESTING NODE FAILURE, ARBITRATION
@@ -393,6 +393,13 @@ Failed Create Table:
--------------------
7173: Create table failed due to not sufficient number of fragment or
replica records.
+3001: Fail create 1st fragment
+4007 12001: Fail create 1st fragment
+4008 12002: Fail create 2nd fragment
+4009 12003: Fail create 1st attribute in 1st fragment
+4010 12004: Fail create last attribute in 1st fragment
+4011 12005: Fail create 1st attribute in 2nd fragment
+4012 12006: Fail create last attribute in 2nd fragment
Drop Table/Index:
-----------------
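The codes added above are error-injection points: a test assigns one of the listed numbers to a block, and the matching code path then fails on purpose (for instance 3001 aborts creation of the first fragment). A minimal standalone sketch of that pattern follows; the function names and the failure path are illustrative only, not the kernel's actual macros or signal handlers.

#include <cassert>
#include <cstdio>

// Illustrative stand-in for the kernel's error-insert mechanism: a test sets
// a numeric code, and the code under test checks it at the matching
// injection point to force the failure path.
static unsigned g_errorInsert = 0;                 // 0 = no fault injected
static bool errorInserted(unsigned code) { return g_errorInsert == code; }

// Hypothetical fragment-creation step guarded by code 3001
// ("Fail create 1st fragment" in the list above).
static bool createFirstFragment()
{
  if (errorInserted(3001)) {
    std::printf("injected failure: create 1st fragment\n");
    return false;                                  // simulate the refused request
  }
  return true;
}

int main()
{
  assert(createFirstFragment());                   // normal path succeeds
  g_errorInsert = 3001;
  assert(!createFirstFragment());                  // injected path fails
  return 0;
}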
diff --git a/ndb/src/kernel/blocks/Makefile.am b/ndb/src/kernel/blocks/Makefile.am
index 0b2bc3b8c88..7ee90e6239f 100644
--- a/ndb/src/kernel/blocks/Makefile.am
+++ b/ndb/src/kernel/blocks/Makefile.am
@@ -15,3 +15,5 @@ SUBDIRS = \
suma \
grep \
dbtux
+
+windoze-dsp:
diff --git a/ndb/src/kernel/blocks/Makefile_old b/ndb/src/kernel/blocks/Makefile_old
deleted file mode 100644
index ce554dfc3b8..00000000000
--- a/ndb/src/kernel/blocks/Makefile_old
+++ /dev/null
@@ -1,28 +0,0 @@
-#--------------------------------------------------------------------------
-#
-# Name Makefile
-#
-#
-#
-# List subdirectories to be travered
-include .defs.mk
-
-DIRS := \
- cmvmi \
- dbacc \
- dbdict \
- dbdih \
- dblqh \
- dbtc \
- dbtup \
- ndbfs \
- ndbcntr \
- qmgr \
- trix \
- backup \
- dbutil \
- suma \
- grep \
- dbtux
-
-include ${NDB_TOP}/Epilogue.mk
diff --git a/ndb/src/kernel/blocks/backup/Backup.cpp b/ndb/src/kernel/blocks/backup/Backup.cpp
index e6fe63d9014..2e62979ce8e 100644
--- a/ndb/src/kernel/blocks/backup/Backup.cpp
+++ b/ndb/src/kernel/blocks/backup/Backup.cpp
@@ -863,6 +863,13 @@ Backup::execBACKUP_REQ(Signal* signal)
sendBackupRef(senderRef, signal, senderData, BackupRef::IAmNotMaster);
return;
}//if
+
+ if (m_diskless)
+ {
+ sendBackupRef(senderRef, signal, senderData,
+ BackupRef::CannotBackupDiskless);
+ return;
+ }
if(dataLen32 != 0) {
jam();
@@ -985,7 +992,11 @@ Backup::execUTIL_SEQUENCE_CONF(Signal* signal)
}//if
ndbrequire(ptr.p->masterData.state.getState() == DEFINING);
- ptr.p->backupId = conf->sequenceValue[0];
+ {
+ Uint64 backupId;
+ memcpy(&backupId,conf->sequenceValue,8);
+ ptr.p->backupId= (Uint32)backupId;
+ }
ptr.p->backupKey[0] = (getOwnNodeId() << 16) | (ptr.p->backupId & 0xFFFF);
ptr.p->backupKey[1] = NdbTick_CurrentMillisecond();
@@ -1265,10 +1276,6 @@ Backup::createAttributeMask(TablePtr tabPtr,
jam();
AttributePtr attr;
table.attributes.getPtr(attr, i);
- if(attr.p->data.key != 0){
- jam();
- continue;
- }
mask.set(i);
}
}
@@ -2954,12 +2961,9 @@ Backup::parseTableDescription(Signal* signal, BackupRecordPtr ptr, Uint32 len)
tabPtr.p->schemaVersion = tmpTab.TableVersion;
tabPtr.p->noOfAttributes = tmpTab.NoOfAttributes;
- tabPtr.p->noOfKeys = tmpTab.NoOfKeyAttr;
tabPtr.p->noOfNull = 0;
tabPtr.p->noOfVariable = 0; // Computed while iterating over attribs
- tabPtr.p->sz_FixedKeys = 0; // Computed while iterating over attribs
tabPtr.p->sz_FixedAttributes = 0; // Computed while iterating over attribs
- tabPtr.p->variableKeyId = RNIL; // Computed while iterating over attribs
tabPtr.p->triggerIds[0] = ILLEGAL_TRIGGER_ID;
tabPtr.p->triggerIds[1] = ILLEGAL_TRIGGER_ID;
tabPtr.p->triggerIds[2] = ILLEGAL_TRIGGER_ID;
@@ -2994,7 +2998,6 @@ Backup::parseTableDescription(Signal* signal, BackupRecordPtr ptr, Uint32 len)
attrPtr.p->data.nullable = tmp.AttributeNullableFlag;
attrPtr.p->data.fixed = (tmp.AttributeArraySize != 0);
- attrPtr.p->data.key = tmp.AttributeKeyFlag;
attrPtr.p->data.sz32 = sz32;
/**
@@ -3002,50 +3005,26 @@ Backup::parseTableDescription(Signal* signal, BackupRecordPtr ptr, Uint32 len)
* 1) Fixed
* 2) Nullable
* 3) Variable
- * 4) Fixed key
- * 5) Variable key
*/
- if(attrPtr.p->data.key == false) {
+ if(attrPtr.p->data.fixed == true && attrPtr.p->data.nullable == false) {
jam();
-
- if(attrPtr.p->data.fixed == true && attrPtr.p->data.nullable == false) {
- jam();
- attrPtr.p->data.offset = tabPtr.p->sz_FixedAttributes;
- tabPtr.p->sz_FixedAttributes += sz32;
- }//if
-
- if(attrPtr.p->data.fixed == true && attrPtr.p->data.nullable == true) {
- jam();
- attrPtr.p->data.offset = 0;
-
- attrPtr.p->data.offsetNull = tabPtr.p->noOfNull;
- tabPtr.p->noOfNull++;
- tabPtr.p->noOfVariable++;
- }//if
-
- if(attrPtr.p->data.fixed == false) {
- jam();
- tabPtr.p->noOfVariable++;
- ndbrequire(0);
- }//if
-
- } else if(attrPtr.p->data.key == true) {
+ attrPtr.p->data.offset = tabPtr.p->sz_FixedAttributes;
+ tabPtr.p->sz_FixedAttributes += sz32;
+ }//if
+
+ if(attrPtr.p->data.fixed == true && attrPtr.p->data.nullable == true) {
jam();
- ndbrequire(attrPtr.p->data.nullable == false);
+ attrPtr.p->data.offset = 0;
- if(attrPtr.p->data.fixed == true) { // Fixed key
- jam();
- tabPtr.p->sz_FixedKeys += sz32;
- }//if
-
- if(attrPtr.p->data.fixed == false) { // Variable key
- jam();
- attrPtr.p->data.offset = 0;
- tabPtr.p->noOfVariable++;
- ndbrequire(tabPtr.p->variableKeyId == RNIL); // Only one variable key
- tabPtr.p->variableKeyId = attrPtr.i;
- ndbrequire(0);
- }//if
+ attrPtr.p->data.offsetNull = tabPtr.p->noOfNull;
+ tabPtr.p->noOfNull++;
+ tabPtr.p->noOfVariable++;
+ }//if
+
+ if(attrPtr.p->data.fixed == false) {
+ jam();
+ tabPtr.p->noOfVariable++;
+ ndbrequire(0);
}//if
it.next(); // Move Past EndOfAttribute
@@ -3222,7 +3201,7 @@ Backup::execSTART_BACKUP_REQ(Signal* signal)
return;
}//if
- tabPtr.p->triggerAllocated[i] = true;
+ tabPtr.p->triggerAllocated[j] = true;
trigPtr.p->backupPtr = ptr.i;
trigPtr.p->tableId = tabPtr.p->tableId;
trigPtr.p->tab_ptr_i = tabPtr.i;
@@ -3355,7 +3334,7 @@ Backup::execBACKUP_FRAGMENT_REQ(Signal* signal)
Table & table = * tabPtr.p;
ScanFragReq * req = (ScanFragReq *)signal->getDataPtrSend();
const Uint32 parallelism = 16;
- const Uint32 attrLen = 5 + table.noOfAttributes - table.noOfKeys;
+ const Uint32 attrLen = 5 + table.noOfAttributes;
req->senderData = filePtr.i;
req->resultRef = reference();
@@ -3366,7 +3345,7 @@ Backup::execBACKUP_FRAGMENT_REQ(Signal* signal)
req->tableId = table.tableId;
ScanFragReq::setLockMode(req->requestInfo, 0);
ScanFragReq::setHoldLockFlag(req->requestInfo, 0);
- ScanFragReq::setKeyinfoFlag(req->requestInfo, 1);
+ ScanFragReq::setKeyinfoFlag(req->requestInfo, 0);
ScanFragReq::setAttrLen(req->requestInfo,attrLen);
req->transId1 = 0;
req->transId2 = (BACKUP << 20) + (getOwnNodeId() << 8);
@@ -3381,7 +3360,7 @@ Backup::execBACKUP_FRAGMENT_REQ(Signal* signal)
signal->theData[2] = (BACKUP << 20) + (getOwnNodeId() << 8);
// Return all
- signal->theData[3] = table.noOfAttributes - table.noOfKeys;
+ signal->theData[3] = table.noOfAttributes;
signal->theData[4] = 0;
signal->theData[5] = 0;
signal->theData[6] = 0;
@@ -3393,10 +3372,6 @@ Backup::execBACKUP_FRAGMENT_REQ(Signal* signal)
jam();
AttributePtr attr;
table.attributes.getPtr(attr, i);
- if(attr.p->data.key != 0) {
- jam();
- continue;
- }//if
AttributeHeader::init(&signal->theData[dataPos], i, 0);
dataPos++;
@@ -3506,64 +3481,19 @@ Backup::execTRANSID_AI(Signal* signal)
}
}
-void
-Backup::execKEYINFO20(Signal* signal)
-{
- jamEntry();
-
- const Uint32 filePtrI = signal->theData[0];
- const Uint32 keyLen = signal->theData[1];
- //const Uint32 scanInfo = signal->theData[2];
- //const Uint32 transId1 = signal->theData[3];
- //const Uint32 transId2 = signal->theData[4];
- const Uint32 dataLen = signal->length() - 5;
-
- BackupFilePtr filePtr;
- c_backupFilePool.getPtr(filePtr, filePtrI);
-
- OperationRecord & op = filePtr.p->operation;
-
- /**
- * Unpack data
- */
- ndbrequire(keyLen == dataLen);
- const Uint32 * src = &signal->theData[5];
- const Uint32 klFixed = op.getFixedKeySize();
- ndbrequire(keyLen >= klFixed);
-
- Uint32 * dst = op.newKey();
- memcpy(dst, src, klFixed << 2);
-
- const Uint32 szLeft = (keyLen - klFixed);
- if(szLeft > 0) {
- jam();
- src += klFixed;
- dst = op.newVariableKey(szLeft);
- memcpy(dst, src, (szLeft << 2));
- ndbrequire(0);
- }//if
-
- if(op.finished()){
- jam();
- op.newRecord(op.dst);
- }
-}
-
void
Backup::OperationRecord::init(const TablePtr & ptr)
{
tablePtr = ptr.i;
- noOfAttributes = (ptr.p->noOfAttributes - ptr.p->noOfKeys) + 1;
- variableKeyId = ptr.p->variableKeyId;
+ noOfAttributes = ptr.p->noOfAttributes;
sz_Bitmask = (ptr.p->noOfNull + 31) >> 5;
- sz_FixedKeys = ptr.p->sz_FixedKeys;
sz_FixedAttribs = ptr.p->sz_FixedAttributes;
if(ptr.p->noOfVariable == 0) {
jam();
- maxRecordSize = 1 + sz_Bitmask + sz_FixedKeys + sz_FixedAttribs;
+ maxRecordSize = 1 + sz_Bitmask + sz_FixedAttribs;
} else {
jam();
maxRecordSize =
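Besides refusing backups on diskless configurations and dropping the separate key handling, the UTIL_SEQUENCE_CONF hunk above now copies the 64-bit sequence value out of the signal with memcpy before truncating it to the 32-bit backup id. A minimal sketch of that read, using plain <cstdint> types rather than the kernel's Uint32/Uint64:

#include <cstdint>
#include <cstring>
#include <cstdio>

// Minimal sketch: the sequence value arrives as two 32-bit signal words;
// memcpy reads it without making alignment or aliasing assumptions about
// the signal buffer, and the low 32 bits become the backup id.
int main()
{
  uint32_t sequenceValue[2] = { 7u, 0u };   // sample signal payload
  uint64_t seq;
  std::memcpy(&seq, sequenceValue, 8);      // width- and alignment-safe read
  uint32_t backupId = (uint32_t)seq;        // truncate to the 32-bit backup id
  std::printf("backupId=%u\n", backupId);   // prints 7 on little-endian hosts
  return 0;
}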
diff --git a/ndb/src/kernel/blocks/backup/Backup.hpp b/ndb/src/kernel/blocks/backup/Backup.hpp
index 4dc2cd13ae0..1a5d6c7a925 100644
--- a/ndb/src/kernel/blocks/backup/Backup.hpp
+++ b/ndb/src/kernel/blocks/backup/Backup.hpp
@@ -76,7 +76,6 @@ protected:
*/
void execSCAN_HBREP(Signal* signal);
void execTRANSID_AI(Signal* signal);
- void execKEYINFO20(Signal* signal);
void execSCAN_FRAGREF(Signal* signal);
void execSCAN_FRAGCONF(Signal* signal);
@@ -172,8 +171,8 @@ public:
struct Data {
Uint8 nullable;
Uint8 fixed;
- Uint8 key;
- Uint8 unused;
+ Uint8 unused;
+ Uint8 unused2;
Uint32 sz32; // No of 32 bit words
Uint32 offset; // Relative DataFixedAttributes/DataFixedKeys
Uint32 offsetNull; // In NullBitmask
@@ -199,12 +198,9 @@ public:
Uint32 frag_mask;
Uint32 tableType;
Uint32 noOfNull;
- Uint32 noOfKeys;
Uint32 noOfAttributes;
Uint32 noOfVariable;
- Uint32 sz_FixedKeys;
Uint32 sz_FixedAttributes;
- Uint32 variableKeyId;
Uint32 triggerIds[3];
bool triggerAllocated[3];
@@ -224,7 +220,6 @@ public:
* Once per table
*/
void init(const TablePtr & ptr);
- inline Uint32 getFixedKeySize() const { return sz_FixedKeys; }
/**
* Once per fragment
@@ -247,23 +242,19 @@ public:
/**
* Per attribute
*/
- Uint32 * newKey();
void nullAttribute(Uint32 nullOffset);
Uint32 * newNullable(Uint32 attrId, Uint32 sz);
Uint32 * newAttrib(Uint32 offset, Uint32 sz);
Uint32 * newVariable(Uint32 id, Uint32 sz);
- Uint32 * newVariableKey(Uint32 sz);
private:
Uint32* base;
Uint32* dst_Length;
Uint32* dst_Bitmask;
- Uint32* dst_FixedKeys;
Uint32* dst_FixedAttribs;
BackupFormat::DataFile::VariableData* dst_VariableData;
Uint32 noOfAttributes; // No of Attributes
- Uint32 variableKeyId; // Id of variable key
Uint32 attrLeft; // No of attributes left
Uint32 opNoDone;
@@ -289,7 +280,6 @@ public:
* sizes of part
*/
Uint32 sz_Bitmask;
- Uint32 sz_FixedKeys;
Uint32 sz_FixedAttribs;
public:
@@ -526,6 +516,7 @@ public:
NdbNodeBitmask c_aliveNodes;
DLList<BackupRecord> c_backups;
Config c_defaults;
+ Uint32 m_diskless;
STATIC_CONST(NO_OF_PAGES_META_FILE = 2);
@@ -628,7 +619,6 @@ Backup::OperationRecord::newRecord(Uint32 * p){
base = p;
dst_Length = p; p += 1;
dst_Bitmask = p; p += sz_Bitmask;
- dst_FixedKeys = p; p += sz_FixedKeys;
dst_FixedAttribs = p; p += sz_FixedAttribs;
dst_VariableData = (BackupFormat::DataFile::VariableData*)p;
BitmaskImpl::clear(sz_Bitmask, dst_Bitmask);
@@ -646,14 +636,6 @@ Backup::OperationRecord::newAttrib(Uint32 offset, Uint32 sz){
}
inline
-Uint32 *
-Backup::OperationRecord::newKey(){
- attrLeft --;
- attrSzLeft = 0;
- return dst_FixedKeys;
-}
-
-inline
void
Backup::OperationRecord::nullAttribute(Uint32 offsetNull){
attrLeft --;
@@ -692,28 +674,13 @@ Backup::OperationRecord::newVariable(Uint32 id, Uint32 sz){
}
inline
-Uint32 *
-Backup::OperationRecord::newVariableKey(Uint32 sz){
- attrLeft--;
- attrSzLeft = 0;
- attrSzTotal += sz;
-
- dst = &dst_VariableData->Data[0];
- dst_VariableData->Sz = htonl(sz);
- dst_VariableData->Id = htonl(variableKeyId);
-
- dst_VariableData = (BackupFormat::DataFile::VariableData *)(dst + sz);
- return dst;
-}
-
-inline
bool
Backup::OperationRecord::finished(){
if(attrLeft != 0 || attrSzLeft != 0){
return false;
}
- opLen += attrSzTotal + sz_FixedKeys;
+ opLen += attrSzTotal;
opNoDone++;
scanStop = dst = (Uint32 *)dst_VariableData;
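With the key sections removed, OperationRecord::newRecord() above lays a record out as one length word, the null bitmask, the fixed attributes, and then any variable data, so the fixed-only case is 1 + sz_Bitmask + sz_FixedAttribs words. A small self-contained sketch of that size calculation; the field names mirror the header, the values are made up:

#include <cstdint>
#include <cstdio>

// Sketch of the data-file record layout kept by the header above:
// [length word][null bitmask][fixed attributes][variable data...].
// All sizes are counted in 32-bit words.
struct RecordLayout {
  uint32_t sz_Bitmask;        // (noOfNull + 31) >> 5
  uint32_t sz_FixedAttribs;   // sum of fixed attribute sizes

  uint32_t maxRecordSize() const {            // no variable attributes
    return 1 + sz_Bitmask + sz_FixedAttribs;
  }
};

int main()
{
  RecordLayout r = { (3u + 31u) >> 5, 10u };  // e.g. 3 nullable columns, 10 fixed words
  std::printf("max record size = %u words\n", r.maxRecordSize());  // 12
  return 0;
}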
diff --git a/ndb/src/kernel/blocks/backup/BackupInit.cpp b/ndb/src/kernel/blocks/backup/BackupInit.cpp
index 8daad05558b..08fa089a9c0 100644
--- a/ndb/src/kernel/blocks/backup/BackupInit.cpp
+++ b/ndb/src/kernel/blocks/backup/BackupInit.cpp
@@ -22,7 +22,6 @@
//===========================================================================
#include "Backup.hpp"
-#include <new>
#include <Properties.hpp>
#include <Configuration.hpp>
@@ -42,8 +41,10 @@ Backup::Backup(const Configuration & conf) :
ndbrequire(p != 0);
Uint32 noBackups = 0, noTables = 0, noAttribs = 0;
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &m_diskless));
ndb_mgm_get_int_parameter(p, CFG_DB_PARALLEL_BACKUPS, &noBackups);
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_TABLES, &noTables));
+ // ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_TABLES, &noTables));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DICT_TABLE, &noTables));
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_ATTRIBUTES, &noAttribs));
noAttribs++; //RT 527 bug fix
@@ -126,7 +127,6 @@ Backup::Backup(const Configuration & conf) :
addRecSignal(GSN_SCAN_HBREP, &Backup::execSCAN_HBREP);
addRecSignal(GSN_TRANSID_AI, &Backup::execTRANSID_AI);
- addRecSignal(GSN_KEYINFO20, &Backup::execKEYINFO20);
addRecSignal(GSN_SCAN_FRAGREF, &Backup::execSCAN_FRAGREF);
addRecSignal(GSN_SCAN_FRAGCONF, &Backup::execSCAN_FRAGCONF);
@@ -204,7 +204,7 @@ Backup::~Backup()
{
}
-BLOCK_FUNCTIONS(Backup);
+BLOCK_FUNCTIONS(Backup)
template class ArrayPool<Backup::Page32>;
template class ArrayPool<Backup::Attribute>;
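The constructor hunk above mixes required and optional configuration lookups: Diskless and the table count must be present (wrapped in ndbrequire), while the number of parallel backups keeps its default if the lookup fails. A simplified stand-alone analogue of that pattern, with made-up keys instead of the real CFG_* ids:

#include <cstdio>
#include <map>

// Stand-in for ndb_mgm_get_int_parameter(): returns false when the key is
// missing, leaving the caller's default untouched.
typedef std::map<int, unsigned> Config;

static bool get_int_parameter(const Config& c, int key, unsigned* out)
{
  Config::const_iterator it = c.find(key);
  if (it == c.end()) return false;
  *out = it->second;
  return true;
}

int main()
{
  Config cfg;
  cfg[1] = 0;    // hypothetical "Diskless" parameter
  cfg[2] = 128;  // hypothetical table-count parameter

  unsigned diskless = 0, noTables = 0, noBackups = 0;
  if (!get_int_parameter(cfg, 1, &diskless)) return 1;  // required, like ndbrequire()
  if (!get_int_parameter(cfg, 2, &noTables)) return 1;  // required
  get_int_parameter(cfg, 3, &noBackups);                // optional: default kept
  std::printf("diskless=%u tables=%u backups=%u\n", diskless, noTables, noBackups);
  return 0;
}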
diff --git a/ndb/src/kernel/blocks/backup/Makefile.am b/ndb/src/kernel/blocks/backup/Makefile.am
index 85bf5b12415..c8f44f31292 100644
--- a/ndb/src/kernel/blocks/backup/Makefile.am
+++ b/ndb/src/kernel/blocks/backup/Makefile.am
@@ -1,6 +1,4 @@
-SUBDIRS = restore
-
noinst_LIBRARIES = libbackup.a
libbackup_a_SOURCES = Backup.cpp BackupInit.cpp
@@ -10,3 +8,17 @@ include $(top_srcdir)/ndb/config/type_kernel.mk.am
# Don't update the files from bitkeeper
%::SCCS/s.%
+
+windoze-dsp: libbackup.dsp
+
+libbackup.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libbackup_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/backup/Makefile_old b/ndb/src/kernel/blocks/backup/Makefile_old
deleted file mode 100644
index 989199cbe02..00000000000
--- a/ndb/src/kernel/blocks/backup/Makefile_old
+++ /dev/null
@@ -1,18 +0,0 @@
-include .defs.mk
-
-TYPE := kernel
-
-#ifneq ($(MYSQLCLUSTER_TOP),)
-DIRS := restore
-#endif
-
-ARCHIVE_TARGET := backup
-
-SOURCES = Backup.cpp BackupInit.cpp
-
-include $(NDB_TOP)/Epilogue.mk
-
-$(NDB_TOP)/bin/readBackupFile: read.o
- $(C++) -o $@ read.o \
- $(NDB_TOP)/lib/libportlib.a $(NDB_TOP)/lib/libgeneral.a
-
diff --git a/ndb/src/kernel/blocks/backup/restore/Makefile.am b/ndb/src/kernel/blocks/backup/restore/Makefile.am
deleted file mode 100644
index 16550f13546..00000000000
--- a/ndb/src/kernel/blocks/backup/restore/Makefile.am
+++ /dev/null
@@ -1,16 +0,0 @@
-
-ndbtools_PROGRAMS = ndb_restore
-
-ndb_restore_SOURCES = main.cpp consumer.cpp consumer_restore.cpp consumer_printer.cpp Restore.cpp
-
-LDADD_LOC = \
- $(top_builddir)/ndb/src/libndbclient.la \
- $(top_builddir)/dbug/libdbug.a \
- $(top_builddir)/mysys/libmysys.a \
- $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
-
-include $(top_srcdir)/ndb/config/common.mk.am
-
-INCLUDES += -I.. -I$(top_srcdir)/include -I$(top_srcdir)/ndb/include -I$(top_srcdir)/ndb/src/ndbapi -I$(top_srcdir)/ndb/include/ndbapi -I$(top_srcdir)/ndb/include/util -I$(top_srcdir)/ndb/include/portlib -I$(top_srcdir)/ndb/include/kernel
-
-ndb_restore_LDFLAGS = @ndb_bin_am_ldflags@
diff --git a/ndb/src/kernel/blocks/backup/restore/Restore.cpp b/ndb/src/kernel/blocks/backup/restore/Restore.cpp
deleted file mode 100644
index fb3bde6bdef..00000000000
--- a/ndb/src/kernel/blocks/backup/restore/Restore.cpp
+++ /dev/null
@@ -1,951 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include "Restore.hpp"
-#include "BackupFormat.hpp"
-#include <NdbTCP.h>
-#include <OutputStream.hpp>
-#include <Bitmask.hpp>
-
-#include <AttributeHeader.hpp>
-#include <trigger_definitions.h>
-#include <SimpleProperties.hpp>
-#include <signaldata/DictTabInfo.hpp>
-
-// from src/ndbapi
-#include <NdbDictionaryImpl.hpp>
-
-Uint16 Twiddle16(Uint16 in); // Byte shift 16-bit data
-Uint32 Twiddle32(Uint32 in); // Byte shift 32-bit data
-Uint64 Twiddle64(Uint64 in); // Byte shift 64-bit data
-
-bool
-BackupFile::Twiddle(const AttributeDesc* attr_desc, AttributeData* attr_data, Uint32 arraySize){
- Uint32 i;
-
- if(m_hostByteOrder)
- return true;
-
- if(arraySize == 0){
- arraySize = attr_desc->arraySize;
- }
-
- switch(attr_desc->size){
- case 8:
-
- return true;
- case 16:
- for(i = 0; i<arraySize; i++){
- attr_data->u_int16_value[i] = Twiddle16(attr_data->u_int16_value[i]);
- }
- return true;
- case 32:
- for(i = 0; i<arraySize; i++){
- attr_data->u_int32_value[i] = Twiddle32(attr_data->u_int32_value[i]);
- }
- return true;
- case 64:
- for(i = 0; i<arraySize; i++){
- attr_data->u_int64_value[i] = Twiddle64(attr_data->u_int64_value[i]);
- }
- return true;
- default:
- return false;
- } // switch
-
-} // Twiddle
-
-FilteredNdbOut err(* new FileOutputStream(stderr), 0, 0);
-FilteredNdbOut info(* new FileOutputStream(stdout), 1, 1);
-FilteredNdbOut debug(* new FileOutputStream(stdout), 2, 0);
-
-// To decide in what byte order data is
-const Uint32 magicByteOrder = 0x12345678;
-const Uint32 swappedMagicByteOrder = 0x78563412;
-
-RestoreMetaData::RestoreMetaData(const char* path, Uint32 nodeId, Uint32 bNo) {
-
- debug << "RestoreMetaData constructor" << endl;
- setCtlFile(nodeId, bNo, path);
-}
-
-RestoreMetaData::~RestoreMetaData(){
- for(Uint32 i= 0; i < allTables.size(); i++)
- delete allTables[i];
- allTables.clear();
-}
-
-TableS *
-RestoreMetaData::getTable(Uint32 tableId) const {
- for(Uint32 i= 0; i < allTables.size(); i++)
- if(allTables[i]->getTableId() == tableId)
- return allTables[i];
- return NULL;
-}
-
-Uint32
-RestoreMetaData::getStopGCP() const {
- return m_stopGCP;
-}
-
-int
-RestoreMetaData::loadContent()
-{
- Uint32 noOfTables = readMetaTableList();
- if(noOfTables == 0) {
- return 1;
- }
- for(Uint32 i = 0; i<noOfTables; i++){
- if(!readMetaTableDesc()){
- return 0;
- }
- }
- if(!readGCPEntry())
- return 0;
- return 1;
-}
-
-Uint32
-RestoreMetaData::readMetaTableList() {
-
- Uint32 sectionInfo[2];
-
- if (buffer_read(&sectionInfo, sizeof(sectionInfo), 1) != 1){
- err << "readMetaTableList read header error" << endl;
- return 0;
- }
- sectionInfo[0] = ntohl(sectionInfo[0]);
- sectionInfo[1] = ntohl(sectionInfo[1]);
-
- const Uint32 tabCount = sectionInfo[1] - 2;
-
- void *tmp;
- if (buffer_get_ptr(&tmp, 4, tabCount) != tabCount){
- err << "readMetaTableList read tabCount error" << endl;
- return 0;
- }
-
- return tabCount;
-}
-
-bool
-RestoreMetaData::readMetaTableDesc() {
-
- Uint32 sectionInfo[2];
-
- // Read section header
- if (buffer_read(&sectionInfo, sizeof(sectionInfo), 1) != 1){
- err << "readMetaTableDesc read header error" << endl;
- return false;
- } // if
- sectionInfo[0] = ntohl(sectionInfo[0]);
- sectionInfo[1] = ntohl(sectionInfo[1]);
-
- assert(sectionInfo[0] == BackupFormat::TABLE_DESCRIPTION);
-
- // Read dictTabInfo buffer
- const Uint32 len = (sectionInfo[1] - 2);
- void *ptr;
- if (buffer_get_ptr(&ptr, 4, len) != len){
- err << "readMetaTableDesc read error" << endl;
- return false;
- } // if
-
- return parseTableDescriptor((Uint32*)ptr, len);
-}
-
-bool
-RestoreMetaData::readGCPEntry() {
-
- Uint32 data[4];
-
- BackupFormat::CtlFile::GCPEntry * dst =
- (BackupFormat::CtlFile::GCPEntry *)&data[0];
-
- if(buffer_read(dst, 4, 4) != 4){
- err << "readGCPEntry read error" << endl;
- return false;
- }
-
- dst->SectionType = ntohl(dst->SectionType);
- dst->SectionLength = ntohl(dst->SectionLength);
-
- if(dst->SectionType != BackupFormat::GCP_ENTRY){
- err << "readGCPEntry invalid format" << endl;
- return false;
- }
-
- dst->StartGCP = ntohl(dst->StartGCP);
- dst->StopGCP = ntohl(dst->StopGCP);
-
- m_startGCP = dst->StartGCP;
- m_stopGCP = dst->StopGCP;
- return true;
-}
-
-TableS::TableS(NdbTableImpl* tableImpl)
- : m_dictTable(tableImpl)
-{
- m_dictTable = tableImpl;
- m_noOfNullable = m_nullBitmaskSize = 0;
- m_auto_val_id= ~(Uint32)0;
- m_max_auto_val= 0;
-
- for (int i = 0; i < tableImpl->getNoOfColumns(); i++)
- createAttr(tableImpl->getColumn(i));
-}
-
-TableS::~TableS()
-{
- for (Uint32 i= 0; i < allAttributesDesc.size(); i++)
- delete allAttributesDesc[i];
-}
-
-// Parse dictTabInfo buffer and pushback to to vector storage
-bool
-RestoreMetaData::parseTableDescriptor(const Uint32 * data, Uint32 len)
-{
- NdbTableImpl* tableImpl = 0;
- int ret = NdbDictInterface::parseTableInfo(&tableImpl, data, len, false);
-
- if (ret != 0) {
- err << "parseTableInfo " << " failed" << endl;
- return false;
- }
- if(tableImpl == 0)
- return false;
-
- debug << "parseTableInfo " << tableImpl->getName() << " done" << endl;
-
- TableS * table = new TableS(tableImpl);
- if(table == NULL) {
- return false;
- }
- table->setBackupVersion(m_fileHeader.NdbVersion);
-
- debug << "Parsed table id " << table->getTableId() << endl;
- debug << "Parsed table #attr " << table->getNoOfAttributes() << endl;
- debug << "Parsed table schema version not used " << endl;
-
- debug << "Pushing table " << table->getTableName() << endl;
- debug << " with " << table->getNoOfAttributes() << " attributes" << endl;
-
- allTables.push_back(table);
-
- return true;
-}
-
-// Constructor
-RestoreDataIterator::RestoreDataIterator(const RestoreMetaData & md, void (* _free_data_callback)())
- : BackupFile(_free_data_callback), m_metaData(md)
-{
- debug << "RestoreDataIterator constructor" << endl;
- setDataFile(md, 0);
-}
-
-TupleS & TupleS::operator=(const TupleS& tuple)
-{
- prepareRecord(*tuple.m_currentTable);
-
- if (allAttrData)
- memcpy(allAttrData, tuple.allAttrData, getNoOfAttributes()*sizeof(AttributeData));
-
- return *this;
-};
-int TupleS::getNoOfAttributes() const {
- if (m_currentTable == 0)
- return 0;
- return m_currentTable->getNoOfAttributes();
-};
-
-TableS * TupleS::getTable() const {
- return m_currentTable;
-};
-
-const AttributeDesc * TupleS::getDesc(int i) const {
- return m_currentTable->allAttributesDesc[i];
-}
-
-AttributeData * TupleS::getData(int i) const{
- return &(allAttrData[i]);
-};
-
-bool
-TupleS::prepareRecord(TableS & tab){
- if (allAttrData) {
- if (getNoOfAttributes() == tab.getNoOfAttributes())
- {
- m_currentTable = &tab;
- return true;
- }
- delete [] allAttrData;
- m_currentTable= 0;
- }
-
- allAttrData = new AttributeData[tab.getNoOfAttributes()];
- if (allAttrData == 0)
- return false;
-
- m_currentTable = &tab;
-
- return true;
-}
-
-const TupleS *
-RestoreDataIterator::getNextTuple(int & res)
-{
- Uint32 dataLength = 0;
- // Read record length
- if (buffer_read(&dataLength, sizeof(dataLength), 1) != 1){
- err << "getNextTuple:Error reading length of data part" << endl;
- res = -1;
- return NULL;
- } // if
-
- // Convert length from network byte order
- dataLength = ntohl(dataLength);
- const Uint32 dataLenBytes = 4 * dataLength;
-
- if (dataLength == 0) {
- // Zero length for last tuple
- // End of this data fragment
- debug << "End of fragment" << endl;
- res = 0;
- return NULL;
- } // if
-
- // Read tuple data
- void *_buf_ptr;
- if (buffer_get_ptr(&_buf_ptr, 1, dataLenBytes) != dataLenBytes) {
- err << "getNextTuple:Read error: " << endl;
- res = -1;
- return NULL;
- }
-
- Uint32 *buf_ptr = (Uint32*)_buf_ptr, *ptr = buf_ptr;
- ptr += m_currentTable->m_nullBitmaskSize;
- Uint32 i;
- for(i= 0; i < m_currentTable->m_fixedKeys.size(); i++){
- assert(ptr < buf_ptr + dataLength);
-
- const Uint32 attrId = m_currentTable->m_fixedKeys[i]->attrId;
-
- AttributeData * attr_data = m_tuple.getData(attrId);
- const AttributeDesc * attr_desc = m_tuple.getDesc(attrId);
-
- const Uint32 sz = attr_desc->getSizeInWords();
-
- attr_data->null = false;
- attr_data->void_value = ptr;
-
- if(!Twiddle(attr_desc, attr_data))
- {
- res = -1;
- return NULL;
- }
- ptr += sz;
- }
-
- for(i = 0; i < m_currentTable->m_fixedAttribs.size(); i++){
- assert(ptr < buf_ptr + dataLength);
-
- const Uint32 attrId = m_currentTable->m_fixedAttribs[i]->attrId;
-
- AttributeData * attr_data = m_tuple.getData(attrId);
- const AttributeDesc * attr_desc = m_tuple.getDesc(attrId);
-
- const Uint32 sz = attr_desc->getSizeInWords();
-
- attr_data->null = false;
- attr_data->void_value = ptr;
-
- if(!Twiddle(attr_desc, attr_data))
- {
- res = -1;
- return NULL;
- }
-
- ptr += sz;
- }
-
- for(i = 0; i < m_currentTable->m_variableAttribs.size(); i++){
- const Uint32 attrId = m_currentTable->m_variableAttribs[i]->attrId;
-
- AttributeData * attr_data = m_tuple.getData(attrId);
- const AttributeDesc * attr_desc = m_tuple.getDesc(attrId);
-
- if(attr_desc->m_column->getNullable()){
- const Uint32 ind = attr_desc->m_nullBitIndex;
- if(BitmaskImpl::get(m_currentTable->m_nullBitmaskSize,
- buf_ptr,ind)){
- attr_data->null = true;
- attr_data->void_value = NULL;
- continue;
- }
- }
-
- assert(ptr < buf_ptr + dataLength);
-
- typedef BackupFormat::DataFile::VariableData VarData;
- VarData * data = (VarData *)ptr;
- Uint32 sz = ntohl(data->Sz);
- Uint32 id = ntohl(data->Id);
- assert(id == attrId);
-
- attr_data->null = false;
- attr_data->void_value = &data->Data[0];
-
- /**
- * Compute array size
- */
- const Uint32 arraySize = (4 * sz) / (attr_desc->size / 8);
- assert(arraySize >= attr_desc->arraySize);
- if(!Twiddle(attr_desc, attr_data, attr_desc->arraySize))
- {
- res = -1;
- return NULL;
- }
-
- ptr += (sz + 2);
- }
-
- m_count ++;
- res = 0;
- return &m_tuple;
-} // RestoreDataIterator::getNextTuple
-
-BackupFile::BackupFile(void (* _free_data_callback)())
- : free_data_callback(_free_data_callback)
-{
- m_file = 0;
- m_path[0] = 0;
- m_fileName[0] = 0;
-
- m_buffer_sz = 64*1024;
- m_buffer = malloc(m_buffer_sz);
- m_buffer_ptr = m_buffer;
- m_buffer_data_left = 0;
-}
-
-BackupFile::~BackupFile(){
- if(m_file != 0)
- fclose(m_file);
- if(m_buffer != 0)
- free(m_buffer);
-}
-
-bool
-BackupFile::openFile(){
- if(m_file != NULL){
- fclose(m_file);
- m_file = 0;
- }
-
- m_file = fopen(m_fileName, "r");
- return m_file != 0;
-}
-
-Uint32 BackupFile::buffer_get_ptr_ahead(void **p_buf_ptr, Uint32 size, Uint32 nmemb)
-{
- Uint32 sz = size*nmemb;
- if (sz > m_buffer_data_left) {
-
- if (free_data_callback)
- (*free_data_callback)();
-
- memcpy(m_buffer, m_buffer_ptr, m_buffer_data_left);
-
- size_t r = fread(((char *)m_buffer) + m_buffer_data_left, 1, m_buffer_sz - m_buffer_data_left, m_file);
- m_buffer_data_left += r;
- m_buffer_ptr = m_buffer;
-
- if (sz > m_buffer_data_left)
- sz = size * (m_buffer_data_left / size);
- }
-
- *p_buf_ptr = m_buffer_ptr;
-
- return sz/size;
-}
-Uint32 BackupFile::buffer_get_ptr(void **p_buf_ptr, Uint32 size, Uint32 nmemb)
-{
- Uint32 r = buffer_get_ptr_ahead(p_buf_ptr, size, nmemb);
-
- m_buffer_ptr = ((char*)m_buffer_ptr)+(r*size);
- m_buffer_data_left -= (r*size);
-
- return r;
-}
-
-Uint32 BackupFile::buffer_read_ahead(void *ptr, Uint32 size, Uint32 nmemb)
-{
- void *buf_ptr;
- Uint32 r = buffer_get_ptr_ahead(&buf_ptr, size, nmemb);
- memcpy(ptr, buf_ptr, r*size);
-
- return r;
-}
-
-Uint32 BackupFile::buffer_read(void *ptr, Uint32 size, Uint32 nmemb)
-{
- void *buf_ptr;
- Uint32 r = buffer_get_ptr(&buf_ptr, size, nmemb);
- memcpy(ptr, buf_ptr, r*size);
-
- return r;
-}
-
-void
-BackupFile::setCtlFile(Uint32 nodeId, Uint32 backupId, const char * path){
- m_nodeId = nodeId;
- m_expectedFileHeader.BackupId = backupId;
- m_expectedFileHeader.FileType = BackupFormat::CTL_FILE;
-
- char name[PATH_MAX]; const Uint32 sz = sizeof(name);
- BaseString::snprintf(name, sz, "BACKUP-%d.%d.ctl", backupId, nodeId);
- setName(path, name);
-}
-
-void
-BackupFile::setDataFile(const BackupFile & bf, Uint32 no){
- m_nodeId = bf.m_nodeId;
- m_expectedFileHeader = bf.m_fileHeader;
- m_expectedFileHeader.FileType = BackupFormat::DATA_FILE;
-
- char name[PATH_MAX]; const Uint32 sz = sizeof(name);
- BaseString::snprintf(name, sz, "BACKUP-%d-%d.%d.Data",
- m_expectedFileHeader.BackupId, no, m_nodeId);
- setName(bf.m_path, name);
-}
-
-void
-BackupFile::setLogFile(const BackupFile & bf, Uint32 no){
- m_nodeId = bf.m_nodeId;
- m_expectedFileHeader = bf.m_fileHeader;
- m_expectedFileHeader.FileType = BackupFormat::LOG_FILE;
-
- char name[PATH_MAX]; const Uint32 sz = sizeof(name);
- BaseString::snprintf(name, sz, "BACKUP-%d.%d.log",
- m_expectedFileHeader.BackupId, m_nodeId);
- setName(bf.m_path, name);
-}
-
-void
-BackupFile::setName(const char * p, const char * n){
- const Uint32 sz = sizeof(m_path);
- if(p != 0 && strlen(p) > 0){
- if(p[strlen(p)-1] == '/'){
- BaseString::snprintf(m_path, sz, "%s", p);
- } else {
- BaseString::snprintf(m_path, sz, "%s%s", p, "/");
- }
- } else {
- m_path[0] = 0;
- }
-
- BaseString::snprintf(m_fileName, sizeof(m_fileName), "%s%s", m_path, n);
- debug << "Filename = " << m_fileName << endl;
-}
-
-bool
-BackupFile::readHeader(){
- if(!openFile()){
- return false;
- }
-
- if(buffer_read(&m_fileHeader, sizeof(m_fileHeader), 1) != 1){
- err << "readDataFileHeader: Error reading header" << endl;
- return false;
- }
-
- // Convert from network to host byte order for platform compatibility
- m_fileHeader.NdbVersion = ntohl(m_fileHeader.NdbVersion);
- m_fileHeader.SectionType = ntohl(m_fileHeader.SectionType);
- m_fileHeader.SectionLength = ntohl(m_fileHeader.SectionLength);
- m_fileHeader.FileType = ntohl(m_fileHeader.FileType);
- m_fileHeader.BackupId = ntohl(m_fileHeader.BackupId);
- m_fileHeader.BackupKey_0 = ntohl(m_fileHeader.BackupKey_0);
- m_fileHeader.BackupKey_1 = ntohl(m_fileHeader.BackupKey_1);
-
- debug << "FileHeader: " << m_fileHeader.Magic << " " <<
- m_fileHeader.NdbVersion << " " <<
- m_fileHeader.SectionType << " " <<
- m_fileHeader.SectionLength << " " <<
- m_fileHeader.FileType << " " <<
- m_fileHeader.BackupId << " " <<
- m_fileHeader.BackupKey_0 << " " <<
- m_fileHeader.BackupKey_1 << " " <<
- m_fileHeader.ByteOrder << endl;
-
- debug << "ByteOrder is " << m_fileHeader.ByteOrder << endl;
- debug << "magicByteOrder is " << magicByteOrder << endl;
-
- if (m_fileHeader.FileType != m_expectedFileHeader.FileType){
- abort();
- }
-
- // Check for BackupFormat::FileHeader::ByteOrder if swapping is needed
- if (m_fileHeader.ByteOrder == magicByteOrder) {
- m_hostByteOrder = true;
- } else if (m_fileHeader.ByteOrder == swappedMagicByteOrder){
- m_hostByteOrder = false;
- } else {
- abort();
- }
-
- return true;
-} // BackupFile::readHeader
-
-bool
-BackupFile::validateFooter(){
- return true;
-}
-
-bool RestoreDataIterator::readFragmentHeader(int & ret)
-{
- BackupFormat::DataFile::FragmentHeader Header;
-
- debug << "RestoreDataIterator::getNextFragment" << endl;
-
- if (buffer_read(&Header, sizeof(Header), 1) != 1){
- ret = 0;
- return false;
- } // if
-
- Header.SectionType = ntohl(Header.SectionType);
- Header.SectionLength = ntohl(Header.SectionLength);
- Header.TableId = ntohl(Header.TableId);
- Header.FragmentNo = ntohl(Header.FragmentNo);
- Header.ChecksumType = ntohl(Header.ChecksumType);
-
- debug << "FragmentHeader: " << Header.SectionType
- << " " << Header.SectionLength
- << " " << Header.TableId
- << " " << Header.FragmentNo
- << " " << Header.ChecksumType << endl;
-
- m_currentTable = m_metaData.getTable(Header.TableId);
- if(m_currentTable == 0){
- ret = -1;
- return false;
- }
-
- if(!m_tuple.prepareRecord(*m_currentTable))
- {
- ret =-1;
- return false;
- }
-
- info << "_____________________________________________________" << endl
- << "Restoring data in table: " << m_currentTable->getTableName()
- << "(" << Header.TableId << ") fragment "
- << Header.FragmentNo << endl;
-
- m_count = 0;
- ret = 0;
-
- return true;
-} // RestoreDataIterator::getNextFragment
-
-
-bool
-RestoreDataIterator::validateFragmentFooter() {
- BackupFormat::DataFile::FragmentFooter footer;
-
- if (buffer_read(&footer, sizeof(footer), 1) != 1){
- err << "getFragmentFooter:Error reading fragment footer" << endl;
- return false;
- }
-
- // TODO: Handle footer, nothing yet
- footer.SectionType = ntohl(footer.SectionType);
- footer.SectionLength = ntohl(footer.SectionLength);
- footer.TableId = ntohl(footer.TableId);
- footer.FragmentNo = ntohl(footer.FragmentNo);
- footer.NoOfRecords = ntohl(footer.NoOfRecords);
- footer.Checksum = ntohl(footer.Checksum);
-
- assert(m_count == footer.NoOfRecords);
-
- return true;
-} // RestoreDataIterator::getFragmentFooter
-
-AttributeDesc::AttributeDesc(NdbDictionary::Column *c)
- : m_column(c)
-{
- size = 8*NdbColumnImpl::getImpl(* c).m_attrSize;
- arraySize = NdbColumnImpl::getImpl(* c).m_arraySize;
-}
-
-void TableS::createAttr(NdbDictionary::Column *column)
-{
- AttributeDesc * d = new AttributeDesc(column);
- if(d == NULL) {
- ndbout_c("Restore: Failed to allocate memory");
- abort();
- }
- d->attrId = allAttributesDesc.size();
- allAttributesDesc.push_back(d);
-
- if (d->m_column->getAutoIncrement())
- m_auto_val_id= d->attrId;
-
- if(d->m_column->getPrimaryKey() /* && not variable */)
- {
- m_fixedKeys.push_back(d);
- return;
- }
-
- if(!d->m_column->getNullable())
- {
- m_fixedAttribs.push_back(d);
- return;
- }
-
- /* Nullable attr*/
- d->m_nullBitIndex = m_noOfNullable;
- m_noOfNullable++;
- m_nullBitmaskSize = (m_noOfNullable + 31) / 32;
- m_variableAttribs.push_back(d);
-} // TableS::createAttr
-
-Uint16 Twiddle16(Uint16 in)
-{
- Uint16 retVal = 0;
-
- retVal = ((in & 0xFF00) >> 8) |
- ((in & 0x00FF) << 8);
-
- return(retVal);
-} // Twiddle16
-
-Uint32 Twiddle32(Uint32 in)
-{
- Uint32 retVal = 0;
-
- retVal = ((in & 0x000000FF) << 24) |
- ((in & 0x0000FF00) << 8) |
- ((in & 0x00FF0000) >> 8) |
- ((in & 0xFF000000) >> 24);
-
- return(retVal);
-} // Twiddle32
-
-Uint64 Twiddle64(Uint64 in)
-{
- Uint64 retVal = 0;
-
- retVal =
- ((in & (Uint64)0x00000000000000FFLL) << 56) |
- ((in & (Uint64)0x000000000000FF00LL) << 40) |
- ((in & (Uint64)0x0000000000FF0000LL) << 24) |
- ((in & (Uint64)0x00000000FF000000LL) << 8) |
- ((in & (Uint64)0x000000FF00000000LL) >> 8) |
- ((in & (Uint64)0x0000FF0000000000LL) >> 24) |
- ((in & (Uint64)0x00FF000000000000LL) >> 40) |
- ((in & (Uint64)0xFF00000000000000LL) >> 56);
-
- return(retVal);
-} // Twiddle64
-
-
-RestoreLogIterator::RestoreLogIterator(const RestoreMetaData & md)
- : m_metaData(md)
-{
- debug << "RestoreLog constructor" << endl;
- setLogFile(md, 0);
-
- m_count = 0;
-}
-
-const LogEntry *
-RestoreLogIterator::getNextLogEntry(int & res) {
- // Read record length
- typedef BackupFormat::LogFile::LogEntry LogE;
-
- Uint32 gcp= 0;
- LogE * logE= 0;
- Uint32 len= ~0;
- const Uint32 stopGCP = m_metaData.getStopGCP();
- do {
- if (buffer_read_ahead(&len, sizeof(Uint32), 1) != 1){
- res= -1;
- return 0;
- }
- len= ntohl(len);
-
- Uint32 data_len = sizeof(Uint32) + len*4;
- if (buffer_get_ptr((void **)(&logE), 1, data_len) != data_len) {
- res= -2;
- return 0;
- }
-
- if(len == 0){
- res= 0;
- return 0;
- }
-
- logE->TableId= ntohl(logE->TableId);
- logE->TriggerEvent= ntohl(logE->TriggerEvent);
-
- const bool hasGcp= (logE->TriggerEvent & 0x10000) != 0;
- logE->TriggerEvent &= 0xFFFF;
-
- if(hasGcp){
- len--;
- gcp = ntohl(logE->Data[len-2]);
- }
- } while(gcp > stopGCP + 1);
-
- m_logEntry.m_table = m_metaData.getTable(logE->TableId);
- switch(logE->TriggerEvent){
- case TriggerEvent::TE_INSERT:
- m_logEntry.m_type = LogEntry::LE_INSERT;
- break;
- case TriggerEvent::TE_UPDATE:
- m_logEntry.m_type = LogEntry::LE_UPDATE;
- break;
- case TriggerEvent::TE_DELETE:
- m_logEntry.m_type = LogEntry::LE_DELETE;
- break;
- default:
- res = -1;
- return NULL;
- }
-
- const TableS * tab = m_logEntry.m_table;
- m_logEntry.clear();
-
- AttributeHeader * ah = (AttributeHeader *)&logE->Data[0];
- AttributeHeader *end = (AttributeHeader *)&logE->Data[len - 2];
- AttributeS * attr;
- while(ah < end){
- attr= m_logEntry.add_attr();
- if(attr == NULL) {
- ndbout_c("Restore: Failed to allocate memory");
- res = -1;
- return 0;
- }
-
- attr->Desc = (* tab)[ah->getAttributeId()];
- assert(attr->Desc != 0);
-
- const Uint32 sz = ah->getDataSize();
- if(sz == 0){
- attr->Data.null = true;
- attr->Data.void_value = NULL;
- } else {
- attr->Data.null = false;
- attr->Data.void_value = ah->getDataPtr();
- }
-
- Twiddle(attr->Desc, &(attr->Data));
-
- ah = ah->getNext();
- }
-
- m_count ++;
- res = 0;
- return &m_logEntry;
-}
-
-NdbOut &
-operator<<(NdbOut& ndbout, const AttributeS& attr){
- const AttributeData & data = attr.Data;
- const AttributeDesc & desc = *(attr.Desc);
-
- if (data.null)
- {
- ndbout << "<NULL>";
- return ndbout;
- }
-
- NdbRecAttr tmprec;
- tmprec.setup(desc.m_column, (char *)data.void_value);
- ndbout << tmprec;
-
- return ndbout;
-}
-
-// Print tuple data
-NdbOut&
-operator<<(NdbOut& ndbout, const TupleS& tuple)
-{
- ndbout << tuple.getTable()->getTableName() << "; ";
- for (int i = 0; i < tuple.getNoOfAttributes(); i++)
- {
- AttributeData * attr_data = tuple.getData(i);
- const AttributeDesc * attr_desc = tuple.getDesc(i);
- const AttributeS attr = {attr_desc, *attr_data};
- debug << i << " " << attr_desc->m_column->getName();
- ndbout << attr;
-
- if (i != (tuple.getNoOfAttributes() - 1))
- ndbout << delimiter << " ";
- } // for
- return ndbout;
-}
-
-// Print tuple data
-NdbOut&
-operator<<(NdbOut& ndbout, const LogEntry& logE)
-{
- switch(logE.m_type)
- {
- case LogEntry::LE_INSERT:
- ndbout << "INSERT " << logE.m_table->getTableName() << " ";
- break;
- case LogEntry::LE_DELETE:
- ndbout << "DELETE " << logE.m_table->getTableName() << " ";
- break;
- case LogEntry::LE_UPDATE:
- ndbout << "UPDATE " << logE.m_table->getTableName() << " ";
- break;
- default:
- ndbout << "Unknown log entry type (not insert, delete or update)" ;
- }
-
- for (Uint32 i= 0; i < logE.size();i++)
- {
- const AttributeS * attr = logE[i];
- ndbout << attr->Desc->m_column->getName() << "=";
- ndbout << (* attr);
- if (i < (logE.size() - 1))
- ndbout << ", ";
- }
- return ndbout;
-}
-
-
-NdbOut &
-operator<<(NdbOut& ndbout, const TableS & table){
- ndbout << endl << "Table: " << table.getTableName() << endl;
- for (int j = 0; j < table.getNoOfAttributes(); j++)
- {
- const AttributeDesc * desc = table[j];
- ndbout << desc->m_column->getName() << ": "
- << (Uint32) desc->m_column->getType();
- ndbout << " key: " << (Uint32) desc->m_column->getPrimaryKey();
- ndbout << " array: " << desc->arraySize;
- ndbout << " size: " << desc->size << endl;
- } // for
- return ndbout;
-}
-
-template class Vector<TableS*>;
-template class Vector<AttributeS*>;
-template class Vector<AttributeDesc*>;
-
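The deleted Restore.cpp decided byte order by comparing the file header's ByteOrder field against 0x12345678, and byte-swapped every 16/32/64-bit attribute value with Twiddle16/32/64 when the file was written by a host of the opposite endianness. A minimal runnable sketch of the 32-bit case, using the same shifts as the deleted code:

#include <cstdint>
#include <cstdio>

static const uint32_t magicByteOrder        = 0x12345678;  // as stored by the writer
static const uint32_t swappedMagicByteOrder = 0x78563412;  // how it reads back when swapped

static uint32_t Twiddle32(uint32_t in)      // same shifts as the deleted Twiddle32
{
  return ((in & 0x000000FF) << 24) | ((in & 0x0000FF00) << 8) |
         ((in & 0x00FF0000) >> 8)  | ((in & 0xFF000000) >> 24);
}

int main()
{
  uint32_t byteOrderField = swappedMagicByteOrder;   // read from a foreign-endian file
  bool hostByteOrder = (byteOrderField == magicByteOrder);
  uint32_t value = 0x11223344;                       // raw word as read from the file
  if (!hostByteOrder)
    value = Twiddle32(value);                        // restore host representation
  std::printf("value=0x%08x\n", value);              // prints 0x44332211
  return 0;
}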
diff --git a/ndb/src/kernel/blocks/backup/restore/Restore.hpp b/ndb/src/kernel/blocks/backup/restore/Restore.hpp
deleted file mode 100644
index 0ec1ab852e9..00000000000
--- a/ndb/src/kernel/blocks/backup/restore/Restore.hpp
+++ /dev/null
@@ -1,373 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef RESTORE_H
-#define RESTORE_H
-
-#include <ndb_global.h>
-#include <NdbOut.hpp>
-#include <BackupFormat.hpp>
-#include <NdbApi.hpp>
-
-#include <ndb_version.h>
-#include <version.h>
-
-static const char * delimiter = ";"; // Delimiter in file dump
-
-const int FileNameLenC = 256;
-const int TableNameLenC = 256;
-const int AttrNameLenC = 256;
-const Uint32 timeToWaitForNdbC = 10000;
-const Uint32 opsDefaultC = 1000;
-
-// Forward declarations
-//class AttributeDesc;
-struct AttributeDesc;
-struct AttributeData;
-struct AttributeS;
-
-struct AttributeData {
- bool null;
- Uint32 size;
- union {
- Int8 * int8_value;
- Uint8 * u_int8_value;
-
- Int16 * int16_value;
- Uint16 * u_int16_value;
-
- Int32 * int32_value;
- Uint32 * u_int32_value;
-
- Int64 * int64_value;
- Uint64 * u_int64_value;
-
- char * string_value;
-
- void* void_value;
- };
-};
-
-struct AttributeDesc {
- //private:
- friend class TupleS;
- friend class TableS;
- friend class RestoreDataIterator;
- friend class RestoreMetaData;
- friend struct AttributeS;
- Uint32 size; // bits
- Uint32 arraySize;
- Uint32 attrId;
- NdbDictionary::Column *m_column;
-
- Uint32 m_nullBitIndex;
-public:
-
- AttributeDesc(NdbDictionary::Column *column);
- AttributeDesc();
-
- Uint32 getSizeInWords() const { return (size * arraySize + 31)/ 32;}
-}; // AttributeDesc
-
-struct AttributeS {
- const AttributeDesc * Desc;
- AttributeData Data;
-};
-
-class TupleS {
-private:
- friend class RestoreDataIterator;
-
- class TableS *m_currentTable;
- AttributeData *allAttrData;
- bool prepareRecord(TableS &);
-
-public:
- TupleS() {
- m_currentTable= 0;
- allAttrData= 0;
- };
- ~TupleS()
- {
- if (allAttrData)
- delete [] allAttrData;
- };
- TupleS(const TupleS& tuple); // disable copy constructor
- TupleS & operator=(const TupleS& tuple);
- int getNoOfAttributes() const;
- TableS * getTable() const;
- const AttributeDesc * getDesc(int i) const;
- AttributeData * getData(int i) const;
-}; // class TupleS
-
-class TableS {
-
- friend class TupleS;
- friend class RestoreMetaData;
- friend class RestoreDataIterator;
-
- Uint32 schemaVersion;
- Uint32 backupVersion;
- Vector<AttributeDesc *> allAttributesDesc;
- Vector<AttributeDesc *> m_fixedKeys;
- //Vector<AttributeDesc *> m_variableKey;
- Vector<AttributeDesc *> m_fixedAttribs;
- Vector<AttributeDesc *> m_variableAttribs;
-
- Uint32 m_noOfNullable;
- Uint32 m_nullBitmaskSize;
-
- Uint32 m_auto_val_id;
- Uint64 m_max_auto_val;
-
- int pos;
-
- void createAttr(NdbDictionary::Column *column);
-
-public:
- class NdbDictionary::Table* m_dictTable;
- TableS (class NdbTableImpl* dictTable);
- ~TableS();
-
- Uint32 getTableId() const {
- return m_dictTable->getTableId();
- }
- /*
- void setMysqlTableName(char * tableName) {
- strpcpy(mysqlTableName, tableName);
- }
-
- char *
- void setMysqlDatabaseName(char * databaseName) {
- strpcpy(mysqlDatabaseName, databaseName);
- }
-
- table.setMysqlDatabaseName(database);
- */
- void setBackupVersion(Uint32 version) {
- backupVersion = version;
- }
-
- Uint32 getBackupVersion() const {
- return backupVersion;
- }
-
- const char * getTableName() const {
- return m_dictTable->getName();
- }
-
- int getNoOfAttributes() const {
- return allAttributesDesc.size();
- };
-
- bool have_auto_inc() const {
- return m_auto_val_id != ~(Uint32)0;
- };
-
- bool have_auto_inc(Uint32 id) const {
- return m_auto_val_id == id;
- };
-
- Uint64 get_max_auto_val() const {
- return m_max_auto_val;
- };
-
- void update_max_auto_val(const char *data, int size) {
- Uint64 val= 0;
- switch(size){
- case 8:
- val= *(Uint8*)data;
- break;
- case 16:
- val= *(Uint16*)data;
- break;
- case 24:
- val= (0xffffff)&*(Uint32*)data;
- break;
- case 32:
- val= *(Uint32*)data;
- break;
- case 64:
- val= *(Uint64*)data;
- break;
- default:
- return;
- };
- if(val > m_max_auto_val)
- m_max_auto_val= val;
- };
- /**
- * Get attribute descriptor
- */
- const AttributeDesc * operator[](int attributeId) const {
- return allAttributesDesc[attributeId];
- }
-
- TableS& operator=(TableS& org) ;
-}; // TableS;
-
-class BackupFile {
-protected:
- FILE * m_file;
- char m_path[PATH_MAX];
- char m_fileName[PATH_MAX];
- bool m_hostByteOrder;
- BackupFormat::FileHeader m_fileHeader;
- BackupFormat::FileHeader m_expectedFileHeader;
-
- Uint32 m_nodeId;
-
- void * m_buffer;
- void * m_buffer_ptr;
- Uint32 m_buffer_sz;
- Uint32 m_buffer_data_left;
- void (* free_data_callback)();
-
- bool openFile();
- void setCtlFile(Uint32 nodeId, Uint32 backupId, const char * path);
- void setDataFile(const BackupFile & bf, Uint32 no);
- void setLogFile(const BackupFile & bf, Uint32 no);
-
- Uint32 buffer_get_ptr(void **p_buf_ptr, Uint32 size, Uint32 nmemb);
- Uint32 buffer_read(void *ptr, Uint32 size, Uint32 nmemb);
- Uint32 buffer_get_ptr_ahead(void **p_buf_ptr, Uint32 size, Uint32 nmemb);
- Uint32 buffer_read_ahead(void *ptr, Uint32 size, Uint32 nmemb);
-
- void setName(const char * path, const char * name);
-
- BackupFile(void (* free_data_callback)() = 0);
- ~BackupFile();
-public:
- bool readHeader();
- bool validateFooter();
-
- const char * getPath() const { return m_path;}
- const char * getFilename() const { return m_fileName;}
- Uint32 getNodeId() const { return m_nodeId;}
- const BackupFormat::FileHeader & getFileHeader() const { return m_fileHeader;}
- bool Twiddle(const AttributeDesc * attr_desc, AttributeData * attr_data, Uint32 arraySize = 0);
-};
-
-class RestoreMetaData : public BackupFile {
-
- Vector<TableS *> allTables;
- bool readMetaFileHeader();
- bool readMetaTableDesc();
-
- bool readGCPEntry();
- Uint32 readMetaTableList();
-
- Uint32 m_startGCP;
- Uint32 m_stopGCP;
-
- bool parseTableDescriptor(const Uint32 * data, Uint32 len);
-
-public:
- RestoreMetaData(const char * path, Uint32 nodeId, Uint32 bNo);
- virtual ~RestoreMetaData();
-
- int loadContent();
-
- Uint32 getNoOfTables() const { return allTables.size();}
-
- const TableS * operator[](int i) const { return allTables[i];}
- TableS * getTable(Uint32 tableId) const;
-
- Uint32 getStopGCP() const;
-}; // RestoreMetaData
-
-
-class RestoreDataIterator : public BackupFile {
- const RestoreMetaData & m_metaData;
- Uint32 m_count;
- TableS* m_currentTable;
- TupleS m_tuple;
-
-public:
-
- // Constructor
- RestoreDataIterator(const RestoreMetaData &, void (* free_data_callback)());
- ~RestoreDataIterator() {};
-
- // Read data file fragment header
- bool readFragmentHeader(int & res);
- bool validateFragmentFooter();
-
- const TupleS *getNextTuple(int & res);
-};
-
-class LogEntry {
-public:
- enum EntryType {
- LE_INSERT,
- LE_DELETE,
- LE_UPDATE
- };
- EntryType m_type;
- TableS * m_table;
- Vector<AttributeS*> m_values;
- Vector<AttributeS*> m_values_e;
- AttributeS *add_attr() {
- AttributeS * attr;
- if (m_values_e.size() > 0) {
- attr = m_values_e[m_values_e.size()-1];
- m_values_e.erase(m_values_e.size()-1);
- }
- else
- {
- attr = new AttributeS;
- }
- m_values.push_back(attr);
- return attr;
- }
- void clear() {
- for(Uint32 i= 0; i < m_values.size(); i++)
- m_values_e.push_back(m_values[i]);
- m_values.clear();
- }
- ~LogEntry()
- {
- Uint32 i;
- for(i= 0; i< m_values.size(); i++)
- delete m_values[i];
- for(i= 0; i< m_values_e.size(); i++)
- delete m_values_e[i];
- }
- Uint32 size() const { return m_values.size(); }
- const AttributeS * operator[](int i) const { return m_values[i];}
-};
-
-class RestoreLogIterator : public BackupFile {
-private:
- const RestoreMetaData & m_metaData;
-
- Uint32 m_count;
- LogEntry m_logEntry;
-public:
- RestoreLogIterator(const RestoreMetaData &);
- virtual ~RestoreLogIterator() {};
-
- const LogEntry * getNextLogEntry(int & res);
-};
-
-NdbOut& operator<<(NdbOut& ndbout, const TableS&);
-NdbOut& operator<<(NdbOut& ndbout, const TupleS&);
-NdbOut& operator<<(NdbOut& ndbout, const LogEntry&);
-NdbOut& operator<<(NdbOut& ndbout, const RestoreMetaData&);
-
-#endif
-
-
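One detail from the deleted header worth noting: AttributeDesc::getSizeInWords() rounds an attribute of size bits times arraySize elements up to whole 32-bit words. A tiny sketch of that calculation:

#include <cstdio>

// Same rounding as AttributeDesc::getSizeInWords() above:
// (size * arraySize + 31) / 32, where size is in bits.
static unsigned getSizeInWords(unsigned sizeBits, unsigned arraySize)
{
  return (sizeBits * arraySize + 31) / 32;
}

int main()
{
  std::printf("%u\n", getSizeInWords(8, 20));   // 20-byte char column -> 5 words
  std::printf("%u\n", getSizeInWords(64, 1));   // one 64-bit column   -> 2 words
  return 0;
}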
diff --git a/ndb/src/kernel/blocks/backup/restore/consumer.cpp b/ndb/src/kernel/blocks/backup/restore/consumer.cpp
deleted file mode 100644
index e94c31b2666..00000000000
--- a/ndb/src/kernel/blocks/backup/restore/consumer.cpp
+++ /dev/null
@@ -1,107 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include "consumer.hpp"
-
-#ifdef USE_MYSQL
-int
-BackupConsumer::create_table_string(const TableS & table,
- char * tableName,
- char *buf){
- int pos = 0;
- int pos2 = 0;
- char buf2[2048];
-
- pos += sprintf(buf+pos, "%s%s", "CREATE TABLE ", tableName);
- pos += sprintf(buf+pos, "%s", "(");
- pos2 += sprintf(buf2+pos2, "%s", " primary key(");
-
- for (int j = 0; j < table.getNoOfAttributes(); j++)
- {
- const AttributeDesc * desc = table[j];
- // ndbout << desc->name << ": ";
- pos += sprintf(buf+pos, "%s%s", desc->m_column->getName()," ");
- switch(desc->m_column->getType()){
- case NdbDictionary::Column::Int:
- pos += sprintf(buf+pos, "%s", "int");
- break;
- case NdbDictionary::Column::Unsigned:
- pos += sprintf(buf+pos, "%s", "int unsigned");
- break;
- case NdbDictionary::Column::Float:
- pos += sprintf(buf+pos, "%s", "float");
- break;
- case NdbDictionary::Column::Decimal:
- pos += sprintf(buf+pos, "%s", "decimal");
- break;
- case NdbDictionary::Column::Char:
- pos += sprintf(buf+pos, "%s", "char");
- break;
- case NdbDictionary::Column::Varchar:
- pos += sprintf(buf+pos, "%s", "varchar");
- break;
- case NdbDictionary::Column::Binary:
- pos += sprintf(buf+pos, "%s", "binary");
- break;
- case NdbDictionary::Column::Varbinary:
- pos += sprintf(buf+pos, "%s", "varchar binary");
- break;
- case NdbDictionary::Column::Bigint:
- pos += sprintf(buf+pos, "%s", "bigint");
- break;
- case NdbDictionary::Column::Bigunsigned:
- pos += sprintf(buf+pos, "%s", "bigint unsigned");
- break;
- case NdbDictionary::Column::Double:
- pos += sprintf(buf+pos, "%s", "double");
- break;
- case NdbDictionary::Column::Datetime:
- pos += sprintf(buf+pos, "%s", "datetime");
- break;
- case NdbDictionary::Column::Timespec:
- pos += sprintf(buf+pos, "%s", "time");
- break;
- case NdbDictionary::Column::Undefined:
- // pos += sprintf(buf+pos, "%s", "varchar binary");
- return -1;
- break;
- default:
- //pos += sprintf(buf+pos, "%s", "varchar binary");
- return -1;
- }
- if (desc->arraySize > 1) {
- int attrSize = desc->arraySize;
- pos += sprintf(buf+pos, "%s%u%s",
- "(",
- attrSize,
- ")");
- }
- if (desc->m_column->getPrimaryKey()) {
- pos += sprintf(buf+pos, "%s", " not null");
- pos2 += sprintf(buf2+pos2, "%s%s", desc->m_column->getName(), ",");
- }
- pos += sprintf(buf+pos, "%s", ",");
- } // for
- pos2--; // remove trailing comma
- pos2 += sprintf(buf2+pos2, "%s", ")");
- // pos--; // remove trailing comma
-
- pos += sprintf(buf+pos, "%s", buf2);
- pos += sprintf(buf+pos, "%s", ") type=ndbcluster");
- return 0;
-}
-
-#endif // USE_MYSQL
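For reference, the deleted create_table_string() helper (compiled only under USE_MYSQL) walks the backed-up attribute list and emits MySQL DDL. Tracing the code above for a hypothetical table t1 with an Unsigned primary-key column id and a Char column name of array size 32 (names are illustrative, not taken from any backup), the buffer ends up as roughly:

    CREATE TABLE t1(id int unsigned not null,name char(32), primary key(id)) type=ndbcluster

that is, primary-key columns get "not null" and are collected into the trailing primary key(...) clause, and the whole statement is tagged type=ndbcluster.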
diff --git a/ndb/src/kernel/blocks/backup/restore/consumer.hpp b/ndb/src/kernel/blocks/backup/restore/consumer.hpp
deleted file mode 100644
index 692c814159f..00000000000
--- a/ndb/src/kernel/blocks/backup/restore/consumer.hpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef CONSUMER_HPP
-#define CONSUMER_HPP
-
-#include "Restore.hpp"
-
-class BackupConsumer {
-public:
- virtual ~BackupConsumer() { }
- virtual bool init() { return true;}
- virtual bool table(const TableS &){return true;}
- virtual bool endOfTables() { return true; }
- virtual void tuple(const TupleS &){}
- virtual void tuple_free(){}
- virtual void endOfTuples(){}
- virtual void logEntry(const LogEntry &){}
- virtual void endOfLogEntrys(){}
- virtual bool finalize_table(const TableS &){return true;}
-};
-
-#endif
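The BackupConsumer interface above is a plain visitor over the backup stream: the driver announces tables, tuples and log entries, and each registered consumer reacts. A minimal sketch of an extra consumer that only counts what it sees, using nothing beyond the virtual hooks declared in this header (the class is hypothetical, not part of the original tree):

    #include "consumer.hpp"
    #include <NdbOut.hpp>

    class CountingConsumer : public BackupConsumer {
    public:
      CountingConsumer() : m_tables(0), m_tuples(0), m_logEntries(0) {}
      virtual bool table(const TableS &) { m_tables++; return true; }
      virtual void tuple(const TupleS &) { m_tuples++; }
      virtual void logEntry(const LogEntry &) { m_logEntries++; }
      virtual void endOfLogEntrys() {
        ndbout_c("Counted %u tuples and %u log entries in %u tables",
                 m_tuples, m_logEntries, m_tables);
      }
    private:
      Uint32 m_tables;
      Uint32 m_tuples;
      Uint32 m_logEntries;
    };

Because every hook has a default no-op implementation, such a consumer only needs to override the events it cares about.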
diff --git a/ndb/src/kernel/blocks/backup/restore/consumer_printer.cpp b/ndb/src/kernel/blocks/backup/restore/consumer_printer.cpp
deleted file mode 100644
index 0aa5b521d29..00000000000
--- a/ndb/src/kernel/blocks/backup/restore/consumer_printer.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include "consumer_printer.hpp"
-
-bool
-BackupPrinter::table(const TableS & tab)
-{
- if (m_print || m_print_meta)
- {
- m_ndbout << tab;
- ndbout_c("Successfully printed table: %s", tab.m_dictTable->getName());
- }
- return true;
-}
-
-void
-BackupPrinter::tuple(const TupleS & tup)
-{
- m_dataCount++;
- if (m_print || m_print_data)
- m_ndbout << tup << endl;
-}
-
-void
-BackupPrinter::logEntry(const LogEntry & logE)
-{
- if (m_print || m_print_log)
- m_ndbout << logE << endl;
- m_logCount++;
-}
-
-void
-BackupPrinter::endOfLogEntrys()
-{
- if (m_print || m_print_log)
- {
- ndbout << "Printed " << m_dataCount << " tuples and "
- << m_logCount << " log entries"
- << " to stdout." << endl;
- }
-}
diff --git a/ndb/src/kernel/blocks/backup/restore/consumer_printer.hpp b/ndb/src/kernel/blocks/backup/restore/consumer_printer.hpp
deleted file mode 100644
index 7cbc924e364..00000000000
--- a/ndb/src/kernel/blocks/backup/restore/consumer_printer.hpp
+++ /dev/null
@@ -1,50 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef CONSUMER_PRINTER_HPP
-#define CONSUMER_PRINTER_HPP
-
-#include "consumer.hpp"
-
-class BackupPrinter : public BackupConsumer
-{
- NdbOut & m_ndbout;
-public:
- BackupPrinter(NdbOut & out = ndbout) : m_ndbout(out)
- {
- m_print = false;
- m_print_log = false;
- m_print_data = false;
- m_print_meta = false;
- }
-
- virtual bool table(const TableS &);
-#ifdef USE_MYSQL
- virtual bool table(const TableS &, MYSQL* mysqlp);
-#endif
- virtual void tuple(const TupleS &);
- virtual void logEntry(const LogEntry &);
- virtual void endOfTuples() {};
- virtual void endOfLogEntrys();
- bool m_print;
- bool m_print_log;
- bool m_print_data;
- bool m_print_meta;
- Uint32 m_logCount;
- Uint32 m_dataCount;
-};
-
-#endif
diff --git a/ndb/src/kernel/blocks/backup/restore/consumer_restore.cpp b/ndb/src/kernel/blocks/backup/restore/consumer_restore.cpp
deleted file mode 100644
index a35d9d22c65..00000000000
--- a/ndb/src/kernel/blocks/backup/restore/consumer_restore.cpp
+++ /dev/null
@@ -1,672 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include "consumer_restore.hpp"
-#include <NdbSleep.h>
-#include <NdbDictionaryImpl.hpp>
-
-extern FilteredNdbOut err;
-extern FilteredNdbOut info;
-extern FilteredNdbOut debug;
-
-static void callback(int, NdbConnection*, void*);
-
-bool
-BackupRestore::init()
-{
- release();
-
- if (!m_restore && !m_restore_meta)
- return true;
-
- m_ndb = new Ndb();
-
- if (m_ndb == NULL)
- return false;
-
- m_ndb->init(1024);
- if (m_ndb->waitUntilReady(30) != 0)
- {
- err << "Failed to connect to ndb!!" << endl;
- return false;
- }
- info << "Connected to ndb!!" << endl;
-
- m_callback = new restore_callback_t[m_parallelism];
-
- if (m_callback == 0)
- {
- err << "Failed to allocate callback structs" << endl;
- return false;
- }
-
- m_tuples = new TupleS[m_parallelism];
-
- if (m_tuples == 0)
- {
- err << "Failed to allocate tuples" << endl;
- return false;
- }
-
- m_free_callback= m_callback;
- for (Uint32 i= 0; i < m_parallelism; i++) {
- m_callback[i].restore= this;
- m_callback[i].connection= 0;
- m_callback[i].tup= &m_tuples[i];
- if (i > 0)
- m_callback[i-1].next= &(m_callback[i]);
- }
- m_callback[m_parallelism-1].next = 0;
-
- return true;
-}
-
-void BackupRestore::release()
-{
- if (m_ndb)
- {
- delete m_ndb;
- m_ndb= 0;
- }
-
- if (m_callback)
- {
- delete [] m_callback;
- m_callback= 0;
- }
-
- if (m_tuples)
- {
- delete [] m_tuples;
- m_tuples= 0;
- }
-}
-
-BackupRestore::~BackupRestore()
-{
- release();
-}
-
-static
-int
-match_blob(const char * name){
- int cnt, id1, id2;
- char buf[256];
- if((cnt = sscanf(name, "%[^/]/%[^/]/NDB$BLOB_%d_%d", buf, buf, &id1, &id2)) == 4){
- return id1;
- }
-
- return -1;
-}
-
-const NdbDictionary::Table*
-BackupRestore::get_table(const NdbDictionary::Table* tab){
- if(m_cache.m_old_table == tab)
- return m_cache.m_new_table;
- m_cache.m_old_table = tab;
-
- int cnt, id1, id2;
- char buf[256];
- if((cnt = sscanf(tab->getName(), "%[^/]/%[^/]/NDB$BLOB_%d_%d", buf, buf, &id1, &id2)) == 4){
- BaseString::snprintf(buf, sizeof(buf), "NDB$BLOB_%d_%d", m_new_tables[id1]->getTableId(), id2);
- m_cache.m_new_table = m_ndb->getDictionary()->getTable(buf);
- } else {
- m_cache.m_new_table = m_new_tables[tab->getTableId()];
- }
-
- return m_cache.m_new_table;
-}
-
-bool
-BackupRestore::finalize_table(const TableS & table){
- bool ret= true;
- if (!m_restore && !m_restore_meta)
- return ret;
- if (table.have_auto_inc())
- {
- Uint64 max_val= table.get_max_auto_val();
- Uint64 auto_val= m_ndb->readAutoIncrementValue(get_table(table.m_dictTable));
- if (max_val+1 > auto_val || auto_val == ~(Uint64)0)
- ret= m_ndb->setAutoIncrementValue(get_table(table.m_dictTable), max_val+1, false);
- }
- return ret;
-}
-
-bool
-BackupRestore::table(const TableS & table){
- if (!m_restore && !m_restore_meta)
- return true;
-
- const char * name = table.getTableName();
-
- /**
- * Ignore blob tables
- */
- if(match_blob(name) >= 0)
- return true;
-
- const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* table.m_dictTable);
- if(tmptab.m_indexType != NdbDictionary::Index::Undefined){
- m_indexes.push_back(table.m_dictTable);
- return true;
- }
-
- BaseString tmp(name);
- Vector<BaseString> split;
- if(tmp.split(split, "/") != 3){
- err << "Invalid table name format " << name << endl;
- return false;
- }
-
- m_ndb->setDatabaseName(split[0].c_str());
- m_ndb->setSchemaName(split[1].c_str());
-
- NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
- if(m_restore_meta){
- NdbDictionary::Table copy(*table.m_dictTable);
-
- copy.setName(split[2].c_str());
-
- if (dict->createTable(copy) == -1)
- {
- err << "Create table " << table.getTableName() << " failed: "
- << dict->getNdbError() << endl;
- return false;
- }
- info << "Successfully restored table " << table.getTableName()<< endl ;
- }
-
- const NdbDictionary::Table* tab = dict->getTable(split[2].c_str());
- if(tab == 0){
- err << "Unable to find table: " << split[2].c_str() << endl;
- return false;
- }
- if(m_restore_meta){
- m_ndb->setAutoIncrementValue(tab, ~(Uint64)0, false);
- }
- const NdbDictionary::Table* null = 0;
- m_new_tables.fill(table.m_dictTable->getTableId(), null);
- m_new_tables[table.m_dictTable->getTableId()] = tab;
- return true;
-}
-
-bool
-BackupRestore::endOfTables(){
- if(!m_restore_meta)
- return true;
-
- NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
- for(size_t i = 0; i<m_indexes.size(); i++){
- const NdbTableImpl & indtab = NdbTableImpl::getImpl(* m_indexes[i]);
-
- BaseString tmp(indtab.m_primaryTable.c_str());
- Vector<BaseString> split;
- if(tmp.split(split, "/") != 3){
- err << "Invalid table name format " << indtab.m_primaryTable.c_str()
- << endl;
- return false;
- }
-
- m_ndb->setDatabaseName(split[0].c_str());
- m_ndb->setSchemaName(split[1].c_str());
-
- const NdbDictionary::Table * prim = dict->getTable(split[2].c_str());
- if(prim == 0){
- err << "Unable to find base table \"" << split[2].c_str()
- << "\" for index "
- << indtab.getName() << endl;
- return false;
- }
- NdbTableImpl& base = NdbTableImpl::getImpl(*prim);
- NdbIndexImpl* idx;
- int id;
- char idxName[255], buf[255];
- if(sscanf(indtab.getName(), "%[^/]/%[^/]/%d/%s",
- buf, buf, &id, idxName) != 4){
- err << "Invalid index name format " << indtab.getName() << endl;
- return false;
- }
- if(NdbDictInterface::create_index_obj_from_table(&idx, &indtab, &base))
- {
- err << "Failed to create index " << idxName
- << " on " << split[2].c_str() << endl;
- return false;
- }
- idx->setName(idxName);
- if(dict->createIndex(* idx) != 0)
- {
- delete idx;
- err << "Failed to create index " << idxName
- << " on " << split[2].c_str() << endl
- << dict->getNdbError() << endl;
-
- return false;
- }
- delete idx;
- info << "Successfully created index " << idxName
- << " on " << split[2].c_str() << endl;
- }
- return true;
-}
-
-void BackupRestore::tuple(const TupleS & tup)
-{
- if (!m_restore)
- return;
-
- restore_callback_t * cb = m_free_callback;
-
- if (cb == 0)
- assert(false);
-
- m_free_callback = cb->next;
- cb->retries = 0;
- *(cb->tup) = tup; // must do copy!
- tuple_a(cb);
-
- if (m_free_callback == 0)
- {
- // send-poll all transactions
- // close transaction is done in callback
- m_ndb->sendPollNdb(3000, 1);
- }
-}
-
-void BackupRestore::tuple_a(restore_callback_t *cb)
-{
- while (cb->retries < 10)
- {
- /**
- * start transactions
- */
- cb->connection = m_ndb->startTransaction();
- if (cb->connection == NULL)
- {
- /*
- if (errorHandler(cb))
- {
- continue;
- }
- */
- exitHandler();
- } // if
-
- const TupleS &tup = *(cb->tup);
- const NdbDictionary::Table * table = get_table(tup.getTable()->m_dictTable);
-
- NdbOperation * op = cb->connection->getNdbOperation(table);
-
- if (op == NULL)
- {
- if (errorHandler(cb))
- continue;
- exitHandler();
- } // if
-
- if (op->writeTuple() == -1)
- {
- if (errorHandler(cb))
- continue;
- exitHandler();
- } // if
-
- int ret = 0;
- for (int j = 0; j < 2; j++)
- {
- for (int i = 0; i < tup.getNoOfAttributes(); i++)
- {
- const AttributeDesc * attr_desc = tup.getDesc(i);
- const AttributeData * attr_data = tup.getData(i);
- int size = attr_desc->size;
- int arraySize = attr_desc->arraySize;
- char * dataPtr = attr_data->string_value;
- Uint32 length = (size * arraySize) / 8;
-
- if (j == 0 && tup.getTable()->have_auto_inc(i))
- tup.getTable()->update_max_auto_val(dataPtr,size);
-
- if (attr_desc->m_column->getPrimaryKey())
- {
- if (j == 1) continue;
- ret = op->equal(i, dataPtr, length);
- }
- else
- {
- if (j == 0) continue;
- if (attr_data->null)
- ret = op->setValue(i, NULL, 0);
- else
- ret = op->setValue(i, dataPtr, length);
- }
- if (ret < 0) {
- ndbout_c("Column: %d type %d %d %d %d",i,
- attr_desc->m_column->getType(),
- size, arraySize, attr_data->size);
- break;
- }
- }
- if (ret < 0)
- break;
- }
- if (ret < 0)
- {
- if (errorHandler(cb))
- continue;
- exitHandler();
- }
-
- // Prepare transaction (the transaction is NOT yet sent to NDB)
- cb->connection->executeAsynchPrepare(Commit, &callback, cb);
- m_transactions++;
- return;
- }
- err << "Unable to recover from errors. Exiting..." << endl;
- exitHandler();
-}
-
-void BackupRestore::cback(int result, restore_callback_t *cb)
-{
- m_transactions--;
-
- if (result < 0)
- {
- /**
- * Error. temporary or permanent?
- */
- if (errorHandler(cb))
- tuple_a(cb); // retry
- else
- {
- err << "Restore: Failed to restore data due to a unrecoverable error. Exiting..." << endl;
- exitHandler();
- }
- }
- else
- {
- /**
- * OK! close transaction
- */
- m_ndb->closeTransaction(cb->connection);
- cb->connection= 0;
- cb->next= m_free_callback;
- m_free_callback= cb;
- m_dataCount++;
- }
-}
-
-/**
- * Returns true if the error is recoverable (the operation can be retried),
- * false if it is an error that generates an abort.
- * Error handling based on hugo.
- */
-bool BackupRestore::errorHandler(restore_callback_t *cb)
-{
- NdbError error= cb->connection->getNdbError();
- m_ndb->closeTransaction(cb->connection);
- cb->connection= 0;
- cb->retries++;
- switch(error.status)
- {
- case NdbError::Success:
- return false;
- // ERROR!
- break;
-
- case NdbError::TemporaryError:
- NdbSleep_MilliSleep(10);
- return true;
- // RETRY
- break;
-
- case NdbError::UnknownResult:
- err << error << endl;
- return false;
- // ERROR!
- break;
-
- default:
- case NdbError::PermanentError:
- switch (error.code)
- {
- case 499:
- case 250:
- NdbSleep_MilliSleep(10);
- return true; //temp errors?
- default:
- break;
- }
- //ERROR
- err << error << endl;
- return false;
- break;
- }
- return false;
-}
-
-void BackupRestore::exitHandler()
-{
- release();
- exit(-1);
-}
-
-
-void
-BackupRestore::tuple_free()
-{
- if (!m_restore)
- return;
-
- if (m_transactions > 0) {
- // Send all transactions to NDB
- m_ndb->sendPreparedTransactions(0);
-
- // Poll all transactions
- while (m_transactions > 0)
- m_ndb->pollNdb(3000, m_transactions);
- }
-}
-
-void
-BackupRestore::endOfTuples()
-{
- tuple_free();
-}
-
-void
-BackupRestore::logEntry(const LogEntry & tup)
-{
- if (!m_restore)
- return;
-
- NdbConnection * trans = m_ndb->startTransaction();
- if (trans == NULL)
- {
- // Fatal error, TODO: handle the error
- err << "Cannot start transaction" << endl;
- exit(-1);
- } // if
-
- const NdbDictionary::Table * table = get_table(tup.m_table->m_dictTable);
- NdbOperation * op = trans->getNdbOperation(table);
- if (op == NULL)
- {
- err << "Cannot get operation: " << trans->getNdbError() << endl;
- exit(-1);
- } // if
-
- int check = 0;
- switch(tup.m_type)
- {
- case LogEntry::LE_INSERT:
- check = op->insertTuple();
- break;
- case LogEntry::LE_UPDATE:
- check = op->updateTuple();
- break;
- case LogEntry::LE_DELETE:
- check = op->deleteTuple();
- break;
- default:
- err << "Log entry has wrong operation type."
- << " Exiting...";
- exit(-1);
- }
-
- for (Uint32 i= 0; i < tup.size(); i++)
- {
- const AttributeS * attr = tup[i];
- int size = attr->Desc->size;
- int arraySize = attr->Desc->arraySize;
- const char * dataPtr = attr->Data.string_value;
-
- if (tup.m_table->have_auto_inc(attr->Desc->attrId))
- tup.m_table->update_max_auto_val(dataPtr,size);
-
- const Uint32 length = (size / 8) * arraySize;
- if (attr->Desc->m_column->getPrimaryKey())
- op->equal(attr->Desc->attrId, dataPtr, length);
- else
- op->setValue(attr->Desc->attrId, dataPtr, length);
- }
-
- const int ret = trans->execute(Commit);
- if (ret != 0)
- {
- // Inserts, updates and deletes can all fail while the log is being applied,
- // and that is ok.
- // TODO: check that the error is either "tuple exists" or "tuple does not exist"?
- switch(tup.m_type)
- {
- case LogEntry::LE_INSERT:
- break;
- case LogEntry::LE_UPDATE:
- break;
- case LogEntry::LE_DELETE:
- break;
- }
- if (false)
- {
- err << "execute failed: " << trans->getNdbError() << endl;
- exit(-1);
- }
- }
-
- m_ndb->closeTransaction(trans);
- m_logCount++;
-}
-
-void
-BackupRestore::endOfLogEntrys()
-{
- if (!m_restore)
- return;
-
- info << "Restored " << m_dataCount << " tuples and "
- << m_logCount << " log entries" << endl;
-}
-
-/*
- * callback : This is called when the transaction is polled
- *
- * (This function must have three arguments:
- * - The result of the transaction,
- * - The NdbConnection object, and
- * - A pointer to an arbitrary object.)
- */
-
-static void
-callback(int result, NdbConnection* trans, void* aObject)
-{
- restore_callback_t *cb = (restore_callback_t *)aObject;
- (cb->restore)->cback(result, cb);
-}
-
-#if 0 // old tuple impl
-void
-BackupRestore::tuple(const TupleS & tup)
-{
- if (!m_restore)
- return;
- while (1)
- {
- NdbConnection * trans = m_ndb->startTransaction();
- if (trans == NULL)
- {
- // Fatal error, TODO: handle the error
- ndbout << "Cannot start transaction" << endl;
- exit(-1);
- } // if
-
- const TableS * table = tup.getTable();
- NdbOperation * op = trans->getNdbOperation(table->getTableName());
- if (op == NULL)
- {
- ndbout << "Cannot get operation: ";
- ndbout << trans->getNdbError() << endl;
- exit(-1);
- } // if
-
- // TODO: check return value and handle error
- if (op->writeTuple() == -1)
- {
- ndbout << "writeTuple call failed: ";
- ndbout << trans->getNdbError() << endl;
- exit(-1);
- } // if
-
- for (int i = 0; i < tup.getNoOfAttributes(); i++)
- {
- const AttributeS * attr = tup[i];
- int size = attr->Desc->size;
- int arraySize = attr->Desc->arraySize;
- const char * dataPtr = attr->Data.string_value;
-
- const Uint32 length = (size * arraySize) / 8;
- if (attr->Desc->m_column->getPrimaryKey())
- op->equal(i, dataPtr, length);
- }
-
- for (int i = 0; i < tup.getNoOfAttributes(); i++)
- {
- const AttributeS * attr = tup[i];
- int size = attr->Desc->size;
- int arraySize = attr->Desc->arraySize;
- const char * dataPtr = attr->Data.string_value;
-
- const Uint32 length = (size * arraySize) / 8;
- if (!attr->Desc->m_column->getPrimaryKey())
- if (attr->Data.null)
- op->setValue(i, NULL, 0);
- else
- op->setValue(i, dataPtr, length);
- }
- int ret = trans->execute(Commit);
- if (ret != 0)
- {
- ndbout << "execute failed: ";
- ndbout << trans->getNdbError() << endl;
- exit(-1);
- }
- m_ndb->closeTransaction(trans);
- if (ret == 0)
- break;
- }
- m_dataCount++;
-}
-#endif
-
-template class Vector<NdbDictionary::Table*>;
-template class Vector<const NdbDictionary::Table*>;
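The tuple()/tuple_a()/cback() trio above implements bounded parallelism: m_parallelism restore_callback_t entries are threaded into a free list in init(), each incoming tuple takes one entry and prepares an asynchronous write, and when the list runs dry sendPollNdb() drives completions, which hand entries back through cback(). A stripped-down sketch of that recycling idea on its own, with hypothetical names and no NDB types:

    #include <cassert>

    struct Slot { Slot* next; /* per-request state would live here */ };

    static const unsigned PARALLELISM = 4;
    static Slot pool[PARALLELISM];
    static Slot* free_list = 0;

    static void init_pool() {
      for (unsigned i = 0; i + 1 < PARALLELISM; i++)
        pool[i].next = &pool[i + 1];
      pool[PARALLELISM - 1].next = 0;
      free_list = &pool[0];
    }

    static Slot* acquire() {        // take one in-flight slot, as tuple() does
      Slot* s = free_list;
      assert(s != 0);               // caller must poll for completions before exhausting the pool
      free_list = s->next;
      return s;
    }

    static void release(Slot* s) {  // completion handler returns the slot, as cback() does
      s->next = free_list;
      free_list = s;
    }

The important property, preserved in the deleted code, is that at most m_parallelism transactions are ever outstanding, and a slot is only returned to the list after its transaction has been closed.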
diff --git a/ndb/src/kernel/blocks/backup/restore/consumer_restore.hpp b/ndb/src/kernel/blocks/backup/restore/consumer_restore.hpp
deleted file mode 100644
index 59e2734ea1f..00000000000
--- a/ndb/src/kernel/blocks/backup/restore/consumer_restore.hpp
+++ /dev/null
@@ -1,92 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef CONSUMER_RESTORE_HPP
-#define CONSUMER_RESTORE_HPP
-
-#include "consumer.hpp"
-
-struct restore_callback_t {
- class BackupRestore *restore;
- class TupleS *tup;
- class NdbConnection *connection;
- int retries;
- restore_callback_t *next;
-};
-
-
-class BackupRestore : public BackupConsumer
-{
-public:
- BackupRestore(Uint32 parallelism=1)
- {
- m_ndb = 0;
- m_logCount = m_dataCount = 0;
- m_restore = false;
- m_restore_meta = false;
- m_parallelism = parallelism;
- m_callback = 0;
- m_tuples = 0;
- m_free_callback = 0;
- m_transactions = 0;
- m_cache.m_old_table = 0;
- }
-
- virtual ~BackupRestore();
- virtual bool init();
- virtual void release();
- virtual bool table(const TableS &);
- virtual bool endOfTables();
- virtual void tuple(const TupleS &);
- virtual void tuple_free();
- virtual void tuple_a(restore_callback_t *cb);
- virtual void cback(int result, restore_callback_t *cb);
- virtual bool errorHandler(restore_callback_t *cb);
- virtual void exitHandler();
- virtual void endOfTuples();
- virtual void logEntry(const LogEntry &);
- virtual void endOfLogEntrys();
- virtual bool finalize_table(const TableS &);
- void connectToMysql();
- Ndb * m_ndb;
- bool m_restore;
- bool m_restore_meta;
- Uint32 m_logCount;
- Uint32 m_dataCount;
-
- Uint32 m_parallelism;
- Uint32 m_transactions;
-
- TupleS *m_tuples;
- restore_callback_t *m_callback;
- restore_callback_t *m_free_callback;
-
- /**
- * m_new_table_ids[X] = Y;
- * X - old table id
- * Y != 0 - new table
- */
- Vector<const NdbDictionary::Table*> m_new_tables;
- struct {
- const NdbDictionary::Table* m_old_table;
- const NdbDictionary::Table* m_new_table;
- } m_cache;
- const NdbDictionary::Table* get_table(const NdbDictionary::Table* );
-
- Vector<const NdbDictionary::Table*> m_indexes;
-};
-
-#endif
diff --git a/ndb/src/kernel/blocks/backup/restore/consumer_restorem.cpp b/ndb/src/kernel/blocks/backup/restore/consumer_restorem.cpp
deleted file mode 100644
index 6a9ec07148a..00000000000
--- a/ndb/src/kernel/blocks/backup/restore/consumer_restorem.cpp
+++ /dev/null
@@ -1,652 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include "consumer_restore.hpp"
-#include <NdbSleep.h>
-
-extern FilteredNdbOut err;
-extern FilteredNdbOut info;
-extern FilteredNdbOut debug;
-
-static bool asynchErrorHandler(NdbConnection * trans, Ndb * ndb);
-static void callback(int result, NdbConnection* trans, void* aObject);
-
-bool
-BackupRestore::init()
-{
-
- if (!m_restore && !m_restore_meta)
- return true;
-
- m_ndb = new Ndb();
-
- if (m_ndb == NULL)
- return false;
-
- // Turn off table name completion
- m_ndb->useFullyQualifiedNames(false);
-
- m_ndb->init(1024);
- if (m_ndb->waitUntilReady(30) != 0)
- {
- ndbout << "Failed to connect to ndb!!" << endl;
- return false;
- }
- ndbout << "Connected to ndb!!" << endl;
-
-#if USE_MYSQL
- if(use_mysql)
- {
- if ( mysql_thread_safe() == 0 )
- {
- ndbout << "Not thread safe mysql library..." << endl;
- exit(-1);
- }
-
- ndbout << "Connecting to MySQL..." <<endl;
-
- /**
- * new params:
- * port
- * host
- * user
- */
- bool returnValue = true;
- mysql_init(&mysql);
- {
- int portNo = 3306;
- if ( mysql_real_connect(&mysql,
- ga_host,
- ga_user,
- ga_password,
- ga_database,
- ga_port,
- ga_socket,
- 0) == NULL )
- {
- ndbout_c("Connect failed: %s", mysql_error(&mysql));
- returnValue = false;
- }
- ndbout << "Connected to MySQL!!!" <<endl;
- }
-
- /* if(returnValue){
- mysql_set_server_option(&mysql, MYSQL_OPTION_MULTI_STATEMENTS_ON);
- }
- */
- return returnValue;
- }
-#endif
-
- if (m_callback) {
- delete [] m_callback;
- m_callback = 0;
- }
-
- m_callback = new restore_callback_t[m_parallelism];
-
- if (m_callback == 0)
- {
- ndbout << "Failed to allocate callback structs" << endl;
- return false;
- }
-
- m_free_callback = m_callback;
- for (int i= 0; i < m_parallelism; i++) {
- m_callback[i].restore = this;
- m_callback[i].connection = 0;
- m_callback[i].retries = 0;
- if (i > 0)
- m_callback[i-1].next = &(m_callback[i]);
- }
- m_callback[m_parallelism-1].next = 0;
-
- return true;
-
-}
-
-BackupRestore::~BackupRestore()
-{
- if (m_ndb != 0)
- delete m_ndb;
-
- if (m_callback)
- delete [] m_callback;
-}
-
-#ifdef USE_MYSQL
-bool
-BackupRestore::table(const TableS & table, MYSQL * mysqlp){
- if (!m_restore_meta)
- {
- return true;
- }
-
- char tmpTabName[MAX_TAB_NAME_SIZE*2];
- sprintf(tmpTabName, "%s", table.getTableName());
- char * database = strtok(tmpTabName, "/");
- char * schema = strtok( NULL , "/");
- char * tableName = strtok( NULL , "/");
-
- /**
- * This means that the user did not specify a schema,
- * i.e. it is a v2.x backup.
- */
- if(database == NULL)
- return false;
- if(schema == NULL)
- return false;
- if(tableName==NULL)
- tableName = schema;
-
- char stmtCreateDB[255];
- sprintf(stmtCreateDB,"CREATE DATABASE %s", database);
-
- /*ignore return value. mysql_select_db will trap errors anyways*/
- if (mysql_query(mysqlp,stmtCreateDB) == 0)
- {
- //ndbout_c("%s", stmtCreateDB);
- }
-
- if (mysql_select_db(&mysql, database) != 0)
- {
- ndbout_c("Error: %s", mysql_error(&mysql));
- return false;
- }
-
- char buf [2048];
- /**
- * create table ddl
- */
- if (create_table_string(table, tableName, buf))
- {
- ndbout_c("Unable to create a table definition since the "
- "backup contains undefined types");
- return false;
- }
-
- //ndbout_c("%s", buf);
-
- if (mysql_query(mysqlp,buf) != 0)
- {
- ndbout_c("Error: %s", mysql_error(&mysql));
- return false;
- } else
- {
- ndbout_c("Successfully restored table %s into database %s", tableName, database);
- }
-
- return true;
-}
-#endif
-
-bool
-BackupRestore::table(const TableS & table){
- if (!m_restore_meta)
- {
- return true;
- }
- NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
- if (dict->createTable(*table.m_dictTable) == -1)
- {
- err << "Create table " << table.getTableName() << " failed: "
- << dict->getNdbError() << endl;
- return false;
- }
- info << "Successfully restored table " << table.getTableName()<< endl ;
- return true;
-}
-
-void BackupRestore::tuple(const TupleS & tup)
-{
- if (!m_restore)
- {
- delete &tup;
- return;
- }
-
- restore_callback_t * cb = m_free_callback;
-
- if (cb)
- {
- m_free_callback = cb->next;
- cb->retries = 0;
- cb->tup = &tup;
- tuple_a(cb);
- }
-
- if (m_free_callback == 0)
- {
- // send-poll all transactions
- // close transaction is done in callback
- m_ndb->sendPollNdb(3000, 1);
- }
-}
-
-void BackupRestore::tuple_a(restore_callback_t *cb)
-{
- while (cb->retries < 10)
- {
- /**
- * start transactions
- */
- cb->connection = m_ndb->startTransaction();
- if (cb->connection == NULL)
- {
- /*
- if (asynchErrorHandler(cb->connection, m_ndb))
- {
- cb->retries++;
- continue;
- }
- */
- asynchExitHandler();
- } // if
-
- const TupleS &tup = *(cb->tup);
- const TableS * table = tup.getTable();
- NdbOperation * op = cb->connection->getNdbOperation(table->getTableName());
-
- if (op == NULL)
- {
- if (asynchErrorHandler(cb->connection, m_ndb))
- {
- cb->retries++;
- continue;
- }
- asynchExitHandler();
- } // if
-
- if (op->writeTuple() == -1)
- {
- if (asynchErrorHandler(cb->connection, m_ndb))
- {
- cb->retries++;
- continue;
- }
- asynchExitHandler();
- } // if
-
- Uint32 ret = 0;
- for (int i = 0; i < tup.getNoOfAttributes(); i++)
- {
- const AttributeS * attr = tup[i];
- int size = attr->Desc->size;
- int arraySize = attr->Desc->arraySize;
- char * dataPtr = attr->Data.string_value;
- Uint32 length = (size * arraySize) / 8;
- if (attr->Desc->m_column->getPrimaryKey())
- {
- ret = op->equal(i, dataPtr, length);
- }
- else
- {
- if (attr->Data.null)
- ret = op->setValue(i, NULL, 0);
- else
- ret = op->setValue(i, dataPtr, length);
- }
-
- if (ret<0)
- {
- ndbout_c("Column: %d type %d",i,
- tup.getTable()->m_dictTable->getColumn(i)->getType());
- if (asynchErrorHandler(cb->connection, m_ndb))
- {
- cb->retries++;
- break;
- }
- asynchExitHandler();
- }
- }
- if (ret < 0)
- continue;
-
- // Prepare transaction (the transaction is NOT yet sent to NDB)
- cb->connection->executeAsynchPrepare(Commit, &callback, cb);
- m_transactions++;
- }
- ndbout_c("Unable to recover from errors. Exiting...");
- asynchExitHandler();
-}
-
-void BackupRestore::cback(int result, restore_callback_t *cb)
-{
- if (result<0)
- {
- /**
- * Error. temporary or permanent?
- */
- if (asynchErrorHandler(cb->connection, m_ndb))
- {
- cb->retries++;
- tuple_a(cb);
- }
- else
- {
- ndbout_c("Restore: Failed to restore data "
- "due to a unrecoverable error. Exiting...");
- delete m_ndb;
- delete cb->tup;
- exit(-1);
- }
- }
- else
- {
- /**
- * OK! close transaction
- */
- m_ndb->closeTransaction(cb->connection);
- delete cb->tup;
- m_transactions--;
- }
-}
-
-void BackupRestore::asynchExitHandler()
-{
- if (m_ndb != NULL)
- delete m_ndb;
- exit(-1);
-}
-
-#if 0 // old tuple impl
-void
-BackupRestore::tuple(const TupleS & tup)
-{
- if (!m_restore)
- return;
- while (1)
- {
- NdbConnection * trans = m_ndb->startTransaction();
- if (trans == NULL)
- {
- // Fatal error, TODO: handle the error
- ndbout << "Cannot start transaction" << endl;
- exit(-1);
- } // if
-
- const TableS * table = tup.getTable();
- NdbOperation * op = trans->getNdbOperation(table->getTableName());
- if (op == NULL)
- {
- ndbout << "Cannot get operation: ";
- ndbout << trans->getNdbError() << endl;
- exit(-1);
- } // if
-
- // TODO: check return value and handle error
- if (op->writeTuple() == -1)
- {
- ndbout << "writeTuple call failed: ";
- ndbout << trans->getNdbError() << endl;
- exit(-1);
- } // if
-
- for (int i = 0; i < tup.getNoOfAttributes(); i++)
- {
- const AttributeS * attr = tup[i];
- int size = attr->Desc->size;
- int arraySize = attr->Desc->arraySize;
- const char * dataPtr = attr->Data.string_value;
-
- const Uint32 length = (size * arraySize) / 8;
- if (attr->Desc->m_column->getPrimaryKey())
- op->equal(i, dataPtr, length);
- }
-
- for (int i = 0; i < tup.getNoOfAttributes(); i++)
- {
- const AttributeS * attr = tup[i];
- int size = attr->Desc->size;
- int arraySize = attr->Desc->arraySize;
- const char * dataPtr = attr->Data.string_value;
-
- const Uint32 length = (size * arraySize) / 8;
- if (!attr->Desc->m_column->getPrimaryKey())
- if (attr->Data.null)
- op->setValue(i, NULL, 0);
- else
- op->setValue(i, dataPtr, length);
- }
- int ret = trans->execute(Commit);
- if (ret != 0)
- {
- ndbout << "execute failed: ";
- ndbout << trans->getNdbError() << endl;
- exit(-1);
- }
- m_ndb->closeTransaction(trans);
- if (ret == 0)
- break;
- }
- m_dataCount++;
-}
-#endif
-
-void
-BackupRestore::endOfTuples()
-{
- if (!m_restore)
- return;
-
- // Send all transactions to NDB
- m_ndb->sendPreparedTransactions(0);
-
- // Poll all transactions
- m_ndb->pollNdb(3000, m_transactions);
-
- // Close all transactions
- // for (int i = 0; i < nPreparedTransactions; i++)
- // m_ndb->closeTransaction(asynchTrans[i]);
-}
-
-void
-BackupRestore::logEntry(const LogEntry & tup)
-{
- if (!m_restore)
- return;
-
- NdbConnection * trans = m_ndb->startTransaction();
- if (trans == NULL)
- {
- // Fatal error, TODO: handle the error
- ndbout << "Cannot start transaction" << endl;
- exit(-1);
- } // if
-
- const TableS * table = tup.m_table;
- NdbOperation * op = trans->getNdbOperation(table->getTableName());
- if (op == NULL)
- {
- ndbout << "Cannot get operation: ";
- ndbout << trans->getNdbError() << endl;
- exit(-1);
- } // if
-
- int check = 0;
- switch(tup.m_type)
- {
- case LogEntry::LE_INSERT:
- check = op->insertTuple();
- break;
- case LogEntry::LE_UPDATE:
- check = op->updateTuple();
- break;
- case LogEntry::LE_DELETE:
- check = op->deleteTuple();
- break;
- default:
- ndbout << "Log entry has wrong operation type."
- << " Exiting...";
- exit(-1);
- }
-
- for (int i = 0; i < tup.m_values.size(); i++)
- {
- const AttributeS * attr = tup.m_values[i];
- int size = attr->Desc->size;
- int arraySize = attr->Desc->arraySize;
- const char * dataPtr = attr->Data.string_value;
-
- const Uint32 length = (size / 8) * arraySize;
- if (attr->Desc->m_column->getPrimaryKey())
- op->equal(attr->Desc->attrId, dataPtr, length);
- else
- op->setValue(attr->Desc->attrId, dataPtr, length);
- }
-
-#if 1
- trans->execute(Commit);
-#else
- const int ret = trans->execute(Commit);
- // Inserts, updates and deletes can all fail while the log is being applied,
- // and that is ok.
-
- if (ret != 0)
- {
- ndbout << "execute failed: ";
- ndbout << trans->getNdbError() << endl;
- exit(-1);
- }
-#endif
-
- m_ndb->closeTransaction(trans);
- m_logCount++;
-}
-
-void
-BackupRestore::endOfLogEntrys()
-{
- if (m_restore)
- {
- ndbout << "Restored " << m_dataCount << " tuples and "
- << m_logCount << " log entries" << endl;
- }
-}
-#if 0
-/*****************************************
- *
- * Callback function for asynchronous transactions
- *
- * Idea for error handling: Transaction objects have to be stored globally when
- * they are prepared.
- * In the callback function if the transaction:
- * succeeded: delete the object from global storage
- * failed but can be retried: execute the object that is in global storage
- * failed but fatal: delete the object from global storage
- *
- ******************************************/
-static void restoreCallback(int result, // Result for transaction
- NdbConnection *object, // Transaction object
- void *anything) // Not used
-{
- static Uint32 counter = 0;
-
-
- debug << "restoreCallback function called " << counter << " time(s)" << endl;
-
- ++counter;
-
- if (result == -1)
- {
- ndbout << " restoreCallback (" << counter;
- if ((counter % 10) == 1)
- {
- ndbout << "st";
- } // if
- else if ((counter % 10) == 2)
- {
- ndbout << "nd";
- } // else if
- else if ((counter % 10 ) ==3)
- {
- ndbout << "rd";
- } // else if
- else
- {
- ndbout << "th";
- } // else
- err << " time: error detected " << object->getNdbError() << endl;
- } // if
-
-} // restoreCallback
-#endif
-
-
-
-/*
- * callback : This is called when the transaction is polled
- *
- * (This function must have three arguments:
- * - The result of the transaction,
- * - The NdbConnection object, and
- * - A pointer to an arbitrary object.)
- */
-
-static void
-callback(int result, NdbConnection* trans, void* aObject)
-{
- restore_callback_t *cb = (restore_callback_t *)aObject;
- (cb->restore)->cback(result, cb);
-}
-
-/**
- * Returns true if the error is recoverable (the operation can be retried),
- * false if it is an error that generates an abort.
- * Error handling based on hugo.
- */
-static
-bool asynchErrorHandler(NdbConnection * trans, Ndb* ndb)
-{
- NdbError error = trans->getNdbError();
- ndb->closeTransaction(trans);
- switch(error.status)
- {
- case NdbError::Success:
- return false;
- // ERROR!
- break;
-
- case NdbError::TemporaryError:
- NdbSleep_MilliSleep(10);
- return true;
- // RETRY
- break;
-
- case NdbError::UnknownResult:
- ndbout << error << endl;
- return false;
- // ERROR!
- break;
-
- default:
- case NdbError::PermanentError:
- switch (error.code)
- {
- case 499:
- case 250:
- NdbSleep_MilliSleep(10);
- return true; //temp errors?
- default:
- break;
- }
- //ERROR
- ndbout << error << endl;
- return false;
- break;
- }
- return false;
-}
diff --git a/ndb/src/kernel/blocks/backup/restore/main.cpp b/ndb/src/kernel/blocks/backup/restore/main.cpp
deleted file mode 100644
index f7b1479cc93..00000000000
--- a/ndb/src/kernel/blocks/backup/restore/main.cpp
+++ /dev/null
@@ -1,378 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include <getarg.h>
-#include <Vector.hpp>
-#include <ndb_limits.h>
-#include <NdbTCP.h>
-#include <NdbOut.hpp>
-
-#include "consumer_restore.hpp"
-#include "consumer_printer.hpp"
-
-extern FilteredNdbOut err;
-extern FilteredNdbOut info;
-extern FilteredNdbOut debug;
-
-static int ga_nodeId = 0;
-static int ga_nParallelism = 128;
-static int ga_backupId = 0;
-static bool ga_dont_ignore_systab_0 = false;
-static Vector<class BackupConsumer *> g_consumers;
-
-static const char* ga_backupPath = "." DIR_SEPARATOR;
-
-static const char* ga_connect_NDB = NULL;
-
-/**
- * print and restore flags
- */
-static bool ga_restore = false;
-static bool ga_print = false;
-bool
-readArguments(const int argc, const char** argv)
-{
-
- int _print = 0;
- int _print_meta = 0;
- int _print_data = 0;
- int _print_log = 0;
- int _restore_data = 0;
- int _restore_meta = 0;
-
-
- struct getargs args[] =
- {
- { "connect", 'c', arg_string, &ga_connect_NDB,
- "NDB Cluster connection", "\"nodeid=<api id>;host=<hostname:port>\""},
- { "nodeid", 'n', arg_integer, &ga_nodeId,
- "Backup files from node", "db node id"},
- { "backupid", 'b',arg_integer, &ga_backupId, "Backup id", "backup id"},
- { "print", '\0', arg_flag, &_print,
- "Print data and log to stdout", "print data and log"},
- { "print_data", '\0', arg_flag, &_print_data,
- "Print data to stdout", "print data"},
- { "print_meta", '\0', arg_flag, &_print_meta,
- "Print meta data to stdout", "print meta data"},
- { "print_log", '\0', arg_flag, &_print_log,
- "Print log to stdout", "print log"},
- { "restore_data", 'r', arg_flag, &_restore_data,
- "Restore table data/logs into NDB Cluster using NDBAPI",
- "Restore table data/log"},
- { "restore_meta", 'm', arg_flag, &_restore_meta,
- "Restore meta data into NDB Cluster using NDBAPI", "Restore meta data"},
- { "parallelism", 'p', arg_integer, &ga_nParallelism,
- "No of parallel transactions during restore of data."
- "(parallelism can be 1 to 1024)",
- "Parallelism"},
-#ifdef USE_MYSQL
- { "use_mysql", '\0', arg_flag, &use_mysql,
- "Restore meta data via mysql. Systab will be ignored. Data is restored "
- "using NDBAPI.", "use mysql"},
- { "user", '\0', arg_string, &ga_user, "MySQL user", "Default: root"},
- { "password", '\0', arg_string, &ga_password, "MySQL user's password",
- "Default: \"\" "},
- { "host", '\0', arg_string, &ga_host, "Hostname of MySQL server",
- "Default: localhost"},
- { "socket", '\0', arg_string, &ga_socket, "Path to MySQL server socket file",
- "Default: /tmp/mysql.sock"},
- { "port", '\0', arg_integer, &ga_port, "Port number of MySQL server",
- "Default: 3306"},
-#endif
- { "dont_ignore_systab_0", 'f', arg_flag, &ga_dont_ignore_systab_0,
- "Experimental. Do not ignore system table during restore.",
- "dont_ignore_systab_0"}
-
- };
-
- int num_args = sizeof(args) / sizeof(args[0]);
- int optind = 0;
-
- if (getarg(args, num_args, argc, argv, &optind) ||
- ga_nodeId == 0 ||
- ga_backupId == 0 ||
- ga_nParallelism < 1 ||
- ga_nParallelism >1024)
- {
- arg_printusage(args, num_args, argv[0], "<path to backup files>\n");
- return false;
- }
-
- BackupPrinter* printer = new BackupPrinter();
- if (printer == NULL)
- return false;
-
- BackupRestore* restore = new BackupRestore(ga_nParallelism);
- if (restore == NULL)
- {
- delete printer;
- return false;
- }
-
- /**
- * Got segmentation fault when using the printer's attributes directly
- * in getargs... Do not have the time to find out why... this is faster...
- */
- if (_print)
- {
- ga_print = true;
- ga_restore = true;
- printer->m_print = true;
- }
- if (_print_meta)
- {
- ga_print = true;
- printer->m_print_meta = true;
- }
- if (_print_data)
- {
- ga_print = true;
- printer->m_print_data = true;
- }
- if (_print_log)
- {
- ga_print = true;
- printer->m_print_log = true;
- }
-
- if (_restore_data)
- {
- ga_restore = true;
- restore->m_restore = true;
- }
-
- if (_restore_meta)
- {
- // ga_restore = true;
- restore->m_restore_meta = true;
- }
-
- {
- BackupConsumer * c = printer;
- g_consumers.push_back(c);
- }
- {
- BackupConsumer * c = restore;
- g_consumers.push_back(c);
- }
- // Set backup file path
- if (argv[optind] != NULL)
- {
- ga_backupPath = argv[optind];
- }
-
- return true;
-}
-
-
-void
-clearConsumers()
-{
- for(Uint32 i= 0; i<g_consumers.size(); i++)
- delete g_consumers[i];
- g_consumers.clear();
-}
-
-static bool
-checkSysTable(const char *tableName)
-{
- return ga_dont_ignore_systab_0 ||
- (strcmp(tableName, "SYSTAB_0") != 0 &&
- strcmp(tableName, "NDB$EVENTS_0") != 0 &&
- strcmp(tableName, "sys/def/SYSTAB_0") != 0 &&
- strcmp(tableName, "sys/def/NDB$EVENTS_0") != 0);
-}
-
-static void
-free_data_callback()
-{
- for(Uint32 i= 0; i < g_consumers.size(); i++)
- g_consumers[i]->tuple_free();
-}
-
-int
-main(int argc, const char** argv)
-{
- ndb_init();
- if (!readArguments(argc, argv))
- {
- return -1;
- }
-
- if (ga_connect_NDB != NULL)
- {
- // Use connection string
- Ndb::setConnectString(ga_connect_NDB);
- }
-
- /**
- * we must always load meta data, even if we will only print it to stdout
- */
- RestoreMetaData metaData(ga_backupPath, ga_nodeId, ga_backupId);
- if (!metaData.readHeader())
- {
- ndbout << "Failed to read " << metaData.getFilename() << endl << endl;
- return -1;
- }
- /**
- * check whether we can restore the backup (right version).
- */
- int res = metaData.loadContent();
-
- if (res == 0)
- {
- ndbout_c("Restore: Failed to load content");
- return -1;
- }
-
- if (metaData.getNoOfTables() == 0)
- {
- ndbout_c("Restore: The backup contains no tables ");
- return -1;
- }
-
-
- if (!metaData.validateFooter())
- {
- ndbout_c("Restore: Failed to validate footer.");
- return -1;
- }
-
- Uint32 i;
- for(i= 0; i < g_consumers.size(); i++)
- {
- if (!g_consumers[i]->init())
- {
- clearConsumers();
- return -11;
- }
-
- }
-
- for(i = 0; i<metaData.getNoOfTables(); i++)
- {
- if (checkSysTable(metaData[i]->getTableName()))
- {
- for(Uint32 j= 0; j < g_consumers.size(); j++)
- if (!g_consumers[j]->table(* metaData[i]))
- {
- ndbout_c("Restore: Failed to restore table: %s. "
- "Exiting...",
- metaData[i]->getTableName());
- return -11;
- }
- }
- }
-
- for(i= 0; i < g_consumers.size(); i++)
- if (!g_consumers[i]->endOfTables())
- {
- ndbout_c("Restore: Failed while closing tables");
- return -11;
- }
-
- if (ga_restore || ga_print)
- {
- if (ga_restore)
- {
- RestoreDataIterator dataIter(metaData, &free_data_callback);
-
- // Read data file header
- if (!dataIter.readHeader())
- {
- ndbout << "Failed to read header of data file. Exiting..." ;
- return -11;
- }
-
-
- while (dataIter.readFragmentHeader(res= 0))
- {
- const TupleS* tuple;
- while ((tuple = dataIter.getNextTuple(res= 1)) != 0)
- {
- if (checkSysTable(tuple->getTable()->getTableName()))
- for(Uint32 i= 0; i < g_consumers.size(); i++)
- g_consumers[i]->tuple(* tuple);
- } // while (tuple != NULL);
-
- if (res < 0)
- {
- ndbout_c("Restore: An error occured while restoring data. "
- "Exiting...");
- return -1;
- }
- if (!dataIter.validateFragmentFooter()) {
- ndbout_c("Restore: Error validating fragment footer. "
- "Exiting...");
- return -1;
- }
- } // while (dataIter.readFragmentHeader(res))
-
- if (res < 0)
- {
- err << "Restore: An error occured while restoring data. Exiting... res=" << res << endl;
- return -1;
- }
-
-
- dataIter.validateFooter(); //not implemented
-
- for (i= 0; i < g_consumers.size(); i++)
- g_consumers[i]->endOfTuples();
-
- RestoreLogIterator logIter(metaData);
- if (!logIter.readHeader())
- {
- err << "Failed to read header of data file. Exiting..." << endl;
- return -1;
- }
-
- const LogEntry * logEntry = 0;
- while ((logEntry = logIter.getNextLogEntry(res= 0)) != 0)
- {
- if (checkSysTable(logEntry->m_table->getTableName()))
- for(Uint32 i= 0; i < g_consumers.size(); i++)
- g_consumers[i]->logEntry(* logEntry);
- }
- if (res < 0)
- {
- err << "Restore: An restoring the data log. Exiting... res=" << res << endl;
- return -1;
- }
- logIter.validateFooter(); //not implemented
- for (i= 0; i < g_consumers.size(); i++)
- g_consumers[i]->endOfLogEntrys();
- for(i = 0; i<metaData.getNoOfTables(); i++)
- {
- if (checkSysTable(metaData[i]->getTableName()))
- {
- for(Uint32 j= 0; j < g_consumers.size(); j++)
- if (!g_consumers[j]->finalize_table(* metaData[i]))
- {
- ndbout_c("Restore: Failed to finalize restore table: %s. "
- "Exiting...",
- metaData[i]->getTableName());
- return -11;
- }
- }
- }
- }
- }
- clearConsumers();
- return 0;
-} // main
-
-template class Vector<BackupConsumer*>;
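Consumers are registered in readArguments() simply by pushing BackupConsumer pointers onto g_consumers, and clearConsumers() deletes them; every later phase (table, tuple, logEntry, finalize_table) fans out over that vector. Wiring in an additional consumer, such as the counting sketch suggested after consumer.hpp above, would therefore be one more allocation and push_back inside readArguments() (CountingConsumer is the hypothetical class from that sketch):

    {
      BackupConsumer * c = new CountingConsumer();
      if (c != NULL)
        g_consumers.push_back(c);   // clearConsumers() deletes it on exit
    }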
diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
index 234d832655c..dfae180ae71 100644
--- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
+++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
@@ -39,7 +39,6 @@
#include <EventLogger.hpp>
#include <TimeQueue.hpp>
-#include <new>
#include <NdbSleep.h>
#include <SafeCounter.hpp>
@@ -150,6 +149,7 @@ void Cmvmi::execNDB_TAMPER(Signal* signal)
ndbrequire(false);
}
+#ifndef NDB_WIN32
if(ERROR_INSERTED(9996)){
simulate_error_during_shutdown= SIGSEGV;
ndbrequire(false);
@@ -159,6 +159,7 @@ void Cmvmi::execNDB_TAMPER(Signal* signal)
simulate_error_during_shutdown= SIGSEGV;
kill(getpid(), SIGABRT);
}
+#endif
}//execNDB_TAMPER()
void Cmvmi::execSET_LOGLEVELORD(Signal* signal)
@@ -193,21 +194,11 @@ void Cmvmi::execEVENT_REP(Signal* signal)
/**
* If entry is not found
*/
- Uint32 threshold = 16;
- LogLevel::EventCategory eventCategory = (LogLevel::EventCategory)0;
-
- for(unsigned int i = 0; i< EventLoggerBase::matrixSize; i++){
- if(EventLoggerBase::matrix[i].eventType == eventType){
- eventCategory = EventLoggerBase::matrix[i].eventCategory;
- threshold = EventLoggerBase::matrix[i].threshold;
- break;
- }
- }
-
- if(threshold > 15){
- // No entry found in matrix (or event that should never be printed)
+ Uint32 threshold;
+ LogLevel::EventCategory eventCategory;
+ Logger::LoggerLevel severity;
+ if (EventLoggerBase::event_lookup(eventType,eventCategory,threshold,severity))
return;
- }
SubscriberPtr ptr;
for(subscribers.first(ptr); ptr.i != RNIL; subscribers.next(ptr)){
@@ -225,14 +216,15 @@ void Cmvmi::execEVENT_REP(Signal* signal)
// Print the event info
g_eventLogger.log(eventReport->getEventType(), signal->theData);
+ return;
}//execEVENT_REP()
void
Cmvmi::execEVENT_SUBSCRIBE_REQ(Signal * signal){
EventSubscribeReq * subReq = (EventSubscribeReq *)&signal->theData[0];
SubscriberPtr ptr;
-
jamEntry();
+ DBUG_ENTER("Cmvmi::execEVENT_SUBSCRIBE_REQ");
/**
* Search for subscription
@@ -269,11 +261,13 @@ Cmvmi::execEVENT_SUBSCRIBE_REQ(Signal * signal){
category = (LogLevel::EventCategory)(subReq->theData[i] >> 16);
level = subReq->theData[i] & 0xFFFF;
ptr.p->logLevel.setLogLevel(category, level);
+ DBUG_PRINT("info",("entry %d: level=%d, category= %d", i, level, category));
}
}
signal->theData[0] = ptr.i;
sendSignal(ptr.p->blockRef, GSN_EVENT_SUBSCRIBE_CONF, signal, 1, JBB);
+ DBUG_VOID_RETURN;
}
void
@@ -1117,7 +1111,7 @@ Cmvmi::execDUMP_STATE_ORD(Signal* signal)
}//Cmvmi::execDUMP_STATE_ORD()
-BLOCK_FUNCTIONS(Cmvmi);
+BLOCK_FUNCTIONS(Cmvmi)
static Uint32 g_print;
static LinearSectionPtr g_test[3];
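The EVENT_REP hunk above replaces the open-coded scan of EventLoggerBase::matrix (default threshold 16, skip when no entry is found or the threshold marks the event as never printed) with a single EventLoggerBase::event_lookup() call whose non-zero return means "do not report". The helper's body is not part of this diff; a hedged reconstruction of what it plausibly does, based only on the loop it replaces (the severity member is an assumption, and the real helper may apply additional filtering):

    int
    EventLoggerBase::event_lookup(int eventType,
                                  LogLevel::EventCategory & cat,
                                  Uint32 & threshold,
                                  Logger::LoggerLevel & severity)
    {
      for (unsigned i = 0; i < EventLoggerBase::matrixSize; i++) {
        if (EventLoggerBase::matrix[i].eventType == eventType) {
          cat       = EventLoggerBase::matrix[i].eventCategory;
          threshold = EventLoggerBase::matrix[i].threshold;
          severity  = EventLoggerBase::matrix[i].severity;  // assumed field
          return 0;                                         // found: caller may report it
        }
      }
      return 1;  // unknown event (or one that should never be printed): caller returns early
    }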
diff --git a/ndb/src/kernel/blocks/cmvmi/Makefile.am b/ndb/src/kernel/blocks/cmvmi/Makefile.am
index fdd43932682..dc2e12746fd 100644
--- a/ndb/src/kernel/blocks/cmvmi/Makefile.am
+++ b/ndb/src/kernel/blocks/cmvmi/Makefile.am
@@ -8,3 +8,17 @@ include $(top_srcdir)/ndb/config/type_kernel.mk.am
# Don't update the files from bitkeeper
%::SCCS/s.%
+
+windoze-dsp: libcmvmi.dsp
+
+libcmvmi.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libcmvmi_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/cmvmi/Makefile_old b/ndb/src/kernel/blocks/cmvmi/Makefile_old
deleted file mode 100644
index d75e5dbf08b..00000000000
--- a/ndb/src/kernel/blocks/cmvmi/Makefile_old
+++ /dev/null
@@ -1,9 +0,0 @@
-include .defs.mk
-
-TYPE := kernel
-
-ARCHIVE_TARGET := cmvmi
-
-SOURCES = Cmvmi.cpp
-
-include $(NDB_TOP)/Epilogue.mk
diff --git a/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp b/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
index 2705f95f6dd..95b336a0a65 100644
--- a/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
+++ b/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
@@ -264,4 +264,4 @@ Dbacc::~Dbacc()
}//Dbacc::~Dbacc()
-BLOCK_FUNCTIONS(Dbacc);
+BLOCK_FUNCTIONS(Dbacc)
diff --git a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
index 9a1bbd86562..a82c96beebd 100644
--- a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
+++ b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
@@ -1062,7 +1062,21 @@ void Dbacc::execACCFRAGREQ(Signal* signal)
{
const AccFragReq * const req = (AccFragReq*)&signal->theData[0];
jamEntry();
+ if (ERROR_INSERTED(3001)) {
+ jam();
+ addFragRefuse(signal, 1);
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }
tabptr.i = req->tableId;
+#ifndef VM_TRACE
+ // config mismatch - do not crash if release compiled
+ if (tabptr.i >= ctablesize) {
+ jam();
+ addFragRefuse(signal, 800);
+ return;
+ }
+#endif
ptrCheckGuard(tabptr, ctablesize, tabrec);
ndbrequire((req->reqInfo & 0xF) == ZADDFRAG);
ndbrequire(!getrootfragmentrec(signal, rootfragrecptr, req->fragId));
@@ -4501,6 +4515,17 @@ void Dbacc::getdirindex(Signal* signal)
/* BUCKET, AND SEARCH FOR ELEMENT. THE PRIMARY KEYS WHICH ARE SAVED */
/* IN THE OPERATION REC ARE THE CHECK ITEMS IN THE SEARCHING. */
/* --------------------------------------------------------------------------------- */
+
+#if __ia64 == 1
+#if __INTEL_COMPILER == 810
+int ndb_acc_ia64_icc810_dummy_var = 0;
+void ndb_acc_ia64_icc810_dummy_func()
+{
+ ndb_acc_ia64_icc810_dummy_var++;
+}
+#endif
+#endif
+
void Dbacc::getElement(Signal* signal)
{
DirRangePtr geOverflowrangeptr;
@@ -4595,6 +4620,12 @@ void Dbacc::getElement(Signal* signal)
/* WE HAVE FOUND THE ELEMENT. GET THE LOCK INDICATOR AND RETURN FOUND. */
/* --------------------------------------------------------------------------------- */
jam();
+#if __ia64 == 1
+#if __INTEL_COMPILER == 810
+ // prevents SIGSEGV under icc -O1
+ ndb_acc_ia64_icc810_dummy_func();
+#endif
+#endif
tgeLocked = ElementHeader::getLocked(gePageptr.p->word32[tgeElementptr]);
tgeResult = ZTRUE;
TdataIndex = tgeElementptr + tgeForward;
@@ -5673,7 +5704,8 @@ void Dbacc::commitOperation(Signal* signal)
Uint32 tmp2Olq;
if ((operationRecPtr.p->commitDeleteCheckFlag == ZFALSE) &&
- (operationRecPtr.p->operation != ZSCAN_OP)) {
+ (operationRecPtr.p->operation != ZSCAN_OP) &&
+ (operationRecPtr.p->operation != ZREAD)) {
jam();
/* This method is used to check whether the end result of the transaction
will be to delete the tuple. In this case all operation will be marked
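The new ERROR_INSERTED(3001) guard at the top of execACCFRAGREQ() gives tests a hook to make ACC refuse an add-fragment request once (CLEAR_ERROR_INSERT_VALUE resets the hook), and the #ifndef VM_TRACE branch turns an out-of-range table id caused by a configuration mismatch into an addFragRefuse() instead of a ptrCheckGuard crash in release builds. A sketch of how a test program might arm the new error insert, assuming the NdbRestarter helper from the ndb test framework (helper names as used elsewhere in the test suite; treat them as an assumption):

    #include <NdbRestarter.hpp>

    // Arm error insert 3001 on the first data node so the next ACCFRAGREQ
    // is refused through the new code path; returns 0 on success.
    int arm_accfragreq_refusal()
    {
      NdbRestarter restarter;                      // uses the default connect string
      int nodeId = restarter.getDbNodeId(0);       // first data node
      return restarter.insertErrorInNode(nodeId, 3001);
    }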
diff --git a/ndb/src/kernel/blocks/dbacc/Makefile.am b/ndb/src/kernel/blocks/dbacc/Makefile.am
index 7ccfbe22f76..e44524c3edd 100644
--- a/ndb/src/kernel/blocks/dbacc/Makefile.am
+++ b/ndb/src/kernel/blocks/dbacc/Makefile.am
@@ -8,3 +8,17 @@ include $(top_srcdir)/ndb/config/type_kernel.mk.am
# Don't update the files from bitkeeper
%::SCCS/s.%
+
+windoze-dsp: libdbacc.dsp
+
+libdbacc.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libdbacc_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dbacc/Makefile_old b/ndb/src/kernel/blocks/dbacc/Makefile_old
deleted file mode 100644
index 93a830cec95..00000000000
--- a/ndb/src/kernel/blocks/dbacc/Makefile_old
+++ /dev/null
@@ -1,11 +0,0 @@
-include .defs.mk
-
-TYPE := kernel
-
-ARCHIVE_TARGET := dbacc
-
-SOURCES = \
- DbaccInit.cpp \
- DbaccMain.cpp
-
-include $(NDB_TOP)/Epilogue.mk
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index d1a8128ea7f..b1e573e1cc8 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -239,7 +239,11 @@ Dbdict::packTableIntoPagesImpl(SimpleProperties::Writer & w,
w.add(DictTabInfo::TableName, tablePtr.p->tableName);
w.add(DictTabInfo::TableId, tablePtr.i);
+#ifdef HAVE_TABLE_REORG
w.add(DictTabInfo::SecondTableId, tablePtr.p->secondTable);
+#else
+ w.add(DictTabInfo::SecondTableId, (Uint32)0);
+#endif
w.add(DictTabInfo::TableVersion, tablePtr.p->tableVersion);
w.add(DictTabInfo::NoOfKeyAttr, tablePtr.p->noOfPrimkey);
w.add(DictTabInfo::NoOfAttributes, tablePtr.p->noOfAttributes);
@@ -528,7 +532,7 @@ Dbdict::writeTableFile(Signal* signal, Uint32 tableId,
Uint32 sz = tabInfoPtr.sz + ZPAGE_HEADER_SIZE;
c_writeTableRecord.noOfPages = DIV(sz, ZSIZE_OF_PAGES_IN_WORDS);
- c_writeTableRecord.tableWriteState = WriteTableRecord::CALLBACK;
+ c_writeTableRecord.tableWriteState = WriteTableRecord::TWR_CALLBACK;
c_writeTableRecord.m_callback = * callback;
c_writeTableRecord.pageId = 0;
@@ -647,7 +651,7 @@ void Dbdict::closeWriteTableConf(Signal* signal,
case WriteTableRecord::WRITE_RESTART_FROM_OWN :
ndbrequire(false);
break;
- case WriteTableRecord::CALLBACK:
+ case WriteTableRecord::TWR_CALLBACK:
jam();
execute(signal, c_writeTableRecord.m_callback, 0);
return;
@@ -1188,7 +1192,7 @@ Dbdict::~Dbdict()
{
}//Dbdict::~Dbdict()
-BLOCK_FUNCTIONS(Dbdict);
+BLOCK_FUNCTIONS(Dbdict)
void Dbdict::initCommonData()
{
@@ -1436,6 +1440,7 @@ Uint32 Dbdict::getFreeTableRecord(Uint32 primaryTableId)
jam();
return RNIL;
}//if
+#ifdef HAVE_TABLE_REORG
bool secondFound = false;
for (tablePtr.i = firstTablePtr.i + 1; tablePtr.i < tabSize ; tablePtr.i++) {
jam();
@@ -1455,6 +1460,7 @@ Uint32 Dbdict::getFreeTableRecord(Uint32 primaryTableId)
firstTablePtr.p->tabState = TableRecord::NOT_DEFINED;
return RNIL;
}//if
+#endif
return firstTablePtr.i;
}//Dbdict::getFreeTableRecord()
@@ -2381,7 +2387,7 @@ Dbdict::restartCreateTab_readTableConf(Signal* signal,
ndbrequire(c_writeTableRecord.tableWriteState == WriteTableRecord::IDLE);
c_writeTableRecord.noOfPages = c_readTableRecord.noOfPages;
c_writeTableRecord.pageId = c_readTableRecord.pageId;
- c_writeTableRecord.tableWriteState = WriteTableRecord::CALLBACK;
+ c_writeTableRecord.tableWriteState = WriteTableRecord::TWR_CALLBACK;
c_writeTableRecord.m_callback.m_callbackData = callbackData;
c_writeTableRecord.m_callback.m_callbackFunction =
safe_cast(&Dbdict::restartCreateTab_writeTableConf);
@@ -3661,9 +3667,8 @@ Dbdict::execCREATE_FRAGMENTATION_CONF(Signal* signal){
req->tableId = tabPtr.i;
req->tableVersion = tabEntry->m_tableVersion + 1;
- sendSignal(rg, GSN_CREATE_TAB_REQ, signal,
- CreateTabReq::SignalLength, JBB);
-
+ sendFragmentedSignal(rg, GSN_CREATE_TAB_REQ, signal,
+ CreateTabReq::SignalLength, JBB);
return;
}
@@ -4624,7 +4629,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
jam();
tablePtr.p->tabState = TableRecord::DEFINING;
}//if
-
+#ifdef HAVE_TABLE_REORG
/* ---------------------------------------------------------------- */
// Get id of second table id and check that table doesn't already exist
// and set up links between first and second table.
@@ -4638,7 +4643,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
secondTablePtr.p->tabState = TableRecord::REORG_TABLE_PREPARED;
secondTablePtr.p->secondTable = tablePtr.i;
tablePtr.p->secondTable = secondTablePtr.i;
-
+#endif
/* ---------------------------------------------------------------- */
// Set table version
/* ---------------------------------------------------------------- */
@@ -5536,10 +5541,12 @@ void Dbdict::releaseTableObject(Uint32 tableId, bool removeFromHash)
nextAttrRecord = attrPtr.p->nextAttrInTable;
c_attributeRecordPool.release(attrPtr);
}//if
+#ifdef HAVE_TABLE_REORG
Uint32 secondTableId = tablePtr.p->secondTable;
initialiseTableRecord(tablePtr);
c_tableRecordPool.getPtr(tablePtr, secondTableId);
initialiseTableRecord(tablePtr);
+#endif
return;
}//releaseTableObject()
@@ -6046,11 +6053,21 @@ Dbdict::execCREATE_INDX_REQ(Signal* signal)
jam();
if (getOwnNodeId() != c_masterNodeId) {
jam();
- // forward to DICT master
- sendSignal(calcDictBlockRef(c_masterNodeId), GSN_CREATE_INDX_REQ,
- signal, signal->getLength(), JBB);
- return;
+
+ releaseSections(signal);
+ OpCreateIndex opBusy;
+ opPtr.p = &opBusy;
+ opPtr.p->save(req);
+ opPtr.p->m_isMaster = (senderRef == reference());
+ opPtr.p->key = 0;
+ opPtr.p->m_requestType = CreateIndxReq::RT_DICT_PREPARE;
+ opPtr.p->m_errorCode = CreateIndxRef::NotMaster;
+ opPtr.p->m_errorLine = __LINE__;
+ opPtr.p->m_errorNode = c_masterNodeId;
+ createIndex_sendReply(signal, opPtr, true);
+ return;
}
+
// forward initial request plus operation key to all
req->setOpKey(++c_opRecordSequence);
NodeReceiverGroup rg(DBDICT, c_aliveNodes);
@@ -6581,10 +6598,9 @@ Dbdict::execDROP_INDX_REQ(Signal* signal)
jam();
if (getOwnNodeId() != c_masterNodeId) {
jam();
- // forward to DICT master
- sendSignal(calcDictBlockRef(c_masterNodeId), GSN_DROP_INDX_REQ,
- signal, signal->getLength(), JBB);
- return;
+
+ err = DropIndxRef::NotMaster;
+ goto error;
}
// forward initial request plus operation key to all
Uint32 indexId= req->getIndexId();
@@ -6672,6 +6688,7 @@ error:
opPtr.p->save(req);
opPtr.p->m_errorCode = (DropIndxRef::ErrorCode)err;
opPtr.p->m_errorLine = __LINE__;
+ opPtr.p->m_errorNode = c_masterNodeId;
dropIndex_sendReply(signal, opPtr, true);
}
@@ -9110,9 +9127,15 @@ Dbdict::execALTER_INDX_REQ(Signal* signal)
jam();
if (! isLocal && getOwnNodeId() != c_masterNodeId) {
jam();
- // forward to DICT master
- sendSignal(calcDictBlockRef(c_masterNodeId), GSN_ALTER_INDX_REQ,
- signal, signal->getLength(), JBB);
+
+ releaseSections(signal);
+ OpAlterIndex opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = AlterIndxRef::NotMaster;
+ opPtr.p->m_errorLine = __LINE__;
+ opPtr.p->m_errorNode = c_masterNodeId;
+ alterIndex_sendReply(signal, opPtr, true);
return;
}
// forward initial request plus operation key to all
@@ -9790,9 +9813,15 @@ Dbdict::execBUILDINDXREQ(Signal* signal)
jam();
if (getOwnNodeId() != c_masterNodeId) {
jam();
- // forward to DICT master
- sendSignal(calcDictBlockRef(c_masterNodeId), GSN_BUILDINDXREQ,
- signal, signal->getLength(), JBB);
+
+ releaseSections(signal);
+ OpBuildIndex opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = BuildIndxRef::NotMaster;
+ opPtr.p->m_errorLine = __LINE__;
+ opPtr.p->m_errorNode = c_masterNodeId;
+ buildIndex_sendReply(signal, opPtr, true);
return;
}
// forward initial request plus operation key to all
@@ -10208,6 +10237,7 @@ Dbdict::buildIndex_sendReply(Signal* signal, OpBuildIndexPtr opPtr,
rep->setIndexId(opPtr.p->m_request.getIndexId());
if (sendRef) {
rep->setErrorCode(opPtr.p->m_errorCode);
+ rep->masterNodeId = opPtr.p->m_errorNode;
gsn = GSN_BUILDINDXREF;
length = BuildIndxRef::SignalLength;
}
@@ -10256,9 +10286,15 @@ Dbdict::execCREATE_TRIG_REQ(Signal* signal)
jam();
if (! isLocal && getOwnNodeId() != c_masterNodeId) {
jam();
- // forward to DICT master
- sendSignal(calcDictBlockRef(c_masterNodeId), GSN_CREATE_TRIG_REQ,
- signal, signal->getLength(), JBB);
+
+ releaseSections(signal);
+ OpCreateTrigger opBad;
+ opPtr.p = &opBad;
+ opPtr.p->save(req);
+ opPtr.p->m_errorCode = CreateTrigRef::NotMaster;
+ opPtr.p->m_errorLine = __LINE__;
+ opPtr.p->m_errorNode = c_masterNodeId;
+ createTrigger_sendReply(signal, opPtr, true);
return;
}
// forward initial request plus operation key to all
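
The Dbdict.cpp hunks above share one change: when a CREATE_INDX_REQ, DROP_INDX_REQ, ALTER_INDX_REQ, BUILDINDXREQ or CREATE_TRIG_REQ arrives at a node that is not the DICT master, the request is no longer forwarded to the master; the node now answers immediately with a NotMaster error that carries the master's node id, so the sender can redirect the request itself (the DROP_INDX handler reaches the same reply through its existing error label). A condensed sketch of the shared shape, using only names that appear in the hunks; the op record is stack-local and never enters any pool:

    // sketch of the common non-master reply path (ALTER_INDX variant shown)
    if (getOwnNodeId() != c_masterNodeId) {
      jam();
      releaseSections(signal);                 // drop any long sections attached to the request
      OpAlterIndex opBad;                      // temporary op record on the stack
      opPtr.p = &opBad;
      opPtr.p->save(req);                      // keep enough of the request to address the reply
      opPtr.p->m_errorCode = AlterIndxRef::NotMaster;
      opPtr.p->m_errorLine = __LINE__;
      opPtr.p->m_errorNode = c_masterNodeId;   // tell the sender which node is the master
      alterIndex_sendReply(signal, opPtr, true);
      return;
    }
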
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
index 19c03a86e22..5fc4742e829 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
+++ b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
@@ -151,10 +151,10 @@ public:
/* Temporary record used during add/drop table */
Uint32 myConnect;
-
+#ifdef HAVE_TABLE_REORG
/* Second table used by this table (for table reorg) */
Uint32 secondTable;
-
+#endif
/* Next record in Pool */
Uint32 nextPool;
@@ -639,7 +639,7 @@ private:
WRITE_ADD_TABLE_SLAVE = 2,
WRITE_RESTART_FROM_MASTER = 3,
WRITE_RESTART_FROM_OWN = 4,
- CALLBACK = 5
+ TWR_CALLBACK = 5
};
TableWriteState tableWriteState;
Callback m_callback;
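
The TableWriteState enumerator rename is presumably tied to the Windows project-file generation added elsewhere in this patch (the windoze-dsp targets): with Windows headers in scope, CALLBACK is a macro (__stdcall), so a bare enumerator of that name would not survive preprocessing. A minimal sketch of the renamed state, other enumerators as in the hunk above:

    enum TableWriteState {
      // ... earlier states unchanged ...
      WRITE_RESTART_FROM_OWN = 4,
      TWR_CALLBACK = 5      // formerly CALLBACK, which collides with the Windows macro
    };
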
diff --git a/ndb/src/kernel/blocks/dbdict/Makefile.am b/ndb/src/kernel/blocks/dbdict/Makefile.am
index dc4c4fe4734..9a0d68f8148 100644
--- a/ndb/src/kernel/blocks/dbdict/Makefile.am
+++ b/ndb/src/kernel/blocks/dbdict/Makefile.am
@@ -9,3 +9,17 @@ include $(top_srcdir)/ndb/config/type_kernel.mk.am
# Don't update the files from bitkeeper
%::SCCS/s.%
+
+windoze-dsp: libdbdict.dsp
+
+libdbdict.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libdbdict_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dbdict/Makefile_old b/ndb/src/kernel/blocks/dbdict/Makefile_old
deleted file mode 100644
index 46d938114fb..00000000000
--- a/ndb/src/kernel/blocks/dbdict/Makefile_old
+++ /dev/null
@@ -1,12 +0,0 @@
-include .defs.mk
-
-TYPE := kernel
-
-ARCHIVE_TARGET := dbdict
-
-SOURCES = \
- Dbdict.cpp
-
-DIRS := printSchemafile
-
-include $(NDB_TOP)/Epilogue.mk
diff --git a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
index 14fa262f871..ee67bf47d7b 100644
--- a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
+++ b/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
@@ -147,7 +147,6 @@ public:
Uint32 nfConnect;
Uint32 table;
Uint32 userpointer;
- Uint32 nodeCount;
BlockReference userblockref;
};
typedef Ptr<ConnectRecord> ConnectRecordPtr;
@@ -1469,7 +1468,7 @@ private:
Uint32 c_blockCommitNo;
bool getBlockCommit() const {
- return c_blockCommit == true || cgckptflag == true;
+ return c_blockCommit || cgckptflag;
}
/**
diff --git a/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp b/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
index 7ca45ef4b43..9a5efebc56e 100644
--- a/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
+++ b/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
@@ -18,7 +18,6 @@
#define DBDIH_C
#include "Dbdih.hpp"
#include <ndb_limits.h>
-#include <new>
#define DEBUG(x) { ndbout << "DIH::" << x << endl; }
@@ -314,7 +313,7 @@ Dbdih::~Dbdih()
}//Dbdih::~Dbdih()
-BLOCK_FUNCTIONS(Dbdih);
+BLOCK_FUNCTIONS(Dbdih)
diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index 76aa745c3e0..0bc8351a9db 100644
--- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -2976,6 +2976,8 @@ void Dbdih::execCOPY_FRAGREF(Signal* signal)
SystemError * const sysErr = (SystemError*)&signal->theData[0];
sysErr->errorCode = SystemError::CopyFragRefError;
sysErr->errorRef = reference();
+ sysErr->data1 = errorCode;
+ sysErr->data2 = 0;
sendSignal(cntrRef, GSN_SYSTEM_ERROR, signal,
SystemError::SignalLength, JBB);
return;
@@ -4492,6 +4494,8 @@ void Dbdih::handleTakeOverNewMaster(Signal* signal, Uint32 takeOverPtrI)
SystemError * const sysErr = (SystemError*)&signal->theData[0];
sysErr->errorCode = SystemError::CopyFragRefError;
sysErr->errorRef = reference();
+ sysErr->data1= 0;
+ sysErr->data2= __LINE__;
sendSignal(cntrRef, GSN_SYSTEM_ERROR, signal,
SystemError::SignalLength, JBB);
}
@@ -6174,7 +6178,7 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){
break;
case DictTabInfo::AllNodesMediumTable:
jam();
- noOfFragments = csystemnodes;
+ noOfFragments = 2 * csystemnodes;
break;
case DictTabInfo::AllNodesLargeTable:
jam();
@@ -7080,24 +7084,22 @@ void Dbdih::execDIGETPRIMREQ(Signal* signal)
ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE);
connectPtr.i = signal->theData[0];
- if(connectPtr.i != RNIL){
+ if(connectPtr.i != RNIL)
+ {
jam();
ptrCheckGuard(connectPtr, cconnectFileSize, connectRecord);
- ndbrequire(connectPtr.p->connectState == ConnectRecord::INUSE);
- getFragstore(tabPtr.p, fragId, fragPtr);
- connectPtr.p->nodeCount = extractNodeInfo(fragPtr.p, connectPtr.p->nodes);
signal->theData[0] = connectPtr.p->userpointer;
- signal->theData[1] = passThrough;
- signal->theData[2] = connectPtr.p->nodes[0];
- sendSignal(connectPtr.p->userblockref, GSN_DIGETPRIMCONF, signal, 3, JBB);
- return;
- }//if
- //connectPtr.i == RNIL -> question without connect record
+ }
+ else
+ {
+ jam();
+ signal->theData[0] = RNIL;
+ }
+
Uint32 nodes[MAX_REPLICAS];
getFragstore(tabPtr.p, fragId, fragPtr);
Uint32 count = extractNodeInfo(fragPtr.p, nodes);
- signal->theData[0] = RNIL;
signal->theData[1] = passThrough;
signal->theData[2] = nodes[0];
signal->theData[3] = nodes[1];
@@ -12944,7 +12946,7 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
Uint32 nodeOrder[MAX_REPLICAS];
const Uint32 noOfReplicas = extractNodeInfo(fragPtr.p, nodeOrder);
char buf[100];
- snprintf(buf, sizeof(buf), " Table %d Fragment %d - ", tabPtr.i, j);
+ BaseString::snprintf(buf, sizeof(buf), " Table %d Fragment %d - ", tabPtr.i, j);
for(Uint32 k = 0; k < noOfReplicas; k++){
char tmp[100];
BaseString::snprintf(tmp, sizeof(tmp), "%d ", nodeOrder[k]);
@@ -13155,7 +13157,7 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
getFragstore(tabPtr.p, fid, fragPtr);
char buf[100], buf2[100];
- snprintf(buf, sizeof(buf), " Fragment %d: noLcpReplicas==%d ",
+ BaseString::snprintf(buf, sizeof(buf), " Fragment %d: noLcpReplicas==%d ",
fid, fragPtr.p->noLcpReplicas);
Uint32 num=0;
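
The execDIGETPRIMREQ rewrite above folds the connect-record and no-connect-record cases into one reply path: the only remaining difference is whether theData[0] echoes the caller's stored userpointer or RNIL, while the replica list is always re-read from the fragment store (the INUSE assertion and the nodeCount bookkeeping disappear, matching the removal of ConnectRecord::nodeCount in Dbdih.hpp). A sketch of the DIGETPRIMCONF words, as far as the hunk shows them:

    // theData[0] = connectPtr.p->userpointer, or RNIL when no connect record was supplied
    // theData[1] = passThrough value echoed from the request
    // theData[2] = nodes[0]   (primary replica from extractNodeInfo)
    // theData[3] = nodes[1]   (next replica), and so on for the remaining replicas
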
diff --git a/ndb/src/kernel/blocks/dbdih/Makefile.am b/ndb/src/kernel/blocks/dbdih/Makefile.am
index 2ee8017ec13..d6ad380b806 100644
--- a/ndb/src/kernel/blocks/dbdih/Makefile.am
+++ b/ndb/src/kernel/blocks/dbdih/Makefile.am
@@ -7,3 +7,17 @@ include $(top_srcdir)/ndb/config/type_kernel.mk.am
# Don't update the files from bitkeeper
%::SCCS/s.%
+
+windoze-dsp: libdbdih.dsp
+
+libdbdih.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libdbdih_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dbdih/Makefile_old b/ndb/src/kernel/blocks/dbdih/Makefile_old
deleted file mode 100644
index 83c1b95b5c4..00000000000
--- a/ndb/src/kernel/blocks/dbdih/Makefile_old
+++ /dev/null
@@ -1,13 +0,0 @@
-include .defs.mk
-
-TYPE := kernel
-
-ARCHIVE_TARGET := dbdih
-
-DIRS := printSysfile
-
-SOURCES = \
- DbdihInit.cpp \
- DbdihMain.cpp
-
-include $(NDB_TOP)/Epilogue.mk
diff --git a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
index d6987f3e478..0c63cb5fe17 100644
--- a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
+++ b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
@@ -550,6 +550,11 @@ public:
UintR scanErrorCounter;
UintR scanLocalFragid;
UintR scanSchemaVersion;
+
+ /**
+ * This is _always_ main table, even in range scan
+ * in which case scanTcrec->fragmentptr is different
+ */
Uint32 fragPtrI;
UintR scanStoredProcId;
ScanState scanState;
@@ -2474,7 +2479,7 @@ private:
void sendExecFragRefLab(Signal* signal);
void fragrefLab(Signal* signal, BlockReference retRef,
Uint32 retPtr, Uint32 errorCode);
- void accFragRefLab(Signal* signal);
+ void abortAddFragOps(Signal* signal);
void rwConcludedLab(Signal* signal);
void sendsttorryLab(Signal* signal);
void initialiseRecordsLab(Signal* signal, Uint32 data, Uint32, Uint32);
@@ -2925,4 +2930,23 @@ Dblqh::ScanRecord::check_scan_batch_completed() const
(max_bytes > 0 && (m_curr_batch_size_bytes >= max_bytes));
}
+inline
+void
+Dblqh::i_get_acc_ptr(ScanRecord* scanP, Uint32* &acc_ptr, Uint32 index)
+{
+ if (index == 0) {
+ acc_ptr= (Uint32*)&scanP->scan_acc_op_ptr[0];
+ } else {
+ Uint32 attr_buf_index, attr_buf_rec;
+
+ AttrbufPtr regAttrPtr;
+ jam();
+ attr_buf_rec= (index + 31) / 32;
+ attr_buf_index= (index - 1) & 31;
+ regAttrPtr.i= scanP->scan_acc_op_ptr[attr_buf_rec];
+ ptrCheckGuard(regAttrPtr, cattrinbufFileSize, attrbuf);
+ acc_ptr= (Uint32*)&regAttrPtr.p->attrbuf[attr_buf_index];
+ }
+}
+
#endif
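
The i_get_acc_ptr definition above is moved into the header (its old copy in DblqhMain.cpp is deleted further down) so it can be inlined at every call site. Index 0 is stored directly in the scan record; higher indexes are spilled into attrbuf segments of 32 words each. A worked example of the index arithmetic, assuming scan_acc_op_ptr[1..] hold attrbuf record ids as the code implies:

    // index 1  -> attr_buf_rec = (1 + 31) / 32  = 1, attr_buf_index = (1 - 1)  & 31 = 0
    // index 32 -> attr_buf_rec = (32 + 31) / 32 = 1, attr_buf_index = (32 - 1) & 31 = 31
    // index 33 -> attr_buf_rec = (33 + 31) / 32 = 2, attr_buf_index = (33 - 1) & 31 = 0
    // i.e. each attrbuf referenced by scan_acc_op_ptr[rec] stores 32 consecutive entries.
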
diff --git a/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp b/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
index d0fef8753cb..ec29489180c 100644
--- a/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
+++ b/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
@@ -19,7 +19,6 @@
#define DBLQH_C
#include "Dblqh.hpp"
#include <ndb_limits.h>
-#include <new>
#define DEBUG(x) { ndbout << "LQH::" << x << endl; }
@@ -454,5 +453,5 @@ Dblqh::~Dblqh()
ctcNodeFailrecFileSize);
}//Dblqh::~Dblqh()
-BLOCK_FUNCTIONS(Dblqh);
+BLOCK_FUNCTIONS(Dblqh)
diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index cd15ad0c3b2..c79f4dfc6c7 100644
--- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -912,6 +912,10 @@ void Dblqh::execREAD_CONFIG_REQ(Signal* signal)
/* *********************************************************> */
/* LQHFRAGREQ: Create new fragments for a table. Sender DICT */
/* *********************************************************> */
+
+// this unbelievable mess could be replaced by one signal to LQH
+// and execute direct to local DICT to get everything at once
+
void Dblqh::execLQHFRAGREQ(Signal* signal)
{
jamEntry();
@@ -1049,6 +1053,11 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
addfragptr.p->lh3DistrBits = tlhstar;
addfragptr.p->tableType = tableType;
addfragptr.p->primaryTableId = primaryTableId;
+ //
+ addfragptr.p->tup1Connectptr = RNIL;
+ addfragptr.p->tup2Connectptr = RNIL;
+ addfragptr.p->tux1Connectptr = RNIL;
+ addfragptr.p->tux2Connectptr = RNIL;
if (DictTabInfo::isTable(tableType) ||
DictTabInfo::isHashIndex(tableType)) {
@@ -1329,15 +1338,21 @@ void Dblqh::execTUP_ADD_ATTCONF(Signal* signal)
{
jamEntry();
addfragptr.i = signal->theData[0];
+ // implies that operation was released on the other side
+ const bool lastAttr = signal->theData[1];
ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
switch (addfragptr.p->addfragStatus) {
case AddFragRecord::TUP_ATTR_WAIT1:
jam();
+ if (lastAttr)
+ addfragptr.p->tup1Connectptr = RNIL;
addfragptr.p->addfragStatus = AddFragRecord::TUP_ATTR_WAIT2;
sendAddAttrReq(signal);
break;
case AddFragRecord::TUP_ATTR_WAIT2:
jam();
+ if (lastAttr)
+ addfragptr.p->tup2Connectptr = RNIL;
if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) {
addfragptr.p->addfragStatus = AddFragRecord::TUX_ATTR_WAIT1;
sendAddAttrReq(signal);
@@ -1347,11 +1362,15 @@ void Dblqh::execTUP_ADD_ATTCONF(Signal* signal)
break;
case AddFragRecord::TUX_ATTR_WAIT1:
jam();
+ if (lastAttr)
+ addfragptr.p->tux1Connectptr = RNIL;
addfragptr.p->addfragStatus = AddFragRecord::TUX_ATTR_WAIT2;
sendAddAttrReq(signal);
break;
case AddFragRecord::TUX_ATTR_WAIT2:
jam();
+ if (lastAttr)
+ addfragptr.p->tux2Connectptr = RNIL;
goto done_with_attr;
break;
done_with_attr:
@@ -1455,6 +1474,7 @@ Dblqh::sendAddAttrReq(Signal* signal)
jam();
TupAddAttrConf* tupconf = (TupAddAttrConf*)signal->getDataPtrSend();
tupconf->userPtr = addfragptr.i;
+ tupconf->lastAttr = false;
sendSignal(reference(), GSN_TUP_ADD_ATTCONF,
signal, TupAddAttrConf::SignalLength, JBB);
return;
@@ -1485,6 +1505,7 @@ Dblqh::sendAddAttrReq(Signal* signal)
jam();
TuxAddAttrConf* tuxconf = (TuxAddAttrConf*)signal->getDataPtrSend();
tuxconf->userPtr = addfragptr.i;
+ tuxconf->lastAttr = false;
sendSignal(reference(), GSN_TUX_ADD_ATTRCONF,
signal, TuxAddAttrConf::SignalLength, JBB);
return;
@@ -1549,6 +1570,40 @@ void Dblqh::fragrefLab(Signal* signal,
return;
}//Dblqh::fragrefLab()
+/*
+ * Abort on-going ops.
+ */
+void Dblqh::abortAddFragOps(Signal* signal)
+{
+ fragptr.i = addfragptr.p->fragmentPtr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ signal->theData[0] = (Uint32)-1;
+ if (addfragptr.p->tup1Connectptr != RNIL) {
+ jam();
+ signal->theData[1] = addfragptr.p->tup1Connectptr;
+ sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB);
+ addfragptr.p->tup1Connectptr = RNIL;
+ }
+ if (addfragptr.p->tup2Connectptr != RNIL) {
+ jam();
+ signal->theData[1] = addfragptr.p->tup2Connectptr;
+ sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB);
+ addfragptr.p->tup2Connectptr = RNIL;
+ }
+ if (addfragptr.p->tux1Connectptr != RNIL) {
+ jam();
+ signal->theData[1] = addfragptr.p->tux1Connectptr;
+ sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB);
+ addfragptr.p->tux1Connectptr = RNIL;
+ }
+ if (addfragptr.p->tux2Connectptr != RNIL) {
+ jam();
+ signal->theData[1] = addfragptr.p->tux2Connectptr;
+ sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB);
+ addfragptr.p->tux2Connectptr = RNIL;
+ }
+}
+
/* ************>> */
/* ACCFRAGREF > */
/* ************>> */
@@ -1582,6 +1637,27 @@ void Dblqh::execTUPFRAGREF(Signal* signal)
fragptr.i = addfragptr.p->fragmentPtr;
ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
addfragptr.p->addfragErrorCode = terrorCode;
+
+ // no operation to release, just add some jams
+ switch (addfragptr.p->addfragStatus) {
+ case AddFragRecord::WAIT_TWO_TUP:
+ jam();
+ break;
+ case AddFragRecord::WAIT_ONE_TUP:
+ jam();
+ break;
+ case AddFragRecord::WAIT_TWO_TUX:
+ jam();
+ break;
+ case AddFragRecord::WAIT_ONE_TUX:
+ jam();
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+ abortAddFragOps(signal);
+
const Uint32 ref = addfragptr.p->dictBlockref;
const Uint32 senderData = addfragptr.p->dictConnectptr;
const Uint32 errorCode = addfragptr.p->addfragErrorCode;
@@ -1605,11 +1681,38 @@ void Dblqh::execTUXFRAGREF(Signal* signal)
void Dblqh::execTUP_ADD_ATTRREF(Signal* signal)
{
jamEntry();
-
addfragptr.i = signal->theData[0];
ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
terrorCode = signal->theData[1];
addfragptr.p->addfragErrorCode = terrorCode;
+
+ // operation was released on the other side
+ switch (addfragptr.p->addfragStatus) {
+ case AddFragRecord::TUP_ATTR_WAIT1:
+ jam();
+ ndbrequire(addfragptr.p->tup1Connectptr != RNIL);
+ addfragptr.p->tup1Connectptr = RNIL;
+ break;
+ case AddFragRecord::TUP_ATTR_WAIT2:
+ jam();
+ ndbrequire(addfragptr.p->tup2Connectptr != RNIL);
+ addfragptr.p->tup2Connectptr = RNIL;
+ break;
+ case AddFragRecord::TUX_ATTR_WAIT1:
+ jam();
+ ndbrequire(addfragptr.p->tux1Connectptr != RNIL);
+ addfragptr.p->tux1Connectptr = RNIL;
+ break;
+ case AddFragRecord::TUX_ATTR_WAIT2:
+ jam();
+ ndbrequire(addfragptr.p->tux2Connectptr != RNIL);
+ addfragptr.p->tux2Connectptr = RNIL;
+ break;
+ default:
+ ndbrequire(false);
+ break;
+ }
+ abortAddFragOps(signal);
const Uint32 Ref = addfragptr.p->dictBlockref;
const Uint32 senderData = addfragptr.p->dictConnectptr;
@@ -2981,6 +3084,7 @@ void Dblqh::execATTRINFO(Signal* signal)
return;
break;
default:
+ ndbout_c("%d", regTcPtr->transactionState);
ndbrequire(false);
break;
}//switch
@@ -7058,10 +7162,7 @@ void Dblqh::continueScanNextReqLab(Signal* signal)
// Update timer on tcConnectRecord
tcConnectptr.p->tcTimer = cLqhTimeOutCount;
-
init_acc_ptr_list(scanptr.p);
- scanptr.p->m_curr_batch_size_rows = 0;
- scanptr.p->m_curr_batch_size_bytes= 0;
scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT;
scanNextLoopLab(signal);
}//Dblqh::continueScanNextReqLab()
@@ -7219,6 +7320,8 @@ void Dblqh::closeScanRequestLab(Signal* signal)
scanptr.p->m_curr_batch_size_rows = 0;
scanptr.p->m_curr_batch_size_bytes= 0;
sendScanFragConf(signal, ZTRUE);
+ abort_scan(signal, scanptr.i, 0);
+ return;
break;
case TcConnectionrec::SCAN_TUPKEY:
case TcConnectionrec::SCAN_FIRST_STOPPED:
@@ -7260,22 +7363,32 @@ void Dblqh::scanLockReleasedLab(Signal* signal)
tcConnectptr.i = scanptr.p->scanTcrec;
ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
releaseActiveFrag(signal);
+
if (scanptr.p->scanReleaseCounter == scanptr.p->m_curr_batch_size_rows) {
if ((scanptr.p->scanErrorCounter > 0) ||
(scanptr.p->scanCompletedStatus == ZTRUE)) {
jam();
+ scanptr.p->m_curr_batch_size_rows = 0;
+ scanptr.p->m_curr_batch_size_bytes = 0;
closeScanLab(signal);
} else if (scanptr.p->check_scan_batch_completed() &&
scanptr.p->scanLockHold != ZTRUE) {
jam();
scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
sendScanFragConf(signal, ZFALSE);
+ } else if (scanptr.p->m_last_row && !scanptr.p->scanLockHold) {
+ jam();
+ closeScanLab(signal);
+ return;
} else {
jam();
/*
- We came here after releasing locks after receiving SCAN_NEXTREQ from TC. We only
- come here when scanHoldLock == ZTRUE
- */
+ * We came here after releasing locks after
+ * receiving SCAN_NEXTREQ from TC. We only come here
+ * when scanHoldLock == ZTRUE
+ */
+ scanptr.p->m_curr_batch_size_rows = 0;
+ scanptr.p->m_curr_batch_size_bytes = 0;
continueScanNextReqLab(signal);
}//if
} else if (scanptr.p->scanReleaseCounter < scanptr.p->m_curr_batch_size_rows) {
@@ -7362,25 +7475,6 @@ Dblqh::init_acc_ptr_list(ScanRecord* scanP)
scanP->scan_acc_index = 0;
}
-inline
-void
-Dblqh::i_get_acc_ptr(ScanRecord* scanP, Uint32* &acc_ptr, Uint32 index)
-{
- if (index == 0) {
- acc_ptr= (Uint32*)&scanP->scan_acc_op_ptr[0];
- } else {
- Uint32 attr_buf_index, attr_buf_rec;
-
- AttrbufPtr regAttrPtr;
- jam();
- attr_buf_rec= (index + 31) / 32;
- attr_buf_index= (index - 1) & 31;
- regAttrPtr.i= scanP->scan_acc_op_ptr[attr_buf_rec];
- ptrCheckGuard(regAttrPtr, cattrinbufFileSize, attrbuf);
- acc_ptr= (Uint32*)&regAttrPtr.p->attrbuf[attr_buf_index];
- }
-}
-
Uint32
Dblqh::get_acc_ptr_from_scan_record(ScanRecord* scanP,
Uint32 index,
@@ -7611,18 +7705,25 @@ void Dblqh::abort_scan(Signal* signal, Uint32 scan_ptr_i, Uint32 errcode){
jam();
scanptr.i = scan_ptr_i;
c_scanRecordPool.getPtr(scanptr);
+
+ fragptr.i = tcConnectptr.p->fragmentptr;
+ ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
finishScanrec(signal);
releaseScanrec(signal);
tcConnectptr.p->transactionState = TcConnectionrec::IDLE;
tcConnectptr.p->abortState = TcConnectionrec::ABORT_ACTIVE;
-
- ScanFragRef * ref = (ScanFragRef*)&signal->theData[0];
- ref->senderData = tcConnectptr.p->clientConnectrec;
- ref->transId1 = tcConnectptr.p->transid[0];
- ref->transId2 = tcConnectptr.p->transid[1];
- ref->errorCode = errcode;
- sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGREF, signal,
- ScanFragRef::SignalLength, JBB);
+
+ if(errcode)
+ {
+ jam();
+ ScanFragRef * ref = (ScanFragRef*)&signal->theData[0];
+ ref->senderData = tcConnectptr.p->clientConnectrec;
+ ref->transId1 = tcConnectptr.p->transid[0];
+ ref->transId2 = tcConnectptr.p->transid[1];
+ ref->errorCode = errcode;
+ sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGREF, signal,
+ ScanFragRef::SignalLength, JBB);
+ }
deleteTransidHash(signal);
releaseOprec(signal);
releaseTcrec(signal, tcConnectptr);
@@ -7904,6 +8005,13 @@ void Dblqh::nextScanConfScanLab(Signal* signal)
/*************************************************************
* STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
************************************************************ */
+ if (!scanptr.p->scanLockHold)
+ {
+ jam();
+ closeScanLab(signal);
+ return;
+ }
+
if (scanptr.p->scanCompletedStatus == ZTRUE) {
if ((scanptr.p->scanLockHold == ZTRUE) &&
(scanptr.p->m_curr_batch_size_rows > 0)) {
@@ -8404,8 +8512,6 @@ void Dblqh::tupScanCloseConfLab(Signal* signal)
ScanFragRef::SignalLength, JBB);
} else {
jam();
- scanptr.p->m_curr_batch_size_rows = 0;
- scanptr.p->m_curr_batch_size_bytes= 0;
sendScanFragConf(signal, ZSCAN_FRAG_CLOSED);
}//if
finishScanrec(signal);
@@ -8477,7 +8583,7 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq)
tFragPtr.i = fragptr.p->tableFragptr;
ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
scanptr.p->fragPtrI = fragptr.p->tableFragptr;
-
+
/**
* !idx uses 1 - (MAX_PARALLEL_SCANS_PER_FRAG - 1) = 1-11
* idx uses from MAX_PARALLEL_SCANS_PER_FRAG - MAX = 12-42)
@@ -8486,10 +8592,10 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq)
Uint32 stop = (idx ? MAX_PARALLEL_INDEX_SCANS_PER_FRAG : MAX_PARALLEL_SCANS_PER_FRAG - 1);
stop += start;
Uint32 free = tFragPtr.p->m_scanNumberMask.find(start);
-
+
if(free == Fragrecord::ScanNumberMask::NotFound || free >= stop){
jam();
-
+
if(scanPrio == 0){
jam();
return ScanFragRef::ZTOO_MANY_ACTIVE_SCAN_ERROR;
@@ -8500,16 +8606,15 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq)
*/
scanptr.p->scanState = ScanRecord::IN_QUEUE;
LocalDLFifoList<ScanRecord> queue(c_scanRecordPool,
- tFragPtr.p->m_queuedScans);
+ fragptr.p->m_queuedScans);
queue.add(scanptr);
return ZOK;
}
-
scanptr.p->scanNumber = free;
tFragPtr.p->m_scanNumberMask.clear(free);// Update mask
-
- LocalDLList<ScanRecord> active(c_scanRecordPool, tFragPtr.p->m_activeScans);
+
+ LocalDLList<ScanRecord> active(c_scanRecordPool, fragptr.p->m_activeScans);
active.add(scanptr);
if(scanptr.p->scanKeyinfoFlag){
jam();
@@ -8569,12 +8674,8 @@ void Dblqh::finishScanrec(Signal* signal)
{
release_acc_ptr_list(scanptr.p);
- FragrecordPtr tFragPtr;
- tFragPtr.i = scanptr.p->fragPtrI;
- ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
-
LocalDLFifoList<ScanRecord> queue(c_scanRecordPool,
- tFragPtr.p->m_queuedScans);
+ fragptr.p->m_queuedScans);
if(scanptr.p->scanState == ScanRecord::IN_QUEUE){
jam();
@@ -8592,9 +8693,13 @@ void Dblqh::finishScanrec(Signal* signal)
ndbrequire(tmp.p == scanptr.p);
}
- LocalDLList<ScanRecord> scans(c_scanRecordPool, tFragPtr.p->m_activeScans);
+ LocalDLList<ScanRecord> scans(c_scanRecordPool, fragptr.p->m_activeScans);
scans.release(scanptr);
+ FragrecordPtr tFragPtr;
+ tFragPtr.i = scanptr.p->fragPtrI;
+ ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
+
const Uint32 scanNumber = scanptr.p->scanNumber;
ndbrequire(!tFragPtr.p->m_scanNumberMask.get(scanNumber));
ScanRecordPtr restart;
@@ -8614,14 +8719,15 @@ void Dblqh::finishScanrec(Signal* signal)
return;
}
+ ndbrequire(restart.p->scanState == ScanRecord::IN_QUEUE);
+
ScanRecordPtr tmpScan = scanptr;
TcConnectionrecPtr tmpTc = tcConnectptr;
tcConnectptr.i = restart.p->scanTcrec;
ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
restart.p->scanNumber = scanNumber;
- restart.p->scanState = ScanRecord::WAIT_ACC_SCAN;
-
+
queue.remove(restart);
scans.add(restart);
if(restart.p->scanKeyinfoFlag){
@@ -8635,10 +8741,18 @@ void Dblqh::finishScanrec(Signal* signal)
ndbout_c("adding-r (%d %d)", restart.p->scanNumber, restart.p->fragPtrI);
#endif
}
-
- scanptr = restart;
- continueAfterReceivingAllAiLab(signal);
-
+
+ restart.p->scanState = ScanRecord::SCAN_FREE; // set in initScanRec
+ if(tcConnectptr.p->transactionState == TcConnectionrec::SCAN_STATE_USED)
+ {
+ jam();
+ scanptr = restart;
+ continueAfterReceivingAllAiLab(signal);
+ }
+ else
+ {
+ ndbrequire(tcConnectptr.p->transactionState == TcConnectionrec::WAIT_SCAN_AI);
+ }
scanptr = tmpScan;
tcConnectptr = tmpTc;
}//Dblqh::finishScanrec()
@@ -8725,7 +8839,7 @@ void Dblqh::sendKeyinfo20(Signal* signal,
sendSignal(ref, GSN_KEYINFO20, signal, 25, JBB);
src += KeyInfo20::DataLength;;
keyLen -= KeyInfo20::DataLength;
- } while(keyLen >= KeyInfo20::DataLength);
+ }
MEMCOPY_NO_WORDS(keyInfo->keyData, src, keyLen);
sendSignal(ref, GSN_KEYINFO20, signal,
@@ -8809,6 +8923,13 @@ void Dblqh::sendScanFragConf(Signal* signal, Uint32 scanCompleted)
conf->total_len= total_len;
sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGCONF,
signal, ScanFragConf::SignalLength, JBB);
+
+ if(!scanptr.p->scanLockHold)
+ {
+ jam();
+ scanptr.p->m_curr_batch_size_rows = 0;
+ scanptr.p->m_curr_batch_size_bytes= 0;
+ }
}//Dblqh::sendScanFragConf()
/* ######################################################################### */
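
Several DblqhMain.cpp hunks above move the reset of the per-batch counters out of the individual call sites and into sendScanFragConf itself: once a SCAN_FRAGCONF has been sent for a scan that does not hold locks, that batch is finished and the counters restart at zero; with lock-hold they are cleared only after scanLockReleasedLab has released the locks. A minimal sketch of the resulting invariant, assuming only the field names used above:

    // sketch: m_curr_batch_size_* always describe the batch currently being built
    void sendScanFragConf_sketch(ScanRecord* scanP, bool lockHold)
    {
      // ... assemble and send SCAN_FRAGCONF as in the real code ...
      if (!lockHold) {
        // nothing left to release, so the batch ends as soon as the CONF is out
        scanP->m_curr_batch_size_rows  = 0;
        scanP->m_curr_batch_size_bytes = 0;
      }
    }
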
diff --git a/ndb/src/kernel/blocks/dblqh/Makefile.am b/ndb/src/kernel/blocks/dblqh/Makefile.am
index 3a58dba742e..854860b269c 100644
--- a/ndb/src/kernel/blocks/dblqh/Makefile.am
+++ b/ndb/src/kernel/blocks/dblqh/Makefile.am
@@ -9,3 +9,17 @@ include $(top_srcdir)/ndb/config/type_kernel.mk.am
# Don't update the files from bitkeeper
%::SCCS/s.%
+
+windoze-dsp: libdblqh.dsp
+
+libdblqh.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libdblqh_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dblqh/Makefile_old b/ndb/src/kernel/blocks/dblqh/Makefile_old
deleted file mode 100644
index 520486d8058..00000000000
--- a/ndb/src/kernel/blocks/dblqh/Makefile_old
+++ /dev/null
@@ -1,12 +0,0 @@
-include .defs.mk
-
-TYPE := kernel
-
-ARCHIVE_TARGET := dblqh
-DIRS := redoLogReader
-
-SOURCES = \
- DblqhInit.cpp \
- DblqhMain.cpp
-
-include $(NDB_TOP)/Epilogue.mk
diff --git a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
index a209df24c44..fb90ccc8c90 100644
--- a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
+++ b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
@@ -1054,9 +1054,8 @@ public:
// Id of the ScanRecord this fragment scan belongs to
Uint32 scanRec;
- // The maximum number of operations that can be scanned before
- // returning to TC
- Uint16 scanFragConcurrency;
+ // The value of fragmentCompleted in the last received SCAN_FRAGCONF
+ Uint8 m_scan_frag_conf_status;
inline void startFragTimer(Uint32 timeVal){
scanFragTimer = timeVal;
@@ -1193,8 +1192,10 @@ public:
// Number of operation records per scanned fragment
// Number of operations in first batch
// Max number of bytes per batch
- Uint16 noOprecPerFrag;
- Uint16 first_batch_size;
+ union {
+ Uint16 first_batch_size_rows;
+ Uint16 batch_size_rows;
+ };
Uint32 batch_byte_size;
Uint32 scanRequestInfo; // ScanFrag format
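
In the ScanRecord the two row-count fields are folded into an anonymous union: they never need to be live at the same time, so one 16-bit slot serves both as the first-batch row count taken from SCAN_TABREQ and as the per-batch row count used for later SCAN_FRAGREQ/SCAN_NEXTREQ signals. A self-contained illustration of the aliasing (plain C++ types instead of the NDB typedefs):

    #include <cstdint>

    struct ScanRecordSketch {              // illustration only, not the real record
      union {
        uint16_t first_batch_size_rows;    // row budget for the first batch
        uint16_t batch_size_rows;          // row budget reused for every later batch
      };
    };

    static_assert(sizeof(ScanRecordSketch) == sizeof(uint16_t),
                  "the union adds no storage: both names address the same two bytes");
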
diff --git a/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp b/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
index e38089242c3..59c8237f20a 100644
--- a/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
+++ b/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
@@ -20,7 +20,6 @@
#include <ndb_limits.h>
#include <Properties.hpp>
#include <Configuration.hpp>
-#include <new>
#define DEBUG(x) { ndbout << "TC::" << x << endl; }
@@ -365,5 +364,5 @@ Dbtc::~Dbtc()
capiConnectFilesize);
}//Dbtc::~Dbtc()
-BLOCK_FUNCTIONS(Dbtc);
+BLOCK_FUNCTIONS(Dbtc)
diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
index d8b3ee10532..97931041e2a 100644
--- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -1271,7 +1271,10 @@ void Dbtc::execTCRELEASEREQ(Signal* signal)
if (tapiBlockref == apiConnectptr.p->ndbapiBlockref) {
if (apiConnectptr.p->apiConnectstate == CS_CONNECTED ||
(apiConnectptr.p->apiConnectstate == CS_ABORTING &&
- apiConnectptr.p->abortState == AS_IDLE)){
+ apiConnectptr.p->abortState == AS_IDLE) ||
+ (apiConnectptr.p->apiConnectstate == CS_STARTED &&
+ apiConnectptr.p->firstTcConnect == RNIL))
+ {
jam(); /* JUST REPLY OK */
releaseApiCon(signal, apiConnectptr.i);
signal->theData[0] = tuserpointer;
@@ -1879,7 +1882,6 @@ void Dbtc::packKeyData000Lab(Signal* signal,
Uint32 totalLen)
{
CacheRecord * const regCachePtr = cachePtr.p;
- UintR Tmp;
jam();
Uint32 len = 0;
@@ -6366,7 +6368,7 @@ void Dbtc::sendAbortedAfterTimeout(Signal* signal, int Tcheck)
*------------------------------------------------------------------*/
char buf[96]; buf[0] = 0;
char buf2[96];
- snprintf(buf, sizeof(buf), "TC %d: %d ops:",
+ BaseString::snprintf(buf, sizeof(buf), "TC %d: %d ops:",
__LINE__, apiConnectptr.i);
for(Uint32 i = 0; i<TloopCount; i++){
BaseString::snprintf(buf2, sizeof(buf2), "%s %d", buf, tmp[i]);
@@ -8503,14 +8505,16 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
apiConnectptr.i = scanTabReq->apiConnectPtr;
tabptr.i = scanTabReq->tableId;
- if (apiConnectptr.i >= capiConnectFilesize ||
- tabptr.i >= ctabrecFilesize) {
+ if (apiConnectptr.i >= capiConnectFilesize)
+ {
jam();
warningHandlerLab(signal);
return;
}//if
+
ptrAss(apiConnectptr, apiConnectRecord);
ApiConnectRecord * transP = apiConnectptr.p;
+
if (transP->apiConnectstate != CS_CONNECTED) {
jam();
// could be left over from TCKEYREQ rollback
@@ -8524,9 +8528,16 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
} else {
jam();
errCode = ZSTATE_ERROR;
- goto SCAN_TAB_error;
+ goto SCAN_TAB_error_no_state_change;
}
}
+
+ if(tabptr.i >= ctabrecFilesize)
+ {
+ errCode = ZUNKNOWN_TABLE_ERROR;
+ goto SCAN_TAB_error;
+ }
+
ptrAss(tabptr, tableRecord);
if ((aiLength == 0) ||
(!tabptr.p->checkTable(schemaVersion)) ||
@@ -8621,8 +8632,18 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
errCode = ZNO_SCANREC_ERROR;
goto SCAN_TAB_error;
- SCAN_TAB_error:
+SCAN_TAB_error:
jam();
+ /**
+ * Prepare for up coming ATTRINFO/KEYINFO
+ */
+ transP->apiConnectstate = CS_ABORTING;
+ transP->abortState = AS_IDLE;
+ transP->transid[0] = transid1;
+ transP->transid[1] = transid2;
+
+SCAN_TAB_error_no_state_change:
+
ScanTabRef * ref = (ScanTabRef*)&signal->theData[0];
ref->apiConnectPtr = transP->ndbapiConnect;
ref->transId1 = transid1;
@@ -8646,9 +8667,9 @@ void Dbtc::initScanrec(ScanRecordPtr scanptr,
scanptr.p->scanTableref = tabptr.i;
scanptr.p->scanSchemaVersion = scanTabReq->tableSchemaVersion;
scanptr.p->scanParallel = scanParallel;
- scanptr.p->noOprecPerFrag = noOprecPerFrag;
- scanptr.p->first_batch_size= scanTabReq->first_batch_size;
- scanptr.p->batch_byte_size= scanTabReq->batch_byte_size;
+ scanptr.p->first_batch_size_rows = scanTabReq->first_batch_size;
+ scanptr.p->batch_byte_size = scanTabReq->batch_byte_size;
+ scanptr.p->batch_size_rows = noOprecPerFrag;
Uint32 tmp = 0;
const UintR ri = scanTabReq->requestInfo;
@@ -8672,7 +8693,6 @@ void Dbtc::initScanrec(ScanRecordPtr scanptr,
ndbrequire(list.seize(ptr));
ptr.p->scanRec = scanptr.i;
ptr.p->scanFragId = 0;
- ptr.p->scanFragConcurrency = noOprecPerFrag;
ptr.p->m_apiPtr = cdata[i];
}//for
@@ -8945,6 +8965,25 @@ void Dbtc::execDIGETPRIMCONF(Signal* signal)
scanptr.i = scanFragptr.p->scanRec;
ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+ /**
+ * This must be false as select count(*) otherwise
+ * can "pass" committing on backup fragments and
+ * get incorrect row count
+ */
+ if(false && ScanFragReq::getReadCommittedFlag(scanptr.p->scanRequestInfo))
+ {
+ jam();
+ Uint32 max = 3+signal->theData[6];
+ Uint32 nodeid = getOwnNodeId();
+ for(Uint32 i = 3; i<max; i++)
+ if(signal->theData[i] == nodeid)
+ {
+ jam();
+ tnodeid = nodeid;
+ break;
+ }
+ }
+
{
/**
* Check table
@@ -8989,6 +9028,8 @@ void Dbtc::execDIGETPRIMCONF(Signal* signal)
scanFragptr.p->lqhBlockref = ref;
scanFragptr.p->m_connectCount = getNodeInfo(tnodeid).m_connectCount;
sendScanFragReq(signal, scanptr.p, scanFragptr.p);
+ if(ERROR_INSERTED(8035))
+ globalTransporterRegistry.performSend();
attrbufptr.i = cachePtr.p->firstAttrbuf;
while (attrbufptr.i != RNIL) {
jam();
@@ -8998,6 +9039,8 @@ void Dbtc::execDIGETPRIMCONF(Signal* signal)
attrbufptr.p,
ref);
attrbufptr.i = attrbufptr.p->attrbuf[ZINBUF_NEXT];
+ if(ERROR_INSERTED(8035))
+ globalTransporterRegistry.performSend();
}//while
scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
scanFragptr.p->startFragTimer(ctcTimer);
@@ -9141,6 +9184,7 @@ void Dbtc::execSCAN_FRAGCONF(Signal* signal)
const ScanFragConf * const conf = (ScanFragConf*)&signal->theData[0];
const Uint32 noCompletedOps = conf->completedOps;
+ const Uint32 status = conf->fragmentCompleted;
scanFragptr.i = conf->senderData;
c_scan_frag_pool.getPtr(scanFragptr);
@@ -9163,11 +9207,9 @@ void Dbtc::execSCAN_FRAGCONF(Signal* signal)
ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::LQH_ACTIVE);
- const Uint32 status = conf->fragmentCompleted;
-
if(scanptr.p->scanState == ScanRecord::CLOSING_SCAN){
jam();
- if(status == ZFALSE){
+ if(status == 0){
/**
* We have started closing = we sent a close -> ignore this
*/
@@ -9184,11 +9226,11 @@ void Dbtc::execSCAN_FRAGCONF(Signal* signal)
return;
}
- if(status == ZCLOSED && scanptr.p->scanNextFragId < scanptr.p->scanNoFrag){
+ if(noCompletedOps == 0 && status != 0 &&
+ scanptr.p->scanNextFragId < scanptr.p->scanNoFrag){
/**
* Start on next fragment
*/
- ndbrequire(noCompletedOps == 0);
scanFragptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF;
scanFragptr.p->startFragTimer(ctcTimer);
@@ -9218,6 +9260,7 @@ void Dbtc::execSCAN_FRAGCONF(Signal* signal)
scanptr.p->m_queued_count++;
}
+ scanFragptr.p->m_scan_frag_conf_status = status;
scanFragptr.p->m_ops = noCompletedOps;
scanFragptr.p->m_totalLen = total_len;
scanFragptr.p->scanFragState = ScanFragRec::QUEUED_FOR_DELIVERY;
@@ -9311,7 +9354,6 @@ void Dbtc::execSCAN_NEXTREQ(Signal* signal)
/*********************************************************************
* APPLICATION IS CLOSING THE SCAN.
**********************************************************************/
- ndbrequire(len == 0);
close_scan_req(signal, scanptr, true);
return;
}//if
@@ -9330,11 +9372,12 @@ void Dbtc::execSCAN_NEXTREQ(Signal* signal)
// Copy op ptrs so I dont overwrite them when sending...
memcpy(signal->getDataPtrSend()+25, signal->getDataPtr()+4, 4 * len);
- ScanFragNextReq * nextReq = (ScanFragNextReq*)&signal->theData[0];
- nextReq->closeFlag = ZFALSE;
- nextReq->transId1 = apiConnectptr.p->transid[0];
- nextReq->transId2 = apiConnectptr.p->transid[1];
- nextReq->batch_size_bytes= scanP->batch_byte_size;
+ ScanFragNextReq tmp;
+ tmp.closeFlag = ZFALSE;
+ tmp.transId1 = apiConnectptr.p->transid[0];
+ tmp.transId2 = apiConnectptr.p->transid[1];
+ tmp.batch_size_rows = scanP->batch_size_rows;
+ tmp.batch_size_bytes = scanP->batch_byte_size;
ScanFragList running(c_scan_frag_pool, scanP->m_running_scan_frags);
ScanFragList delivered(c_scan_frag_pool, scanP->m_delivered_scan_frags);
@@ -9344,15 +9387,37 @@ void Dbtc::execSCAN_NEXTREQ(Signal* signal)
c_scan_frag_pool.getPtr(scanFragptr);
ndbrequire(scanFragptr.p->scanFragState == ScanFragRec::DELIVERED);
- scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
scanFragptr.p->startFragTimer(ctcTimer);
-
scanFragptr.p->m_ops = 0;
- nextReq->senderData = scanFragptr.i;
- nextReq->batch_size_rows= scanFragptr.p->scanFragConcurrency;
- sendSignal(scanFragptr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
- ScanFragNextReq::SignalLength, JBB);
+ if(scanFragptr.p->m_scan_frag_conf_status)
+ {
+ /**
+ * last scan was complete
+ */
+ jam();
+ ndbrequire(scanptr.p->scanNextFragId < scanptr.p->scanNoFrag);
+ scanFragptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF;
+
+ tcConnectptr.i = scanptr.p->scanTcrec;
+ ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
+ scanFragptr.p->scanFragId = scanptr.p->scanNextFragId++;
+ signal->theData[0] = tcConnectptr.p->dihConnectptr;
+ signal->theData[1] = scanFragptr.i;
+ signal->theData[2] = scanptr.p->scanTableref;
+ signal->theData[3] = scanFragptr.p->scanFragId;
+ sendSignal(cdihblockref, GSN_DIGETPRIMREQ, signal, 4, JBB);
+ }
+ else
+ {
+ jam();
+ scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
+ ScanFragNextReq * req = (ScanFragNextReq*)signal->getDataPtrSend();
+ * req = tmp;
+ req->senderData = scanFragptr.i;
+ sendSignal(scanFragptr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
+ ScanFragNextReq::SignalLength, JBB);
+ }
delivered.remove(scanFragptr);
running.add(scanFragptr);
}//for
@@ -9416,7 +9481,7 @@ Dbtc::close_scan_req(Signal* signal, ScanRecordPtr scanPtr, bool req_received){
ndbrequire(curr.p->scanFragState == ScanFragRec::DELIVERED);
delivered.remove(curr);
- if(curr.p->m_ops > 0){
+ if(curr.p->m_ops > 0 && curr.p->m_scan_frag_conf_status == 0){
jam();
running.add(curr);
curr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
@@ -9551,7 +9616,7 @@ void Dbtc::sendScanFragReq(Signal* signal,
req->transId1 = apiConnectptr.p->transid[0];
req->transId2 = apiConnectptr.p->transid[1];
req->clientOpPtr = scanFragP->m_apiPtr;
- req->batch_size_rows= scanFragP->scanFragConcurrency;
+ req->batch_size_rows= scanP->batch_size_rows;
req->batch_size_bytes= scanP->batch_byte_size;
sendSignal(scanFragP->lqhBlockref, GSN_SCAN_FRAGREQ, signal,
ScanFragReq::SignalLength, JBB);
@@ -9573,6 +9638,8 @@ void Dbtc::sendScanTabConf(Signal* signal, ScanRecordPtr scanPtr) {
jam();
ops += 21;
}
+
+ Uint32 left = scanPtr.p->scanNoFrag - scanPtr.p->scanNextFragId;
ScanTabConf * conf = (ScanTabConf*)&signal->theData[0];
conf->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
@@ -9588,24 +9655,25 @@ void Dbtc::sendScanTabConf(Signal* signal, ScanRecordPtr scanPtr) {
ScanFragRecPtr curr = ptr; // Remove while iterating...
queued.next(ptr);
+ bool done = curr.p->m_scan_frag_conf_status && --left;
+
* ops++ = curr.p->m_apiPtr;
- * ops++ = curr.i;
+ * ops++ = done ? RNIL : curr.i;
* ops++ = (curr.p->m_totalLen << 10) + curr.p->m_ops;
queued.remove(curr);
- if(curr.p->m_ops > 0){
+ if(!done){
delivered.add(curr);
curr.p->scanFragState = ScanFragRec::DELIVERED;
curr.p->stopFragTimer();
} else {
- (* --ops) = ScanTabConf::EndOfData; ops++;
c_scan_frag_pool.release(curr);
curr.p->scanFragState = ScanFragRec::COMPLETED;
curr.p->stopFragTimer();
}
}
}
-
+
if(scanPtr.p->m_delivered_scan_frags.isEmpty() &&
scanPtr.p->m_running_scan_frags.isEmpty()){
conf->requestInfo = op_count | ScanTabConf::EndOfData;
@@ -10424,9 +10492,8 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal)
sfp.i,
sfp.p->scanFragState,
sfp.p->scanFragId);
- infoEvent(" nodeid=%d, concurr=%d, timer=%d",
+ infoEvent(" nodeid=%d, timer=%d",
refToNode(sfp.p->lqhBlockref),
- sfp.p->scanFragConcurrency,
sfp.p->scanFragTimer);
}
@@ -10504,7 +10571,7 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal)
sp.p->scanAiLength,
sp.p->scanParallel,
sp.p->scanReceivedOperations,
- sp.p->noOprecPerFrag);
+ sp.p->batch_size_rows);
infoEvent(" schv=%d, tab=%d, sproc=%d",
sp.p->scanSchemaVersion,
sp.p->scanTableref,
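
With the fragmentCompleted status of the last SCAN_FRAGCONF now remembered per fragment scan (m_scan_frag_conf_status), the SCAN_NEXTREQ handler above no longer blindly re-drives the same LQH: a fragment whose previous batch completed it is advanced to the next fragment via DIGETPRIMREQ, while an unfinished fragment simply receives another SCAN_NEXTREQ built from the stored batch sizes. A condensed sketch of that per-fragment decision, using the names from the hunk:

    if (scanFragptr.p->m_scan_frag_conf_status) {
      // previous batch finished the fragment: ask DIH for the next fragment's primary
      scanFragptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF;
      scanFragptr.p->scanFragId = scanptr.p->scanNextFragId++;
      // ... DIGETPRIMREQ to cdihblockref as above ...
    } else {
      // fragment not finished: request the next batch from the same LQH
      scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE;
      // ... SCAN_NEXTREQ with batch_size_rows / batch_byte_size as above ...
    }
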
diff --git a/ndb/src/kernel/blocks/dbtc/Makefile.am b/ndb/src/kernel/blocks/dbtc/Makefile.am
index 4aa514c0aba..98ee2639bac 100644
--- a/ndb/src/kernel/blocks/dbtc/Makefile.am
+++ b/ndb/src/kernel/blocks/dbtc/Makefile.am
@@ -7,3 +7,17 @@ include $(top_srcdir)/ndb/config/type_kernel.mk.am
# Don't update the files from bitkeeper
%::SCCS/s.%
+
+windoze-dsp: libdbtc.dsp
+
+libdbtc.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libdbtc_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dbtc/Makefile_old b/ndb/src/kernel/blocks/dbtc/Makefile_old
deleted file mode 100644
index ae876ab1f84..00000000000
--- a/ndb/src/kernel/blocks/dbtc/Makefile_old
+++ /dev/null
@@ -1,11 +0,0 @@
-include .defs.mk
-
-TYPE := kernel
-
-ARCHIVE_TARGET := dbtc
-SOURCES = \
- DbtcInit.cpp \
- DbtcMain.cpp
-
-include $(NDB_TOP)/Epilogue.mk
-
diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
index 55ad1d0910a..b48546576f9 100644
--- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
+++ b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -504,6 +504,7 @@ struct Fragoperrec {
Uint32 noOfNewAttrCount;
Uint32 charsetIndex;
BlockReference lqhBlockrefFrag;
+ bool inUse;
};
typedef Ptr<Fragoperrec> FragoperrecPtr;
@@ -1936,6 +1937,7 @@ private:
void setUpKeyArray(Tablerec* const regTabPtr);
bool addfragtotab(Tablerec* const regTabPtr, Uint32 fragId, Uint32 fragIndex);
void deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId);
+ void abortAddFragOp(Signal* signal);
void releaseTabDescr(Tablerec* const regTabPtr);
void getFragmentrec(FragrecordPtr& regFragPtr, Uint32 fragId, Tablerec* const regTabPtr);
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
index 49de0d80bcd..8e3ca6528c2 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
@@ -1113,6 +1113,7 @@ Dbtup::updateStartLab(Signal* signal,
regOperPtr->attrinbufLen);
if (retValue == -1) {
tupkeyErrorLab(signal);
+ return -1;
}//if
} else {
jam();
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
index d33adcd08e1..0d7430e662d 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
@@ -31,7 +31,6 @@
#include <signaldata/TupKey.hpp>
#include <signaldata/DropTab.hpp>
-#include <new>
#define DEBUG(x) { ndbout << "TUP::" << x << endl; }
@@ -215,7 +214,7 @@ Dbtup::~Dbtup()
}//Dbtup::~Dbtup()
-BLOCK_FUNCTIONS(Dbtup);
+BLOCK_FUNCTIONS(Dbtup)
/* **************************************************************** */
/* ---------------------------------------------------------------- */
@@ -632,14 +631,11 @@ void Dbtup::execREAD_CONFIG_REQ(Signal* signal)
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_FRAG, &cnoOfFragrec));
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_OP_RECS, &cnoOfOprec));
-
- // MemorySpaceTuples is specified in 8k pages, divide by 4 for 32k pages
- Uint32 tmp;
- ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_PAGE, &tmp));
- Uint64 pages = (tmp * 2048 + (ZWORDS_ON_PAGE - 1))/ (Uint64)ZWORDS_ON_PAGE;
- cnoOfPage = (Uint32)pages;
+
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_PAGE, &cnoOfPage));
Uint32 noOfTriggers= 0;
+ Uint32 tmp= 0;
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_PAGE_RANGE, &tmp));
initPageRangeSize(tmp);
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_TABLE, &cnoOfTablerec));
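
The execREAD_CONFIG_REQ hunk above stops rescaling CFG_TUP_PAGE: the parameter is now used directly as the number of TUP pages instead of being read in 8 KB units and converted to 32 KB pages. For reference, a sketch of the conversion that was removed, assuming ZWORDS_ON_PAGE is the 32 KB page size in 32-bit words (8192) and using the NDB integer typedefs:

    // old interpretation (deleted above): tmp counts 8 KB pages, each worth 2048 words,
    // rounded up to whole 32 KB pages
    Uint64 old_cnoOfPage(Uint32 tmp)          // hypothetical helper, for illustration only
    {
      const Uint64 ZWORDS_ON_PAGE = 8192;     // assumed value
      return (Uint64(tmp) * 2048 + (ZWORDS_ON_PAGE - 1)) / ZWORDS_ON_PAGE;
    }
    // e.g. tmp = 10 (80 KB) -> (20480 + 8191) / 8192 = 3 pages of 32 KB
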
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
index efea312b865..405f790954e 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
@@ -39,11 +39,18 @@
/* ---------------------------------------------------------------- */
void Dbtup::execTUPFRAGREQ(Signal* signal)
{
+ ljamEntry();
+
+ if (signal->theData[0] == (Uint32)-1) {
+ ljam();
+ abortAddFragOp(signal);
+ return;
+ }
+
FragoperrecPtr fragOperPtr;
FragrecordPtr regFragPtr;
TablerecPtr regTabPtr;
- ljamEntry();
Uint32 userptr = signal->theData[0];
Uint32 userblockref = signal->theData[1];
Uint32 reqinfo = signal->theData[2];
@@ -62,6 +69,17 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
Uint32 noOfAttributeGroups = signal->theData[12];
Uint32 globalCheckpointIdIndicator = signal->theData[13];
+#ifndef VM_TRACE
+ // config mismatch - do not crash if release compiled
+ if (regTabPtr.i >= cnoOfTablerec) {
+ ljam();
+ signal->theData[0] = userptr;
+ signal->theData[1] = 800;
+ sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB);
+ return;
+ }
+#endif
+
ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);
if (cfirstfreeFragopr == RNIL) {
ljam();
@@ -132,6 +150,15 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
return;
}//if
+ if (ERROR_INSERTED(4007) && regTabPtr.p->fragid[0] == fragId ||
+ ERROR_INSERTED(4008) && regTabPtr.p->fragid[1] == fragId) {
+ ljam();
+ terrorCode = 1;
+ fragrefuse4Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId);
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }
+
if (regTabPtr.p->tableStatus == NOT_DEFINED) {
ljam();
//-------------------------------------------------------------------------------------
@@ -243,6 +270,7 @@ void Dbtup::seizeFragoperrec(FragoperrecPtr& fragOperPtr)
ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec);
cfirstfreeFragopr = fragOperPtr.p->nextFragoprec;
fragOperPtr.p->nextFragoprec = RNIL;
+ fragOperPtr.p->inUse = true;
}//Dbtup::seizeFragoperrec()
/* **************************************************************** */
@@ -273,6 +301,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
ndbrequire(fragOperPtr.p->attributeCount > 0);
fragOperPtr.p->attributeCount--;
+ const bool lastAttr = (fragOperPtr.p->attributeCount == 0);
if ((regTabPtr.p->tableStatus == DEFINING) &&
(fragOperPtr.p->definingFragment)) {
@@ -346,20 +375,30 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
return;
}//if
- if ((fragOperPtr.p->attributeCount == 0) &&
- (fragOperPtr.p->freeNullBit != 0)) {
+ if (lastAttr && (fragOperPtr.p->freeNullBit != 0)) {
ljam();
terrorCode = ZINCONSISTENT_NULL_ATTRIBUTE_COUNT;
addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
return;
}//if
}//if
+ if (ERROR_INSERTED(4009) && regTabPtr.p->fragid[0] == fragId && attrId == 0 ||
+ ERROR_INSERTED(4010) && regTabPtr.p->fragid[0] == fragId && lastAttr ||
+ ERROR_INSERTED(4011) && regTabPtr.p->fragid[1] == fragId && attrId == 0 ||
+ ERROR_INSERTED(4012) && regTabPtr.p->fragid[1] == fragId && lastAttr) {
+ ljam();
+ terrorCode = 1;
+ addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
+ CLEAR_ERROR_INSERT_VALUE;
+ return;
+ }
/* **************************************************************** */
/* ************** TUP_ADD_ATTCONF ****************** */
/* **************************************************************** */
signal->theData[0] = fragOperPtr.p->lqhPtrFrag;
- sendSignal(fragOperPtr.p->lqhBlockrefFrag, GSN_TUP_ADD_ATTCONF, signal, 1, JBB);
- if (fragOperPtr.p->attributeCount > 0) {
+ signal->theData[1] = lastAttr;
+ sendSignal(fragOperPtr.p->lqhBlockrefFrag, GSN_TUP_ADD_ATTCONF, signal, 2, JBB);
+ if (! lastAttr) {
ljam();
return; /* EXIT AND WAIT FOR MORE */
}//if
@@ -491,11 +530,11 @@ void Dbtup::fragrefuseLab(Signal* signal, FragoperrecPtr fragOperPtr)
void Dbtup::releaseFragoperrec(FragoperrecPtr fragOperPtr)
{
+ fragOperPtr.p->inUse = false;
fragOperPtr.p->nextFragoprec = cfirstfreeFragopr;
cfirstfreeFragopr = fragOperPtr.i;
}//Dbtup::releaseFragoperrec()
-
void Dbtup::deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId)
{
for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
@@ -510,6 +549,20 @@ void Dbtup::deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId)
ndbrequire(false);
}//Dbtup::deleteFragTab()
+/*
+ * LQH aborts on-going create table operation. The table is later
+ * dropped by DICT.
+ */
+void Dbtup::abortAddFragOp(Signal* signal)
+{
+ FragoperrecPtr fragOperPtr;
+
+ fragOperPtr.i = signal->theData[1];
+ ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec);
+ ndbrequire(fragOperPtr.p->inUse);
+ releaseFragoperrec(fragOperPtr);
+}
+
void
Dbtup::execDROP_TAB_REQ(Signal* signal)
{
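
The new Dbtup::abortAddFragOp is the receiving half of the cleanup protocol added in DblqhMain.cpp above: when adding a fragment or its attributes fails part-way, LQH re-uses TUPFRAGREQ (and TUXFRAGREQ) with theData[0] set to (Uint32)-1 as an abort marker and the peer's connect pointer in theData[1]; TUP recognises the marker at the top of execTUPFRAGREQ and releases the pending Fragoperrec instead of starting a new add. A sketch of the two-word abort signal, with all names taken from the hunks:

    // sender side (Dblqh::abortAddFragOps)
    signal->theData[0] = (Uint32)-1;                    // abort marker, not a user pointer
    signal->theData[1] = addfragptr.p->tup1Connectptr;  // TUP's Fragoperrec index
    sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB);

    // receiver side (Dbtup::abortAddFragOp)
    fragOperPtr.i = signal->theData[1];
    ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec);
    ndbrequire(fragOperPtr.p->inUse);                   // must be a live add operation
    releaseFragoperrec(fragOperPtr);
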
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
index e6cc6f68842..cbb165c3eb1 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
@@ -706,7 +706,10 @@ Dbtup::checkUpdateOfPrimaryKey(Uint32* updateBuffer, Tablerec* const regTabPtr)
tOutBufIndex = 0;
tMaxRead = MAX_KEY_SIZE_IN_WORDS;
+ bool tmp = tXfrmFlag;
+ tXfrmFlag = false;
ndbrequire((this->*f)(&keyReadBuffer[0], ahOut, attrDescriptor, attributeOffset));
+ tXfrmFlag = tmp;
ndbrequire(tOutBufIndex == ahOut->getDataSize());
if (ahIn.getDataSize() != ahOut->getDataSize()) {
ljam();
diff --git a/ndb/src/kernel/blocks/dbtup/Makefile.am b/ndb/src/kernel/blocks/dbtup/Makefile.am
index 7e94a01d43b..e51410e6be3 100644
--- a/ndb/src/kernel/blocks/dbtup/Makefile.am
+++ b/ndb/src/kernel/blocks/dbtup/Makefile.am
@@ -25,3 +25,17 @@ include $(top_srcdir)/ndb/config/type_kernel.mk.am
# Don't update the files from bitkeeper
%::SCCS/s.%
+
+windoze-dsp: libdbtup.dsp
+
+libdbtup.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libdbtup_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dbtup/Makefile_old b/ndb/src/kernel/blocks/dbtup/Makefile_old
deleted file mode 100644
index 87146f4b441..00000000000
--- a/ndb/src/kernel/blocks/dbtup/Makefile_old
+++ /dev/null
@@ -1,26 +0,0 @@
-include .defs.mk
-
-TYPE := kernel
-
-ARCHIVE_TARGET := dbtup
-SOURCES = \
- DbtupExecQuery.cpp \
- DbtupBuffer.cpp \
- DbtupRoutines.cpp \
- DbtupCommit.cpp \
- DbtupFixAlloc.cpp \
- DbtupTrigger.cpp \
- DbtupAbort.cpp \
- DbtupLCP.cpp \
- DbtupUndoLog.cpp \
- DbtupPageMap.cpp \
- DbtupPagMan.cpp \
- DbtupStoredProcDef.cpp \
- DbtupMeta.cpp \
- DbtupTabDesMan.cpp \
- DbtupGen.cpp \
- DbtupSystemRestart.cpp \
- DbtupIndex.cpp \
- DbtupDebug.cpp
-
-include $(NDB_TOP)/Epilogue.mk
diff --git a/ndb/src/kernel/blocks/dbtux/Dbtux.hpp b/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
index 8896324f793..2c96271eb5d 100644
--- a/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
+++ b/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
@@ -17,7 +17,6 @@
#ifndef DBTUX_H
#define DBTUX_H
-#include <new>
#include <ndb_limits.h>
#include <SimulatedBlock.hpp>
#include <AttributeDescriptor.hpp>
@@ -575,6 +574,7 @@ private:
void execDROP_TAB_REQ(Signal* signal);
bool allocDescEnt(IndexPtr indexPtr);
void freeDescEnt(IndexPtr indexPtr);
+ void abortAddFragOp(Signal* signal);
void dropIndex(Signal* signal, IndexPtr indexPtr, Uint32 senderRef, Uint32 senderData);
/*
@@ -684,6 +684,7 @@ private:
friend class NdbOut& operator<<(NdbOut&, const ScanOp&);
friend class NdbOut& operator<<(NdbOut&, const Index&);
friend class NdbOut& operator<<(NdbOut&, const Frag&);
+ friend class NdbOut& operator<<(NdbOut&, const FragOp&);
friend class NdbOut& operator<<(NdbOut&, const NodeHandle&);
FILE* debugFile;
NdbOut debugOut;
@@ -1064,7 +1065,7 @@ Dbtux::Index::Index() :
m_fragId[i] = ZNIL;
m_fragPtrI[i] = RNIL;
};
-};
+}
// Dbtux::Frag
@@ -1101,7 +1102,7 @@ Dbtux::FragOp::FragOp() :
m_fragNo(ZNIL),
m_numAttrsRecvd(ZNIL)
{
-};
+}
// Dbtux::NodeHandle
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
index c5c22264460..1e1b0d1d5b6 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
@@ -404,6 +404,19 @@ operator<<(NdbOut& out, const Dbtux::Frag& frag)
}
NdbOut&
+operator<<(NdbOut& out, const Dbtux::FragOp& fragOp)
+{
+ out << "[FragOp " << hex << &fragOp;
+ out << " [userPtr " << dec << fragOp.m_userPtr << "]";
+ out << " [indexId " << dec << fragOp.m_indexId << "]";
+ out << " [fragId " << dec << fragOp.m_fragId << "]";
+ out << " [fragNo " << dec << fragOp.m_fragNo << "]";
+ out << " numAttrsRecvd " << dec << fragOp.m_numAttrsRecvd << "]";
+ out << "]";
+ return out;
+}
+
+NdbOut&
operator<<(NdbOut& out, const Dbtux::NodeHandle& node)
{
const Dbtux::Frag& frag = node.m_frag;
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
index ded02696a89..8990d6c86b6 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
@@ -24,13 +24,8 @@ Dbtux::Dbtux(const Configuration& conf) :
#ifdef VM_TRACE
debugFile(0),
debugOut(*new NullOutputStream()),
- // until ndb_mgm supports dump
-#ifdef DBTUX_DEBUG_TREE
- debugFlags(DebugTree),
-#else
debugFlags(0),
#endif
-#endif
c_internalStartPhase(0),
c_typeOfStart(NodeState::ST_ILLEGAL_TYPE),
c_dataBuffer(0)
@@ -86,7 +81,7 @@ Dbtux::execCONTINUEB(Signal* signal)
jamEntry();
const Uint32* data = signal->getDataPtr();
switch (data[0]) {
- case TuxContinueB::DropIndex:
+ case TuxContinueB::DropIndex: // currently unused
{
IndexPtr indexPtr;
c_indexPool.getPtr(indexPtr, data[1]);
@@ -174,7 +169,7 @@ Dbtux::execREAD_CONFIG_REQ(Signal* signal)
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_ATTRIBUTE, &nAttribute));
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_SCAN_OP, &nScanOp));
- const Uint32 nDescPage = (nIndex + nAttribute + DescPageSize - 1) / DescPageSize;
+ const Uint32 nDescPage = (nIndex * DescHeadSize + nAttribute * DescAttrSize + DescPageSize - 1) / DescPageSize;
const Uint32 nScanBoundWords = nScanOp * ScanBoundSegmentSize * 4;
c_indexPool.setSize(nIndex);
@@ -298,4 +293,4 @@ Dbtux::copyAttrs(const Frag& frag, ConstData data1, Data data2, unsigned maxlen2
#endif
}
-BLOCK_FUNCTIONS(Dbtux);
+BLOCK_FUNCTIONS(Dbtux)
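
The execREAD_CONFIG_REQ hunk above replaces a per-object page count with a ceiling division over the actual descriptor words: each index needs DescHeadSize words and each attribute DescAttrSize words, and the descriptor page pool must hold ceil(totalWords / DescPageSize) pages. A small stand-alone sketch of that sizing arithmetic; the constant values below are placeholders for illustration only, the real ones live in Dbtux.hpp:

// Sketch of the ceiling-division sizing used for the descriptor page pool.
#include <cstdio>

typedef unsigned Uint32;

static const Uint32 DescHeadSize = 4;    // placeholder value
static const Uint32 DescAttrSize = 4;    // placeholder value
static const Uint32 DescPageSize = 256;  // placeholder value

// pages needed for nIndex indexes with nAttribute attributes in total:
// ceil(totalWords / DescPageSize)
static Uint32 descPages(Uint32 nIndex, Uint32 nAttribute) {
  const Uint32 totalWords = nIndex * DescHeadSize + nAttribute * DescAttrSize;
  return (totalWords + DescPageSize - 1) / DescPageSize;
}

int main() {
  // e.g. 128 indexes with 1024 attributes in total
  std::printf("pages=%u\n", descPages(128, 1024));
  return 0;
}
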
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
index 1577c5045e0..b7526593a08 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
@@ -29,6 +29,11 @@ void
Dbtux::execTUXFRAGREQ(Signal* signal)
{
jamEntry();
+ if (signal->theData[0] == (Uint32)-1) {
+ jam();
+ abortAddFragOp(signal);
+ return;
+ }
const TuxFragReq reqCopy = *(const TuxFragReq*)signal->getDataPtr();
const TuxFragReq* const req = &reqCopy;
IndexPtr indexPtr;
@@ -61,6 +66,11 @@ Dbtux::execTUXFRAGREQ(Signal* signal)
fragOpPtr.p->m_fragId = req->fragId;
fragOpPtr.p->m_fragNo = indexPtr.p->m_numFrags;
fragOpPtr.p->m_numAttrsRecvd = 0;
+#ifdef VM_TRACE
+ if (debugFlags & DebugMeta) {
+ debugOut << "Seize frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
+ }
+#endif
// check if index has place for more fragments
ndbrequire(indexPtr.p->m_numFrags < MaxIndexFragments);
// seize new fragment record
@@ -129,6 +139,14 @@ Dbtux::execTUXFRAGREQ(Signal* signal)
debugOut << "Add frag " << fragPtr.i << " " << *fragPtr.p << endl;
}
#endif
+ // error inserts
+ if (ERROR_INSERTED(12001) && fragOpPtr.p->m_fragNo == 0 ||
+ ERROR_INSERTED(12002) && fragOpPtr.p->m_fragNo == 1) {
+ jam();
+ errorCode = (TuxFragRef::ErrorCode)1;
+ CLEAR_ERROR_INSERT_VALUE;
+ break;
+ }
// success
TuxFragConf* const conf = (TuxFragConf*)signal->getDataPtrSend();
conf->userPtr = req->userPtr;
@@ -145,10 +163,18 @@ Dbtux::execTUXFRAGREQ(Signal* signal)
ref->errorCode = errorCode;
sendSignal(req->userRef, GSN_TUXFRAGREF,
signal, TuxFragRef::SignalLength, JBB);
- if (fragOpPtr.i != RNIL)
+ if (fragOpPtr.i != RNIL) {
+#ifdef VM_TRACE
+ if (debugFlags & DebugMeta) {
+ debugOut << "Release on frag error frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
+ }
+#endif
c_fragOpPool.release(fragOpPtr);
- if (indexPtr.i != RNIL)
- dropIndex(signal, indexPtr, 0, 0);
+ }
+ if (indexPtr.i != RNIL) {
+ jam();
+ // let DICT drop the unfinished index
+ }
}
void
@@ -203,7 +229,16 @@ Dbtux::execTUX_ADD_ATTRREQ(Signal* signal)
}
}
#endif
- if (indexPtr.p->m_numAttrs == fragOpPtr.p->m_numAttrsRecvd) {
+ const bool lastAttr = (indexPtr.p->m_numAttrs == fragOpPtr.p->m_numAttrsRecvd);
+ if (ERROR_INSERTED(12003) && fragOpPtr.p->m_fragNo == 0 && attrId == 0 ||
+ ERROR_INSERTED(12004) && fragOpPtr.p->m_fragNo == 0 && lastAttr ||
+ ERROR_INSERTED(12005) && fragOpPtr.p->m_fragNo == 1 && attrId == 0 ||
+ ERROR_INSERTED(12006) && fragOpPtr.p->m_fragNo == 1 && lastAttr) {
+ errorCode = (TuxAddAttrRef::ErrorCode)1;
+ CLEAR_ERROR_INSERT_VALUE;
+ break;
+ }
+ if (lastAttr) {
jam();
// initialize tree header
TreeHead& tree = fragPtr.p->m_tree;
@@ -246,11 +281,17 @@ Dbtux::execTUX_ADD_ATTRREQ(Signal* signal)
}
#endif
// fragment is defined
+#ifdef VM_TRACE
+ if (debugFlags & DebugMeta) {
+ debugOut << "Release frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
+ }
+#endif
c_fragOpPool.release(fragOpPtr);
}
// success
TuxAddAttrConf* conf = (TuxAddAttrConf*)signal->getDataPtrSend();
conf->userPtr = fragOpPtr.p->m_userPtr;
+ conf->lastAttr = lastAttr;
sendSignal(fragOpPtr.p->m_userRef, GSN_TUX_ADD_ATTRCONF,
signal, TuxAddAttrConf::SignalLength, JBB);
return;
@@ -261,8 +302,32 @@ Dbtux::execTUX_ADD_ATTRREQ(Signal* signal)
ref->errorCode = errorCode;
sendSignal(fragOpPtr.p->m_userRef, GSN_TUX_ADD_ATTRREF,
signal, TuxAddAttrRef::SignalLength, JBB);
+#ifdef VM_TRACE
+ if (debugFlags & DebugMeta) {
+ debugOut << "Release on attr error frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
+ }
+#endif
c_fragOpPool.release(fragOpPtr);
- dropIndex(signal, indexPtr, 0, 0);
+ // let DICT drop the unfinished index
+}
+
+/*
+ * LQH aborts on-going create index operation.
+ */
+void
+Dbtux::abortAddFragOp(Signal* signal)
+{
+ FragOpPtr fragOpPtr;
+ IndexPtr indexPtr;
+ c_fragOpPool.getPtr(fragOpPtr, signal->theData[1]);
+ c_indexPool.getPtr(indexPtr, fragOpPtr.p->m_indexId);
+#ifdef VM_TRACE
+ if (debugFlags & DebugMeta) {
+ debugOut << "Release on abort frag op " << fragOpPtr.i << " " << *fragOpPtr.p << endl;
+ }
+#endif
+ c_fragOpPool.release(fragOpPtr);
+ // let DICT drop the unfinished index
}
/*
@@ -341,20 +406,13 @@ Dbtux::dropIndex(Signal* signal, IndexPtr indexPtr, Uint32 senderRef, Uint32 sen
{
jam();
indexPtr.p->m_state = Index::Dropping;
- // drop one fragment at a time
- if (indexPtr.p->m_numFrags > 0) {
+ // drop fragments
+ while (indexPtr.p->m_numFrags > 0) {
jam();
- unsigned i = --indexPtr.p->m_numFrags;
+ Uint32 i = --indexPtr.p->m_numFrags;
FragPtr fragPtr;
c_fragPool.getPtr(fragPtr, indexPtr.p->m_fragPtrI[i]);
c_fragPool.release(fragPtr);
- // the real time break is not used for anything currently
- signal->theData[0] = TuxContinueB::DropIndex;
- signal->theData[1] = indexPtr.i;
- signal->theData[2] = senderRef;
- signal->theData[3] = senderData;
- sendSignal(reference(), GSN_CONTINUEB, signal, 4, JBB);
- return;
}
// drop attributes
if (indexPtr.p->m_descPage != RNIL) {
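
Summary of the DbtuxMeta.cpp changes above: a TUXFRAGREQ with theData[0] == (Uint32)-1 is now treated as an abort from LQH and routed to the new abortAddFragOp(); the failure paths only release the pending frag-op record and leave it to DICT to drop the unfinished index instead of calling dropIndex locally; TUX_ADD_ATTRCONF now carries lastAttr so the caller knows when the fragment is complete; dropIndex releases all fragments in one pass rather than taking a CONTINUEB round trip per fragment; and ERROR_INSERTED points 12001-12006 let tests force the new error paths. Below is a rough stand-alone sketch of the error-insert guard pattern, with simplified stand-ins for the real ERROR_INSERTED / CLEAR_ERROR_INSERT_VALUE machinery:

// Sketch of the error-insert guard pattern (stand-ins, not the NDB macros).
#include <cstdio>

typedef unsigned Uint32;

static Uint32 g_errorInsertValue = 0;           // set by a test client
static bool errorInserted(Uint32 code) { return g_errorInsertValue == code; }
static void clearErrorInsert() { g_errorInsertValue = 0; }

// returns a non-zero error code when the injected fault fires
static Uint32 addFragment(Uint32 fragNo) {
  if ((errorInserted(12001) && fragNo == 0) ||
      (errorInserted(12002) && fragNo == 1)) {
    clearErrorInsert();
    return 1;                                   // forced failure path
  }
  return 0;                                     // success path
}

int main() {
  g_errorInsertValue = 12002;                    // simulate "error insert 12002"
  std::printf("frag 0 -> %u\n", addFragment(0)); // 0: fault not hit
  std::printf("frag 1 -> %u\n", addFragment(1)); // 1: injected failure
  return 0;
}
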
diff --git a/ndb/src/kernel/blocks/dbtux/Makefile.am b/ndb/src/kernel/blocks/dbtux/Makefile.am
index 7d012924522..b5951e8ed37 100644
--- a/ndb/src/kernel/blocks/dbtux/Makefile.am
+++ b/ndb/src/kernel/blocks/dbtux/Makefile.am
@@ -18,3 +18,17 @@ include $(top_srcdir)/ndb/config/type_kernel.mk.am
# Don't update the files from bitkeeper
%::SCCS/s.%
+
+windoze-dsp: libdbtux.dsp
+
+libdbtux.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libdbtux_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dbtux/Makefile_old b/ndb/src/kernel/blocks/dbtux/Makefile_old
deleted file mode 100644
index 30927c31848..00000000000
--- a/ndb/src/kernel/blocks/dbtux/Makefile_old
+++ /dev/null
@@ -1,17 +0,0 @@
-include .defs.mk
-
-TYPE = kernel
-
-ARCHIVE_TARGET = dbtux
-
-SOURCES = \
- DbtuxGen.cpp \
- DbtuxMeta.cpp \
- DbtuxMaint.cpp \
- DbtuxNode.cpp \
- DbtuxTree.cpp \
- DbtuxScan.cpp \
- DbtuxCmp.cpp \
- DbtuxDebug.cpp
-
-include $(NDB_TOP)/Epilogue.mk
diff --git a/ndb/src/kernel/blocks/dbutil/DbUtil.cpp b/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
index f7e8981e122..b94bb8e6d7e 100644
--- a/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
+++ b/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
@@ -158,7 +158,7 @@ DbUtil::~DbUtil()
{
}
-BLOCK_FUNCTIONS(DbUtil);
+BLOCK_FUNCTIONS(DbUtil)
void
DbUtil::releasePrepare(PreparePtr prepPtr) {
diff --git a/ndb/src/kernel/blocks/dbutil/Makefile.am b/ndb/src/kernel/blocks/dbutil/Makefile.am
index 763875d578f..925356c2f76 100644
--- a/ndb/src/kernel/blocks/dbutil/Makefile.am
+++ b/ndb/src/kernel/blocks/dbutil/Makefile.am
@@ -7,3 +7,17 @@ include $(top_srcdir)/ndb/config/type_kernel.mk.am
# Don't update the files from bitkeeper
%::SCCS/s.%
+
+windoze-dsp: libdbutil.dsp
+
+libdbutil.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libdbutil_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/dbutil/Makefile_old b/ndb/src/kernel/blocks/dbutil/Makefile_old
deleted file mode 100644
index 54b7326e4e5..00000000000
--- a/ndb/src/kernel/blocks/dbutil/Makefile_old
+++ /dev/null
@@ -1,8 +0,0 @@
-include .defs.mk
-
-TYPE := kernel
-
-ARCHIVE_TARGET := dbutil
-SOURCES = DbUtil.cpp
-
-include $(NDB_TOP)/Epilogue.mk
diff --git a/ndb/src/kernel/blocks/grep/Grep.cpp b/ndb/src/kernel/blocks/grep/Grep.cpp
index 0e41182348f..e89361dab06 100644
--- a/ndb/src/kernel/blocks/grep/Grep.cpp
+++ b/ndb/src/kernel/blocks/grep/Grep.cpp
@@ -603,7 +603,7 @@ Grep::PSCoord::execCREATE_SUBID_CONF(Signal* signal)
GrepEvent::GrepPS_CreateSubIdConf,
subId,
subKey,
- (Uint32)GrepError::NO_ERROR);
+ (Uint32)GrepError::GE_NO_ERROR);
}
void
@@ -612,7 +612,7 @@ Grep::PSCoord::execCREATE_SUBID_REF(Signal* signal) {
CreateSubscriptionIdRef const * ref =
(CreateSubscriptionIdRef *)signal->getDataPtr();
Uint32 subData = ref->subscriberData;
- GrepError::Code err;
+ GrepError::GE_Code err;
Uint32 sendersBlockRef = signal->getSendersBlockRef();
if(sendersBlockRef == SUMA_REF)
@@ -624,7 +624,7 @@ Grep::PSCoord::execCREATE_SUBID_REF(Signal* signal) {
ndbrequire(false); /* Added since errorcode err unhandled
* TODO: fix correct errorcode
*/
- err= GrepError::NO_ERROR; // remove compiler warning
+ err= GrepError::GE_NO_ERROR; // remove compiler warning
}
SubCoordinatorPtr subPtr;
@@ -824,7 +824,7 @@ Grep::PSPart::execSUB_CREATE_REF(Signal* signal)
jamEntry();
SubCreateRef * const ref = (SubCreateRef *)signal->getDataPtr();
Uint32 subData = ref->subscriberData;
- GrepError::Code err = (GrepError::Code)ref->err;
+ GrepError::GE_Code err = (GrepError::GE_Code)ref->err;
SubscriptionPtr subPtr;
c_subscriptions.getPtr(subPtr, subData);
sendRefToPSCoord(signal, *subPtr.p, err /*error*/);
@@ -867,7 +867,7 @@ Grep::PSCoord::execGREP_CREATE_CONF(Signal* signal)
GrepEvent::GrepPS_SubCreateConf,
subId,
subKey,
- (Uint32)GrepError::NO_ERROR);
+ (Uint32)GrepError::GE_NO_ERROR);
c_subCoordinatorPool.release(subPtr);
@@ -889,7 +889,7 @@ Grep::PSCoord::execGREP_CREATE_REF(Signal* signal)
SubCoordinatorPtr subPtr;
c_runningSubscriptions.getPtr(subPtr, subData);
- sendRefToSS(signal, *subPtr.p, (GrepError::Code)err /*error*/);
+ sendRefToSS(signal, *subPtr.p, (GrepError::GE_Code)err /*error*/);
}
@@ -1046,7 +1046,7 @@ Grep::PSPart::execSUB_START_REF(Signal* signal)
{
SubStartRef * const ref = (SubStartRef *)signal->getDataPtr();
Uint32 subData = ref->subscriberData;
- GrepError::Code err = (GrepError::Code)ref->err;
+ GrepError::GE_Code err = (GrepError::GE_Code)ref->err;
SubscriptionData::Part part = (SubscriptionData::Part)ref->part;
SubscriptionPtr subPtr;
c_subscriptions.getPtr(subPtr, subData);
@@ -1102,7 +1102,7 @@ Grep::PSCoord::execGREP_START_CONF(Signal* signal)
EventReport::GrepSubscriptionInfo,
GrepEvent::GrepPS_SubStartMetaConf,
subId, subKey,
- (Uint32)GrepError::NO_ERROR);
+ (Uint32)GrepError::GE_NO_ERROR);
c_subCoordinatorPool.release(subPtr);
break;
@@ -1118,7 +1118,7 @@ Grep::PSCoord::execGREP_START_CONF(Signal* signal)
EventReport::GrepSubscriptionInfo,
GrepEvent::GrepPS_SubStartDataConf,
subId, subKey,
- (Uint32)GrepError::NO_ERROR);
+ (Uint32)GrepError::GE_NO_ERROR);
c_subCoordinatorPool.release(subPtr);
@@ -1145,7 +1145,7 @@ Grep::PSCoord::execGREP_START_REF(Signal* signal)
jamEntry();
GrepStartRef * const ref = (GrepStartRef *)signal->getDataPtr();
Uint32 subData = ref->senderData;
- GrepError::Code err = (GrepError::Code)ref->err;
+ GrepError::GE_Code err = (GrepError::GE_Code)ref->err;
SubscriptionData::Part part = (SubscriptionData::Part)ref->part;
SubCoordinatorPtr subPtr;
@@ -1301,7 +1301,7 @@ Grep::PSPart::execSUB_REMOVE_REF(Signal* signal)
jamEntry();
SubRemoveRef * const ref = (SubRemoveRef *)signal->getDataPtr();
Uint32 subData = ref->subscriberData;
- /* GrepError::Code err = (GrepError::Code)ref->err;*/
+ /* GrepError::GE_Code err = (GrepError::GE_Code)ref->err;*/
SubscriptionPtr subPtr;
c_subscriptions.getPtr(subPtr, subData);
@@ -1342,7 +1342,7 @@ Grep::PSCoord::execGREP_REMOVE_CONF(Signal* signal)
EventReport::GrepSubscriptionInfo,
GrepEvent::GrepPS_SubRemoveConf,
subId, subKey,
- GrepError::NO_ERROR);
+ GrepError::GE_NO_ERROR);
GrepSubRemoveConf * grepConf = (GrepSubRemoveConf *) conf;
grepConf->subscriptionId = subId;
@@ -1375,7 +1375,7 @@ Grep::PSCoord::execGREP_REMOVE_REF(Signal* signal)
subPtr.p = c_runningSubscriptions.getPtr(subPtr.i);
if(subData == subPtr.i)
{
- sendRefToSS(signal, *subPtr.p, (GrepError::Code)err /*error*/);
+ sendRefToSS(signal, *subPtr.p, (GrepError::GE_Code)err /*error*/);
c_runningSubscriptions.release(subPtr);
return;
}
@@ -1633,7 +1633,7 @@ Grep::PSPart::execSUB_SYNC_REF(Signal* signal) {
jamEntry();
SubSyncRef * const ref = (SubSyncRef *)signal->getDataPtr();
Uint32 subData = ref->subscriberData;
- GrepError::Code err = (GrepError::Code)ref->err;
+ GrepError::GE_Code err = (GrepError::GE_Code)ref->err;
SubscriptionData::Part part = (SubscriptionData::Part)ref->part;
SubscriptionPtr subPtr;
@@ -1677,7 +1677,7 @@ Grep::PSCoord::execGREP_SYNC_CONF(Signal* signal)
/* @todo Johan: Add firstGCI here. /Lars */
m_grep->sendEventRep(signal, EventReport::GrepSubscriptionInfo,
event, subId, subKey,
- (Uint32)GrepError::NO_ERROR,
+ (Uint32)GrepError::GE_NO_ERROR,
lastGCI);
/*************************
@@ -1707,7 +1707,7 @@ Grep::PSCoord::execGREP_SYNC_REF(Signal* signal) {
GrepSyncRef * const ref = (GrepSyncRef *)signal->getDataPtr();
Uint32 subData = ref->senderData;
SubscriptionData::Part part = (SubscriptionData::Part)ref->part;
- GrepError::Code err = (GrepError::Code)ref->err;
+ GrepError::GE_Code err = (GrepError::GE_Code)ref->err;
SubCoordinatorPtr subPtr;
c_runningSubscriptions.getPtr(subPtr, subData);
sendRefToSS(signal, *subPtr.p, err /*error*/, part);
@@ -1718,7 +1718,7 @@ Grep::PSCoord::execGREP_SYNC_REF(Signal* signal) {
void
Grep::PSCoord::sendRefToSS(Signal * signal,
SubCoordinator sub,
- GrepError::Code err,
+ GrepError::GE_Code err,
SubscriptionData::Part part) {
/**
@@ -1843,7 +1843,7 @@ Grep::PSCoord::sendRefToSS(Signal * signal,
void
Grep::PSPart::sendRefToPSCoord(Signal * signal,
Subscription sub,
- GrepError::Code err,
+ GrepError::GE_Code err,
SubscriptionData::Part part) {
jam();
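
All of the Grep.cpp hunks above are a mechanical rename of GrepError::Code / NO_ERROR to GrepError::GE_Code / GE_NO_ERROR, presumably to keep the enumerator clear of platform macros such as NO_ERROR from the Windows headers. A minimal stand-alone illustration of why an unprefixed NO_ERROR enumerator is fragile; the GE_INTERNAL_ERROR enumerator below is made up for the example and not taken from GrepError.hpp:

// Illustration only: a platform header may define NO_ERROR as a macro,
// which would rewrite an enumerator of that name before the compiler
// sees it. A GE_ prefix sidesteps the collision.
#include <cstdio>

#define NO_ERROR 0L   // stand-in for a platform macro (e.g. winerror.h)

struct GrepError {
  enum GE_Code {      // "enum Code { NO_ERROR, ... }" would break here
    GE_NO_ERROR = 0,
    GE_INTERNAL_ERROR = 1   // made-up enumerator for the example
  };
};

int main() {
  GrepError::GE_Code err = GrepError::GE_NO_ERROR;
  std::printf("err=%d\n", (int)err);
  return 0;
}
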
diff --git a/ndb/src/kernel/blocks/grep/Grep.hpp b/ndb/src/kernel/blocks/grep/Grep.hpp
index eeabac36966..7d3dd916ecc 100644
--- a/ndb/src/kernel/blocks/grep/Grep.hpp
+++ b/ndb/src/kernel/blocks/grep/Grep.hpp
@@ -380,16 +380,16 @@ public:
Uint32 subId,
Uint32 subKey,
BlockReference to,
- GrepError::Code err);
+ GrepError::GE_Code err);
void sendSubRemoveRef_SS(Signal * signal,
SubCoordinator sub,
- GrepError::Code err);
+ GrepError::GE_Code err);
void sendRefToSS(Signal * signal,
SubCoordinator sub,
- GrepError::Code err,
+ GrepError::GE_Code err,
SubscriptionData::Part part = (SubscriptionData::Part)0);
void setRepRef(BlockReference rr) { m_repRef = rr; };
@@ -496,7 +496,7 @@ public:
void sendRefToPSCoord(Signal * signal,
Subscription sub,
- GrepError::Code err,
+ GrepError::GE_Code err,
SubscriptionData::Part part = (SubscriptionData::Part)0);
//protected:
diff --git a/ndb/src/kernel/blocks/grep/GrepInit.cpp b/ndb/src/kernel/blocks/grep/GrepInit.cpp
index cfb454a1f9b..d764fb1f473 100644
--- a/ndb/src/kernel/blocks/grep/GrepInit.cpp
+++ b/ndb/src/kernel/blocks/grep/GrepInit.cpp
@@ -15,7 +15,6 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include "Grep.hpp"
-#include <new>
#include <Properties.hpp>
#include <Configuration.hpp>
@@ -132,7 +131,7 @@ Grep::~Grep()
{
}
-BLOCK_FUNCTIONS(Grep);
+BLOCK_FUNCTIONS(Grep)
Grep::PSPart::PSPart(Grep * sb) :
BlockComponent(sb),
diff --git a/ndb/src/kernel/blocks/grep/Makefile.am b/ndb/src/kernel/blocks/grep/Makefile.am
index 31081c7b6a0..6d2b422784b 100644
--- a/ndb/src/kernel/blocks/grep/Makefile.am
+++ b/ndb/src/kernel/blocks/grep/Makefile.am
@@ -7,3 +7,17 @@ include $(top_srcdir)/ndb/config/type_kernel.mk.am
# Don't update the files from bitkeeper
%::SCCS/s.%
+
+windoze-dsp: libgrep.dsp
+
+libgrep.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libgrep_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/grep/Makefile_old b/ndb/src/kernel/blocks/grep/Makefile_old
deleted file mode 100644
index 5ad5a0bce3b..00000000000
--- a/ndb/src/kernel/blocks/grep/Makefile_old
+++ /dev/null
@@ -1,9 +0,0 @@
-include .defs.mk
-
-TYPE := kernel
-
-ARCHIVE_TARGET := grep
-
-SOURCES = Grep.cpp GrepInit.cpp
-
-include $(NDB_TOP)/Epilogue.mk
diff --git a/ndb/src/kernel/blocks/ndbcntr/Makefile.am b/ndb/src/kernel/blocks/ndbcntr/Makefile.am
index 9230b55b374..3f24675b2b3 100644
--- a/ndb/src/kernel/blocks/ndbcntr/Makefile.am
+++ b/ndb/src/kernel/blocks/ndbcntr/Makefile.am
@@ -10,3 +10,17 @@ include $(top_srcdir)/ndb/config/type_kernel.mk.am
# Don't update the files from bitkeeper
%::SCCS/s.%
+
+windoze-dsp: libndbcntr.dsp
+
+libndbcntr.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libndbcntr_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/ndbcntr/Makefile_old b/ndb/src/kernel/blocks/ndbcntr/Makefile_old
deleted file mode 100644
index 8e9c4f01027..00000000000
--- a/ndb/src/kernel/blocks/ndbcntr/Makefile_old
+++ /dev/null
@@ -1,12 +0,0 @@
-include .defs.mk
-
-TYPE := kernel
-
-ARCHIVE_TARGET := ndbcntr
-
-SOURCES = \
- NdbcntrInit.cpp \
- NdbcntrSysTable.cpp \
- NdbcntrMain.cpp
-
-include $(NDB_TOP)/Epilogue.mk
diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp b/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
index 43044eeebcd..c7b472fc91a 100644
--- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
+++ b/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
@@ -114,4 +114,4 @@ Ndbcntr::~Ndbcntr()
}//Ndbcntr::~Ndbcntr()
-BLOCK_FUNCTIONS(Ndbcntr);
+BLOCK_FUNCTIONS(Ndbcntr)
diff --git a/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp b/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
index a02bfd459b3..f76440a462a 100644
--- a/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
+++ b/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
@@ -14,19 +14,11 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/**
- * O_DIRECT
- */
-#if 0
-//#ifdef NDB_LINUX
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE
-#endif
-#endif
-
#include <ndb_global.h>
+#include <my_sys.h>
+#include <my_pthread.h>
-#include "Error.hpp"
+#include <Error.hpp>
#include "AsyncFile.hpp"
#include <ErrorHandlingMacros.hpp>
@@ -35,13 +27,12 @@
#include <NdbThread.h>
#include <signaldata/FsOpenReq.hpp>
-#if 0
-#ifdef HAVE_PREAD
-// This is for pread and pwrite
-#ifndef __USE_UNIX98
-#define __USE_UNIX98
-#endif
-#endif
+// use this to test broken pread code
+//#define HAVE_BROKEN_PREAD
+
+#ifdef HAVE_BROKEN_PREAD
+#undef HAVE_PWRITE
+#undef HAVE_PREAD
#endif
#if defined NDB_WIN32 || defined NDB_OSE || defined NDB_SOFTOSE
@@ -227,7 +218,8 @@ AsyncFile::run()
rmrfReq(request, (char*)theFileName.c_str(), request->par.rmrf.own_directory);
break;
case Request:: end:
- closeReq(request);
+ if (theFd > 0)
+ closeReq(request);
endReq();
return;
default:
@@ -247,6 +239,7 @@ void AsyncFile::openReq(Request* request)
{
m_openedWithSync = false;
m_syncFrequency = 0;
+ m_syncCount= 0;
// for open.flags, see signal FSOPENREQ
#ifdef NDB_WIN32
@@ -337,7 +330,6 @@ void AsyncFile::openReq(Request* request)
} else {
#endif
m_openedWithSync = false;
- m_syncCount = 0;
m_syncFrequency = Global_syncFreq;
#if 0
}
@@ -395,9 +387,12 @@ AsyncFile::readBuffer(char * buf, size_t size, off_t offset){
if(dwSFP != offset) {
return GetLastError();
}
-#elif defined NDB_OSE || defined NDB_SOFTOSE
- return_value = lseek(theFd, offset, SEEK_SET);
- if (return_value != offset) {
+#elif ! defined(HAVE_PREAD)
+ off_t seek_val;
+ while((seek_val= lseek(theFd, offset, SEEK_SET)) == (off_t)-1
+ && errno == EINTR);
+ if(seek_val == (off_t)-1)
+ {
return errno;
}
#endif
@@ -416,7 +411,7 @@ AsyncFile::readBuffer(char * buf, size_t size, off_t offset){
return GetLastError();
}
bytes_read = dwBytesRead;
-#elif defined NDB_OSE || defined NDB_SOFTOSE
+#elif ! defined(HAVE_PREAD)
return_value = ::read(theFd, buf, size);
#else // UNIX
return_value = ::pread(theFd, buf, size, offset);
@@ -469,7 +464,7 @@ AsyncFile::readReq( Request * request)
void
AsyncFile::readvReq( Request * request)
{
-#if defined NDB_OSE || defined NDB_SOFTOSE
+#if ! defined(HAVE_PREAD)
readReq(request);
return;
#elif defined NDB_WIN32
@@ -499,7 +494,7 @@ AsyncFile::readvReq( Request * request)
int
AsyncFile::extendfile(Request* request) {
-#if defined NDB_OSE || defined NDB_SOFTOSE
+#if ! defined(HAVE_PWRITE)
// Find max size of this file in this request
int maxOffset = 0;
int maxSize = 0;
@@ -608,27 +603,13 @@ AsyncFile::writeBuffer(const char * buf, size_t size, off_t offset,
if(dwSFP != offset) {
return GetLastError();
}
-#elif defined NDB_OSE || defined NDB_SOFTOSE
- return_value = lseek(theFd, offset, SEEK_SET);
- if (return_value != offset) {
- DEBUG(ndbout_c("AsyncFile::writeReq, err1: return_value=%d, offset=%d\n",
- return_value, chunk_offset));
- PRINT_ERRORANDFLAGS(0);
- if (errno == 78) {
- // Could not write beyond end of file, try to extend file
- DEBUG(ndbout_c("AsyncFile::writeReq, Extend. file! filename=\"%s\" \n",
- theFileName.c_str()));
- return_value = extendfile(request);
- if (return_value == -1) {
- return errno;
- }
- return_value = lseek(theFd, offset, SEEK_SET);
- if (return_value != offset) {
- return errno;
- }
- } else {
- return errno;
- }
+#elif ! defined(HAVE_PWRITE)
+ off_t seek_val;
+ while((seek_val= lseek(theFd, offset, SEEK_SET)) == (off_t)-1
+ && errno == EINTR);
+ if(seek_val == (off_t)-1)
+ {
+ return errno;
}
#endif
@@ -650,7 +631,7 @@ AsyncFile::writeBuffer(const char * buf, size_t size, off_t offset,
DEBUG(ndbout_c("Warning partial write %d != %d", bytes_written, bytes_to_write));
}
-#elif defined NDB_OSE || defined NDB_SOFTOSE
+#elif ! defined(HAVE_PWRITE)
return_value = ::write(theFd, buf, bytes_to_write);
#else // UNIX
return_value = ::pwrite(theFd, buf, bytes_to_write, offset);
@@ -675,6 +656,7 @@ AsyncFile::writeBuffer(const char * buf, size_t size, off_t offset,
}
#endif
+ m_syncCount+= bytes_written;
buf += bytes_written;
size -= bytes_written;
offset += bytes_written;
@@ -701,6 +683,10 @@ AsyncFile::closeReq(Request * request)
hFile = INVALID_HANDLE_VALUE;
#else
if (-1 == ::close(theFd)) {
+#ifndef DBUG_OFF
+ if (theFd == -1)
+ abort();
+#endif
request->error = errno;
}
theFd = -1;
@@ -719,7 +705,8 @@ bool AsyncFile::isOpen(){
void
AsyncFile::syncReq(Request * request)
{
- if(m_openedWithSync){
+ if(m_openedWithSync ||
+ m_syncCount == 0){
return;
}
#ifdef NDB_WIN32
@@ -775,7 +762,6 @@ AsyncFile::appendReq(Request * request){
if(m_syncFrequency != 0 && m_syncCount > m_syncFrequency){
syncReq(request);
- request->error = 0;
}
}
@@ -889,7 +875,6 @@ void AsyncFile::endReq()
{
// Thread is ended with return
if (theWriteBuffer) NdbMem_Free(theWriteBuffer);
- NdbThread_Exit(0);
}
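
The AsyncFile.cpp changes above generalize the old NDB_OSE/NDB_SOFTOSE branches into "#if ! defined(HAVE_PREAD)" / "HAVE_PWRITE" fallbacks (with a HAVE_BROKEN_PREAD knob to force them for testing), retry the fallback lseek() when it is interrupted by a signal, count written bytes in m_syncCount so syncReq() is skipped when nothing has been written, and stop calling NdbThread_Exit() from endReq(). Below is a stand-alone sketch of the seek-then-read fallback with the EINTR retry loop; it is simplified and ignores the partial-read and Windows-handle handling of the real code:

// Sketch of a pread() fallback: EINTR-safe seek, then a plain read().
#include <cerrno>
#include <cstdio>
#include <unistd.h>

static ssize_t pread_fallback(int fd, void* buf, size_t size, off_t offset) {
  off_t seek_val;
  while ((seek_val = ::lseek(fd, offset, SEEK_SET)) == (off_t)-1 &&
         errno == EINTR)
    ;                                  // interrupted by a signal, retry
  if (seek_val == (off_t)-1)
    return -1;                         // real seek error, errno already set
  return ::read(fd, buf, size);
}

int main() {
  char buf[16];
  ssize_t n = pread_fallback(0, buf, sizeof(buf), 0);   // fd 0, offset 0
  std::printf("read %zd bytes (-1 if fd 0 is not seekable)\n", n);
  return 0;
}
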
diff --git a/ndb/src/kernel/blocks/ndbfs/Makefile.am b/ndb/src/kernel/blocks/ndbfs/Makefile.am
index c2b663c5042..a22386f8612 100644
--- a/ndb/src/kernel/blocks/ndbfs/Makefile.am
+++ b/ndb/src/kernel/blocks/ndbfs/Makefile.am
@@ -11,3 +11,17 @@ include $(top_srcdir)/ndb/config/type_kernel.mk.am
# Don't update the files from bitkeeper
%::SCCS/s.%
+
+windoze-dsp: libndbfs.dsp
+
+libndbfs.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libndbfs_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/ndbfs/Makefile_old b/ndb/src/kernel/blocks/ndbfs/Makefile_old
deleted file mode 100644
index 58e1458bf16..00000000000
--- a/ndb/src/kernel/blocks/ndbfs/Makefile_old
+++ /dev/null
@@ -1,14 +0,0 @@
-include .defs.mk
-
-TYPE := kernel
-
-ARCHIVE_TARGET := ndbfs
-
-SOURCES = \
- AsyncFile.cpp \
- Ndbfs.cpp VoidFs.cpp \
- Filename.cpp \
- CircularIndex.cpp
-
-include $(NDB_TOP)/Epilogue.mk
-
diff --git a/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp b/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp
index aeab9f7828d..b98c60693f4 100644
--- a/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp
+++ b/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp
@@ -40,7 +40,6 @@ extern "C" void* runProducer(void*arg)
NdbSleep_MilliSleep(i);
i++;
}
- NdbThread_Exit(0);
return NULL;
}
@@ -58,7 +57,6 @@ extern "C" void* runConsumer(void* arg)
delete p;
}
- NdbThread_Exit(0);
return NULL;
}
@@ -92,7 +90,6 @@ extern "C" void* runProducer2(void*arg)
NdbSleep_MilliSleep(i);
i++;
}
- NdbThread_Exit(0);
return NULL;
}
@@ -111,7 +108,6 @@ extern "C" void* runConsumer2(void* arg)
delete p;
}
ndbout << "Consumer2: " << count << " received" << endl;
- NdbThread_Exit(0);
return NULL;
}
diff --git a/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp b/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
index 56e3d3abbed..9c943760e31 100644
--- a/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
+++ b/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
@@ -730,7 +730,7 @@ Ndbfs::scanIPC(Signal* signal)
jam();
report(request, signal);
theRequestPool->put(request);
- return &request;
+ return true;
}
return false;
}
@@ -1010,7 +1010,7 @@ Ndbfs::execDUMP_STATE_ORD(Signal* signal)
-BLOCK_FUNCTIONS(Ndbfs);
+BLOCK_FUNCTIONS(Ndbfs)
template class Vector<AsyncFile*>;
template class Vector<OpenFiles::OpenFileItem>;
diff --git a/ndb/src/kernel/blocks/ndbfs/VoidFs.cpp b/ndb/src/kernel/blocks/ndbfs/VoidFs.cpp
index d3407e8d4e7..d093089acfc 100644
--- a/ndb/src/kernel/blocks/ndbfs/VoidFs.cpp
+++ b/ndb/src/kernel/blocks/ndbfs/VoidFs.cpp
@@ -196,5 +196,5 @@ VoidFs::execDUMP_STATE_ORD(Signal* signal)
-BLOCK_FUNCTIONS(VoidFs);
+BLOCK_FUNCTIONS(VoidFs)
diff --git a/ndb/src/kernel/blocks/qmgr/Makefile.am b/ndb/src/kernel/blocks/qmgr/Makefile.am
index 52cadb3bd3d..278af2a7865 100644
--- a/ndb/src/kernel/blocks/qmgr/Makefile.am
+++ b/ndb/src/kernel/blocks/qmgr/Makefile.am
@@ -9,3 +9,17 @@ include $(top_srcdir)/ndb/config/type_kernel.mk.am
# Don't update the files from bitkeeper
%::SCCS/s.%
+
+windoze-dsp: libqmgr.dsp
+
+libqmgr.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libqmgr_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/qmgr/Makefile_old b/ndb/src/kernel/blocks/qmgr/Makefile_old
deleted file mode 100644
index cd15643ea60..00000000000
--- a/ndb/src/kernel/blocks/qmgr/Makefile_old
+++ /dev/null
@@ -1,11 +0,0 @@
-include .defs.mk
-
-TYPE := kernel
-
-ARCHIVE_TARGET := qmgr
-
-SOURCES = \
- QmgrInit.cpp \
- QmgrMain.cpp
-
-include $(NDB_TOP)/Epilogue.mk
diff --git a/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp b/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
index b0f1088779c..d6960ce154e 100644
--- a/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
+++ b/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
@@ -100,4 +100,4 @@ Qmgr::~Qmgr()
}//Qmgr::~Qmgr()
-BLOCK_FUNCTIONS(Qmgr);
+BLOCK_FUNCTIONS(Qmgr)
diff --git a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
index 41deb3403c8..da8596076ec 100644
--- a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
+++ b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
@@ -1934,17 +1934,27 @@ void Qmgr::execAPI_REGREQ(Signal* signal)
switch(getNodeInfo(apiNodePtr.i).getType()){
case NodeInfo::API:
compatability_check = ndbCompatible_ndb_api(NDB_VERSION, version);
+ if (!compatability_check)
+ infoEvent("Connection attempt from api or mysqld id=%d with %s "
+ "incompatible with %s", apiNodePtr.i,
+ getVersionString(version,""), NDB_VERSION_STRING);
break;
case NodeInfo::MGM:
compatability_check = ndbCompatible_ndb_mgmt(NDB_VERSION, version);
+ if (!compatability_check)
+ infoEvent("Connection attempt from management server id=%d with %s "
+ "incompatible with %s", apiNodePtr.i,
+ getVersionString(version,""), NDB_VERSION_STRING);
break;
case NodeInfo::REP:
- compatability_check = ndbCompatible_ndb_api(NDB_VERSION, version);
- break;
+ // compatability_check = ndbCompatible_ndb_api(NDB_VERSION, version);
+ // break;
case NodeInfo::DB:
case NodeInfo::INVALID:
default:
sendApiRegRef(signal, ref, ApiRegRef::WrongType);
+ infoEvent("Invalid connection attempt with type %d",
+ getNodeInfo(apiNodePtr.i).getType());
return;
}
@@ -2937,6 +2947,12 @@ void Qmgr::sendPrepFailReq(Signal* signal, Uint16 aNode)
*/
/**
+ * Should < 1/2 nodes die unconditionally. Affects only >= 3-way
+ * replication.
+ */
+static const bool g_ndb_arbit_one_half_rule = false;
+
+/**
* Config signals are logically part of CM_INIT.
*/
void
@@ -3147,7 +3163,8 @@ Qmgr::handleArbitCheck(Signal* signal)
ndbrequire(cpresident == getOwnNodeId());
NodeBitmask ndbMask;
computeArbitNdbMask(ndbMask);
- if (2 * ndbMask.count() < cnoOfNodes) {
+ if (g_ndb_arbit_one_half_rule &&
+ 2 * ndbMask.count() < cnoOfNodes) {
jam();
arbitRec.code = ArbitCode::LoseNodes;
} else {
@@ -3171,6 +3188,11 @@ Qmgr::handleArbitCheck(Signal* signal)
case CheckNodeGroups::Partitioning:
jam();
arbitRec.code = ArbitCode::Partitioning;
+ if (g_ndb_arbit_one_half_rule &&
+ 2 * ndbMask.count() > cnoOfNodes) {
+ jam();
+ arbitRec.code = ArbitCode::WinNodes;
+ }
break;
default:
ndbrequire(false);
@@ -3180,8 +3202,12 @@ Qmgr::handleArbitCheck(Signal* signal)
switch (arbitRec.code) {
case ArbitCode::LoseNodes:
jam();
+ case ArbitCode::LoseGroups:
+ jam();
goto crashme;
- case ArbitCode::WinGroups:
+ case ArbitCode::WinNodes:
+ jam();
+ case ArbitCode::WinGroups:
jam();
if (arbitRec.state == ARBIT_RUN) {
jam();
@@ -3190,9 +3216,6 @@ Qmgr::handleArbitCheck(Signal* signal)
arbitRec.state = ARBIT_INIT;
arbitRec.newstate = true;
break;
- case ArbitCode::LoseGroups:
- jam();
- goto crashme;
case ArbitCode::Partitioning:
if (arbitRec.state == ARBIT_RUN) {
jam();
@@ -3752,8 +3775,7 @@ Qmgr::execARBIT_CHOOSEREF(Signal* signal)
}
/**
- * Handle CRASH state. We must crash immediately. But it
- * would be nice to wait until event reports have been sent.
+ * Handle CRASH state. We must crash immediately.
* XXX tell other nodes in our party to crash too.
*/
void
@@ -3763,12 +3785,11 @@ Qmgr::stateArbitCrash(Signal* signal)
if (arbitRec.newstate) {
jam();
CRASH_INSERTION((Uint32)910 + arbitRec.state);
-
arbitRec.setTimestamp();
arbitRec.code = 0;
arbitRec.newstate = false;
}
-#if 0
+#ifdef ndb_arbit_crash_wait_for_event_report_to_get_out
if (! (arbitRec.getTimediff() > getArbitTimeout()))
return;
#endif
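
In the Qmgr hunks above, the simple-majority shortcuts in handleArbitCheck() are now gated behind g_ndb_arbit_one_half_rule, which defaults to false, so a plain node count alone no longer decides arbitration; the node-group and partitioning checks (and the arbitrator) decide instead. LoseGroups now shares the crashme path with LoseNodes, the WinNodes promotion applies only in the Partitioning case when the rule is enabled, and the crash-delay block is guarded by a named (never-defined) macro rather than "#if 0". The sketch below collapses just the count-based part into one helper for illustration; it is not the real state machine and the node counts are made up:

// Sketch of the disabled-by-default "one half" arbitration rule.
#include <cstdio>

typedef unsigned Uint32;

static const bool g_ndb_arbit_one_half_rule = false;

enum ArbitOutcome { NeedArbitration = 0, LoseNodes, WinNodes };

static ArbitOutcome checkMajority(Uint32 surviving, Uint32 cnoOfNodes) {
  if (g_ndb_arbit_one_half_rule && 2 * surviving < cnoOfNodes)
    return LoseNodes;                  // strict minority: give up immediately
  if (g_ndb_arbit_one_half_rule && 2 * surviving > cnoOfNodes)
    return WinNodes;                   // strict majority: survive immediately
  return NeedArbitration;              // let node groups / arbitrator decide
}

int main() {
  // with the flag off, 1 of 4 surviving nodes still goes to arbitration
  std::printf("%d\n", (int)checkMajority(1, 4));
  return 0;
}
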
diff --git a/ndb/src/kernel/blocks/suma/Makefile.am b/ndb/src/kernel/blocks/suma/Makefile.am
index 4dacb22af51..5a74dbb74eb 100644
--- a/ndb/src/kernel/blocks/suma/Makefile.am
+++ b/ndb/src/kernel/blocks/suma/Makefile.am
@@ -7,3 +7,17 @@ include $(top_srcdir)/ndb/config/type_kernel.mk.am
# Don't update the files from bitkeeper
%::SCCS/s.%
+
+windoze-dsp: libsuma.dsp
+
+libsuma.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libsuma_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/suma/Makefile_old b/ndb/src/kernel/blocks/suma/Makefile_old
deleted file mode 100644
index 20014c94670..00000000000
--- a/ndb/src/kernel/blocks/suma/Makefile_old
+++ /dev/null
@@ -1,10 +0,0 @@
-include .defs.mk
-
-TYPE := kernel
-
-ARCHIVE_TARGET := suma
-
-SOURCES = Suma.cpp SumaInit.cpp
-
-include $(NDB_TOP)/Epilogue.mk
-
diff --git a/ndb/src/kernel/blocks/suma/Suma.cpp b/ndb/src/kernel/blocks/suma/Suma.cpp
index d11d5f7176a..44ac054dd67 100644
--- a/ndb/src/kernel/blocks/suma/Suma.cpp
+++ b/ndb/src/kernel/blocks/suma/Suma.cpp
@@ -824,7 +824,8 @@ Suma::execUTIL_SEQUENCE_CONF(Signal* signal)
return;
}
- Uint32 subId = conf->sequenceValue[0];
+ Uint64 subId;
+ memcpy(&subId,conf->sequenceValue,8);
Uint32 subData = conf->senderData;
SubscriberPtr subbPtr;
@@ -832,8 +833,8 @@ Suma::execUTIL_SEQUENCE_CONF(Signal* signal)
CreateSubscriptionIdConf * subconf = (CreateSubscriptionIdConf*)conf;
- subconf->subscriptionId = subId;
- subconf->subscriptionKey =(getOwnNodeId() << 16) | (subId & 0xFFFF);
+ subconf->subscriptionId = (Uint32)subId;
+ subconf->subscriptionKey =(getOwnNodeId() << 16) | (Uint32)(subId & 0xFFFF);
subconf->subscriberData = subbPtr.p->m_senderData;
sendSignal(subbPtr.p->m_subscriberRef, GSN_CREATE_SUBID_CONF, signal,
@@ -1888,7 +1889,7 @@ SumaParticipant::SyncRecord::nextScan(Signal* signal){
req->requestInfo = 0;
req->savePointId = 0;
ScanFragReq::setLockMode(req->requestInfo, 0);
- ScanFragReq::setHoldLockFlag(req->requestInfo, 0);
+ ScanFragReq::setHoldLockFlag(req->requestInfo, 1);
ScanFragReq::setKeyinfoFlag(req->requestInfo, 0);
ScanFragReq::setAttrLen(req->requestInfo, attrLen);
req->fragmentNoKeyLen = fd.m_fragDesc.m_fragmentNo;
@@ -3471,10 +3472,10 @@ SumaParticipant::completeSubRemoveReq(Signal* signal, SubscriptionPtr subPtr) {
*/
#if 0
ndbout_c("c_subscriptionPool.getSize() %d c_subscriptionPool.getNoOfFree()%d",
- c_subscriptionPool.getSize(),c_subscriptionPool.getNoOfFree()+1);
+ c_subscriptionPool.getSize(),c_subscriptionPool.getNoOfFree());
#endif
- if(c_subscriptionPool.getSize() == c_subscriptionPool.getNoOfFree()+1) {
+ if(c_subscriptionPool.getSize() == c_subscriptionPool.getNoOfFree()) {
jam();
#if 0
ndbout_c("SUB_REMOVE_REQ:Clearing c_tables");
@@ -3553,7 +3554,7 @@ Suma::Restart::Restart(Suma& s) : suma(s) {
c_okToStart[i] = false;
c_waitingToStart[i] = false;
}
-};
+}
void
Suma::Restart::resetNode(Uint32 sumaRef)
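
The Suma.cpp hunks above fix the sequence handling in execUTIL_SEQUENCE_CONF (the UTIL sequence value occupies two 32-bit signal words, so it is copied into a Uint64 instead of being truncated to the first word), set the hold-lock flag on the SUMA scan, and adjust the pool-free accounting in completeSubRemoveReq. Below is a stand-alone sketch of the two-word copy and the node-id / low-16-bit key packing; the values are made up and the resulting numbers depend on host byte order:

// Sketch of carrying a 64-bit sequence value in two 32-bit signal words
// and folding it into a subscription id/key.
#include <cstdio>
#include <cstring>

typedef unsigned Uint32;
typedef unsigned long long Uint64;

int main() {
  // two 32-bit words as they would arrive in UTIL_SEQUENCE_CONF
  Uint32 sequenceValue[2] = { 0x00010003u, 0x00000000u };

  Uint64 subId;
  std::memcpy(&subId, sequenceValue, 8);          // keep all 64 bits

  Uint32 ownNodeId = 2;                           // made-up node id
  Uint32 subscriptionId  = (Uint32)subId;
  Uint32 subscriptionKey = (ownNodeId << 16) | (Uint32)(subId & 0xFFFF);

  std::printf("id=0x%08x key=0x%08x\n", subscriptionId, subscriptionKey);
  return 0;
}
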
diff --git a/ndb/src/kernel/blocks/suma/SumaInit.cpp b/ndb/src/kernel/blocks/suma/SumaInit.cpp
index 255abd47c94..b5945db3811 100644
--- a/ndb/src/kernel/blocks/suma/SumaInit.cpp
+++ b/ndb/src/kernel/blocks/suma/SumaInit.cpp
@@ -16,7 +16,6 @@
#include "Suma.hpp"
-#include <new>
#include <Properties.hpp>
#include <Configuration.hpp>
@@ -188,6 +187,6 @@ Suma::~Suma()
{
}
-BLOCK_FUNCTIONS(Suma);
-BLOCK_FUNCTIONS(SumaParticipant);
+BLOCK_FUNCTIONS(Suma)
+BLOCK_FUNCTIONS(SumaParticipant)
diff --git a/ndb/src/kernel/blocks/trix/Makefile.am b/ndb/src/kernel/blocks/trix/Makefile.am
index 803da815cf0..343063a6283 100644
--- a/ndb/src/kernel/blocks/trix/Makefile.am
+++ b/ndb/src/kernel/blocks/trix/Makefile.am
@@ -7,3 +7,17 @@ include $(top_srcdir)/ndb/config/type_kernel.mk.am
# Don't update the files from bitkeeper
%::SCCS/s.%
+
+windoze-dsp: libtrix.dsp
+
+libtrix.dsp: Makefile \
+ $(top_srcdir)/ndb/config/win-lib.am \
+ $(top_srcdir)/ndb/config/win-name \
+ $(top_srcdir)/ndb/config/win-includes \
+ $(top_srcdir)/ndb/config/win-sources \
+ $(top_srcdir)/ndb/config/win-libraries
+ cat $(top_srcdir)/ndb/config/win-lib.am > $@
+ @$(top_srcdir)/ndb/config/win-name $@ $(noinst_LIBRARIES)
+ @$(top_srcdir)/ndb/config/win-includes $@ $(INCLUDES)
+ @$(top_srcdir)/ndb/config/win-sources $@ $(libtrix_a_SOURCES)
+ @$(top_srcdir)/ndb/config/win-libraries $@ LIB $(LDADD)
diff --git a/ndb/src/kernel/blocks/trix/Makefile_old b/ndb/src/kernel/blocks/trix/Makefile_old
deleted file mode 100644
index 5ac0da11f33..00000000000
--- a/ndb/src/kernel/blocks/trix/Makefile_old
+++ /dev/null
@@ -1,8 +0,0 @@
-include .defs.mk
-
-TYPE := kernel
-
-ARCHIVE_TARGET := trix
-SOURCES = Trix.cpp
-
-include $(NDB_TOP)/Epilogue.mk
diff --git a/ndb/src/kernel/blocks/trix/Trix.cpp b/ndb/src/kernel/blocks/trix/Trix.cpp
index 80cf9f88c0d..75bc19b6a20 100644
--- a/ndb/src/kernel/blocks/trix/Trix.cpp
+++ b/ndb/src/kernel/blocks/trix/Trix.cpp
@@ -962,6 +962,6 @@ void Trix::checkParallelism(Signal* signal, SubscriptionRecord* subRec)
}
}
-BLOCK_FUNCTIONS(Trix);
+BLOCK_FUNCTIONS(Trix)
template void append(DataBuffer<15>&,SegmentedSectionPtr,SectionSegmentPool&);