Diffstat (limited to 'storage')
-rw-r--r--  storage/ndb/include/kernel/AttributeHeader.hpp | 4
-rw-r--r--  storage/ndb/include/kernel/GlobalSignalNumbers.h | 2
-rw-r--r--  storage/ndb/include/kernel/ndb_limits.h | 1
-rw-r--r--  storage/ndb/include/kernel/signaldata/CreateFragmentation.hpp | 12
-rw-r--r--  storage/ndb/include/kernel/signaldata/DictTabInfo.hpp | 7
-rw-r--r--  storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp | 7
-rw-r--r--  storage/ndb/include/ndbapi/NdbDictionary.hpp | 16
-rw-r--r--  storage/ndb/src/common/debugger/signaldata/CreateFragmentation.cpp | 1
-rw-r--r--  storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp | 2
-rw-r--r--  storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp | 2
-rw-r--r--  storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp | 2
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 76
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp | 4
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp | 7
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 333
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp | 2
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp | 2
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 4
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp | 7
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 58
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 2
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp | 10
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp | 1
-rw-r--r--  storage/ndb/src/ndbapi/NdbBlob.cpp | 36
-rw-r--r--  storage/ndb/src/ndbapi/NdbDictionary.cpp | 22
-rw-r--r--  storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp | 92
-rw-r--r--  storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp | 4
-rw-r--r--  storage/ndb/test/run-test/README | 4
-rw-r--r--  storage/ndb/tools/restore/Restore.cpp | 3
29 files changed, 480 insertions, 243 deletions
diff --git a/storage/ndb/include/kernel/AttributeHeader.hpp b/storage/ndb/include/kernel/AttributeHeader.hpp
index 3cb432067eb..7d89219b8b2 100644
--- a/storage/ndb/include/kernel/AttributeHeader.hpp
+++ b/storage/ndb/include/kernel/AttributeHeader.hpp
@@ -30,9 +30,9 @@ class AttributeHeader {
public:
/**
- * Psuedo columns
+ * Pseudo columns
*/
- STATIC_CONST( PSUEDO = 0x8000 );
+ STATIC_CONST( PSEUDO = 0x8000 );
STATIC_CONST( FRAGMENT = 0xFFFE ); // Read fragment no
STATIC_CONST( ROW_COUNT = 0xFFFD ); // Read row count (committed)
STATIC_CONST( COMMIT_COUNT = 0xFFFC ); // Read commit count
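The PSEUDO bit above marks attribute ids in the reserved range used for the NDB$* pseudo columns (FRAGMENT, ROW_COUNT, COMMIT_COUNT, ...). As a hedged illustration of how an application consumes them through the NDB API, here is a minimal sketch of reading a row's fragment number; the transaction handle, the table object and the primary key column name "pk" are assumptions for the example, not part of this patch.

// Sketch only: read the NDB$FRAGMENT pseudo column for one row.
// `trans`, `tab` and the column name "pk" are placeholders.
#include <NdbApi.hpp>

int read_fragment_no(NdbTransaction* trans,
                     const NdbDictionary::Table* tab,
                     Uint32 pk_value)
{
  NdbOperation* op = trans->getNdbOperation(tab);
  if (op == NULL || op->readTuple() != 0 || op->equal("pk", pk_value) != 0)
    return -1;
  // NdbDictionary::Column::FRAGMENT wraps the FRAGMENT pseudo attribute id
  NdbRecAttr* frag = op->getValue(NdbDictionary::Column::FRAGMENT);
  if (frag == NULL || trans->execute(NdbTransaction::NoCommit) != 0)
    return -1;
  return (int)frag->u_32_value();
}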
diff --git a/storage/ndb/include/kernel/GlobalSignalNumbers.h b/storage/ndb/include/kernel/GlobalSignalNumbers.h
index 9413f4ef56a..cc016b1f3e5 100644
--- a/storage/ndb/include/kernel/GlobalSignalNumbers.h
+++ b/storage/ndb/include/kernel/GlobalSignalNumbers.h
@@ -944,6 +944,6 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_TUX_BOUND_INFO 710
#define GSN_ACC_LOCKREQ 711
-#define GSN_READ_PSUEDO_REQ 712
+#define GSN_READ_PSEUDO_REQ 712
#endif
diff --git a/storage/ndb/include/kernel/ndb_limits.h b/storage/ndb/include/kernel/ndb_limits.h
index e60153e60ec..9baec7d69dc 100644
--- a/storage/ndb/include/kernel/ndb_limits.h
+++ b/storage/ndb/include/kernel/ndb_limits.h
@@ -63,6 +63,7 @@
#define MAX_FRM_DATA_SIZE 6000
#define MAX_NULL_BITS 4096
#define MAX_FRAGMENT_DATA_BYTES (4+(2 * 8 * MAX_REPLICAS * MAX_NDB_NODES))
+#define MAX_NDB_PARTITIONS 1024
#define MIN_ATTRBUF ((MAX_ATTRIBUTES_IN_TABLE/24) + 1)
/*
diff --git a/storage/ndb/include/kernel/signaldata/CreateFragmentation.hpp b/storage/ndb/include/kernel/signaldata/CreateFragmentation.hpp
index 7d53dd91154..04638b81b99 100644
--- a/storage/ndb/include/kernel/signaldata/CreateFragmentation.hpp
+++ b/storage/ndb/include/kernel/signaldata/CreateFragmentation.hpp
@@ -33,14 +33,13 @@ class CreateFragmentationReq {
friend bool printCREATE_FRAGMENTATION_REQ(FILE *,
const Uint32 *, Uint32, Uint16);
public:
- STATIC_CONST( SignalLength = 6 );
+ STATIC_CONST( SignalLength = 5 );
private:
Uint32 senderRef;
Uint32 senderData;
Uint32 fragmentationType;
Uint32 noOfFragments;
- Uint32 fragmentNode;
Uint32 primaryTableId; // use same fragmentation as this table if not RNIL
};
@@ -62,10 +61,11 @@ public:
enum ErrorCode {
OK = 0
- ,InvalidFragmentationType = 1
- ,InvalidNodeId = 2
- ,InvalidNodeType = 3
- ,InvalidPrimaryTable = 4
+ ,InvalidFragmentationType = 1301
+ ,InvalidNodeId = 1302
+ ,InvalidNodeType = 1303
+ ,InvalidPrimaryTable = 1304
+ ,InvalidNodeGroup = 1305
};
private:
diff --git a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp
index 09b00cf8993..274261583a4 100644
--- a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp
+++ b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp
@@ -151,7 +151,12 @@ public:
AllNodesSmallTable = 0,
AllNodesMediumTable = 1,
AllNodesLargeTable = 2,
- SingleFragment = 3
+ SingleFragment = 3,
+ DistrKeyHash = 4,
+ DistrKeyLin = 5,
+ UserDefined = 6,
+ DistrKeyUniqueHashIndex = 7,
+ DistrKeyOrderedIndex = 8
};
// TableType constants + objects
diff --git a/storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp b/storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp
index 20a0a863094..674ce1d1d0b 100644
--- a/storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp
+++ b/storage/ndb/include/kernel/signaldata/FireTrigOrd.hpp
@@ -52,9 +52,9 @@ class FireTrigOrd {
friend bool printFIRE_TRIG_ORD(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo);
public:
- STATIC_CONST( SignalLength = 7 );
- STATIC_CONST( SignalWithGCILength = 8 );
- STATIC_CONST( SignalWithHashValueLength = 9 );
+ STATIC_CONST( SignalLength = 8 );
+ STATIC_CONST( SignalWithGCILength = 9 );
+ STATIC_CONST( SignalWithHashValueLength = 10 );
private:
Uint32 m_connectionPtr;
@@ -64,6 +64,7 @@ private:
Uint32 m_noPrimKeyWords;
Uint32 m_noBeforeValueWords;
Uint32 m_noAfterValueWords;
+ Uint32 fragId;
Uint32 m_gci;
Uint32 m_hashValue;
// Public methods
diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp
index 86130be4c4b..8ae40a738ad 100644
--- a/storage/ndb/include/ndbapi/NdbDictionary.hpp
+++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp
@@ -145,7 +145,10 @@ public:
FragSingle = 1, ///< Only one fragment
FragAllSmall = 2, ///< One fragment per node, default
FragAllMedium = 3, ///< two fragments per node
- FragAllLarge = 4 ///< Four fragments per node.
+ FragAllLarge = 4, ///< Four fragments per node.
+ DistrKeyHash = 5,
+ DistrKeyLin = 6,
+ UserDefined = 7
};
};
@@ -614,6 +617,12 @@ public:
const void* getFrmData() const;
Uint32 getFrmLength() const;
+ /**
+ * Get Node Group and Tablespace id's for fragments in table
+ */
+ const void *getNodeGroupIds() const;
+ Uint32 getNodeGroupIdsLength() const;
+
/** @} *******************************************************************/
/**
@@ -713,6 +722,11 @@ public:
void setFrm(const void* data, Uint32 len);
/**
+ * Set node group for fragments
+ */
+ void setNodeGroupIds(const void *data, Uint32 len);
+
+ /**
* Set table object type
*/
void setObjectType(Object::Type type);
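The new getNodeGroupIds()/setNodeGroupIds() pair above carries a per-fragment array of node group ids through the dictionary, which CREATE_FRAGMENTATION_REQ consumes further down in this change set. A hedged usage sketch follows, assuming a connected Ndb handle; the helper name, table and column names, and the four-fragment layout are examples only.

// Sketch only: create a table whose four fragments are pinned to
// node groups 0,1,0,1 via UserDefined fragmentation and setNodeGroupIds().
#include <NdbApi.hpp>

int create_user_partitioned_table(Ndb* ndb)
{
  NdbDictionary::Table tab("t_user_part");

  NdbDictionary::Column col("pk");
  col.setType(NdbDictionary::Column::Unsigned);
  col.setPrimaryKey(true);
  tab.addColumn(col);

  // One Uint16 per fragment; per the implementation the length argument
  // is the number of 16-bit entries, i.e. the number of fragments.
  Uint16 node_groups[4] = { 0, 1, 0, 1 };
  tab.setFragmentType(NdbDictionary::Object::UserDefined);
  tab.setNodeGroupIds(node_groups, 4);

  return ndb->getDictionary()->createTable(tab);
}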
diff --git a/storage/ndb/src/common/debugger/signaldata/CreateFragmentation.cpp b/storage/ndb/src/common/debugger/signaldata/CreateFragmentation.cpp
index 027f743b5ea..991a0cce131 100644
--- a/storage/ndb/src/common/debugger/signaldata/CreateFragmentation.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/CreateFragmentation.cpp
@@ -24,7 +24,6 @@ printCREATE_FRAGMENTATION_REQ(FILE * output, const Uint32 * theData,
fprintf(output, " senderData: %x\n", sig->senderData);
fprintf(output, " fragmentationType: %x\n", sig->fragmentationType);
fprintf(output, " noOfFragments: %x\n", sig->noOfFragments);
- fprintf(output, " fragmentNode: %x\n", sig->fragmentNode);
if (sig->primaryTableId == RNIL)
fprintf(output, " primaryTableId: none\n");
else
diff --git a/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp b/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
index a2d6fe4d64a..aa1056e5570 100644
--- a/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
+++ b/storage/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
@@ -889,7 +889,7 @@ private:
void execACC_OVER_REC(Signal* signal);
void execACC_SAVE_PAGES(Signal* signal);
void execNEXTOPERATION(Signal* signal);
- void execREAD_PSUEDO_REQ(Signal* signal);
+ void execREAD_PSEUDO_REQ(Signal* signal);
// Received signals
void execSTTOR(Signal* signal);
diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
index 90839163a72..ccc65ccf9fa 100644
--- a/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
+++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
@@ -165,7 +165,7 @@ Dbacc::Dbacc(const class Configuration & conf):
addRecSignal(GSN_ACC_OVER_REC, &Dbacc::execACC_OVER_REC);
addRecSignal(GSN_ACC_SAVE_PAGES, &Dbacc::execACC_SAVE_PAGES);
addRecSignal(GSN_NEXTOPERATION, &Dbacc::execNEXTOPERATION);
- addRecSignal(GSN_READ_PSUEDO_REQ, &Dbacc::execREAD_PSUEDO_REQ);
+ addRecSignal(GSN_READ_PSEUDO_REQ, &Dbacc::execREAD_PSEUDO_REQ);
// Received signals
addRecSignal(GSN_STTOR, &Dbacc::execSTTOR);
diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
index 24f9715c8b4..a8bb0ab894c 100644
--- a/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
@@ -11788,7 +11788,7 @@ void Dbacc::execSET_VAR_REQ(Signal* signal)
}//execSET_VAR_REQ()
void
-Dbacc::execREAD_PSUEDO_REQ(Signal* signal){
+Dbacc::execREAD_PSEUDO_REQ(Signal* signal){
jamEntry();
fragrecptr.i = signal->theData[0];
Uint32 attrId = signal->theData[1];
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index d51f9537154..2c93afc4afd 100644
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -271,7 +271,6 @@ Dbdict::packTableIntoPagesImpl(SimpleProperties::Writer & w,
req->senderData = RNIL;
req->fragmentationType = tablePtr.p->fragmentType;
req->noOfFragments = 0;
- req->fragmentNode = 0;
req->primaryTableId = tablePtr.i;
EXECUTE_DIRECT(DBDIH, GSN_CREATE_FRAGMENTATION_REQ, signal,
CreateFragmentationReq::SignalLength);
@@ -1492,8 +1491,11 @@ void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr)
tablePtr.p->gciTableCreated = 0;
tablePtr.p->noOfAttributes = ZNIL;
tablePtr.p->noOfNullAttr = 0;
+ tablePtr.p->ngLen = 0;
+ memset(tablePtr.p->ngData, 0, sizeof(tablePtr.p->ngData));
tablePtr.p->frmLen = 0;
memset(tablePtr.p->frmData, 0, sizeof(tablePtr.p->frmData));
+ tablePtr.p->fragmentCount = 0;
/*
tablePtr.p->lh3PageIndexBits = 0;
tablePtr.p->lh3DistrBits = 0;
@@ -2919,25 +2921,52 @@ Dbdict::execCREATE_TABLE_REQ(Signal* signal){
createTabPtr.p->m_fragmentsPtrI = RNIL;
createTabPtr.p->m_dihAddFragPtr = RNIL;
- Uint32 * theData = signal->getDataPtrSend();
+ Uint32 *theData = signal->getDataPtrSend(), i;
+ Uint16 *node_group= (Uint16*)&signal->theData[25];
CreateFragmentationReq * const req = (CreateFragmentationReq*)theData;
req->senderRef = reference();
req->senderData = createTabPtr.p->key;
+ req->primaryTableId = parseRecord.tablePtr.p->primaryTableId;
+ req->noOfFragments = parseRecord.tablePtr.p->ngLen >> 1;
req->fragmentationType = parseRecord.tablePtr.p->fragmentType;
- req->noOfFragments = 0;
- req->fragmentNode = 0;
- req->primaryTableId = RNIL;
+ for (i = 0; i < req->noOfFragments; i++)
+ node_group[i] = parseRecord.tablePtr.p->ngData[i];
if (parseRecord.tablePtr.p->isOrderedIndex()) {
+ jam();
// ordered index has same fragmentation as the table
- const Uint32 primaryTableId = parseRecord.tablePtr.p->primaryTableId;
- TableRecordPtr primaryTablePtr;
- c_tableRecordPool.getPtr(primaryTablePtr, primaryTableId);
- // fragmentationType must be consistent
- req->fragmentationType = primaryTablePtr.p->fragmentType;
- req->primaryTableId = primaryTableId;
- }
- sendSignal(DBDIH_REF, GSN_CREATE_FRAGMENTATION_REQ, signal,
- CreateFragmentationReq::SignalLength, JBB);
+ req->primaryTableId = parseRecord.tablePtr.p->primaryTableId;
+ req->fragmentationType = DictTabInfo::DistrKeyOrderedIndex;
+ }
+ else if (parseRecord.tablePtr.p->isHashIndex())
+ {
+ jam();
+ /*
+ Unique hash indexes have the same number of fragments as the primary
+ table and are distributed in the same manner, but they always use
+ normal hash fragmentation.
+ */
+ req->primaryTableId = parseRecord.tablePtr.p->primaryTableId;
+ req->fragmentationType = DictTabInfo::DistrKeyUniqueHashIndex;
+ }
+ else
+ {
+ jam();
+ /*
+ Blob tables come here with primaryTableId != RNIL, but we only need
+ it for creating the fragments, so we set it to RNIL now that we have
+ what we wanted from it, to avoid other side effects.
+ */
+ parseRecord.tablePtr.p->primaryTableId = RNIL;
+ }
+ EXECUTE_DIRECT(DBDIH, GSN_CREATE_FRAGMENTATION_REQ, signal,
+ CreateFragmentationReq::SignalLength);
+ jamEntry();
+ if (signal->theData[0] != 0)
+ {
+ jam();
+ parseRecord.errorCode= signal->theData[0];
+ break;
+ }
c_blockState = BS_CREATE_TAB;
return;
@@ -4884,6 +4913,10 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
tablePtr.p->frmLen = tableDesc.FrmLen;
memcpy(tablePtr.p->frmData, tableDesc.FrmData, tableDesc.FrmLen);
+ tablePtr.p->ngLen = tableDesc.FragmentDataLen;
+ memcpy(tablePtr.p->ngData, tableDesc.FragmentData,
+ tableDesc.FragmentDataLen);
+
if(tableDesc.PrimaryTableId != RNIL) {
tablePtr.p->primaryTableId = tableDesc.PrimaryTableId;
@@ -6510,7 +6543,7 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
initialiseTableRecord(indexPtr);
if (req->getIndexType() == DictTabInfo::UniqueHashIndex) {
indexPtr.p->storedTable = opPtr.p->m_storedIndex;
- indexPtr.p->fragmentType = tablePtr.p->fragmentType;
+ indexPtr.p->fragmentType = DictTabInfo::DistrKeyUniqueHashIndex;
} else if (req->getIndexType() == DictTabInfo::OrderedIndex) {
// first version will not supported logging
if (opPtr.p->m_storedIndex) {
@@ -6520,8 +6553,7 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
return;
}
indexPtr.p->storedTable = false;
- // follows table fragmentation
- indexPtr.p->fragmentType = tablePtr.p->fragmentType;
+ indexPtr.p->fragmentType = DictTabInfo::DistrKeyOrderedIndex;
} else {
jam();
opPtr.p->m_errorCode = CreateIndxRef::InvalidIndexType;
@@ -6645,7 +6677,7 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
w.add(DictTabInfo::AttributeKeyFlag, (Uint32)false);
w.add(DictTabInfo::AttributeNullableFlag, (Uint32)false);
w.add(DictTabInfo::AttributeExtType, (Uint32)DictTabInfo::ExtUnsigned);
- w.add(DictTabInfo::AttributeExtLength, tablePtr.p->tupKeyLength);
+ w.add(DictTabInfo::AttributeExtLength, tablePtr.p->tupKeyLength+1);
w.add(DictTabInfo::AttributeEnd, (Uint32)true);
}
if (indexPtr.p->isOrderedIndex()) {
@@ -11834,11 +11866,19 @@ Dbdict::alterTrigger_sendReply(Signal* signal, OpAlterTriggerPtr opPtr,
* MODULE: Support routines for index and trigger.
*/
+/*
+ This routine is used to set up the primary key attributes of the unique
+ hash index. Since we store the fragment id as part of the primary key
+ here, we insert the pseudo column for reading the fragment id first in
+ the array. This routine is used as part of building the index.
+*/
+
void
Dbdict::getTableKeyList(TableRecordPtr tablePtr, AttributeList& list)
{
jam();
list.sz = 0;
+ list.id[list.sz++] = AttributeHeader::FRAGMENT;
for (Uint32 tAttr = tablePtr.p->firstAttribute; tAttr != RNIL; ) {
AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr);
if (aRec->tupleKey)
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
index 68bb9b628d4..4ef3791a51d 100644
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
@@ -232,6 +232,10 @@ public:
/** TODO Could preferrably be made dynamic size */
Uint32 frmLen;
char frmData[MAX_FRM_DATA_SIZE];
+ /** Node Group and Tablespace id for this table */
+ /** TODO Could preferrably be made dynamic size */
+ Uint32 ngLen;
+ Uint16 ngData[MAX_NDB_PARTITIONS];
Uint32 fragmentCount;
};
diff --git a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
index ee67bf47d7b..345d1bdac0e 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
@@ -61,6 +61,7 @@
// ------------------------------------------
// Error Codes for Transactions (None sofar)
// ------------------------------------------
+#define ZUNDEFINED_FRAGMENT_ERROR 311
// --------------------------------------
// Error Codes for Add Table
@@ -469,8 +470,10 @@ public:
TS_DROPPING = 3
};
enum Method {
- HASH = 0,
- NOTDEFINED = 1
+ LINEAR_HASH = 0,
+ NOTDEFINED = 1,
+ NORMAL_HASH = 2,
+ USER_DEFINED = 3
};
CopyStatus tabCopyStatus;
UpdateState tabUpdateState;
diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index 2a661104347..4441452422e 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -5491,11 +5491,9 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){
#endif
}
- bool ok = false;
MasterLCPConf::State lcpState;
switch (c_lcpState.lcpStatus) {
case LCP_STATUS_IDLE:
- ok = true;
jam();
/*------------------------------------------------*/
/* LOCAL CHECKPOINT IS CURRENTLY NOT ACTIVE */
@@ -5506,7 +5504,6 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){
lcpState = MasterLCPConf::LCP_STATUS_IDLE;
break;
case LCP_STATUS_ACTIVE:
- ok = true;
jam();
/*--------------------------------------------------*/
/* COPY OF RESTART INFORMATION HAS BEEN */
@@ -5515,7 +5512,6 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){
lcpState = MasterLCPConf::LCP_STATUS_ACTIVE;
break;
case LCP_TAB_COMPLETED:
- ok = true;
jam();
/*--------------------------------------------------------*/
/* ALL LCP_REPORT'S HAVE BEEN COMPLETED FOR */
@@ -5525,7 +5521,6 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){
lcpState = MasterLCPConf::LCP_TAB_COMPLETED;
break;
case LCP_TAB_SAVED:
- ok = true;
jam();
/*--------------------------------------------------------*/
/* ALL LCP_REPORT'S HAVE BEEN COMPLETED FOR */
@@ -5549,15 +5544,15 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){
break;
case LCP_COPY_GCI:
case LCP_INIT_TABLES:
- ok = true;
/**
* These two states are handled by if statements above
*/
ndbrequire(false);
lcpState= MasterLCPConf::LCP_STATUS_IDLE; // remove warning
break;
+ default:
+ ndbrequire(false);
}//switch
- ndbrequire(ok);
Uint32 failedNodeId = c_lcpState.m_MASTER_LCPREQ_FailedNodeId;
MasterLCPConf * const conf = (MasterLCPConf *)&signal->theData[0];
@@ -6158,96 +6153,136 @@ void Dbdih::execDIRELEASEREQ(Signal* signal)
3.7.1 A D D T A B L E M A I N L Y
***************************************
*/
-void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){
+
+#define UNDEF_NODEGROUP 65535
+static inline void inc_node_or_group(Uint32 &node, Uint32 max_node)
+{
+ Uint32 next = node + 1;
+ node = (next == max_node ? 0 : next);
+}
+
+/*
+ Spread fragments in backwards compatible mode
+*/
+static void set_default_node_groups(Signal *signal, Uint32 noFrags)
+{
+ Uint16 *node_group_array = (Uint16*)&signal->theData[25];
+ Uint32 i;
+ node_group_array[0] = 0;
+ for (i = 1; i < noFrags; i++)
+ node_group_array[i] = UNDEF_NODEGROUP;
+}
+void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal)
+{
+ Uint16 node_group_id[MAX_NDB_PARTITIONS];
jamEntry();
CreateFragmentationReq * const req =
(CreateFragmentationReq*)signal->getDataPtr();
const Uint32 senderRef = req->senderRef;
const Uint32 senderData = req->senderData;
- const Uint32 fragmentNode = req->fragmentNode;
- const Uint32 fragmentType = req->fragmentationType;
- //const Uint32 fragmentCount = req->noOfFragments;
+ Uint32 noOfFragments = req->noOfFragments;
+ const Uint32 fragType = req->fragmentationType;
const Uint32 primaryTableId = req->primaryTableId;
Uint32 err = 0;
do {
- Uint32 noOfFragments = 0;
- Uint32 noOfReplicas = cnoReplicas;
- switch(fragmentType){
- case DictTabInfo::AllNodesSmallTable:
- jam();
- noOfFragments = csystemnodes;
- break;
- case DictTabInfo::AllNodesMediumTable:
- jam();
- noOfFragments = 2 * csystemnodes;
- break;
- case DictTabInfo::AllNodesLargeTable:
- jam();
- noOfFragments = 4 * csystemnodes;
- break;
- case DictTabInfo::SingleFragment:
- jam();
- noOfFragments = 1;
- break;
-#if 0
- case DictTabInfo::SpecifiedFragmentCount:
- noOfFragments = (fragmentCount == 0 ? 1 : (fragmentCount + 1)/ 2);
- break;
-#endif
- default:
- jam();
- err = CreateFragmentationRef::InvalidFragmentationType;
- break;
- }
- if(err)
- break;
-
NodeGroupRecordPtr NGPtr;
TabRecordPtr primTabPtr;
+ Uint32 count = 2;
+ Uint16 noOfReplicas = cnoReplicas;
+ Uint16 *fragments = (Uint16*)(signal->theData+25);
if (primaryTableId == RNIL) {
- if(fragmentNode == 0){
- jam();
- NGPtr.i = 0;
- if(noOfFragments < csystemnodes)
- {
- NGPtr.i = c_nextNodeGroup;
- c_nextNodeGroup = (NGPtr.i + 1 == cnoOfNodeGroups ? 0 : NGPtr.i + 1);
- }
- } else if(! (fragmentNode < MAX_NDB_NODES)) {
- jam();
- err = CreateFragmentationRef::InvalidNodeId;
- } else {
- jam();
- const Uint32 stat = Sysfile::getNodeStatus(fragmentNode,
- SYSFILE->nodeStatus);
- switch (stat) {
- case Sysfile::NS_Active:
- case Sysfile::NS_ActiveMissed_1:
- case Sysfile::NS_ActiveMissed_2:
- case Sysfile::NS_TakeOver:
+ jam();
+ switch ((DictTabInfo::FragmentType)fragType)
+ {
+ /*
+ Backward compatibility and for all places in the code not yet changed.
+ */
+ case DictTabInfo::AllNodesSmallTable:
jam();
+ noOfFragments = csystemnodes;
+ set_default_node_groups(signal, noOfFragments);
break;
- case Sysfile::NS_NotActive_NotTakenOver:
+ case DictTabInfo::AllNodesMediumTable:
jam();
+ noOfFragments = 2 * csystemnodes;
+ set_default_node_groups(signal, noOfFragments);
break;
- case Sysfile::NS_HotSpare:
+ case DictTabInfo::AllNodesLargeTable:
jam();
- case Sysfile::NS_NotDefined:
+ noOfFragments = 4 * csystemnodes;
+ set_default_node_groups(signal, noOfFragments);
+ break;
+ case DictTabInfo::SingleFragment:
jam();
+ noOfFragments = 1;
+ set_default_node_groups(signal, noOfFragments);
+ break;
default:
jam();
- err = CreateFragmentationRef::InvalidNodeType;
+ if (noOfFragments == 0)
+ {
+ jam();
+ err = CreateFragmentationRef::InvalidFragmentationType;
+ }
break;
+ }
+ if (err)
+ break;
+ /*
+ When we come here the exact partitioning is specified
+ and there is an array of node groups sent along as well.
+ */
+ memcpy(&node_group_id[0], &signal->theData[25], 2 * noOfFragments);
+ Uint16 next_replica_node[MAX_NDB_NODES];
+ memset(next_replica_node,0,sizeof(next_replica_node));
+ Uint32 default_node_group= c_nextNodeGroup;
+ for(Uint32 fragNo = 0; fragNo < noOfFragments; fragNo++)
+ {
+ jam();
+ NGPtr.i = node_group_id[fragNo];
+ if (NGPtr.i == UNDEF_NODEGROUP)
+ {
+ jam();
+ NGPtr.i = default_node_group;
}
- if(err)
+ if (NGPtr.i > cnoOfNodeGroups)
+ {
+ jam();
+ err = CreateFragmentationRef::InvalidNodeGroup;
break;
- NGPtr.i = Sysfile::getNodeGroup(fragmentNode,
- SYSFILE->nodeGroups);
+ }
+ ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
+ const Uint32 max = NGPtr.p->nodeCount;
+
+ Uint32 tmp= next_replica_node[NGPtr.i];
+ for(Uint32 replicaNo = 0; replicaNo < noOfReplicas; replicaNo++)
+ {
+ jam();
+ const Uint16 nodeId = NGPtr.p->nodesInGroup[tmp];
+ fragments[count++]= nodeId;
+ inc_node_or_group(tmp, max);
+ }
+ inc_node_or_group(tmp, max);
+ next_replica_node[NGPtr.i]= tmp;
+
+ /**
+ * Next node group for next fragment
+ */
+ inc_node_or_group(default_node_group, cnoOfNodeGroups);
+ }
+ if (err)
+ {
+ jam();
break;
}
+ else
+ {
+ jam();
+ c_nextNodeGroup = default_node_group;
+ }
} else {
if (primaryTableId >= ctabFileSize) {
jam();
@@ -6261,49 +6296,14 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){
err = CreateFragmentationRef::InvalidPrimaryTable;
break;
}
- if (noOfFragments != primTabPtr.p->totalfragments) {
- jam();
- err = CreateFragmentationRef::InvalidFragmentationType;
- break;
- }
- }
-
- Uint32 count = 2;
- Uint16 *fragments = (Uint16*)(signal->theData+25);
- if (primaryTableId == RNIL) {
- jam();
- Uint8 next_replica_node[MAX_NDB_NODES];
- memset(next_replica_node,0,sizeof(next_replica_node));
- for(Uint32 fragNo = 0; fragNo<noOfFragments; fragNo++){
- jam();
- ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
- const Uint32 max = NGPtr.p->nodeCount;
-
- Uint32 tmp= next_replica_node[NGPtr.i];
- for(Uint32 replicaNo = 0; replicaNo<noOfReplicas; replicaNo++)
- {
- jam();
- const Uint32 nodeId = NGPtr.p->nodesInGroup[tmp++];
- fragments[count++] = nodeId;
- tmp = (tmp >= max ? 0 : tmp);
- }
- tmp++;
- next_replica_node[NGPtr.i]= (tmp >= max ? 0 : tmp);
-
- /**
- * Next node group for next fragment
- */
- NGPtr.i++;
- NGPtr.i = (NGPtr.i == cnoOfNodeGroups ? 0 : NGPtr.i);
- }
- } else {
+ noOfFragments= primTabPtr.p->totalfragments;
for (Uint32 fragNo = 0;
- fragNo < primTabPtr.p->totalfragments; fragNo++) {
+ fragNo < noOfFragments; fragNo++) {
jam();
FragmentstorePtr fragPtr;
ReplicaRecordPtr replicaPtr;
getFragstore(primTabPtr.p, fragNo, fragPtr);
- fragments[count++] = fragPtr.p->preferredPrimary;
+ fragments[count++]= fragPtr.p->preferredPrimary;
for (replicaPtr.i = fragPtr.p->storedReplicas;
replicaPtr.i != RNIL;
replicaPtr.i = replicaPtr.p->nextReplica) {
@@ -6311,9 +6311,9 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){
ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
if (replicaPtr.p->procNode != fragPtr.p->preferredPrimary) {
jam();
- fragments[count++] = replicaPtr.p->procNode;
- }//if
- }//for
+ fragments[count++]= replicaPtr.p->procNode;
+ }
+ }
for (replicaPtr.i = fragPtr.p->oldStoredReplicas;
replicaPtr.i != RNIL;
replicaPtr.i = replicaPtr.p->nextReplica) {
@@ -6321,25 +6321,26 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){
ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
if (replicaPtr.p->procNode != fragPtr.p->preferredPrimary) {
jam();
- fragments[count++] = replicaPtr.p->procNode;
- }//if
- }//for
+ fragments[count++]= replicaPtr.p->procNode;
+ }
+ }
}
}
- ndbrequire(count == (2 + noOfReplicas * noOfFragments));
+ ndbrequire(count == (2U + noOfReplicas * noOfFragments));
CreateFragmentationConf * const conf =
(CreateFragmentationConf*)signal->getDataPtrSend();
conf->senderRef = reference();
conf->senderData = senderData;
- conf->noOfReplicas = noOfReplicas;
- conf->noOfFragments = noOfFragments;
+ conf->noOfReplicas = (Uint32)noOfReplicas;
+ conf->noOfFragments = (Uint32)noOfFragments;
- fragments[0] = noOfReplicas;
- fragments[1] = noOfFragments;
+ fragments[0]= noOfReplicas;
+ fragments[1]= noOfFragments;
if(senderRef != 0)
{
+ jam();
LinearSectionPtr ptr[3];
ptr[0].p = (Uint32*)&fragments[0];
ptr[0].sz = (count + 1) / 2;
@@ -6351,33 +6352,17 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){
ptr,
1);
}
- else
- {
- // Execute direct
- signal->theData[0] = 0;
- }
+ // Always ACK/NACK (here ACK)
+ signal->theData[0] = 0;
return;
} while(false);
-
- if(senderRef != 0)
- {
- CreateFragmentationRef * const ref =
- (CreateFragmentationRef*)signal->getDataPtrSend();
- ref->senderRef = reference();
- ref->senderData = senderData;
- ref->errorCode = err;
- sendSignal(senderRef, GSN_CREATE_FRAGMENTATION_REF, signal,
- CreateFragmentationRef::SignalLength, JBB);
- }
- else
- {
- // Execute direct
- signal->theData[0] = err;
- }
+ // Always ACK/NACK (here NACK)
+ signal->theData[0] = err;
}
void Dbdih::execDIADDTABREQ(Signal* signal)
{
+ Uint32 fragType;
jamEntry();
DiAddTabReq * const req = (DiAddTabReq*)signal->getDataPtr();
@@ -6402,6 +6387,7 @@ void Dbdih::execDIADDTABREQ(Signal* signal)
ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
tabPtr.p->connectrec = connectPtr.i;
tabPtr.p->tableType = req->tableType;
+ fragType= req->fragType;
tabPtr.p->schemaVersion = req->schemaVersion;
tabPtr.p->primaryTableId = req->primaryTableId;
@@ -6438,9 +6424,33 @@ void Dbdih::execDIADDTABREQ(Signal* signal)
/*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/
tabPtr.p->tabStatus = TabRecord::TS_CREATING;
tabPtr.p->storedTable = req->storedTable;
- tabPtr.p->method = TabRecord::HASH;
tabPtr.p->kvalue = req->kValue;
+ switch ((DictTabInfo::FragmentType)fragType)
+ {
+ case DictTabInfo::AllNodesSmallTable:
+ case DictTabInfo::AllNodesMediumTable:
+ case DictTabInfo::AllNodesLargeTable:
+ case DictTabInfo::SingleFragment:
+ jam();
+ case DictTabInfo::DistrKeyLin:
+ jam();
+ tabPtr.p->method= TabRecord::LINEAR_HASH;
+ break;
+ case DictTabInfo::DistrKeyHash:
+ case DictTabInfo::DistrKeyUniqueHashIndex:
+ case DictTabInfo::DistrKeyOrderedIndex:
+ jam();
+ tabPtr.p->method= TabRecord::NORMAL_HASH;
+ break;
+ case DictTabInfo::UserDefined:
+ jam();
+ tabPtr.p->method= TabRecord::USER_DEFINED;
+ break;
+ default:
+ ndbrequire(false);
+ }
+
union {
Uint16 fragments[2 + MAX_FRAG_PER_NODE*MAX_REPLICAS*MAX_NDB_NODES];
Uint32 align;
@@ -6875,17 +6885,40 @@ void Dbdih::execDIGETNODESREQ(Signal* signal)
tabPtr.i = req->tableId;
Uint32 hashValue = req->hashValue;
Uint32 ttabFileSize = ctabFileSize;
+ Uint32 fragId;
+ DiGetNodesConf * const conf = (DiGetNodesConf *)&signal->theData[0];
TabRecord* regTabDesc = tabRecord;
jamEntry();
ptrCheckGuard(tabPtr, ttabFileSize, regTabDesc);
- Uint32 fragId = hashValue & tabPtr.p->mask;
- ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE);
- if (fragId < tabPtr.p->hashpointer) {
+ if (tabPtr.p->method == TabRecord::LINEAR_HASH)
+ {
jam();
- fragId = hashValue & ((tabPtr.p->mask << 1) + 1);
- }//if
+ fragId = hashValue & tabPtr.p->mask;
+ ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE);
+ if (fragId < tabPtr.p->hashpointer) {
+ jam();
+ fragId = hashValue & ((tabPtr.p->mask << 1) + 1);
+ }//if
+ }
+ else if (tabPtr.p->method == TabRecord::NORMAL_HASH)
+ {
+ jam();
+ fragId= hashValue % tabPtr.p->totalfragments;
+ }
+ else
+ {
+ jam();
+ ndbassert(tabPtr.p->method == TabRecord::USER_DEFINED);
+ fragId= hashValue;
+ if (fragId >= tabPtr.p->totalfragments)
+ {
+ jam();
+ conf->zero= 1; //Indicate error;
+ signal->theData[1]= ZUNDEFINED_FRAGMENT_ERROR;
+ return;
+ }
+ }
getFragstore(tabPtr.p, fragId, fragPtr);
- DiGetNodesConf * const conf = (DiGetNodesConf *)&signal->theData[0];
Uint32 nodeCount = extractNodeInfo(fragPtr.p, conf->nodes);
Uint32 sig2 = (nodeCount - 1) +
(fragPtr.p->distributionKey << 16);
@@ -8410,8 +8443,7 @@ void Dbdih::readPagesIntoTableLab(Signal* signal, Uint32 tableId)
rf.rwfTabPtr.p->hashpointer = readPageWord(&rf);
rf.rwfTabPtr.p->kvalue = readPageWord(&rf);
rf.rwfTabPtr.p->mask = readPageWord(&rf);
- ndbrequire(readPageWord(&rf) == TabRecord::HASH);
- rf.rwfTabPtr.p->method = TabRecord::HASH;
+ rf.rwfTabPtr.p->method = (TabRecord::Method)readPageWord(&rf);
/* ---------------------------------- */
/* Type of table, 2 = temporary table */
/* ---------------------------------- */
@@ -8505,7 +8537,7 @@ void Dbdih::packTableIntoPagesLab(Signal* signal, Uint32 tableId)
writePageWord(&wf, tabPtr.p->hashpointer);
writePageWord(&wf, tabPtr.p->kvalue);
writePageWord(&wf, tabPtr.p->mask);
- writePageWord(&wf, TabRecord::HASH);
+ writePageWord(&wf, tabPtr.p->method);
writePageWord(&wf, tabPtr.p->storedTable);
signal->theData[0] = DihContinueB::ZPACK_FRAG_INTO_PAGES;
@@ -10947,6 +10979,7 @@ void Dbdih::initCommonData()
cnoHotSpare = 0;
cnoOfActiveTables = 0;
cnoOfNodeGroups = 0;
+ c_nextNodeGroup = 0;
cnoReplicas = 0;
coldgcp = 0;
coldGcpId = 0;
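The DIGETNODESREQ change above is the runtime counterpart of the new TabRecord::Method values: LINEAR_HASH keeps the old mask/hashpointer scheme, NORMAL_HASH is a plain modulo over the fragment count, and USER_DEFINED treats the incoming hash value as the fragment id itself. A standalone sketch of the three mappings follows (my own illustration, not code from this patch).

// Sketch of the hash-value-to-fragment mappings in execDIGETNODESREQ.
#include <cstdint>

enum Method { LINEAR_HASH, NORMAL_HASH, USER_DEFINED };

// Returns the fragment id, or -1 where the kernel would answer with
// ZUNDEFINED_FRAGMENT_ERROR (user-defined value out of range).
int fragment_for_hash(Method method, uint32_t hashValue,
                      uint32_t mask, uint32_t hashpointer,
                      uint32_t totalfragments)
{
  switch (method) {
  case LINEAR_HASH: {
    // Linear hashing: buckets below hashpointer have already been split,
    // so they use the doubled mask.
    uint32_t fragId = hashValue & mask;
    if (fragId < hashpointer)
      fragId = hashValue & ((mask << 1) + 1);
    return (int)fragId;
  }
  case NORMAL_HASH:
    // Fixed fragment count, plain modulo distribution.
    return (int)(hashValue % totalfragments);
  case USER_DEFINED:
    // The application supplies the fragment id directly.
    return hashValue < totalfragments ? (int)hashValue : -1;
  }
  return -1;
}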
diff --git a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
index fa7e8667e27..5328f42ba83 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
@@ -2096,7 +2096,7 @@ private:
void execSTART_EXEC_SR(Signal* signal);
void execEXEC_SRREQ(Signal* signal);
void execEXEC_SRCONF(Signal* signal);
- void execREAD_PSUEDO_REQ(Signal* signal);
+ void execREAD_PSEUDO_REQ(Signal* signal);
void execDUMP_STATE_ORD(Signal* signal);
void execACC_COM_BLOCK(Signal* signal);
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
index e39d0ca68a6..0ef72bd35ad 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
@@ -337,7 +337,7 @@ Dblqh::Dblqh(const class Configuration & conf):
addRecSignal(GSN_TUX_ADD_ATTRCONF, &Dblqh::execTUX_ADD_ATTRCONF);
addRecSignal(GSN_TUX_ADD_ATTRREF, &Dblqh::execTUX_ADD_ATTRREF);
- addRecSignal(GSN_READ_PSUEDO_REQ, &Dblqh::execREAD_PSUEDO_REQ);
+ addRecSignal(GSN_READ_PSEUDO_REQ, &Dblqh::execREAD_PSEUDO_REQ);
initData();
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index 725ea04c148..be3d259986d 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -2613,7 +2613,7 @@ Dblqh::updatePackedList(Signal* signal, HostRecord * ahostptr, Uint16 hostId)
}//Dblqh::updatePackedList()
void
-Dblqh::execREAD_PSUEDO_REQ(Signal* signal){
+Dblqh::execREAD_PSEUDO_REQ(Signal* signal){
jamEntry();
TcConnectionrecPtr regTcPtr;
regTcPtr.i = signal->theData[0];
@@ -2627,7 +2627,7 @@ Dblqh::execREAD_PSUEDO_REQ(Signal* signal){
ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
signal->theData[0] = regFragptr.p->accFragptr[regTcPtr.p->localFragptr];
- EXECUTE_DIRECT(DBACC, GSN_READ_PSUEDO_REQ, signal, 2);
+ EXECUTE_DIRECT(DBACC, GSN_READ_PSEUDO_REQ, signal, 2);
}
else
{
diff --git a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
index 2983b02de67..79b6cec6d44 100644
--- a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
@@ -395,6 +395,13 @@ public:
Uint32 fireingOperation;
/**
+ * The fragment id of the firing operation. This will be appended
+ * to the Primary Key such that the record can be found even in the
+ * case of user defined partitioning.
+ */
+ Uint32 fragId;
+
+ /**
* Used for scrapping in case of node failure
*/
Uint32 nodeId;
diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
index e4cce29ba30..717aa9688c4 100644
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -11225,6 +11225,7 @@ void Dbtc::execFIRE_TRIG_ORD(Signal* signal)
c_firedTriggerHash.remove(trigPtr);
+ trigPtr.p->fragId= fireOrd->fragId;
bool ok = trigPtr.p->keyValues.getSize() == fireOrd->m_noPrimKeyWords;
ok &= trigPtr.p->afterValues.getSize() == fireOrd->m_noAfterValueWords;
ok &= trigPtr.p->beforeValues.getSize() == fireOrd->m_noBeforeValueWords;
@@ -12122,7 +12123,11 @@ void Dbtc::executeIndexOperation(Signal* signal,
Uint32 dataPos = 0;
TcKeyReq * const tcIndxReq = &indexOp->tcIndxReq;
TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
- Uint32 * dataPtr = &tcKeyReq->scanInfo;
+ /*
+ Data points to distrGroupHashValue since scanInfo is used to send
+ the fragment id of the receiving fragment
+ */
+ Uint32 * dataPtr = &tcKeyReq->distrGroupHashValue;
Uint32 tcKeyLength = TcKeyReq::StaticLength;
Uint32 tcKeyRequestInfo = tcIndxReq->requestInfo;
TcIndexData* indexData;
@@ -12161,11 +12166,16 @@ void Dbtc::executeIndexOperation(Signal* signal,
regApiPtr->executingIndexOp = indexOp->indexOpId;;
regApiPtr->noIndexOp++; // Increase count
- // Filter out AttributeHeader:s since this should not be in key
+ /*
+ Filter out AttributeHeaders since these should not be in the key.
+ Also filter out the fragment id from the primary key and handle it
+ separately by setting it as the distribution key and setting the
+ distribution key indicator.
+ */
+
AttributeHeader* attrHeader = (AttributeHeader *) aiIter.data;
Uint32 headerSize = attrHeader->getHeaderSize();
- Uint32 keySize = attrHeader->getDataSize();
+ Uint32 keySize = attrHeader->getDataSize() - 1;
TcKeyReq::setKeyLength(tcKeyRequestInfo, keySize);
// Skip header
if (headerSize == 1) {
@@ -12175,6 +12185,9 @@ void Dbtc::executeIndexOperation(Signal* signal,
jam();
moreKeyData = indexOp->transIdAI.next(aiIter, headerSize - 1);
}//if
+ tcKeyReq->scanInfo = *aiIter.data; //Fragment Id
+ moreKeyData = indexOp->transIdAI.next(aiIter);
+ TcKeyReq::setDistributionKeyFlag(tcKeyRequestInfo, 1U);
while(// If we have not read complete key
(keySize != 0) &&
(dataPos < keyBufSize)) {
@@ -12584,10 +12597,11 @@ void Dbtc::insertIntoIndexTable(Signal* signal,
moreAttrData = keyValues.next(iter, hops);
}
AttributeHeader pkAttrHeader(attrId, totalPrimaryKeyLength);
+ Uint32 attributesLength = afterValues.getSize() +
+ pkAttrHeader.getHeaderSize() + pkAttrHeader.getDataSize() + 1;
TcKeyReq::setKeyLength(tcKeyRequestInfo, keyLength);
- tcKeyReq->attrLen = afterValues.getSize() +
- pkAttrHeader.getHeaderSize() + pkAttrHeader.getDataSize();
+ tcKeyReq->attrLen = attributesLength;
tcKeyReq->tableId = indexData->indexId;
TcKeyReq::setOperationType(tcKeyRequestInfo, ZINSERT);
TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, true);
@@ -12637,8 +12651,11 @@ void Dbtc::insertIntoIndexTable(Signal* signal,
}
tcKeyLength += dataPos;
- Uint32 attributesLength = afterValues.getSize() +
- pkAttrHeader.getHeaderSize() + pkAttrHeader.getDataSize();
+ /*
+ The size of the ATTRINFO is the unique index attributes one by one, with
+ a header for each of them (all contained in the afterValues data
+ structure), plus a header, the (compacted) primary key, and the fragment
+ id placed before the primary key.
+ */
if (attributesLength <= attrBufSize) {
jam();
// ATTRINFO fits in TCKEYREQ
@@ -12655,6 +12672,10 @@ void Dbtc::insertIntoIndexTable(Signal* signal,
// as one attribute
pkAttrHeader.insertHeader(dataPtr);
dataPtr += pkAttrHeader.getHeaderSize();
+ /*
+ Insert fragment id before primary key as part of reference to tuple
+ */
+ *dataPtr++ = firedTriggerData->fragId;
moreAttrData = keyValues.first(iter);
while(moreAttrData) {
jam();
@@ -12819,6 +12840,29 @@ void Dbtc::insertIntoIndexTable(Signal* signal,
pkAttrHeader.insertHeader(dataPtr);
dataPtr += pkAttrHeader.getHeaderSize();
attrInfoPos += pkAttrHeader.getHeaderSize();
+ /*
+ Add fragment id before primary key
+ TODO: This code really needs to be made into a long signal
+ to remove this messy code.
+ */
+ if (attrInfoPos == AttrInfo::DataLength)
+ {
+ jam();
+ // Flush ATTRINFO
+#if INTERNAL_TRIGGER_TCKEYREQ_JBA
+ sendSignal(reference(), GSN_ATTRINFO, signal,
+ AttrInfo::HeaderLength + AttrInfo::DataLength, JBA);
+#else
+ EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal,
+ AttrInfo::HeaderLength + AttrInfo::DataLength);
+ jamEntry();
+#endif
+ dataPtr = (Uint32 *) &attrInfo->attrData;
+ attrInfoPos = 0;
+ }
+ attrInfoPos++;
+ *dataPtr++ = firedTriggerData->fragId;
+
moreAttrData = keyValues.first(iter);
while(moreAttrData) {
jam();
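The DBTC changes above store the firing operation's fragment id in front of the packed primary key inside each unique index entry, and executeIndexOperation later strips it off again and uses it as the distribution key toward the base table. A small sketch of that layout follows (an illustration under the hypothetical names below, not the actual signal code).

// Sketch of the tuple-reference layout written by insertIntoIndexTable:
// AttributeHeader word, then the fragment id, then the packed primary key.
#include <cstdint>
#include <vector>

struct TupleRef {
  uint32_t attrHeaderWord;        // header word for the PK attribute
  uint32_t fragId;                // base-table fragment (new in this change)
  std::vector<uint32_t> pkWords;  // packed primary key of the base row
};

std::vector<uint32_t> pack_tuple_ref(const TupleRef& ref)
{
  std::vector<uint32_t> out;
  out.push_back(ref.attrHeaderWord);
  out.push_back(ref.fragId);      // inserted before the key words
  out.insert(out.end(), ref.pkWords.begin(), ref.pkWords.end());
  return out;
}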
diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
index f985e44d307..e4dc2fcf2ee 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -1694,7 +1694,7 @@ private:
//------------------------------------------------------------------
//------------------------------------------------------------------
bool nullFlagCheck(Uint32 attrDes2);
- Uint32 read_psuedo(Uint32 attrId, Uint32* outBuffer);
+ Uint32 read_pseudo(Uint32 attrId, Uint32* outBuffer);
//------------------------------------------------------------------
//------------------------------------------------------------------
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
index 3170d23499a..535ff50bcd5 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
@@ -210,8 +210,8 @@ int Dbtup::readAttributes(Page* const pagePtr,
} else {
return -1;
}//if
- } else if(attributeId & AttributeHeader::PSUEDO){
- Uint32 sz = read_psuedo(attributeId,
+ } else if(attributeId & AttributeHeader::PSEUDO){
+ Uint32 sz = read_pseudo(attributeId,
outBuffer+tmpAttrBufIndex+1);
AttributeHeader::init(&outBuffer[tmpAttrBufIndex], attributeId, sz);
tOutBufIndex = tmpAttrBufIndex + 1 + sz;
@@ -995,7 +995,7 @@ Dbtup::updateDynSmallVarSize(Uint32* inBuffer,
}//Dbtup::updateDynSmallVarSize()
Uint32
-Dbtup::read_psuedo(Uint32 attrId, Uint32* outBuffer){
+Dbtup::read_pseudo(Uint32 attrId, Uint32* outBuffer){
Uint32 tmp[sizeof(SignalHeader)+25];
Signal * signal = (Signal*)&tmp;
switch(attrId){
@@ -1017,7 +1017,7 @@ Dbtup::read_psuedo(Uint32 attrId, Uint32* outBuffer){
signal->theData[0] = operPtr.p->userpointer;
signal->theData[1] = attrId;
- EXECUTE_DIRECT(DBLQH, GSN_READ_PSUEDO_REQ, signal, 2);
+ EXECUTE_DIRECT(DBLQH, GSN_READ_PSEUDO_REQ, signal, 2);
outBuffer[0] = signal->theData[0];
outBuffer[1] = signal->theData[1];
return 2;
@@ -1025,7 +1025,7 @@ Dbtup::read_psuedo(Uint32 attrId, Uint32* outBuffer){
signal->theData[0] = operPtr.p->userpointer;
signal->theData[1] = attrId;
- EXECUTE_DIRECT(DBLQH, GSN_READ_PSUEDO_REQ, signal, 2);
+ EXECUTE_DIRECT(DBLQH, GSN_READ_PSEUDO_REQ, signal, 2);
outBuffer[0] = signal->theData[0];
return 1;
default:
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
index 2b65a8402c2..ce3889f0682 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
@@ -887,6 +887,7 @@ void Dbtup::sendFireTrigOrd(Signal* signal,
fireTrigOrd->setConnectionPtr(regOperPtr->tcOpIndex);
fireTrigOrd->setTriggerId(trigPtr->triggerId);
+ fireTrigOrd->fragId= regOperPtr->fragId >> 1; //Handle two local frags
switch(regOperPtr->optype) {
case(ZINSERT):
diff --git a/storage/ndb/src/ndbapi/NdbBlob.cpp b/storage/ndb/src/ndbapi/NdbBlob.cpp
index d06d6b4ef4d..77ab87ce5c1 100644
--- a/storage/ndb/src/ndbapi/NdbBlob.cpp
+++ b/storage/ndb/src/ndbapi/NdbBlob.cpp
@@ -67,11 +67,40 @@ NdbBlob::getBlobTableName(char* btname, const NdbTableImpl* t, const NdbColumnIm
void
NdbBlob::getBlobTable(NdbTableImpl& bt, const NdbTableImpl* t, const NdbColumnImpl* c)
{
+ DBUG_ENTER("NdbBlob::getBlobTable");
char btname[NdbBlobImpl::BlobTableNameSize];
getBlobTableName(btname, t, c);
bt.setName(btname);
bt.setLogging(t->getLogging());
- bt.setFragmentType(t->getFragmentType());
+ /*
+ BLOB tables use the same fragmentation as the original table
+ but may change the fragment type if it is UserDefined since it
+ must be hash based so that the kernel can handle it on its own.
+ */
+ bt.m_primaryTableId = t->m_tableId;
+ bt.m_ng.clear();
+ switch (t->getFragmentType())
+ {
+ case NdbDictionary::Object::FragAllSmall:
+ case NdbDictionary::Object::FragAllMedium:
+ case NdbDictionary::Object::FragAllLarge:
+ case NdbDictionary::Object::FragSingle:
+ bt.setFragmentType(t->getFragmentType());
+ break;
+ case NdbDictionary::Object::DistrKeyLin:
+ case NdbDictionary::Object::DistrKeyHash:
+ bt.setFragmentType(t->getFragmentType());
+ break;
+ case NdbDictionary::Object::UserDefined:
+ bt.setFragmentType(NdbDictionary::Object::DistrKeyHash);
+ break;
+ default:
+ DBUG_ASSERT(0);
+ break;
+ }
+ DBUG_PRINT("info",
+ ("Create BLOB table with primary table = %u and Fragment Type = %u",
+ bt.m_primaryTableId, (uint)bt.getFragmentType()));
{ NdbDictionary::Column bc("PK");
bc.setType(NdbDictionary::Column::Unsigned);
assert(t->m_keyLenInWords != 0);
@@ -107,6 +136,7 @@ NdbBlob::getBlobTable(NdbTableImpl& bt, const NdbTableImpl* t, const NdbColumnIm
bc.setLength(c->getPartSize());
bt.addColumn(bc);
}
+ DBUG_VOID_RETURN;
}
// initialization
@@ -371,8 +401,8 @@ NdbBlob::setPartKeyValue(NdbOperation* anOp, Uint32 part)
DBUG_ENTER("NdbBlob::setPartKeyValue");
DBUG_PRINT("info", ("dist=%u part=%u key=", getDistKey(part), part));
DBUG_DUMP("info", theKeyBuf.data, 4 * theTable->m_keyLenInWords);
- Uint32* data = (Uint32*)theKeyBuf.data;
- unsigned size = theTable->m_keyLenInWords;
+ //Uint32* data = (Uint32*)theKeyBuf.data;
+ //unsigned size = theTable->m_keyLenInWords;
// TODO use attr ids after compatibility with 4.1.7 not needed
if (anOp->equal("PK", theKeyBuf.data) == -1 ||
anOp->equal("DIST", getDistKey(part)) == -1 ||
diff --git a/storage/ndb/src/ndbapi/NdbDictionary.cpp b/storage/ndb/src/ndbapi/NdbDictionary.cpp
index 79b6fb4c0e8..0d464c6d412 100644
--- a/storage/ndb/src/ndbapi/NdbDictionary.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp
@@ -412,6 +412,22 @@ NdbDictionary::Table::setFrm(const void* data, Uint32 len){
m_impl.m_frm.assign(data, len);
}
+const void*
+NdbDictionary::Table::getNodeGroupIds() const {
+ return m_impl.m_ng.get_data();
+}
+
+Uint32
+NdbDictionary::Table::getNodeGroupIdsLength() const {
+ return m_impl.m_ng.length();
+}
+
+void
+NdbDictionary::Table::setNodeGroupIds(const void* data, Uint32 noWords)
+{
+ m_impl.m_ng.assign(data, 2*noWords);
+}
+
NdbDictionary::Object::Status
NdbDictionary::Table::getObjectStatus() const {
return m_impl.m_status;
@@ -732,8 +748,10 @@ NdbDictionary::Dictionary::~Dictionary(){
}
int
-NdbDictionary::Dictionary::createTable(const Table & t){
- return m_impl.createTable(NdbTableImpl::getImpl(t));
+NdbDictionary::Dictionary::createTable(const Table & t)
+{
+ DBUG_ENTER("NdbDictionary::Dictionary::createTable");
+ DBUG_RETURN(m_impl.createTable(NdbTableImpl::getImpl(t)));
}
int
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index 34d1614d043..04b41b8dfbc 100644
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -233,7 +233,7 @@ NdbColumnImpl::equal(const NdbColumnImpl& col) const
}
NdbDictionary::Column *
-NdbColumnImpl::create_psuedo(const char * name){
+NdbColumnImpl::create_pseudo(const char * name){
NdbDictionary::Column * col = new NdbDictionary::Column();
col->setName(name);
if(!strcmp(name, "NDB$FRAGMENT")){
@@ -302,8 +302,9 @@ void
NdbTableImpl::init(){
m_changeMask= 0;
m_tableId= RNIL;
+ m_primaryTableId= RNIL;
m_frm.clear();
- m_fragmentType= NdbDictionary::Object::FragAllSmall;
+ m_fragmentType= NdbDictionary::Object::DistrKeyHash;
m_hashValueMask= 0;
m_hashpointerValue= 0;
m_logging= true;
@@ -390,6 +391,7 @@ NdbTableImpl::assign(const NdbTableImpl& org)
m_externalName.assign(org.m_externalName);
m_newExternalName.assign(org.m_newExternalName);
m_frm.assign(org.m_frm.get_data(), org.m_frm.length());
+ m_ng.assign(org.m_ng.get_data(), org.m_ng.length());
m_fragmentType = org.m_fragmentType;
m_fragmentCount = org.m_fragmentCount;
@@ -788,17 +790,17 @@ NdbDictionaryImpl::setTransporter(class Ndb* ndb,
m_globalHash->lock();
if(f_dictionary_count++ == 0){
NdbDictionary::Column::FRAGMENT=
- NdbColumnImpl::create_psuedo("NDB$FRAGMENT");
+ NdbColumnImpl::create_pseudo("NDB$FRAGMENT");
NdbDictionary::Column::FRAGMENT_MEMORY=
- NdbColumnImpl::create_psuedo("NDB$FRAGMENT_MEMORY");
+ NdbColumnImpl::create_pseudo("NDB$FRAGMENT_MEMORY");
NdbDictionary::Column::ROW_COUNT=
- NdbColumnImpl::create_psuedo("NDB$ROW_COUNT");
+ NdbColumnImpl::create_pseudo("NDB$ROW_COUNT");
NdbDictionary::Column::COMMIT_COUNT=
- NdbColumnImpl::create_psuedo("NDB$COMMIT_COUNT");
+ NdbColumnImpl::create_pseudo("NDB$COMMIT_COUNT");
NdbDictionary::Column::ROW_SIZE=
- NdbColumnImpl::create_psuedo("NDB$ROW_SIZE");
+ NdbColumnImpl::create_pseudo("NDB$ROW_SIZE");
NdbDictionary::Column::RANGE_NO=
- NdbColumnImpl::create_psuedo("NDB$RANGE_NO");
+ NdbColumnImpl::create_pseudo("NDB$RANGE_NO");
}
m_globalHash->unlock();
return true;
@@ -1220,6 +1222,9 @@ fragmentTypeMapping[] = {
{ DictTabInfo::AllNodesMediumTable, NdbDictionary::Object::FragAllMedium },
{ DictTabInfo::AllNodesLargeTable, NdbDictionary::Object::FragAllLarge },
{ DictTabInfo::SingleFragment, NdbDictionary::Object::FragSingle },
+ { DictTabInfo::DistrKeyHash, NdbDictionary::Object::DistrKeyHash },
+ { DictTabInfo::DistrKeyLin, NdbDictionary::Object::DistrKeyLin },
+ { DictTabInfo::UserDefined, NdbDictionary::Object::UserDefined },
{ -1, -1 }
};
@@ -1293,6 +1298,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
impl->m_externalName.assign(externalName);
impl->m_frm.assign(tableDesc.FrmData, tableDesc.FrmLen);
+ impl->m_ng.assign(tableDesc.FragmentData, tableDesc.FragmentDataLen);
impl->m_fragmentType = (NdbDictionary::Object::FragmentType)
getApiConstant(tableDesc.FragmentType,
@@ -1406,12 +1412,12 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
if(tableDesc.FragmentDataLen > 0)
{
- Uint32 replicaCount = tableDesc.FragmentData[0];
- Uint32 fragCount = tableDesc.FragmentData[1];
+ Uint16 replicaCount = tableDesc.FragmentData[0];
+ Uint16 fragCount = tableDesc.FragmentData[1];
impl->m_replicaCount = replicaCount;
impl->m_fragmentCount = fragCount;
-
+ DBUG_PRINT("info", ("replicaCount=%x , fragCount=%x",replicaCount,fragCount));
for(i = 0; i<(fragCount*replicaCount); i++)
{
impl->m_fragments.push_back(tableDesc.FragmentData[i+2]);
@@ -1452,29 +1458,35 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
int
NdbDictionaryImpl::createTable(NdbTableImpl &t)
{
+ DBUG_ENTER("NdbDictionaryImpl::createTable");
if (m_receiver.createTable(m_ndb, t) != 0)
- return -1;
+ {
+ DBUG_RETURN(-1);
+ }
if (t.m_noOfBlobs == 0)
- return 0;
+ {
+ DBUG_RETURN(0);
+ }
// update table def from DICT
Ndb_local_table_info *info=
get_local_table_info(t.m_internalName,false);
if (info == NULL) {
m_error.code= 709;
- return -1;
+ DBUG_RETURN(-1);
}
if (createBlobTables(*(info->m_table_impl)) != 0) {
int save_code = m_error.code;
(void)dropTable(t);
m_error.code= save_code;
- return -1;
+ DBUG_RETURN(-1);
}
- return 0;
+ DBUG_RETURN(0);
}
int
NdbDictionaryImpl::createBlobTables(NdbTableImpl &t)
{
+ DBUG_ENTER("NdbDictionaryImpl::createBlobTables");
for (unsigned i = 0; i < t.m_columns.size(); i++) {
NdbColumnImpl & c = *t.m_columns[i];
if (! c.getBlobType() || c.getPartSize() == 0)
@@ -1482,23 +1494,26 @@ NdbDictionaryImpl::createBlobTables(NdbTableImpl &t)
NdbTableImpl bt;
NdbBlob::getBlobTable(bt, &t, &c);
if (createTable(bt) != 0)
- return -1;
+ {
+ DBUG_RETURN(-1);
+ }
// Save BLOB table handle
Ndb_local_table_info *info=
get_local_table_info(bt.m_internalName, false);
- if (info == 0) {
- return -1;
+ if (info == 0)
+ {
+ DBUG_RETURN(-1);
}
c.m_blobTable = info->m_table_impl;
}
-
- return 0;
+ DBUG_RETURN(0);
}
int
NdbDictionaryImpl::addBlobTables(NdbTableImpl &t)
{
unsigned n= t.m_noOfBlobs;
+ DBUG_ENTER("NdbDictioanryImpl::addBlobTables");
// optimized for blob column being the last one
// and not looking for more than one if not neccessary
for (unsigned i = t.m_columns.size(); i > 0 && n > 0;) {
@@ -1512,19 +1527,19 @@ NdbDictionaryImpl::addBlobTables(NdbTableImpl &t)
// Save BLOB table handle
NdbTableImpl * cachedBlobTable = getTable(btname);
if (cachedBlobTable == 0) {
- return -1;
+ DBUG_RETURN(-1);
}
c.m_blobTable = cachedBlobTable;
}
-
- return 0;
+ DBUG_RETURN(0);
}
int
NdbDictInterface::createTable(Ndb & ndb,
NdbTableImpl & impl)
{
- return createOrAlterTable(ndb, impl, false);
+ DBUG_ENTER("NdbDictInterface::createTable");
+ DBUG_RETURN(createOrAlterTable(ndb, impl, false));
}
int NdbDictionaryImpl::alterTable(NdbTableImpl &impl)
@@ -1560,7 +1575,8 @@ int
NdbDictInterface::alterTable(Ndb & ndb,
NdbTableImpl & impl)
{
- return createOrAlterTable(ndb, impl, true);
+ DBUG_ENTER("NdbDictInterface::alterTable");
+ DBUG_RETURN(createOrAlterTable(ndb, impl, true));
}
int
@@ -1592,7 +1608,8 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
ndb.internalize_table_name(impl.m_externalName.c_str()));
impl.m_internalName.assign(internalName);
UtilBufferWriter w(m_buffer);
- DictTabInfo::Table tmpTab; tmpTab.init();
+ DictTabInfo::Table tmpTab;
+ tmpTab.init();
BaseString::snprintf(tmpTab.TableName,
sizeof(tmpTab.TableName),
internalName.c_str());
@@ -1615,6 +1632,10 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
if (col->m_distributionKey)
distKeys++;
}
+ if (distKeys == impl.m_noOfKeys)
+ distKeys= 0;
+ impl.m_noOfDistributionKeys= distKeys;
+
// Check max length of frm data
if (impl.m_frm.length() > MAX_FRM_DATA_SIZE){
@@ -1623,12 +1644,15 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
}
tmpTab.FrmLen = impl.m_frm.length();
memcpy(tmpTab.FrmData, impl.m_frm.get_data(), impl.m_frm.length());
+ tmpTab.FragmentDataLen = impl.m_ng.length();
+ memcpy(tmpTab.FragmentData, impl.m_ng.get_data(), impl.m_ng.length());
tmpTab.TableLoggedFlag = impl.m_logging;
tmpTab.TableKValue = impl.m_kvalue;
tmpTab.MinLoadFactor = impl.m_minLoadFactor;
tmpTab.MaxLoadFactor = impl.m_maxLoadFactor;
tmpTab.TableType = DictTabInfo::UserTable;
+ tmpTab.PrimaryTableId = impl.m_primaryTableId;
tmpTab.NoOfAttributes = sz;
tmpTab.FragmentType = getKernelConstant(impl.m_fragmentType,
@@ -1646,6 +1670,8 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
abort();
}
+ DBUG_PRINT("info",("impl.m_noOfDistributionKeys: %d impl.m_noOfKeys: %d distKeys: %d",
+ impl.m_noOfDistributionKeys, impl.m_noOfKeys, distKeys));
if (distKeys == impl.m_noOfKeys)
distKeys= 0;
impl.m_noOfDistributionKeys= distKeys;
@@ -1655,6 +1681,8 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
if(col == 0)
continue;
+ DBUG_PRINT("info",("column: %s(%d) col->m_distributionKey: %d",
+ col->m_name.c_str(), i, col->m_distributionKey));
DictTabInfo::Attribute tmpAttr; tmpAttr.init();
BaseString::snprintf(tmpAttr.AttributeName, sizeof(tmpAttr.AttributeName),
col->m_name.c_str());
@@ -1685,8 +1713,14 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
}
// distribution key not supported for Char attribute
if (distKeys && col->m_distributionKey && col->m_cs != NULL) {
- m_error.code= 745;
- DBUG_RETURN(-1);
+ // we can allow this for non-var char where strxfrm does nothing
+ if (col->m_type == NdbDictionary::Column::Char &&
+ (col->m_cs->state & MY_CS_BINSORT))
+ ;
+ else {
+ m_error.code= 745;
+ DBUG_RETURN(-1);
+ }
}
// charset in upper half of precision
if (col->getCharType()) {
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
index 754d0000718..b4614ec3512 100644
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
@@ -90,7 +90,7 @@ public:
static const NdbColumnImpl & getImpl(const NdbDictionary::Column & t);
NdbDictionary::Column * m_facade;
- static NdbDictionary::Column * create_psuedo(const char *);
+ static NdbDictionary::Column * create_pseudo(const char *);
};
class NdbTableImpl : public NdbDictionary::Table, public NdbDictObjectImpl {
@@ -105,10 +105,12 @@ public:
Uint32 m_changeMask;
Uint32 m_tableId;
+ Uint32 m_primaryTableId;
BaseString m_internalName;
BaseString m_externalName;
BaseString m_newExternalName; // Used for alter table
UtilBuffer m_frm;
+ UtilBuffer m_ng;
NdbDictionary::Object::FragmentType m_fragmentType;
/**
diff --git a/storage/ndb/test/run-test/README b/storage/ndb/test/run-test/README
index d5da8f05c17..57f085711ce 100644
--- a/storage/ndb/test/run-test/README
+++ b/storage/ndb/test/run-test/README
@@ -9,7 +9,7 @@ atrt supports fully distributed test and utilizes ndb_cpcd.
atrt has the following main loop:
/**
- * Psuedo code for atrt
+ * Pseudo code for atrt
*/
read config file (default d.txt)
contact each ndb_cpcd
@@ -36,7 +36,7 @@ atrt has the following main loop:
done
/**
- * End of psuedo code
+ * End of pseudo code
*/
=================================
diff --git a/storage/ndb/tools/restore/Restore.cpp b/storage/ndb/tools/restore/Restore.cpp
index b53255820db..18edb489d77 100644
--- a/storage/ndb/tools/restore/Restore.cpp
+++ b/storage/ndb/tools/restore/Restore.cpp
@@ -226,7 +226,8 @@ RestoreMetaData::parseTableDescriptor(const Uint32 * data, Uint32 len)
return false;
debug << "parseTableInfo " << tableImpl->getName() << " done" << endl;
-
+ tableImpl->m_ng.clear();
+ tableImpl->m_fragmentType = NdbDictionary::Object::FragAllSmall;
TableS * table = new TableS(m_fileHeader.NdbVersion, tableImpl);
if(table == NULL) {
return false;