Diffstat (limited to 'ndb')
-rw-r--r--  ndb/include/debugger/EventLogger.hpp | 2
-rw-r--r--  ndb/include/kernel/AttributeHeader.hpp | 10
-rw-r--r--  ndb/include/kernel/GlobalSignalNumbers.h | 10
-rw-r--r--  ndb/include/kernel/signaldata/ArbitSignalData.hpp | 2
-rw-r--r--  ndb/include/kernel/signaldata/DictTabInfo.hpp | 15
-rw-r--r--  ndb/include/kernel/signaldata/ScanTab.hpp | 45
-rw-r--r--  ndb/include/kernel/signaldata/TcKeyRef.hpp | 3
-rw-r--r--  ndb/include/kernel/signaldata/TcRollbackRep.hpp | 3
-rw-r--r--  ndb/include/mgmapi/mgmapi.h | 2
-rw-r--r--  ndb/include/mgmapi/ndbd_exit_codes.h | 2
-rw-r--r--  ndb/include/ndb_constants.h | 7
-rw-r--r--  ndb/include/ndbapi/Ndb.hpp | 21
-rw-r--r--  ndb/include/ndbapi/NdbDictionary.hpp | 47
-rw-r--r--  ndb/include/ndbapi/NdbIndexScanOperation.hpp | 6
-rw-r--r--  ndb/include/ndbapi/NdbOperation.hpp | 2
-rw-r--r--  ndb/include/ndbapi/NdbRecAttr.hpp | 61
-rw-r--r--  ndb/include/ndbapi/NdbReceiver.hpp | 6
-rw-r--r--  ndb/include/ndbapi/NdbScanFilter.hpp | 27
-rw-r--r--  ndb/include/ndbapi/NdbScanOperation.hpp | 9
-rw-r--r--  ndb/include/ndbapi/NdbTransaction.hpp | 2
-rw-r--r--  ndb/include/ndbapi/ndbapi_limits.h | 2
-rw-r--r--  ndb/include/transporter/TransporterDefinitions.hpp | 5
-rw-r--r--  ndb/include/util/BaseString.hpp | 7
-rw-r--r--  ndb/include/util/InputStream.hpp | 1
-rw-r--r--  ndb/include/util/OutputStream.hpp | 11
-rw-r--r--  ndb/include/util/SimpleProperties.hpp | 1
-rw-r--r--  ndb/include/util/SocketAuthenticator.hpp | 1
-rw-r--r--  ndb/include/util/SocketServer.hpp | 1
-rw-r--r--  ndb/include/util/Vector.hpp | 68
-rw-r--r--  ndb/include/util/ndb_rand.h | 33
-rw-r--r--  ndb/src/Makefile.am | 2
-rw-r--r--  ndb/src/common/debugger/EventLogger.cpp | 104
-rw-r--r--  ndb/src/common/debugger/signaldata/DictTabInfo.cpp | 3
-rw-r--r--  ndb/src/common/debugger/signaldata/SignalNames.cpp | 7
-rw-r--r--  ndb/src/common/mgmcommon/ConfigRetriever.cpp | 7
-rw-r--r--  ndb/src/common/portlib/NdbTick.c | 4
-rw-r--r--  ndb/src/common/transporter/SCI_Transporter.cpp | 216
-rw-r--r--  ndb/src/common/transporter/SCI_Transporter.hpp | 18
-rw-r--r--  ndb/src/common/transporter/SHM_Transporter.cpp | 4
-rw-r--r--  ndb/src/common/transporter/TCP_Transporter.cpp | 11
-rw-r--r--  ndb/src/common/transporter/TCP_Transporter.hpp | 1
-rw-r--r--  ndb/src/common/transporter/Transporter.cpp | 4
-rw-r--r--  ndb/src/common/transporter/TransporterRegistry.cpp | 36
-rw-r--r--  ndb/src/common/util/BaseString.cpp | 145
-rw-r--r--  ndb/src/common/util/ConfigValues.cpp | 10
-rw-r--r--  ndb/src/common/util/File.cpp | 8
-rw-r--r--  ndb/src/common/util/Makefile.am | 3
-rw-r--r--  ndb/src/common/util/NdbSqlUtil.cpp | 2
-rw-r--r--  ndb/src/common/util/OutputStream.cpp | 8
-rw-r--r--  ndb/src/common/util/Properties.cpp | 4
-rw-r--r--  ndb/src/common/util/SocketClient.cpp | 2
-rw-r--r--  ndb/src/common/util/ndb_rand.c | 40
-rw-r--r--  ndb/src/common/util/random.c | 8
-rw-r--r--  ndb/src/common/util/socket_io.cpp | 4
-rw-r--r--  ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj | 38
-rw-r--r--  ndb/src/cw/cpcd/APIService.cpp | 1
-rw-r--r--  ndb/src/cw/cpcd/main.cpp | 2
-rw-r--r--  ndb/src/kernel/blocks/ERROR_codes.txt | 27
-rw-r--r--  ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 188
-rw-r--r--  ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp | 6
-rw-r--r--  ndb/src/kernel/blocks/dbacc/Dbacc.hpp | 1
-rw-r--r--  ndb/src/kernel/blocks/dbacc/DbaccInit.cpp | 1
-rw-r--r--  ndb/src/kernel/blocks/dbacc/DbaccMain.cpp | 27
-rw-r--r--  ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 57
-rw-r--r--  ndb/src/kernel/blocks/dbdict/Dbdict.hpp | 11
-rw-r--r--  ndb/src/kernel/blocks/dbdih/Dbdih.hpp | 17
-rw-r--r--  ndb/src/kernel/blocks/dbdih/DbdihInit.cpp | 2
-rw-r--r--  ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 172
-rw-r--r--  ndb/src/kernel/blocks/dblqh/Dblqh.hpp | 17
-rw-r--r--  ndb/src/kernel/blocks/dblqh/DblqhInit.cpp | 10
-rw-r--r--  ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 468
-rw-r--r--  ndb/src/kernel/blocks/dbtc/Dbtc.hpp | 51
-rw-r--r--  ndb/src/kernel/blocks/dbtc/DbtcInit.cpp | 1
-rw-r--r--  ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 454
-rw-r--r--  ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 2
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp | 6
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp | 34
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupGen.cpp | 30
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp | 2
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp | 7
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp | 11
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupScan.cpp | 1
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp | 2
-rw-r--r--  ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp | 2
-rw-r--r--  ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp | 2
-rw-r--r--  ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp | 1
-rw-r--r--  ndb/src/kernel/blocks/dbutil/DbUtil.cpp | 5
-rw-r--r--  ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp | 2
-rw-r--r--  ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp | 1
-rw-r--r--  ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp | 25
-rw-r--r--  ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp | 2
-rw-r--r--  ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp | 2
-rw-r--r--  ndb/src/kernel/blocks/qmgr/Qmgr.hpp | 3
-rw-r--r--  ndb/src/kernel/blocks/qmgr/QmgrInit.cpp | 1
-rw-r--r--  ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 32
-rw-r--r--  ndb/src/kernel/blocks/suma/Suma.cpp | 6
-rw-r--r--  ndb/src/kernel/blocks/suma/Suma.hpp | 1
-rw-r--r--  ndb/src/kernel/error/TimeModule.cpp | 2
-rw-r--r--  ndb/src/kernel/error/ndbd_exit_codes.c | 3
-rw-r--r--  ndb/src/kernel/vm/MetaData.hpp | 1
-rw-r--r--  ndb/src/kernel/vm/SimulatedBlock.cpp | 2
-rw-r--r--  ndb/src/kernel/vm/ndbd_malloc.cpp | 2
-rw-r--r--  ndb/src/libndb.ver.in | 2
-rw-r--r--  ndb/src/mgmapi/LocalConfig.cpp | 18
-rw-r--r--  ndb/src/mgmapi/mgmapi.cpp | 50
-rw-r--r--  ndb/src/mgmclient/CommandInterpreter.cpp | 298
-rw-r--r--  ndb/src/mgmclient/Makefile.am | 4
-rw-r--r--  ndb/src/mgmclient/main.cpp | 2
-rw-r--r--  ndb/src/mgmsrv/ConfigInfo.cpp | 14
-rw-r--r--  ndb/src/mgmsrv/ConfigInfo.hpp | 7
-rw-r--r--  ndb/src/mgmsrv/InitConfigFileParser.cpp | 40
-rw-r--r--  ndb/src/mgmsrv/Makefile.am | 2
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvr.cpp | 26
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvr.hpp | 2
-rw-r--r--  ndb/src/mgmsrv/Services.cpp | 103
-rw-r--r--  ndb/src/mgmsrv/Services.hpp | 9
-rw-r--r--  ndb/src/mgmsrv/main.cpp | 1
-rw-r--r--  ndb/src/ndbapi/ClusterMgr.cpp | 15
-rw-r--r--  ndb/src/ndbapi/ClusterMgr.hpp | 3
-rw-r--r--  ndb/src/ndbapi/DictCache.cpp | 13
-rw-r--r--  ndb/src/ndbapi/DictCache.hpp | 2
-rw-r--r--  ndb/src/ndbapi/Makefile.am | 3
-rw-r--r--  ndb/src/ndbapi/Ndb.cpp | 165
-rw-r--r--  ndb/src/ndbapi/NdbBlob.cpp | 23
-rw-r--r--  ndb/src/ndbapi/NdbDictionary.cpp | 99
-rw-r--r--  ndb/src/ndbapi/NdbDictionaryImpl.cpp | 297
-rw-r--r--  ndb/src/ndbapi/NdbDictionaryImpl.hpp | 14
-rw-r--r--  ndb/src/ndbapi/NdbImpl.hpp | 43
-rw-r--r--  ndb/src/ndbapi/NdbIndexOperation.cpp | 2
-rw-r--r--  ndb/src/ndbapi/NdbOperation.cpp | 6
-rw-r--r--  ndb/src/ndbapi/NdbOperationDefine.cpp | 80
-rw-r--r--  ndb/src/ndbapi/NdbOperationExec.cpp | 9
-rw-r--r--  ndb/src/ndbapi/NdbOperationSearch.cpp | 35
-rw-r--r--  ndb/src/ndbapi/NdbRecAttr.cpp | 164
-rw-r--r--  ndb/src/ndbapi/NdbReceiver.cpp | 31
-rw-r--r--  ndb/src/ndbapi/NdbScanFilter.cpp | 307
-rw-r--r--  ndb/src/ndbapi/NdbScanOperation.cpp | 97
-rw-r--r--  ndb/src/ndbapi/NdbTransaction.cpp | 64
-rw-r--r--  ndb/src/ndbapi/Ndbif.cpp | 3
-rw-r--r--  ndb/src/ndbapi/Ndblist.cpp | 10
-rw-r--r--  ndb/src/ndbapi/ObjectMap.cpp | 62
-rw-r--r--  ndb/src/ndbapi/ObjectMap.hpp | 56
-rw-r--r--  ndb/src/ndbapi/SignalSender.cpp | 20
-rw-r--r--  ndb/src/ndbapi/TransporterFacade.hpp | 16
-rw-r--r--  ndb/src/ndbapi/ndb_cluster_connection.cpp | 16
-rw-r--r--  ndb/src/ndbapi/ndb_cluster_connection_impl.hpp | 2
-rw-r--r--  ndb/src/ndbapi/ndberror.c | 7
-rw-r--r--  ndb/test/include/NDBT_Test.hpp | 6
-rw-r--r--  ndb/test/include/NdbRestarter.hpp | 2
-rw-r--r--  ndb/test/ndbapi/Makefile.am | 2
-rw-r--r--  ndb/test/ndbapi/benchronja.cpp | 19
-rw-r--r--  ndb/test/ndbapi/flexAsynch.cpp | 19
-rw-r--r--  ndb/test/ndbapi/flexHammer.cpp | 15
-rw-r--r--  ndb/test/ndbapi/flexScan.cpp | 11
-rw-r--r--  ndb/test/ndbapi/flexTT.cpp | 19
-rw-r--r--  ndb/test/ndbapi/flexTimedAsynch.cpp | 23
-rw-r--r--  ndb/test/ndbapi/initronja.cpp | 9
-rw-r--r--  ndb/test/ndbapi/testBitfield.cpp | 14
-rw-r--r--  ndb/test/ndbapi/testBlobs.cpp | 283
-rw-r--r--  ndb/test/ndbapi/testIndex.cpp | 117
-rw-r--r--  ndb/test/ndbapi/testNdbApi.cpp | 4
-rw-r--r--  ndb/test/ndbapi/testNodeRestart.cpp | 297
-rw-r--r--  ndb/test/ndbapi/testOperations.cpp | 5
-rw-r--r--  ndb/test/ndbapi/testScanFilter.cpp | 860
-rw-r--r--  ndb/test/odbc/SQL99_test/SQL99_test.cpp | 13
-rw-r--r--  ndb/test/run-test/daily-basic-tests.txt | 44
-rw-r--r--  ndb/test/src/NDBT_Table.cpp | 2
-rw-r--r--  ndb/test/src/NDBT_Test.cpp | 57
-rw-r--r--  ndb/test/src/NdbRestarter.cpp | 62
-rw-r--r--  ndb/test/src/UtilTransactions.cpp | 1
-rw-r--r--  ndb/tools/delete_all.cpp | 3
-rw-r--r--  ndb/tools/desc.cpp | 3
-rw-r--r--  ndb/tools/drop_index.cpp | 3
-rw-r--r--  ndb/tools/drop_tab.cpp | 3
-rw-r--r--  ndb/tools/listTables.cpp | 3
-rw-r--r--  ndb/tools/ndb_config.cpp | 13
-rw-r--r--  ndb/tools/restore/Restore.cpp | 99
-rw-r--r--  ndb/tools/restore/Restore.hpp | 22
-rw-r--r--  ndb/tools/restore/consumer.hpp | 1
-rw-r--r--  ndb/tools/restore/consumer_printer.cpp | 22
-rw-r--r--  ndb/tools/restore/consumer_restore.cpp | 60
-rw-r--r--  ndb/tools/restore/restore_main.cpp | 405
-rw-r--r--  ndb/tools/select_all.cpp | 5
-rw-r--r--  ndb/tools/select_count.cpp | 3
184 files changed, 5716 insertions, 1859 deletions
diff --git a/ndb/include/debugger/EventLogger.hpp b/ndb/include/debugger/EventLogger.hpp
index 11df3f513fc..f6762743df0 100644
--- a/ndb/include/debugger/EventLogger.hpp
+++ b/ndb/include/debugger/EventLogger.hpp
@@ -175,5 +175,5 @@ private:
char m_text[MAX_TEXT_LENGTH];
};
-
+extern void getRestartAction(Uint32 action, BaseString &str);
#endif
diff --git a/ndb/include/kernel/AttributeHeader.hpp b/ndb/include/kernel/AttributeHeader.hpp
index 448952a0780..1d07d8fef2d 100644
--- a/ndb/include/kernel/AttributeHeader.hpp
+++ b/ndb/include/kernel/AttributeHeader.hpp
@@ -41,8 +41,7 @@ public:
STATIC_CONST( FRAGMENT_MEMORY= 0xFFF9 );
/** Initialize AttributeHeader at location aHeaderPtr */
- static AttributeHeader& init(void* aHeaderPtr, Uint32 anAttributeId,
- Uint32 aDataSize);
+ static void init(Uint32* aHeaderPtr, Uint32 anAttributeId, Uint32 aDataSize);
/** Returns size of AttributeHeader (usually one or two words) */
Uint32 getHeaderSize() const; // In 32-bit words
@@ -100,10 +99,11 @@ public:
*/
inline
-AttributeHeader& AttributeHeader::init(void* aHeaderPtr, Uint32 anAttributeId,
- Uint32 aDataSize)
+void AttributeHeader::init(Uint32* aHeaderPtr, Uint32 anAttributeId,
+ Uint32 aDataSize)
{
- return * new (aHeaderPtr) AttributeHeader(anAttributeId, aDataSize);
+ AttributeHeader ah(anAttributeId, aDataSize);
+ *aHeaderPtr = ah.m_value;
}
inline
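AttributeHeader::init() now takes a Uint32* and writes the packed header word directly instead of placement-constructing an object over caller memory. A minimal usage sketch; the buffer handling around the call is an assumption, not part of this patch:

    // 'buf' is a caller-owned attribute-info buffer; only buf[0] is written.
    void append_attr_header(Uint32* buf, Uint32 attrId, Uint32 sizeInWords)
    {
      AttributeHeader::init(buf, attrId, sizeInWords);
      // attribute data of 'sizeInWords' words would follow at buf + 1
    }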
diff --git a/ndb/include/kernel/GlobalSignalNumbers.h b/ndb/include/kernel/GlobalSignalNumbers.h
index 08d35a0b0cb..1ffc198de41 100644
--- a/ndb/include/kernel/GlobalSignalNumbers.h
+++ b/ndb/include/kernel/GlobalSignalNumbers.h
@@ -540,13 +540,13 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_ABORT_ALL_REF 446
#define GSN_ABORT_ALL_CONF 447
-#define GSN_STATISTICS_REQ 448
+/* 448 unused - formerly GSN_STATISTICS_REQ */
#define GSN_STOP_ORD 449
#define GSN_TAMPER_ORD 450
-#define GSN_SET_VAR_REQ 451
-#define GSN_SET_VAR_CONF 452
-#define GSN_SET_VAR_REF 453
-#define GSN_STATISTICS_CONF 454
+/* 451 unused - formerly GSN_SET_VAR_REQ */
+/* 452 unused - formerly GSN_SET_VAR_CONF */
+/* 453 unused - formerly GSN_SET_VAR_REF */
+/* 454 unused - formerly GSN_STATISTICS_CONF */
#define GSN_START_ORD 455
/* 457 unused */
diff --git a/ndb/include/kernel/signaldata/ArbitSignalData.hpp b/ndb/include/kernel/signaldata/ArbitSignalData.hpp
index 0cb29ebe4ae..ed7e3929414 100644
--- a/ndb/include/kernel/signaldata/ArbitSignalData.hpp
+++ b/ndb/include/kernel/signaldata/ArbitSignalData.hpp
@@ -31,6 +31,7 @@ private:
Uint32 data[2];
public:
+ ArbitTicket() {}
STATIC_CONST( DataLength = 2 );
STATIC_CONST( TextLength = DataLength * 8 ); // hex digits
@@ -142,6 +143,7 @@ public:
ArbitTicket ticket; // ticket
NodeBitmask mask; // set of nodes
+ ArbitSignalData() {}
STATIC_CONST( SignalLength = 3 + ArbitTicket::DataLength + NodeBitmask::Size );
inline bool match(ArbitSignalData& aData) const {
diff --git a/ndb/include/kernel/signaldata/DictTabInfo.hpp b/ndb/include/kernel/signaldata/DictTabInfo.hpp
index 81bc95e5128..985cb7ef643 100644
--- a/ndb/include/kernel/signaldata/DictTabInfo.hpp
+++ b/ndb/include/kernel/signaldata/DictTabInfo.hpp
@@ -46,17 +46,17 @@ inline int my_decimal_get_binary_size(uint precision, uint scale)
#endif
#define DTIMAP(x, y, z) \
- { DictTabInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 }
+ { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 }
#define DTIMAP2(x, y, z, u, v) \
- { DictTabInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 }
+ { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::Uint32Value, u, v, 0 }
#define DTIMAPS(x, y, z, u, v) \
- { DictTabInfo::y, offsetof(x, z), SimpleProperties::StringValue, u, v, 0 }
+ { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::StringValue, u, v, 0 }
#define DTIMAPB(x, y, z, u, v, l) \
- { DictTabInfo::y, offsetof(x, z), SimpleProperties::BinaryValue, u, v, \
- offsetof(x, l) }
+ { DictTabInfo::y, my_offsetof(x, z), SimpleProperties::BinaryValue, u, v, \
+ my_offsetof(x, l) }
#define DTIBREAK(x) \
{ DictTabInfo::x, 0, SimpleProperties::InvalidValue, 0, 0, 0 }
@@ -126,6 +126,8 @@ public:
MinRowsLow = 143,
MinRowsHigh = 144,
+ SingleUserMode = 152,
+
TableEnd = 999,
AttributeName = 1000, // String, Mandatory
@@ -273,7 +275,9 @@ public:
Uint32 MaxRowsHigh;
Uint32 MinRowsLow;
Uint32 MinRowsHigh;
+ Uint32 SingleUserMode;
+ Table() {}
void init();
};
@@ -334,6 +338,7 @@ public:
Uint32 AttributeAutoIncrement;
char AttributeDefaultValue[MAX_ATTR_DEFAULT_VALUE_SIZE];
+ Attribute() {}
void init();
inline
diff --git a/ndb/include/kernel/signaldata/ScanTab.hpp b/ndb/include/kernel/signaldata/ScanTab.hpp
index 70d12c96756..38ec4cccf7b 100644
--- a/ndb/include/kernel/signaldata/ScanTab.hpp
+++ b/ndb/include/kernel/signaldata/ScanTab.hpp
@@ -46,6 +46,7 @@ public:
* Length of signal
*/
STATIC_CONST( StaticLength = 11 );
+ STATIC_CONST( MaxTotalAttrInfo = 0xFFFF );
private:
@@ -113,15 +114,15 @@ private:
z = Descending (TUX) - 1 Bit 14
x = Range Scan (TUX) - 1 Bit 15
b = Scan batch - 10 Bit 16-25 (max 1023)
- d = Distribution key flag
+ d = Distribution key flag - 1 Bit 26
1111111111222222222233
01234567890123456789012345678901
- ppppppppl hcktzxbbbbbbbbbb
+ ppppppppl hcktzxbbbbbbbbbbd
*/
-#define PARALLELL_SHIFT (0)
-#define PARALLELL_MASK (255)
+#define PARALLEL_SHIFT (0)
+#define PARALLEL_MASK (255)
#define LOCK_MODE_SHIFT (8)
#define LOCK_MODE_MASK (1)
@@ -148,11 +149,12 @@ private:
#define SCAN_BATCH_MASK (1023)
#define SCAN_DISTR_KEY_SHIFT (26)
+#define SCAN_DISTR_KEY_MASK (1)
inline
Uint8
ScanTabReq::getParallelism(const UintR & requestInfo){
- return (Uint8)((requestInfo >> PARALLELL_SHIFT) & PARALLELL_MASK);
+ return (Uint8)((requestInfo >> PARALLEL_SHIFT) & PARALLEL_MASK);
}
inline
@@ -206,58 +208,65 @@ ScanTabReq::clearRequestInfo(UintR & requestInfo){
inline
void
ScanTabReq::setParallelism(UintR & requestInfo, Uint32 type){
- ASSERT_MAX(type, PARALLELL_MASK, "ScanTabReq::setParallellism");
- requestInfo |= (type << PARALLELL_SHIFT);
+ ASSERT_MAX(type, PARALLEL_MASK, "ScanTabReq::setParallelism");
+ requestInfo= (requestInfo & ~(PARALLEL_MASK << PARALLEL_SHIFT)) |
+ ((type & PARALLEL_MASK) << PARALLEL_SHIFT);
}
inline
void
ScanTabReq::setLockMode(UintR & requestInfo, Uint32 mode){
ASSERT_MAX(mode, LOCK_MODE_MASK, "ScanTabReq::setLockMode");
- requestInfo |= (mode << LOCK_MODE_SHIFT);
+ requestInfo= (requestInfo & ~(LOCK_MODE_MASK << LOCK_MODE_SHIFT)) |
+ ((mode & LOCK_MODE_MASK) << LOCK_MODE_SHIFT);
}
inline
void
ScanTabReq::setHoldLockFlag(UintR & requestInfo, Uint32 flag){
ASSERT_BOOL(flag, "ScanTabReq::setHoldLockFlag");
- requestInfo |= (flag << HOLD_LOCK_SHIFT);
+ requestInfo= (requestInfo & ~(HOLD_LOCK_MASK << HOLD_LOCK_SHIFT)) |
+ ((flag & HOLD_LOCK_MASK) << HOLD_LOCK_SHIFT);
}
inline
void
ScanTabReq::setReadCommittedFlag(UintR & requestInfo, Uint32 flag){
ASSERT_BOOL(flag, "ScanTabReq::setReadCommittedFlag");
- requestInfo |= (flag << READ_COMMITTED_SHIFT);
+ requestInfo= (requestInfo & ~(READ_COMMITTED_MASK << READ_COMMITTED_SHIFT)) |
+ ((flag & READ_COMMITTED_MASK) << READ_COMMITTED_SHIFT);
}
inline
void
ScanTabReq::setRangeScanFlag(UintR & requestInfo, Uint32 flag){
ASSERT_BOOL(flag, "ScanTabReq::setRangeScanFlag");
- requestInfo |= (flag << RANGE_SCAN_SHIFT);
+ requestInfo= (requestInfo & ~(RANGE_SCAN_MASK << RANGE_SCAN_SHIFT)) |
+ ((flag & RANGE_SCAN_MASK) << RANGE_SCAN_SHIFT);
}
inline
void
ScanTabReq::setDescendingFlag(UintR & requestInfo, Uint32 flag){
ASSERT_BOOL(flag, "ScanTabReq::setDescendingFlag");
- requestInfo |= (flag << DESCENDING_SHIFT);
+ requestInfo= (requestInfo & ~(DESCENDING_MASK << DESCENDING_SHIFT)) |
+ ((flag & DESCENDING_MASK) << DESCENDING_SHIFT);
}
inline
void
ScanTabReq::setTupScanFlag(UintR & requestInfo, Uint32 flag){
ASSERT_BOOL(flag, "ScanTabReq::setTupScanFlag");
- requestInfo |= (flag << TUP_SCAN_SHIFT);
+ requestInfo= (requestInfo & ~(TUP_SCAN_MASK << TUP_SCAN_SHIFT)) |
+ ((flag & TUP_SCAN_MASK) << TUP_SCAN_SHIFT);
}
inline
void
ScanTabReq::setScanBatch(Uint32 & requestInfo, Uint32 flag){
ASSERT_MAX(flag, SCAN_BATCH_MASK, "ScanTabReq::setScanBatch");
- requestInfo &= ~(SCAN_BATCH_MASK << SCAN_BATCH_SHIFT);
- requestInfo |= (flag << SCAN_BATCH_SHIFT);
+ requestInfo= (requestInfo & ~(SCAN_BATCH_MASK << SCAN_BATCH_SHIFT)) |
+ ((flag & SCAN_BATCH_MASK) << SCAN_BATCH_SHIFT);
}
inline
@@ -270,7 +279,8 @@ inline
void
ScanTabReq::setKeyinfoFlag(UintR & requestInfo, Uint32 flag){
ASSERT_BOOL(flag, "ScanTabReq::setKeyinfoFlag");
- requestInfo |= (flag << KEYINFO_SHIFT);
+ requestInfo= (requestInfo & ~(KEYINFO_MASK << KEYINFO_SHIFT)) |
+ ((flag & KEYINFO_MASK) << KEYINFO_SHIFT);
}
inline
@@ -283,7 +293,8 @@ inline
void
ScanTabReq::setDistributionKeyFlag(UintR & requestInfo, Uint32 flag){
ASSERT_BOOL(flag, "ScanTabReq::setKeyinfoFlag");
- requestInfo |= (flag << SCAN_DISTR_KEY_SHIFT);
+ requestInfo= (requestInfo & ~(SCAN_DISTR_KEY_MASK << SCAN_DISTR_KEY_SHIFT)) |
+ ((flag & SCAN_DISTR_KEY_MASK) << SCAN_DISTR_KEY_SHIFT);
}
/**
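Each requestInfo setter above now clears its bit field before OR-ing in the new value, so repeated calls cannot leave stale bits behind. A minimal sketch of the clear-then-set pattern these helpers use (the helper name is illustrative only):

    // Read-modify-write of one field inside a packed request word.
    inline Uint32 set_field(Uint32 word, Uint32 value, Uint32 shift, Uint32 mask)
    {
      word &= ~(mask << shift);          // drop the old field contents
      word |= (value & mask) << shift;   // insert the new value
      return word;
    }
    // With the old "word |= value << shift" form, setting parallelism to 3 and
    // then to 4 left 3|4 == 7 in the field; the masked form yields 4.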
diff --git a/ndb/include/kernel/signaldata/TcKeyRef.hpp b/ndb/include/kernel/signaldata/TcKeyRef.hpp
index 2846ce3854f..56f6cdae29d 100644
--- a/ndb/include/kernel/signaldata/TcKeyRef.hpp
+++ b/ndb/include/kernel/signaldata/TcKeyRef.hpp
@@ -40,12 +40,13 @@ class TcKeyRef {
friend bool printTCKEYREF(FILE *, const Uint32 *, Uint32, Uint16);
public:
- STATIC_CONST( SignalLength = 4 );
+ STATIC_CONST( SignalLength = 5 );
private:
Uint32 connectPtr;
Uint32 transId[2];
Uint32 errorCode;
+ Uint32 errorData;
};
#endif
diff --git a/ndb/include/kernel/signaldata/TcRollbackRep.hpp b/ndb/include/kernel/signaldata/TcRollbackRep.hpp
index 3b5e2f3d3cb..609756605d5 100644
--- a/ndb/include/kernel/signaldata/TcRollbackRep.hpp
+++ b/ndb/include/kernel/signaldata/TcRollbackRep.hpp
@@ -38,12 +38,13 @@ class TcRollbackRep {
friend bool printTCROLBACKREP(FILE *, const Uint32 *, Uint32, Uint16);
public:
- STATIC_CONST( SignalLength = 4 );
+ STATIC_CONST( SignalLength = 5 );
private:
Uint32 connectPtr;
Uint32 transId[2];
Uint32 returnCode;
+ Uint32 errorData;
};
#endif
diff --git a/ndb/include/mgmapi/mgmapi.h b/ndb/include/mgmapi/mgmapi.h
index 2423048f98f..e5889a1ee6d 100644
--- a/ndb/include/mgmapi/mgmapi.h
+++ b/ndb/include/mgmapi/mgmapi.h
@@ -16,6 +16,8 @@
#ifndef MGMAPI_H
#define MGMAPI_H
+#define NDB_MGM_MAX_LOGLEVEL 15
+
/**
* @mainpage MySQL Cluster Management API
*
diff --git a/ndb/include/mgmapi/ndbd_exit_codes.h b/ndb/include/mgmapi/ndbd_exit_codes.h
index 874bf0aa253..1051fd9e394 100644
--- a/ndb/include/mgmapi/ndbd_exit_codes.h
+++ b/ndb/include/mgmapi/ndbd_exit_codes.h
@@ -79,6 +79,8 @@ typedef ndbd_exit_classification_enum ndbd_exit_classification;
#define NDBD_EXIT_NO_MORE_UNDOLOG 2312
#define NDBD_EXIT_SR_UNDOLOG 2313
#define NDBD_EXIT_SINGLE_USER_MODE 2314
+#define NDBD_EXIT_NODE_DECLARED_DEAD 2315
+#define NDBD_EXIT_SR_SCHEMAFILE 2316
#define NDBD_EXIT_MEMALLOC 2327
#define NDBD_EXIT_BLOCK_JBUFCONGESTION 2334
#define NDBD_EXIT_TIME_QUEUE_SHORT 2335
diff --git a/ndb/include/ndb_constants.h b/ndb/include/ndb_constants.h
index e4f46926498..e37392ca80e 100644
--- a/ndb/include/ndb_constants.h
+++ b/ndb/include/ndb_constants.h
@@ -68,4 +68,11 @@
#define NDB_TYPE_MAX 31
+/*
+ * Table single user mode
+ */
+#define NDB_SUM_LOCKED 0
+#define NDB_SUM_READONLY 1
+#define NDB_SUM_READ_WRITE 2
+
#endif
diff --git a/ndb/include/ndbapi/Ndb.hpp b/ndb/include/ndbapi/Ndb.hpp
index 516333d1834..5f0da6ea83d 100644
--- a/ndb/include/ndbapi/Ndb.hpp
+++ b/ndb/include/ndbapi/Ndb.hpp
@@ -17,7 +17,7 @@
@mainpage NDB API Programmers' Guide
This guide assumes a basic familiarity with MySQL Cluster concepts found
- on http://dev.mysql.com/doc/mysql/en/NDBCluster.html .
+ on http://dev.mysql.com/doc/mysql/en/mysql-cluster.html.
Some of the fundamental ones are also described in section @ref secConcepts.
The NDB API is a MySQL Cluster application interface
@@ -1051,6 +1051,8 @@ class Ndb
friend class NdbDictionaryImpl;
friend class NdbDictInterface;
friend class NdbBlob;
+ friend class NdbImpl;
+ friend class NdbScanFilterImpl;
#endif
public:
@@ -1091,7 +1093,7 @@ public:
*
* @param aCatalogName is the new name of the current catalog
*/
- void setCatalogName(const char * aCatalogName);
+ int setCatalogName(const char * aCatalogName);
/**
* The current schema name can be fetched by getSchemaName.
@@ -1105,7 +1107,7 @@ public:
*
* @param aSchemaName is the new name of the current schema
*/
- void setSchemaName(const char * aSchemaName);
+ int setSchemaName(const char * aSchemaName);
#endif
/**
@@ -1120,7 +1122,7 @@ public:
*
* @param aDatabaseName is the new name of the current database
*/
- void setDatabaseName(const char * aDatabaseName);
+ int setDatabaseName(const char * aDatabaseName);
/**
* The current database schema name can be fetched by getDatabaseSchemaName.
@@ -1134,7 +1136,7 @@ public:
*
* @param aDatabaseSchemaName is the new name of the current database schema
*/
- void setDatabaseSchemaName(const char * aDatabaseSchemaName);
+ int setDatabaseSchemaName(const char * aDatabaseSchemaName);
/**
* Initializes the Ndb object
@@ -1387,9 +1389,11 @@ public:
* @return 0 or -1 on error, and tupleId in out parameter
*/
int getAutoIncrementValue(const char* aTableName,
- Uint64 & tupleId, Uint32 cacheSize);
+ Uint64 & tupleId, Uint32 cacheSize,
+ Uint64 step = 1, Uint64 start = 1);
int getAutoIncrementValue(const NdbDictionary::Table * aTable,
- Uint64 & tupleId, Uint32 cacheSize);
+ Uint64 & tupleId, Uint32 cacheSize,
+ Uint64 step = 1, Uint64 start = 1);
int readAutoIncrementValue(const char* aTableName,
Uint64 & tupleId);
int readAutoIncrementValue(const NdbDictionary::Table * aTable,
@@ -1400,7 +1404,8 @@ public:
Uint64 tupleId, bool increase);
private:
int getTupleIdFromNdb(Ndb_local_table_info* info,
- Uint64 & tupleId, Uint32 cacheSize);
+ Uint64 & tupleId, Uint32 cacheSize,
+ Uint64 step = 1, Uint64 start = 1 );
int readTupleIdFromNdb(Ndb_local_table_info* info,
Uint64 & tupleId);
int setTupleIdInNdb(Ndb_local_table_info* info,
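getAutoIncrementValue() gains optional step and start arguments so that several API nodes can hand out interleaved auto-increment values. A hedged usage sketch, assuming an initialized Ndb* named ndb and an existing table t1:

    Uint64 tupleId;
    // Cache 32 ids, returning every 4th value starting at 2: 2, 6, 10, ...
    if (ndb->getAutoIncrementValue("t1", tupleId, 32, 4, 2) == -1)
    {
      // inspect ndb->getNdbError() and retry or fail
    }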
diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp
index 3e87c30d7b2..24fb9811b3d 100644
--- a/ndb/include/ndbapi/NdbDictionary.hpp
+++ b/ndb/include/ndbapi/NdbDictionary.hpp
@@ -358,7 +358,7 @@ public:
* Set name of column
* @param name Name of the column
*/
- void setName(const char * name);
+ int setName(const char * name);
/**
* Set whether column is nullable or not
@@ -446,7 +446,7 @@ public:
void setAutoIncrement(bool);
bool getAutoIncrement() const;
void setAutoIncrementInitialValue(Uint64 val);
- void setDefaultValue(const char*);
+ int setDefaultValue(const char*);
const char* getDefaultValue() const;
static const Column * FRAGMENT;
@@ -497,6 +497,15 @@ public:
*/
class Table : public Object {
public:
+ /*
+ * Single user mode specifies access rights to table during single user mode
+ */
+ enum SingleUserMode {
+ SingleUserModeLocked = NDB_SUM_LOCKED,
+ SingleUserModeReadOnly = NDB_SUM_READONLY,
+ SingleUserModeReadWrite = NDB_SUM_READ_WRITE
+ };
+
/**
* @name General
* @{
@@ -652,13 +661,13 @@ public:
* Name of table
* @param name Name of table
*/
- void setName(const char * name);
+ int setName(const char * name);
/**
* Add a column definition to a table
* @note creates a copy
*/
- void addColumn(const Column &);
+ int addColumn(const Column &);
/**
* @see NdbDictionary::Table::getLogging.
@@ -714,7 +723,7 @@ public:
/**
* Set frm file to store with this table
*/
- void setFrm(const void* data, Uint32 len);
+ int setFrm(const void* data, Uint32 len);
/**
* Set table object type
@@ -735,6 +744,13 @@ public:
void setMinRows(Uint64 minRows);
Uint64 getMinRows() const;
+ /**
+ * Set/Get SingleUserMode
+ */
+ void setSingleUserMode(enum SingleUserMode);
+ enum SingleUserMode getSingleUserMode() const;
+
+
/** @} *******************************************************************/
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
@@ -776,7 +792,12 @@ public:
* Get the name of the table being indexed
*/
const char * getTable() const;
-
+
+ /**
+ * Get the table representing the index
+ */
+ const Table * getIndexTable() const;
+
/**
* Get the number of columns in the index
*/
@@ -859,26 +880,26 @@ public:
/**
* Set the name of an index
*/
- void setName(const char * name);
+ int setName(const char * name);
/**
* Define the name of the table to be indexed
*/
- void setTable(const char * name);
+ int setTable(const char * name);
/**
* Add a column to the index definition
* Note that the order of columns will be in
* the order they are added (only matters for ordered indexes).
*/
- void addColumn(const Column & c);
+ int addColumn(const Column & c);
/**
* Add a column name to the index definition
* Note that the order of indexes will be in
* the order they are added (only matters for ordered indexes).
*/
- void addColumnName(const char * name);
+ int addColumnName(const char * name);
#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
/**
@@ -887,7 +908,7 @@ public:
* the order they are added (only matters for ordered indexes).
* Depricated, use addColumnName instead.
*/
- void addIndexColumn(const char * name);
+ int addIndexColumn(const char * name);
#endif
/**
@@ -895,7 +916,7 @@ public:
* Note that the order of indexes will be in
* the order they are added (only matters for ordered indexes).
*/
- void addColumnNames(unsigned noOfNames, const char ** names);
+ int addColumnNames(unsigned noOfNames, const char ** names);
#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
/**
@@ -904,7 +925,7 @@ public:
* the order they are added (only matters for ordered indexes).
* Depricated, use addColumnNames instead.
*/
- void addIndexColumns(int noOfNames, const char ** names);
+ int addIndexColumns(int noOfNames, const char ** names);
#endif
/**
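The dictionary setters above now return int so that allocation failures while copying names and data can be reported, and Table gains a SingleUserMode property. A sketch of checking those return codes while defining a table; the column type and error handling are assumptions, not part of this patch:

    NdbDictionary::Table tab;
    NdbDictionary::Column col;

    if (tab.setName("t1") != 0 || col.setName("pk") != 0)
      return -1;                      // copying the name failed (out of memory)

    col.setType(NdbDictionary::Column::Unsigned);
    col.setPrimaryKey(true);
    if (tab.addColumn(col) != 0)      // addColumn() copies, so it can fail too
      return -1;

    // Allow reads but not writes on this table during single user mode.
    tab.setSingleUserMode(NdbDictionary::Table::SingleUserModeReadOnly);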
diff --git a/ndb/include/ndbapi/NdbIndexScanOperation.hpp b/ndb/include/ndbapi/NdbIndexScanOperation.hpp
index 550f4201b71..8c20fd58f43 100644
--- a/ndb/include/ndbapi/NdbIndexScanOperation.hpp
+++ b/ndb/include/ndbapi/NdbIndexScanOperation.hpp
@@ -63,12 +63,14 @@ public:
bool order_by,
bool order_desc = false,
bool read_range_no = false,
- bool keyinfo = false) {
+ bool keyinfo = false,
+ bool multi_range = false) {
Uint32 scan_flags =
(SF_OrderBy & -(Int32)order_by) |
(SF_Descending & -(Int32)order_desc) |
(SF_ReadRangeNo & -(Int32)read_range_no) |
- (SF_KeyInfo & -(Int32)keyinfo);
+ (SF_KeyInfo & -(Int32)keyinfo) |
+ (SF_MultiRange & -(Int32)multi_range);
return readTuples(lock_mode, scan_flags, parallel, batch);
}
diff --git a/ndb/include/ndbapi/NdbOperation.hpp b/ndb/include/ndbapi/NdbOperation.hpp
index 5e9e6b9bde9..d4e300be15e 100644
--- a/ndb/include/ndbapi/NdbOperation.hpp
+++ b/ndb/include/ndbapi/NdbOperation.hpp
@@ -908,6 +908,8 @@ protected:
// get table or index key from prepared signals
int getKeyFromTCREQ(Uint32* data, unsigned size);
+ virtual void setReadLockMode(LockMode lockMode);
+
/******************************************************************************
* These are the private variables that are defined in the operation objects.
*****************************************************************************/
diff --git a/ndb/include/ndbapi/NdbRecAttr.hpp b/ndb/include/ndbapi/NdbRecAttr.hpp
index df7f22fef60..9679d3995d3 100644
--- a/ndb/include/ndbapi/NdbRecAttr.hpp
+++ b/ndb/include/ndbapi/NdbRecAttr.hpp
@@ -147,6 +147,13 @@ public:
/**
* Get value stored in NdbRecAttr object.
+ *
+ * @return Medium value.
+ */
+ Int32 medium_value() const;
+
+ /**
+ * Get value stored in NdbRecAttr object.
*
* @return Short value.
*/
@@ -162,6 +169,13 @@ public:
/**
* Get value stored in NdbRecAttr object.
*
+ * @return Int8 value.
+ */
+ Int8 int8_value() const;
+
+ /**
+ * Get value stored in NdbRecAttr object.
+ *
* @return 64 bit unsigned value.
*/
Uint64 u_64_value() const;
@@ -176,6 +190,13 @@ public:
/**
* Get value stored in NdbRecAttr object.
*
+ * @return Unsigned medium value.
+ */
+ Uint32 u_medium_value() const;
+
+ /**
+ * Get value stored in NdbRecAttr object.
+ *
* @return Unsigned short value.
*/
Uint16 u_short_value() const;
@@ -190,6 +211,13 @@ public:
/**
* Get value stored in NdbRecAttr object.
*
+ * @return Uint8 value.
+ */
+ Uint8 u_8_value() const;
+
+ /**
+ * Get value stored in NdbRecAttr object.
+ *
* @return Float value.
*/
float float_value() const;
@@ -333,6 +361,13 @@ NdbRecAttr::char_value() const
}
inline
+Int8
+NdbRecAttr::int8_value() const
+{
+ return *(Int8*)theRef;
+}
+
+inline
Uint32
NdbRecAttr::u_32_value() const
{
@@ -354,6 +389,13 @@ NdbRecAttr::u_char_value() const
}
inline
+Uint8
+NdbRecAttr::u_8_value() const
+{
+ return *(Uint8*)theRef;
+}
+
+inline
void
NdbRecAttr::release()
{
@@ -441,6 +483,25 @@ NdbRecAttr::isNULL() const
class NdbOut& operator <<(class NdbOut&, const NdbRecAttr &);
+class NdbRecordPrintFormat
+{
+public:
+ NdbRecordPrintFormat();
+ virtual ~NdbRecordPrintFormat();
+ const char *lines_terminated_by;
+ const char *fields_terminated_by;
+ const char *start_array_enclosure;
+ const char *end_array_enclosure;
+ const char *fields_enclosed_by;
+ const char *fields_optionally_enclosed_by;
+ const char *hex_prefix;
+ const char *null_string;
+ int hex_format;
+};
+NdbOut&
+ndbrecattr_print_formatted(NdbOut& out, const NdbRecAttr &r,
+ const NdbRecordPrintFormat &f);
+
#endif // ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
#endif
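NdbRecordPrintFormat and ndbrecattr_print_formatted() expose the formatting used when printing attribute values (for example by the restore/printer tools). A hedged sketch: the defaults set by the constructor are not visible in this header, so every field is assigned explicitly, and 'ra' is assumed to be an NdbRecAttr* obtained from getValue():

    NdbRecordPrintFormat fmt;
    fmt.lines_terminated_by           = "\n";
    fmt.fields_terminated_by          = ";";
    fmt.start_array_enclosure         = "[";
    fmt.end_array_enclosure           = "]";
    fmt.fields_enclosed_by            = "";
    fmt.fields_optionally_enclosed_by = "\"";
    fmt.hex_prefix                    = "0x";
    fmt.null_string                   = "NULL";
    fmt.hex_format                    = 0;

    ndbrecattr_print_formatted(ndbout, *ra, fmt);  // ndbout: NDB API out stream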
diff --git a/ndb/include/ndbapi/NdbReceiver.hpp b/ndb/include/ndbapi/NdbReceiver.hpp
index 73bf5c66863..b8abd281496 100644
--- a/ndb/include/ndbapi/NdbReceiver.hpp
+++ b/ndb/include/ndbapi/NdbReceiver.hpp
@@ -38,7 +38,7 @@ public:
};
NdbReceiver(Ndb *aNdb);
- void init(ReceiverType type, void* owner);
+ int init(ReceiverType type, void* owner);
void release();
~NdbReceiver();
@@ -57,7 +57,7 @@ public:
bool checkMagicNumber() const;
- inline void next(NdbReceiver* next) { m_next = next;}
+ inline void next(NdbReceiver* next_arg) { m_next = next_arg;}
inline NdbReceiver* next() { return m_next; }
void setErrorCode(int);
@@ -75,7 +75,7 @@ private:
* At setup
*/
class NdbRecAttr * getValue(const class NdbColumnImpl*, char * user_dst_ptr);
- void do_get_value(NdbReceiver*, Uint32 rows, Uint32 key_size, Uint32 range);
+ int do_get_value(NdbReceiver*, Uint32 rows, Uint32 key_size, Uint32 range);
void prepareSend();
void calculate_batch_size(Uint32, Uint32, Uint32&, Uint32&, Uint32&);
diff --git a/ndb/include/ndbapi/NdbScanFilter.hpp b/ndb/include/ndbapi/NdbScanFilter.hpp
index 1ef62558560..02fcb6215ba 100644
--- a/ndb/include/ndbapi/NdbScanFilter.hpp
+++ b/ndb/include/ndbapi/NdbScanFilter.hpp
@@ -17,6 +17,7 @@
#define NDB_SCAN_FILTER_HPP
#include <ndb_types.h>
+#include <ndbapi_limits.h>
/**
* @class NdbScanFilter
@@ -31,8 +32,13 @@ public:
/**
* Constructor
* @param op The NdbOperation that the filter belongs to (is applied to).
+ * @param abort_on_too_large abort transaction on filter too large
+ * default: true
+ * @param max_size Maximum size of generated filter in words
*/
- NdbScanFilter(class NdbOperation * op);
+ NdbScanFilter(class NdbOperation * op,
+ bool abort_on_too_large = true,
+ Uint32 max_size = NDB_MAX_SCANFILTER_SIZE_IN_WORDS);
~NdbScanFilter();
/**
@@ -166,6 +172,25 @@ public:
/** @} *********************************************************************/
#endif
+ enum Error {
+ FilterTooLarge = 4294
+ };
+
+ /**
+ * Get filter level error.
+ *
+ * Most errors are set only on operation level, and they abort the
+ * transaction. The error FilterTooLarge is set on filter level and
+ * by default it propagates to operation level and also aborts the
+ * transaction.
+ *
+ * If option abort_on_too_large is set to false, then FilterTooLarge
+ * does not propagate. One can then either ignore this error (in
+ * which case no filtering is done) or try to define a new filter
+ * immediately.
+ */
+ const class NdbError & getNdbError() const;
+
private:
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
friend class NdbScanFilterImpl;
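With abort_on_too_large set to false, an over-sized filter no longer aborts the transaction; error 4294 (FilterTooLarge) is reported on the filter object instead. A hedged sketch, assuming scanOp is an already defined scan operation and the comparison calls are elided:

    NdbScanFilter filter(scanOp,
                         false,   /* do not abort transaction on overflow */
                         4096);   /* cap the generated filter at 4096 words */
    filter.begin(NdbScanFilter::AND);
    /* ... filter.cmp()/eq() conditions ... */
    filter.end();
    if (filter.getNdbError().code == NdbScanFilter::FilterTooLarge)
    {
      // filter was dropped: scan unfiltered, or build a smaller filter
    }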
diff --git a/ndb/include/ndbapi/NdbScanOperation.hpp b/ndb/include/ndbapi/NdbScanOperation.hpp
index 749c91ab765..9117207b72c 100644
--- a/ndb/include/ndbapi/NdbScanOperation.hpp
+++ b/ndb/include/ndbapi/NdbScanOperation.hpp
@@ -37,13 +37,15 @@ class NdbScanOperation : public NdbOperation {
public:
/**
* Scan flags. OR-ed together and passed as second argument to
- * readTuples.
+ * readTuples. Note that SF_MultiRange has to be set if several
+ * ranges (bounds) are to be passed.
*/
enum ScanFlag {
SF_TupScan = (1 << 16), // scan TUP
SF_OrderBy = (1 << 24), // index scan in order
SF_Descending = (2 << 24), // index scan in descending order
SF_ReadRangeNo = (4 << 24), // enable @ref get_range_no
+ SF_MultiRange = (8 << 24), // scan is part of multi-range scan
SF_KeyInfo = 1 // request KeyInfo to be sent back
};
@@ -70,7 +72,8 @@ public:
*/
#ifdef ndb_readtuples_impossible_overload
int readTuples(LockMode lock_mode = LM_Read,
- Uint32 batch = 0, Uint32 parallel = 0, bool keyinfo = false);
+ Uint32 batch = 0, Uint32 parallel = 0,
+ bool keyinfo = false, bool multi_range = false);
#endif
inline int readTuples(int parallell){
@@ -209,6 +212,7 @@ protected:
int prepareSend(Uint32 TC_ConnectPtr, Uint64 TransactionId);
int doSend(int ProcessorId);
void checkForceSend(bool forceSend);
+ virtual void setReadLockMode(LockMode lockMode);
virtual void setErrorCode(int aErrorCode);
virtual void setErrorCodeAbort(int aErrorCode);
@@ -262,6 +266,7 @@ protected:
bool m_descending;
Uint32 m_read_range_no;
NdbRecAttr *m_curr_row; // Pointer to last returned row
+ bool m_multi_range; // Mark if operation is part of multi-range scan
};
inline
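SF_MultiRange must now be passed to readTuples() whenever more than one bound range will be defined on an index scan. A hedged sketch, assuming op is an NdbIndexScanOperation on an ordered index; the setBound()/end_of_bound() calls follow the existing multi-range API and are not part of this patch:

    Uint32 lo1 = 1, lo2 = 5;
    Uint32 flags = NdbScanOperation::SF_OrderBy |
                   NdbScanOperation::SF_MultiRange |
                   NdbScanOperation::SF_ReadRangeNo;

    if (op->readTuples(NdbOperation::LM_Read, flags) == -1)
      return -1;

    op->setBound("a", NdbIndexScanOperation::BoundEQ, &lo1);  // range 0: a = 1
    op->end_of_bound(0);
    op->setBound("a", NdbIndexScanOperation::BoundEQ, &lo2);  // range 1: a = 5
    op->end_of_bound(1);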
diff --git a/ndb/include/ndbapi/NdbTransaction.hpp b/ndb/include/ndbapi/NdbTransaction.hpp
index 13dbe8b634a..966872755ca 100644
--- a/ndb/include/ndbapi/NdbTransaction.hpp
+++ b/ndb/include/ndbapi/NdbTransaction.hpp
@@ -587,7 +587,7 @@ private:
NdbTransaction(Ndb* aNdb);
~NdbTransaction();
- void init(); // Initialize connection object for new transaction
+ int init(); // Initialize connection object for new transaction
int executeNoBlobs(ExecType execType,
AbortOption abortOption = AbortOnError,
diff --git a/ndb/include/ndbapi/ndbapi_limits.h b/ndb/include/ndbapi/ndbapi_limits.h
index 63399e4bd0a..e283913d059 100644
--- a/ndb/include/ndbapi/ndbapi_limits.h
+++ b/ndb/include/ndbapi/ndbapi_limits.h
@@ -26,4 +26,6 @@
#define NDB_MAX_TUPLE_SIZE (NDB_MAX_TUPLE_SIZE_IN_WORDS*4)
#define NDB_MAX_ACTIVE_EVENTS 100
+#define NDB_MAX_SCANFILTER_SIZE_IN_WORDS 50000
+
#endif
diff --git a/ndb/include/transporter/TransporterDefinitions.hpp b/ndb/include/transporter/TransporterDefinitions.hpp
index c1fe7619fb9..003824d01e8 100644
--- a/ndb/include/transporter/TransporterDefinitions.hpp
+++ b/ndb/include/transporter/TransporterDefinitions.hpp
@@ -116,6 +116,11 @@ struct SegmentedSectionPtr {
Uint32 i;
struct SectionSegment * p;
+ SegmentedSectionPtr() {}
+ SegmentedSectionPtr(Uint32 sz_arg, Uint32 i_arg,
+ struct SectionSegment *p_arg)
+ :sz(sz_arg), i(i_arg), p(p_arg)
+ {}
void setNull() { p = 0;}
bool isNull() const { return p == 0;}
};
diff --git a/ndb/include/util/BaseString.hpp b/ndb/include/util/BaseString.hpp
index 44e1e4614be..34ca2d2be3d 100644
--- a/ndb/include/util/BaseString.hpp
+++ b/ndb/include/util/BaseString.hpp
@@ -185,6 +185,7 @@ public:
private:
char* m_chr;
unsigned m_len;
+ friend bool operator!(const BaseString& str);
};
inline const char*
@@ -249,6 +250,12 @@ BaseString::operator!=(const char *str) const
return strcmp(m_chr, str) != 0;
}
+inline bool
+operator!(const BaseString& str)
+{
+ return str.m_chr == NULL;
+}
+
inline BaseString&
BaseString::assign(const BaseString& str)
{
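operator!() reports whether the internal buffer allocation failed (m_chr == NULL), matching the new out-of-memory handling in BaseString.cpp. A minimal sketch:

    BaseString path("/very/long/config/path");
    if (!path)
      return -1;   // constructor could not allocate the string buffer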
diff --git a/ndb/include/util/InputStream.hpp b/ndb/include/util/InputStream.hpp
index 41d58dfe1e9..7bd2494afe8 100644
--- a/ndb/include/util/InputStream.hpp
+++ b/ndb/include/util/InputStream.hpp
@@ -26,6 +26,7 @@ class InputStream {
public:
virtual ~InputStream() {}
virtual char* gets(char * buf, int bufLen) = 0;
+ InputStream() {}
};
class FileInputStream : public InputStream {
diff --git a/ndb/include/util/OutputStream.hpp b/ndb/include/util/OutputStream.hpp
index 460915e12e7..39fbca45ccf 100644
--- a/ndb/include/util/OutputStream.hpp
+++ b/ndb/include/util/OutputStream.hpp
@@ -24,6 +24,7 @@
*/
class OutputStream {
public:
+ OutputStream() {}
virtual ~OutputStream() {}
virtual int print(const char * fmt, ...) = 0;
virtual int println(const char * fmt, ...) = 0;
@@ -34,7 +35,8 @@ class FileOutputStream : public OutputStream {
FILE * f;
public:
FileOutputStream(FILE * file = stdout);
-
+ FILE *getFile() { return f; }
+
int print(const char * fmt, ...);
int println(const char * fmt, ...);
void flush() { fflush(f); }
@@ -42,10 +44,10 @@ public:
class SocketOutputStream : public OutputStream {
NDB_SOCKET_TYPE m_socket;
- unsigned m_timeout;
+ unsigned m_timeout_ms;
public:
- SocketOutputStream(NDB_SOCKET_TYPE socket, unsigned writeTimeout = 1000);
-
+ SocketOutputStream(NDB_SOCKET_TYPE socket, unsigned write_timeout_ms = 1000);
+
int print(const char * fmt, ...);
int println(const char * fmt, ...);
};
@@ -60,6 +62,7 @@ public:
class NullOutputStream : public OutputStream {
public:
+ NullOutputStream() {}
int print(const char * /* unused */, ...) { return 1;}
int println(const char * /* unused */, ...) { return 1;}
};
diff --git a/ndb/include/util/SimpleProperties.hpp b/ndb/include/util/SimpleProperties.hpp
index 0993c272b47..82900ff48eb 100644
--- a/ndb/include/util/SimpleProperties.hpp
+++ b/ndb/include/util/SimpleProperties.hpp
@@ -168,6 +168,7 @@ public:
bool add(Uint16 key, Uint32 value);
bool add(Uint16 key, const char * value);
bool add(Uint16 key, const void* value, int len);
+ Writer() {}
protected:
virtual ~Writer() {}
virtual bool reset() = 0;
diff --git a/ndb/include/util/SocketAuthenticator.hpp b/ndb/include/util/SocketAuthenticator.hpp
index a9ab1869ebc..e76af30dc35 100644
--- a/ndb/include/util/SocketAuthenticator.hpp
+++ b/ndb/include/util/SocketAuthenticator.hpp
@@ -19,6 +19,7 @@
class SocketAuthenticator
{
public:
+ SocketAuthenticator() {}
virtual ~SocketAuthenticator() {};
virtual bool client_authenticate(int sockfd) = 0;
virtual bool server_authenticate(int sockfd) = 0;
diff --git a/ndb/include/util/SocketServer.hpp b/ndb/include/util/SocketServer.hpp
index eebfaec5831..8b49dc6db4e 100644
--- a/ndb/include/util/SocketServer.hpp
+++ b/ndb/include/util/SocketServer.hpp
@@ -59,6 +59,7 @@ public:
*/
class Service {
public:
+ Service() {}
virtual ~Service(){}
/**
diff --git a/ndb/include/util/Vector.hpp b/ndb/include/util/Vector.hpp
index aeddbbb22f0..8f403b435dd 100644
--- a/ndb/include/util/Vector.hpp
+++ b/ndb/include/util/Vector.hpp
@@ -29,14 +29,14 @@ public:
const T& operator[](unsigned i) const;
unsigned size() const { return m_size; };
- void push_back(const T &);
+ int push_back(const T &);
T& back();
void erase(unsigned index);
void clear();
- void fill(unsigned new_size, T & obj);
+ int fill(unsigned new_size, T & obj);
Vector<T>& operator=(const Vector<T>&);
@@ -52,6 +52,14 @@ private:
template<class T>
Vector<T>::Vector(int i){
m_items = new T[i];
+ if (m_items == NULL)
+ {
+ errno = ENOMEM;
+ m_size = 0;
+ m_arraySize = 0;
+ m_incSize = 0;
+ return;
+ }
m_size = 0;
m_arraySize = i;
m_incSize = 50;
@@ -89,12 +97,15 @@ Vector<T>::back(){
}
template<class T>
-void
+int
Vector<T>::push_back(const T & t){
if(m_size == m_arraySize){
T * tmp = new T [m_arraySize + m_incSize];
- if(!tmp)
- abort();
+ if(tmp == NULL)
+ {
+ errno = ENOMEM;
+ return -1;
+ }
for (unsigned k = 0; k < m_size; k++)
tmp[k] = m_items[k];
delete[] m_items;
@@ -103,6 +114,8 @@ Vector<T>::push_back(const T & t){
}
m_items[m_size] = t;
m_size++;
+
+ return 0;
}
template<class T>
@@ -123,10 +136,12 @@ Vector<T>::clear(){
}
template<class T>
-void
+int
Vector<T>::fill(unsigned new_size, T & obj){
while(m_size <= new_size)
- push_back(obj);
+ if (push_back(obj))
+ return -1;
+ return 0;
}
template<class T>
@@ -150,8 +165,8 @@ struct MutexVector : public NdbLockable {
const T& operator[](unsigned i) const;
unsigned size() const { return m_size; };
- void push_back(const T &);
- void push_back(const T &, bool lockMutex);
+ int push_back(const T &);
+ int push_back(const T &, bool lockMutex);
T& back();
void erase(unsigned index);
@@ -160,7 +175,7 @@ struct MutexVector : public NdbLockable {
void clear();
void clear(bool lockMutex);
- void fill(unsigned new_size, T & obj);
+ int fill(unsigned new_size, T & obj);
private:
T * m_items;
unsigned m_size;
@@ -171,6 +186,14 @@ private:
template<class T>
MutexVector<T>::MutexVector(int i){
m_items = new T[i];
+ if (m_items == NULL)
+ {
+ errno = ENOMEM;
+ m_size = 0;
+ m_arraySize = 0;
+ m_incSize = 0;
+ return;
+ }
m_size = 0;
m_arraySize = i;
m_incSize = 50;
@@ -208,11 +231,17 @@ MutexVector<T>::back(){
}
template<class T>
-void
+int
MutexVector<T>::push_back(const T & t){
lock();
if(m_size == m_arraySize){
T * tmp = new T [m_arraySize + m_incSize];
+ if (tmp == NULL)
+ {
+ errno = ENOMEM;
+ unlock();
+ return -1;
+ }
for (unsigned k = 0; k < m_size; k++)
tmp[k] = m_items[k];
delete[] m_items;
@@ -222,15 +251,23 @@ MutexVector<T>::push_back(const T & t){
m_items[m_size] = t;
m_size++;
unlock();
+ return 0;
}
template<class T>
-void
+int
MutexVector<T>::push_back(const T & t, bool lockMutex){
if(lockMutex)
lock();
if(m_size == m_arraySize){
T * tmp = new T [m_arraySize + m_incSize];
+ if (tmp == NULL)
+ {
+ errno = ENOMEM;
+ if(lockMutex)
+ unlock();
+ return -1;
+ }
for (unsigned k = 0; k < m_size; k++)
tmp[k] = m_items[k];
delete[] m_items;
@@ -241,6 +278,7 @@ MutexVector<T>::push_back(const T & t, bool lockMutex){
m_size++;
if(lockMutex)
unlock();
+ return 0;
}
template<class T>
@@ -288,10 +326,12 @@ MutexVector<T>::clear(bool l){
}
template<class T>
-void
+int
MutexVector<T>::fill(unsigned new_size, T & obj){
while(m_size <= new_size)
- push_back(obj);
+ if (push_back(obj))
+ return -1;
+ return 0;
}
#endif
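push_back() and fill() now return -1 with errno set to ENOMEM instead of calling abort() on allocation failure, so callers are expected to check the result. A minimal sketch:

    Vector<BaseString> args;
    if (args.push_back(BaseString("--initial")) != 0)
      return -1;   // errno == ENOMEM: growing the internal array failed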
diff --git a/ndb/include/util/ndb_rand.h b/ndb/include/util/ndb_rand.h
new file mode 100644
index 00000000000..1521ca9c4ff
--- /dev/null
+++ b/ndb/include/util/ndb_rand.h
@@ -0,0 +1,33 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef NDB_RAND_H
+#define NDB_RAND_H
+
+#define NDB_RAND_MAX 32767
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int ndb_rand(void);
+
+void ndb_srand(unsigned seed);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
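ndb_rand() is a small portable PRNG with a fixed NDB_RAND_MAX of 32767, so sequences are reproducible across platforms for a given seed (unlike rand(), whose RAND_MAX varies). A usage sketch; the modulo mapping is the caller's choice, not part of this header:

    #include "ndb_rand.h"

    ndb_srand(seed);                    /* same seed => same sequence */
    int delay_ms = ndb_rand() % 1000;   /* 0 .. 999 (slightly biased) */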
diff --git a/ndb/src/Makefile.am b/ndb/src/Makefile.am
index 32bb98f5c83..3df7ccd8469 100644
--- a/ndb/src/Makefile.am
+++ b/ndb/src/Makefile.am
@@ -21,6 +21,8 @@ ndblib_LTLIBRARIES = libndbclient.la
libndbclient_la_SOURCES =
+libndbclient_la_LDFLAGS = -version-info @NDB_SHARED_LIB_VERSION@ @NDB_LD_VERSION_SCRIPT@
+
libndbclient_la_LIBADD = \
ndbapi/libndbapi.la \
common/transporter/libtransporter.la \
diff --git a/ndb/src/common/debugger/EventLogger.cpp b/ndb/src/common/debugger/EventLogger.cpp
index 3efd52808e2..6280d5bb9b3 100644
--- a/ndb/src/common/debugger/EventLogger.cpp
+++ b/ndb/src/common/debugger/EventLogger.cpp
@@ -16,6 +16,7 @@
#include <ndb_global.h>
#include "EventLogger.hpp"
+#include <TransporterCallback.hpp>
#include <NdbConfig.h>
#include <kernel/BlockNumbers.h>
@@ -528,10 +529,100 @@ void getTextUndoLogBlocked(QQQQ) {
theData[2]);
}
void getTextTransporterError(QQQQ) {
- BaseString::snprintf(m_text, m_text_len,
- "Transporter to node %d reported error 0x%x",
- theData[1],
- theData[2]);
+ struct myTransporterError{
+ Uint32 errorNum;
+ char errorString[256];
+ };
+ int i = 0;
+ int lenth = 0;
+ static const struct myTransporterError TransporterErrorString[]=
+ {
+ //TE_NO_ERROR = 0
+ {TE_NO_ERROR,"No error"},
+ //TE_ERROR_CLOSING_SOCKET = 0x1
+ {TE_ERROR_CLOSING_SOCKET,"Error found during closing of socket"},
+ //TE_ERROR_IN_SELECT_BEFORE_ACCEPT = 0x2
+ {TE_ERROR_IN_SELECT_BEFORE_ACCEPT,"Error found before accept. The transporter will retry"},
+ //TE_INVALID_MESSAGE_LENGTH = 0x3 | TE_DO_DISCONNECT
+ {TE_INVALID_MESSAGE_LENGTH,"Error found in message (invalid message length)"},
+ //TE_INVALID_CHECKSUM = 0x4 | TE_DO_DISCONNECT
+ {TE_INVALID_CHECKSUM,"Error found in message (checksum)"},
+ //TE_COULD_NOT_CREATE_SOCKET = 0x5
+ {TE_COULD_NOT_CREATE_SOCKET,"Error found while creating socket(can't create socket)"},
+ //TE_COULD_NOT_BIND_SOCKET = 0x6
+ {TE_COULD_NOT_BIND_SOCKET,"Error found while binding server socket"},
+ //TE_LISTEN_FAILED = 0x7
+ {TE_LISTEN_FAILED,"Error found while listening to server socket"},
+ //TE_ACCEPT_RETURN_ERROR = 0x8
+ {TE_ACCEPT_RETURN_ERROR,"Error found during accept(accept return error)"},
+ //TE_SHM_DISCONNECT = 0xb | TE_DO_DISCONNECT
+ {TE_SHM_DISCONNECT,"The remote node has disconnected"},
+ //TE_SHM_IPC_STAT = 0xc | TE_DO_DISCONNECT
+ {TE_SHM_IPC_STAT,"Unable to check shm segment"},
+ //TE_SHM_UNABLE_TO_CREATE_SEGMENT = 0xd
+ {TE_SHM_UNABLE_TO_CREATE_SEGMENT,"Unable to create shm segment"},
+ //TE_SHM_UNABLE_TO_ATTACH_SEGMENT = 0xe
+ {TE_SHM_UNABLE_TO_ATTACH_SEGMENT,"Unable to attach shm segment"},
+ //TE_SHM_UNABLE_TO_REMOVE_SEGMENT = 0xf
+ {TE_SHM_UNABLE_TO_REMOVE_SEGMENT,"Unable to remove shm segment"},
+ //TE_TOO_SMALL_SIGID = 0x10
+ {TE_TOO_SMALL_SIGID,"Sig ID too small"},
+ //TE_TOO_LARGE_SIGID = 0x11
+ {TE_TOO_LARGE_SIGID,"Sig ID too large"},
+ //TE_WAIT_STACK_FULL = 0x12 | TE_DO_DISCONNECT
+ {TE_WAIT_STACK_FULL,"Wait stack was full"},
+ //TE_RECEIVE_BUFFER_FULL = 0x13 | TE_DO_DISCONNECT
+ {TE_RECEIVE_BUFFER_FULL,"Receive buffer was full"},
+ //TE_SIGNAL_LOST_SEND_BUFFER_FULL = 0x14 | TE_DO_DISCONNECT
+ {TE_SIGNAL_LOST_SEND_BUFFER_FULL,"Send buffer was full,and trying to force send fails"},
+ //TE_SIGNAL_LOST = 0x15
+ {TE_SIGNAL_LOST,"Send failed for unknown reason(signal lost)"},
+ //TE_SEND_BUFFER_FULL = 0x16
+ {TE_SEND_BUFFER_FULL,"The send buffer was full, but sleeping for a while solved"},
+ //TE_SCI_LINK_ERROR = 0x0017
+ {TE_SCI_LINK_ERROR,"There is no link from this node to the switch"},
+ //TE_SCI_UNABLE_TO_START_SEQUENCE = 0x18 | TE_DO_DISCONNECT
+ {TE_SCI_UNABLE_TO_START_SEQUENCE,"Could not start a sequence, because system resources are exumed or no sequence has been created"},
+ //TE_SCI_UNABLE_TO_REMOVE_SEQUENCE = 0x19 | TE_DO_DISCONNECT
+ {TE_SCI_UNABLE_TO_REMOVE_SEQUENCE,"Could not remove a sequence"},
+ //TE_SCI_UNABLE_TO_CREATE_SEQUENCE = 0x1a | TE_DO_DISCONNECT
+ {TE_SCI_UNABLE_TO_CREATE_SEQUENCE,"Could not create a sequence, because system resources are exempted. Must reboot"},
+ //TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR = 0x1b | TE_DO_DISCONNECT
+ {TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR,"Tried to send data on redundant link but failed"},
+ //TE_SCI_CANNOT_INIT_LOCALSEGMENT = 0x1c | TE_DO_DISCONNECT
+ {TE_SCI_CANNOT_INIT_LOCALSEGMENT,"Cannot initialize local segment"},
+ //TE_SCI_CANNOT_MAP_REMOTESEGMENT = 0x1d | TE_DO_DISCONNEC
+ {TE_SCI_CANNOT_MAP_REMOTESEGMENT,"Cannot map remote segment"},
+ //TE_SCI_UNABLE_TO_UNMAP_SEGMENT = 0x1e | TE_DO_DISCONNECT
+ {TE_SCI_UNABLE_TO_UNMAP_SEGMENT,"Cannot free the resources used by this segment (step 1)"},
+ //TE_SCI_UNABLE_TO_REMOVE_SEGMENT = 0x1f | TE_DO_DISCONNEC
+ {TE_SCI_UNABLE_TO_REMOVE_SEGMENT,"Cannot free the resources used by this segment (step 2)"},
+ //TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT = 0x20 | TE_DO_DISCONNECT
+ {TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT,"Cannot disconnect from a remote segment"},
+ //TE_SHM_IPC_PERMANENT = 0x21
+ {TE_SHM_IPC_PERMANENT,"Shm ipc Permanent error"},
+ //TE_SCI_UNABLE_TO_CLOSE_CHANNEL = 0x22
+ {TE_SCI_UNABLE_TO_CLOSE_CHANNEL,"Unable to close the sci channel and the resources allocated"}
+ };
+
+ lenth = sizeof(TransporterErrorString)/sizeof(struct myTransporterError);
+ for(i=0; i<lenth; i++)
+ {
+ if(theData[2] == TransporterErrorString[i].errorNum)
+ {
+ BaseString::snprintf(m_text, m_text_len,
+ "Transporter to node %d reported error 0x%x: %s",
+ theData[1],
+ theData[2],
+ TransporterErrorString[i].errorString);
+ break;
+ }
+ }
+ if(i == lenth)
+ BaseString::snprintf(m_text, m_text_len,
+ "Transporter to node %d reported error 0x%x: unknown error",
+ theData[1],
+ theData[2]);
}
void getTextTransporterWarning(QQQQ) {
getTextTransporterError(m_text, m_text_len, theData);
@@ -835,7 +926,7 @@ const EventLoggerBase::EventRepLogLevelMatrix EventLoggerBase::matrix[] = {
ROW(NDBStopCompleted, LogLevel::llStartUp, 1, Logger::LL_INFO ),
ROW(NDBStopForced, LogLevel::llStartUp, 1, Logger::LL_ALERT ),
ROW(NDBStopAborted, LogLevel::llStartUp, 1, Logger::LL_INFO ),
- ROW(StartREDOLog, LogLevel::llStartUp, 10, Logger::LL_INFO ),
+ ROW(StartREDOLog, LogLevel::llStartUp, 4, Logger::LL_INFO ),
ROW(StartLog, LogLevel::llStartUp, 10, Logger::LL_INFO ),
ROW(UNDORecordsExecuted, LogLevel::llStartUp, 15, Logger::LL_INFO ),
ROW(StartReport, LogLevel::llStartUp, 4, Logger::LL_INFO ),
@@ -913,6 +1004,8 @@ EventLogger::close()
removeAllHandlers();
}
+#ifdef NOT_USED
+
static NdbOut&
operator<<(NdbOut& out, const LogLevel & ll)
{
@@ -922,6 +1015,7 @@ operator<<(NdbOut& out, const LogLevel & ll)
out << "]";
return out;
}
+#endif
int
EventLoggerBase::event_lookup(int eventType,
diff --git a/ndb/src/common/debugger/signaldata/DictTabInfo.cpp b/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
index 66c9c978762..5520d0f4d9f 100644
--- a/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
+++ b/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
@@ -51,6 +51,7 @@ DictTabInfo::TableMapping[] = {
DTIMAP(Table, MaxRowsHigh, MaxRowsHigh),
DTIMAP(Table, MinRowsLow, MinRowsLow),
DTIMAP(Table, MinRowsHigh, MinRowsHigh),
+ DTIMAP(Table, SingleUserMode, SingleUserMode),
DTIBREAK(AttributeName)
};
@@ -131,6 +132,8 @@ DictTabInfo::Table::init(){
MaxRowsHigh = 0;
MinRowsLow = 0;
MinRowsHigh = 0;
+
+ SingleUserMode = 0;
}
void
diff --git a/ndb/src/common/debugger/signaldata/SignalNames.cpp b/ndb/src/common/debugger/signaldata/SignalNames.cpp
index 9839fd32cf2..66e7a10cdb7 100644
--- a/ndb/src/common/debugger/signaldata/SignalNames.cpp
+++ b/ndb/src/common/debugger/signaldata/SignalNames.cpp
@@ -380,15 +380,10 @@ const GsnName SignalNames [] = {
,{ GSN_TUP_WRITELOG_REQ, "TUP_WRITELOG_REQ" }
,{ GSN_LQH_WRITELOG_REQ, "LQH_WRITELOG_REQ" }
- ,{ GSN_STATISTICS_REQ, "STATISTICS_REQ" }
,{ GSN_START_ORD, "START_ORD" }
,{ GSN_STOP_ORD, "STOP_ORD" }
,{ GSN_TAMPER_ORD, "TAMPER_ORD" }
- ,{ GSN_SET_VAR_REQ, "SET_VAR_REQ" }
- ,{ GSN_SET_VAR_CONF, "SET_VAR_CONF" }
- ,{ GSN_SET_VAR_REF, "SET_VAR_REF" }
- ,{ GSN_STATISTICS_CONF, "STATISTICS_CONF" }
-
+
,{ GSN_EVENT_SUBSCRIBE_REQ, "EVENT_SUBSCRIBE_REQ" }
,{ GSN_EVENT_SUBSCRIBE_CONF, "EVENT_SUBSCRIBE_CONF" }
,{ GSN_EVENT_SUBSCRIBE_REF, "EVENT_SUBSCRIBE_REF" }
diff --git a/ndb/src/common/mgmcommon/ConfigRetriever.cpp b/ndb/src/common/mgmcommon/ConfigRetriever.cpp
index 414f995181e..bfc9ff8e384 100644
--- a/ndb/src/common/mgmcommon/ConfigRetriever.cpp
+++ b/ndb/src/common/mgmcommon/ConfigRetriever.cpp
@@ -154,12 +154,13 @@ ConfigRetriever::getConfig() {
}
ndb_mgm_configuration *
-ConfigRetriever::getConfig(NdbMgmHandle m_handle)
+ConfigRetriever::getConfig(NdbMgmHandle m_handle_arg)
{
- ndb_mgm_configuration * conf = ndb_mgm_get_configuration(m_handle,m_version);
+ ndb_mgm_configuration * conf = ndb_mgm_get_configuration(m_handle_arg,
+ m_version);
if(conf == 0)
{
- setError(CR_ERROR, ndb_mgm_get_latest_error_desc(m_handle));
+ setError(CR_ERROR, ndb_mgm_get_latest_error_desc(m_handle_arg));
return 0;
}
return conf;
diff --git a/ndb/src/common/portlib/NdbTick.c b/ndb/src/common/portlib/NdbTick.c
index eff6b28b7eb..238e9b1956d 100644
--- a/ndb/src/common/portlib/NdbTick.c
+++ b/ndb/src/common/portlib/NdbTick.c
@@ -60,9 +60,9 @@ NdbTick_CurrentMicrosecond(NDB_TICKS * secs, Uint32 * micros){
int res = gettimeofday(&tick_time, 0);
if(secs==0) {
- NDB_TICKS secs = tick_time.tv_sec;
+ NDB_TICKS local_secs = tick_time.tv_sec;
*micros = tick_time.tv_usec;
- *micros = secs*1000000+*micros;
+ *micros = local_secs*1000000+*micros;
} else {
* secs = tick_time.tv_sec;
* micros = tick_time.tv_usec;
diff --git a/ndb/src/common/transporter/SCI_Transporter.cpp b/ndb/src/common/transporter/SCI_Transporter.cpp
index 138b79acb51..0720fe84973 100644
--- a/ndb/src/common/transporter/SCI_Transporter.cpp
+++ b/ndb/src/common/transporter/SCI_Transporter.cpp
@@ -65,13 +65,10 @@ SCI_Transporter::SCI_Transporter(TransporterRegistry &t_reg,
m_initLocal=false;
- m_swapCounter=0;
m_failCounter=0;
m_remoteNodes[0]=remoteSciNodeId0;
m_remoteNodes[1]=remoteSciNodeId1;
m_adapters = nAdapters;
- // The maximum number of times to try and create,
- // start and destroy a sequence
m_ActiveAdapterId=0;
m_StandbyAdapterId=1;
@@ -102,8 +99,6 @@ SCI_Transporter::SCI_Transporter(TransporterRegistry &t_reg,
DBUG_VOID_RETURN;
}
-
-
void SCI_Transporter::disconnectImpl()
{
DBUG_ENTER("SCI_Transporter::disconnectImpl");
@@ -129,7 +124,8 @@ void SCI_Transporter::disconnectImpl()
if(err != SCI_ERR_OK) {
report_error(TE_SCI_UNABLE_TO_CLOSE_CHANNEL);
- DBUG_PRINT("error", ("Cannot close channel to the driver. Error code 0x%x",
+ DBUG_PRINT("error",
+ ("Cannot close channel to the driver. Error code 0x%x",
err));
}
}
@@ -164,19 +160,18 @@ bool SCI_Transporter::initTransporter() {
m_sendBuffer.m_buffer = new Uint32[m_sendBuffer.m_sendBufferSize / 4];
m_sendBuffer.m_dataSize = 0;
- DBUG_PRINT("info", ("Created SCI Send Buffer with buffer size %d and packet size %d",
+ DBUG_PRINT("info",
+ ("Created SCI Send Buffer with buffer size %d and packet size %d",
m_sendBuffer.m_sendBufferSize, m_PacketSize * 4));
if(!getLinkStatus(m_ActiveAdapterId) ||
(m_adapters > 1 &&
!getLinkStatus(m_StandbyAdapterId))) {
- DBUG_PRINT("error", ("The link is not fully operational. Check the cables and the switches"));
- //reportDisconnect(remoteNodeId, 0);
- //doDisconnect();
+ DBUG_PRINT("error",
+ ("The link is not fully operational. Check the cables and the switches"));
//NDB should terminate
report_error(TE_SCI_LINK_ERROR);
DBUG_RETURN(false);
}
-
DBUG_RETURN(true);
} // initTransporter()
@@ -235,7 +230,8 @@ sci_error_t SCI_Transporter::initLocalSegment() {
DBUG_PRINT("info", ("SCInode iD %d adapter %d\n",
sciAdapters[i].localSciNodeId, i));
if(err != SCI_ERR_OK) {
- DBUG_PRINT("error", ("Cannot open an SCI virtual device. Error code 0x%x",
+ DBUG_PRINT("error",
+ ("Cannot open an SCI virtual device. Error code 0x%x",
err));
DBUG_RETURN(err);
}
@@ -269,7 +265,8 @@ sci_error_t SCI_Transporter::initLocalSegment() {
&err);
if(err != SCI_ERR_OK) {
- DBUG_PRINT("error", ("Local Segment is not accessible by an SCI adapter. Error code 0x%x\n",
+ DBUG_PRINT("error",
+ ("Local Segment is not accessible by an SCI adapter. Error code 0x%x\n",
err));
DBUG_RETURN(err);
}
@@ -303,15 +300,13 @@ sci_error_t SCI_Transporter::initLocalSegment() {
&err);
if(err != SCI_ERR_OK) {
- DBUG_PRINT("error", ("Local Segment is not available for remote connections. Error code 0x%x\n",
+ DBUG_PRINT("error",
+ ("Local Segment is not available for remote connections. Error code 0x%x\n",
err));
DBUG_RETURN(err);
}
}
-
-
setupLocalSegment();
-
DBUG_RETURN(err);
} // initLocalSegment()
@@ -343,12 +338,6 @@ bool SCI_Transporter::doSend() {
if(sizeToSend==4097)
i4097++;
#endif
- if(startSequence(m_ActiveAdapterId)!=SCI_ERR_OK) {
- DBUG_PRINT("error", ("Start sequence failed"));
- report_error(TE_SCI_UNABLE_TO_START_SEQUENCE);
- return false;
- }
-
tryagain:
retry++;
@@ -374,119 +363,36 @@ bool SCI_Transporter::doSend() {
SCI_FLAG_ERROR_CHECK,
&err);
-
if (err != SCI_ERR_OK) {
- if(err == SCI_ERR_OUT_OF_RANGE) {
- DBUG_PRINT("error", ("Data transfer : out of range error"));
- goto tryagain;
- }
- if(err == SCI_ERR_SIZE_ALIGNMENT) {
- DBUG_PRINT("error", ("Data transfer : alignment error"));
- DBUG_PRINT("info", ("sendPtr 0x%x, sizeToSend = %d", sendPtr, sizeToSend));
- goto tryagain;
- }
- if(err == SCI_ERR_OFFSET_ALIGNMENT) {
- DBUG_PRINT("error", ("Data transfer : offset alignment"));
- goto tryagain;
- }
- if(err == SCI_ERR_TRANSFER_FAILED) {
- //(m_TargetSegm[m_StandbyAdapterId].writer)->heavyLock();
- if(getLinkStatus(m_ActiveAdapterId)) {
- goto tryagain;
- }
- if (m_adapters == 1) {
- DBUG_PRINT("error", ("SCI Transfer failed"));
+ if (err == SCI_ERR_OUT_OF_RANGE ||
+ err == SCI_ERR_SIZE_ALIGNMENT ||
+ err == SCI_ERR_OFFSET_ALIGNMENT) {
+ DBUG_PRINT("error", ("Data transfer error = %d", err));
report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
return false;
- }
- m_failCounter++;
- Uint32 temp=m_ActiveAdapterId;
- switch(m_swapCounter) {
- case 0:
- /**swap from active (0) to standby (1)*/
- if(getLinkStatus(m_StandbyAdapterId)) {
- DBUG_PRINT("error", ("Swapping from adapter 0 to 1"));
+ }
+ if(err == SCI_ERR_TRANSFER_FAILED) {
+ if(getLinkStatus(m_ActiveAdapterId))
+ goto tryagain;
+ if (m_adapters == 1) {
+ DBUG_PRINT("error", ("SCI Transfer failed"));
+ report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
+ return false;
+ }
+ m_failCounter++;
+ Uint32 temp=m_ActiveAdapterId;
+ if (getLinkStatus(m_StandbyAdapterId)) {
failoverShmWriter();
SCIStoreBarrier(m_TargetSegm[m_StandbyAdapterId].sequence,0);
m_ActiveAdapterId=m_StandbyAdapterId;
m_StandbyAdapterId=temp;
- SCIRemoveSequence((m_TargetSegm[m_StandbyAdapterId].sequence),
- FLAGS,
- &err);
- if(err!=SCI_ERR_OK) {
- report_error(TE_SCI_UNABLE_TO_REMOVE_SEQUENCE);
- DBUG_PRINT("error", ("Unable to remove sequence"));
- return false;
- }
- if(startSequence(m_ActiveAdapterId)!=SCI_ERR_OK) {
- DBUG_PRINT("error", ("Start sequence failed"));
- report_error(TE_SCI_UNABLE_TO_START_SEQUENCE);
- return false;
- }
- m_swapCounter++;
- DBUG_PRINT("info", ("failover complete"));
- goto tryagain;
- } else {
- report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
- DBUG_PRINT("error", ("SCI Transfer failed"));
- return false;
- }
- return false;
- break;
- case 1:
- /** swap back from 1 to 0
- must check that the link is up */
-
- if(getLinkStatus(m_StandbyAdapterId)) {
- failoverShmWriter();
- m_ActiveAdapterId=m_StandbyAdapterId;
- m_StandbyAdapterId=temp;
- DBUG_PRINT("info", ("Swapping from 1 to 0"));
- if(createSequence(m_ActiveAdapterId)!=SCI_ERR_OK) {
- DBUG_PRINT("error", ("Unable to create sequence"));
- report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE);
- return false;
- }
- if(startSequence(m_ActiveAdapterId)!=SCI_ERR_OK) {
- DBUG_PRINT("error", ("startSequence failed... disconnecting"));
- report_error(TE_SCI_UNABLE_TO_START_SEQUENCE);
- return false;
- }
-
- SCIRemoveSequence((m_TargetSegm[m_StandbyAdapterId].sequence)
- , FLAGS,
- &err);
- if(err!=SCI_ERR_OK) {
- DBUG_PRINT("error", ("Unable to remove sequence"));
- report_error(TE_SCI_UNABLE_TO_REMOVE_SEQUENCE);
- return false;
- }
-
- if(createSequence(m_StandbyAdapterId)!=SCI_ERR_OK) {
- DBUG_PRINT("error", ("Unable to create sequence on standby"));
- report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE);
- return false;
- }
-
- m_swapCounter=0;
-
- DBUG_PRINT("info", ("failover complete.."));
- goto tryagain;
-
+ DBUG_PRINT("error", ("Swapping from adapter %u to %u",
+ m_StandbyAdapterId, m_ActiveAdapterId));
} else {
- DBUG_PRINT("error", ("Unrecoverable data transfer error"));
report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
- return false;
+ DBUG_PRINT("error", ("SCI Transfer failed"));
}
-
- break;
- default:
- DBUG_PRINT("error", ("Unrecoverable data transfer error"));
- report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
- return false;
- break;
- }
- }
+ }
} else {
SHM_Writer * writer = (m_TargetSegm[m_ActiveAdapterId].writer);
writer->updateWritePtr(sizeToSend);
@@ -497,7 +403,6 @@ bool SCI_Transporter::doSend() {
m_sendBuffer.m_dataSize = 0;
m_sendBuffer.m_forceSendLimit = sendLimit;
}
-
} else {
/**
* If we end up here, the SCI segment is full.
@@ -552,15 +457,12 @@ void SCI_Transporter::setupLocalSegment()
DBUG_VOID_RETURN;
} //setupLocalSegment
-
-
void SCI_Transporter::setupRemoteSegment()
{
DBUG_ENTER("SCI_Transporter::setupRemoteSegment");
Uint32 sharedSize = 0;
sharedSize =4096; //start of the buffer is page aligned
-
Uint32 sizeOfBuffer = m_BufferSize;
const Uint32 slack = MAX_MESSAGE_SIZE;
sizeOfBuffer -= sharedSize;
@@ -666,7 +568,6 @@ SCI_Transporter::init_remote()
DBUG_PRINT("error", ("Error connecting segment, err 0x%x", err));
DBUG_RETURN(false);
}
-
}
// Map the remote memory segment into program space
for(Uint32 i=0; i < m_adapters ; i++) {
@@ -679,13 +580,14 @@ SCI_Transporter::init_remote()
FLAGS,
&err);
-
- if(err!= SCI_ERR_OK) {
- DBUG_PRINT("error", ("Cannot map a segment to the remote node %d. Error code 0x%x",m_RemoteSciNodeId, err));
- //NDB SHOULD TERMINATE AND COMPUTER REBOOTED!
- report_error(TE_SCI_CANNOT_MAP_REMOTESEGMENT);
- DBUG_RETURN(false);
- }
+ if(err!= SCI_ERR_OK) {
+ DBUG_PRINT("error",
+ ("Cannot map a segment to the remote node %d. Error code 0x%x",
+ m_RemoteSciNodeId, err));
+ //NDB SHOULD TERMINATE AND COMPUTER REBOOTED!
+ report_error(TE_SCI_CANNOT_MAP_REMOTESEGMENT);
+ DBUG_RETURN(false);
+ }
}
m_mapped=true;
setupRemoteSegment();
@@ -713,7 +615,6 @@ SCI_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd)
NDB_CLOSE_SOCKET(sockfd);
DBUG_RETURN(false);
}
-
if (!init_local()) {
NDB_CLOSE_SOCKET(sockfd);
DBUG_RETURN(false);
@@ -788,29 +689,9 @@ sci_error_t SCI_Transporter::createSequence(Uint32 adapterid) {
&(m_TargetSegm[adapterid].sequence),
SCI_FLAG_FAST_BARRIER,
&err);
-
-
return err;
} // createSequence()
-
-sci_error_t SCI_Transporter::startSequence(Uint32 adapterid) {
-
- sci_error_t err;
- /** Perform preliminary error check on an SCI adapter before starting a
- * sequence of read and write operations on the mapped segment.
- */
- m_SequenceStatus = SCIStartSequence(
- (m_TargetSegm[adapterid].sequence),
- FLAGS, &err);
-
-
- // If there still is an error then data cannot be safely send
- return err;
-} // startSequence()
-
-
-
bool SCI_Transporter::disconnectLocal()
{
DBUG_ENTER("SCI_Transporter::disconnectLocal");
@@ -878,9 +759,6 @@ SCI_Transporter::~SCI_Transporter() {
DBUG_VOID_RETURN;
} // ~SCI_Transporter()
-
-
-
void SCI_Transporter::closeSCI() {
// Termination of SCI
sci_error_t err;
@@ -897,8 +775,9 @@ void SCI_Transporter::closeSCI() {
SCIClose(activeSCIDescriptor, FLAGS, &err);
if(err != SCI_ERR_OK) {
- DBUG_PRINT("error", ("Cannot close SCI channel to the driver. Error code 0x%x",
- err));
+ DBUG_PRINT("error",
+ ("Cannot close SCI channel to the driver. Error code 0x%x",
+ err));
}
SCITerminate();
DBUG_VOID_RETURN;
@@ -973,7 +852,6 @@ SCI_Transporter::getConnectionStatus() {
return false;
}
-
void
SCI_Transporter::setConnected() {
*m_remoteStatusFlag = SCICONNECTED;
@@ -983,7 +861,6 @@ SCI_Transporter::setConnected() {
*m_localStatusFlag = SCICONNECTED;
}
-
void
SCI_Transporter::setDisconnect() {
if(getLinkStatus(m_ActiveAdapterId))
@@ -994,7 +871,6 @@ SCI_Transporter::setDisconnect() {
}
}
-
bool
SCI_Transporter::checkConnected() {
if (*m_localStatusFlag == SCIDISCONNECT) {
@@ -1015,8 +891,9 @@ SCI_Transporter::initSCI() {
SCIInitialize(0, &error);
if(error != SCI_ERR_OK) {
DBUG_PRINT("error", ("Cannot initialize SISCI library."));
- DBUG_PRINT("error", ("Inconsistency between SISCI library and SISCI driver. Error code 0x%x",
- error));
+ DBUG_PRINT("error",
+ ("Inconsistency between SISCI library and SISCI driver. Error code 0x%x",
+ error));
DBUG_RETURN(false);
}
init = true;
@@ -1029,3 +906,4 @@ SCI_Transporter::get_free_buffer() const
{
return (m_TargetSegm[m_ActiveAdapterId].writer)->get_free_buffer();
}
+
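
The doSend() rewrite above collapses the old two-state m_swapCounter machine into a single rule: on SCI_ERR_TRANSFER_FAILED, retry while the active link is still up, otherwise fail over to the standby adapter if its link is healthy, and give up only when no healthy adapter remains. A minimal standalone sketch of that decision follows; Adapters and link_up() are illustrative stand-ins, not the SISCI or transporter API.

  #include <cstdio>
  #include <utility>

  // Illustrative stand-in for the transporter's adapter bookkeeping.
  struct Adapters {
    unsigned active = 0;
    unsigned standby = 1;
    unsigned failCount = 0;
  };

  // Placeholder link probe; the real code calls getLinkStatus().
  static bool link_up(unsigned /*adapter*/) { return true; }

  // Returns true if the caller should retry the transfer, false if it is fatal.
  static bool handle_transfer_failure(Adapters &a, unsigned nAdapters)
  {
    if (link_up(a.active))
      return true;                    // transient error: retry on the same adapter
    if (nAdapters == 1)
      return false;                   // nothing to fail over to
    a.failCount++;
    if (!link_up(a.standby))
      return false;                   // both links down: unrecoverable
    std::swap(a.active, a.standby);   // fail over, then retry
    std::printf("swapped to adapter %u\n", a.active);
    return true;
  }

Unlike the removed code there is no separate "swap back" state, so repeated failures simply keep toggling to whichever adapter currently has a working link.
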
diff --git a/ndb/src/common/transporter/SCI_Transporter.hpp b/ndb/src/common/transporter/SCI_Transporter.hpp
index fbba2ac4516..f774186f238 100644
--- a/ndb/src/common/transporter/SCI_Transporter.hpp
+++ b/ndb/src/common/transporter/SCI_Transporter.hpp
@@ -54,12 +54,12 @@
* local segment, the SCI transporter connects to a segment created by another
* transporter at a remote node, and the maps the remote segment into its
* virtual address space. However, since NDB Cluster relies on redundancy
- * at the network level, by using dual SCI adapters communica
- *
+ * at the network level, communication can be maintained through dual SCI
+ * adapters even if one adapter card fails, or if anything fails on the
+ * network path used by that adapter (e.g. an SCI switch failure).
*
*/
-
/**
* class SCITransporter
* @brief - main class for the SCI transporter.
@@ -84,16 +84,6 @@ public:
sci_error_t createSequence(Uint32 adapterid);
- /**
- * starts a sequence for error checking.
- * The actual checking that a sequence is correct is done implicitly
- * in SCIMemCpy (in doSend).
- * @param adapterid the adapter on which to start the sequence.
- * @return SCI_ERR_OK if ok, otherwize something else.
- */
- sci_error_t startSequence(Uint32 adapterid);
-
-
/** Initiate Local Segment: create a memory segment,
* prepare a memory segment, map the local segment
* into memory space and make segment available.
@@ -159,7 +149,6 @@ private:
bool m_mapped;
bool m_initLocal;
bool m_sciinit;
- Uint32 m_swapCounter;
Uint32 m_failCounter;
/**
* For statistics on transfered packets
@@ -195,7 +184,6 @@ private:
*/
Uint32 m_reportFreq;
-
Uint32 m_adapters;
Uint32 m_numberOfRemoteNodes;
diff --git a/ndb/src/common/transporter/SHM_Transporter.cpp b/ndb/src/common/transporter/SHM_Transporter.cpp
index e0c2e726a92..3ce21940254 100644
--- a/ndb/src/common/transporter/SHM_Transporter.cpp
+++ b/ndb/src/common/transporter/SHM_Transporter.cpp
@@ -31,7 +31,7 @@ SHM_Transporter::SHM_Transporter(TransporterRegistry &t_reg,
const char *lHostName,
const char *rHostName,
int r_port,
- bool isMgmConnection,
+ bool isMgmConnection_arg,
NodeId lNodeId,
NodeId rNodeId,
NodeId serverNodeId,
@@ -40,7 +40,7 @@ SHM_Transporter::SHM_Transporter(TransporterRegistry &t_reg,
key_t _shmKey,
Uint32 _shmSize) :
Transporter(t_reg, tt_SHM_TRANSPORTER,
- lHostName, rHostName, r_port, isMgmConnection,
+ lHostName, rHostName, r_port, isMgmConnection_arg,
lNodeId, rNodeId, serverNodeId,
0, false, checksum, signalId),
shmKey(_shmKey),
diff --git a/ndb/src/common/transporter/TCP_Transporter.cpp b/ndb/src/common/transporter/TCP_Transporter.cpp
index 8e09c9d90c8..ea9e0944915 100644
--- a/ndb/src/common/transporter/TCP_Transporter.cpp
+++ b/ndb/src/common/transporter/TCP_Transporter.cpp
@@ -67,14 +67,14 @@ TCP_Transporter::TCP_Transporter(TransporterRegistry &t_reg,
const char *lHostName,
const char *rHostName,
int r_port,
- bool isMgmConnection,
+ bool isMgmConnection_arg,
NodeId lNodeId,
NodeId rNodeId,
NodeId serverNodeId,
bool chksm, bool signalId,
Uint32 _reportFreq) :
Transporter(t_reg, tt_TCP_TRANSPORTER,
- lHostName, rHostName, r_port, isMgmConnection,
+ lHostName, rHostName, r_port, isMgmConnection_arg,
lNodeId, rNodeId, serverNodeId,
0, false, chksm, signalId),
m_sendBuffer(sendBufSize)
@@ -155,6 +155,8 @@ TCP_Transporter::initTransporter() {
void
TCP_Transporter::setSocketOptions(){
+ int sockOptKeepAlive = 1;
+
if (setsockopt(theSocket, SOL_SOCKET, SO_RCVBUF,
(char*)&sockOptRcvBufSize, sizeof(sockOptRcvBufSize)) < 0) {
#ifdef DEBUG_TRANSPORTER
@@ -169,6 +171,11 @@ TCP_Transporter::setSocketOptions(){
#endif
}//if
+ if (setsockopt(theSocket, SOL_SOCKET, SO_KEEPALIVE,
+ (char*)&sockOptKeepAlive, sizeof(sockOptKeepAlive)) < 0) {
+ ndbout_c("The setsockopt SO_KEEPALIVE error code = %d", InetErrno);
+ }//if
+
//-----------------------------------------------
// Set the TCP_NODELAY option so also small packets are sent
// as soon as possible
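
The new SO_KEEPALIVE option lets the kernel probe idle connections, so a silently dead peer is eventually detected even when no NDB signals are flowing. A minimal sketch of the same option on a plain POSIX socket (error handling reduced to a perror(); this is not the transporter code itself):

  #include <sys/socket.h>
  #include <cstdio>

  // Enable kernel keep-alive probes on an already connected socket.
  static void enable_keepalive(int fd)
  {
    int on = 1;
    if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0)
      std::perror("setsockopt(SO_KEEPALIVE)");
  }
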
diff --git a/ndb/src/common/transporter/TCP_Transporter.hpp b/ndb/src/common/transporter/TCP_Transporter.hpp
index fdb64939d5a..7b6fd0b2323 100644
--- a/ndb/src/common/transporter/TCP_Transporter.hpp
+++ b/ndb/src/common/transporter/TCP_Transporter.hpp
@@ -33,6 +33,7 @@ struct ReceiveBuffer {
Uint32 sizeOfData; // In bytes
Uint32 sizeOfBuffer;
+ ReceiveBuffer() {}
bool init(int bytes);
void destroy();
diff --git a/ndb/src/common/transporter/Transporter.cpp b/ndb/src/common/transporter/Transporter.cpp
index 20b6be8ce26..cec018575e0 100644
--- a/ndb/src/common/transporter/Transporter.cpp
+++ b/ndb/src/common/transporter/Transporter.cpp
@@ -107,7 +107,7 @@ Transporter::connect_server(NDB_SOCKET_TYPE sockfd) {
{
struct sockaddr_in addr;
SOCKET_SIZE_TYPE addrlen= sizeof(addr);
- int r= getpeername(sockfd, (struct sockaddr*)&addr, &addrlen);
+ getpeername(sockfd, (struct sockaddr*)&addr, &addrlen);
m_connect_address= (&addr)->sin_addr;
}
@@ -213,7 +213,7 @@ Transporter::connect_client(NDB_SOCKET_TYPE sockfd) {
{
struct sockaddr_in addr;
SOCKET_SIZE_TYPE addrlen= sizeof(addr);
- int r= getpeername(sockfd, (struct sockaddr*)&addr, &addrlen);
+ getpeername(sockfd, (struct sockaddr*)&addr, &addrlen);
m_connect_address= (&addr)->sin_addr;
}
diff --git a/ndb/src/common/transporter/TransporterRegistry.cpp b/ndb/src/common/transporter/TransporterRegistry.cpp
index 7a05dcb30c2..68454c672fd 100644
--- a/ndb/src/common/transporter/TransporterRegistry.cpp
+++ b/ndb/src/common/transporter/TransporterRegistry.cpp
@@ -848,22 +848,6 @@ TransporterRegistry::poll_TCP(Uint32 timeOutMillis)
return 0;
}
- struct timeval timeout;
-#ifdef NDB_OSE
- // Return directly if there are no TCP transporters configured
-
- if(timeOutMillis <= 1){
- timeout.tv_sec = 0;
- timeout.tv_usec = 1025;
- } else {
- timeout.tv_sec = timeOutMillis / 1000;
- timeout.tv_usec = (timeOutMillis % 1000) * 1000;
- }
-#else
- timeout.tv_sec = timeOutMillis / 1000;
- timeout.tv_usec = (timeOutMillis % 1000) * 1000;
-#endif
-
NDB_SOCKET_TYPE maxSocketValue = -1;
// Needed for TCP/IP connections
@@ -889,6 +873,24 @@ TransporterRegistry::poll_TCP(Uint32 timeOutMillis)
hasdata |= t->hasReceiveData();
}
+ timeOutMillis = hasdata ? 0 : timeOutMillis;
+
+ struct timeval timeout;
+#ifdef NDB_OSE
+ // Return directly if there are no TCP transporters configured
+
+ if(timeOutMillis <= 1){
+ timeout.tv_sec = 0;
+ timeout.tv_usec = 1025;
+ } else {
+ timeout.tv_sec = timeOutMillis / 1000;
+ timeout.tv_usec = (timeOutMillis % 1000) * 1000;
+ }
+#else
+ timeout.tv_sec = timeOutMillis / 1000;
+ timeout.tv_usec = (timeOutMillis % 1000) * 1000;
+#endif
+
// The highest socket value plus one
maxSocketValue++;
@@ -1416,8 +1418,6 @@ TransporterRegistry::add_transporter_interface(NodeId remoteNodeId,
bool
TransporterRegistry::start_service(SocketServer& socket_server)
{
- struct ndb_mgm_reply mgm_reply;
-
DBUG_ENTER("TransporterRegistry::start_service");
if (m_transporter_interface.size() > 0 && !nodeIdSpecified)
{
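
The reordered poll_TCP() hunk above makes the select() timeout depend on whether any transporter already has buffered receive data: if so, the wait is forced to zero so the buffered data is processed without an artificial delay. A condensed sketch of that millisecond-to-timeval rule (names are illustrative, not the TransporterRegistry API):

  #include <sys/time.h>

  // Build the select() timeout: poll (zero wait) when data is already buffered,
  // otherwise wait up to timeOutMillis.
  static struct timeval make_timeout(unsigned timeOutMillis, bool hasBufferedData)
  {
    if (hasBufferedData)
      timeOutMillis = 0;
    struct timeval tv;
    tv.tv_sec  = timeOutMillis / 1000;
    tv.tv_usec = (timeOutMillis % 1000) * 1000;
    return tv;
  }
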
diff --git a/ndb/src/common/util/BaseString.cpp b/ndb/src/common/util/BaseString.cpp
index 6f20ae6a002..7e5adf0e9ef 100644
--- a/ndb/src/common/util/BaseString.cpp
+++ b/ndb/src/common/util/BaseString.cpp
@@ -16,19 +16,36 @@
/* -*- c-basic-offset: 4; -*- */
#include <ndb_global.h>
#include <BaseString.hpp>
-#include <basestring_vsnprintf.h>
+#include "basestring_vsnprintf.h"
BaseString::BaseString()
{
m_chr = new char[1];
+ if (m_chr == NULL)
+ {
+ errno = ENOMEM;
+ m_len = 0;
+ return;
+ }
m_chr[0] = 0;
m_len = 0;
}
BaseString::BaseString(const char* s)
{
+  if (s == NULL)
+  {
+    m_chr = NULL;
+    m_len = 0;
+    return;
+  }
const size_t n = strlen(s);
m_chr = new char[n + 1];
+ if (m_chr == NULL)
+ {
+ errno = ENOMEM;
+ m_len = 0;
+ return;
+ }
memcpy(m_chr, s, n + 1);
m_len = n;
}
@@ -37,7 +54,20 @@ BaseString::BaseString(const BaseString& str)
{
const char* const s = str.m_chr;
const size_t n = str.m_len;
+ if (s == NULL)
+ {
+ m_chr = NULL;
+ m_len = 0;
+ return;
+ }
char* t = new char[n + 1];
+ if (t == NULL)
+ {
+ errno = ENOMEM;
+ m_chr = NULL;
+ m_len = 0;
+ return;
+ }
memcpy(t, s, n + 1);
m_chr = t;
m_len = n;
@@ -51,9 +81,23 @@ BaseString::~BaseString()
BaseString&
BaseString::assign(const char* s)
{
- const size_t n = strlen(s);
+ if (s == NULL)
+ {
+ m_chr = NULL;
+ m_len = 0;
+ return *this;
+ }
+ size_t n = strlen(s);
char* t = new char[n + 1];
- memcpy(t, s, n + 1);
+ if (t)
+ {
+ memcpy(t, s, n + 1);
+ }
+ else
+ {
+ errno = ENOMEM;
+ n = 0;
+ }
delete[] m_chr;
m_chr = t;
m_len = n;
@@ -64,8 +108,16 @@ BaseString&
BaseString::assign(const char* s, size_t n)
{
char* t = new char[n + 1];
- memcpy(t, s, n);
- t[n] = 0;
+ if (t)
+ {
+ memcpy(t, s, n);
+ t[n] = 0;
+ }
+ else
+ {
+ errno = ENOMEM;
+ n = 0;
+ }
delete[] m_chr;
m_chr = t;
m_len = n;
@@ -83,10 +135,19 @@ BaseString::assign(const BaseString& str, size_t n)
BaseString&
BaseString::append(const char* s)
{
- const size_t n = strlen(s);
+ size_t n = strlen(s);
char* t = new char[m_len + n + 1];
- memcpy(t, m_chr, m_len);
- memcpy(t + m_len, s, n + 1);
+ if (t)
+ {
+ memcpy(t, m_chr, m_len);
+ memcpy(t + m_len, s, n + 1);
+ }
+ else
+ {
+ errno = ENOMEM;
+ m_len = 0;
+ n = 0;
+ }
delete[] m_chr;
m_chr = t;
m_len += n;
@@ -130,8 +191,14 @@ BaseString::assfmt(const char *fmt, ...)
l = basestring_vsnprintf(buf, sizeof(buf), fmt, ap) + 1;
va_end(ap);
if(l > (int)m_len) {
+ char *t = new char[l];
+ if (t == NULL)
+ {
+ errno = ENOMEM;
+ return *this;
+ }
delete[] m_chr;
- m_chr = new char[l];
+ m_chr = t;
}
va_start(ap, fmt);
basestring_vsnprintf(m_chr, l, fmt, ap);
@@ -155,6 +222,11 @@ BaseString::appfmt(const char *fmt, ...)
l = basestring_vsnprintf(buf, sizeof(buf), fmt, ap) + 1;
va_end(ap);
char *tmp = new char[l];
+ if (tmp == NULL)
+ {
+ errno = ENOMEM;
+ return *this;
+ }
va_start(ap, fmt);
basestring_vsnprintf(tmp, l, fmt, ap);
va_end(ap);
@@ -242,9 +314,28 @@ BaseString::argify(const char *argv0, const char *src) {
Vector<char *> vargv;
if(argv0 != NULL)
- vargv.push_back(strdup(argv0));
+ {
+ char *t = strdup(argv0);
+ if (t == NULL)
+ {
+ errno = ENOMEM;
+ return NULL;
+ }
+ if (vargv.push_back(t))
+ {
+ free(t);
+ return NULL;
+ }
+ }
char *tmp = new char[strlen(src)+1];
+ if (tmp == NULL)
+ {
+ for(size_t i = 0; i < vargv.size(); i++)
+ free(vargv[i]);
+ errno = ENOMEM;
+ return NULL;
+ }
char *dst = tmp;
const char *end = src + strlen(src);
/* Copy characters from src to destination, while compacting them
@@ -287,20 +378,48 @@ BaseString::argify(const char *argv0, const char *src) {
/* Make sure the string is properly terminated */
*dst++ = '\0';
src++;
-
- vargv.push_back(strdup(begin));
+
+ {
+ char *t = strdup(begin);
+ if (t == NULL)
+ {
+ delete[] tmp;
+ for(size_t i = 0; i < vargv.size(); i++)
+ free(vargv[i]);
+ errno = ENOMEM;
+ return NULL;
+ }
+ if (vargv.push_back(t))
+ {
+ free(t);
+ delete[] tmp;
+ for(size_t i = 0; i < vargv.size(); i++)
+ free(vargv[i]);
+ return NULL;
+ }
+ }
}
end:
delete[] tmp;
- vargv.push_back(NULL);
+ if (vargv.push_back(NULL))
+ {
+ for(size_t i = 0; i < vargv.size(); i++)
+ free(vargv[i]);
+ return NULL;
+ }
/* Convert the C++ Vector into a C-vector of strings, suitable for
* calling execv().
*/
char **argv = (char **)malloc(sizeof(*argv) * (vargv.size()));
if(argv == NULL)
+ {
+ for(size_t i = 0; i < vargv.size(); i++)
+ free(vargv[i]);
+ errno = ENOMEM;
return NULL;
+ }
for(size_t i = 0; i < vargv.size(); i++){
argv[i] = vargv[i];
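
The BaseString changes apply one pattern throughout: check every allocation, and on failure leave the object in an empty but valid state (length 0, possibly a NULL buffer) with errno set to ENOMEM instead of dereferencing a null pointer. A reduced sketch of that pattern for a single assign-style method; TinyString is illustrative only, and new (std::nothrow) makes the no-exception assumption explicit.

  #include <cerrno>
  #include <cstring>
  #include <new>

  struct TinyString {
    char  *chr = nullptr;
    size_t len = 0;
    ~TinyString() { delete[] chr; }

    // Copy s, or leave the string empty and set errno on allocation failure.
    TinyString &assign(const char *s)
    {
      if (s == nullptr) { delete[] chr; chr = nullptr; len = 0; return *this; }
      size_t n = std::strlen(s);
      char *t = new (std::nothrow) char[n + 1];
      if (t != nullptr)
        std::memcpy(t, s, n + 1);
      else { errno = ENOMEM; n = 0; }   // keep the object usable, just empty
      delete[] chr;
      chr = t;
      len = n;
      return *this;
    }
  };
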
diff --git a/ndb/src/common/util/ConfigValues.cpp b/ndb/src/common/util/ConfigValues.cpp
index cf6dcf904a6..9309fe3fbd6 100644
--- a/ndb/src/common/util/ConfigValues.cpp
+++ b/ndb/src/common/util/ConfigValues.cpp
@@ -18,8 +18,6 @@
#include <NdbOut.hpp>
#include <NdbTCP.h>
-static Uint32 hash(Uint32 key, Uint32 size);
-static Uint32 nextHash(Uint32 key, Uint32 size, Uint32 pos, Uint32 count);
static bool findKey(const Uint32 * vals, Uint32 sz, Uint32 key, Uint32 * pos);
/**
@@ -90,18 +88,18 @@ bool
ConfigValues::getByPos(Uint32 pos, Entry * result) const {
assert(pos < (2 * m_size));
Uint32 keypart = m_values[pos];
- Uint32 val = m_values[pos+1];
+ Uint32 val2 = m_values[pos+1];
switch(::getTypeOf(keypart)){
case IntType:
case SectionType:
- result->m_int = val;
+ result->m_int = val2;
break;
case StringType:
- result->m_string = * getString(val);
+ result->m_string = * getString(val2);
break;
case Int64Type:
- result->m_int64 = * get64(val);
+ result->m_int64 = * get64(val2);
break;
case InvalidType:
default:
diff --git a/ndb/src/common/util/File.cpp b/ndb/src/common/util/File.cpp
index 23bf3415df9..fe0fdfd1c91 100644
--- a/ndb/src/common/util/File.cpp
+++ b/ndb/src/common/util/File.cpp
@@ -50,7 +50,7 @@ File_class::size(FILE* f)
MY_STAT s;
// Note that my_fstat behaves *differently* than my_stat. ARGGGHH!
- if(my_fstat(::fileno(f), &s, MYF(0)))
+ if(my_fstat(fileno(f), &s, MYF(0)))
return 0;
return s.st_size;
@@ -162,9 +162,9 @@ File_class::readChar(char* buf)
}
int
-File_class::write(const void* buf, size_t size, size_t nitems)
+File_class::write(const void* buf, size_t size_arg, size_t nitems)
{
- return ::fwrite(buf, size, nitems, m_file);
+ return ::fwrite(buf, size_arg, nitems, m_file);
}
int
@@ -196,7 +196,7 @@ File_class::flush() const
{
#if defined NDB_OSE || defined NDB_SOFTOSE
::fflush(m_file);
- return ::fsync(::fileno(m_file));
+ return ::fsync(fileno(m_file));
#else
return ::fflush(m_file);;
#endif
diff --git a/ndb/src/common/util/Makefile.am b/ndb/src/common/util/Makefile.am
index 4cc2e49f9ec..dc83a70990f 100644
--- a/ndb/src/common/util/Makefile.am
+++ b/ndb/src/common/util/Makefile.am
@@ -24,7 +24,8 @@ libgeneral_la_SOURCES = \
uucode.c random.c version.c \
strdup.c \
ConfigValues.cpp ndb_init.c basestring_vsnprintf.c \
- Bitmask.cpp
+ Bitmask.cpp \
+ ndb_rand.c
EXTRA_PROGRAMS = testBitmask
testBitmask_SOURCES = testBitmask.cpp
diff --git a/ndb/src/common/util/NdbSqlUtil.cpp b/ndb/src/common/util/NdbSqlUtil.cpp
index 1234e4ece6b..0f62d66c149 100644
--- a/ndb/src/common/util/NdbSqlUtil.cpp
+++ b/ndb/src/common/util/NdbSqlUtil.cpp
@@ -681,8 +681,6 @@ int
NdbSqlUtil::cmpBit(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
Uint32 n = (n1 < n2) ? n1 : n2;
- char* c1 = (char*)p1;
- char* c2 = (char*)p2;
int ret = memcmp(p1, p2, n);
return ret;
}
diff --git a/ndb/src/common/util/OutputStream.cpp b/ndb/src/common/util/OutputStream.cpp
index cccd76eac2c..eada1452f02 100644
--- a/ndb/src/common/util/OutputStream.cpp
+++ b/ndb/src/common/util/OutputStream.cpp
@@ -42,16 +42,16 @@ FileOutputStream::println(const char * fmt, ...){
}
SocketOutputStream::SocketOutputStream(NDB_SOCKET_TYPE socket,
- unsigned timeout){
+ unsigned write_timeout_ms){
m_socket = socket;
- m_timeout = timeout;
+ m_timeout_ms = write_timeout_ms;
}
int
SocketOutputStream::print(const char * fmt, ...){
va_list ap;
va_start(ap, fmt);
- const int ret = vprint_socket(m_socket, m_timeout, fmt, ap);
+ const int ret = vprint_socket(m_socket, m_timeout_ms, fmt, ap);
va_end(ap);
return ret;
}
@@ -59,7 +59,7 @@ int
SocketOutputStream::println(const char * fmt, ...){
va_list ap;
va_start(ap, fmt);
- const int ret = vprintln_socket(m_socket, m_timeout, fmt, ap);
+ const int ret = vprintln_socket(m_socket, m_timeout_ms, fmt, ap);
va_end(ap);
return ret;
}
diff --git a/ndb/src/common/util/Properties.cpp b/ndb/src/common/util/Properties.cpp
index 8d5c56affd3..11a1d8690ae 100644
--- a/ndb/src/common/util/Properties.cpp
+++ b/ndb/src/common/util/Properties.cpp
@@ -627,11 +627,11 @@ PropertiesImpl::getPropsPut(const char * name,
if(nvp == 0){
Properties * tmpP = new Properties();
PropertyImpl * tmpPI = new PropertyImpl(tmp2, tmpP);
- PropertyImpl * nvp = put(tmpPI);
+ PropertyImpl * nvp2 = put(tmpPI);
delete tmpP;
free(tmp2);
- return ((Properties*)nvp->value)->impl->getPropsPut(tmp+1, impl);
+ return ((Properties*)nvp2->value)->impl->getPropsPut(tmp+1, impl);
}
free(tmp2);
if(nvp->valueType != PropertiesType_Properties){
diff --git a/ndb/src/common/util/SocketClient.cpp b/ndb/src/common/util/SocketClient.cpp
index c2825901929..3d1fd07d581 100644
--- a/ndb/src/common/util/SocketClient.cpp
+++ b/ndb/src/common/util/SocketClient.cpp
@@ -88,7 +88,7 @@ SocketClient::bind(const char* bindaddress, unsigned short localport)
int ret = errno;
NDB_CLOSE_SOCKET(m_sockfd);
m_sockfd= NDB_INVALID_SOCKET;
- return errno;
+ return ret;
}
if (::bind(m_sockfd, (struct sockaddr*)&local, sizeof(local)) == -1)
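
The SocketClient::bind() fix above is a classic errno pitfall: NDB_CLOSE_SOCKET() can overwrite errno, so the original error has to be captured first and the saved copy returned. A generic sketch of the same pattern with plain POSIX calls (not the SocketClient API):

  #include <cerrno>
  #include <fcntl.h>
  #include <unistd.h>

  // Returns 0 on success, otherwise the errno of the original failure,
  // even though close() may clobber errno afterwards.
  static int set_nonblocking(int fd)
  {
    if (fcntl(fd, F_SETFL, O_NONBLOCK) == -1)
    {
      int saved = errno;   // capture before any further syscall
      close(fd);
      return saved;
    }
    return 0;
  }
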
diff --git a/ndb/src/common/util/ndb_rand.c b/ndb/src/common/util/ndb_rand.c
new file mode 100644
index 00000000000..4fcc483cd49
--- /dev/null
+++ b/ndb/src/common/util/ndb_rand.c
@@ -0,0 +1,40 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <ndb_rand.h>
+
+static unsigned long next= 1;
+
+/**
+ * ndb_rand
+ *
+ * constant time, cheap, pseudo-random number generator.
+ *
+ * NDB_RAND_MAX assumed to be 32767
+ *
+ * This is the POSIX example for "generating the same sequence on
+ * different machines", although that is not one of our requirements.
+ */
+int ndb_rand(void)
+{
+ next= next * 1103515245 + 12345;
+ return((unsigned)(next/65536) % 32768);
+}
+
+void ndb_srand(unsigned seed)
+{
+ next= seed;
+}
+
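
ndb_rand() above is the textbook POSIX linear congruential generator: the state advances as next = next * 1103515245 + 12345 and the high-order bits are folded down to [0, 32767]. A small usage sketch, assuming it is linked against ndb_rand.c; the ndb_rand_below() helper is an illustration, not part of the new file.

  #include <cstdio>

  extern "C" {
  int  ndb_rand(void);
  void ndb_srand(unsigned seed);
  }

  // Draw a value in [0, n): fine for test jitter, not statistically strong.
  static int ndb_rand_below(int n) { return ndb_rand() % n; }

  int main()
  {
    ndb_srand(1234);                 // same seed => same sequence everywhere
    for (int i = 0; i < 4; i++)
      std::printf("%d\n", ndb_rand_below(100));
    return 0;
  }
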
diff --git a/ndb/src/common/util/random.c b/ndb/src/common/util/random.c
index 3d4a48e7ef0..20ef537d89a 100644
--- a/ndb/src/common/util/random.c
+++ b/ndb/src/common/util/random.c
@@ -197,7 +197,7 @@ int initSequence(RandomSequence *seq, SequenceValues *inputValues)
unsigned int i;
unsigned int j;
unsigned int totalLength;
- unsigned int index;
+ unsigned int idx;
if( !seq || !inputValues ) return(-1);
@@ -219,12 +219,12 @@ int initSequence(RandomSequence *seq, SequenceValues *inputValues)
/*----------------------*/
/* set the array values */
/*----------------------*/
- index = 0;
+ idx = 0;
for(i = 0; inputValues[i].length != 0; i++) {
for(j = 0; j < inputValues[i].length; j++ ) {
- seq->values[index] = inputValues[i].value;
- index++;
+ seq->values[idx] = inputValues[i].value;
+ idx++;
}
}
diff --git a/ndb/src/common/util/socket_io.cpp b/ndb/src/common/util/socket_io.cpp
index 9bc6b4d53fb..2dd2a9f31a8 100644
--- a/ndb/src/common/util/socket_io.cpp
+++ b/ndb/src/common/util/socket_io.cpp
@@ -164,8 +164,8 @@ write_socket(NDB_SOCKET_TYPE socket, int timeout_millis,
FD_SET(socket, &writeset);
timeout.tv_sec = 1;
timeout.tv_usec = 0;
- const int selectRes = select(socket + 1, 0, &writeset, 0, &timeout);
- if(selectRes != 1){
+ const int selectRes2 = select(socket + 1, 0, &writeset, 0, &timeout);
+ if(selectRes2 != 1){
return -1;
}
}
diff --git a/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj b/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj
index 56f9f3a8511..fb1e2fd601c 100644
--- a/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj
+++ b/ndb/src/cw/cpcc-win32/C++/CPC_GUI.vcproj
@@ -12,8 +12,8 @@
<Configurations>
<Configuration
Name="Release|Win32"
- OutputDirectory=".\Release"
- IntermediateDirectory=".\Release"
+ OutputDirectory=".\release_obj"
+ IntermediateDirectory=".\release_obj"
ConfigurationType="1"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="FALSE"
@@ -27,10 +27,10 @@
EnableFunctionLevelLinking="TRUE"
UsePrecompiledHeader="3"
PrecompiledHeaderThrough="stdafx.h"
- PrecompiledHeaderFile=".\Release/CPC_GUI.pch"
- AssemblerListingLocation=".\Release/"
- ObjectFile=".\Release/"
- ProgramDataBaseFileName=".\Release/"
+ PrecompiledHeaderFile=".\release_obj/CPC_GUI.pch"
+ AssemblerListingLocation=".\release_obj/"
+ ObjectFile=".\release_obj/"
+ ProgramDataBaseFileName=".\release_obj/"
WarningLevel="3"
SuppressStartupBanner="TRUE"/>
<Tool
@@ -39,10 +39,10 @@
Name="VCLinkerTool"
AdditionalOptions="/MACHINE:I386"
AdditionalDependencies="mfc42.lib"
- OutputFile=".\Release/CPC_GUI.exe"
+ OutputFile=".\release_obj/CPC_GUI.exe"
LinkIncremental="1"
SuppressStartupBanner="TRUE"
- ProgramDatabaseFile=".\Release/CPC_GUI.pdb"
+ ProgramDatabaseFile=".\release_obj/CPC_GUI.pdb"
SubSystem="2"/>
<Tool
Name="VCMIDLTool"
@@ -50,7 +50,7 @@
MkTypLibCompatible="TRUE"
SuppressStartupBanner="TRUE"
TargetEnvironment="1"
- TypeLibraryName=".\Release/CPC_GUI.tlb"/>
+ TypeLibraryName=".\release_obj/CPC_GUI.tlb"/>
<Tool
Name="VCPostBuildEventTool"/>
<Tool
@@ -68,8 +68,8 @@
</Configuration>
<Configuration
Name="Debug|Win32"
- OutputDirectory=".\Debug"
- IntermediateDirectory=".\Debug"
+ OutputDirectory=".\debug_obj"
+ IntermediateDirectory=".\debug_obj"
ConfigurationType="1"
UseOfMFC="0"
ATLMinimizesCRunTimeLibraryUsage="FALSE"
@@ -82,10 +82,10 @@
RuntimeLibrary="5"
UsePrecompiledHeader="3"
PrecompiledHeaderThrough="stdafx.h"
- PrecompiledHeaderFile=".\Debug/CPC_GUI.pch"
- AssemblerListingLocation=".\Debug/"
- ObjectFile=".\Debug/"
- ProgramDataBaseFileName=".\Debug/"
+ PrecompiledHeaderFile=".\debug_obj/CPC_GUI.pch"
+ AssemblerListingLocation=".\debug_obj/"
+ ObjectFile=".\debug_obj/"
+ ProgramDataBaseFileName=".\debug_obj/"
BrowseInformation="1"
WarningLevel="3"
SuppressStartupBanner="TRUE"
@@ -96,11 +96,11 @@
Name="VCLinkerTool"
AdditionalOptions="/MACHINE:I386"
AdditionalDependencies="comctl32.lib mfc70d.lib"
- OutputFile=".\Debug/CPC_GUI.exe"
- LinkIncremental="2"
+ OutputFile=".\debug_obj/CPC_GUI.exe"
+ LinkIncremental="1"
SuppressStartupBanner="TRUE"
GenerateDebugInformation="TRUE"
- ProgramDatabaseFile=".\Debug/CPC_GUI.pdb"
+ ProgramDatabaseFile=".\debug_obj/CPC_GUI.pdb"
SubSystem="2"/>
<Tool
Name="VCMIDLTool"
@@ -108,7 +108,7 @@
MkTypLibCompatible="TRUE"
SuppressStartupBanner="TRUE"
TargetEnvironment="1"
- TypeLibraryName=".\Debug/CPC_GUI.tlb"/>
+ TypeLibraryName=".\debug_obj/CPC_GUI.tlb"/>
<Tool
Name="VCPostBuildEventTool"/>
<Tool
diff --git a/ndb/src/cw/cpcd/APIService.cpp b/ndb/src/cw/cpcd/APIService.cpp
index 5bbf2c86e23..1c1cfb94cd4 100644
--- a/ndb/src/cw/cpcd/APIService.cpp
+++ b/ndb/src/cw/cpcd/APIService.cpp
@@ -389,7 +389,6 @@ CPCDAPISession::listProcesses(Parser_t::Context & /* unused */,
void
CPCDAPISession::showVersion(Parser_t::Context & /* unused */,
const class Properties & args){
- Uint32 id;
CPCD::RequestStatus rs;
m_output->println("show version");
diff --git a/ndb/src/cw/cpcd/main.cpp b/ndb/src/cw/cpcd/main.cpp
index 7021b4bc68d..f23a92b8010 100644
--- a/ndb/src/cw/cpcd/main.cpp
+++ b/ndb/src/cw/cpcd/main.cpp
@@ -74,8 +74,6 @@ extern "C" static void sig_child(int signo, siginfo_t*, void*);
const char *progname = "ndb_cpcd";
int main(int argc, char** argv){
- int save_argc= argc;
- char** save_argv= argv;
const char *load_default_groups[]= { "ndb_cpcd",0 };
MY_INIT(argv[0]);
diff --git a/ndb/src/kernel/blocks/ERROR_codes.txt b/ndb/src/kernel/blocks/ERROR_codes.txt
index bf54d583299..2599bf40988 100644
--- a/ndb/src/kernel/blocks/ERROR_codes.txt
+++ b/ndb/src/kernel/blocks/ERROR_codes.txt
@@ -5,8 +5,8 @@ Next DBACC 3002
Next DBTUP 4014
Next DBLQH 5043
Next DBDICT 6007
-Next DBDIH 7178
-Next DBTC 8039
+Next DBDIH 7195
+Next DBTC 8052
Next CMVMI 9000
Next BACKUP 10022
Next DBUTIL 11002
@@ -71,6 +71,13 @@ Delay GCP_SAVEREQ by 10 secs
7177: Delay copying of sysfileData in execCOPY_GCIREQ
+7180: Crash master during master-take-over in execMASTER_LCPCONF
+
+7193: Don't send LCP_FRAG_ORD to self, and crash when sending first
+ LCP_FRAG_ORD(last)
+
+7194: Force removeNodeFromStored to complete in the middle of MASTER_LCPCONF
+
ERROR CODES FOR TESTING NODE FAILURE, LOCAL CHECKPOINT HANDLING:
-----------------------------------------------------------------
@@ -294,6 +301,10 @@ ABORT OF TCKEYREQ
8038 : Simulate API disconnect just after SCAN_TAB_REQ
+8039 : Simulate failure of TransactionBufferMemory allocation for OI lookup
+
+8051 : Simulate failure of allocation for saveINDXKEYINFO
+
CMVMI
-----
@@ -488,7 +499,15 @@ Dbdict:
6004 Crash in participant @ CreateTabReq::Commit
6005 Crash in participant @ CreateTabReq::CreateDrop
-Ndbcntr:
---------
+TUP:
+----
+
+4025: Fail all inserts with out of memory
+4026: Fail one insert with oom
+4027: Fail inserts randomly with oom
+4028: Fail one random insert with oom
+
+NDBCNTR:
+1000: Crash insertion on SystemError::CopyFragRef
1001: Delay sending NODE_FAILREP (to own node), until error is cleared
diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
index 8c3148862d4..1b5e7a27a0c 100644
--- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
+++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
@@ -77,11 +77,7 @@ Cmvmi::Cmvmi(const Configuration & conf) :
addRecSignal(GSN_OPEN_COMREQ, &Cmvmi::execOPEN_COMREQ);
addRecSignal(GSN_TEST_ORD, &Cmvmi::execTEST_ORD);
- addRecSignal(GSN_STATISTICS_REQ, &Cmvmi::execSTATISTICS_REQ);
addRecSignal(GSN_TAMPER_ORD, &Cmvmi::execTAMPER_ORD);
- addRecSignal(GSN_SET_VAR_REQ, &Cmvmi::execSET_VAR_REQ);
- addRecSignal(GSN_SET_VAR_CONF, &Cmvmi::execSET_VAR_CONF);
- addRecSignal(GSN_SET_VAR_REF, &Cmvmi::execSET_VAR_REF);
addRecSignal(GSN_STOP_ORD, &Cmvmi::execSTOP_ORD);
addRecSignal(GSN_START_ORD, &Cmvmi::execSTART_ORD);
addRecSignal(GSN_EVENT_SUBSCRIBE_REQ,
@@ -176,7 +172,7 @@ void Cmvmi::execNDB_TAMPER(Signal* signal)
}
else
{
- MAX_RECEIVED_SIGNALS = rand() % 128;
+ MAX_RECEIVED_SIGNALS = 1 + (rand() % 128);
}
ndbout_c("MAX_RECEIVED_SIGNALS: %d", MAX_RECEIVED_SIGNALS);
CLEAR_ERROR_INSERT_VALUE;
@@ -720,24 +716,6 @@ Cmvmi::execTEST_ORD(Signal * signal){
#endif
}
-void Cmvmi::execSTATISTICS_REQ(Signal* signal)
-{
- // TODO Note ! This is only a test implementation...
-
- static int stat1 = 0;
- jamEntry();
-
- //ndbout << "data 1: " << signal->theData[1];
-
- int x = signal->theData[0];
- stat1++;
- signal->theData[0] = stat1;
- sendSignal(x, GSN_STATISTICS_CONF, signal, 7, JBB);
-
-}//execSTATISTICS_REQ()
-
-
-
void Cmvmi::execSTOP_ORD(Signal* signal)
{
jamEntry();
@@ -856,7 +834,7 @@ void Cmvmi::execTAMPER_ORD(Signal* signal)
// to be able to indicate if we really introduced an error.
#ifdef ERROR_INSERT
TamperOrd* const tamperOrd = (TamperOrd*)&signal->theData[0];
-
+ signal->theData[2] = 0;
signal->theData[1] = tamperOrd->errorNo;
signal->theData[0] = 5;
sendSignal(DBDIH_REF, GSN_DIHNDBTAMPER, signal, 3,JBB);
@@ -864,160 +842,6 @@ void Cmvmi::execTAMPER_ORD(Signal* signal)
}//execTAMPER_ORD()
-
-
-void Cmvmi::execSET_VAR_REQ(Signal* signal)
-{
-#if 0
-
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
- jamEntry();
- switch (var) {
-
- // NDBCNTR_REF
-
- // DBTC
- case TransactionDeadlockDetectionTimeout:
- case TransactionInactiveTime:
- case NoOfConcurrentProcessesHandleTakeover:
- sendSignal(DBTC_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
- break;
-
- // DBDIH
- case TimeBetweenLocalCheckpoints:
- case TimeBetweenGlobalCheckpoints:
- sendSignal(DBDIH_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
- break;
-
- // DBLQH
- case NoOfConcurrentCheckpointsDuringRestart:
- case NoOfConcurrentCheckpointsAfterRestart:
- sendSignal(DBLQH_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
- break;
-
- // DBACC
- case NoOfDiskPagesToDiskDuringRestartACC:
- case NoOfDiskPagesToDiskAfterRestartACC:
- sendSignal(DBACC_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
- break;
-
- // DBTUP
- case NoOfDiskPagesToDiskDuringRestartTUP:
- case NoOfDiskPagesToDiskAfterRestartTUP:
- sendSignal(DBTUP_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
- break;
-
- // DBDICT
-
- // NDBCNTR
- case TimeToWaitAlive:
-
- // QMGR
- case HeartbeatIntervalDbDb: // TODO possibly Ndbcnt too
- case HeartbeatIntervalDbApi:
- case ArbitTimeout:
- sendSignal(QMGR_REF, GSN_SET_VAR_REQ, signal, 3, JBB);
- break;
-
- // NDBFS
-
- // CMVMI
- case MaxNoOfSavedMessages:
- case LockPagesInMainMemory:
- case TimeBetweenWatchDogCheck:
- case StopOnError:
- handleSET_VAR_REQ(signal);
- break;
-
-
- // Not possible to update (this could of course be handled by each block
- // instead but I havn't investigated where they belong)
- case Id:
- case ExecuteOnComputer:
- case ShmKey:
- case MaxNoOfConcurrentOperations:
- case MaxNoOfConcurrentTransactions:
- case MemorySpaceIndexes:
- case MemorySpaceTuples:
- case MemoryDiskPages:
- case NoOfFreeDiskClusters:
- case NoOfDiskClusters:
- case NoOfFragmentLogFiles:
- case NoOfDiskClustersPerDiskFile:
- case NoOfDiskFiles:
- case MaxNoOfSavedEvents:
- default:
-
- int mgmtSrvr = setVarReq->mgmtSrvrBlockRef();
- sendSignal(mgmtSrvr, GSN_SET_VAR_REF, signal, 0, JBB);
- } // switch
-
-#endif
-}//execSET_VAR_REQ()
-
-
-void Cmvmi::execSET_VAR_CONF(Signal* signal)
-{
- int mgmtSrvr = signal->theData[0];
- sendSignal(mgmtSrvr, GSN_SET_VAR_CONF, signal, 0, JBB);
-
-}//execSET_VAR_CONF()
-
-
-void Cmvmi::execSET_VAR_REF(Signal* signal)
-{
- int mgmtSrvr = signal->theData[0];
- sendSignal(mgmtSrvr, GSN_SET_VAR_REF, signal, 0, JBB);
-
-}//execSET_VAR_REF()
-
-
-void Cmvmi::handleSET_VAR_REQ(Signal* signal) {
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
- int val = setVarReq->value();
-
- switch (var) {
- case MaxNoOfSavedMessages:
- theConfig.maxNoOfErrorLogs(val);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case LockPagesInMainMemory:
- int result;
- if (val == 0) {
- result = NdbMem_MemUnlockAll();
- }
- else {
- result = NdbMem_MemLockAll();
- }
- if (result == 0) {
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- }
- else {
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- }
- break;
-
- case TimeBetweenWatchDogCheck:
- theConfig.timeBetweenWatchDogCheck(val);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case StopOnError:
- theConfig.stopOnError(val);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- return;
- } // switch
-#endif
-}
-
#ifdef VM_TRACE
class RefSignalTest {
public:
@@ -1296,7 +1120,7 @@ Cmvmi::execTESTSIG(Signal* signal){
fprintf(stdout, "\n");
for(i = 0; i<signal->header.m_noOfSections; i++){
- SegmentedSectionPtr ptr = {0,0,0};
+ SegmentedSectionPtr ptr(0,0,0);
ndbout_c("-- Section %d --", i);
signal->getSection(ptr, i);
ndbrequire(ptr.p != 0);
@@ -1354,7 +1178,7 @@ Cmvmi::execTESTSIG(Signal* signal){
LinearSectionPtr ptr[3];
const Uint32 secs = signal->getNoOfSections();
for(i = 0; i<secs; i++){
- SegmentedSectionPtr sptr = {0,0,0};
+ SegmentedSectionPtr sptr(0,0,0);
signal->getSection(sptr, i);
ptr[i].sz = sptr.sz;
ptr[i].p = new Uint32[sptr.sz];
@@ -1403,7 +1227,7 @@ Cmvmi::execTESTSIG(Signal* signal){
LinearSectionPtr ptr[3];
const Uint32 secs = signal->getNoOfSections();
for(i = 0; i<secs; i++){
- SegmentedSectionPtr sptr = {0,0,0};
+ SegmentedSectionPtr sptr(0,0,0);
signal->getSection(sptr, i);
ptr[i].sz = sptr.sz;
ptr[i].p = new Uint32[sptr.sz];
@@ -1469,7 +1293,7 @@ Cmvmi::execTESTSIG(Signal* signal){
const Uint32 secs = signal->getNoOfSections();
memset(g_test, 0, sizeof(g_test));
for(i = 0; i<secs; i++){
- SegmentedSectionPtr sptr = {0,0,0};
+ SegmentedSectionPtr sptr(0,0,0);
signal->getSection(sptr, i);
g_test[i].sz = sptr.sz;
g_test[i].p = new Uint32[sptr.sz];
diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp
index e3a20795701..712e70039c9 100644
--- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp
+++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp
@@ -55,20 +55,14 @@ private:
void execSIZEALT_ACK(Signal* signal);
void execTEST_ORD(Signal* signal);
- void execSTATISTICS_REQ(Signal* signal);
void execSTOP_ORD(Signal* signal);
void execSTART_ORD(Signal* signal);
void execTAMPER_ORD(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
- void execSET_VAR_CONF(Signal* signal);
- void execSET_VAR_REF(Signal* signal);
void execDUMP_STATE_ORD(Signal* signal);
void execEVENT_SUBSCRIBE_REQ(Signal *);
void cancelSubscription(NodeId nodeId);
-
- void handleSET_VAR_REQ(Signal* signal);
void execTESTSIG(Signal* signal);
void execNODE_START_REP(Signal* signal);
diff --git a/ndb/src/kernel/blocks/dbacc/Dbacc.hpp b/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
index 43810a08ac7..02a7e69b684 100644
--- a/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
+++ b/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
@@ -911,7 +911,6 @@ private:
void execDROP_TAB_REQ(Signal* signal);
void execFSREMOVECONF(Signal* signal);
void execREAD_CONFIG_REQ(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
void execDUMP_STATE_ORD(Signal* signal);
// Statement blocks
diff --git a/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp b/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
index 024a32ca95c..80664e8911a 100644
--- a/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
+++ b/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
@@ -178,7 +178,6 @@ Dbacc::Dbacc(const class Configuration & conf):
addRecSignal(GSN_DROP_TAB_REQ, &Dbacc::execDROP_TAB_REQ);
addRecSignal(GSN_FSREMOVECONF, &Dbacc::execFSREMOVECONF);
addRecSignal(GSN_READ_CONFIG_REQ, &Dbacc::execREAD_CONFIG_REQ, true);
- addRecSignal(GSN_SET_VAR_REQ, &Dbacc::execSET_VAR_REQ);
initData();
diff --git a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
index 40af5a52c03..37f75d82710 100644
--- a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
+++ b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
@@ -11648,33 +11648,6 @@ Dbacc::execDUMP_STATE_ORD(Signal* signal)
#endif
}//Dbacc::execDUMP_STATE_ORD()
-void Dbacc::execSET_VAR_REQ(Signal* signal)
-{
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
- int val = setVarReq->value();
-
-
- switch (var) {
-
- case NoOfDiskPagesToDiskAfterRestartACC:
- clblPagesPerTick = val;
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case NoOfDiskPagesToDiskDuringRestartACC:
- // Valid only during start so value not set.
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- } // switch
-#endif
-
-}//execSET_VAR_REQ()
-
void
Dbacc::execREAD_PSUEDO_REQ(Signal* signal){
jamEntry();
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index 699b5cb735b..fed40d0a904 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -79,6 +79,9 @@
#include <NdbSleep.h>
#include <signaldata/ApiBroadcast.hpp>
+#include <EventLogger.hpp>
+extern EventLogger g_eventLogger;
+
#define ZNOT_FOUND 626
#define ZALREADYEXIST 630
@@ -289,7 +292,7 @@ Dbdict::packTableIntoPagesImpl(SimpleProperties::Writer & w,
w.add(DictTabInfo::MaxRowsHigh, tablePtr.p->maxRowsHigh);
w.add(DictTabInfo::MinRowsLow, tablePtr.p->minRowsLow);
w.add(DictTabInfo::MinRowsHigh, tablePtr.p->minRowsHigh);
-
+ w.add(DictTabInfo::SingleUserMode, tablePtr.p->singleUserMode);
if(!signal)
{
w.add(DictTabInfo::FragmentCount, tablePtr.p->fragmentCount);
@@ -1075,17 +1078,36 @@ void Dbdict::readSchemaConf(Signal* signal,
for (Uint32 n = 0; n < xsf->noOfPages; n++) {
SchemaFile * sf = &xsf->schemaPage[n];
- bool ok =
- memcmp(sf->Magic, NDB_SF_MAGIC, sizeof(sf->Magic)) == 0 &&
- sf->FileSize != 0 &&
- sf->FileSize % NDB_SF_PAGE_SIZE == 0 &&
- sf->FileSize == sf0->FileSize &&
- sf->PageNumber == n &&
- computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS) == 0;
- ndbrequire(ok || !crashInd);
- if (! ok) {
+ bool ok = false;
+ const char *reason;
+ if (memcmp(sf->Magic, NDB_SF_MAGIC, sizeof(sf->Magic)) != 0)
+ { jam(); reason = "magic code"; }
+ else if (sf->FileSize == 0)
+ { jam(); reason = "file size == 0"; }
+ else if (sf->FileSize % NDB_SF_PAGE_SIZE != 0)
+ { jam(); reason = "invalid size multiple"; }
+ else if (sf->FileSize != sf0->FileSize)
+ { jam(); reason = "invalid size"; }
+ else if (sf->PageNumber != n)
+ { jam(); reason = "invalid page number"; }
+ else if (computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS) != 0)
+ { jam(); reason = "invalid checksum"; }
+ else
+ ok = true;
+
+ if (!ok)
+ {
+ char reason_msg[128];
+ snprintf(reason_msg, sizeof(reason_msg),
+ "schema file corrupt, page %u (%s, "
+ "sz=%u sz0=%u pn=%u)",
+ n, reason, sf->FileSize, sf0->FileSize, sf->PageNumber);
+ if (crashInd)
+ progError(__LINE__, NDBD_EXIT_SR_SCHEMAFILE, reason_msg);
+ ndbrequireErr(fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1,
+ NDBD_EXIT_SR_SCHEMAFILE);
jam();
- ndbrequire(fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1);
+ infoEvent("primary %s, trying backup", reason_msg);
readSchemaRef(signal, fsPtr);
return;
}
@@ -1506,6 +1528,7 @@ void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr)
tablePtr.p->maxRowsHigh = 0;
tablePtr.p->minRowsLow = 0;
tablePtr.p->minRowsHigh = 0;
+ tablePtr.p->singleUserMode = 0;
tablePtr.p->storedTable = true;
tablePtr.p->tableType = DictTabInfo::UserTable;
tablePtr.p->primaryTableId = RNIL;
@@ -4720,8 +4743,9 @@ Dbdict::execTAB_COMMITCONF(Signal* signal){
signal->theData[4] = (Uint32)tabPtr.p->tableType;
signal->theData[5] = createTabPtr.p->key;
signal->theData[6] = (Uint32)tabPtr.p->noOfPrimkey;
-
- sendSignal(DBTC_REF, GSN_TC_SCHVERREQ, signal, 7, JBB);
+ signal->theData[7] = (Uint32)tabPtr.p->singleUserMode;
+
+ sendSignal(DBTC_REF, GSN_TC_SCHVERREQ, signal, 8, JBB);
return;
}
@@ -5086,11 +5110,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
tablePtr.p->maxRowsHigh = tableDesc.MaxRowsHigh;
tablePtr.p->minRowsLow = tableDesc.MinRowsLow;
tablePtr.p->minRowsHigh = tableDesc.MinRowsHigh;
-
- Uint64 maxRows =
- (((Uint64)tablePtr.p->maxRowsHigh) << 32) + tablePtr.p->maxRowsLow;
- Uint64 minRows =
- (((Uint64)tablePtr.p->minRowsHigh) << 32) + tablePtr.p->minRowsLow;
+ tablePtr.p->singleUserMode = tableDesc.SingleUserMode;
tablePtr.p->frmLen = tableDesc.FrmLen;
memcpy(tablePtr.p->frmData, tableDesc.FrmData, tableDesc.FrmLen);
@@ -6891,6 +6911,7 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
w.add(DictTabInfo::NoOfKeyAttr, indexPtr.p->noOfPrimkey);
w.add(DictTabInfo::NoOfNullable, indexPtr.p->noOfNullAttr);
w.add(DictTabInfo::KeyLength, indexPtr.p->tupKeyLength);
+ w.add(DictTabInfo::SingleUserMode, (Uint32)NDB_SUM_READ_WRITE);
// write index key attributes
AttributeRecordPtr aRecPtr;
c_attributeRecordPool.getPtr(aRecPtr, tablePtr.p->firstAttribute);
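
The readSchemaConf() rewrite above replaces one compound boolean with a chain of checks, each tagging the failure with a human-readable reason that ends up in the error or info message before the code either crashes (crashInd) or falls back to the backup schema file. A stripped-down sketch of that validate-with-reason shape; Page and its fields are simplified stand-ins for SchemaFile.

  #include <cstdio>

  // Simplified stand-in for one on-disk schema page.
  struct Page {
    unsigned fileSize;
    unsigned pageNumber;
    unsigned checksumOk;   // non-zero means the checksum verified
  };

  // Returns nullptr if the page looks sane, otherwise a short reason string.
  static const char *check_page(const Page &p, const Page &first,
                                unsigned n, unsigned pageSize)
  {
    if (p.fileSize == 0)              return "file size == 0";
    if (p.fileSize % pageSize != 0)   return "invalid size multiple";
    if (p.fileSize != first.fileSize) return "invalid size";
    if (p.pageNumber != n)            return "invalid page number";
    if (!p.checksumOk)                return "invalid checksum";
    return nullptr;
  }

  // Log the reason and let the caller try the backup copy.
  static bool validate(const Page *pages, unsigned count, unsigned pageSize)
  {
    for (unsigned n = 0; n < count; n++)
      if (const char *why = check_page(pages[n], pages[0], n, pageSize))
      {
        std::fprintf(stderr, "schema file corrupt, page %u (%s)\n", n, why);
        return false;
      }
    return true;
  }
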
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
index 6fda440f753..254c55a1c01 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
+++ b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
@@ -131,6 +131,7 @@ public:
* on disk. Index trigger ids are volatile.
*/
struct TableRecord : public MetaData::Table {
+ TableRecord() {}
Uint32 maxRowsLow;
Uint32 maxRowsHigh;
Uint32 minRowsLow;
@@ -237,6 +238,11 @@ public:
char frmData[MAX_FRM_DATA_SIZE];
Uint32 fragmentCount;
+
+ /*
+ * Access rights to table during single user mode
+ */
+ Uint8 singleUserMode;
};
typedef Ptr<TableRecord> TableRecordPtr;
@@ -250,6 +256,7 @@ public:
* attributes. This is wrong but convenient.
*/
struct AttributeRecord : public MetaData::Attribute {
+ AttributeRecord() {}
union {
/** Pointer to the next attribute used by ArrayPool */
Uint32 nextPool;
@@ -285,6 +292,7 @@ public:
* trigger online creates the trigger in TC (if index) and LQH-TUP.
*/
struct TriggerRecord {
+ TriggerRecord() {}
/** Trigger state */
enum TriggerState {
@@ -864,6 +872,7 @@ private:
* seize/release invokes ctor/dtor automatically.
*/
struct OpRecordCommon {
+ OpRecordCommon() {}
Uint32 key; // key shared between master and slaves
Uint32 nextHash;
Uint32 prevHash;
@@ -879,6 +888,7 @@ private:
* Create table record
*/
struct CreateTableRecord : OpRecordCommon {
+ CreateTableRecord() {}
Uint32 m_senderRef;
Uint32 m_senderData;
Uint32 m_coordinatorRef;
@@ -917,6 +927,7 @@ private:
* Drop table record
*/
struct DropTableRecord : OpRecordCommon {
+ DropTableRecord() {}
DropTableReq m_request;
Uint32 m_requestType;
diff --git a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
index e8f24876979..e471a953391 100644
--- a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
+++ b/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
@@ -693,7 +693,6 @@ private:
void execFSREADREF(Signal *);
void execFSWRITECONF(Signal *);
void execFSWRITEREF(Signal *);
- void execSET_VAR_REQ(Signal *);
void execCHECKNODEGROUPSREQ(Signal *);
void execSTART_INFOREQ(Signal*);
void execSTART_INFOREF(Signal*);
@@ -1288,10 +1287,21 @@ public:
private:
struct LcpState {
+ LcpState() {}
LcpStatus lcpStatus;
Uint32 lcpStatusUpdatedPlace;
+ struct Save {
+ LcpStatus m_status;
+ Uint32 m_place;
+ } m_saveState[10];
+
void setLcpStatus(LcpStatus status, Uint32 line){
+ for (Uint32 i = 9; i > 0; i--)
+ m_saveState[i] = m_saveState[i-1];
+ m_saveState[0].m_status = lcpStatus;
+ m_saveState[0].m_place = lcpStatusUpdatedPlace;
+
lcpStatus = status;
lcpStatusUpdatedPlace = line;
}
@@ -1367,6 +1377,7 @@ private:
Uint32 csystemnodes;
Uint32 currentgcp;
Uint32 c_newest_restorable_gci;
+ Uint32 c_set_initial_start_flag;
enum GcpMasterTakeOverState {
GMTOS_IDLE = 0,
@@ -1394,6 +1405,7 @@ public:
private:
class MasterTakeOverState {
public:
+ MasterTakeOverState() {}
void set(LcpMasterTakeOverState s, Uint32 line) {
state = s; updatePlace = line;
}
@@ -1481,6 +1493,7 @@ private:
* SwitchReplicaRecord - Should only be used by master
*/
struct SwitchReplicaRecord {
+ SwitchReplicaRecord() {}
void clear(){}
Uint32 nodeId;
@@ -1626,6 +1639,8 @@ private:
// NR
Uint32 c_dictLockSlavePtrI_nodeRestart; // userPtr for NR
void recvDictLockConf_nodeRestart(Signal* signal, Uint32 data, Uint32 ret);
+
+ Uint32 c_error_7181_ref;
};
#if (DIH_CDATA_SIZE < _SYSFILE_SIZE32)
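
The new m_saveState array in LcpState keeps the ten most recent (status, source line) pairs: setLcpStatus() shifts the array down and records the outgoing value before overwriting it, and the DUMP_STATE_ORD handler added in DbdihMain.cpp prints the resulting history. A minimal generic sketch of the same shift-down history (not the Dbdih types):

  #include <cstdio>

  // Keep the N most recent (status, place) transitions, newest first.
  template <unsigned N>
  struct StatusHistory {
    struct Entry { unsigned status = 0; unsigned place = 0; };
    Entry save[N];

    // Call with the outgoing value just before it is overwritten,
    // as setLcpStatus() does.
    void record(unsigned status, unsigned place)
    {
      for (unsigned i = N - 1; i > 0; i--)   // shift older entries down
        save[i] = save[i - 1];
      save[0].status = status;
      save[0].place  = place;
    }

    void dump() const
    {
      for (unsigned i = 0; i < N; i++)
        std::printf("%u : status: %u place: %u\n", i,
                    save[i].status, save[i].place);
    }
  };
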
diff --git a/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp b/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
index 360f320cb74..f3228b36dde 100644
--- a/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
+++ b/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
@@ -74,6 +74,7 @@ void Dbdih::initData()
c_blockCommit = false;
c_blockCommitNo = 1;
cntrlblockref = RNIL;
+ c_set_initial_start_flag = FALSE;
}//Dbdih::initData()
void Dbdih::initRecords()
@@ -216,7 +217,6 @@ Dbdih::Dbdih(const class Configuration & config):
addRecSignal(GSN_FSREADREF, &Dbdih::execFSREADREF, true);
addRecSignal(GSN_FSWRITECONF, &Dbdih::execFSWRITECONF);
addRecSignal(GSN_FSWRITEREF, &Dbdih::execFSWRITEREF, true);
- addRecSignal(GSN_SET_VAR_REQ, &Dbdih::execSET_VAR_REQ);
addRecSignal(GSN_START_INFOREQ,
&Dbdih::execSTART_INFOREQ);
diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index 44e2293f318..88d167f0985 100644
--- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -666,6 +666,12 @@ done:
{
jam();
memcpy(sysfileData, cdata, sizeof(sysfileData));
+
+ if (c_set_initial_start_flag)
+ {
+ jam();
+ Sysfile::setInitialStartOngoing(SYSFILE->systemRestartBits);
+ }
}
c_copyGCISlave.m_copyReason = reason;
@@ -1259,6 +1265,11 @@ void Dbdih::execNDB_STTOR(Signal* signal)
// The permission is given by the master node in the alive set.
/*-----------------------------------------------------------------------*/
createMutexes(signal, 0);
+ if (cstarttype == NodeState::ST_INITIAL_NODE_RESTART)
+ {
+ jam();
+ c_set_initial_start_flag = TRUE; // In sysfile...
+ }
break;
case ZNDB_SPH3:
@@ -4618,6 +4629,8 @@ void
Dbdih::startLcpMasterTakeOver(Signal* signal, Uint32 nodeId){
jam();
+ Uint32 oldNode = c_lcpMasterTakeOverState.failedNodeId;
+
c_lcpMasterTakeOverState.minTableId = ~0;
c_lcpMasterTakeOverState.minFragId = ~0;
c_lcpMasterTakeOverState.failedNodeId = nodeId;
@@ -4636,7 +4649,20 @@ Dbdih::startLcpMasterTakeOver(Signal* signal, Uint32 nodeId){
/**
* Node failure during master take over...
*/
- g_eventLogger.info("Nodefail during master take over");
+ g_eventLogger.info("Nodefail during master take over (old: %d)", oldNode);
+ }
+
+ NodeRecordPtr nodePtr;
+ nodePtr.i = oldNode;
+ if (oldNode > 0 && oldNode < MAX_NDB_NODES)
+ {
+ jam();
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ if (nodePtr.p->m_nodefailSteps.get(NF_LCP_TAKE_OVER))
+ {
+ jam();
+ checkLocalNodefailComplete(signal, oldNode, NF_LCP_TAKE_OVER);
+ }
}
setLocalNodefailHandling(signal, nodeId, NF_LCP_TAKE_OVER);
@@ -4738,11 +4764,19 @@ void Dbdih::startRemoveFailedNode(Signal* signal, NodeRecordPtr failedNodePtr)
}
jam();
- signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
- signal->theData[1] = failedNodePtr.i;
- signal->theData[2] = 0; // Tab id
- sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
-
+
+ if (!ERROR_INSERTED(7194))
+ {
+ signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
+ signal->theData[1] = failedNodePtr.i;
+ signal->theData[2] = 0; // Tab id
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ }
+ else
+ {
+ ndbout_c("7194 Not starting ZREMOVE_NODE_FROM_TABLE");
+ }
+
setLocalNodefailHandling(signal, failedNodePtr.i, NF_REMOVE_NODE_FROM_TABLE);
}//Dbdih::startRemoveFailedNode()
@@ -4791,6 +4825,16 @@ void Dbdih::execMASTER_GCPREQ(Signal* signal)
} else {
ndbrequire(failedNodePtr.p->nodeStatus == NodeRecord::DYING);
}//if
+
+ if (ERROR_INSERTED(7181))
+ {
+ ndbout_c("execGCP_TCFINISHED in MASTER_GCPREQ");
+ CLEAR_ERROR_INSERT_VALUE;
+ signal->theData[0] = c_error_7181_ref;
+ signal->theData[1] = coldgcp;
+ execGCP_TCFINISHED(signal);
+ }
+
MasterGCPConf::State gcpState;
switch (cgcpParticipantState) {
case GCP_PARTICIPANT_READY:
@@ -4857,6 +4901,15 @@ void Dbdih::execMASTER_GCPREQ(Signal* signal)
masterGCPConf->lcpActive[i] = SYSFILE->lcpActive[i];
sendSignal(newMasterBlockref, GSN_MASTER_GCPCONF, signal,
MasterGCPConf::SignalLength, JBB);
+
+ if (ERROR_INSERTED(7182))
+ {
+ ndbout_c("execGCP_TCFINISHED in MASTER_GCPREQ");
+ CLEAR_ERROR_INSERT_VALUE;
+ signal->theData[0] = c_error_7181_ref;
+ signal->theData[1] = coldgcp;
+ execGCP_TCFINISHED(signal);
+ }
}//Dbdih::execMASTER_GCPREQ()
void Dbdih::execMASTER_GCPCONF(Signal* signal)
@@ -5631,12 +5684,22 @@ Dbdih::checkEmptyLcpComplete(Signal *signal){
signal->theData[0] = 7012;
execDUMP_STATE_ORD(signal);
+
+ if (ERROR_INSERTED(7194))
+ {
+ ndbout_c("7194 starting ZREMOVE_NODE_FROM_TABLE");
+ signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
+ signal->theData[1] = c_lcpMasterTakeOverState.failedNodeId;
+ signal->theData[2] = 0; // Tab id
+ sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+ }
c_lcpMasterTakeOverState.set(LMTOS_INITIAL, __LINE__);
MasterLCPReq * const req = (MasterLCPReq *)&signal->theData[0];
req->masterRef = reference();
req->failedNodeId = c_lcpMasterTakeOverState.failedNodeId;
sendLoopMacro(MASTER_LCPREQ, sendMASTER_LCPREQ);
+
} else {
sendMASTER_LCPCONF(signal);
}
@@ -5653,6 +5716,14 @@ void Dbdih::execMASTER_LCPREQ(Signal* signal)
jamEntry();
const BlockReference newMasterBlockref = req->masterRef;
+ if (newMasterBlockref != cmasterdihref)
+ {
+ jam();
+ ndbout_c("resending GSN_MASTER_LCPREQ");
+ sendSignalWithDelay(reference(), GSN_MASTER_LCPREQ, signal,
+ signal->getLength(), 50);
+ return;
+ }
Uint32 failedNodeId = req->failedNodeId;
/**
@@ -5945,6 +6016,15 @@ void Dbdih::execMASTER_LCPCONF(Signal* signal)
{
const MasterLCPConf * const conf = (MasterLCPConf *)&signal->theData[0];
jamEntry();
+
+ if (ERROR_INSERTED(7194))
+ {
+ ndbout_c("delaying MASTER_LCPCONF due to error 7194");
+ sendSignalWithDelay(reference(), GSN_MASTER_LCPCONF, signal,
+ 300, signal->getLength());
+ return;
+ }
+
Uint32 senderNodeId = conf->senderNodeId;
MasterLCPConf::State lcpState = (MasterLCPConf::State)conf->lcpState;
const Uint32 failedNodeId = conf->failedNodeId;
@@ -5953,6 +6033,8 @@ void Dbdih::execMASTER_LCPCONF(Signal* signal)
ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
nodePtr.p->lcpStateAtTakeOver = lcpState;
+ CRASH_INSERTION(7180);
+
#ifdef VM_TRACE
g_eventLogger.info("MASTER_LCPCONF");
printMASTER_LCP_CONF(stdout, &signal->theData[0], 0, 0);
@@ -6077,7 +6159,6 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
#endif
c_lcpState.keepGci = SYSFILE->keepGCI;
- c_lcpState.setLcpStatus(LCP_START_LCP_ROUND, __LINE__);
startLcpRoundLoopLab(signal, 0, 0);
break;
}
@@ -7519,10 +7600,10 @@ void Dbdih::execGCP_NODEFINISH(Signal* signal)
} else if (cmasterState == MASTER_TAKE_OVER_GCP) {
jam();
//-------------------------------------------------------------
- // We are currently taking over as master. We will delay the
- // signal until we have completed the take over gcp handling.
+    // We are currently taking over as master. Ignore the
+    // signal in this case, since we will discover it when
+    // MASTER_GCPCONF is received.
//-------------------------------------------------------------
- sendSignalWithDelay(reference(), GSN_GCP_NODEFINISH, signal, 20, 3);
return;
} else {
ndbrequire(cmasterState == MASTER_ACTIVE);
@@ -7657,6 +7738,7 @@ void Dbdih::execGCP_COMMIT(Signal* signal)
cgckptflag = false;
emptyverificbuffer(signal, true);
cgcpParticipantState = GCP_PARTICIPANT_COMMIT_RECEIVED;
+ signal->theData[0] = calcDihBlockRef(masterNodeId);
signal->theData[1] = coldgcp;
sendSignal(clocaltcblockref, GSN_GCP_NOMORETRANS, signal, 2, JBB);
return;
@@ -7666,14 +7748,25 @@ void Dbdih::execGCP_TCFINISHED(Signal* signal)
{
jamEntry();
CRASH_INSERTION(7007);
+ Uint32 retRef = signal->theData[0];
Uint32 gci = signal->theData[1];
ndbrequire(gci == coldgcp);
+ if (ERROR_INSERTED(7181) || ERROR_INSERTED(7182))
+ {
+ c_error_7181_ref = retRef; // Save ref
+ ndbout_c("killing %d", refToNode(cmasterdihref));
+ signal->theData[0] = 9999;
+ sendSignal(numberToRef(CMVMI, refToNode(cmasterdihref)),
+ GSN_NDB_TAMPER, signal, 1, JBB);
+ return;
+ }
+
cgcpParticipantState = GCP_PARTICIPANT_TC_FINISHED;
signal->theData[0] = cownNodeId;
signal->theData[1] = coldgcp;
signal->theData[2] = cfailurenr;
- sendSignal(cmasterdihref, GSN_GCP_NODEFINISH, signal, 3, JBB);
+ sendSignal(retRef, GSN_GCP_NODEFINISH, signal, 3, JBB);
}//Dbdih::execGCP_TCFINISHED()
/*****************************************************************************/
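
A side note on the execGCP_TCFINISHED hunk above: the participant now replies to whatever reference arrives in theData[0] (retRef) instead of the cached cmasterdihref, which is what lets the 7181/7182 error-insert path re-route the reply after killing the master. A minimal standalone sketch of that routing decision; the struct and the sample block reference are purely illustrative, not the real NDB signal classes.

#include <cassert>
#include <cstdint>

// Word layout used in the hunk above: theData[0] = reference to reply to,
// theData[1] = the GCI being finished.
struct GcpTcFinished {
  uint32_t retRef;
  uint32_t gci;
};

// The participant answers to retRef, not to the node it currently believes
// is master (that node may be in the middle of a takeover, see MASTER_GCPREQ).
uint32_t replyDestination(const GcpTcFinished& sig, uint32_t coldgcp) {
  assert(sig.gci == coldgcp);   // mirrors ndbrequire(gci == coldgcp)
  return sig.retRef;
}

int main() {
  GcpTcFinished sig{0x00f60002u /* hypothetical DBDIH block reference */, 42u};
  assert(replyDestination(sig, 42u) == sig.retRef);
  return 0;
}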
@@ -9857,6 +9950,8 @@ void Dbdih::sendLastLCP_FRAG_ORD(Signal* signal)
if(ERROR_INSERTED(7075)){
continue;
}
+
+ CRASH_INSERTION(7193);
BlockReference ref = calcLqhBlockRef(nodePtr.i);
sendSignal(ref, GSN_LCP_FRAG_ORD, signal,LcpFragOrd::SignalLength, JBB);
}
@@ -10054,6 +10149,13 @@ Dbdih::checkLcpAllTablesDoneInLqh(){
CRASH_INSERTION2(7017, !isMaster());
c_lcpState.setLcpStatus(LCP_TAB_COMPLETED, __LINE__);
+
+ if (ERROR_INSERTED(7194))
+ {
+ ndbout_c("CLEARING 7194");
+ CLEAR_ERROR_INSERT_VALUE;
+ }
+
return true;
}
@@ -10209,6 +10311,11 @@ Dbdih::sendLCP_FRAG_ORD(Signal* signal,
BlockReference ref = calcLqhBlockRef(replicaPtr.p->procNode);
+ if (ERROR_INSERTED(7193) && replicaPtr.p->procNode == getOwnNodeId())
+ {
+ return;
+ }
+
LcpFragOrd * const lcpFragOrd = (LcpFragOrd *)&signal->theData[0];
lcpFragOrd->tableId = info.tableId;
lcpFragOrd->fragmentId = info.fragId;
@@ -10265,6 +10372,17 @@ Dbdih::sendLCP_COMPLETE_REP(Signal* signal){
sendSignal(c_lcpState.m_masterLcpDihRef, GSN_LCP_COMPLETE_REP, signal,
LcpCompleteRep::SignalLength, JBB);
+
+ /**
+ * Record that an initial node restart does not need to be redone
+ * once the node has been part of its first LCP
+ */
+ if (c_set_initial_start_flag &&
+ c_lcpState.m_participatingLQH.get(getOwnNodeId()))
+ {
+ jam();
+ c_set_initial_start_flag = FALSE;
+ }
}
/*-------------------------------------------------------------------------- */
@@ -13608,6 +13726,14 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
("immediateLcpStart = %d masterLcpNodeId = %d",
c_lcpState.immediateLcpStart,
refToNode(c_lcpState.m_masterLcpDihRef));
+
+ for (Uint32 i = 0; i<10; i++)
+ {
+ infoEvent("%u : status: %u place: %u", i,
+ c_lcpState.m_saveState[i].m_status,
+ c_lcpState.m_saveState[i].m_place);
+ }
+
infoEvent("-- Node %d LCP STATE --", getOwnNodeId());
}
@@ -14034,30 +14160,6 @@ Dbdih::execNDB_TAMPER(Signal* signal)
return;
}//Dbdih::execNDB_TAMPER()
-void Dbdih::execSET_VAR_REQ(Signal* signal) {
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
- int val = setVarReq->value();
-
-
- switch (var) {
- case TimeBetweenLocalCheckpoints:
- c_lcpState.clcpDelay = val;
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case TimeBetweenGlobalCheckpoints:
- cgcpDelay = val;
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- } // switch
-#endif
-}
-
void Dbdih::execBLOCK_COMMIT_ORD(Signal* signal){
BlockCommitOrd* const block = (BlockCommitOrd *)&signal->theData[0];
diff --git a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
index 817832bdfcb..59d795d202f 100644
--- a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
+++ b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
@@ -509,6 +509,7 @@ public:
typedef Ptr<Databuf> DatabufPtr;
struct ScanRecord {
+ ScanRecord() {}
enum ScanState {
SCAN_FREE = 0,
WAIT_STORED_PROC_COPY = 1,
@@ -1616,7 +1617,8 @@ public:
ACTIVE_WRITE_LOG = 17, ///< A write operation during
///< writing of log
READ_SR_INVALIDATE_PAGES = 18,
- WRITE_SR_INVALIDATE_PAGES = 19
+ WRITE_SR_INVALIDATE_PAGES = 19,
+ WRITE_SR_INVALIDATE_PAGES_UPDATE_PAGE0 = 20
};
/**
* We have to remember the log pages read.
@@ -2058,6 +2060,9 @@ public:
Uint8 simpleRead;
Uint8 seqNoReplica;
Uint8 tcNodeFailrec;
+#ifdef VM_TRACE
+ Uint8 tupkeyref;
+#endif
}; /* p2c: size = 280 bytes */
typedef Ptr<TcConnectionrec> TcConnectionrecPtr;
@@ -2201,7 +2206,6 @@ private:
void execFSREADCONF(Signal* signal);
void execFSREADREF(Signal* signal);
void execSCAN_HBREP(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
void execTIME_SIGNAL(Signal* signal);
void execFSSYNCCONF(Signal* signal);
@@ -2433,7 +2437,7 @@ private:
void errorReport(Signal* signal, int place);
void warningReport(Signal* signal, int place);
void invalidateLogAfterLastGCI(Signal *signal);
- void readFileInInvalidate(Signal *signal);
+ void readFileInInvalidate(Signal *signal, bool stepNext);
void exitFromInvalidate(Signal* signal);
Uint32 calcPageCheckSum(LogPageRecordPtr logP);
@@ -2665,7 +2669,8 @@ private:
UintR cfirstfreeLogFile;
UintR clogFileFileSize;
-#define ZLFO_FILE_SIZE 256 /* MAX 256 OUTSTANDING FILE OPERATIONS */
+#define ZLFO_MIN_FILE_SIZE 256
+// Sized as RedoBuffer/32K, but never less than ZLFO_MIN_FILE_SIZE
LogFileOperationRecord *logFileOperationRecord;
LogFileOperationRecordPtr lfoPtr;
UintR cfirstfreeLfo;
@@ -2682,7 +2687,7 @@ private:
UintR cfirstfreePageRef;
UintR cpageRefFileSize;
-#define ZSCANREC_FILE_SIZE 100
+// Configurable
ArrayPool<ScanRecord> c_scanRecordPool;
ScanRecordPtr scanptr;
UintR cscanNoFreeRec;
@@ -2899,6 +2904,7 @@ public:
*
*/
struct CommitAckMarker {
+ CommitAckMarker() {}
Uint32 transid1;
Uint32 transid2;
@@ -2925,6 +2931,7 @@ public:
void scanMarkers(Signal* signal, Uint32 tcNodeFail, Uint32 bucket, Uint32 i);
struct Counters {
+ Counters() {}
Uint32 operations;
inline void clear(){
diff --git a/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp b/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
index 0b395e250c1..adeed3e1e8b 100644
--- a/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
+++ b/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
@@ -32,11 +32,11 @@ void Dblqh::initData()
chostFileSize = MAX_NDB_NODES;
clcpFileSize = ZNO_CONCURRENT_LCP;
clcpLocrecFileSize = ZLCP_LOCREC_FILE_SIZE;
- clfoFileSize = ZLFO_FILE_SIZE;
+ clfoFileSize = 0;
clogFileFileSize = 0;
clogPartFileSize = ZLOG_PART_FILE_SIZE;
cpageRefFileSize = ZPAGE_REF_FILE_SIZE;
- cscanrecFileSize = ZSCANREC_FILE_SIZE;
+ cscanrecFileSize = 0;
ctabrecFileSize = 0;
ctcConnectrecFileSize = 0;
ctcNodeFailrecFileSize = MAX_NDB_NODES;
@@ -314,7 +314,6 @@ Dblqh::Dblqh(const class Configuration & conf):
addRecSignal(GSN_FSREADCONF, &Dblqh::execFSREADCONF);
addRecSignal(GSN_FSREADREF, &Dblqh::execFSREADREF, true);
addRecSignal(GSN_ACC_ABORTCONF, &Dblqh::execACC_ABORTCONF);
- addRecSignal(GSN_SET_VAR_REQ, &Dblqh::execSET_VAR_REQ);
addRecSignal(GSN_TIME_SIGNAL, &Dblqh::execTIME_SIGNAL);
addRecSignal(GSN_FSSYNCCONF, &Dblqh::execFSSYNCCONF);
addRecSignal(GSN_REMOVE_MARKER_ORD, &Dblqh::execREMOVE_MARKER_ORD);
@@ -339,6 +338,11 @@ Dblqh::Dblqh(const class Configuration & conf):
initData();
+ /* maximum number of log file operations */
+ clfoFileSize = clogPageFileSize;
+ if (clfoFileSize < ZLFO_MIN_FILE_SIZE)
+ clfoFileSize = ZLFO_MIN_FILE_SIZE;
+
#ifdef VM_TRACE
{
void* tmp[] = {
diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index 6a439b24c03..e4ff1d1dbb6 100644
--- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -109,6 +109,10 @@ operator<<(NdbOut& out, Dblqh::ScanRecord::ScanType state){
//#define MARKER_TRACE 1
//#define TRACE_SCAN_TAKEOVER 1
+#ifndef DEBUG_REDO
+#define DEBUG_REDO 0
+#endif
+
const Uint32 NR_ScanNo = 0;
void Dblqh::execACC_COM_BLOCK(Signal* signal)
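
The DEBUG_REDO default added above is a plain compile-time constant, so all of the "if (DEBUG_REDO) ndbout_c(...)" tracing introduced in the redo-log hunks below is dead code unless the macro is overridden at build time. A tiny sketch of the same pattern, assuming the override is supplied on the compiler command line (for example -DDEBUG_REDO=1):

#include <cstdio>

#ifndef DEBUG_REDO
#define DEBUG_REDO 0       // same default as in DblqhMain.cpp above
#endif

int main() {
  // With DEBUG_REDO left at 0 the branch is constant-false and the compiler
  // drops the tracing entirely; building with -DDEBUG_REDO=1 turns it on.
  if (DEBUG_REDO)
    std::printf("writeSingle 1 page at part: %u file: %u pos: %u\n", 0u, 0u, 1u);
  return 0;
}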
@@ -456,6 +460,7 @@ void Dblqh::execCONTINUEB(Signal* signal)
else
{
jam();
+ cstartRecReq = 2;
StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend();
conf->startingNodeId = getOwnNodeId();
sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal,
@@ -2764,6 +2769,12 @@ void Dblqh::execTUPKEYREF(Signal* signal)
tcConnectptr.i = tupKeyRef->userRef;
terrorCode = tupKeyRef->errorCode;
ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+
+#ifdef VM_TRACE
+ ndbrequire(tcConnectptr.p->tupkeyref == 0);
+ tcConnectptr.p->tupkeyref = 1;
+#endif
+
switch (tcConnectptr.p->transactionState) {
case TcConnectionrec::WAIT_TUP:
jam();
@@ -3329,6 +3340,10 @@ void Dblqh::seizeTcrec()
locTcConnectptr.p->tcTimer = cLqhTimeOutCount;
locTcConnectptr.p->tableref = RNIL;
locTcConnectptr.p->savePointId = 0;
+#ifdef VM_TRACE
+ locTcConnectptr.p->tupkeyref = 1;
+#endif
+
cfirstfreeTcConrec = nextTc;
tcConnectptr = locTcConnectptr;
locTcConnectptr.p->connectState = TcConnectionrec::CONNECTED;
@@ -4048,6 +4063,9 @@ void Dblqh::execACCKEYCONF(Signal* signal)
tupKeyReq->tcOpIndex = tcConnectptr.p->tcOprec;
tupKeyReq->savePointId = tcConnectptr.p->savePointId;
+#ifdef VM_TRACE
+ tcConnectptr.p->tupkeyref = 0;
+#endif
EXECUTE_DIRECT(tup, GSN_TUPKEYREQ, signal, TupKeyReq::SignalLength);
}//Dblqh::execACCKEYCONF()
@@ -5859,6 +5877,10 @@ void Dblqh::completeUnusualLab(Signal* signal)
void Dblqh::releaseTcrec(Signal* signal, TcConnectionrecPtr locTcConnectptr)
{
jam();
+#ifdef VM_TRACE
+ locTcConnectptr.p->tupkeyref = 1;
+#endif
+
locTcConnectptr.p->tcTimer = 0;
locTcConnectptr.p->transactionState = TcConnectionrec::TC_NOT_CONNECTED;
locTcConnectptr.p->nextTcConnectrec = cfirstfreeTcConrec;
@@ -5881,6 +5903,9 @@ void Dblqh::releaseTcrec(Signal* signal, TcConnectionrecPtr locTcConnectptr)
void Dblqh::releaseTcrecLog(Signal* signal, TcConnectionrecPtr locTcConnectptr)
{
jam();
+#ifdef VM_TRACE
+ locTcConnectptr.p->tupkeyref = 1;
+#endif
locTcConnectptr.p->tcTimer = 0;
locTcConnectptr.p->transactionState = TcConnectionrec::TC_NOT_CONNECTED;
locTcConnectptr.p->nextTcConnectrec = cfirstfreeTcConrec;
@@ -7548,7 +7573,6 @@ Dblqh::get_acc_ptr_from_scan_record(ScanRecord* scanP,
bool crash_flag)
{
Uint32* acc_ptr;
- Uint32 attr_buf_rec, attr_buf_index;
if (!((index < MAX_PARALLEL_OP_PER_SCAN) &&
index < scanP->scan_acc_index)) {
ndbrequire(crash_flag);
@@ -7593,7 +7617,6 @@ void Dblqh::execSCAN_FRAGREQ(Signal* signal)
const Uint32 scanLockMode = ScanFragReq::getLockMode(reqinfo);
const Uint8 keyinfo = ScanFragReq::getKeyinfoFlag(reqinfo);
const Uint8 rangeScan = ScanFragReq::getRangeScanFlag(reqinfo);
- const Uint8 tupScan = ScanFragReq::getTupScanFlag(reqinfo);
ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
if(tabptr.p->tableStatus != Tablerec::TABLE_DEFINED){
@@ -8337,8 +8360,11 @@ void Dblqh::nextScanConfLoopLab(Signal* signal)
tupKeyReq->tcOpIndex = tcConnectptr.p->tcOprec;
tupKeyReq->savePointId = tcConnectptr.p->savePointId;
Uint32 blockNo = refToBlock(tcConnectptr.p->tcTupBlockref);
+#ifdef VM_TRACE
+ tcConnectptr.p->tupkeyref = 0;
+#endif
EXECUTE_DIRECT(blockNo, GSN_TUPKEYREQ, signal,
- TupKeyReq::SignalLength);
+ TupKeyReq::SignalLength);
}
}
@@ -8979,9 +9005,6 @@ Uint32 Dblqh::sendKeyinfo20(Signal* signal,
const Uint32 scanOp = scanP->m_curr_batch_size_rows;
const Uint32 nodeId = refToNode(ref);
const bool connectedToNode = getNodeInfo(nodeId).m_connected;
- const Uint32 type = getNodeInfo(nodeId).m_type;
- const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::REP);
- const bool old_dest = (getNodeInfo(nodeId).m_version < MAKE_VERSION(3,5,0));
const bool longable = true; // TODO is_api && !old_dest;
Uint32 * dst = keyInfo->keyData;
@@ -9082,7 +9105,6 @@ void Dblqh::sendScanFragConf(Signal* signal, Uint32 scanCompleted)
return;
}
ScanFragConf * conf = (ScanFragConf*)&signal->theData[0];
- NodeId tc_node_id= refToNode(tcConnectptr.p->clientBlockref);
Uint32 trans_id1= tcConnectptr.p->transid[0];
Uint32 trans_id2= tcConnectptr.p->transid[1];
@@ -9460,6 +9482,9 @@ void Dblqh::copySendTupkeyReqLab(Signal* signal)
tupKeyReq->tcOpIndex = tcConnectptr.p->tcOprec;
tupKeyReq->savePointId = tcConnectptr.p->savePointId;
Uint32 blockNo = refToBlock(tcConnectptr.p->tcTupBlockref);
+#ifdef VM_TRACE
+ tcConnectptr.p->tupkeyref = 0;
+#endif
EXECUTE_DIRECT(blockNo, GSN_TUPKEYREQ, signal,
TupKeyReq::SignalLength);
}
@@ -9639,6 +9664,15 @@ void Dblqh::copyCompletedLab(Signal* signal)
closeCopyLab(signal);
return;
}//if
+
+ if (scanptr.p->scanState == ScanRecord::WAIT_LQHKEY_COPY &&
+ scanptr.p->scanErrorCounter)
+ {
+ jam();
+ closeCopyLab(signal);
+ return;
+ }
+
if (scanptr.p->scanState == ScanRecord::WAIT_LQHKEY_COPY) {
jam();
/*---------------------------------------------------------------------------*/
@@ -9715,13 +9749,16 @@ void Dblqh::continueCopyAfterBlockedLab(Signal* signal)
void Dblqh::copyLqhKeyRefLab(Signal* signal)
{
ndbrequire(tcConnectptr.p->transid[1] == signal->theData[4]);
- tcConnectptr.p->copyCountWords -= signal->theData[3];
+ Uint32 copyWords = signal->theData[3];
scanptr.i = tcConnectptr.p->tcScanRec;
c_scanRecordPool.getPtr(scanptr);
scanptr.p->scanErrorCounter++;
tcConnectptr.p->errorCode = terrorCode;
- closeCopyLab(signal);
- return;
+
+ LqhKeyConf* conf = (LqhKeyConf*)signal->getDataPtrSend();
+ conf->transId1 = copyWords;
+ conf->transId2 = tcConnectptr.p->transid[1];
+ copyCompletedLab(signal);
}//Dblqh::copyLqhKeyRefLab()
void Dblqh::closeCopyLab(Signal* signal)
@@ -9732,6 +9769,7 @@ void Dblqh::closeCopyLab(Signal* signal)
// Wait until all of those have arrived before we start the
// close process.
/*---------------------------------------------------------------------------*/
+ scanptr.p->scanState = ScanRecord::WAIT_LQHKEY_COPY;
jam();
return;
}//if
@@ -11119,6 +11157,13 @@ void Dblqh::sendLCP_COMPLETE_REP(Signal* signal, Uint32 lcpId)
jam();
sendEMPTY_LCP_CONF(signal, true);
}
+
+ if (getNodeState().getNodeRestartInProgress() && cstartRecReq != 3)
+ {
+ jam();
+ ndbrequire(cstartRecReq == 2);
+ cstartRecReq = 3;
+ }
return;
}//Dblqh::sendCOMP_LCP_ROUND()
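
The numeric cstartRecReq values used in this and the surrounding DBLQH hunks replace the old ZTRUE/ZFALSE flag. Read together, the stages appear to be: 0 until START_RECREQ arrives, 1 while redo handling is in progress, 2 once START_RECCONF has been sent, and 3 after the node has completed its first LCP during a node restart; execGCP_SAVEREQ below refuses saves until stage 2 and, later in the same function, until stage 3. A small sketch of that interpretation follows; the enum names are mine, not from the source.

#include <cassert>

// Assumed meaning of the cstartRecReq stages used in these hunks.
enum StartRecStage {
  SR_NOT_RECEIVED   = 0,  // initialiseRecordsLab(): cstartRecReq = 0
  SR_REQ_RECEIVED   = 1,  // execSTART_RECREQ(): cstartRecReq = 1
  SR_CONF_SENT      = 2,  // set just before sending START_RECCONF
  SR_FIRST_LCP_DONE = 3   // sendLCP_COMPLETE_REP() during a node restart
};

// GCP_SAVEREQ acceptance as implied by the two guards in execGCP_SAVEREQ():
// during a node restart, saves are refused with NodeRestartInProgress until
// the node has both sent START_RECCONF and finished its first LCP.
bool acceptGcpSave(StartRecStage stage, bool nodeRestartInProgress) {
  if (nodeRestartInProgress && stage < SR_FIRST_LCP_DONE)
    return false;
  return true;
}

int main() {
  assert(!acceptGcpSave(SR_CONF_SENT, true));
  assert( acceptGcpSave(SR_FIRST_LCP_DONE, true));
  assert( acceptGcpSave(SR_REQ_RECEIVED, false));
  return 0;
}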
@@ -11368,7 +11413,6 @@ void Dblqh::sendAccContOp(Signal* signal)
{
LcpLocRecordPtr sacLcpLocptr;
- int count = 0;
sacLcpLocptr.i = lcpPtr.p->firstLcpLocAcc;
do {
ptrCheckGuard(sacLcpLocptr, clcpLocrecFileSize, lcpLocRecord);
@@ -11672,7 +11716,8 @@ void Dblqh::execGCP_SAVEREQ(Signal* signal)
return;
}
- if(getNodeState().getNodeRestartInProgress()){
+ if(getNodeState().getNodeRestartInProgress() && cstartRecReq < 2)
+ {
GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0];
saveRef->dihPtr = dihPtr;
saveRef->nodeId = getOwnNodeId();
@@ -11683,15 +11728,27 @@ void Dblqh::execGCP_SAVEREQ(Signal* signal)
return;
}
- ccurrentGcprec = 0;
- gcpPtr.i = ccurrentGcprec;
- ptrCheckGuard(gcpPtr, cgcprecFileSize, gcpRecord);
-
cnewestCompletedGci = gci;
if (gci > cnewestGci) {
jam();
cnewestGci = gci;
}//if
+
+ if(getNodeState().getNodeRestartInProgress() && cstartRecReq < 3)
+ {
+ GCPSaveRef * const saveRef = (GCPSaveRef*)&signal->theData[0];
+ saveRef->dihPtr = dihPtr;
+ saveRef->nodeId = getOwnNodeId();
+ saveRef->gci = gci;
+ saveRef->errorCode = GCPSaveRef::NodeRestartInProgress;
+ sendSignal(dihBlockRef, GSN_GCP_SAVEREF, signal,
+ GCPSaveRef::SignalLength, JBB);
+ return;
+ }
+
+ ccurrentGcprec = 0;
+ gcpPtr.i = ccurrentGcprec;
+ ptrCheckGuard(gcpPtr, cgcprecFileSize, gcpRecord);
gcpPtr.p->gcpBlockref = dihBlockRef;
gcpPtr.p->gcpUserptr = dihPtr;
@@ -11945,9 +12002,10 @@ void Dblqh::execFSCLOSECONF(Signal* signal)
case LogFileRecord::CLOSE_SR_INVALIDATE_PAGES:
jam();
logFilePtr.p->logFileStatus = LogFileRecord::CLOSED;
- // Set the prev file to check if we shall close it.
- logFilePtr.i = logFilePtr.p->prevLogFile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+
+ logPartPtr.i = logFilePtr.p->logPartRec;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+
exitFromInvalidate(signal);
return;
case LogFileRecord::CLOSING_INIT:
@@ -11993,7 +12051,7 @@ void Dblqh::execFSOPENCONF(Signal* signal)
case LogFileRecord::OPEN_SR_INVALIDATE_PAGES:
jam();
logFilePtr.p->logFileStatus = LogFileRecord::OPEN;
- readFileInInvalidate(signal);
+ readFileInInvalidate(signal, false);
return;
case LogFileRecord::OPENING_INIT:
jam();
@@ -12173,6 +12231,7 @@ void Dblqh::execFSWRITECONF(Signal* signal)
case LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES:
jam();
invalidateLogAfterLastGCI(signal);
+ CRASH_INSERTION(5047);
return;
case LogFileOperationRecord::WRITE_PAGE_ZERO:
jam();
@@ -12210,6 +12269,14 @@ void Dblqh::execFSWRITECONF(Signal* signal)
jam();
firstPageWriteLab(signal);
return;
+ case LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES_UPDATE_PAGE0:
+ jam();
+ // We are done...send completed signal and exit this phase.
+ releaseLfo(signal);
+ signal->theData[0] = ZSR_FOURTH_COMP;
+ signal->theData[1] = logPartPtr.i;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+ return;
default:
jam();
systemErrorLab(signal, __LINE__);
@@ -13280,6 +13347,12 @@ void Dblqh::writeSinglePage(Signal* signal, Uint32 pageNo,
signal->theData[6] = logPagePtr.i;
signal->theData[7] = pageNo;
sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+
+ if (DEBUG_REDO)
+ ndbout_c("writeSingle 1 page at part: %u file: %u pos: %u",
+ logPartPtr.i,
+ logFilePtr.p->fileNo,
+ pageNo);
}//Dblqh::writeSinglePage()
/* ##########################################################################
@@ -13345,6 +13418,12 @@ void Dblqh::openSrLastFileLab(Signal* signal)
void Dblqh::readSrLastFileLab(Signal* signal)
{
logPartPtr.p->logLap = logPagePtr.p->logPageWord[ZPOS_LOG_LAP];
+ if (DEBUG_REDO)
+ ndbout_c("readSrLastFileLab part: %u logExecState: %u logPartState: %u logLap: %u",
+ logPartPtr.i,
+ logPartPtr.p->logExecState,
+ logPartPtr.p->logPartState,
+ logPartPtr.p->logLap);
if (logPartPtr.p->noLogFiles > ZMAX_LOG_FILES_IN_PAGE_ZERO) {
jam();
initGciInLogFileRec(signal, ZMAX_LOG_FILES_IN_PAGE_ZERO);
@@ -13816,7 +13895,7 @@ void Dblqh::srCompletedLab(Signal* signal)
* NO MORE FRAGMENTS ARE WAITING FOR SYSTEM RESTART.
* -------------------------------------------------------------------- */
lcpPtr.p->lcpState = LcpRecord::LCP_IDLE;
- if (cstartRecReq == ZTRUE) {
+ if (cstartRecReq == 1) {
jam();
/* ----------------------------------------------------------------
* WE HAVE ALSO RECEIVED AN INDICATION THAT NO MORE FRAGMENTS
@@ -13886,7 +13965,7 @@ void Dblqh::execSTART_RECREQ(Signal* signal)
ndbrequire(req->receivingNodeId == cownNodeid);
cnewestCompletedGci = cnewestGci;
- cstartRecReq = ZTRUE;
+ cstartRecReq = 1;
for (logPartPtr.i = 0; logPartPtr.i < 4; logPartPtr.i++) {
ptrAss(logPartPtr, logPartRecord);
logPartPtr.p->logPartNewestCompletedGCI = cnewestCompletedGci;
@@ -13907,6 +13986,7 @@ void Dblqh::execSTART_RECREQ(Signal* signal)
}//if
if(cstartType == NodeState::ST_INITIAL_NODE_RESTART){
jam();
+ cstartRecReq = 2;
StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend();
conf->startingNodeId = getOwnNodeId();
sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal,
@@ -14561,6 +14641,20 @@ void Dblqh::srLogLimits(Signal* signal)
break;
}//if
}//while
+
+ if (DEBUG_REDO)
+ {
+ LogFileRecordPtr tmp;
+ tmp.i = logPartPtr.p->stopLogfile;
+ ptrCheckGuard(tmp, clogFileFileSize, logFileRecord);
+ ndbout_c("srLogLimits part: %u start file: %u mb: %u stop file: %u mb: %u",
+ logPartPtr.i,
+ tlastPrepRef >> 16,
+ tlastPrepRef & 65535,
+ tmp.p->fileNo,
+ logPartPtr.p->stopMbyte);
+ }
+
/* ------------------------------------------------------------------------
* WE HAVE NOW FOUND BOTH THE START AND THE STOP OF THE LOG. NOW START
* EXECUTING THE LOG. THE FIRST ACTION IS TO OPEN THE LOG FILE WHERE TO
@@ -14987,6 +15081,12 @@ void Dblqh::execSr(Signal* signal)
case ZCOMPLETED_GCI_TYPE:
jam();
logWord = readLogword(signal);
+ if (DEBUG_REDO)
+ ndbout_c("found gci: %u part: %u file: %u page: %u",
+ logWord,
+ logPartPtr.i,
+ logFilePtr.p->fileNo,
+ logFilePtr.p->currentFilepage);
if (logWord == logPartPtr.p->logLastGci) {
jam();
/*---------------------------------------------------------------------------*/
@@ -15003,6 +15103,10 @@ void Dblqh::execSr(Signal* signal)
logPartPtr.p->headPageNo = logFilePtr.p->currentFilepage;
logPartPtr.p->headPageIndex =
logPagePtr.p->logPageWord[ZCURR_PAGE_INDEX];
+ logPartPtr.p->logLap = logPagePtr.p->logPageWord[ZPOS_LOG_LAP];
+ if (DEBUG_REDO)
+ ndbout_c("execSr part: %u logLap: %u",
+ logPartPtr.i, logPartPtr.p->logLap);
}//if
/*---------------------------------------------------------------------------*/
/* THERE IS NO NEED OF EXECUTING PAST THIS LINE SINCE THERE WILL ONLY BE LOG */
@@ -15070,8 +15174,6 @@ void Dblqh::execDEBUG_SIG(Signal* signal)
2.5 TEMPORARY VARIABLES
-----------------------
*/
- UintR tdebug;
-
jamEntry();
//logPagePtr.i = signal->theData[0];
//tdebug = logPagePtr.p->logPageWord[0];
@@ -15165,67 +15267,140 @@ void Dblqh::invalidateLogAfterLastGCI(Signal* signal) {
}
switch (lfoPtr.p->lfoState) {
- case LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES:
- jam();
- releaseLfo(signal);
- releaseLogpage(signal);
- if (logPartPtr.p->invalidatePageNo < (ZNO_MBYTES_IN_FILE * ZPAGES_IN_MBYTE - 1)) {
- // We continue in this file.
- logPartPtr.p->invalidatePageNo++;
- } else {
- // We continue in the next file.
- logFilePtr.i = logFilePtr.p->nextLogFile;
- ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
- logPartPtr.p->invalidateFileNo = logFilePtr.p->fileNo;
- // Page 0 is used for file descriptors.
- logPartPtr.p->invalidatePageNo = 1;
- if (logFilePtr.p->logFileStatus != LogFileRecord::OPEN) {
- jam();
- logFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_INVALIDATE_PAGES;
- openFileRw(signal, logFilePtr);
- return;
- break;
- }
- }
- // Read a page from the log file.
- readFileInInvalidate(signal);
- return;
- break;
-
case LogFileOperationRecord::READ_SR_INVALIDATE_PAGES:
jam();
- releaseLfo(signal);
// Check if this page must be invalidated.
// If the log lap number on a page after the head of the tail is the same
// as the actual log lap number we must invalidate this page. Otherwise it
// could be impossible to find the end of the log in a later system/node
// restart.
- if (logPagePtr.p->logPageWord[ZPOS_LOG_LAP] == logPartPtr.p->logLap) {
+ if (logPagePtr.p->logPageWord[ZPOS_LOG_LAP] == logPartPtr.p->logLap)
+ {
// This page must be invalidated.
- logPagePtr.p->logPageWord[ZPOS_LOG_LAP] = 0;
- // Contact NDBFS. Real time break.
- writeSinglePage(signal, logPartPtr.p->invalidatePageNo,
- ZPAGE_SIZE - 1, __LINE__);
- lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES;
- } else {
- // We are done with invalidating. Finish start phase 3.4.
+ // Still searching for the end of the log,
+ // read the next page.
+ releaseLfo(signal);
+ releaseLogpage(signal);
+ readFileInInvalidate(signal, true);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_INVALIDATE_PAGES;
+ return;
+ }
+
+ /**
+ * We found the "last" page to invalidate...
+ * Invalidate backwards until head...
+ */
+
+ // Fall through...
+ case LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES:
+ jam();
+
+ releaseLfo(signal);
+ releaseLogpage(signal);
+
+ // Step backwards...
+ logPartPtr.p->invalidatePageNo--;
+
+ if (logPartPtr.p->invalidatePageNo == 0)
+ {
+ jam();
+
+ if (logFilePtr.p->fileNo == 0)
+ {
+ /**
+ * We're wrapping in the log...
+ * update logLap
+ */
+ logPartPtr.p->logLap--;
+ ndbrequire(logPartPtr.p->logLap); // Should always be > 0
+ if (DEBUG_REDO)
+ ndbout_c("invalidateLogAfterLastGCI part: %u wrap from file 0 -> logLap: %u",
+ logPartPtr.i, logPartPtr.p->logLap);
+ }
+
+ /**
+ * Move to prev file
+ */
+ logFilePtr.i = logFilePtr.p->prevLogFile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPartPtr.p->invalidateFileNo = logFilePtr.p->fileNo;
+ logPartPtr.p->invalidatePageNo = ZNO_MBYTES_IN_FILE * ZPAGES_IN_MBYTE - 1;
+ }
+
+ if (logPartPtr.p->invalidateFileNo == logPartPtr.p->headFileNo &&
+ logPartPtr.p->invalidatePageNo == logPartPtr.p->headPageNo)
+ {
+ /**
+ * Done...
+ */
+ logFilePtr.i = logPartPtr.p->currentLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+
+ logFilePtr.i = logFilePtr.p->nextLogFile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+
+ // Close files if necessary. Current file and the next file should be
+ // left open.
exitFromInvalidate(signal);
+ return;
}
- return;
- break;
+ seizeLogpage(signal);
+
+ /**
+ * Make page really empty
+ */
+ bzero(logPagePtr.p, sizeof(LogPageRecord));
+ writeSinglePage(signal, logPartPtr.p->invalidatePageNo,
+ ZPAGE_SIZE - 1, __LINE__);
+
+ lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES;
+ return;
default:
jam();
systemError(signal, __LINE__);
return;
break;
}
-
- return;
}//Dblqh::invalidateLogAfterLastGCI
-void Dblqh::readFileInInvalidate(Signal* signal) {
+void Dblqh::readFileInInvalidate(Signal* signal, bool stepNext)
+{
jam();
+
+ if (stepNext)
+ {
+ logPartPtr.p->invalidatePageNo++;
+ if (logPartPtr.p->invalidatePageNo == (ZNO_MBYTES_IN_FILE * ZPAGES_IN_MBYTE))
+ {
+ // We continue in the next file.
+ logFilePtr.i = logFilePtr.p->nextLogFile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPartPtr.p->invalidateFileNo = logFilePtr.p->fileNo;
+ // Page 0 is used for file descriptors.
+ logPartPtr.p->invalidatePageNo = 1;
+
+ if (logFilePtr.p->fileNo == 0)
+ {
+ /**
+ * We're wrapping in the log...
+ * update logLap
+ */
+ logPartPtr.p->logLap++;
+ if (DEBUG_REDO)
+ ndbout_c("readFileInInvalidate part: %u wrap to file 0 -> logLap: %u",
+ logPartPtr.i, logPartPtr.p->logLap);
+ }
+ if (logFilePtr.p->logFileStatus != LogFileRecord::OPEN)
+ {
+ jam();
+ logFilePtr.p->logFileStatus = LogFileRecord::OPEN_SR_INVALIDATE_PAGES;
+ openFileRw(signal, logFilePtr);
+ return;
+ }
+ }
+ }
+
// Contact NDBFS. Real time break.
readSinglePage(signal, logPartPtr.p->invalidatePageNo);
lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_INVALIDATE_PAGES;
@@ -15233,34 +15408,57 @@ void Dblqh::readFileInInvalidate(Signal* signal) {
void Dblqh::exitFromInvalidate(Signal* signal) {
jam();
- // Close files if necessary. Current file and the next file should be
- // left open.
- if (logFilePtr.i != logPartPtr.p->currentLogfile) {
- LogFileRecordPtr currentLogFilePtr;
- LogFileRecordPtr nextAfterCurrentLogFilePtr;
-
- currentLogFilePtr.i = logPartPtr.p->currentLogfile;
- ptrCheckGuard(currentLogFilePtr, clogFileFileSize, logFileRecord);
-
- nextAfterCurrentLogFilePtr.i = currentLogFilePtr.p->nextLogFile;
-
- if (logFilePtr.i != nextAfterCurrentLogFilePtr.i) {
- // This file should be closed.
- logFilePtr.p->logFileStatus = LogFileRecord::CLOSE_SR_INVALIDATE_PAGES;
- closeFile(signal, logFilePtr);
- // Return from this function and wait for close confirm. Then come back
- // and test the previous file for closing.
- return;
- }
- }
- // We are done with closing files, send completed signal and exit this phase.
- signal->theData[0] = ZSR_FOURTH_COMP;
- signal->theData[1] = logPartPtr.i;
- sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
+loop:
+ logFilePtr.i = logFilePtr.p->nextLogFile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+
+ if (logFilePtr.i == logPartPtr.p->currentLogfile)
+ {
+ jam();
+ goto done;
+ }
+
+ if (logFilePtr.p->fileNo == 0)
+ {
+ jam();
+ /**
+ * Logfile 0 should *not* be closed
+ */
+ goto loop;
+ }
+
+ if (logFilePtr.p->logFileStatus == LogFileRecord::CLOSED)
+ {
+ jam();
+ goto done;
+ }
+
+ jam();
+ ndbrequire(logFilePtr.p->logFileStatus == LogFileRecord::OPEN);
+ logFilePtr.p->logFileStatus = LogFileRecord::CLOSE_SR_INVALIDATE_PAGES;
+ closeFile(signal, logFilePtr);
return;
-}
+done:
+ if (DEBUG_REDO)
+ ndbout_c("exitFromInvalidate part: %u head file: %u page: %u",
+ logPartPtr.i,
+ logPartPtr.p->headFileNo,
+ logPartPtr.p->headPageNo);
+
+ logFilePtr.i = logPartPtr.p->firstLogfile;
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+ logPagePtr.i = logFilePtr.p->logPageZero;
+ ptrCheckGuard(logPagePtr, clogPageFileSize, logPageRecord);
+ logPagePtr.p->logPageWord[ZPAGE_HEADER_SIZE + ZPOS_FILE_NO] =
+ logPartPtr.p->headFileNo;
+ writeSinglePage(signal, 0, ZPAGE_SIZE - 1, __LINE__);
+
+ lfoPtr.p->logFileRec = logFilePtr.i;
+ lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES_UPDATE_PAGE0;
+ return;
+}
/*---------------------------------------------------------------------------*/
/* THE EXECUTION OF A LOG RECORD IS COMPLETED. RELEASE PAGES IF THEY WERE */
@@ -15647,20 +15845,10 @@ void Dblqh::readSrFourthZeroLab(Signal* signal)
ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
logPartPtr.p->invalidateFileNo = logPartPtr.p->headFileNo;
logPartPtr.p->invalidatePageNo = logPartPtr.p->headPageNo;
-
logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG_INVALIDATE;
- seizeLfo(signal);
- initLfo(signal);
- // The state here is a little confusing, but simulates that we return
- // to invalidateLogAfterLastGCI() from an invalidate write and are ready
- // to read a page from file.
- lfoPtr.p->lfoState = LogFileOperationRecord::WRITE_SR_INVALIDATE_PAGES;
-
- /**
- * Make sure we dont release zero page
- */
- seizeLogpage(signal);
- invalidateLogAfterLastGCI(signal);
+
+ readFileInInvalidate(signal, true);
+ lfoPtr.p->lfoState = LogFileOperationRecord::READ_SR_INVALIDATE_PAGES;
return;
}//Dblqh::readSrFourthZeroLab()
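
The reworked invalidation in the hunks above now scans forward from the head until it reads a page whose log lap differs from the current lap, then walks backwards page by page, zero-filling each one, until it reaches the recorded head (headFileNo/headPageNo); stepping back from page 1 moves to the last page of the previous file, and stepping back across file 0 decrements logLap. A compact standalone model of just that stepping arithmetic, with the file and page counts assumed rather than taken from the real headers:

#include <cassert>

// Stand-ins for ZNO_MBYTES_IN_FILE * ZPAGES_IN_MBYTE and the number of files
// in one log part; the real values come from the configuration.
static const unsigned PAGES_PER_FILE = 16 * 32;
static const unsigned NO_FILES = 4;

struct InvalidatePos { unsigned fileNo, pageNo, logLap; };

// One backward step, mirroring the WRITE_SR_INVALIDATE_PAGES branch above:
// page 0 of every file holds file descriptors, so the walk never writes it;
// wrapping backwards across file 0 lowers the log lap.
void stepBack(InvalidatePos& p) {
  p.pageNo--;
  if (p.pageNo == 0) {
    if (p.fileNo == 0) {
      p.logLap--;
      assert(p.logLap > 0);              // ndbrequire(logPartPtr.p->logLap)
    }
    p.fileNo = (p.fileNo + NO_FILES - 1) % NO_FILES;   // prevLogFile
    p.pageNo = PAGES_PER_FILE - 1;
  }
}

int main() {
  InvalidatePos p{1, 1, 2};
  stepBack(p);   // from file 1, page 1: wrap to the last page of file 0
  assert(p.fileNo == 0 && p.pageNo == PAGES_PER_FILE - 1 && p.logLap == 2);
  return 0;
}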
@@ -15727,6 +15915,7 @@ void Dblqh::srFourthComp(Signal* signal)
else
{
jam();
+ cstartRecReq = 2;
StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend();
conf->startingNodeId = getOwnNodeId();
sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal,
@@ -16231,6 +16420,14 @@ void Dblqh::completedLogPage(Signal* signal, Uint32 clpType, Uint32 place)
signal->theData[4] = ZVAR_NO_LOG_PAGE_WORD;
signal->theData[5] = twlpNoPages;
sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 15, JBA);
+
+ if (DEBUG_REDO)
+ ndbout_c("writing %d pages at part: %u file: %u pos: %u",
+ twlpNoPages,
+ logPartPtr.i,
+ logFilePtr.p->fileNo,
+ logFilePtr.p->filePosition);
+
if (twlpType == ZNORMAL) {
jam();
lfoPtr.p->lfoState = LogFileOperationRecord::ACTIVE_WRITE_LOG;
@@ -16693,7 +16890,7 @@ void Dblqh::initialiseRecordsLab(Signal* signal, Uint32 data,
cCommitBlocked = false;
ccurrentGcprec = RNIL;
caddNodeState = ZFALSE;
- cstartRecReq = ZFALSE;
+ cstartRecReq = 0;
cnewestGci = (UintR)-1;
cnewestCompletedGci = (UintR)-1;
crestartOldestGci = 0;
@@ -17470,6 +17667,14 @@ void Dblqh::readExecLog(Signal* signal)
signal->theData[14] = lfoPtr.p->logPageArray[8];
signal->theData[15] = lfoPtr.p->logPageArray[9];
sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 16, JBA);
+
+ if (DEBUG_REDO)
+ ndbout_c("readExecLog %u page at part: %u file: %u pos: %u",
+ lfoPtr.p->noPagesRw,
+ logPartPtr.i,
+ logFilePtr.p->fileNo,
+ logPartPtr.p->execSrStartPageNo);
+
}//Dblqh::readExecLog()
/* ------------------------------------------------------------------------- */
@@ -17532,6 +17737,14 @@ void Dblqh::readExecSr(Signal* signal)
signal->theData[13] = lfoPtr.p->logPageArray[7];
signal->theData[14] = tresPageid;
sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 15, JBA);
+
+ if (DEBUG_REDO)
+ ndbout_c("readExecSr %u page at part: %u file: %u pos: %u",
+ 8,
+ logPartPtr.i,
+ logFilePtr.p->fileNo,
+ tresPageid);
+
}//Dblqh::readExecSr()
/* ------------------------------------------------------------------------- */
@@ -17681,6 +17894,13 @@ void Dblqh::readSinglePage(Signal* signal, Uint32 pageNo)
signal->theData[6] = logPagePtr.i;
signal->theData[7] = pageNo;
sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA);
+
+ if (DEBUG_REDO)
+ ndbout_c("readSinglePage 1 page at part: %u file: %u pos: %u",
+ logPartPtr.i,
+ logFilePtr.p->fileNo,
+ pageNo);
+
}//Dblqh::readSinglePage()
/* --------------------------------------------------------------------------
@@ -18263,8 +18483,17 @@ void Dblqh::writeCompletedGciLog(Signal* signal)
jam();
changeMbyte(signal);
}//if
+
logFilePtr.p->remainingWordsInMbyte =
logFilePtr.p->remainingWordsInMbyte - ZCOMPLETED_GCI_LOG_SIZE;
+
+ if (DEBUG_REDO)
+ ndbout_c("writeCompletedGciLog gci: %u part: %u file: %u page: %u",
+ cnewestCompletedGci,
+ logPartPtr.i,
+ logFilePtr.p->fileNo,
+ logFilePtr.p->currentFilepage);
+
writeLogWord(signal, ZCOMPLETED_GCI_TYPE);
writeLogWord(signal, cnewestCompletedGci);
logPartPtr.p->logPartNewestCompletedGCI = cnewestCompletedGci;
@@ -18301,6 +18530,13 @@ void Dblqh::writeDirty(Signal* signal, Uint32 place)
signal->theData[6] = logPagePtr.i;
signal->theData[7] = logPartPtr.p->prevFilepage;
sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
+
+ if (DEBUG_REDO)
+ ndbout_c("writeDirty 1 page at part: %u file: %u pos: %u",
+ logPartPtr.i,
+ logFilePtr.p->fileNo,
+ logPartPtr.p->prevFilepage);
+
}//Dblqh::writeDirty()
/* --------------------------------------------------------------------------
@@ -18902,30 +19138,6 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal)
}//Dblqh::execDUMP_STATE_ORD()
-void Dblqh::execSET_VAR_REQ(Signal* signal)
-{
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
-
- switch (var) {
-
- case NoOfConcurrentCheckpointsAfterRestart:
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case NoOfConcurrentCheckpointsDuringRestart:
- // Valid only during start so value not set.
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- } // switch
-#endif
-}//execSET_VAR_REQ()
-
-
/* **************************************************************** */
/* ---------------------------------------------------------------- */
/* ---------------------- TRIGGER HANDLING ------------------------ */
diff --git a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
index d6c4529bb72..0c5ee128ce0 100644
--- a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
+++ b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
@@ -302,6 +302,7 @@ public:
/* WHEN THE TRIGGER IS DEACTIVATED. */
/* **************************************** */
struct TcDefinedTriggerData {
+ TcDefinedTriggerData() {}
/**
* Trigger id, used to identify the trigger
*/
@@ -702,6 +703,7 @@ public:
Uint8 tckeyrec; // Changed from R
Uint8 tcindxrec;
Uint8 apiFailState; // Changed from R
+ Uint8 singleUserMode;
ReturnSignal returnsignal;
Uint8 timeOutCounter;
@@ -725,6 +727,7 @@ public:
// Index op return context
UintR indexOp;
UintR clientData;
+ Uint32 errorData;
UintR attrInfoLen;
UintR accumulatingIndexOp;
@@ -956,18 +959,30 @@ public:
/* ALL TABLES IN THE SYSTEM. */
/********************************************************/
struct TableRecord {
+ TableRecord() {}
Uint32 currentSchemaVersion;
- Uint8 enabled;
- Uint8 dropping;
+ Uint16 m_flags;
Uint8 tableType;
- Uint8 storedTable;
+ Uint8 singleUserMode;
+
+ enum {
+ TR_ENABLED = 1 << 0,
+ TR_DROPPING = 1 << 1,
+ TR_STORED_TABLE = 1 << 2
+ };
+ Uint8 get_enabled() const { return (m_flags & TR_ENABLED) != 0; }
+ Uint8 get_dropping() const { return (m_flags & TR_DROPPING) != 0; }
+ Uint8 get_storedTable() const { return (m_flags & TR_STORED_TABLE) != 0; }
+ void set_enabled(Uint8 f) { f ? m_flags |= (Uint16)TR_ENABLED : m_flags &= ~(Uint16)TR_ENABLED; }
+ void set_dropping(Uint8 f) { f ? m_flags |= (Uint16)TR_DROPPING : m_flags &= ~(Uint16)TR_DROPPING; }
+ void set_storedTable(Uint8 f) { f ? m_flags |= (Uint16)TR_STORED_TABLE : m_flags &= ~(Uint16)TR_STORED_TABLE; }
Uint8 noOfKeyAttr;
Uint8 hasCharAttr;
Uint8 noOfDistrKeys;
bool checkTable(Uint32 schemaVersion) const {
- return enabled && !dropping &&
+ return get_enabled() && !get_dropping() &&
(table_version_major(schemaVersion) == table_version_major(currentSchemaVersion));
}
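
In the TableRecord hunk above the separate enabled/dropping/storedTable bytes are folded into one m_flags word, with get_/set_ accessors so that call sites such as checkTable() stay one-line changes. A small self-contained sketch of the same pattern, reusing the bit values from the hunk, to show that the accessors behave like the old byte fields:

#include <cassert>
#include <cstdint>

// Same flag layout as TableRecord::m_flags in the hunk above.
enum { TR_ENABLED = 1 << 0, TR_DROPPING = 1 << 1, TR_STORED_TABLE = 1 << 2 };

struct TableFlags {
  uint16_t m_flags = 0;
  bool get_enabled()  const { return (m_flags & TR_ENABLED)  != 0; }
  bool get_dropping() const { return (m_flags & TR_DROPPING) != 0; }
  void set_enabled(bool f)  { if (f) m_flags |= TR_ENABLED;  else m_flags &= ~TR_ENABLED;  }
  void set_dropping(bool f) { if (f) m_flags |= TR_DROPPING; else m_flags &= ~TR_DROPPING; }
};

int main() {
  TableFlags t;
  t.set_enabled(true);     // as in execTC_SCHVERREQ()
  t.set_dropping(false);
  assert(t.get_enabled() && !t.get_dropping());
  t.set_dropping(true);    // as in execPREP_DROP_TAB_REQ()
  assert(t.get_enabled() && t.get_dropping());
  return 0;
}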
@@ -1323,7 +1338,6 @@ private:
void execTIME_SIGNAL(Signal* signal);
void execAPI_FAILREQ(Signal* signal);
void execSCAN_HBREP(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
void execABORT_ALL_REQ(Signal* signal);
@@ -1484,12 +1498,12 @@ private:
void clearCommitAckMarker(ApiConnectRecord * const regApiPtr,
TcConnectRecord * const regTcPtr);
// Trigger and index handling
- bool saveINDXKEYINFO(Signal* signal,
- TcIndexOperation* indexOp,
- const Uint32 *src,
- Uint32 len);
+ int saveINDXKEYINFO(Signal* signal,
+ TcIndexOperation* indexOp,
+ const Uint32 *src,
+ Uint32 len);
bool receivedAllINDXKEYINFO(TcIndexOperation* indexOp);
- bool saveINDXATTRINFO(Signal* signal,
+ int saveINDXATTRINFO(Signal* signal,
TcIndexOperation* indexOp,
const Uint32 *src,
Uint32 len);
@@ -1660,11 +1674,13 @@ private:
UintR cfailure_nr;
UintR coperationsize;
UintR ctcTimer;
+ UintR cDbHbInterval;
ApiConnectRecordPtr tmpApiConnectptr;
UintR tcheckGcpId;
struct TransCounters {
+ TransCounters() {}
enum { Off, Timer, Started } c_trans_status;
UintR cattrinfoCount;
UintR ctransCount;
@@ -1803,6 +1819,7 @@ private:
*/
public:
struct CommitAckMarker {
+ CommitAckMarker() {}
Uint32 transid1;
Uint32 transid2;
union { Uint32 nextPool; Uint32 nextHash; };
@@ -1835,9 +1852,14 @@ private:
Uint32 transid2);
void removeMarkerForFailedAPI(Signal* signal, Uint32 nodeId, Uint32 bucket);
- bool getAllowStartTransaction() const {
- if(getNodeState().getSingleUserMode())
- return true;
+ bool getAllowStartTransaction(Uint32 nodeId, Uint32 table_single_user_mode) const {
+ if (unlikely(getNodeState().getSingleUserMode()))
+ {
+ if (getNodeState().getSingleUserApi() == nodeId || table_single_user_mode)
+ return true;
+ else
+ return false;
+ }
return getNodeState().startLevel < NodeState::SL_STOPPING_2;
}
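
The rewritten getAllowStartTransaction() above only lets new transactions start during single user mode when they come from the designated single-user API node or touch a table flagged singleUserMode. A reduced sketch of that rule, with the node-state accessors flattened into plain parameters (the SL_STOPPING levels are collapsed into one flag here for brevity):

#include <cassert>

// Simplified form of Dbtc::getAllowStartTransaction() from the hunk above.
bool allowStartTransaction(bool clusterSingleUserMode,
                           unsigned singleUserApiNode,
                           unsigned requestingNode,
                           bool tableSingleUserMode,
                           bool stoppingAtLevel2OrLater) {
  if (clusterSingleUserMode)
    return requestingNode == singleUserApiNode || tableSingleUserMode;
  return !stoppingAtLevel2OrLater;   // startLevel < SL_STOPPING_2
}

int main() {
  assert( allowStartTransaction(true,  5, 5, false, false)); // the allowed API node
  assert(!allowStartTransaction(true,  5, 6, false, false)); // any other API node
  assert( allowStartTransaction(true,  5, 6, true,  false)); // table allows single user access
  assert( allowStartTransaction(false, 0, 6, false, false)); // normal operation
  return 0;
}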
@@ -1950,5 +1972,8 @@ private:
// those variables should be removed and exchanged for stack
// variable communication.
/**************************************************************************/
+
+ Uint32 c_gcp_ref;
};
+
#endif
diff --git a/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp b/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
index 0b46f598a89..73149f0b6fd 100644
--- a/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
+++ b/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
@@ -256,7 +256,6 @@ Dbtc::Dbtc(const class Configuration & conf):
addRecSignal(GSN_INCL_NODEREQ, &Dbtc::execINCL_NODEREQ);
addRecSignal(GSN_TIME_SIGNAL, &Dbtc::execTIME_SIGNAL);
addRecSignal(GSN_API_FAILREQ, &Dbtc::execAPI_FAILREQ);
- addRecSignal(GSN_SET_VAR_REQ, &Dbtc::execSET_VAR_REQ);
addRecSignal(GSN_TC_COMMIT_ACK, &Dbtc::execTC_COMMIT_ACK);
addRecSignal(GSN_ABORT_ALL_REQ, &Dbtc::execABORT_ALL_REQ);
diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
index 2b2e0e649a4..22fba0bd82c 100644
--- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -20,6 +20,7 @@
#include <RefConvert.hpp>
#include <ndb_limits.h>
#include <my_sys.h>
+#include <ndb_rand.h>
#include <signaldata/EventReport.hpp>
#include <signaldata/TcKeyReq.hpp>
@@ -327,19 +328,21 @@ void Dbtc::execTC_SCHVERREQ(Signal* signal)
tabptr.i = signal->theData[0];
ptrCheckGuard(tabptr, ctabrecFilesize, tableRecord);
tabptr.p->currentSchemaVersion = signal->theData[1];
- tabptr.p->storedTable = (bool)signal->theData[2];
+ tabptr.p->m_flags = 0;
+ tabptr.p->set_storedTable((bool)signal->theData[2]);
BlockReference retRef = signal->theData[3];
tabptr.p->tableType = (Uint8)signal->theData[4];
BlockReference retPtr = signal->theData[5];
Uint32 noOfKeyAttr = signal->theData[6];
+ tabptr.p->singleUserMode = (Uint8)signal->theData[7];
ndbrequire(noOfKeyAttr <= MAX_ATTRIBUTES_IN_INDEX);
const KeyDescriptor* desc = g_key_descriptor_pool.getPtr(tabptr.i);
ndbrequire(noOfKeyAttr == desc->noOfKeyAttr);
- ndbrequire(tabptr.p->enabled == false);
- tabptr.p->enabled = true;
- tabptr.p->dropping = false;
+ ndbrequire(tabptr.p->get_enabled() == false);
+ tabptr.p->set_enabled(true);
+ tabptr.p->set_dropping(false);
tabptr.p->noOfKeyAttr = desc->noOfKeyAttr;
tabptr.p->hasCharAttr = desc->hasCharAttr;
tabptr.p->noOfDistrKeys = desc->noOfDistrKeys;
@@ -363,7 +366,7 @@ Dbtc::execPREP_DROP_TAB_REQ(Signal* signal)
Uint32 senderRef = req->senderRef;
Uint32 senderData = req->senderData;
- if(!tabPtr.p->enabled){
+ if(!tabPtr.p->get_enabled()){
jam();
PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend();
ref->senderRef = reference();
@@ -375,7 +378,7 @@ Dbtc::execPREP_DROP_TAB_REQ(Signal* signal)
return;
}
- if(tabPtr.p->dropping){
+ if(tabPtr.p->get_dropping()){
jam();
PrepDropTabRef* ref = (PrepDropTabRef*)signal->getDataPtrSend();
ref->senderRef = reference();
@@ -387,7 +390,7 @@ Dbtc::execPREP_DROP_TAB_REQ(Signal* signal)
return;
}
- tabPtr.p->dropping = true;
+ tabPtr.p->set_dropping(true);
tabPtr.p->dropTable.senderRef = senderRef;
tabPtr.p->dropTable.senderData = senderData;
@@ -423,7 +426,7 @@ Dbtc::execWAIT_DROP_TAB_CONF(Signal* signal)
tabPtr.i = conf->tableId;
ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
- ndbrequire(tabPtr.p->dropping == true);
+ ndbrequire(tabPtr.p->get_dropping() == true);
Uint32 nodeId = refToNode(conf->senderRef);
tabPtr.p->dropTable.waitDropTabCount.clearWaitingFor(nodeId);
@@ -453,7 +456,7 @@ Dbtc::execWAIT_DROP_TAB_REF(Signal* signal)
tabPtr.i = ref->tableId;
ptrCheckGuard(tabPtr, ctabrecFilesize, tableRecord);
- ndbrequire(tabPtr.p->dropping == true);
+ ndbrequire(tabPtr.p->get_dropping() == true);
Uint32 nodeId = refToNode(ref->senderRef);
tabPtr.p->dropTable.waitDropTabCount.clearWaitingFor(nodeId);
@@ -490,7 +493,7 @@ Dbtc::checkWaitDropTabFailedLqh(Signal* signal, Uint32 nodeId, Uint32 tableId)
for(Uint32 i = 0; i<RT_BREAK && tabPtr.i < ctabrecFilesize; i++, tabPtr.i++){
jam();
ptrAss(tabPtr, tableRecord);
- if(tabPtr.p->enabled && tabPtr.p->dropping){
+ if(tabPtr.p->get_enabled() && tabPtr.p->get_dropping()){
if(tabPtr.p->dropTable.waitDropTabCount.isWaitingFor(nodeId)){
jam();
conf->senderRef = calcLqhBlockRef(nodeId);
@@ -531,7 +534,7 @@ Dbtc::execDROP_TAB_REQ(Signal* signal)
Uint32 senderData = req->senderData;
DropTabReq::RequestType rt = (DropTabReq::RequestType)req->requestType;
- if(!tabPtr.p->enabled && rt == DropTabReq::OnlineDropTab){
+ if(!tabPtr.p->get_enabled() && rt == DropTabReq::OnlineDropTab){
jam();
DropTabRef* ref = (DropTabRef*)signal->getDataPtrSend();
ref->senderRef = reference();
@@ -543,7 +546,7 @@ Dbtc::execDROP_TAB_REQ(Signal* signal)
return;
}
- if(!tabPtr.p->dropping && rt == DropTabReq::OnlineDropTab){
+ if(!tabPtr.p->get_dropping() && rt == DropTabReq::OnlineDropTab){
jam();
DropTabRef* ref = (DropTabRef*)signal->getDataPtrSend();
ref->senderRef = reference();
@@ -555,8 +558,8 @@ Dbtc::execDROP_TAB_REQ(Signal* signal)
return;
}
- tabPtr.p->enabled = false;
- tabPtr.p->dropping = false;
+ tabPtr.p->set_enabled(false);
+ tabPtr.p->set_dropping(false);
DropTabConf * conf = (DropTabConf*)signal->getDataPtrSend();
conf->tableId = tabPtr.i;
@@ -640,6 +643,10 @@ void Dbtc::execREAD_CONFIG_REQ(Signal* signal)
ndb_mgm_get_int_parameter(p, CFG_DB_TRANSACTION_DEADLOCK_TIMEOUT, &val);
set_timeout_value(val);
+ val = 1500;
+ ndb_mgm_get_int_parameter(p, CFG_DB_HEARTBEAT_INTERVAL, &val);
+ cDbHbInterval = (val < 10) ? 10 : val;
+
val = 3000;
ndb_mgm_get_int_parameter(p, CFG_DB_TRANSACTION_INACTIVE_TIMEOUT, &val);
set_appl_timeout_value(val);
@@ -881,6 +888,12 @@ void Dbtc::execREAD_NODESCONF(Signal* signal)
hostptr.p->hostStatus = HS_ALIVE;
c_alive_nodes.set(i);
}//if
+
+ if (NodeBitmask::get(readNodes->startedNodes, i))
+ {
+ jam();
+ hostptr.p->m_nf_bits = HostRecord::NF_STARTED;
+ }
}//if
}//for
ndbsttorry010Lab(signal);
@@ -1199,16 +1212,14 @@ void Dbtc::execTCSEIZEREQ(Signal* signal)
const NodeId senderNodeId = refToNode(tapiBlockref);
const bool local = senderNodeId == getOwnNodeId() || senderNodeId == 0;
- if(!(senderNodeId == getNodeState().getSingleUserApi()) &&
- !getNodeState().getSingleUserMode()) {
- if(!(sl==NodeState::SL_SINGLEUSER &&
- senderNodeId == getNodeState().getSingleUserApi())) {
+ {
+ {
if (!(sl == NodeState::SL_STARTED ||
(sl == NodeState::SL_STARTING && local == true))) {
jam();
- Uint32 errCode;
- if(!(sl == NodeState::SL_SINGLEUSER && local))
+ Uint32 errCode = 0;
+ if(!local)
{
switch(sl){
case NodeState::SL_STARTING:
@@ -1216,6 +1227,8 @@ void Dbtc::execTCSEIZEREQ(Signal* signal)
break;
case NodeState::SL_STOPPING_1:
case NodeState::SL_STOPPING_2:
+ if (getNodeState().getSingleUserMode())
+ break;
case NodeState::SL_STOPPING_3:
case NodeState::SL_STOPPING_4:
if(getNodeState().stopping.systemShutdown)
@@ -1224,16 +1237,18 @@ void Dbtc::execTCSEIZEREQ(Signal* signal)
errCode = ZNODE_SHUTDOWN_IN_PROGRESS;
break;
case NodeState::SL_SINGLEUSER:
- errCode = ZCLUSTER_IN_SINGLEUSER_MODE;
break;
default:
errCode = ZWRONG_STATE;
break;
}
- signal->theData[0] = tapiPointer;
- signal->theData[1] = errCode;
- sendSignal(tapiBlockref, GSN_TCSEIZEREF, signal, 2, JBB);
- return;
+ if (errCode)
+ {
+ signal->theData[0] = tapiPointer;
+ signal->theData[1] = errCode;
+ sendSignal(tapiBlockref, GSN_TCSEIZEREF, signal, 2, JBB);
+ return;
+ }
}//if (!(sl == SL_SINGLEUSER))
} //if
}
@@ -1720,8 +1735,14 @@ Dbtc::TCKEY_abort(Signal* signal, int place)
* Initialize object before starting error handling
*/
initApiConnectRec(signal, apiConnectptr.p, true);
+start_failure:
switch(getNodeState().startLevel){
case NodeState::SL_STOPPING_2:
+ if (getNodeState().getSingleUserMode())
+ {
+ terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE;
+ break;
+ }
case NodeState::SL_STOPPING_3:
case NodeState::SL_STOPPING_4:
if(getNodeState().stopping.systemShutdown)
@@ -1732,6 +1753,12 @@ Dbtc::TCKEY_abort(Signal* signal, int place)
case NodeState::SL_SINGLEUSER:
terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE;
break;
+ case NodeState::SL_STOPPING_1:
+ if (getNodeState().getSingleUserMode())
+ {
+ terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE;
+ break;
+ }
default:
terrorCode = ZWRONG_STATE;
break;
@@ -1753,6 +1780,13 @@ Dbtc::TCKEY_abort(Signal* signal, int place)
return;
}
+ case 60:
+ {
+ jam();
+ initApiConnectRec(signal, apiConnectptr.p, true);
+ apiConnectptr.p->m_exec_flag = 1;
+ goto start_failure;
+ }
default:
jam();
systemErrorLab(signal, __LINE__);
@@ -1760,9 +1794,18 @@ Dbtc::TCKEY_abort(Signal* signal, int place)
}//switch
}
+static
+inline
+bool
+compare_transid(Uint32* val0, Uint32* val1)
+{
+ Uint32 tmp0 = val0[0] ^ val1[0];
+ Uint32 tmp1 = val0[1] ^ val1[1];
+ return (tmp0 | tmp1) == 0;
+}
+
void Dbtc::execKEYINFO(Signal* signal)
{
- UintR compare_transid1, compare_transid2;
jamEntry();
apiConnectptr.i = signal->theData[0];
tmaxData = 20;
@@ -1772,10 +1815,8 @@ void Dbtc::execKEYINFO(Signal* signal)
}//if
ptrAss(apiConnectptr, apiConnectRecord);
ttransid_ptr = 1;
- compare_transid1 = apiConnectptr.p->transid[0] ^ signal->theData[1];
- compare_transid2 = apiConnectptr.p->transid[1] ^ signal->theData[2];
- compare_transid1 = compare_transid1 | compare_transid2;
- if (compare_transid1 != 0) {
+ if (compare_transid(apiConnectptr.p->transid, signal->theData+1) == false)
+ {
TCKEY_abort(signal, 19);
return;
}//if
@@ -2076,7 +2117,6 @@ void Dbtc::saveAttrbuf(Signal* signal)
void Dbtc::execATTRINFO(Signal* signal)
{
- UintR compare_transid1, compare_transid2;
UintR Tdata1 = signal->theData[0];
UintR Tlength = signal->length();
UintR TapiConnectFilesize = capiConnectFilesize;
@@ -2091,17 +2131,13 @@ void Dbtc::execATTRINFO(Signal* signal)
return;
}//if
- UintR Tdata2 = signal->theData[1];
- UintR Tdata3 = signal->theData[2];
ApiConnectRecord * const regApiPtr = &localApiConnectRecord[Tdata1];
- compare_transid1 = regApiPtr->transid[0] ^ Tdata2;
- compare_transid2 = regApiPtr->transid[1] ^ Tdata3;
apiConnectptr.p = regApiPtr;
- compare_transid1 = compare_transid1 | compare_transid2;
- if (compare_transid1 != 0) {
+ if (compare_transid(regApiPtr->transid, signal->theData+1) == false)
+ {
DEBUG("Drop ATTRINFO, wrong transid, lenght="<<Tlength
- << " transid("<<hex<<Tdata2<<", "<<Tdata3);
+ << " transid("<<hex<<signal->theData[1]<<", "<<signal->theData[2]);
TCKEY_abort(signal, 19);
return;
}//if
@@ -2372,6 +2408,7 @@ void Dbtc::initApiConnectRec(Signal* signal,
regApiPtr->buddyPtr = RNIL;
regApiPtr->currSavePointId = 0;
regApiPtr->m_transaction_nodes.clear();
+ regApiPtr->singleUserMode = 0;
// Trigger data
releaseFiredTriggerData(&regApiPtr->theFiredTriggers),
// Index data
@@ -2481,6 +2518,7 @@ Dbtc::seizeCacheRecord(Signal* signal)
/*****************************************************************************/
void Dbtc::execTCKEYREQ(Signal* signal)
{
+ Uint32 sendersNodeId = refToNode(signal->getSendersBlockRef());
UintR compare_transid1, compare_transid2;
UintR titcLenAiInTckeyreq;
UintR TkeyLength;
@@ -2524,9 +2562,12 @@ void Dbtc::execTCKEYREQ(Signal* signal)
bool isIndexOpReturn = regApiPtr->indexOpReturn;
regApiPtr->isIndexOp = false; // Reset marker
regApiPtr->m_exec_flag |= TexecFlag;
+ TableRecordPtr localTabptr;
+ localTabptr.i = TtabIndex;
+ localTabptr.p = &tableRecord[TtabIndex];
switch (regApiPtr->apiConnectstate) {
case CS_CONNECTED:{
- if (TstartFlag == 1 && getAllowStartTransaction() == true){
+ if (TstartFlag == 1 && getAllowStartTransaction(sendersNodeId, localTabptr.p->singleUserMode) == true){
//---------------------------------------------------------------------
// Initialise API connect record if transaction is started.
//---------------------------------------------------------------------
@@ -2534,7 +2575,7 @@ void Dbtc::execTCKEYREQ(Signal* signal)
initApiConnectRec(signal, regApiPtr);
regApiPtr->m_exec_flag = TexecFlag;
} else {
- if(getAllowStartTransaction() == true){
+ if(getAllowStartTransaction(sendersNodeId, localTabptr.p->singleUserMode) == true){
/*------------------------------------------------------------------
* WE EXPECTED A START TRANSACTION. SINCE NO OPERATIONS HAVE BEEN
* RECEIVED WE INDICATE THIS BY SETTING FIRST_TC_CONNECT TO RNIL TO
@@ -2544,9 +2585,9 @@ void Dbtc::execTCKEYREQ(Signal* signal)
return;
} else {
/**
- * getAllowStartTransaction() == false
+ * getAllowStartTransaction(sendersNodeId) == false
*/
- TCKEY_abort(signal, 57);
+ TCKEY_abort(signal, TexecFlag ? 60 : 57);
return;
}//if
}
@@ -2561,6 +2602,13 @@ void Dbtc::execTCKEYREQ(Signal* signal)
* the state will be CS_STARTED
*/
jam();
+ if (unlikely(getNodeState().getSingleUserMode()) &&
+ getNodeState().getSingleUserApi() != sendersNodeId &&
+ !localTabptr.p->singleUserMode)
+ {
+ TCKEY_abort(signal, TexecFlag ? 60 : 57);
+ return;
+ }
initApiConnectRec(signal, regApiPtr);
regApiPtr->m_exec_flag = TexecFlag;
} else {
@@ -2581,6 +2629,10 @@ void Dbtc::execTCKEYREQ(Signal* signal)
case CS_ABORTING:
if (regApiPtr->abortState == AS_IDLE) {
if (TstartFlag == 1) {
+ if(getAllowStartTransaction(sendersNodeId, localTabptr.p->singleUserMode) == false){
+ TCKEY_abort(signal, TexecFlag ? 60 : 57);
+ return;
+ }
//--------------------------------------------------------------------
// Previous transaction had been aborted and the abort was completed.
// It is then OK to start a new transaction again.
@@ -2644,9 +2696,6 @@ void Dbtc::execTCKEYREQ(Signal* signal)
return;
}//switch
- TableRecordPtr localTabptr;
- localTabptr.i = TtabIndex;
- localTabptr.p = &tableRecord[TtabIndex];
if (localTabptr.p->checkTable(tcKeyReq->tableSchemaVersion)) {
;
} else {
@@ -2705,6 +2754,8 @@ void Dbtc::execTCKEYREQ(Signal* signal)
regTcPtr->savePointId = regApiPtr->currSavePointId;
regApiPtr->executingIndexOp = RNIL;
+ regApiPtr->singleUserMode |= 1 << localTabptr.p->singleUserMode;
+
if (TcKeyReq::getExecutingTrigger(Treqinfo)) {
// Save the TcOperationPtr for the firing operation
regTcPtr->triggeringOperation = TsenderData;
@@ -2836,7 +2887,7 @@ void Dbtc::execTCKEYREQ(Signal* signal)
* THIS VARIABLE CONTROLS THE INTERVAL BETWEEN LCP'S AND
* TEMP TABLES DON'T PARTICIPATE.
* -------------------------------------------------------------------- */
- if (localTabptr.p->storedTable) {
+ if (localTabptr.p->get_storedTable()) {
coperationsize = ((Toperationsize + TattrLen) + TkeyLength) + 17;
}
c_counters.cwriteCount = TwriteCount + 1;
@@ -4695,6 +4746,7 @@ void Dbtc::copyApi(Signal* signal)
regApiPtr->lqhkeyconfrec = Tlqhkeyconfrec;
regApiPtr->commitAckMarker = TcommitAckMarker;
regApiPtr->m_transaction_nodes = Tnodes;
+ regApiPtr->singleUserMode = 0;
gcpPtr.i = TgcpPointer;
ptrCheckGuard(gcpPtr, TgcpFilesize, localGcpRecord);
@@ -4706,6 +4758,7 @@ void Dbtc::copyApi(Signal* signal)
regTmpApiPtr->firstTcConnect = RNIL;
regTmpApiPtr->lastTcConnect = RNIL;
regTmpApiPtr->m_transaction_nodes.clear();
+ regTmpApiPtr->singleUserMode = 0;
releaseAllSeizedIndexOperations(regTmpApiPtr);
}//Dbtc::copyApi()
@@ -5058,6 +5111,7 @@ void Dbtc::releaseDirtyWrite(Signal* signal)
void Dbtc::execLQHKEYREF(Signal* signal)
{
const LqhKeyRef * const lqhKeyRef = (LqhKeyRef *)signal->getDataPtr();
+ Uint32 indexId = 0;
jamEntry();
UintR compare_transid1, compare_transid2;
@@ -5109,6 +5163,9 @@ void Dbtc::execLQHKEYREF(Signal* signal)
ptrCheckGuard(opPtr, ctcConnectFilesize, localTcConnectRecord);
// The operation executed an index trigger
+ TcIndexData* indexData = c_theIndexes.getPtr(currentIndexId);
+ indexId = indexData->indexId;
+ regApiPtr->errorData = indexId;
const Uint32 opType = regTcPtr->operation;
if (errCode == ZALREADYEXIST)
errCode = terrorCode = ZNOTUNIQUE;
@@ -5121,7 +5178,6 @@ void Dbtc::execLQHKEYREF(Signal* signal)
} else {
jam();
/** ZDELETE && NOT_FOUND */
- TcIndexData* indexData = c_theIndexes.getPtr(currentIndexId);
if(indexData->indexState == IS_BUILDING && state != CS_ABORTING){
jam();
/**
@@ -5193,12 +5249,14 @@ void Dbtc::execLQHKEYREF(Signal* signal)
jam();
regApiPtr->lqhkeyreqrec--; // Compensate for extra during read
tcKeyRef->connectPtr = indexOp;
+ tcKeyRef->errorData = indexId;
EXECUTE_DIRECT(DBTC, GSN_TCKEYREF, signal, TcKeyRef::SignalLength);
apiConnectptr.i = save;
apiConnectptr.p = regApiPtr;
} else {
jam();
tcKeyRef->connectPtr = clientData;
+ tcKeyRef->errorData = indexId;
sendSignal(regApiPtr->ndbapiBlockref,
GSN_TCKEYREF, signal, TcKeyRef::SignalLength, JBB);
}//if
@@ -5410,11 +5468,32 @@ void Dbtc::execTC_COMMITREQ(Signal* signal)
}
}//Dbtc::execTC_COMMITREQ()
+/**
+ * TCROLLBACKREQ
+ *
+ * Format is:
+ *
+ * theData[0] = apiConnectptr
+ * theData[1] = transid[0]
+ * theData[2] = transid[1]
+ * OPTIONAL theData[3] = flags
+ *
+ * Flags:
+ * 0x1 = potentiallyBad data from API (try not to assert)
+ */
void Dbtc::execTCROLLBACKREQ(Signal* signal)
{
+ bool potentiallyBad= false;
UintR compare_transid1, compare_transid2;
jamEntry();
+
+ if(unlikely((signal->getLength() >= 4) && (signal->theData[3] & 0x1)))
+ {
+ ndbout_c("Trying to roll back potentially bad txn\n");
+ potentiallyBad= true;
+ }
+
apiConnectptr.i = signal->theData[0];
if (apiConnectptr.i >= capiConnectFilesize) {
goto TC_ROLL_warning;
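
The comment block added above documents the TCROLLBACKREQ word layout, including the new optional flags word whose 0x1 bit downgrades the warning and system-error paths. A minimal sender-side sketch of that layout; the signal buffer and helper here are placeholders, not the real NDB API.

#include <cstdint>

// Word layout documented above:
//   [0] = apiConnectptr, [1] = transid[0], [2] = transid[1], [3] = optional flags.
enum { TCROLLBACK_POTENTIALLY_BAD = 0x1 };

struct FakeSignal { uint32_t theData[25]; uint32_t length; };

FakeSignal makeTcRollbackReq(uint32_t apiConnectPtr,
                             uint32_t transid1, uint32_t transid2,
                             bool potentiallyBad) {
  FakeSignal s = {};
  s.theData[0] = apiConnectPtr;
  s.theData[1] = transid1;
  s.theData[2] = transid2;
  s.length = 3;
  if (potentiallyBad) {          // ask DBTC not to assert on mismatching state
    s.theData[3] = TCROLLBACK_POTENTIALLY_BAD;
    s.length = 4;
  }
  return s;
}

int main() {
  FakeSignal s = makeTcRollbackReq(7u, 0x12345678u, 0x9abcdef0u, true);
  return s.length == 4 ? 0 : 1;
}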
@@ -5501,12 +5580,14 @@ void Dbtc::execTCROLLBACKREQ(Signal* signal)
TC_ROLL_warning:
jam();
- warningHandlerLab(signal, __LINE__);
+ if(likely(potentiallyBad==false))
+ warningHandlerLab(signal, __LINE__);
return;
TC_ROLL_system_error:
jam();
- systemErrorLab(signal, __LINE__);
+ if(likely(potentiallyBad==false))
+ systemErrorLab(signal, __LINE__);
return;
}//Dbtc::execTCROLLBACKREQ()
@@ -6161,9 +6242,11 @@ and otherwise we spread it out 310 ms.
void Dbtc::timeOutLoopStartLab(Signal* signal, Uint32 api_con_ptr)
{
Uint32 end_ptr, time_passed, time_out_value, mask_value;
+ Uint32 old_mask_value= 0;
const Uint32 api_con_sz= capiConnectFilesize;
const Uint32 tc_timer= ctcTimer;
const Uint32 time_out_param= ctimeOutValue;
+ const Uint32 old_time_out_param= c_abortRec.oldTimeOutValue;
ctimeOutCheckHeartbeat = tc_timer;
@@ -6184,16 +6267,50 @@ void Dbtc::timeOutLoopStartLab(Signal* signal, Uint32 api_con_ptr)
jam();
mask_value= 31;
}
+ if (time_out_param != old_time_out_param &&
+ getNodeState().getSingleUserMode())
+ {
+ // abort during single user mode, use old_mask_value as flag
+ // and calculate value to be used for connections with allowed api
+ if (old_time_out_param > 300) {
+ jam();
+ old_mask_value= 63;
+ } else if (old_time_out_param < 30) {
+ jam();
+ old_mask_value= 7;
+ } else {
+ jam();
+ old_mask_value= 31;
+ }
+ }
for ( ; api_con_ptr < end_ptr; api_con_ptr++) {
Uint32 api_timer= getApiConTimer(api_con_ptr);
jam();
if (api_timer != 0) {
- time_out_value= time_out_param + (api_con_ptr & mask_value);
+ Uint32 error= ZTIME_OUT_ERROR;
+ time_out_value= time_out_param + (ndb_rand() & mask_value);
+ if (unlikely(old_mask_value)) // abort during single user mode
+ {
+ apiConnectptr.i = api_con_ptr;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ if ((getNodeState().getSingleUserApi() ==
+ refToNode(apiConnectptr.p->ndbapiBlockref)) ||
+ !(apiConnectptr.p->singleUserMode & (1 << NDB_SUM_LOCKED)))
+ {
+ // api allowed during single user, use original timeout
+ time_out_value=
+ old_time_out_param + (api_con_ptr & old_mask_value);
+ }
+ else
+ {
+ error= ZCLUSTER_IN_SINGLEUSER_MODE;
+ }
+ }
time_passed= tc_timer - api_timer;
if (time_passed > time_out_value)
{
jam();
- timeOutFoundLab(signal, api_con_ptr, ZTIME_OUT_ERROR);
+ timeOutFoundLab(signal, api_con_ptr, error);
api_con_ptr++;
break;
}
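
While the old and new timeout values differ during single user mode, the timeout loop above distinguishes between connections from the allowed API node or transactions that have not touched a locked table (these keep the original timeout and error) and everything else (these use the shortened timeout and, when it fires, are aborted with ZCLUSTER_IN_SINGLEUSER_MODE instead of ZTIME_OUT_ERROR). A reduced sketch of that per-connection decision; the NDB_SUM_LOCKED bit test is represented by a plain boolean here.

#include <cassert>

enum TimeoutAction { KEEP_OLD_TIMEOUT, ABORT_SINGLE_USER_MODE };

// Per-connection decision taken inside the single-user branch above: the
// designated API node, and connections that hold no single-user lock, keep
// the original timeout; the rest get the single-user-mode abort error.
TimeoutAction singleUserTimeoutAction(unsigned apiNode,
                                      unsigned singleUserApiNode,
                                      bool holdsSingleUserLock) {
  if (apiNode == singleUserApiNode || !holdsSingleUserLock)
    return KEEP_OLD_TIMEOUT;
  return ABORT_SINGLE_USER_MODE;
}

int main() {
  assert(singleUserTimeoutAction(5, 5, true)  == KEEP_OLD_TIMEOUT);
  assert(singleUserTimeoutAction(6, 5, false) == KEEP_OLD_TIMEOUT);
  assert(singleUserTimeoutAction(6, 5, true)  == ABORT_SINGLE_USER_MODE);
  return 0;
}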
@@ -6233,7 +6350,8 @@ void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr, Uint32 errCode)
<< " code: " << errCode);
switch (apiConnectptr.p->apiConnectstate) {
case CS_STARTED:
- if(apiConnectptr.p->lqhkeyreqrec == apiConnectptr.p->lqhkeyconfrec){
+ if(apiConnectptr.p->lqhkeyreqrec == apiConnectptr.p->lqhkeyconfrec &&
+ errCode != ZCLUSTER_IN_SINGLEUSER_MODE){
jam();
/*
We are waiting for application to continue the transaction. In this
@@ -6276,6 +6394,7 @@ void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr, Uint32 errCode)
// conditions should get us here. We ignore it.
/*------------------------------------------------------------------*/
case CS_PREPARE_TO_COMMIT:
+ {
jam();
/*------------------------------------------------------------------*/
/* WE ARE WAITING FOR DIH TO COMMIT THE TRANSACTION. WE SIMPLY*/
@@ -6284,12 +6403,16 @@ void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr, Uint32 errCode)
// To ensure against strange bugs we crash the system if we have passed
// time-out period by a factor of 10 and it is also at least 5 seconds.
/*------------------------------------------------------------------*/
- if (((ctcTimer - getApiConTimer(apiConnectptr.i)) > (10 * ctimeOutValue)) &&
- ((ctcTimer - getApiConTimer(apiConnectptr.i)) > 500)) {
- jam();
- systemErrorLab(signal, __LINE__);
+ Uint32 time_passed = ctcTimer - getApiConTimer(apiConnectptr.i);
+ if (time_passed > 500 &&
+ time_passed > (5 * cDbHbInterval) &&
+ time_passed > (10 * ctimeOutValue))
+ {
+ jam();
+ systemErrorLab(signal, __LINE__);
}//if
break;
+ }
case CS_COMMIT_SENT:
jam();
/*------------------------------------------------------------------*/
@@ -6805,6 +6928,33 @@ void Dbtc::timeOutFoundFragLab(Signal* signal, UintR TscanConPtr)
c_scan_frag_pool.getPtr(ptr, TscanConPtr);
DEBUG(TscanConPtr << " timeOutFoundFragLab: scanFragState = "<< ptr.p->scanFragState);
+ const Uint32 time_out_param= ctimeOutValue;
+ const Uint32 old_time_out_param= c_abortRec.oldTimeOutValue;
+
+ if (unlikely(time_out_param != old_time_out_param &&
+ getNodeState().getSingleUserMode()))
+ {
+ jam();
+ ScanRecordPtr scanptr;
+ scanptr.i = ptr.p->scanRec;
+ ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
+ ApiConnectRecordPtr TlocalApiConnectptr;
+ TlocalApiConnectptr.i = scanptr.p->scanApiRec;
+ ptrCheckGuard(TlocalApiConnectptr, capiConnectFilesize, apiConnectRecord);
+
+ if (refToNode(TlocalApiConnectptr.p->ndbapiBlockref) ==
+ getNodeState().getSingleUserApi())
+ {
+ jam();
+ Uint32 val = ctcTimer - ptr.p->scanFragTimer;
+ if (val <= old_time_out_param)
+ {
+ jam();
+ goto next;
+ }
+ }
+ }
+
/*-------------------------------------------------------------------------*/
// The scan fragment has expired its timeout. Check its state to decide
// what to do.
@@ -6866,6 +7016,7 @@ void Dbtc::timeOutFoundFragLab(Signal* signal, UintR TscanConPtr)
break;
}//switch
+next:
signal->theData[0] = TcContinueB::ZCONTINUE_TIME_OUT_FRAG_CONTROL;
signal->theData[1] = TscanConPtr + 1;
sendSignal(cownref, GSN_CONTINUEB, signal, 2, JBB);
@@ -6893,6 +7044,7 @@ void Dbtc::timeOutFoundFragLab(Signal* signal, UintR TscanConPtr)
void Dbtc::execGCP_NOMORETRANS(Signal* signal)
{
jamEntry();
+ c_gcp_ref = signal->theData[0];
tcheckGcpId = signal->theData[1];
if (cfirstgcp != RNIL) {
jam();
@@ -6944,7 +7096,6 @@ void Dbtc::execGCP_NOMORETRANS(Signal* signal)
/*****************************************************************************/
void Dbtc::execNODE_FAILREP(Signal* signal)
{
- HostRecordPtr tmpHostptr;
jamEntry();
NodeFailRep * const nodeFail = (NodeFailRep *)&signal->theData[0];
@@ -8097,6 +8248,7 @@ void Dbtc::initApiConnectFail(Signal* signal)
apiConnectptr.p->ndbapiConnect = 0;
apiConnectptr.p->buddyPtr = RNIL;
apiConnectptr.p->m_transaction_nodes.clear();
+ apiConnectptr.p->singleUserMode = 0;
setApiConTimer(apiConnectptr.i, 0, __LINE__);
switch(ttransStatus){
case LqhTransConf::Committed:
@@ -8696,6 +8848,14 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
}
}
+ if (getNodeState().startLevel == NodeState::SL_SINGLEUSER &&
+ getNodeState().getSingleUserApi() !=
+ refToNode(apiConnectptr.p->ndbapiBlockref))
+ {
+ errCode = ZCLUSTER_IN_SINGLEUSER_MODE;
+ goto SCAN_TAB_error;
+ }
+
seizeTcConnect(signal);
tcConnectptr.p->apiConnect = apiConnectptr.i;
tcConnectptr.p->tcConnectstate = OS_WAIT_SCAN;
@@ -9936,6 +10096,7 @@ void Dbtc::sendScanTabConf(Signal* signal, ScanRecordPtr scanPtr) {
void Dbtc::gcpTcfinished(Signal* signal)
{
+ signal->theData[0] = c_gcp_ref;
signal->theData[1] = tcheckGcpId;
sendSignal(cdihblockref, GSN_GCP_TCFINISHED, signal, 2, JBB);
}//Dbtc::gcpTcfinished()
@@ -9984,6 +10145,7 @@ void Dbtc::initApiConnect(Signal* signal)
apiConnectptr.p->buddyPtr = RNIL;
apiConnectptr.p->currSavePointId = 0;
apiConnectptr.p->m_transaction_nodes.clear();
+ apiConnectptr.p->singleUserMode = 0;
}//for
apiConnectptr.i = tiacTmp - 1;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
@@ -10012,6 +10174,7 @@ void Dbtc::initApiConnect(Signal* signal)
apiConnectptr.p->buddyPtr = RNIL;
apiConnectptr.p->currSavePointId = 0;
apiConnectptr.p->m_transaction_nodes.clear();
+ apiConnectptr.p->singleUserMode = 0;
}//for
apiConnectptr.i = (2 * tiacTmp) - 1;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
@@ -10040,6 +10203,7 @@ void Dbtc::initApiConnect(Signal* signal)
apiConnectptr.p->buddyPtr = RNIL;
apiConnectptr.p->currSavePointId = 0;
apiConnectptr.p->m_transaction_nodes.clear();
+ apiConnectptr.p->singleUserMode = 0;
}//for
apiConnectptr.i = (3 * tiacTmp) - 1;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
@@ -10105,6 +10269,7 @@ void Dbtc::inithost(Signal* signal)
hostptr.p->noOfWordsTCINDXCONF = 0;
hostptr.p->noOfPackedWordsLqh = 0;
hostptr.p->hostLqhBlockRef = calcLqhBlockRef(hostptr.i);
+ hostptr.p->m_nf_bits = 0;
}//for
c_alive_nodes.clear();
}//Dbtc::inithost()
@@ -10224,10 +10389,11 @@ void Dbtc::initTable(Signal* signal)
refresh_watch_dog();
ptrAss(tabptr, tableRecord);
tabptr.p->currentSchemaVersion = 0;
- tabptr.p->storedTable = true;
+ tabptr.p->m_flags = 0;
+ tabptr.p->set_storedTable(true);
tabptr.p->tableType = 0;
- tabptr.p->enabled = false;
- tabptr.p->dropping = false;
+ tabptr.p->set_enabled(false);
+ tabptr.p->set_dropping(false);
tabptr.p->noOfKeyAttr = 0;
tabptr.p->hasCharAttr = 0;
tabptr.p->noOfDistrKeys = 0;
@@ -10360,6 +10526,7 @@ void Dbtc::releaseAbortResources(Signal* signal)
apiConnectptr.p->firstTcConnect = RNIL;
apiConnectptr.p->lastTcConnect = RNIL;
apiConnectptr.p->m_transaction_nodes.clear();
+ apiConnectptr.p->singleUserMode = 0;
// MASV let state be CS_ABORTING until all
// signals in the "air" have been received. Reset to CS_CONNECTED
@@ -10395,6 +10562,7 @@ void Dbtc::releaseAbortResources(Signal* signal)
tcRollbackRep->transId[0] = apiConnectptr.p->transid[0];
tcRollbackRep->transId[1] = apiConnectptr.p->transid[1];
tcRollbackRep->returnCode = apiConnectptr.p->returncode;
+ tcRollbackRep->errorData = apiConnectptr.p->errorData;
sendSignal(blockRef, GSN_TCROLLBACKREP, signal,
TcRollbackRep::SignalLength, JBB);
}
@@ -10999,36 +11167,6 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal)
}
}//Dbtc::execDUMP_STATE_ORD()
-void Dbtc::execSET_VAR_REQ(Signal* signal)
-{
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
- int val = setVarReq->value();
-
-
- switch (var) {
-
- case TransactionInactiveTime:
- jam();
- set_appl_timeout_value(val);
- break;
- case TransactionDeadlockDetectionTimeout:
- set_timeout_value(val);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case NoOfConcurrentProcessesHandleTakeover:
- set_no_parallel_takeover(val);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- } // switch
-#endif
-}
-
void Dbtc::execABORT_ALL_REQ(Signal* signal)
{
jamEntry();
@@ -11038,7 +11176,7 @@ void Dbtc::execABORT_ALL_REQ(Signal* signal)
const Uint32 senderData = req->senderData;
const BlockReference senderRef = req->senderRef;
- if(getAllowStartTransaction() == true && !getNodeState().getSingleUserMode()){
+ if(getAllowStartTransaction(refToNode(senderRef), 0) == true && !getNodeState().getSingleUserMode()){
jam();
ref->senderData = senderData;
@@ -11462,10 +11600,22 @@ void Dbtc::execTCINDXREQ(Signal* signal)
// This is a newly started transaction, clean-up
releaseAllSeizedIndexOperations(regApiPtr);
+ regApiPtr->apiConnectstate = CS_STARTED;
regApiPtr->transid[0] = tcIndxReq->transId1;
regApiPtr->transid[1] = tcIndxReq->transId2;
}//if
+ if (getNodeState().startLevel == NodeState::SL_SINGLEUSER &&
+ getNodeState().getSingleUserApi() !=
+ refToNode(regApiPtr->ndbapiBlockref))
+ {
+ terrorCode = ZCLUSTER_IN_SINGLEUSER_MODE;
+ regApiPtr->m_exec_flag |= TcKeyReq::getExecuteFlag(tcIndxRequestInfo);
+ apiConnectptr = transPtr;
+ abortErrorLab(signal);
+ return;
+ }
+
if (ERROR_INSERTED(8036) || !seizeIndexOperation(regApiPtr, indexOpPtr)) {
jam();
// Failed to allocate index operation
@@ -11491,20 +11641,29 @@ void Dbtc::execTCINDXREQ(Signal* signal)
Uint32 includedIndexLength = MIN(indexLength, indexBufSize);
indexOp->expectedAttrInfo = attrLength;
Uint32 includedAttrLength = MIN(attrLength, attrBufSize);
- if (saveINDXKEYINFO(signal,
- indexOp,
- dataPtr,
- includedIndexLength)) {
+
+ int ret;
+ if ((ret = saveINDXKEYINFO(signal,
+ indexOp,
+ dataPtr,
+ includedIndexLength)) == 0)
+ {
jam();
// We have received all we need
readIndexTable(signal, regApiPtr, indexOp);
return;
}
+ else if (ret == -1)
+ {
+ jam();
+ return;
+ }
+
dataPtr += includedIndexLength;
if (saveINDXATTRINFO(signal,
indexOp,
dataPtr,
- includedAttrLength)) {
+ includedAttrLength) == 0) {
jam();
// We have received all we need
readIndexTable(signal, regApiPtr, indexOp);
@@ -11607,13 +11766,25 @@ void Dbtc::execINDXKEYINFO(Signal* signal)
TcIndexOperationPtr indexOpPtr;
TcIndexOperation* indexOp;
+ if (compare_transid(regApiPtr->transid, indxKeyInfo->transId) == false)
+ {
+ TCKEY_abort(signal, 19);
+ return;
+ }
+
+ if (regApiPtr->apiConnectstate == CS_ABORTING)
+ {
+ jam();
+ return;
+ }
+
if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
{
indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
if (saveINDXKEYINFO(signal,
indexOp,
src,
- keyInfoLength)) {
+ keyInfoLength) == 0) {
jam();
// We have received all we need
readIndexTable(signal, regApiPtr, indexOp);
@@ -11640,17 +11811,31 @@ void Dbtc::execINDXATTRINFO(Signal* signal)
TcIndexOperationPtr indexOpPtr;
TcIndexOperation* indexOp;
+ if (compare_transid(regApiPtr->transid, indxAttrInfo->transId) == false)
+ {
+ TCKEY_abort(signal, 19);
+ return;
+ }
+
+ if (regApiPtr->apiConnectstate == CS_ABORTING)
+ {
+ jam();
+ return;
+ }
+
if((indexOpPtr.i = regApiPtr->accumulatingIndexOp) != RNIL)
{
indexOp = c_theIndexOperationPool.getPtr(indexOpPtr.i);
if (saveINDXATTRINFO(signal,
indexOp,
src,
- attrInfoLength)) {
+ attrInfoLength) == 0) {
jam();
// We have received all we need
readIndexTable(signal, regApiPtr, indexOp);
+ return;
}
+ return;
}
}
@@ -11658,12 +11843,13 @@ void Dbtc::execINDXATTRINFO(Signal* signal)
* Save signal INDXKEYINFO
* Return true if we have received all needed data
*/
-bool Dbtc::saveINDXKEYINFO(Signal* signal,
- TcIndexOperation* indexOp,
- const Uint32 *src,
- Uint32 len)
+int
+Dbtc::saveINDXKEYINFO(Signal* signal,
+ TcIndexOperation* indexOp,
+ const Uint32 *src,
+ Uint32 len)
{
- if (!indexOp->keyInfo.append(src, len)) {
+ if (ERROR_INSERTED(8039) || !indexOp->keyInfo.append(src, len)) {
jam();
// Failed to seize keyInfo, abort transaction
#ifdef VM_TRACE
@@ -11673,15 +11859,17 @@ bool Dbtc::saveINDXKEYINFO(Signal* signal,
apiConnectptr.i = indexOp->connectionIndex;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
releaseIndexOperation(apiConnectptr.p, indexOp);
- terrorCode = 4000;
+ terrorCode = 289;
+ if(TcKeyReq::getExecuteFlag(indexOp->tcIndxReq.requestInfo))
+ apiConnectptr.p->m_exec_flag= 1;
abortErrorLab(signal);
- return false;
+ return -1;
}
if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
jam();
- return true;
+ return 0;
}
- return false;
+ return 1;
}
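With the change from bool to int, saveINDXKEYINFO (and saveINDXATTRINFO below) now reports three outcomes, which is exactly how the updated caller in execTCINDXREQ interprets them:

    int ret = saveINDXKEYINFO(signal, indexOp, dataPtr, len);
    if (ret == 0)        // all KEYINFO and ATTRINFO received: start the index read
      readIndexTable(signal, regApiPtr, indexOp);
    else if (ret == -1)  // buffer seize failed; abortErrorLab() has already run
      return;
    // ret == 1: more INDXKEYINFO / INDXATTRINFO signals are still expected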
bool Dbtc::receivedAllINDXKEYINFO(TcIndexOperation* indexOp)
@@ -11693,12 +11881,13 @@ bool Dbtc::receivedAllINDXKEYINFO(TcIndexOperation* indexOp)
* Save signal INDXATTRINFO
* Return true if we have received all needed data
*/
-bool Dbtc::saveINDXATTRINFO(Signal* signal,
- TcIndexOperation* indexOp,
- const Uint32 *src,
- Uint32 len)
+int
+Dbtc::saveINDXATTRINFO(Signal* signal,
+ TcIndexOperation* indexOp,
+ const Uint32 *src,
+ Uint32 len)
{
- if (!indexOp->attrInfo.append(src, len)) {
+ if (ERROR_INSERTED(8051) || !indexOp->attrInfo.append(src, len)) {
jam();
#ifdef VM_TRACE
ndbout_c("Dbtc::saveINDXATTRINFO: Failed to seize attrInfo\n");
@@ -11706,15 +11895,17 @@ bool Dbtc::saveINDXATTRINFO(Signal* signal,
apiConnectptr.i = indexOp->connectionIndex;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
releaseIndexOperation(apiConnectptr.p, indexOp);
- terrorCode = 4000;
+ terrorCode = 289;
+ if(TcKeyReq::getExecuteFlag(indexOp->tcIndxReq.requestInfo))
+ apiConnectptr.p->m_exec_flag= 1;
abortErrorLab(signal);
- return false;
+ return -1;
}
if (receivedAllINDXKEYINFO(indexOp) && receivedAllINDXATTRINFO(indexOp)) {
jam();
- return true;
+ return 0;
}
- return false;
+ return 1;
}
bool Dbtc::receivedAllINDXATTRINFO(TcIndexOperation* indexOp)
@@ -11796,6 +11987,7 @@ void Dbtc::execTCKEYCONF(Signal* signal)
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
+ tcIndxRef->errorData = 0;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB);
return;
@@ -11815,6 +12007,7 @@ void Dbtc::execTCKEYCONF(Signal* signal)
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
+ tcIndxRef->errorData = 0;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB);
return;
@@ -11866,8 +12059,6 @@ void Dbtc::execTCKEYREF(Signal* signal)
}
const UintR TconnectIndex = indexOp->connectionIndex;
ApiConnectRecord * const regApiPtr = &apiConnectRecord[TconnectIndex];
- Uint32 tcKeyRequestInfo = indexOp->tcIndxReq.requestInfo;
- Uint32 commitFlg = TcKeyReq::getCommitFlag(tcKeyRequestInfo);
switch(indexOp->indexOpState) {
case(IOS_NOOP): {
@@ -11900,6 +12091,10 @@ void Dbtc::execTCKEYREF(Signal* signal)
tcIndxRef->transId[0] = tcKeyRef->transId[0];
tcIndxRef->transId[1] = tcKeyRef->transId[1];
tcIndxRef->errorCode = tcKeyRef->errorCode;
+ tcIndxRef->errorData = 0;
+
+ releaseIndexOperation(regApiPtr, indexOp);
+
sendSignal(regApiPtr->ndbapiBlockref,
GSN_TCINDXREF, signal, TcKeyRef::SignalLength, JBB);
return;
@@ -11974,6 +12169,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4000;
+ tcIndxRef->errorData = 0;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB);
return;
@@ -11989,6 +12185,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
+ tcIndxRef->errorData = 0;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB);
return;
@@ -12017,6 +12214,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
+ tcIndxRef->errorData = 0;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB);
*/
@@ -12042,6 +12240,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
+ tcIndxRef->errorData = regApiPtr->errorData;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB);
return;
@@ -12095,6 +12294,7 @@ void Dbtc::readIndexTable(Signal* signal,
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4000;
+ // tcIndxRef->errorData = ??; Where to find indexId
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB);
return;
@@ -12237,6 +12437,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
+ tcIndxRef->errorData = 0;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
TcKeyRef::SignalLength, JBB);
return;
@@ -12432,7 +12633,18 @@ void Dbtc::executeIndexOperation(Signal* signal,
bool Dbtc::seizeIndexOperation(ApiConnectRecord* regApiPtr,
TcIndexOperationPtr& indexOpPtr)
{
- return regApiPtr->theSeizedIndexOperations.seize(indexOpPtr);
+ if (regApiPtr->theSeizedIndexOperations.seize(indexOpPtr))
+ {
+ ndbassert(indexOpPtr.p->expectedKeyInfo == 0);
+ ndbassert(indexOpPtr.p->keyInfo.getSize() == 0);
+ ndbassert(indexOpPtr.p->expectedAttrInfo == 0);
+ ndbassert(indexOpPtr.p->attrInfo.getSize() == 0);
+ ndbassert(indexOpPtr.p->expectedTransIdAI == 0);
+ ndbassert(indexOpPtr.p->transIdAI.getSize() == 0);
+ return true;
+ }
+
+ return false;
}
void Dbtc::releaseIndexOperation(ApiConnectRecord* regApiPtr,
@@ -13263,9 +13475,9 @@ void Dbtc::deleteFromIndexTable(Signal* signal,
Uint32
Dbtc::TableRecord::getErrorCode(Uint32 schemaVersion) const {
- if(!enabled)
+ if(!get_enabled())
return ZNO_SUCH_TABLE;
- if(dropping)
+ if(get_dropping())
return ZDROP_TABLE_IN_PROGRESS;
if(table_version_major(schemaVersion) != table_version_major(currentSchemaVersion))
return ZWRONG_SCHEMA_VERSION_ERROR;
diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
index 3079a530807..6fe0eefcdb5 100644
--- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
+++ b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -747,6 +747,7 @@ typedef Ptr<RestartInfoRecord> RestartInfoRecordPtr;
/* WHEN THE TRIGGER IS DEACTIVATED. */
/* **************************************** */
struct TupTriggerData {
+ TupTriggerData() {}
/**
* Trigger id, used by DICT/TRIX to identify the trigger
@@ -1116,7 +1117,6 @@ private:
void execFSREADCONF(Signal* signal);
void execNDB_STTOR(Signal* signal);
void execREAD_CONFIG_REQ(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
void execDROP_TAB_REQ(Signal* signal);
void execALTER_TAB_REQ(Signal* signal);
void execFSREMOVECONF(Signal* signal);
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp b/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
index 017b0ec5b92..71cfa98b68b 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
@@ -389,6 +389,7 @@ Dbtup::commitRecord(Signal* signal,
fragptr.p = regFragPtr;
tabptr.p = regTabPtr;
+ Uint32 hashValue = firstOpPtr.p->hashValue;
if (opType == ZINSERT_DELETE) {
ljam();
@@ -411,6 +412,7 @@ Dbtup::commitRecord(Signal* signal,
//--------------------------------------------------------------------
Uint32 saveOpType = regOperPtr->optype;
regOperPtr->optype = ZINSERT;
+ regOperPtr->hashValue = hashValue;
operPtr.p = regOperPtr;
checkDetachedTriggers(signal,
@@ -443,6 +445,8 @@ Dbtup::commitRecord(Signal* signal,
befOpPtr.p->changeMask.clear();
befOpPtr.p->changeMask.bitOR(attributeMask);
befOpPtr.p->gci = regOperPtr->gci;
+ befOpPtr.p->optype = ZUPDATE;
+ befOpPtr.p->hashValue = hashValue;
befOpPtr.p->optype = opType;
operPtr.p = befOpPtr.p;
@@ -477,11 +481,13 @@ Dbtup::commitRecord(Signal* signal,
Uint32 fragPageId = befOpPtr.p->fragPageId;
Uint32 pageIndex = befOpPtr.p->pageIndex;
+ befOpPtr.p->optype = ZDELETE;
befOpPtr.p->realPageId = befOpPtr.p->realPageIdC;
befOpPtr.p->pageOffset = befOpPtr.p->pageOffsetC;
befOpPtr.p->fragPageId = befOpPtr.p->fragPageIdC;
befOpPtr.p->pageIndex = befOpPtr.p->pageIndexC;
befOpPtr.p->gci = regOperPtr->gci;
+ befOpPtr.p->hashValue = hashValue;
befOpPtr.p->optype = opType;
operPtr.p = befOpPtr.p;
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
index 42b86102dff..298fb183bc3 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
@@ -212,6 +212,30 @@ void Dbtup::execTUP_ALLOCREQ(Signal* signal)
//---------------------------------------------------
PagePtr pagePtr;
Uint32 pageOffset;
+
+ if (ERROR_INSERTED(4025))
+ {
+ signal->theData[0] = 827;
+ return;
+ }
+ if (ERROR_INSERTED(4026))
+ {
+ CLEAR_ERROR_INSERT_VALUE;
+ signal->theData[0] = 827;
+ return;
+ }
+ if (ERROR_INSERTED(4027) && (rand() % 100) > 25)
+ {
+ signal->theData[0] = 827;
+ return;
+ }
+ if (ERROR_INSERTED(4028) && (rand() % 100) > 25)
+ {
+ CLEAR_ERROR_INSERT_VALUE;
+ signal->theData[0] = 827;
+ return;
+ }
+
if (!allocTh(regFragPtr.p,
regTabPtr.p,
NORMAL_PAGE,
@@ -1115,6 +1139,11 @@ Dbtup::updateStartLab(Signal* signal,
} else {
jam();
retValue = interpreterStartLab(signal, pagePtr, regOperPtr->pageOffset);
+ if (retValue == -1)
+ {
+ jam();
+ return -1;
+ }
}//if
if (retValue == -1) {
@@ -1549,8 +1578,8 @@ int Dbtup::interpreterNextLab(Signal* signal,
Uint32 TdataForUpdate[3];
Uint32 Tlen;
- AttributeHeader& ah = AttributeHeader::init(&TdataForUpdate[0],
- TattrId, TattrNoOfWords);
+ AttributeHeader ah(TattrId, TattrNoOfWords);
+ TdataForUpdate[0] = ah.m_value;
TdataForUpdate[1] = TregMemBuffer[theRegister + 2];
TdataForUpdate[2] = TregMemBuffer[theRegister + 3];
Tlen = TattrNoOfWords + 1;
@@ -1566,6 +1595,7 @@ int Dbtup::interpreterNextLab(Signal* signal,
// Write a NULL value into the attribute
/* --------------------------------------------------------- */
ah.setNULL();
+ TdataForUpdate[0] = ah.m_value;
Tlen = 1;
}//if
int TnoDataRW= updateAttributes(pagePtr,
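The AttributeHeader changes in this file follow the new idiom also used in DbtupRoutines and DbUtil further down: build the header on the stack and copy the packed word out explicitly rather than init()-ing into the buffer in place. A short sketch with illustrative names (attrId, sizeInWords, buffer):

    AttributeHeader ah(attrId, sizeInWords);   // build the header value on the stack
    buffer[0] = ah.m_value;                    // store the packed header word
    ah.setNULL();                              // if the value turns out to be NULL...
    buffer[0] = ah.m_value;                    // ...the word must be copied out again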
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
index f21f2eba9fc..df8df2d29f3 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
@@ -65,6 +65,7 @@ void Dbtup::initData()
undoPage = 0;
totNoOfPagesAllocated = 0;
cnoOfAllocatedPages = 0;
+ CLEAR_ERROR_INSERT_VALUE;
// Records with constant sizes
}//Dbtup::initData()
@@ -103,7 +104,6 @@ Dbtup::Dbtup(const class Configuration & conf)
addRecSignal(GSN_FSREADCONF, &Dbtup::execFSREADCONF);
addRecSignal(GSN_NDB_STTOR, &Dbtup::execNDB_STTOR);
addRecSignal(GSN_READ_CONFIG_REQ, &Dbtup::execREAD_CONFIG_REQ, true);
- addRecSignal(GSN_SET_VAR_REQ, &Dbtup::execSET_VAR_REQ);
// Trigger Signals
addRecSignal(GSN_CREATE_TRIG_REQ, &Dbtup::execCREATE_TRIG_REQ);
@@ -569,7 +569,6 @@ void Dbtup::execSTTOR(Signal* signal)
switch (startPhase) {
case ZSTARTPHASE1:
ljam();
- CLEAR_ERROR_INSERT_VALUE;
cownref = calcTupBlockRef(0);
break;
default:
@@ -1315,32 +1314,5 @@ void Dbtup::seizePendingFileOpenInfoRecord(PendingFileOpenInfoPtr& pfoiPtr)
pfoiPtr.p->pfoNextRec = RNIL;
}//Dbtup::seizePendingFileOpenInfoRecord()
-void Dbtup::execSET_VAR_REQ(Signal* signal)
-{
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)signal->getDataPtrSend();
- ConfigParamId var = setVarReq->variable();
- int val = setVarReq->value();
-
- switch (var) {
-
- case NoOfDiskPagesToDiskAfterRestartTUP:
- clblPagesPerTick = val;
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case NoOfDiskPagesToDiskDuringRestartTUP:
- // Valid only during start so value not set.
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- } // switch
-#endif
-
-}//execSET_VAR_REQ()
-
-
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp b/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
index b0f71223c9d..964d8578217 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
@@ -185,7 +185,6 @@ Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* data
PagePtr pagePtr;
pagePtr.i = pageId;
ptrCheckGuard(pagePtr, cnoOfPage, page);
- const Uint32 tabDescriptor = tablePtr.p->tabDescriptor;
const Uint32* attrIds = &tableDescriptor[tablePtr.p->readKeyArray].tabDescr;
const Uint32 numAttrs = tablePtr.p->noOfKeyAttr;
// read pk attributes from original tuple
@@ -239,7 +238,6 @@ Dbtup::accReadPk(Uint32 tableId, Uint32 fragId, Uint32 fragPageId, Uint32 pageIn
FragrecordPtr fragPtr;
getFragmentrec(fragPtr, fragId, tablePtr.p);
// get real page id and tuple offset
- PagePtr pagePtr;
Uint32 pageId = getRealpid(fragPtr.p, fragPageId);
ndbrequire((pageIndex & 0x1) == 0);
Uint32 pageOffset = ZPAGE_HEADER_SIZE + (pageIndex >> 1) * tablePtr.p->tupheadsize;
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp b/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
index 60a83e46cd9..49c7af4161a 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp
@@ -452,6 +452,13 @@ Uint32 Dbtup::leafPageRangeFull(Fragrecord* const regFragPtr, PageRangePtr curr
ptrCheckGuard(parentPageRangePtr, cnoOfPageRangeRec, pageRange);
if (parentPageRangePtr.p->currentIndexPos < 3) {
ljam();
+
+ if (c_noOfFreePageRanges < tiprNoLevels)
+ {
+ ljam();
+ return RNIL;
+ }//if
+
/* ---------------------------------------------------------------- */
/* WE HAVE FOUND AN EMPTY ENTRY IN A PAGE RANGE RECORD. */
/* ALLOCATE A NEW PAGE RANGE RECORD, FILL IN THE START RANGE, */
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
index ae3bb0dcd7c..d8036dc0dee 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
@@ -676,8 +676,6 @@ bool
Dbtup::checkUpdateOfPrimaryKey(Uint32* updateBuffer, Tablerec* const regTabPtr)
{
Uint32 keyReadBuffer[MAX_KEY_SIZE_IN_WORDS];
- Uint32 attributeHeader;
- AttributeHeader* ahOut = (AttributeHeader*)&attributeHeader;
AttributeHeader ahIn(*updateBuffer);
Uint32 attributeId = ahIn.getAttributeId();
Uint32 attrDescriptorIndex = regTabPtr->tabDescriptor + (attributeId << ZAD_LOG_SIZE);
@@ -700,16 +698,17 @@ Dbtup::checkUpdateOfPrimaryKey(Uint32* updateBuffer, Tablerec* const regTabPtr)
ReadFunction f = regTabPtr->readFunctionArray[attributeId];
- AttributeHeader::init(&attributeHeader, attributeId, 0);
+ AttributeHeader attributeHeader(attributeId, 0);
tOutBufIndex = 0;
tMaxRead = MAX_KEY_SIZE_IN_WORDS;
bool tmp = tXfrmFlag;
tXfrmFlag = true;
- ndbrequire((this->*f)(&keyReadBuffer[0], ahOut, attrDescriptor, attributeOffset));
+ ndbrequire((this->*f)(&keyReadBuffer[0], &attributeHeader, attrDescriptor,
+ attributeOffset));
tXfrmFlag = tmp;
- ndbrequire(tOutBufIndex == ahOut->getDataSize());
- if (ahIn.getDataSize() != ahOut->getDataSize()) {
+ ndbrequire(tOutBufIndex == attributeHeader.getDataSize());
+ if (ahIn.getDataSize() != attributeHeader.getDataSize()) {
ljam();
return true;
}//if
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp b/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
index 9439509d102..59c1a1d1d78 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
@@ -89,7 +89,6 @@ Dbtup::execNEXT_SCANREQ(Signal* signal)
FragrecordPtr fragPtr;
fragPtr.i = scan.m_fragPtrI[0];
ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
- Fragrecord& frag = *fragPtr.p;
switch (req->scanFlag) {
case NextScanReq::ZSCAN_NEXT:
jam();
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp b/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp
index f9898519bde..364a8a0bba5 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp
@@ -92,8 +92,6 @@ void Dbtup::rfrReadRestartInfoLab(Signal* signal, RestartInfoRecordPtr riPtr)
seizeDiskBufferSegmentRecord(dbsiPtr);
riPtr.p->sriDataBufferSegmentP = dbsiPtr.i;
Uint32 retPageRef = RNIL;
- Uint32 noAllocPages = 1;
- Uint32 noOfPagesAllocated;
{
/**
* Use low pages for 0-pages during SR
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
index 32cd7ab0460..13485a31414 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
@@ -401,8 +401,6 @@ Dbtux::nodePopUpScans(NodeHandle& node, unsigned pos)
void
Dbtux::nodeSlide(NodeHandle& dstNode, NodeHandle& srcNode, unsigned cnt, unsigned i)
{
- Frag& frag = dstNode.m_frag;
- TreeHead& tree = frag.m_tree;
ndbrequire(i <= 1);
while (cnt != 0) {
TreeEnt ent;
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
index 3d59b8aad4f..7eae1486d43 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
@@ -704,7 +704,6 @@ Dbtux::scanFirst(ScanOpPtr scanPtr)
debugOut << "Enter first scan " << scanPtr.i << " " << scan << endl;
}
#endif
- TreeHead& tree = frag.m_tree;
// set up index keys for this operation
setKeyAttrs(frag);
// scan direction 0, 1
@@ -985,7 +984,6 @@ Dbtux::scanVisible(ScanOpPtr scanPtr, TreeEnt ent)
const Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI);
Uint32 fragBit = ent.m_fragBit;
Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[fragBit];
- Uint32 fragId = frag.m_fragId | fragBit;
Uint32 tupAddr = getTupAddr(frag, ent);
Uint32 tupVersion = ent.m_tupVersion;
// check for same tuple twice in row
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
index 5bca96667b9..31772abadaf 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
@@ -212,7 +212,6 @@ Dbtux::treeRemove(Frag& frag, TreePos treePos)
void
Dbtux::treeRemoveInner(Frag& frag, NodeHandle lubNode, unsigned pos)
{
- TreeHead& tree = frag.m_tree;
TreeEnt ent;
// find g.l.b node
NodeHandle glbNode(frag);
diff --git a/ndb/src/kernel/blocks/dbutil/DbUtil.cpp b/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
index 55d36124476..3bc95d1eba8 100644
--- a/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
+++ b/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
@@ -1168,9 +1168,7 @@ DbUtil::prepareOperation(Signal* signal, PreparePtr prepPtr)
/**************************************************************
* Attribute found - store in mapping (AttributeId, Position)
**************************************************************/
- AttributeHeader & attrMap =
- AttributeHeader::init(attrMappingIt.data,
- attrDesc.AttributeId, // 1. Store AttrId
+ AttributeHeader attrMap(attrDesc.AttributeId, // 1. Store AttrId
0);
if (attrDesc.AttributeKeyFlag) {
@@ -1199,6 +1197,7 @@ DbUtil::prepareOperation(Signal* signal, PreparePtr prepPtr)
return;
}
}
+ *(attrMappingIt.data) = attrMap.m_value;
#if 0
ndbout << "BEFORE: attrLength: " << attrLength << endl;
#endif
diff --git a/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp b/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
index ec9d4a0dc60..6db1d22a6d2 100644
--- a/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
+++ b/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
@@ -82,6 +82,7 @@ public:
*/
struct StartRecord {
+ StartRecord() {}
Uint64 m_startTime;
void reset();
@@ -190,7 +191,6 @@ private:
void execNDB_STARTCONF(Signal* signal);
void execREAD_NODESREQ(Signal* signal);
void execNDB_STARTREF(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
void execSTOP_PERM_REF(Signal* signal);
void execSTOP_PERM_CONF(Signal* signal);
diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp b/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
index 6df52b6fbe7..fdd6e7677d3 100644
--- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
+++ b/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
@@ -80,7 +80,6 @@ Ndbcntr::Ndbcntr(const class Configuration & conf):
addRecSignal(GSN_NDB_STARTCONF, &Ndbcntr::execNDB_STARTCONF);
addRecSignal(GSN_READ_NODESREQ, &Ndbcntr::execREAD_NODESREQ);
addRecSignal(GSN_NDB_STARTREF, &Ndbcntr::execNDB_STARTREF);
- addRecSignal(GSN_SET_VAR_REQ, &Ndbcntr::execSET_VAR_REQ);
addRecSignal(GSN_STOP_PERM_REF, &Ndbcntr::execSTOP_PERM_REF);
addRecSignal(GSN_STOP_PERM_CONF, &Ndbcntr::execSTOP_PERM_CONF);
diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
index 65d80669316..32827c18802 100644
--- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
+++ b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
@@ -75,8 +75,8 @@ static BlockInfo ALL_BLOCKS[] = {
{ DBDICT_REF, 1 , 6000, 6003 },
{ NDBFS_REF, 0 , 2000, 2999 },
{ NDBCNTR_REF, 0 , 1000, 1999 },
+ { CMVMI_REF, 1 , 9000, 9999 }, // before QMGR
{ QMGR_REF, 1 , 1, 999 },
- { CMVMI_REF, 1 , 9000, 9999 },
{ TRIX_REF, 1 , 0, 0 },
{ BACKUP_REF, 1 , 10000, 10999 },
{ DBUTIL_REF, 1 , 11000, 11999 },
@@ -179,6 +179,7 @@ void Ndbcntr::execSYSTEM_ERROR(Signal* signal)
break;
case SystemError::CopyFragRefError:
+ CRASH_INSERTION(1000);
BaseString::snprintf(buf, sizeof(buf),
"Killed by node %d as "
"copyfrag failed, error: %u",
@@ -818,7 +819,6 @@ Ndbcntr::trySystemRestart(Signal* signal){
*/
const bool allNodes = c_start.m_waiting.equal(c_allDefinedNodes);
const bool allClusterNodes = c_start.m_waiting.equal(c_clusterNodes);
- const Uint64 now = NdbTick_CurrentMillisecond();
if(!allClusterNodes){
jam();
@@ -1397,7 +1397,6 @@ void Ndbcntr::execNODE_FAILREP(Signal* signal)
const bool tMasterFailed = allFailed.get(cmasterNodeId);
const bool tStarted = !failedStarted.isclear();
const bool tStarting = !failedStarting.isclear();
- const bool tWaiting = !failedWaiting.isclear();
if(tMasterFailed){
jam();
@@ -1652,6 +1651,7 @@ void Ndbcntr::createSystableLab(Signal* signal, unsigned index)
//w.add(DictTabInfo::NoOfVariable, (Uint32)0);
//w.add(DictTabInfo::KeyLength, 1);
w.add(DictTabInfo::TableTypeVal, (Uint32)table.tableType);
+ w.add(DictTabInfo::SingleUserMode, (Uint32)NDB_SUM_READ_WRITE);
for (unsigned i = 0; i < table.columnCount; i++) {
const SysColumn& column = table.columnList[i];
@@ -2033,23 +2033,6 @@ Ndbcntr::execDUMP_STATE_ORD(Signal* signal)
}//Ndbcntr::execDUMP_STATE_ORD()
-void Ndbcntr::execSET_VAR_REQ(Signal* signal) {
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
-
- switch (var) {
- case TimeToWaitAlive:
- // Valid only during start so value not set.
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- }// switch
-#endif
-}//Ndbcntr::execSET_VAR_REQ()
-
void Ndbcntr::updateNodeState(Signal* signal, const NodeState& newState) const{
NodeStateRep * const stateRep = (NodeStateRep *)&signal->theData[0];
@@ -2478,8 +2461,6 @@ void Ndbcntr::execABORT_ALL_CONF(Signal* signal){
void Ndbcntr::execABORT_ALL_REF(Signal* signal){
jamEntry();
- AbortAllRef *abortAllRef = (AbortAllRef *)&signal->theData[0];
- AbortAllRef::ErrorCode errorCode = (AbortAllRef::ErrorCode) abortAllRef->errorCode;
StopRef * const stopRef = (StopRef *)&signal->theData[0];
stopRef->senderData = c_stopRec.stopReq.senderData;
diff --git a/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp b/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp
index da7f33ae78d..99dd4aea207 100644
--- a/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp
+++ b/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp
@@ -106,6 +106,8 @@ class AsyncFile;
class Request
{
public:
+ Request() {}
+
enum Action {
open,
close,
diff --git a/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp b/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
index 353330929e5..55b0a8c4d39 100644
--- a/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
+++ b/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
@@ -569,7 +569,7 @@ AsyncFile*
Ndbfs::createAsyncFile(){
// Check limit of open files
- if (theFiles.size()+1 == m_maxFiles) {
+ if (theFiles.size() == m_maxFiles) {
// Print info about all open files
for (unsigned i = 0; i < theFiles.size(); i++){
AsyncFile* file = theFiles[i];
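A quick check of the off-by-one being fixed here, assuming the guard rejects the new file: with m_maxFiles = 40, the old test (theFiles.size()+1 == m_maxFiles) already fired when the 40th file was requested at size 39, so only 39 files could ever be open; the new test (theFiles.size() == m_maxFiles) fires only once 40 files exist, making the configured limit reachable.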
diff --git a/ndb/src/kernel/blocks/qmgr/Qmgr.hpp b/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
index dcca240eeb6..21395a5d750 100644
--- a/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
+++ b/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
@@ -101,6 +101,7 @@ public:
};
struct StartRecord {
+ StartRecord() {}
void reset(){
m_startKey++;
m_startNode = 0;
@@ -169,6 +170,7 @@ public:
};
struct ArbitRec {
+ ArbitRec() {}
ArbitState state; // state
bool newstate; // flag to initialize new state
unsigned thread; // identifies a continueB "thread"
@@ -242,7 +244,6 @@ private:
void execAPI_REGREQ(Signal* signal);
void execAPI_FAILCONF(Signal* signal);
void execREAD_NODESREQ(Signal* signal);
- void execSET_VAR_REQ(Signal* signal);
void execREAD_NODESREF(Signal* signal);
void execREAD_NODESCONF(Signal* signal);
diff --git a/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp b/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
index b8885569f0e..a087fe38c1c 100644
--- a/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
+++ b/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
@@ -82,7 +82,6 @@ Qmgr::Qmgr(const class Configuration & conf)
addRecSignal(GSN_DISCONNECT_REP, &Qmgr::execDISCONNECT_REP);
addRecSignal(GSN_API_FAILCONF, &Qmgr::execAPI_FAILCONF);
addRecSignal(GSN_READ_NODESREQ, &Qmgr::execREAD_NODESREQ);
- addRecSignal(GSN_SET_VAR_REQ, &Qmgr::execSET_VAR_REQ);
addRecSignal(GSN_API_BROADCAST_REP, &Qmgr::execAPI_BROADCAST_REP);
// Arbitration signals
diff --git a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
index 0156f334051..a76838f7007 100644
--- a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
+++ b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
@@ -845,7 +845,6 @@ void Qmgr::execCM_REGCONF(Signal* signal)
jamEntry();
const CmRegConf * const cmRegConf = (CmRegConf *)&signal->theData[0];
- Uint32 presidentNodeId = cmRegConf->presidentNodeId;
if (!ndbCompatible_ndb_ndb(NDB_VERSION, cmRegConf->presidentVersion)) {
jam();
@@ -1270,7 +1269,6 @@ Qmgr::check_startup(Signal* signal)
/**
* Check for missing node group directly
*/
- char buf[100];
NdbNodeBitmask check;
check.assign(c_definedNodes);
check.bitANDC(c_start.m_starting_nodes); // Not connected nodes
@@ -2818,7 +2816,7 @@ void Qmgr::failReportLab(Signal* signal, Uint16 aFailedNode,
if (failedNodePtr.i == getOwnNodeId()) {
jam();
- Uint32 code = 0;
+ Uint32 code = NDBD_EXIT_NODE_DECLARED_DEAD;
const char * msg = 0;
char extra[100];
switch(aFailCause){
@@ -4774,34 +4772,6 @@ Qmgr::execDUMP_STATE_ORD(Signal* signal)
#endif
}//Qmgr::execDUMP_STATE_ORD()
-void Qmgr::execSET_VAR_REQ(Signal* signal)
-{
-#if 0
- SetVarReq* const setVarReq = (SetVarReq*)&signal->theData[0];
- ConfigParamId var = setVarReq->variable();
- UintR val = setVarReq->value();
-
- switch (var) {
- case HeartbeatIntervalDbDb:
- setHbDelay(val/10);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case HeartbeatIntervalDbApi:
- setHbApiDelay(val/10);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- case ArbitTimeout:
- setArbitTimeout(val);
- sendSignal(CMVMI_REF, GSN_SET_VAR_CONF, signal, 1, JBB);
- break;
-
- default:
- sendSignal(CMVMI_REF, GSN_SET_VAR_REF, signal, 1, JBB);
- }// switch
-#endif
-}//execSET_VAR_REQ()
void
Qmgr::execAPI_BROADCAST_REP(Signal* signal)
diff --git a/ndb/src/kernel/blocks/suma/Suma.cpp b/ndb/src/kernel/blocks/suma/Suma.cpp
index be3171da7a0..006fb21bd77 100644
--- a/ndb/src/kernel/blocks/suma/Suma.cpp
+++ b/ndb/src/kernel/blocks/suma/Suma.cpp
@@ -1434,7 +1434,6 @@ SumaParticipant::SyncRecord::nextScan(Signal* signal){
LocalDataBuffer<15> attrBuf(suma.c_dataBufferPool, head);
ScanFragReq * req = (ScanFragReq *)signal->getDataPtrSend();
- const Uint32 parallelism = 16;
const Uint32 attrLen = 5 + attrBuf.getSize();
req->senderData = m_subscriptionPtrI;
@@ -1610,10 +1609,6 @@ SumaParticipant::execSCAN_HBREP(Signal* signal){
static Uint32 f_bufferLock = 0;
static Uint32 f_buffer[SUMA_BUF_SZ];
-static Uint32 f_trigBufferSize = 0;
-static Uint32 b_bufferLock = 0;
-static Uint32 b_buffer[SUMA_BUF_SZ];
-static Uint32 b_trigBufferSize = 0;
void
SumaParticipant::execTRANSID_AI(Signal* signal){
@@ -1717,7 +1712,6 @@ SumaParticipant::execSUB_REMOVE_REQ(Signal* signal) {
return;
}
- int count = 0;
{
jam();
SubscriberPtr i_subbPtr;
diff --git a/ndb/src/kernel/blocks/suma/Suma.hpp b/ndb/src/kernel/blocks/suma/Suma.hpp
index 8c423a57569..e479ebb7691 100644
--- a/ndb/src/kernel/blocks/suma/Suma.hpp
+++ b/ndb/src/kernel/blocks/suma/Suma.hpp
@@ -208,6 +208,7 @@ public:
friend struct SyncRecord;
struct Subscription {
+ Subscription() {}
Uint32 m_subscriberRef;
Uint32 m_subscriberData;
Uint32 m_senderRef;
diff --git a/ndb/src/kernel/error/TimeModule.cpp b/ndb/src/kernel/error/TimeModule.cpp
index 1c01f91f86b..2be734842ba 100644
--- a/ndb/src/kernel/error/TimeModule.cpp
+++ b/ndb/src/kernel/error/TimeModule.cpp
@@ -18,7 +18,7 @@
#include <ndb_global.h>
#include "TimeModule.hpp"
-static const char* cMonth[] = { "x", "January", "February", "Mars", "April", "May", "June",
+static const char* cMonth[] = { "x", "January", "February", "March", "April", "May", "June",
"July", "August", "September", "October", "November", "December"};
static const char* cDay[] = { "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday",
diff --git a/ndb/src/kernel/error/ndbd_exit_codes.c b/ndb/src/kernel/error/ndbd_exit_codes.c
index 37a54e33350..92bee522d24 100644
--- a/ndb/src/kernel/error/ndbd_exit_codes.c
+++ b/ndb/src/kernel/error/ndbd_exit_codes.c
@@ -57,12 +57,15 @@ static const ErrStruct errArray[] =
"error(s) on other node(s)"},
{NDBD_EXIT_PARTITIONED_SHUTDOWN, XAE, "Partitioned cluster detected. "
"Please check if cluster is already running"},
+ {NDBD_EXIT_NODE_DECLARED_DEAD, XAE,
+ "Node declared dead. See error log for details"},
{NDBD_EXIT_POINTER_NOTINRANGE, XIE, "Pointer too large"},
{NDBD_EXIT_SR_OTHERNODEFAILED, XRE, "Another node failed during system "
"restart, please investigate error(s) on other node(s)"},
{NDBD_EXIT_NODE_NOT_DEAD, XRE, "Internal node state conflict, "
"most probably resolved by restarting node again"},
{NDBD_EXIT_SR_REDOLOG, XFI, "Error while reading the REDO log"},
+ {NDBD_EXIT_SR_SCHEMAFILE, XFI, "Error while reading the schema file"},
/* Currently unused? */
{2311, XIE, "Conflict when selecting restart type"},
{NDBD_EXIT_NO_MORE_UNDOLOG, XCR,
diff --git a/ndb/src/kernel/vm/MetaData.hpp b/ndb/src/kernel/vm/MetaData.hpp
index 9c34ac2b612..23d068354b5 100644
--- a/ndb/src/kernel/vm/MetaData.hpp
+++ b/ndb/src/kernel/vm/MetaData.hpp
@@ -67,6 +67,7 @@ public:
*/
class Table {
public:
+ Table() {}
/* Table id (array index in DICT and other blocks) */
Uint32 tableId;
diff --git a/ndb/src/kernel/vm/SimulatedBlock.cpp b/ndb/src/kernel/vm/SimulatedBlock.cpp
index b58e1feed9d..5e23d95bce2 100644
--- a/ndb/src/kernel/vm/SimulatedBlock.cpp
+++ b/ndb/src/kernel/vm/SimulatedBlock.cpp
@@ -1012,6 +1012,7 @@ SimulatedBlock::assembleFragments(Signal * signal){
/**
* Don't release allocated segments
*/
+ signal->header.m_fragmentInfo = 0;
signal->header.m_noOfSections = 0;
return false;
}
@@ -1039,6 +1040,7 @@ SimulatedBlock::assembleFragments(Signal * signal){
* fragInfo = 2
*/
if(fragInfo == 2){
+ signal->header.m_fragmentInfo = 0;
signal->header.m_noOfSections = 0;
return false;
}
diff --git a/ndb/src/kernel/vm/ndbd_malloc.cpp b/ndb/src/kernel/vm/ndbd_malloc.cpp
index 9386e3c7cd3..21a26ff11d8 100644
--- a/ndb/src/kernel/vm/ndbd_malloc.cpp
+++ b/ndb/src/kernel/vm/ndbd_malloc.cpp
@@ -22,12 +22,14 @@
#include <stdio.h>
#endif
+#ifdef TRACE_MALLOC
static void xxx(size_t size, size_t *s_m, size_t *s_k, size_t *s_b)
{
*s_m = size/1024/1024;
*s_k = (size - *s_m*1024*1024)/1024;
*s_b = size - *s_m*1024*1024-*s_k*1024;
}
+#endif
static Uint64 g_allocated_memory;
void *ndbd_malloc(size_t size)
diff --git a/ndb/src/libndb.ver.in b/ndb/src/libndb.ver.in
new file mode 100644
index 00000000000..72bf93d196f
--- /dev/null
+++ b/ndb/src/libndb.ver.in
@@ -0,0 +1,2 @@
+libndbclient_@NDB_SHARED_LIB_MAJOR_VERSION@ { global: *; };
+
diff --git a/ndb/src/mgmapi/LocalConfig.cpp b/ndb/src/mgmapi/LocalConfig.cpp
index f01b6ff3da3..476e2d6dd84 100644
--- a/ndb/src/mgmapi/LocalConfig.cpp
+++ b/ndb/src/mgmapi/LocalConfig.cpp
@@ -73,9 +73,9 @@ LocalConfig::init(const char *connectString,
//4. Check Ndb.cfg in NDB_HOME
{
bool fopenError;
- char *buf= NdbConfig_NdbCfgName(1 /*true*/);
- NdbAutoPtr<char> tmp_aptr(buf);
- if(readFile(buf, fopenError))
+ char *buf2= NdbConfig_NdbCfgName(1 /*true*/);
+ NdbAutoPtr<char> tmp_aptr(buf2);
+ if(readFile(buf2, fopenError))
DBUG_RETURN(true);
if (!fopenError)
DBUG_RETURN(false);
@@ -84,9 +84,9 @@ LocalConfig::init(const char *connectString,
//5. Check Ndb.cfg in cwd
{
bool fopenError;
- char *buf= NdbConfig_NdbCfgName(0 /*false*/);
- NdbAutoPtr<char> tmp_aptr(buf);
- if(readFile(buf, fopenError))
+ char *buf2= NdbConfig_NdbCfgName(0 /*false*/);
+ NdbAutoPtr<char> tmp_aptr(buf2);
+ if(readFile(buf2, fopenError))
DBUG_RETURN(true);
if (!fopenError)
DBUG_RETURN(false);
@@ -94,9 +94,9 @@ LocalConfig::init(const char *connectString,
//7. Check
{
- char buf[256];
- BaseString::snprintf(buf, sizeof(buf), "host=localhost:%s", NDB_PORT);
- if(readConnectString(buf, "default connect string"))
+ char buf2[256];
+ BaseString::snprintf(buf2, sizeof(buf2), "host=localhost:%s", NDB_PORT);
+ if(readConnectString(buf2, "default connect string"))
DBUG_RETURN(true);
}
diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp
index fa7aed8b182..42e78b8afc2 100644
--- a/ndb/src/mgmapi/mgmapi.cpp
+++ b/ndb/src/mgmapi/mgmapi.cpp
@@ -466,7 +466,6 @@ ndb_mgm_connect(NdbMgmHandle handle, int no_retries,
LocalConfig &cfg= handle->cfg;
NDB_SOCKET_TYPE sockfd= NDB_INVALID_SOCKET;
Uint32 i;
- int binderror = 0;
SocketClient s(0, 0);
s.set_connect_timeout(handle->connect_timeout);
if (!s.init())
@@ -836,12 +835,12 @@ ndb_mgm_get_status(NdbMgmHandle handle)
break;
}
- Vector<BaseString> split;
- tmp.split(split, ":.", 4);
- if(split.size() != 4)
+ Vector<BaseString> split2;
+ tmp.split(split2, ":.", 4);
+ if(split2.size() != 4)
break;
- const int id = atoi(split[1].c_str());
+ const int id = atoi(split2[1].c_str());
if(id != nodeId){
ptr++;
i++;
@@ -849,9 +848,9 @@ ndb_mgm_get_status(NdbMgmHandle handle)
ptr->node_id = id;
}
- split[3].trim(" \t\n");
+ split2[3].trim(" \t\n");
- if(status_ackumulate(ptr,split[2].c_str(), split[3].c_str()) != 0) {
+ if(status_ackumulate(ptr,split2[2].c_str(), split2[3].c_str()) != 0) {
break;
}
}
@@ -2187,43 +2186,6 @@ ndb_mgm_alloc_nodeid(NdbMgmHandle handle, unsigned int version, int nodetype,
return nodeid;
}
-/*****************************************************************************
- * Global Replication
- ******************************************************************************/
-extern "C"
-int
-ndb_mgm_rep_command(NdbMgmHandle handle, unsigned int request,
- unsigned int* replication_id,
- struct ndb_mgm_reply* /*reply*/)
-{
- SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_rep_command");
- const ParserRow<ParserDummy> replication_reply[] = {
- MGM_CMD("global replication reply", NULL, ""),
- MGM_ARG("result", String, Mandatory, "Error message"),
- MGM_ARG("id", Int, Optional, "Id of global replication"),
- MGM_END()
- };
- CHECK_HANDLE(handle, -1);
- CHECK_CONNECTED(handle, -1);
-
- Properties args;
- args.put("request", request);
- const Properties *reply;
- reply = ndb_mgm_call(handle, replication_reply, "rep", &args);
- CHECK_REPLY(reply, -1);
-
- const char * result;
- reply->get("result", &result);
- reply->get("id", replication_id);
- if(strcmp(result,"Ok")!=0) {
- delete reply;
- return -1;
- }
-
- delete reply;
- return 0;
-}
-
extern "C"
int
ndb_mgm_set_int_parameter(NdbMgmHandle handle,
diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/ndb/src/mgmclient/CommandInterpreter.cpp
index 2ea98a57866..b72f7b12f9b 100644
--- a/ndb/src/mgmclient/CommandInterpreter.cpp
+++ b/ndb/src/mgmclient/CommandInterpreter.cpp
@@ -25,6 +25,7 @@
#include <mgmapi.h>
#include <util/BaseString.hpp>
+#include <ndbd_exit_codes.h>
class MgmtSrvr;
@@ -125,7 +126,7 @@ public:
int executeStatus(int processId, const char* parameters, bool all);
int executeEventReporting(int processId, const char* parameters, bool all);
int executeDumpState(int processId, const char* parameters, bool all);
- int executeStartBackup(char * parameters);
+ int executeStartBackup(char * parameters, bool interactive);
int executeAbortBackup(char * parameters);
int executeStop(Vector<BaseString> &command_list, unsigned command_pos,
int *node_ids, int no_of_nodes);
@@ -768,6 +769,133 @@ CommandInterpreter::printError()
}
}
+/*
+ * print log event from mgmsrv to console screen
+ */
+#define make_uint64(a,b) (((Uint64)(a)) + (((Uint64)(b)) << 32))
+#define Q64(a) make_uint64(event->EVENT.a ## _lo, event->EVENT.a ## _hi)
+#define R event->source_nodeid
+#define Q(a) event->EVENT.a
+#define QVERSION getMajor(Q(version)), getMinor(Q(version)), getBuild(Q(version))
+#define NDB_LE_(a) NDB_LE_ ## a
+static void
+printLogEvent(struct ndb_logevent* event)
+{
+ switch (event->type) {
+ /**
+ * NDB_MGM_EVENT_CATEGORY_BACKUP
+ */
+#undef EVENT
+#define EVENT BackupStarted
+ case NDB_LE_BackupStarted:
+ ndbout_c("Node %u: Backup %d started from node %d",
+ R, Q(backup_id), Q(starting_node));
+ break;
+#undef EVENT
+#define EVENT BackupFailedToStart
+ case NDB_LE_BackupFailedToStart:
+ ndbout_c("Node %u: Backup request from %d failed to start. Error: %d",
+ R, Q(starting_node), Q(error));
+ break;
+#undef EVENT
+#define EVENT BackupCompleted
+ case NDB_LE_BackupCompleted:
+ ndbout_c("Node %u: Backup %u started from node %u completed\n"
+ " StartGCP: %u StopGCP: %u\n"
+ " #Records: %u #LogRecords: %u\n"
+ " Data: %u bytes Log: %u bytes", R,
+ Q(backup_id), Q(starting_node),
+ Q(start_gci), Q(stop_gci),
+ Q(n_records), Q(n_log_records),
+ Q(n_bytes), Q(n_log_bytes));
+ break;
+#undef EVENT
+#define EVENT BackupAborted
+ case NDB_LE_BackupAborted:
+ ndbout_c("Node %u: Backup %d started from %d has been aborted. Error: %d",
+ R, Q(backup_id), Q(starting_node), Q(error));
+ break;
+ /**
+ * NDB_MGM_EVENT_CATEGORY_STARTUP
+ */
+#undef EVENT
+#define EVENT NDBStartStarted
+ case NDB_LE_NDBStartStarted:
+ ndbout_c("Node %u: Start initiated (version %d.%d.%d)",
+ R, QVERSION);
+ break;
+#undef EVENT
+#define EVENT NDBStartCompleted
+ case NDB_LE_NDBStartCompleted:
+ ndbout_c("Node %u: Started (version %d.%d.%d)",
+ R, QVERSION);
+ break;
+#undef EVENT
+#define EVENT NDBStopStarted
+ case NDB_LE_NDBStopStarted:
+ ndbout_c("Node %u: %s shutdown initiated", R,
+ (Q(stoptype) == 1 ? "Cluster" : "Node"));
+ break;
+#undef EVENT
+#define EVENT NDBStopCompleted
+ case NDB_LE_NDBStopCompleted:
+ {
+ BaseString action_str("");
+ BaseString signum_str("");
+ getRestartAction(Q(action), action_str);
+ if (Q(signum))
+ signum_str.appfmt(" Initiated by signal %d.",
+ Q(signum));
+ ndbout_c("Node %u: Node shutdown completed%s.%s",
+ R, action_str.c_str(), signum_str.c_str());
+ }
+ break;
+#undef EVENT
+#define EVENT NDBStopForced
+ case NDB_LE_NDBStopForced:
+ {
+ BaseString action_str("");
+ BaseString reason_str("");
+ BaseString sphase_str("");
+ int signum = Q(signum);
+ int error = Q(error);
+ int sphase = Q(sphase);
+ int extra = Q(extra);
+ getRestartAction(Q(action), action_str);
+ if (signum)
+ reason_str.appfmt(" Initiated by signal %d.", signum);
+ if (error)
+ {
+ ndbd_exit_classification cl;
+ ndbd_exit_status st;
+ const char *msg = ndbd_exit_message(error, &cl);
+ const char *cl_msg = ndbd_exit_classification_message(cl, &st);
+ const char *st_msg = ndbd_exit_status_message(st);
+ reason_str.appfmt(" Caused by error %d: \'%s(%s). %s\'.",
+ error, msg, cl_msg, st_msg);
+ if (extra != 0)
+ reason_str.appfmt(" (extra info %d)", extra);
+ }
+ if (sphase < 255)
+ sphase_str.appfmt(" Occured during startphase %u.", sphase);
+ ndbout_c("Node %u: Forced node shutdown completed%s.%s%s",
+ R, action_str.c_str(), sphase_str.c_str(),
+ reason_str.c_str());
+ }
+ break;
+#undef EVENT
+#define EVENT StopAborted
+ case NDB_LE_NDBStopAborted:
+ ndbout_c("Node %u: Node shutdown aborted", R);
+ break;
+ /**
+ * default nothing to print
+ */
+ default:
+ break;
+ }
+}
+
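A note on the macro scheme above: each case redefines EVENT just before its label, so with "#define EVENT BackupStarted" in effect the helpers expand roughly as follows ("field" is a placeholder; Q64 is defined for split 64-bit payloads, though none of the cases shown here use it):

    // R             ->  event->source_nodeid
    // Q(backup_id)  ->  event->BackupStarted.backup_id
    // Q64(field)    ->  make_uint64(event->BackupStarted.field_lo, event->BackupStarted.field_hi)
    // QVERSION      ->  getMajor(Q(version)), getMinor(Q(version)), getBuild(Q(version))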
//*****************************************************************************
//*****************************************************************************
@@ -784,27 +912,25 @@ event_thread_run(void* p)
int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP,
1, NDB_MGM_EVENT_CATEGORY_STARTUP,
0 };
- int fd = ndb_mgm_listen_event(handle, filter);
- if (fd != NDB_INVALID_SOCKET)
+
+ NdbLogEventHandle log_handle= NULL;
+ struct ndb_logevent log_event;
+
+ log_handle= ndb_mgm_create_logevent_handle(handle, filter);
+ if (log_handle)
{
do_event_thread= 1;
- char *tmp= 0;
- char buf[1024];
- SocketInputStream in(fd,10);
do {
- if (tmp == 0) NdbSleep_MilliSleep(10);
- if((tmp = in.gets(buf, 1024)))
+ int res= ndb_logevent_get_next(log_handle, &log_event, 2000);
+ if (res > 0)
{
- const char ping_token[]= "<PING>";
- if (memcmp(ping_token,tmp,sizeof(ping_token)-1))
- if(tmp && strlen(tmp))
- {
- Guard g(printmutex);
- ndbout << tmp;
- }
+ Guard g(printmutex);
+ printLogEvent(&log_event);
}
+ else if (res < 0)
+ break;
} while(do_event_thread);
- NDB_CLOSE_SOCKET(fd);
+ ndb_mgm_destroy_logevent_handle(&log_handle);
}
else
{
@@ -1054,7 +1180,7 @@ CommandInterpreter::execute_impl(const char *_line, bool interactive)
else if(strcasecmp(firstToken, "START") == 0 &&
allAfterFirstToken != NULL &&
strncasecmp(allAfterFirstToken, "BACKUP", sizeof("BACKUP") - 1) == 0){
- m_error= executeStartBackup(allAfterFirstToken);
+ m_error= executeStartBackup(allAfterFirstToken, interactive);
DBUG_RETURN(true);
}
else if(strcasecmp(firstToken, "ABORT") == 0 &&
@@ -1549,7 +1675,6 @@ CommandInterpreter::executePurge(char* parameters)
return -1;
}
- int i;
char *str;
if (ndb_mgm_purge_stale_sessions(m_mgmsrv, &str)) {
@@ -1667,7 +1792,6 @@ CommandInterpreter::executeConnect(char* parameters, bool interactive)
{
BaseString *basestring = NULL;
- int retval;
disconnect();
if (!emptyString(parameters)) {
basestring= new BaseString(parameters);
@@ -2028,6 +2152,9 @@ CommandInterpreter::executeRestart(Vector<BaseString> &command_list,
return -1;
}
+ if (nostart)
+ ndbout_c("Shutting down nodes with \"-n, no start\" option, to subsequently start the nodes.");
+
result= ndb_mgm_restart3(m_mgmsrv, no_of_nodes, node_ids,
initialstart, nostart, abort, &need_disconnect);
@@ -2102,7 +2229,6 @@ CommandInterpreter::executeStatus(int processId,
ndb_mgm_node_status status;
Uint32 startPhase, version;
- bool system;
struct ndb_mgm_cluster_state *cl;
cl = ndb_mgm_get_status(m_mgmsrv);
@@ -2120,6 +2246,19 @@ CommandInterpreter::executeStatus(int processId,
ndbout << processId << ": Node not found" << endl;
return -1;
}
+ if (cl->node_states[i].node_type != NDB_MGM_NODE_TYPE_NDB){
+ if (cl->node_states[i].version != 0){
+ version = cl->node_states[i].version;
+ ndbout << "Node "<< cl->node_states[i].node_id <<": connected" ;
+ ndbout_c(" (Version %d.%d.%d)",
+ getMajor(version) ,
+ getMinor(version),
+ getBuild(version));
+
+ }else
+ ndbout << "Node "<< cl->node_states[i].node_id <<": not connected" << endl;
+ return 0;
+ }
status = cl->node_states[i].node_status;
startPhase = cl->node_states[i].start_phase;
version = cl->node_states[i].version;
@@ -2518,20 +2657,11 @@ CommandInterpreter::executeEventReporting(int processId,
* Backup
*****************************************************************************/
int
-CommandInterpreter::executeStartBackup(char* parameters)
+CommandInterpreter::executeStartBackup(char* parameters, bool interactive)
{
struct ndb_mgm_reply reply;
unsigned int backupId;
-#if 0
- int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 };
- int fd = ndb_mgm_listen_event(m_mgmsrv, filter);
- if (fd < 0)
- {
- ndbout << "Initializing start of backup failed" << endl;
- printError();
- return fd;
- }
-#endif
+
Vector<BaseString> args;
{
BaseString(parameters).split(args);
@@ -2544,25 +2674,20 @@ CommandInterpreter::executeStartBackup(char* parameters)
int sz= args.size();
int result;
- if (sz == 2 &&
- args[1] == "NOWAIT")
+ int flags = 2;
+ if (sz == 2 && args[1] == "NOWAIT")
{
- result = ndb_mgm_start_backup(m_mgmsrv, 0, &backupId, &reply);
+ flags = 0;
}
- else if (sz == 1 ||
- (sz == 3 &&
- args[1] == "WAIT" &&
- args[2] == "COMPLETED"))
+ else if (sz == 1 || (sz == 3 && args[1] == "WAIT" && args[2] == "COMPLETED"))
{
+ flags = 2;
ndbout_c("Waiting for completed, this may take several minutes");
- result = ndb_mgm_start_backup(m_mgmsrv, 2, &backupId, &reply);
}
- else if (sz == 3 &&
- args[1] == "WAIT" &&
- args[2] == "STARTED")
+ else if (sz == 3 && args[1] == "WAIT" && args[2] == "STARTED")
{
ndbout_c("Waiting for started, this may take several minutes");
- result = ndb_mgm_start_backup(m_mgmsrv, 1, &backupId, &reply);
+ flags = 1;
}
else
{
@@ -2570,48 +2695,81 @@ CommandInterpreter::executeStartBackup(char* parameters)
return -1;
}
+ NdbLogEventHandle log_handle= NULL;
+ struct ndb_logevent log_event;
+ if (flags == 2 && !interactive)
+ {
+ int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0, 0 };
+ log_handle = ndb_mgm_create_logevent_handle(m_mgmsrv, filter);
+ if (!log_handle)
+ {
+ ndbout << "Initializing start of backup failed" << endl;
+ printError();
+ return -1;
+ }
+ }
+ result = ndb_mgm_start_backup(m_mgmsrv, flags, &backupId, &reply);
+
if (result != 0) {
ndbout << "Backup failed" << endl;
printError();
-#if 0
- close(fd);
-#endif
+
+ if (log_handle)
+ ndb_mgm_destroy_logevent_handle(&log_handle);
return result;
}
-#if 0
- ndbout_c("Waiting for completed, this may take several minutes");
- char *tmp;
- char buf[1024];
+
+ /**
+ * If interactive, the event listener thread is already running
+ */
+ if (log_handle && !interactive)
{
- SocketInputStream in(fd);
int count = 0;
+ int retry = 0;
+ int res;
do {
- tmp = in.gets(buf, 1024);
- if(tmp)
+ if ((res= ndb_logevent_get_next(log_handle, &log_event, 60000)) > 0)
{
- ndbout << tmp;
- unsigned int id;
- if(sscanf(tmp, "%*[^:]: Backup %d ", &id) == 1 && id == backupId){
- count++;
- }
+ int print = 0;
+ switch (log_event.type) {
+ case NDB_LE_BackupStarted:
+ if (log_event.BackupStarted.backup_id == backupId)
+ print = 1;
+ break;
+ case NDB_LE_BackupCompleted:
+ if (log_event.BackupCompleted.backup_id == backupId)
+ print = 1;
+ break;
+ case NDB_LE_BackupAborted:
+ if (log_event.BackupAborted.backup_id == backupId)
+ print = 1;
+ break;
+ default:
+ break;
+ }
+ if (print)
+ {
+ Guard g(m_print_mutex);
+ printLogEvent(&log_event);
+ count++;
+ }
}
- } while(count < 2);
- }
+ else
+ {
+ retry++;
+ }
+ } while(res >= 0 && count < 2 && retry < 3);
- SocketInputStream in(fd, 10);
- do {
- tmp = in.gets(buf, 1024);
- if(tmp && tmp[0] != 0)
- {
- ndbout << tmp;
- }
- } while(tmp && tmp[0] != 0);
+ if (retry >= 3)
+ ndbout << "get backup event failed for " << retry << " times" << endl;
+
+ ndb_mgm_destroy_logevent_handle(&log_handle);
+ }
- close(fd);
-#endif
return 0;
}
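The non-interactive branch above follows the usual MGM API pattern for waiting on backup events: create a log event handle with a BACKUP category filter, then poll with ndb_logevent_get_next(). A minimal standalone sketch of the same pattern, assuming a management server reachable at "localhost:1186" (the connect string and loop termination are illustrative, not part of this change):

/* Hedged sketch: wait for completion (or abort) of a given backup via the MGM API. */
#include <mgmapi.h>
#include <ndb_logevent.h>

static int wait_for_backup(unsigned int backupId)
{
  NdbMgmHandle h = ndb_mgm_create_handle();
  if (!h)
    return -1;
  ndb_mgm_set_connectstring(h, "localhost:1186");      /* assumed address */
  if (ndb_mgm_connect(h, 0, 0, 0) == -1)
  {
    ndb_mgm_destroy_handle(&h);
    return -1;
  }

  int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0, 0 };
  NdbLogEventHandle le = ndb_mgm_create_logevent_handle(h, filter);
  if (!le)
  {
    ndb_mgm_disconnect(h);
    ndb_mgm_destroy_handle(&h);
    return -1;
  }

  struct ndb_logevent ev;
  int done = 0;
  /* Poll with a 60 s timeout per call, as the command interpreter does. */
  while (!done && ndb_logevent_get_next(le, &ev, 60000) > 0)
  {
    if (ev.type == NDB_LE_BackupCompleted &&
        ev.BackupCompleted.backup_id == backupId)
      done = 1;
    else if (ev.type == NDB_LE_BackupAborted &&
             ev.BackupAborted.backup_id == backupId)
      done = 1;
  }

  ndb_mgm_destroy_logevent_handle(&le);
  ndb_mgm_disconnect(h);
  ndb_mgm_destroy_handle(&h);
  return done ? 0 : -1;
}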
+
int
CommandInterpreter::executeAbortBackup(char* parameters)
{
diff --git a/ndb/src/mgmclient/Makefile.am b/ndb/src/mgmclient/Makefile.am
index 8ce8bf4da45..e1287532a07 100644
--- a/ndb/src/mgmclient/Makefile.am
+++ b/ndb/src/mgmclient/Makefile.am
@@ -21,7 +21,8 @@ libndbmgmclient_la_LIBADD = ../mgmapi/libmgmapi.la \
../common/logger/liblogger.la \
../common/portlib/libportlib.la \
../common/util/libgeneral.la \
- ../common/portlib/libportlib.la
+ ../common/portlib/libportlib.la \
+ ../common/debugger/libtrace.la
ndb_mgm_SOURCES = main.cpp
@@ -35,6 +36,7 @@ INCLUDES += -I$(top_srcdir)/ndb/include/mgmapi \
LDADD_LOC = $(noinst_LTLIBRARIES) \
../common/portlib/libportlib.la \
@readline_link@ \
+ $(top_builddir)/ndb/src/libndbclient.la \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/mysys/libmysys.a \
$(top_builddir)/strings/libmystrings.a \
diff --git a/ndb/src/mgmclient/main.cpp b/ndb/src/mgmclient/main.cpp
index 2d0103632b9..55617c74e3f 100644
--- a/ndb/src/mgmclient/main.cpp
+++ b/ndb/src/mgmclient/main.cpp
@@ -128,8 +128,6 @@ read_and_execute(int _try_reconnect)
int main(int argc, char** argv){
NDB_INIT(argv[0]);
- const char *_host = 0;
- int _port = 0;
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
diff --git a/ndb/src/mgmsrv/ConfigInfo.cpp b/ndb/src/mgmsrv/ConfigInfo.cpp
index 4c731eb9dd5..3600dfdeab3 100644
--- a/ndb/src/mgmsrv/ConfigInfo.cpp
+++ b/ndb/src/mgmsrv/ConfigInfo.cpp
@@ -458,7 +458,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::CI_INT,
"128",
"8",
- STR_VALUE(MAX_INT_RNIL) },
+ STR_VALUE(MAX_TABLES) },
{
CFG_DB_NO_ORDERED_INDEXES,
@@ -565,7 +565,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
true,
ConfigInfo::CI_INT,
"0",
- "1",
+ "0",
"2" },
{
@@ -2317,7 +2317,6 @@ ConfigInfo::ConfigInfo()
break;
case CI_BOOL:
{
- bool tmp_bool;
require(InitConfigFileParser::convertStringToBool(param._default, default_bool));
require(p->put(param._fname, default_bool));
break;
@@ -2325,7 +2324,6 @@ ConfigInfo::ConfigInfo()
case CI_INT:
case CI_INT64:
{
- Uint64 tmp_uint64;
require(InitConfigFileParser::convertStringToUint64(param._default, default_uint64));
require(p->put(param._fname, default_uint64));
break;
@@ -2841,7 +2839,7 @@ applyDefaultValues(InitConfigFileParser::Context & ctx,
Properties::Iterator it(defaults);
for(const char * name = it.first(); name != NULL; name = it.next()){
- ConfigInfo::Status st = ctx.m_info->getStatus(ctx.m_currentInfo, name);
+ (void) ctx.m_info->getStatus(ctx.m_currentInfo, name);
if(!ctx.m_currentSection->contains(name)){
switch (ctx.m_info->getType(ctx.m_currentInfo, name)){
case ConfigInfo::CI_INT:
@@ -3448,7 +3446,7 @@ saveInConfigValues(InitConfigFileParser::Context & ctx, const char * data){
if(!ctx.m_currentInfo->get(n, &info))
continue;
- Uint32 id = 0;
+ id = 0;
info->get("Id", &id);
if(id == KEY_INTERNAL)
@@ -3763,9 +3761,9 @@ check_node_vs_replicas(Vector<ConfigInfo::ConfigRuleSection>&sections,
}
}
if (db_host_count > 1 && node_group_warning.length() > 0)
- ndbout_c("Cluster configuration warning:\n%s",node_group_warning.c_str());
+ ctx.reportWarning("Cluster configuration warning:\n%s",node_group_warning.c_str());
if (db_host_count > 1 && arbitration_warning.length() > 0)
- ndbout_c("Cluster configuration warning:%s%s",arbitration_warning.c_str(),
+ ctx.reportWarning("Cluster configuration warning:%s%s",arbitration_warning.c_str(),
"\n Running arbitrator on the same host as a database node may"
"\n cause complete cluster shutdown in case of host failure.");
}
diff --git a/ndb/src/mgmsrv/ConfigInfo.hpp b/ndb/src/mgmsrv/ConfigInfo.hpp
index 08b12522807..6f9c8ad17b8 100644
--- a/ndb/src/mgmsrv/ConfigInfo.hpp
+++ b/ndb/src/mgmsrv/ConfigInfo.hpp
@@ -26,8 +26,11 @@
* A MANDATORY parameters must be specified in the config file
* An UNDEFINED parameter may or may not be specified in the config file
*/
-static const char* MANDATORY = (char*)~(UintPtr)0;// Default value for mandatory params.
-static const char* UNDEFINED = 0; // Default value for undefined params.
+
+// Default value for mandatory params.
+#define MANDATORY ((char*)~(UintPtr)0)
+// Default value for undefined params.
+#define UNDEFINED ((char*) 0)
/**
* @class ConfigInfo
diff --git a/ndb/src/mgmsrv/InitConfigFileParser.cpp b/ndb/src/mgmsrv/InitConfigFileParser.cpp
index fdfc0cde1a2..fc25197cf38 100644
--- a/ndb/src/mgmsrv/InitConfigFileParser.cpp
+++ b/ndb/src/mgmsrv/InitConfigFileParser.cpp
@@ -657,7 +657,7 @@ InitConfigFileParser::store_in_properties(Vector<struct my_option>& options,
if (options[i].var_type == GET_INT)
ctx.m_currentSection->put(options[i].name, (Uint32)value_int);
else
- ctx.m_currentSection->put(options[i].name, value_int);
+ ctx.m_currentSection->put64(options[i].name, value_int);
}
}
return true;
@@ -689,34 +689,35 @@ load_defaults(Vector<struct my_option>& options, const char* groups[])
BaseString extra_file;
BaseString group_suffix;
- const char *save_file = defaults_file;
- char *save_extra_file = defaults_extra_file;
- const char *save_group_suffix = defaults_group_suffix;
+ const char *save_file = my_defaults_file;
+ char *save_extra_file = my_defaults_extra_file;
+ const char *save_group_suffix = my_defaults_group_suffix;
- if (defaults_file)
+ if (my_defaults_file)
{
- file.assfmt("--defaults-file=%s", defaults_file);
+ file.assfmt("--defaults-file=%s", my_defaults_file);
argv[argc++] = file.c_str();
}
- if (defaults_extra_file)
+ if (my_defaults_extra_file)
{
- extra_file.assfmt("--defaults-extra-file=%s", defaults_extra_file);
+ extra_file.assfmt("--defaults-extra-file=%s", my_defaults_extra_file);
argv[argc++] = extra_file.c_str();
}
- if (defaults_group_suffix)
+ if (my_defaults_group_suffix)
{
- group_suffix.assfmt("--defaults-group-suffix=%s", defaults_group_suffix);
+ group_suffix.assfmt("--defaults-group-suffix=%s",
+ my_defaults_group_suffix);
argv[argc++] = group_suffix.c_str();
}
char ** tmp = (char**)argv;
int ret = load_defaults("my", groups, &argc, &tmp);
- defaults_file = save_file;
- defaults_extra_file = save_extra_file;
- defaults_group_suffix = save_group_suffix;
+ my_defaults_file = save_file;
+ my_defaults_extra_file = save_extra_file;
+ my_defaults_group_suffix = save_group_suffix;
if (ret == 0)
{
@@ -799,6 +800,7 @@ InitConfigFileParser::parse_mycnf()
/**
* Add ndbd, ndb_mgmd, api/mysqld
*/
+ Uint32 idx = options.size();
{
struct my_option opt;
bzero(&opt, sizeof(opt));
@@ -808,7 +810,6 @@ InitConfigFileParser::parse_mycnf()
opt.var_type = GET_STR;
opt.arg_type = REQUIRED_ARG;
options.push_back(opt);
- ndbd = &options.back();
opt.name = "ndb_mgmd";
opt.id = 256;
@@ -816,7 +817,6 @@ InitConfigFileParser::parse_mycnf()
opt.var_type = GET_STR;
opt.arg_type = REQUIRED_ARG;
options.push_back(opt);
- ndb_mgmd = &options.back();
opt.name = "mysqld";
opt.id = 256;
@@ -824,20 +824,22 @@ InitConfigFileParser::parse_mycnf()
opt.var_type = GET_STR;
opt.arg_type = REQUIRED_ARG;
options.push_back(opt);
- mysqld = &options.back();
- opt.name = "api";
+ opt.name = "ndbapi";
opt.id = 256;
opt.value = (gptr*)malloc(sizeof(char*));
opt.var_type = GET_STR;
opt.arg_type = REQUIRED_ARG;
options.push_back(opt);
- api = &options.back();
bzero(&opt, sizeof(opt));
options.push_back(opt);
- }
+ ndbd = &options[idx];
+ ndb_mgmd = &options[idx+1];
+ mysqld = &options[idx+2];
+ api = &options[idx+3];
+ }
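The reshuffle above remembers the starting index and takes the addresses of the option entries only after the last push_back. A growing Vector may reallocate its backing store, which would leave the earlier &options.back() pointers dangling. A tiny illustration of the same rule with a generic container (illustrative names; std::vector is used only for brevity):

#include <vector>

// Sketch: taking element addresses before the container stops growing is
// unsafe, because push_back may reallocate and invalidate them.
static void pointer_into_growing_vector()
{
  std::vector<int> v;
  size_t idx = v.size();     // remember where our entries will start
  v.push_back(1);
  v.push_back(2);            // may reallocate; any earlier &v.back() would dangle
  int *first  = &v[idx];     // safe: taken after all growth is done
  int *second = &v[idx + 1];
  (void)first;
  (void)second;
}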
Context ctx(m_info, m_errstream);
const char *groups[]= { "cluster_config", 0 };
diff --git a/ndb/src/mgmsrv/Makefile.am b/ndb/src/mgmsrv/Makefile.am
index 88622c08e53..3d1845957e6 100644
--- a/ndb/src/mgmsrv/Makefile.am
+++ b/ndb/src/mgmsrv/Makefile.am
@@ -38,7 +38,7 @@ INCLUDES_LOC = -I$(top_srcdir)/ndb/src/ndbapi \
-I$(top_srcdir)/ndb/src/common/mgmcommon \
-I$(top_srcdir)/ndb/src/mgmclient
-LDADD_LOC = $(top_builddir)/ndb/src/mgmclient/CommandInterpreter.o \
+LDADD_LOC = $(top_builddir)/ndb/src/mgmclient/CommandInterpreter.lo \
$(top_builddir)/ndb/src/libndbclient.la \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/mysys/libmysys.a \
diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp
index 5818e7fe3ae..d68f42cbbb4 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.cpp
@@ -100,6 +100,8 @@ MgmtSrvr::logLevelThread_C(void* m)
extern EventLogger g_eventLogger;
+#ifdef NOT_USED
+
static NdbOut&
operator<<(NdbOut& out, const LogLevel & ll)
{
@@ -109,6 +111,7 @@ operator<<(NdbOut& out, const LogLevel & ll)
out << "]";
return out;
}
+#endif
void
MgmtSrvr::logLevelThreadRun()
@@ -224,10 +227,10 @@ MgmtSrvr::startEventLog()
}
}
-void
-MgmtSrvr::stopEventLog()
+void
+MgmtSrvr::stopEventLog()
{
- // Nothing yet
+ g_eventLogger.close();
}
class ErrorItem
@@ -624,6 +627,16 @@ MgmtSrvr::start(BaseString &error_string)
ndbout_c("This is probably a bug.");
}
+ /*
+ Set the API_REGREQ frequency quite high:
+
+ a 100 ms interval makes sure we have fairly up-to-date
+ info from the nodes. This ensures that the info does not
+ depend on the heartbeat settings in the
+ configuration.
+ */
+ theFacade->theClusterMgr->set_max_api_reg_req_interval(100);
+
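On the client side (ClusterMgr::threadMain, further down in this patch) the cap is combined with the per-node heartbeat frequency: a registration request is sent as soon as either threshold is reached, so the effective polling period is the smaller of the two values. A small sketch of that relation (illustrative helper, not part of the patch):

// Effective API_REGREQ period per node once the cap is in place:
// whichever of the configured heartbeat frequency and the imposed
// maximum (100 ms here) is reached first.
static unsigned effective_reg_req_period(unsigned hbFrequency,
                                         unsigned maxApiRegReqInterval)
{
  return hbFrequency < maxApiRegReqInterval ? hbFrequency
                                            : maxApiRegReqInterval;
}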
TransporterRegistry *reg = theFacade->get_registry();
for(unsigned int i=0;i<reg->m_transporter_interface.size();i++) {
BaseString msg;
@@ -1123,7 +1136,6 @@ int MgmtSrvr::sendSTOP_REQ(const Vector<NodeId> &node_ids,
break;
}
case GSN_STOP_CONF:{
- const StopConf * const ref = CAST_CONSTPTR(StopConf, signal->getDataPtr());
const NodeId nodeId = refToNode(signal->header.theSendersBlockRef);
#ifdef VM_TRACE
ndbout_c("Node %d single user mode", nodeId);
@@ -1153,8 +1165,6 @@ int MgmtSrvr::sendSTOP_REQ(const Vector<NodeId> &node_ids,
break;
}
case GSN_NODE_FAILREP:{
- const NodeFailRep * const rep =
- CAST_CONSTPTR(NodeFailRep, signal->getDataPtr());
break;
}
default:
@@ -1342,7 +1352,7 @@ int MgmtSrvr::restartNodes(const Vector<NodeId> &node_ids,
for (unsigned i = 0; i < node_ids.size(); i++)
{
- int result = start(node_ids[i]);
+ start(node_ids[i]);
}
return 0;
}
@@ -2494,6 +2504,8 @@ MgmtSrvr::startBackup(Uint32& backupId, int waitCompleted)
ndbout_c("I'm not master resending to %d", nodeId);
#endif
do_send = 1; // try again
+ if (!theFacade->get_node_alive(nodeId))
+ m_master_node = nodeId = 0;
continue;
}
event.Event = BackupEvent::BackupFailedToStart;
diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp
index 59f1487f7dc..6ccbbd20b09 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.hpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.hpp
@@ -49,6 +49,7 @@ class Ndb_mgmd_event_service : public EventLoggerBase
friend class MgmtSrvr;
public:
struct Event_listener : public EventLoggerBase {
+ Event_listener() {}
NDB_SOCKET_TYPE m_socket;
Uint32 m_parsable;
};
@@ -597,7 +598,6 @@ private:
*/
enum WaitSignalType {
NO_WAIT, // We don't expect to receive any signal
- WAIT_SET_VAR, // Accept SET_VAR_CONF and SET_VAR_REF
WAIT_SUBSCRIBE_CONF // Accept event subscription confirmation
};
diff --git a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp
index a2dec949f67..b7ff4df7012 100644
--- a/ndb/src/mgmsrv/Services.cpp
+++ b/ndb/src/mgmsrv/Services.cpp
@@ -332,19 +332,6 @@ MgmApiSession::runSession()
switch(ctx.m_status) {
case Parser_t::UnknownCommand:
-#ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT
- /* Backwards compatibility for old NDBs that still use
- * the old "GET CONFIG" command.
- */
- size_t i;
- for(i=0; i<strlen(ctx.m_currentToken); i++)
- ctx.m_currentToken[i] = toupper(ctx.m_currentToken[i]);
-
- if(strncmp("GET CONFIG ",
- ctx.m_currentToken,
- strlen("GET CONFIG ")) == 0)
- getConfig_old(ctx);
-#endif /* MGM_GET_CONFIG_BACKWARDS_COMPAT */
break;
default:
break;
@@ -359,32 +346,6 @@ MgmApiSession::runSession()
DBUG_VOID_RETURN;
}
-#ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT
-void
-MgmApiSession::getConfig_old(Parser_t::Context &ctx) {
- Properties args;
-
- Uint32 version, node;
-
- if(sscanf(ctx.m_currentToken, "GET CONFIG %d %d",
- (int *)&version, (int *)&node) != 2) {
- m_output->println("Expected 2 arguments for GET CONFIG");
- return;
- }
-
- /* Put arguments in properties object so we can call the real function */
- args.put("version", version);
- args.put("node", node);
- getConfig_common(ctx, args, true);
-}
-#endif /* MGM_GET_CONFIG_BACKWARDS_COMPAT */
-
-void
-MgmApiSession::getConfig(Parser_t::Context &ctx,
- const class Properties &args) {
- getConfig_common(ctx, args);
-}
-
static Properties *
backward(const char * base, const Properties* reply){
Properties * ret = new Properties();
@@ -560,9 +521,9 @@ MgmApiSession::get_nodeid(Parser_t::Context &,
}
void
-MgmApiSession::getConfig_common(Parser_t::Context &,
- const class Properties &args,
- bool compat) {
+MgmApiSession::getConfig(Parser_t::Context &,
+ const class Properties &args)
+{
Uint32 version, node = 0;
args.get("version", &version);
@@ -576,47 +537,6 @@ MgmApiSession::getConfig_common(Parser_t::Context &,
return;
}
- if(version > 0 && version < makeVersion(3, 5, 0) && compat){
- Properties *reply = backward("", conf->m_oldConfig);
- reply->put("Version", version);
- reply->put("LocalNodeId", node);
-
- backward("", reply);
- //reply->print();
-
- const Uint32 size = reply->getPackedSize();
- Uint32 *buffer = new Uint32[size/4+1];
-
- reply->pack(buffer);
- delete reply;
-
- const int uurows = (size + 44)/45;
- char * uubuf = new char[uurows * 62+5];
-
- const int uusz = uuencode_mem(uubuf, (char *)buffer, size);
- delete[] buffer;
-
- m_output->println("GET CONFIG %d %d %d %d %d",
- 0, version, node, size, uusz);
-
- m_output->println("begin 664 Ndb_cfg.bin");
-
- /* XXX Need to write directly to the socket, because the uubuf is not
- * NUL-terminated. This could/should probably be done in a nicer way.
- */
- write_socket(m_socket, MAX_WRITE_TIMEOUT, uubuf, uusz);
- delete[] uubuf;
-
- m_output->println("end");
- m_output->println("");
- return;
- }
-
- if(compat){
- m_output->println("GET CONFIG %d %d %d %d %d",1, version, 0, 0, 0);
- return;
- }
-
if(node != 0){
bool compatible;
switch (m_mgmsrv.getNodeType(node)) {
@@ -645,14 +565,13 @@ MgmApiSession::getConfig_common(Parser_t::Context &,
NdbMutex_Lock(m_mgmsrv.m_configMutex);
const ConfigValues * cfg = &conf->m_configValues->m_config;
- const Uint32 size = cfg->getPackedSize();
UtilBuffer src;
cfg->pack(src);
NdbMutex_Unlock(m_mgmsrv.m_configMutex);
char *tmp_str = (char *) malloc(base64_needed_encoded_length(src.length()));
- int res = base64_encode(src.get_data(), src.length(), tmp_str);
+ (void) base64_encode(src.get_data(), src.length(), tmp_str);
m_output->println("get config reply");
m_output->println("result: Ok");
@@ -835,8 +754,6 @@ MgmApiSession::setClusterLogLevel(Parser<MgmApiSession>::Context &,
const char *reply= "set cluster loglevel reply";
Uint32 node, level, cat;
BaseString errorString;
- SetLogLevelOrd logLevel;
- int result;
DBUG_ENTER("MgmApiSession::setClusterLogLevel");
args.get("node", &node);
args.get("category", &cat);
@@ -844,8 +761,7 @@ MgmApiSession::setClusterLogLevel(Parser<MgmApiSession>::Context &,
DBUG_PRINT("enter",("node=%d, category=%d, level=%d", node, cat, level));
- /* XXX should use constants for this value */
- if(level > 15) {
+ if(level > NDB_MGM_MAX_LOGLEVEL) {
m_output->println(reply);
m_output->println("result: Invalid loglevel %d", level);
m_output->println("");
@@ -883,14 +799,12 @@ MgmApiSession::setLogLevel(Parser<MgmApiSession>::Context &,
Uint32 node = 0, level = 0, cat;
BaseString errorString;
SetLogLevelOrd logLevel;
- int result;
logLevel.clear();
args.get("node", &node);
args.get("category", &cat);
args.get("level", &level);
- /* XXX should use constants for this value */
- if(level > 15) {
+ if(level > NDB_MGM_MAX_LOGLEVEL) {
m_output->println("set loglevel reply");
m_output->println("result: Invalid loglevel", errorString.c_str());
m_output->println("");
@@ -1312,6 +1226,8 @@ MgmApiSession::setLogFilter(Parser_t::Context &ctx,
m_output->println("");
}
+#ifdef NOT_USED
+
static NdbOut&
operator<<(NdbOut& out, const LogLevel & ll)
{
@@ -1321,6 +1237,7 @@ operator<<(NdbOut& out, const LogLevel & ll)
out << "]";
return out;
}
+#endif
void
Ndb_mgmd_event_service::log(int eventType, const Uint32* theData, NodeId nodeId){
@@ -1590,7 +1507,7 @@ MgmApiSession::listen_event(Parser<MgmApiSession>::Context & ctx,
}
int level = atoi(spec[1].c_str());
- if(level < 0 || level > 15){
+ if(level < 0 || level > NDB_MGM_MAX_LOGLEVEL){
msg.appfmt("Invalid level: >%s<", spec[1].c_str());
result = -1;
goto done;
diff --git a/ndb/src/mgmsrv/Services.hpp b/ndb/src/mgmsrv/Services.hpp
index 4d904e8369e..48ba55a8b12 100644
--- a/ndb/src/mgmsrv/Services.hpp
+++ b/ndb/src/mgmsrv/Services.hpp
@@ -24,9 +24,6 @@
#include "MgmtSrvr.hpp"
-/** Undefine this to remove backwards compatibility for "GET CONFIG". */
-#define MGM_GET_CONFIG_BACKWARDS_COMPAT
-
class MgmApiSession : public SocketServer::Session
{
static void stop_session_if_timed_out(SocketServer::Session *_s, void *data);
@@ -42,9 +39,6 @@ private:
char m_err_str[1024];
int m_stopSelf; // -1 is restart, 0 do nothing, 1 stop
- void getConfig_common(Parser_t::Context &ctx,
- const class Properties &args,
- bool compat = false);
const char *get_error_text(int err_no)
{ return m_mgmsrv.getErrorText(err_no, m_err_str, sizeof(m_err_str)); }
@@ -55,9 +49,6 @@ public:
void getStatPort(Parser_t::Context &ctx, const class Properties &args);
void getConfig(Parser_t::Context &ctx, const class Properties &args);
-#ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT
- void getConfig_old(Parser_t::Context &ctx);
-#endif /* MGM_GET_CONFIG_BACKWARDS_COMPAT */
void get_nodeid(Parser_t::Context &ctx, const class Properties &args);
void getVersion(Parser_t::Context &ctx, const class Properties &args);
diff --git a/ndb/src/mgmsrv/main.cpp b/ndb/src/mgmsrv/main.cpp
index 76b7ee6f146..80a832196bb 100644
--- a/ndb/src/mgmsrv/main.cpp
+++ b/ndb/src/mgmsrv/main.cpp
@@ -194,7 +194,6 @@ static void usage()
*/
int main(int argc, char** argv)
{
- int mgm_connect_result;
NDB_INIT(argv[0]);
diff --git a/ndb/src/ndbapi/ClusterMgr.cpp b/ndb/src/ndbapi/ClusterMgr.cpp
index 060e5f71b6c..d3946dddfb7 100644
--- a/ndb/src/ndbapi/ClusterMgr.cpp
+++ b/ndb/src/ndbapi/ClusterMgr.cpp
@@ -68,6 +68,7 @@ ClusterMgr::ClusterMgr(TransporterFacade & _facade):
clusterMgrThreadMutex = NdbMutex_Create();
waitForHBCond= NdbCondition_Create();
waitingForHB= false;
+ m_max_api_reg_req_interval= 0xFFFFFFFF; // effectively unlimited (max Uint32)
noOfAliveNodes= 0;
noOfConnectedNodes= 0;
theClusterMgrThread= 0;
@@ -251,7 +252,7 @@ ClusterMgr::threadMain( ){
* Start of Secure area for use of Transporter
*/
theFacade.lock_mutex();
- for (int i = 1; i < MAX_NODES; i++){
+ for (int i = 1; i < MAX_NDB_NODES; i++){
/**
* Send register request (heartbeat) to all available nodes
* at specified timing intervals
@@ -272,7 +273,8 @@ ClusterMgr::threadMain( ){
}
theNode.hbCounter += timeSlept;
- if (theNode.hbCounter >= theNode.hbFrequency) {
+ if (theNode.hbCounter >= m_max_api_reg_req_interval ||
+ theNode.hbCounter >= theNode.hbFrequency) {
/**
* It is now time to send a new Heartbeat
*/
@@ -281,13 +283,6 @@ ClusterMgr::threadMain( ){
theNode.hbCounter = 0;
}
- /**
- * If the node is of type REP,
- * then the receiver of the signal should be API_CLUSTERMGR
- */
- if (theNode.m_info.m_type == NodeInfo::REP) {
- signal.theReceiversBlockNumber = API_CLUSTERMGR;
- }
#ifdef DEBUG_REG
ndbout_c("ClusterMgr: Sending API_REGREQ to node %d", (int)nodeId);
#endif
@@ -405,7 +400,7 @@ ClusterMgr::execAPI_REGCONF(const Uint32 * theData){
node.m_state = apiRegConf->nodeState;
if (node.compatible && (node.m_state.startLevel == NodeState::SL_STARTED ||
- node.m_state.startLevel == NodeState::SL_SINGLEUSER)){
+ node.m_state.getSingleUserMode())){
set_node_alive(node, true);
} else {
set_node_alive(node, false);
diff --git a/ndb/src/ndbapi/ClusterMgr.hpp b/ndb/src/ndbapi/ClusterMgr.hpp
index b05b73c8324..467b18f2330 100644
--- a/ndb/src/ndbapi/ClusterMgr.hpp
+++ b/ndb/src/ndbapi/ClusterMgr.hpp
@@ -50,6 +50,7 @@ public:
void startThread();
void forceHB();
+ void set_max_api_reg_req_interval(unsigned int millisec) { m_max_api_reg_req_interval = millisec; }
private:
void threadMain();
@@ -83,6 +84,7 @@ public:
Uint32 m_connect_count;
private:
+ Uint32 m_max_api_reg_req_interval;
Uint32 noOfAliveNodes;
Uint32 noOfConnectedNodes;
Node theNodes[MAX_NODES];
@@ -180,6 +182,7 @@ private:
ArbitSignalData data;
NDB_TICKS timestamp;
+ ArbitSignal() {}
inline void init(GlobalSignalNumber aGsn, const Uint32* aData) {
gsn = aGsn;
if (aData != NULL)
diff --git a/ndb/src/ndbapi/DictCache.cpp b/ndb/src/ndbapi/DictCache.cpp
index 82e8d82bc24..6a815067233 100644
--- a/ndb/src/ndbapi/DictCache.cpp
+++ b/ndb/src/ndbapi/DictCache.cpp
@@ -141,7 +141,7 @@ void GlobalDictCache::printCache()
}
NdbTableImpl *
-GlobalDictCache::get(const char * name)
+GlobalDictCache::get(const char * name, int *error)
{
DBUG_ENTER("GlobalDictCache::get");
DBUG_PRINT("enter", ("name: %s", name));
@@ -151,6 +151,11 @@ GlobalDictCache::get(const char * name)
versions = m_tableHash.getData(name, len);
if(versions == 0){
versions = new Vector<TableVersion>(2);
+ if (versions == NULL)
+ {
+ *error = -1;
+ DBUG_RETURN(0);
+ }
m_tableHash.insertKey(name, len, 0, versions);
}
@@ -180,7 +185,11 @@ GlobalDictCache::get(const char * name)
tmp.m_impl = 0;
tmp.m_status = RETREIVING;
tmp.m_refCount = 1; // The one retrieving it
- versions->push_back(tmp);
+ if (versions->push_back(tmp))
+ {
+ *error = -1;
+ DBUG_RETURN(0);
+ }
DBUG_RETURN(0);
}
diff --git a/ndb/src/ndbapi/DictCache.hpp b/ndb/src/ndbapi/DictCache.hpp
index 4b569c114c9..db90a07d487 100644
--- a/ndb/src/ndbapi/DictCache.hpp
+++ b/ndb/src/ndbapi/DictCache.hpp
@@ -67,7 +67,7 @@ public:
GlobalDictCache();
~GlobalDictCache();
- NdbTableImpl * get(const char * name);
+ NdbTableImpl * get(const char * name, int *error);
NdbTableImpl* put(const char * name, NdbTableImpl *);
void drop(NdbTableImpl *);
diff --git a/ndb/src/ndbapi/Makefile.am b/ndb/src/ndbapi/Makefile.am
index 85013b540dc..1a5d10eae5b 100644
--- a/ndb/src/ndbapi/Makefile.am
+++ b/ndb/src/ndbapi/Makefile.am
@@ -48,7 +48,8 @@ libndbapi_la_SOURCES = \
DictCache.cpp \
ndb_cluster_connection.cpp \
NdbBlob.cpp \
- SignalSender.cpp
+ SignalSender.cpp \
+ ObjectMap.cpp
INCLUDES_LOC = -I$(top_srcdir)/ndb/src/mgmapi
diff --git a/ndb/src/ndbapi/Ndb.cpp b/ndb/src/ndbapi/Ndb.cpp
index 80bf0315b9c..dcdee3d4ea1 100644
--- a/ndb/src/ndbapi/Ndb.cpp
+++ b/ndb/src/ndbapi/Ndb.cpp
@@ -56,6 +56,8 @@ NdbTransaction* Ndb::doConnect(Uint32 tConNode)
// We have connections now to the desired node. Return
//****************************************************************************
DBUG_RETURN(getConnectedNdbTransaction(tConNode));
+ } else if (TretCode < 0) {
+ DBUG_RETURN(NULL);
} else if (TretCode != 0) {
tAnyAlive = 1;
}//if
@@ -79,6 +81,8 @@ NdbTransaction* Ndb::doConnect(Uint32 tConNode)
// We have connections now to the desired node. Return
//****************************************************************************
DBUG_RETURN(getConnectedNdbTransaction(tNode));
+ } else if (TretCode < 0) {
+ DBUG_RETURN(NULL);
} else if (TretCode != 0) {
tAnyAlive= 1;
}//if
@@ -107,6 +111,8 @@ NdbTransaction* Ndb::doConnect(Uint32 tConNode)
// We have connections now to the desired node. Return
//****************************************************************************
DBUG_RETURN(getConnectedNdbTransaction(tNode));
+ } else if (TretCode < 0) {
+ DBUG_RETURN(NULL);
} else if (TretCode != 0) {
tAnyAlive= 1;
}//if
@@ -176,6 +182,7 @@ Ndb::NDB_connect(Uint32 tNode)
nodeSequence = tp->getNodeSequence(tNode);
bool node_is_alive = tp->get_node_alive(tNode);
if (node_is_alive) {
+ DBUG_PRINT("info",("Sending signal to node %u", tNode));
tReturnCode = tp->sendSignal(tSignal, tNode);
releaseSignal(tSignal);
if (tReturnCode != -1) {
@@ -207,6 +214,11 @@ Ndb::NDB_connect(Uint32 tNode)
DBUG_PRINT("info",
("unsuccessful connect tReturnCode %d, tNdbCon->Status() %d",
tReturnCode, tNdbCon->Status()));
+ if (theError.code == 299)
+ {
+ // single user mode, so no need to retry with another node
+ DBUG_RETURN(-1);
+ }
DBUG_RETURN(3);
}//if
}//Ndb::NDB_connect()
@@ -268,8 +280,6 @@ Ndb::waitUntilReady(int timeout)
DBUG_ENTER("Ndb::waitUntilReady");
int secondsCounter = 0;
int milliCounter = 0;
- int noChecksSinceFirstAliveFound = 0;
- int id;
if (theInitState != Initialised) {
// Ndb::init is not called
@@ -440,7 +450,11 @@ Ndb::startTransactionLocal(Uint32 aPriority, Uint32 nodeId)
theRemainingStartTransactions--;
NdbTransaction* tConNext = theTransactionList;
- tConnection->init();
+ if (tConnection->init())
+ {
+ theError.code = tConnection->theError.code;
+ DBUG_RETURN(NULL);
+ }
theTransactionList = tConnection; // into a transaction list.
tConnection->next(tConNext); // Add the active connection object
tConnection->setTransactionId(tFirstTransId);
@@ -753,17 +767,27 @@ Ndb::getNodeId()
}
/****************************************************************************
-Uint64 getTupleIdFromNdb( Uint32 aTableId, Uint32 cacheSize );
-
-Parameters: aTableId : The TableId.
- cacheSize: Prefetch this many values
-Remark: Returns a new TupleId to the application.
- The TupleId comes from SYSTAB_0 where SYSKEY_0 = TableId.
- It is initialized to (TableId << 48) + 1 in NdbcntrMain.cpp.
+int getAutoIncrementValue( const char* aTableName,
+ Uint64 & autoValue,
+ Uint32 cacheSize,
+ Uint64 step,
+ Uint64 start);
+
+Parameters: aTableName (IN) : The table name.
+ autoValue (OUT) : Returns new autoincrement value
+ cacheSize (IN) : Prefetch this many values
+ step (IN) : Specifies the step between the
+ autoincrement values.
+ start (IN) : Start value for first value
+Remark: Returns a new autoincrement value to the application.
+ The autoincrement values can be increased by steps
+ (default 1) and a number of values can be prefetched
+ by specifying cacheSize (default 10).
****************************************************************************/
int
Ndb::getAutoIncrementValue(const char* aTableName,
- Uint64 & tupleId, Uint32 cacheSize)
+ Uint64 & autoValue, Uint32 cacheSize,
+ Uint64 step, Uint64 start)
{
DBUG_ENTER("Ndb::getAutoIncrementValue");
BaseString internal_tabname(internalize_table_name(aTableName));
@@ -774,15 +798,17 @@ Ndb::getAutoIncrementValue(const char* aTableName,
theError.code = theDictionary->getNdbError().code;
DBUG_RETURN(-1);
}
- if (getTupleIdFromNdb(info, tupleId, cacheSize) == -1)
+ DBUG_PRINT("info", ("step %lu", (ulong) step));
+ if (getTupleIdFromNdb(info, autoValue, cacheSize, step, start) == -1)
DBUG_RETURN(-1);
- DBUG_PRINT("info", ("value %lu", (ulong) tupleId));
+ DBUG_PRINT("info", ("value %lu", (ulong) autoValue));
DBUG_RETURN(0);
}
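A minimal usage sketch of the extended signature, assuming an already-initialised Ndb object and a table "t1" with an autoincrement primary key (both assumptions for illustration only):

#include <NdbApi.hpp>

// Hedged sketch: fetch the next value of an autoincrement sequence with
// step 10 and start 5, prefetching 10 values per SYSTAB_0 round trip.
static int next_auto_value(Ndb *myNdb, Uint64 &autoValue)
{
  if (myNdb->getAutoIncrementValue("t1", autoValue, 10, 10, 5) == -1)
    return myNdb->getNdbError().code;   // e.g. 4000 on allocation failure
  return 0;                             // autoValue now holds 5, 15, 25, ...
}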
int
Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable,
- Uint64 & tupleId, Uint32 cacheSize)
+ Uint64 & autoValue, Uint32 cacheSize,
+ Uint64 step, Uint64 start)
{
DBUG_ENTER("Ndb::getAutoIncrementValue");
assert(aTable != 0);
@@ -795,36 +821,73 @@ Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable,
theError.code = theDictionary->getNdbError().code;
DBUG_RETURN(-1);
}
- if (getTupleIdFromNdb(info, tupleId, cacheSize) == -1)
+ DBUG_PRINT("info", ("step %lu", (ulong) step));
+ if (getTupleIdFromNdb(info, autoValue, cacheSize, step, start) == -1)
DBUG_RETURN(-1);
- DBUG_PRINT("info", ("value %lu", (ulong)tupleId));
+ DBUG_PRINT("info", ("value %lu", (ulong) autoValue));
DBUG_RETURN(0);
}
int
Ndb::getTupleIdFromNdb(Ndb_local_table_info* info,
- Uint64 & tupleId, Uint32 cacheSize)
+ Uint64 & tupleId, Uint32 cacheSize,
+ Uint64 step, Uint64 start)
{
+/*
+ Returns a new TupleId to the application.
+ The TupleId comes from SYSTAB_0 where SYSKEY_0 = TableId.
+ It is initialized to (TableId << 48) + 1 in NdbcntrMain.cpp.
+ In most cases step= start= 1, in which case we get:
+ 1,2,3,4,5,...
+ If step=10 and start=5 and first number is 1, we get:
+ 5,15,25,35,...
+*/
DBUG_ENTER("Ndb::getTupleIdFromNdb");
- if (info->m_first_tuple_id != info->m_last_tuple_id)
+ DBUG_PRINT("info", ("Step %lu (%lu,%lu)", (ulong) step, (ulong) info->m_first_tuple_id, (ulong) info->m_last_tuple_id));
+ /*
+ Check if the next value can be taken from the pre-fetched
+ sequence.
+ */
+ if (info->m_first_tuple_id != info->m_last_tuple_id &&
+ info->m_first_tuple_id + step <= info->m_last_tuple_id)
{
assert(info->m_first_tuple_id < info->m_last_tuple_id);
- tupleId = ++info->m_first_tuple_id;
- DBUG_PRINT("info", ("next cached value %lu", (ulong)tupleId));
+ info->m_first_tuple_id += step;
+ tupleId = info->m_first_tuple_id;
+ DBUG_PRINT("info", ("Next cached value %lu", (ulong) tupleId));
}
else
{
+ /*
+ If the start value is greater than step, it is ignored
+ */
+ Uint64 offset = (start > step) ? 1 : start;
+
+ /*
+ Pre-fetch a number of values depending on cacheSize
+ */
if (cacheSize == 0)
cacheSize = 1;
- DBUG_PRINT("info", ("reading %u values from database", (uint)cacheSize));
+
+ DBUG_PRINT("info", ("Reading %u values from database", (uint)cacheSize));
/*
* reserve next cacheSize entries in db. adds cacheSize to NEXTID
- * and returns first tupleId in the new range.
+ * and returns the first tupleId in the new range. If tupleIds are
+ * incremented in steps, the cacheSize is multiplied by the step size.
*/
- Uint64 opValue = cacheSize;
+ Uint64 opValue = cacheSize * step;
+
if (opTupleIdOnNdb(info, opValue, 0) == -1)
DBUG_RETURN(-1);
- tupleId = opValue;
+ DBUG_PRINT("info", ("Next value fetched from database %lu", (ulong) opValue));
+ DBUG_PRINT("info", ("Increasing %lu by offset %lu, increment is %lu", (ulong) (ulong) opValue, (ulong) offset, (ulong) step));
+ Uint64 current, next;
+ Uint64 div = ((Uint64) (opValue + step - offset)) / step;
+ next = div * step + offset;
+ current = (next < step) ? next : next - step;
+ tupleId = (opValue <= current) ? current : next;
+ DBUG_PRINT("info", ("Returning %lu", (ulong) tupleId));
+ info->m_first_tuple_id = tupleId;
}
DBUG_RETURN(0);
}
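The rounding block above maps the counter value reserved in SYSTAB_0 onto the sequence start, start+step, start+2*step, ... A standalone sketch of the same arithmetic with a few worked values (the function name is illustrative):

#include <stdio.h>

typedef unsigned long long U64;

// Mirror of the rounding in getTupleIdFromNdb: align a reserved counter
// value to the autoincrement sequence defined by (start, step).
static U64 align_to_sequence(U64 opValue, U64 step, U64 start)
{
  U64 offset = (start > step) ? 1 : start;   // a start beyond step is ignored
  U64 div = (opValue + step - offset) / step;
  U64 next = div * step + offset;
  U64 current = (next < step) ? next : next - step;
  return (opValue <= current) ? current : next;
}

int main()
{
  printf("%llu\n", align_to_sequence(1, 10, 5));  // 5  (comment example: 5, 15, 25, ...)
  printf("%llu\n", align_to_sequence(7, 10, 5));  // 15 (7 is already past 5)
  printf("%llu\n", align_to_sequence(3, 1, 1));   // 3  (plain sequence is unchanged)
  return 0;
}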
@@ -983,6 +1046,8 @@ Ndb::opTupleIdOnNdb(Ndb_local_table_info* info, Uint64 & opValue, Uint32 op)
Uint64 tValue;
NdbRecAttr* tRecAttrResult;
+ NdbError savedError;
+
CHECK_STATUS_MACRO_ZERO;
BaseString currentDb(getDatabaseName());
@@ -1049,9 +1114,9 @@ Ndb::opTupleIdOnNdb(Ndb_local_table_info* info, Uint64 & opValue, Uint32 op)
}
else
{
- DBUG_PRINT("info",
- ("Setting next auto increment value (db) to %lu",
- (ulong)opValue));
+ DBUG_PRINT("info",
+ ("Setting next auto increment value (db) to %lu",
+ (ulong)opValue));
info->m_first_tuple_id = info->m_last_tuple_id = opValue - 1;
}
break;
@@ -1077,7 +1142,12 @@ Ndb::opTupleIdOnNdb(Ndb_local_table_info* info, Uint64 & opValue, Uint32 op)
error_handler:
theError.code = tConnection->theError.code;
+
+ savedError = theError;
+
this->closeTransaction(tConnection);
+ theError = savedError;
+
error_return:
// Restore current name space
setDatabaseName(currentDb.c_str());
@@ -1113,28 +1183,37 @@ const char * Ndb::getCatalogName() const
}
-void Ndb::setCatalogName(const char * a_catalog_name)
+int Ndb::setCatalogName(const char * a_catalog_name)
{
if (a_catalog_name)
{
- theImpl->m_dbname.assign(a_catalog_name);
- theImpl->update_prefix();
+ if (!theImpl->m_dbname.assign(a_catalog_name) ||
+ theImpl->update_prefix())
+ {
+ theError.code = 4000;
+ return -1;
+ }
}
+ return 0;
}
-
const char * Ndb::getSchemaName() const
{
return theImpl->m_schemaname.c_str();
}
-void Ndb::setSchemaName(const char * a_schema_name)
+int Ndb::setSchemaName(const char * a_schema_name)
{
if (a_schema_name) {
- theImpl->m_schemaname.assign(a_schema_name);
- theImpl->update_prefix();
+ if (!theImpl->m_schemaname.assign(a_schema_name) ||
+ theImpl->update_prefix())
+ {
+ theError.code = 4000;
+ return -1;
+ }
}
+ return 0;
}
/*
@@ -1145,9 +1224,9 @@ const char * Ndb::getDatabaseName() const
return getCatalogName();
}
-void Ndb::setDatabaseName(const char * a_catalog_name)
+int Ndb::setDatabaseName(const char * a_catalog_name)
{
- setCatalogName(a_catalog_name);
+ return setCatalogName(a_catalog_name);
}
const char * Ndb::getDatabaseSchemaName() const
@@ -1155,9 +1234,9 @@ const char * Ndb::getDatabaseSchemaName() const
return getSchemaName();
}
-void Ndb::setDatabaseSchemaName(const char * a_schema_name)
+int Ndb::setDatabaseSchemaName(const char * a_schema_name)
{
- setSchemaName(a_schema_name);
+ return setSchemaName(a_schema_name);
}
bool Ndb::usingFullyQualifiedNames()
@@ -1271,6 +1350,11 @@ const BaseString
Ndb::getDatabaseFromInternalName(const char * internalName)
{
char * databaseName = new char[strlen(internalName) + 1];
+ if (databaseName == NULL)
+ {
+ errno = ENOMEM;
+ return BaseString(NULL);
+ }
strcpy(databaseName, internalName);
register char *ptr = databaseName;
@@ -1287,6 +1371,11 @@ const BaseString
Ndb::getSchemaFromInternalName(const char * internalName)
{
char * schemaName = new char[strlen(internalName)];
+ if (schemaName == NULL)
+ {
+ errno = ENOMEM;
+ return BaseString(NULL);
+ }
register const char *ptr1 = internalName;
/* Scan name for the second table_name_separator */
diff --git a/ndb/src/ndbapi/NdbBlob.cpp b/ndb/src/ndbapi/NdbBlob.cpp
index 7986f5d14ba..611d0396f96 100644
--- a/ndb/src/ndbapi/NdbBlob.cpp
+++ b/ndb/src/ndbapi/NdbBlob.cpp
@@ -388,8 +388,6 @@ NdbBlob::setPartKeyValue(NdbOperation* anOp, Uint32 part)
DBUG_ENTER("NdbBlob::setPartKeyValue");
DBUG_PRINT("info", ("dist=%u part=%u key=", getDistKey(part), part));
DBUG_DUMP("info", theKeyBuf.data, 4 * theTable->m_keyLenInWords);
- Uint32* data = (Uint32*)theKeyBuf.data;
- unsigned size = theTable->m_keyLenInWords;
// TODO use attr ids after compatibility with 4.1.7 not needed
if (anOp->equal("PK", theKeyBuf.data) == -1 ||
anOp->equal("DIST", getDistKey(part)) == -1 ||
@@ -409,6 +407,12 @@ NdbBlob::getHeadInlineValue(NdbOperation* anOp)
setErrorCode(anOp);
DBUG_RETURN(-1);
}
+ /*
+ * If we get no data from this op, then the operation was aborted
+ * one way or the other. The following hack in 5.0 makes sure we don't read
+ * garbage. The proper fix exists only in version >= 5.1.
+ */
+ theHead->length = 0;
DBUG_RETURN(0);
}
@@ -802,7 +806,9 @@ NdbBlob::writeDataPrivate(const char* buf, Uint32 bytes)
DBUG_RETURN(-1);
Uint32 n = thePartSize - off;
if (n > len) {
- memset(thePartBuf.data + off + len, theFillChar, n - len);
+ /* If we are adding data at the end, fill rest of part. */
+ if (pos + len >= theLength)
+ memset(thePartBuf.data + off + len, theFillChar, n - len);
n = len;
}
memcpy(thePartBuf.data + off, buf, n);
@@ -886,7 +892,12 @@ NdbBlob::readParts(char* buf, Uint32 part, Uint32 count)
while (n < count) {
NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
if (tOp == NULL ||
- tOp->committedRead() == -1 ||
+ /*
+ * This was committedRead() before. However, a lock on the main
+ * table tuple does not fully protect blob parts, since DBTUP
+ * commits each tuple separately.
+ */
+ tOp->readTuple() == -1 ||
setPartKeyValue(tOp, part + n) == -1 ||
tOp->getValue((Uint32)3, buf) == NULL) {
setErrorCode(tOp);
@@ -1157,7 +1168,7 @@ NdbBlob::atPrepare(NdbTransaction* aCon, NdbOperation* anOp, const NdbColumnImpl
if (isReadOp()) {
// upgrade lock mode
if (theNdbOp->theLockMode == NdbOperation::LM_CommittedRead)
- theNdbOp->theLockMode = NdbOperation::LM_Read;
+ theNdbOp->setReadLockMode(NdbOperation::LM_Read);
// add read of head+inline in this op
if (getHeadInlineValue(theNdbOp) == -1)
DBUG_RETURN(-1);
@@ -1178,7 +1189,7 @@ NdbBlob::atPrepare(NdbTransaction* aCon, NdbOperation* anOp, const NdbColumnImpl
if (isScanOp()) {
// upgrade lock mode
if (theNdbOp->theLockMode == NdbOperation::LM_CommittedRead)
- theNdbOp->theLockMode = NdbOperation::LM_Read;
+ theNdbOp->setReadLockMode(NdbOperation::LM_Read);
// add read of head+inline in this op
if (getHeadInlineValue(theNdbOp) == -1)
DBUG_RETURN(-1);
diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp
index 747954f4532..32a2cd8ba0c 100644
--- a/ndb/src/ndbapi/NdbDictionary.cpp
+++ b/ndb/src/ndbapi/NdbDictionary.cpp
@@ -52,9 +52,9 @@ NdbDictionary::Column::operator=(const NdbDictionary::Column& column)
return *this;
}
-void
+int
NdbDictionary::Column::setName(const char * name){
- m_impl.m_name.assign(name);
+ return !m_impl.m_name.assign(name);
}
const char*
@@ -208,10 +208,10 @@ NdbDictionary::Column::setAutoIncrementInitialValue(Uint64 val){
m_impl.m_autoIncrementInitialValue = val;
}
-void
+int
NdbDictionary::Column::setDefaultValue(const char* defaultValue)
{
- m_impl.m_defaultValue.assign(defaultValue);
+ return !m_impl.m_defaultValue.assign(defaultValue);
}
const char*
@@ -273,9 +273,9 @@ NdbDictionary::Table::operator=(const NdbDictionary::Table& table)
return *this;
}
-void
+int
NdbDictionary::Table::setName(const char * name){
- m_impl.setName(name);
+ return m_impl.setName(name);
}
const char *
@@ -288,18 +288,30 @@ NdbDictionary::Table::getTableId() const {
return m_impl.m_tableId;
}
-void
+int
NdbDictionary::Table::addColumn(const Column & c){
NdbColumnImpl* col = new NdbColumnImpl;
+ if (col == NULL)
+ {
+ errno = ENOMEM;
+ return -1;
+ }
(* col) = NdbColumnImpl::getImpl(c);
- m_impl.m_columns.push_back(col);
+ if (m_impl.m_columns.push_back(col))
+ {
+ return -1;
+ }
if(c.getPrimaryKey()){
m_impl.m_noOfKeys++;
}
if (col->getBlobType()) {
m_impl.m_noOfBlobs++;
}
- m_impl.buildColumnHash();
+ if (m_impl.buildColumnHash())
+ {
+ return -1;
+ }
+ return 0;
}
const NdbDictionary::Column*
@@ -430,9 +442,21 @@ NdbDictionary::Table::getFrmLength() const {
return m_impl.m_frm.length();
}
+enum NdbDictionary::Table::SingleUserMode
+NdbDictionary::Table::getSingleUserMode() const
+{
+ return (enum SingleUserMode)m_impl.m_single_user_mode;
+}
+
void
+NdbDictionary::Table::setSingleUserMode(enum NdbDictionary::Table::SingleUserMode mode)
+{
+ m_impl.m_single_user_mode = (Uint8)mode;
+}
+
+int
NdbDictionary::Table::setFrm(const void* data, Uint32 len){
- m_impl.m_frm.assign(data, len);
+ return m_impl.m_frm.assign(data, len);
}
NdbDictionary::Object::Status
@@ -479,6 +503,7 @@ NdbDictionary::Table::createTableInDb(Ndb* pNdb, bool equalOk) const {
/*****************************************************************
* Index facade
*/
+
NdbDictionary::Index::Index(const char * name)
: m_impl(* new NdbIndexImpl(* this))
{
@@ -497,9 +522,9 @@ NdbDictionary::Index::~Index(){
}
}
-void
+int
NdbDictionary::Index::setName(const char * name){
- m_impl.setName(name);
+ return m_impl.setName(name);
}
const char *
@@ -507,9 +532,9 @@ NdbDictionary::Index::getName() const {
return m_impl.getName();
}
-void
+int
NdbDictionary::Index::setTable(const char * table){
- m_impl.setTable(table);
+ return m_impl.setTable(table);
}
const char *
@@ -517,6 +542,15 @@ NdbDictionary::Index::getTable() const {
return m_impl.getTable();
}
+const NdbDictionary::Table *
+NdbDictionary::Index::getIndexTable() const {
+ NdbTableImpl * t = m_impl.m_table;
+ if (t) {
+ return t->m_facade;
+ }
+ return 0;
+}
+
unsigned
NdbDictionary::Index::getNoOfColumns() const {
return m_impl.m_columns.size();
@@ -544,39 +578,56 @@ NdbDictionary::Index::getIndexColumn(int no) const {
return NULL;
}
-void
+int
NdbDictionary::Index::addColumn(const Column & c){
NdbColumnImpl* col = new NdbColumnImpl;
+ if (col == NULL)
+ {
+ errno = ENOMEM;
+ return -1;
+ }
(* col) = NdbColumnImpl::getImpl(c);
- m_impl.m_columns.push_back(col);
+ if (m_impl.m_columns.push_back(col))
+ {
+ return -1;
+ }
+ return 0;
}
-void
+int
NdbDictionary::Index::addColumnName(const char * name){
const Column c(name);
- addColumn(c);
+ return addColumn(c);
}
-void
+int
NdbDictionary::Index::addIndexColumn(const char * name){
const Column c(name);
- addColumn(c);
+ return addColumn(c);
}
-void
+int
NdbDictionary::Index::addColumnNames(unsigned noOfNames, const char ** names){
for(unsigned i = 0; i < noOfNames; i++) {
const Column c(names[i]);
- addColumn(c);
+ if (addColumn(c))
+ {
+ return -1;
+ }
}
+ return 0;
}
-void
+int
NdbDictionary::Index::addIndexColumns(int noOfNames, const char ** names){
for(int i = 0; i < noOfNames; i++) {
const Column c(names[i]);
- addColumn(c);
+ if (addColumn(c))
+ {
+ return -1;
+ }
}
+ return 0;
}
void
diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index b3258d4d143..bf0c02714db 100644
--- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -318,6 +318,7 @@ NdbTableImpl::init(){
m_replicaCount= 0;
m_min_rows = 0;
m_max_rows = 0;
+ m_single_user_mode = 0;
}
bool
@@ -378,32 +379,53 @@ NdbTableImpl::equal(const NdbTableImpl& obj) const
DBUG_RETURN(false);
}
+ if(m_single_user_mode != obj.m_single_user_mode)
+ {
+ DBUG_PRINT("info",("m_single_user_mode %d != %d",
+ (int32)m_single_user_mode,
+ (int32)obj.m_single_user_mode));
+ DBUG_RETURN(false);
+ }
+
DBUG_RETURN(true);
}
-void
+int
NdbTableImpl::assign(const NdbTableImpl& org)
{
m_tableId = org.m_tableId;
- m_internalName.assign(org.m_internalName);
- m_externalName.assign(org.m_externalName);
- m_newExternalName.assign(org.m_newExternalName);
- m_frm.assign(org.m_frm.get_data(), org.m_frm.length());
+ if (!m_internalName.assign(org.m_internalName) ||
+ !m_externalName.assign(org.m_externalName) ||
+ !m_newExternalName.assign(org.m_newExternalName) ||
+ m_frm.assign(org.m_frm.get_data(), org.m_frm.length()))
+ {
+ return -1;
+ }
m_fragmentType = org.m_fragmentType;
m_fragmentCount = org.m_fragmentCount;
for(unsigned i = 0; i<org.m_columns.size(); i++){
NdbColumnImpl * col = new NdbColumnImpl();
+ if (col == NULL)
+ {
+ errno = ENOMEM;
+ return -1;
+ }
const NdbColumnImpl * iorg = org.m_columns[i];
(* col) = (* iorg);
- m_columns.push_back(col);
+ if (m_columns.push_back(col))
+ {
+ delete col;
+ return -1;
+ }
}
m_logging = org.m_logging;
m_kvalue = org.m_kvalue;
m_minLoadFactor = org.m_minLoadFactor;
m_maxLoadFactor = org.m_maxLoadFactor;
-
+ m_single_user_mode = org.m_single_user_mode;
+
if (m_index != 0)
delete m_index;
m_index = org.m_index;
@@ -418,11 +440,13 @@ NdbTableImpl::assign(const NdbTableImpl& org)
m_max_rows = org.m_max_rows;
m_min_rows = org.m_min_rows;
+
+ return 0;
}
-void NdbTableImpl::setName(const char * name)
+int NdbTableImpl::setName(const char * name)
{
- m_newExternalName.assign(name);
+ return !m_newExternalName.assign(name);
}
const char *
@@ -435,7 +459,7 @@ NdbTableImpl::getName() const
}
-void
+int
NdbTableImpl::buildColumnHash(){
const Uint32 size = m_columns.size();
@@ -448,19 +472,29 @@ NdbTableImpl::buildColumnHash(){
}
Vector<Uint32> hashValues;
- Vector<Vector<Uint32> > chains; chains.fill(size, hashValues);
+ Vector<Vector<Uint32> > chains;
+ if (chains.fill(size, hashValues))
+ {
+ return -1;
+ }
for(i = 0; i< (int) size; i++){
Uint32 hv = Hash(m_columns[i]->getName()) & 0xFFFE;
Uint32 bucket = hv & m_columnHashMask;
bucket = (bucket < size ? bucket : bucket - size);
assert(bucket < size);
- hashValues.push_back(hv);
- chains[bucket].push_back(i);
+ if (hashValues.push_back(hv) ||
+ chains[bucket].push_back(i))
+ {
+ return -1;
+ }
}
m_columnHash.clear();
Uint32 tmp = 1;
- m_columnHash.fill((unsigned)size-1, tmp); // Default no chaining
+ if (m_columnHash.fill((unsigned)size-1, tmp)) // Default no chaining
+ {
+ return -1;
+ }
Uint32 pos = 0; // In overflow vector
for(i = 0; i< (int) size; i++){
@@ -480,12 +514,18 @@ NdbTableImpl::buildColumnHash(){
for(size_t j = 0; j<sz; j++, pos++){
Uint32 col = chains[i][j];
Uint32 hv = hashValues[col];
- m_columnHash.push_back((col << 16) | hv);
+ if (m_columnHash.push_back((col << 16) | hv))
+ {
+ return -1;
+ }
}
}
}
- m_columnHash.push_back(0); // Overflow when looping in end of array
+ if (m_columnHash.push_back(0)) // Overflow when looping in end of array
+ {
+ return -1;
+ }
#if 0
for(size_t i = 0; i<m_columnHash.size(); i++){
@@ -500,6 +540,7 @@ NdbTableImpl::buildColumnHash(){
i, col > 0 ? m_columns[col]->getName() : "" , m_columnHash[i]);
}
#endif
+ return 0;
}
Uint32
@@ -553,9 +594,9 @@ NdbIndexImpl::~NdbIndexImpl(){
delete m_columns[i];
}
-void NdbIndexImpl::setName(const char * name)
+int NdbIndexImpl::setName(const char * name)
{
- m_externalName.assign(name);
+ return !m_externalName.assign(name);
}
const char *
@@ -564,10 +605,10 @@ NdbIndexImpl::getName() const
return m_externalName.c_str();
}
-void
+int
NdbIndexImpl::setTable(const char * table)
{
- m_tableName.assign(table);
+ return !m_tableName.assign(table);
}
const char *
@@ -647,14 +688,18 @@ Ndb_local_table_info *
NdbDictionaryImpl::fetchGlobalTableImpl(const BaseString& internalTableName)
{
NdbTableImpl *impl;
+ int error= 0;
m_globalHash->lock();
- impl = m_globalHash->get(internalTableName.c_str());
+ impl = m_globalHash->get(internalTableName.c_str(), &error);
m_globalHash->unlock();
if (impl == 0){
- impl = m_receiver.getTable(internalTableName,
- m_ndb.usingFullyQualifiedNames());
+ if (error == 0)
+ impl = m_receiver.getTable(internalTableName,
+ m_ndb.usingFullyQualifiedNames());
+ else
+ m_error.code = 4000;
m_globalHash->lock();
m_globalHash->put(internalTableName.c_str(), impl);
m_globalHash->unlock();
@@ -988,12 +1033,20 @@ NdbDictInterface::getTable(const BaseString& name, bool fullyQualifiedNames)
// Copy name to m_buffer to get a word sized buffer
m_buffer.clear();
- m_buffer.grow(namelen_words*4+4);
- m_buffer.append(name.c_str(), namelen);
+ if (m_buffer.grow(namelen_words*4+4) ||
+ m_buffer.append(name.c_str(), namelen))
+ {
+ m_error.code= 4000;
+ return NULL;
+ }
#ifndef IGNORE_VALGRIND_WARNINGS
Uint32 pad = 0;
- m_buffer.append(&pad, 4);
+ if (m_buffer.append(&pad, 4))
+ {
+ m_error.code= 4000;
+ return NULL;
+ }
#endif
LinearSectionPtr ptr[1];
@@ -1024,7 +1077,14 @@ NdbDictInterface::getTable(class NdbApiSignal * signal,
(Uint32*)m_buffer.get_data(),
m_buffer.length() / 4, fullyQualifiedNames);
if (rt != 0)
- rt->buildColumnHash();
+ {
+ if (rt->buildColumnHash())
+ {
+ m_error.code = 4000;
+ delete rt;
+ return NULL;
+ }
+ }
return rt;
}
@@ -1033,18 +1093,25 @@ NdbDictInterface::execGET_TABINFO_CONF(NdbApiSignal * signal,
LinearSectionPtr ptr[3])
{
const GetTabInfoConf* conf = CAST_CONSTPTR(GetTabInfoConf, signal->getDataPtr());
+ const Uint32 i = GetTabInfoConf::DICT_TAB_INFO;
if(signal->isFirstFragment()){
m_fragmentId = signal->getFragmentId();
- m_buffer.grow(4 * conf->totalLen);
+ if (m_buffer.grow(4 * conf->totalLen))
+ {
+ m_error.code= 4000;
+ goto end;
+ }
} else {
if(m_fragmentId != signal->getFragmentId()){
abort();
}
}
- const Uint32 i = GetTabInfoConf::DICT_TAB_INFO;
- m_buffer.append(ptr[i].p, 4 * ptr[i].sz);
-
+ if (m_buffer.append(ptr[i].p, 4 * ptr[i].sz))
+ {
+ m_error.code= 4000;
+ }
+end:
if(!signal->isLastFragment()){
return;
}
@@ -1153,7 +1220,8 @@ indexTypeMapping[] = {
int
NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
const Uint32 * data, Uint32 len,
- bool fullyQualifiedNames)
+ bool fullyQualifiedNames,
+ bool hostByteOrder)
{
DBUG_ENTER("NdbDictInterface::parseTableInfo");
@@ -1175,10 +1243,12 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
impl->m_tableId = tableDesc.TableId;
impl->m_version = tableDesc.TableVersion;
impl->m_status = NdbDictionary::Object::Retrieved;
- impl->m_internalName.assign(internalName);
- impl->m_externalName.assign(externalName);
-
- impl->m_frm.assign(tableDesc.FrmData, tableDesc.FrmLen);
+ if (!impl->m_internalName.assign(internalName) ||
+ !impl->m_externalName.assign(externalName) ||
+ impl->m_frm.assign(tableDesc.FrmData, tableDesc.FrmLen))
+ {
+ DBUG_RETURN(4000);
+ }
impl->m_fragmentType = (NdbDictionary::Object::FragmentType)
getApiConstant(tableDesc.FragmentType,
@@ -1195,6 +1265,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
impl->m_kvalue = tableDesc.TableKValue;
impl->m_minLoadFactor = tableDesc.MinLoadFactor;
impl->m_maxLoadFactor = tableDesc.MaxLoadFactor;
+ impl->m_single_user_mode = tableDesc.SingleUserMode;
impl->m_indexType = (NdbDictionary::Index::Type)
getApiConstant(tableDesc.TableType,
@@ -1205,7 +1276,10 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
} else {
const char * externalPrimary =
Ndb::externalizeTableName(tableDesc.PrimaryTable, fullyQualifiedNames);
- impl->m_primaryTable.assign(externalPrimary);
+ if (!impl->m_primaryTable.assign(externalPrimary))
+ {
+ DBUG_RETURN(4000);
+ }
}
Uint32 keyInfoPos = 0;
@@ -1232,6 +1306,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
// check type and compute attribute size and array size
if (! attrDesc.translateExtType()) {
+ delete col;
delete impl;
DBUG_RETURN(703);
}
@@ -1243,12 +1318,14 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
unsigned cs_number = (attrDesc.AttributeExtPrecision >> 16);
// charset is defined exactly for char types
if (col->getCharType() != (cs_number != 0)) {
+ delete col;
delete impl;
DBUG_RETURN(703);
}
if (col->getCharType()) {
col->m_cs = get_charset(cs_number, MYF(0));
if (col->m_cs == NULL) {
+ delete col;
delete impl;
DBUG_RETURN(743);
}
@@ -1266,7 +1343,12 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
col->m_nullable = attrDesc.AttributeNullableFlag;
col->m_autoIncrement = (attrDesc.AttributeAutoIncrement ? true : false);
col->m_autoIncrementInitialValue = ~0;
- col->m_defaultValue.assign(attrDesc.AttributeDefaultValue);
+ if (!col->m_defaultValue.assign(attrDesc.AttributeDefaultValue))
+ {
+ delete col;
+ delete impl;
+ DBUG_RETURN(4000);
+ }
if(attrDesc.AttributeKeyFlag){
col->m_keyInfoPos = keyInfoPos + 1;
@@ -1298,15 +1380,25 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
if(tableDesc.FragmentDataLen > 0)
{
- Uint32 replicaCount = tableDesc.FragmentData[0];
- Uint32 fragCount = tableDesc.FragmentData[1];
+ Uint16 replicaCount = tableDesc.FragmentData[0];
+ Uint16 fragCount = tableDesc.FragmentData[1];
+
+ if(hostByteOrder == false)
+ {
+ replicaCount = ((replicaCount & 0xFF00) >> 8) |((replicaCount & 0x00FF) << 8);
+ fragCount = ((fragCount & 0xFF00) >> 8) |((fragCount & 0x00FF) << 8);
+ }
impl->m_replicaCount = replicaCount;
impl->m_fragmentCount = fragCount;
- for(i = 0; i<(fragCount*replicaCount); i++)
+ for(i = 0; i<(Uint32) (fragCount*replicaCount); i++)
{
- impl->m_fragments.push_back(tableDesc.FragmentData[i+2]);
+ if (impl->m_fragments.push_back(tableDesc.FragmentData[i+2]))
+ {
+ delete impl;
+ DBUG_RETURN(4000);
+ }
}
Uint32 topBit = (1 << 31);
@@ -1470,7 +1562,11 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
}
if (!impl.m_newExternalName.empty()) {
- impl.m_externalName.assign(impl.m_newExternalName);
+ if (!impl.m_externalName.assign(impl.m_newExternalName))
+ {
+ m_error.code= 4000;
+ DBUG_RETURN(-1);
+ }
AlterTableReq::setNameFlag(impl.m_changeMask, true);
}
@@ -1479,7 +1575,11 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
const BaseString internalName(
ndb.internalize_table_name(impl.m_externalName.c_str()));
- impl.m_internalName.assign(internalName);
+ if (!impl.m_internalName.assign(internalName))
+ {
+ m_error.code= 4000;
+ DBUG_RETURN(-1);
+ }
UtilBufferWriter w(m_buffer);
DictTabInfo::Table tmpTab; tmpTab.init();
BaseString::snprintf(tmpTab.TableName,
@@ -1523,11 +1623,8 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
tmpTab.MaxRowsLow = (Uint32)(impl.m_max_rows & 0xFFFFFFFF);
tmpTab.MinRowsHigh = (Uint32)(impl.m_min_rows >> 32);
tmpTab.MinRowsLow = (Uint32)(impl.m_min_rows & 0xFFFFFFFF);
-
- Uint64 maxRows =
- (((Uint64)tmpTab.MaxRowsHigh) << 32) + tmpTab.MaxRowsLow;
- Uint64 minRows =
- (((Uint64)tmpTab.MinRowsHigh) << 32) + tmpTab.MinRowsLow;
+
+ tmpTab.SingleUserMode = impl.m_single_user_mode;
tmpTab.FragmentType = getKernelConstant(impl.m_fragmentType,
fragmentTypeMapping,
@@ -1959,13 +2056,19 @@ NdbDictionaryImpl::getIndexImpl(const char * externalName,
NdbIndexImpl* idx;
if(NdbDictInterface::create_index_obj_from_table(&idx, tab, prim) == 0){
idx->m_table = tab;
- idx->m_externalName.assign(externalName);
- idx->m_internalName.assign(internalName);
+ if (!idx->m_externalName.assign(externalName) ||
+ !idx->m_internalName.assign(internalName))
+ {
+ delete idx;
+ m_error.code = 4000;
+ return 0;
+ }
// TODO Assign idx to tab->m_index
// Don't do it right now since assign can't asign a table with index
// tab->m_index = idx;
return idx;
}
+ m_error.code = 4000;
return 0;
}
@@ -1974,11 +2077,21 @@ NdbDictInterface::create_index_obj_from_table(NdbIndexImpl** dst,
NdbTableImpl* tab,
const NdbTableImpl* prim){
NdbIndexImpl *idx = new NdbIndexImpl();
+ if (idx == NULL)
+ {
+ errno = ENOMEM;
+ return -1;
+ }
idx->m_version = tab->m_version;
idx->m_status = tab->m_status;
idx->m_indexId = tab->m_tableId;
- idx->m_externalName.assign(tab->getName());
- idx->m_tableName.assign(prim->m_externalName);
+ if (!idx->m_externalName.assign(tab->getName()) ||
+ !idx->m_tableName.assign(prim->m_externalName))
+ {
+ delete idx;
+ errno = ENOMEM;
+ return -1;
+ }
NdbDictionary::Index::Type type = idx->m_type = tab->m_indexType;
idx->m_logging = tab->m_logging;
// skip last attribute (NDB$PK or NDB$TNODE)
@@ -1991,9 +2104,20 @@ NdbDictInterface::create_index_obj_from_table(NdbIndexImpl** dst,
NdbColumnImpl* org = tab->m_columns[i];
NdbColumnImpl* col = new NdbColumnImpl;
+ if (col == NULL)
+ {
+ errno = ENOMEM;
+ delete idx;
+ return -1;
+ }
// Copy column definition
*col = * org;
- idx->m_columns.push_back(col);
+ if (idx->m_columns.push_back(col))
+ {
+ delete col;
+ delete idx;
+ return -1;
+ }
/**
* reverse map
@@ -2059,7 +2183,11 @@ NdbDictInterface::createIndex(Ndb & ndb,
}
const BaseString internalName(
ndb.internalize_index_name(&table, impl.getName()));
- impl.m_internalName.assign(internalName);
+ if (!impl.m_internalName.assign(internalName))
+ {
+ m_error.code = 4000;
+ return -1;
+ }
w.add(DictTabInfo::TableName, internalName.c_str());
w.add(DictTabInfo::TableLoggedFlag, impl.m_logging);
@@ -2345,34 +2473,72 @@ NdbDictInterface::listObjects(NdbDictionary::Dictionary::List& list,
BaseString databaseName;
BaseString schemaName;
BaseString objectName;
+ if (!databaseName || !schemaName || !objectName)
+ {
+ m_error.code= 4000;
+ return -1;
+ }
if ((element.type == NdbDictionary::Object::UniqueHashIndex) ||
(element.type == NdbDictionary::Object::OrderedIndex)) {
char * indexName = new char[n << 2];
+ if (indexName == NULL)
+ {
+ m_error.code= 4000;
+ return -1;
+ }
memcpy(indexName, &data[pos], n << 2);
- databaseName = Ndb::getDatabaseFromInternalName(indexName);
- schemaName = Ndb::getSchemaFromInternalName(indexName);
+ if (!(databaseName = Ndb::getDatabaseFromInternalName(indexName)) ||
+ !(schemaName = Ndb::getSchemaFromInternalName(indexName)))
+ {
+ delete [] indexName;
+ m_error.code= 4000;
+ return -1;
+ }
objectName = BaseString(Ndb::externalizeIndexName(indexName, fullyQualifiedNames));
delete [] indexName;
} else if ((element.type == NdbDictionary::Object::SystemTable) ||
(element.type == NdbDictionary::Object::UserTable)) {
char * tableName = new char[n << 2];
+ if (tableName == NULL)
+ {
+ m_error.code= 4000;
+ return -1;
+ }
memcpy(tableName, &data[pos], n << 2);
- databaseName = Ndb::getDatabaseFromInternalName(tableName);
- schemaName = Ndb::getSchemaFromInternalName(tableName);
+ if (!(databaseName = Ndb::getDatabaseFromInternalName(tableName)) ||
+ !(schemaName = Ndb::getSchemaFromInternalName(tableName)))
+ {
+ delete [] tableName;
+ m_error.code= 4000;
+ return -1;
+ }
objectName = BaseString(Ndb::externalizeTableName(tableName, fullyQualifiedNames));
delete [] tableName;
}
else {
char * otherName = new char[n << 2];
+ if (otherName == NULL)
+ {
+ m_error.code= 4000;
+ return -1;
+ }
memcpy(otherName, &data[pos], n << 2);
- objectName = BaseString(otherName);
+ if (!(objectName = BaseString(otherName)))
+ {
+ m_error.code= 4000;
+ return -1;
+ }
delete [] otherName;
}
- element.database = new char[databaseName.length() + 1];
+ if (!(element.database = new char[databaseName.length() + 1]) ||
+ !(element.schema = new char[schemaName.length() + 1]) ||
+ !(element.name = new char[objectName.length() + 1]))
+ {
+ m_error.code= 4000;
+ return -1;
+ }
strcpy(element.database, databaseName.c_str());
- element.schema = new char[schemaName.length() + 1];
strcpy(element.schema, schemaName.c_str());
- element.name = new char[objectName.length() + 1];
strcpy(element.name, objectName.c_str());
pos += n;
count++;
@@ -2419,7 +2585,10 @@ NdbDictInterface::execLIST_TABLES_CONF(NdbApiSignal* signal,
{
const unsigned off = ListTablesConf::HeaderLength;
const unsigned len = (signal->getLength() - off);
- m_buffer.append(signal->getDataPtr() + off, len << 2);
+ if (m_buffer.append(signal->getDataPtr() + off, len << 2))
+ {
+ m_error.code= 4000;
+ }
if (signal->getLength() < ListTablesConf::SignalLength) {
// last signal has less than full length
m_waiter.signal(NO_WAIT);
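The NdbDictionaryImpl.cpp hunks above all apply one pattern: every allocation and every BaseString operation that can fail is now checked, and on failure the interface records error 4000 (memory allocation error), releases anything it allocated locally, and returns -1 instead of continuing with a half-built object. A minimal standalone sketch of that pattern, with illustrative names rather than the real NdbDictInterface types:

  #include <cstring>
  #include <new>
  #include <string>

  struct Error { int code; };

  // Sketch only: allocate, check, record 4000 and back out on failure.
  static int build_copy(Error &err, const std::string &src, char *&out)
  {
    out = new (std::nothrow) char[src.size() + 1];
    if (out == NULL)
    {
      err.code = 4000;   // memory allocation error, as in the hunks above
      return -1;         // caller sees the failure instead of a crash later
    }
    memcpy(out, src.c_str(), src.size() + 1);
    return 0;
  }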
diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp
index ed86f66ee11..a8757b69472 100644
--- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp
+++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp
@@ -103,7 +103,7 @@ public:
~NdbTableImpl();
void init();
- void setName(const char * name);
+ int setName(const char * name);
const char * getName() const;
Uint32 m_changeMask;
@@ -120,7 +120,7 @@ public:
Uint32 m_columnHashMask;
Vector<Uint32> m_columnHash;
Vector<NdbColumnImpl *> m_columns;
- void buildColumnHash();
+ int buildColumnHash();
/**
* Fragment info
@@ -138,6 +138,7 @@ public:
int m_maxLoadFactor;
Uint16 m_keyLenInWords;
Uint16 m_fragmentCount;
+ Uint8 m_single_user_mode;
NdbDictionaryImpl * m_dictionary;
NdbIndexImpl * m_index;
@@ -165,7 +166,7 @@ public:
* Equality/assign
*/
bool equal(const NdbTableImpl&) const;
- void assign(const NdbTableImpl&);
+ int assign(const NdbTableImpl&);
static NdbTableImpl & getImpl(NdbDictionary::Table & t);
static NdbTableImpl & getImpl(const NdbDictionary::Table & t);
@@ -184,9 +185,9 @@ public:
~NdbIndexImpl();
void init();
- void setName(const char * name);
+ int setName(const char * name);
const char * getName() const;
- void setTable(const char * table);
+ int setTable(const char * table);
const char * getTable() const;
const NdbTableImpl * getIndexTable() const;
@@ -261,7 +262,8 @@ public:
static int parseTableInfo(NdbTableImpl ** dst,
const Uint32 * data, Uint32 len,
- bool fullyQualifiedNames);
+ bool fullyQualifiedNames,
+ bool hostByteOrder = true);
static int create_index_obj_from_table(NdbIndexImpl ** dst,
NdbTableImpl* index_table,
diff --git a/ndb/src/ndbapi/NdbImpl.hpp b/ndb/src/ndbapi/NdbImpl.hpp
index 90b81dabff6..dc0a057619f 100644
--- a/ndb/src/ndbapi/NdbImpl.hpp
+++ b/ndb/src/ndbapi/NdbImpl.hpp
@@ -37,7 +37,7 @@ struct Ndb_free_list_t
Ndb_free_list_t();
~Ndb_free_list_t();
- void fill(Ndb*, Uint32 cnt);
+ int fill(Ndb*, Uint32 cnt);
T* seize(Ndb*);
void release(T*);
void clear();
@@ -79,10 +79,23 @@ public:
BaseString m_prefix; // Buffer for preformatted internal name <db>/<schema>/
- void update_prefix()
+ int update_prefix()
{
- m_prefix.assfmt("%s%c%s%c", m_dbname.c_str(), table_name_separator,
- m_schemaname.c_str(), table_name_separator);
+ if (!m_prefix.assfmt("%s%c%s%c", m_dbname.c_str(), table_name_separator,
+ m_schemaname.c_str(), table_name_separator))
+ {
+ return -1;
+ }
+ return 0;
+ }
+
+/*
+ We need this friend accessor function to work around an HP compiler problem
+ where template class friends do not work.
+*/
+ static inline void setNdbError(Ndb &ndb,int code){
+ ndb.theError.code = code;
+ return;
}
/**
@@ -194,7 +207,7 @@ Ndb_free_list_t<T>::~Ndb_free_list_t()
template<class T>
inline
-void
+int
Ndb_free_list_t<T>::fill(Ndb* ndb, Uint32 cnt)
{
if (m_free_list == 0)
@@ -202,18 +215,28 @@ Ndb_free_list_t<T>::fill(Ndb* ndb, Uint32 cnt)
m_free_cnt++;
m_alloc_cnt++;
m_free_list = new T(ndb);
+ if (m_free_list == 0)
+ {
+ NdbImpl::setNdbError(*ndb, 4000);
+ assert(false);
+ return -1;
+ }
}
while(m_alloc_cnt < cnt)
{
T* obj= new T(ndb);
if(obj == 0)
- return;
-
+ {
+ NdbImpl::setNdbError(*ndb, 4000);
+ assert(false);
+ return -1;
+ }
obj->next(m_free_list);
m_free_cnt++;
m_alloc_cnt++;
m_free_list = obj;
}
+ return 0;
}
template<class T>
@@ -234,7 +257,11 @@ Ndb_free_list_t<T>::seize(Ndb* ndb)
{
m_alloc_cnt++;
}
-
+ else
+ {
+ NdbImpl::setNdbError(*ndb, 4000);
+ assert(false);
+ }
return tmp;
}
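NdbImpl.hpp changes Ndb_free_list_t<T>::fill() to return int and routes the error through the static setNdbError() helper, since template-class friend declarations are unreliable on the HP compiler. The sketch below mirrors the fill() loop with a plain callback standing in for that helper; it assumes T exposes a public next pointer and is illustrative only:

  #include <new>

  template<class T>
  struct FreeListSketch {
    T*       head      = 0;
    unsigned alloc_cnt = 0;

    // Pre-allocate up to cnt objects; report the first failed allocation.
    int fill(unsigned cnt, void (*on_oom)())
    {
      while (alloc_cnt < cnt)
      {
        T* obj = new (std::nothrow) T();
        if (obj == 0)
        {
          on_oom();        // stands in for NdbImpl::setNdbError(*ndb, 4000)
          return -1;
        }
        obj->next = head;  // push onto the intrusive free list
        head = obj;
        alloc_cnt++;
      }
      return 0;
    }
  };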
diff --git a/ndb/src/ndbapi/NdbIndexOperation.cpp b/ndb/src/ndbapi/NdbIndexOperation.cpp
index 54386e93539..c2231149fbd 100644
--- a/ndb/src/ndbapi/NdbIndexOperation.cpp
+++ b/ndb/src/ndbapi/NdbIndexOperation.cpp
@@ -244,8 +244,6 @@ NdbIndexOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransactionId)
// Simple state is set if start and commit is set and it is
// a read request. Otherwise it is set to zero.
//-------------------------------------------------------------
- Uint8 tReadInd = (theOperationType == ReadRequest);
- Uint8 tSimpleState = tReadInd & tSimpleAlt;
//theNdbCon->theSimpleState = tSimpleState;
tcKeyReq->transId1 = tTransId1;
diff --git a/ndb/src/ndbapi/NdbOperation.cpp b/ndb/src/ndbapi/NdbOperation.cpp
index 3ab1b56a717..51b6a3f6dab 100644
--- a/ndb/src/ndbapi/NdbOperation.cpp
+++ b/ndb/src/ndbapi/NdbOperation.cpp
@@ -176,7 +176,11 @@ NdbOperation::init(const NdbTableImpl* tab, NdbTransaction* myConnection){
tcKeyReq->scanInfo = 0;
theKEYINFOptr = &tcKeyReq->keyInfo[0];
theATTRINFOptr = &tcKeyReq->attrInfo[0];
- theReceiver.init(NdbReceiver::NDB_OPERATION, this);
+ if (theReceiver.init(NdbReceiver::NDB_OPERATION, this))
+ {
+ // theReceiver sets the error code of its owner
+ return -1;
+ }
return 0;
}
diff --git a/ndb/src/ndbapi/NdbOperationDefine.cpp b/ndb/src/ndbapi/NdbOperationDefine.cpp
index 8e8d01a4252..cd357bb44c1 100644
--- a/ndb/src/ndbapi/NdbOperationDefine.cpp
+++ b/ndb/src/ndbapi/NdbOperationDefine.cpp
@@ -322,6 +322,36 @@ NdbOperation::interpretedDeleteTuple()
}//if
}//NdbOperation::interpretedDeleteTuple()
+void
+NdbOperation::setReadLockMode(LockMode lockMode)
+{
+ /* We only support changing lock mode for read operations at this time. */
+ assert(theOperationType == ReadRequest || theOperationType == ReadExclusive);
+ switch (lockMode)
+ {
+ case LM_CommittedRead:
+ theOperationType= ReadRequest;
+ theSimpleIndicator= 1;
+ theDirtyIndicator= 1;
+ break;
+ case LM_Read:
+ theNdbCon->theSimpleState= 0;
+ theOperationType= ReadRequest;
+ theSimpleIndicator= 0;
+ theDirtyIndicator= 0;
+ break;
+ case LM_Exclusive:
+ theNdbCon->theSimpleState= 0;
+ theOperationType= ReadExclusive;
+ theSimpleIndicator= 0;
+ theDirtyIndicator= 0;
+ break;
+ default:
+ /* Not supported / invalid. */
+ assert(false);
+ }
+ theLockMode= lockMode;
+}
/******************************************************************************
@@ -362,9 +392,8 @@ NdbOperation::getValue_impl(const NdbColumnImpl* tAttrInfo, char* aValue)
return NULL;
}//if
}//if
- Uint32 ah;
- AttributeHeader::init(&ah, tAttrInfo->m_attrId, 0);
- if (insertATTRINFO(ah) != -1) {
+ AttributeHeader ah(tAttrInfo->m_attrId, 0);
+ if (insertATTRINFO(ah.m_value) != -1) {
// Insert Attribute Id into ATTRINFO part.
/************************************************************************
@@ -495,12 +524,11 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
tAttrId = tAttrInfo->m_attrId;
const char *aValue = aValuePassed;
- Uint32 ahValue;
if (aValue == NULL) {
if (tAttrInfo->m_nullable) {
- AttributeHeader& ah = AttributeHeader::init(&ahValue, tAttrId, 0);
+ AttributeHeader ah(tAttrId, 0);
ah.setNULL();
- insertATTRINFO(ahValue);
+ insertATTRINFO(ah.m_value);
// Insert Attribute Id with the value
// NULL into ATTRINFO part.
DBUG_RETURN(0);
@@ -533,9 +561,8 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
}//if
const Uint32 totalSizeInWords = (sizeInBytes + 3)/4; // Including bits in last word
const Uint32 sizeInWords = sizeInBytes / 4; // Excluding bits in last word
- AttributeHeader& ah = AttributeHeader::init(&ahValue, tAttrId,
- totalSizeInWords);
- insertATTRINFO( ahValue );
+ AttributeHeader ah(tAttrId, totalSizeInWords);
+ insertATTRINFO( ah.m_value );
/***********************************************************************
* Check if the pointer of the value passed is aligned on a 4 byte boundary.
@@ -543,15 +570,34 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
* If it is not aligned then we start by copying the value to tempData and
* use this as aValue instead.
*************************************************************************/
- const int attributeSize = sizeInBytes;
- const int slack = sizeInBytes & 3;
+ int attributeSize = sizeInBytes;
+ int slack = (sizeInBytes & 3) ? 4 - (sizeInBytes & 3) : 0;
+ switch(tAttrInfo->m_type){
+ case NdbDictionary::Column::Varchar:
+ case NdbDictionary::Column::Varbinary:
+ attributeSize = 1 + *(Uint8*)aValue;
+ slack = 4 * totalSizeInWords - attributeSize;
+ break;
+ case NdbDictionary::Column::Longvarchar:
+ case NdbDictionary::Column::Longvarbinary:
+ {
+ const Uint8* ptr = (const Uint8*)aValue;
+ attributeSize = 2 + ptr[0] + 256 * ptr[1];
+ slack = 4 * totalSizeInWords - attributeSize;
+ break;
+ }
+ default:
+ break;
+ }
- if (((UintPtr)aValue & 3) != 0 || (slack != 0)){
- memcpy(&tempData[0], aValue, attributeSize);
- aValue = (char*)&tempData[0];
- if(slack != 0) {
- char * tmp = (char*)&tempData[0];
- memset(&tmp[attributeSize], 0, (4 - slack));
+ if (((UintPtr)aValue & 3) != 0 || (slack != 0))
+ {
+ char * tmp = (char*)tempData;
+ memcpy(tmp, aValue, attributeSize);
+ aValue = tmp;
+ if(slack != 0)
+ {
+ bzero(tmp + attributeSize, slack);
}//if
}//if
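The setValue() hunk above derives the true size of a variable-length value from its length prefix (one byte for Varchar/Varbinary, two little-endian bytes for Longvarchar/Longvarbinary), copies it to word-aligned scratch space, and zero-fills the slack up to the next word boundary so whole words can be shipped in ATTRINFO. A self-contained sketch of that computation, assuming the same prefix layout; the names are illustrative:

  #include <cstring>

  // Returns the number of words occupied after padding.
  static unsigned pad_var_value(const unsigned char *value,
                                unsigned maxSizeInBytes, // declared column size
                                bool twoBytePrefix,      // Longvar* vs Var*
                                unsigned char *tempData) // word-aligned buffer
  {
    const unsigned totalSizeInWords = (maxSizeInBytes + 3) / 4;
    const unsigned attributeSize = twoBytePrefix
        ? 2 + value[0] + 256 * value[1]   // length prefix is little-endian
        : 1 + value[0];
    const unsigned slack = 4 * totalSizeInWords - attributeSize;

    memcpy(tempData, value, attributeSize);
    memset(tempData + attributeSize, 0, slack);  // zero the rest of the last word
    return totalSizeInWords;
  }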
diff --git a/ndb/src/ndbapi/NdbOperationExec.cpp b/ndb/src/ndbapi/NdbOperationExec.cpp
index d8e10c04fe8..9a50b000a1a 100644
--- a/ndb/src/ndbapi/NdbOperationExec.cpp
+++ b/ndb/src/ndbapi/NdbOperationExec.cpp
@@ -24,6 +24,7 @@
#include "Interpreter.hpp"
#include <AttributeHeader.hpp>
#include <signaldata/TcKeyReq.hpp>
+#include <signaldata/TcKeyRef.hpp>
#include <signaldata/KeyInfo.hpp>
#include <signaldata/AttrInfo.hpp>
#include <signaldata/ScanTab.hpp>
@@ -176,8 +177,6 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId)
// Simple state is set if start and commit is set and it is
// a read request. Otherwise it is set to zero.
//-------------------------------------------------------------
- Uint8 tReadInd = (theOperationType == ReadRequest);
- Uint8 tSimpleState = tReadInd & tSimpleIndicator;
tcKeyReq->transId1 = tTransId1;
tcKeyReq->transId2 = tTransId2;
@@ -552,6 +551,12 @@ NdbOperation::receiveTCKEYREF( NdbApiSignal* aSignal)
theNdbCon->theReturnStatus = NdbTransaction::ReturnFailure;
}
theError.code = aSignal->readData(4);
+ if (aSignal->getLength() == TcKeyRef::SignalLength)
+ {
+ // Signal may contain additional error data
+ theError.details = (char *) aSignal->readData(5);
+ }
+
theNdbCon->setOperationErrorCodeAbort(aSignal->readData(4), ao);
if(theOperationType != ReadRequest || !theSimpleIndicator) // not simple read
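receiveTCKEYREF() now stores an optional fifth data word in theError.details, but only when the signal carries the full TcKeyRef::SignalLength layout; shorter signals keep working unchanged. The same length-gated pattern in isolation, with a hypothetical signal type standing in for NdbApiSignal:

  // Hypothetical stand-in, only to show the length check.
  struct SignalSketch {
    unsigned length;
    unsigned data[8];
    unsigned readData(unsigned i) const { return data[i - 1]; }
  };

  static void read_ref_error(const SignalSketch &sig, unsigned fullLength,
                             int &code, unsigned &details)
  {
    code = (int)sig.readData(4);      // error code is always present
    if (sig.length == fullLength)
      details = sig.readData(5);      // extra data only in full-length signals
  }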
diff --git a/ndb/src/ndbapi/NdbOperationSearch.cpp b/ndb/src/ndbapi/NdbOperationSearch.cpp
index 8d678117e04..7f8659916dc 100644
--- a/ndb/src/ndbapi/NdbOperationSearch.cpp
+++ b/ndb/src/ndbapi/NdbOperationSearch.cpp
@@ -66,7 +66,6 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
register Uint32 tAttrId;
- Uint32 tData;
Uint32 tKeyInfoPosition;
const char* aValue = aValuePassed;
Uint64 tempData[512];
@@ -130,6 +129,7 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
OperationType tOpType = theOperationType;
Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize;
+ const Uint32 totalSizeInWords = (sizeInBytes + 3) / 4;
Uint32 real_len;
if (! tAttrInfo->get_var_length(aValue, real_len)) {
@@ -151,20 +151,37 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
* aValue. If it is not aligned then we start by copying the value to
* tempData and use this as aValue instead.
***********************************************************************/
- const int attributeSize = sizeInBytes;
- const int slack = sizeInBytes & 3;
+ int attributeSize = sizeInBytes;
+ int slack = (sizeInBytes & 3) ? 4 - (sizeInBytes & 3) : 0;
const int align = UintPtr(aValue) & 7;
+ switch(tAttrInfo->m_type){
+ case NdbDictionary::Column::Varchar:
+ case NdbDictionary::Column::Varbinary:
+ attributeSize = 1 + *(Uint8*)aValue;
+ slack = 4 * totalSizeInWords - attributeSize;
+ break;
+ case NdbDictionary::Column::Longvarchar:
+ case NdbDictionary::Column::Longvarbinary:
+ {
+ const Uint8* ptr = (const Uint8*)aValue;
+ attributeSize = 2 + ptr[0] + 256 * ptr[1];
+ slack = 4*totalSizeInWords - attributeSize;
+ break;
+ }
+ default:
+ break;
+ }
+
if (((align & 3) != 0) || (slack != 0) || (tDistrKey && (align != 0)))
{
- ((Uint32*)tempData)[attributeSize >> 2] = 0;
- memcpy(&tempData[0], aValue, attributeSize);
- aValue = (char*)&tempData[0];
+ char * tmp = (char*)tempData;
+ memcpy(tmp, aValue, attributeSize);
+ aValue = tmp;
+ bzero(tmp + attributeSize, slack);
}//if
}
- Uint32 totalSizeInWords = (sizeInBytes + 3)/4; // Inc. bits in last word
-
if (true){ //tArraySize != 0) {
Uint32 tTupKeyLen = theTupKeyLen;
@@ -346,7 +363,6 @@ NdbOperation::insertKEYINFO(const char* aValue,
Uint32 tEndPos;
Uint32 tPos;
Uint32 signalCounter;
- Uint32 tData;
/*****************************************************************************
* Calculate the end position of the attribute in the key information. *
@@ -513,7 +529,6 @@ NdbOperation::handle_distribution_key(const Uint64* value, Uint32 len)
* Copy distribution key to linear memory
*/
NdbColumnImpl* const * cols = m_accessTable->m_columns.getBase();
- Uint32 len = 0;
Uint64 tmp[1000];
Uint32 chunk = 8;
diff --git a/ndb/src/ndbapi/NdbRecAttr.cpp b/ndb/src/ndbapi/NdbRecAttr.cpp
index abfbd76d2c3..8de163d1c22 100644
--- a/ndb/src/ndbapi/NdbRecAttr.cpp
+++ b/ndb/src/ndbapi/NdbRecAttr.cpp
@@ -83,6 +83,7 @@ NdbRecAttr::setup(const NdbColumnImpl* anAttrInfo, char* aValue)
theRef = tRef;
return 0;
}
+ errno = ENOMEM;
return -1;
}
@@ -102,7 +103,11 @@ NdbRecAttr::copyout()
NdbRecAttr *
NdbRecAttr::clone() const {
NdbRecAttr * ret = new NdbRecAttr(0);
-
+ if (ret == NULL)
+ {
+ errno = ENOMEM;
+ return NULL;
+ }
ret->theAttrId = theAttrId;
ret->theNULLind = theNULLind;
ret->theAttrSize = theAttrSize;
@@ -116,6 +121,12 @@ NdbRecAttr::clone() const {
ret->theValue = 0;
} else {
ret->theStorageX = new Uint64[((n + 7) >> 3)];
+ if (ret->theStorageX == NULL)
+ {
+ delete ret;
+ errno = ENOMEM;
+ return NULL;
+ }
ret->theRef = (char*)ret->theStorageX;
ret->theValue = 0;
}
@@ -140,8 +151,24 @@ NdbRecAttr::receive_data(const Uint32 * data, Uint32 sz){
return false;
}
+NdbRecordPrintFormat::NdbRecordPrintFormat()
+{
+ fields_terminated_by= ";";
+ start_array_enclosure= "[";
+ end_array_enclosure= "]";
+ fields_enclosed_by= "";
+ fields_optionally_enclosed_by= "\"";
+ lines_terminated_by= "\n";
+ hex_prefix= "H'";
+ null_string= "[NULL]";
+ hex_format= 0;
+}
+NdbRecordPrintFormat::~NdbRecordPrintFormat() {}
+static const NdbRecordPrintFormat default_print_format;
+
static void
-ndbrecattr_print_string(NdbOut& out, const char *type,
+ndbrecattr_print_string(NdbOut& out, const NdbRecordPrintFormat &f,
+ const char *type, bool is_binary,
const char *aref, unsigned sz)
{
const unsigned char* ref = (const unsigned char*)aref;
@@ -150,6 +177,25 @@ ndbrecattr_print_string(NdbOut& out, const char *type,
for (i=sz-1; i >= 0; i--)
if (ref[i] == 0) sz--;
else break;
+ if (!is_binary)
+ {
+ // trailing spaces are not printed
+ for (i=sz-1; i >= 0; i--)
+ if (ref[i] == 32) sz--;
+ else break;
+ }
+ if (is_binary && f.hex_format)
+ {
+ if (sz == 0)
+ {
+ out.print("0x0");
+ return;
+ }
+ out.print("0x");
+ for (len = 0; len < (int)sz; len++)
+ out.print("%02X", (int)ref[len]);
+ return;
+ }
if (sz == 0) return; // empty
for (len=0; len < (int)sz && ref[i] != 0; len++)
@@ -170,43 +216,63 @@ ndbrecattr_print_string(NdbOut& out, const char *type,
for (i= len+1; ref[i] != 0; i++)
out.print("%u]",len-i);
assert((int)sz > i);
- ndbrecattr_print_string(out,type,aref+i,sz-i);
+ ndbrecattr_print_string(out,f,type,is_binary,aref+i,sz-i);
}
}
-NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
+NdbOut&
+ndbrecattr_print_formatted(NdbOut& out, const NdbRecAttr &r,
+ const NdbRecordPrintFormat &f)
{
if (r.isNULL())
{
- out << "[NULL]";
+ out << f.null_string;
return out;
}
const NdbDictionary::Column* c = r.getColumn();
uint length = c->getLength();
- if (length > 1)
- out << "[";
-
- for (Uint32 j = 0; j < length; j++)
{
- if (j > 0)
- out << " ";
-
+ const char *fields_optionally_enclosed_by;
+ if (f.fields_enclosed_by[0] == '\0')
+ fields_optionally_enclosed_by=
+ f.fields_optionally_enclosed_by;
+ else
+ fields_optionally_enclosed_by= "";
+ out << f.fields_enclosed_by;
+ Uint32 j;
switch(r.getType()){
case NdbDictionary::Column::Bigunsigned:
out << r.u_64_value();
break;
case NdbDictionary::Column::Bit:
- out << hex << "H'" << r.u_32_value() << dec;
+ out << f.hex_prefix << "0x";
+ {
+ const Uint32 *buf = (Uint32 *)r.aRef();
+ int k = (length+31)/32;
+ while (k > 0 && (buf[--k] == 0));
+ out.print("%X", buf[k]);
+ while (k > 0)
+ out.print("%.8X", buf[--k]);
+ }
break;
case NdbDictionary::Column::Unsigned:
- out << r.u_32_value();
+ if (length > 1)
+ out << f.start_array_enclosure;
+ out << *(Uint32*)r.aRef();
+ for (j = 1; j < length; j++)
+ out << " " << *((Uint32*)r.aRef() + j);
+ if (length > 1)
+ out << f.end_array_enclosure;
+ break;
+ case NdbDictionary::Column::Mediumunsigned:
+ out << r.u_medium_value();
break;
case NdbDictionary::Column::Smallunsigned:
out << r.u_short_value();
break;
case NdbDictionary::Column::Tinyunsigned:
- out << (unsigned) r.u_char_value();
+ out << (unsigned) r.u_8_value();
break;
case NdbDictionary::Column::Bigint:
out << r.int64_value();
@@ -214,32 +280,47 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
case NdbDictionary::Column::Int:
out << r.int32_value();
break;
+ case NdbDictionary::Column::Mediumint:
+ out << r.medium_value();
+ break;
case NdbDictionary::Column::Smallint:
out << r.short_value();
break;
case NdbDictionary::Column::Tinyint:
- out << (int) r.char_value();
+ out << (int) r.int8_value();
break;
case NdbDictionary::Column::Binary:
+ if (!f.hex_format)
+ out << fields_optionally_enclosed_by;
j = r.arraySize();
- ndbrecattr_print_string(out,"Binary", r.aRef(), j);
+ ndbrecattr_print_string(out,f,"Binary", true, r.aRef(), j);
+ if (!f.hex_format)
+ out << fields_optionally_enclosed_by;
break;
case NdbDictionary::Column::Char:
+ out << fields_optionally_enclosed_by;
j = length;
- ndbrecattr_print_string(out,"Char", r.aRef(), r.arraySize());
+ ndbrecattr_print_string(out,f,"Char", false, r.aRef(), r.arraySize());
+ out << fields_optionally_enclosed_by;
break;
case NdbDictionary::Column::Varchar:
{
+ out << fields_optionally_enclosed_by;
unsigned len = *(const unsigned char*)r.aRef();
- ndbrecattr_print_string(out,"Varchar", r.aRef()+1,len);
+ ndbrecattr_print_string(out,f,"Varchar", false, r.aRef()+1,len);
j = length;
+ out << fields_optionally_enclosed_by;
}
break;
case NdbDictionary::Column::Varbinary:
{
+ if (!f.hex_format)
+ out << fields_optionally_enclosed_by;
unsigned len = *(const unsigned char*)r.aRef();
- ndbrecattr_print_string(out,"Varbinary", r.aRef()+1,len);
+ ndbrecattr_print_string(out,f,"Varbinary", true, r.aRef()+1,len);
j = length;
+ if (!f.hex_format)
+ out << fields_optionally_enclosed_by;
}
break;
case NdbDictionary::Column::Float:
@@ -332,7 +413,7 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
break;
case NdbDictionary::Column::Year:
{
- uint year = 1900 + r.u_char_value();
+ uint year = 1900 + r.u_8_value();
char buf[40];
sprintf(buf, "%04d", year);
out << buf;
@@ -368,16 +449,26 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
break;
case NdbDictionary::Column::Longvarchar:
{
+ out << fields_optionally_enclosed_by;
+ unsigned len = uint2korr(r.aRef());
+ ndbrecattr_print_string(out,f,"Longvarchar", false, r.aRef()+2,len);
+ j = length;
+ out << fields_optionally_enclosed_by;
+ }
+ break;
+ case NdbDictionary::Column::Longvarbinary:
+ {
+ if (!f.hex_format)
+ out << fields_optionally_enclosed_by;
unsigned len = uint2korr(r.aRef());
- ndbrecattr_print_string(out,"Longvarchar", r.aRef()+2,len);
+ ndbrecattr_print_string(out,f,"Longvarbinary", true, r.aRef()+2,len);
j = length;
+ if (!f.hex_format)
+ out << fields_optionally_enclosed_by;
}
break;
case NdbDictionary::Column::Undefined:
- case NdbDictionary::Column::Mediumint:
- case NdbDictionary::Column::Mediumunsigned:
- case NdbDictionary::Column::Longvarbinary:
unknown:
//default: /* no print functions for the rest, just print type */
out << (int) r.getType();
@@ -386,16 +477,17 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
out << " " << j << " times";
break;
}
- }
-
- if (length > 1)
- {
- out << "]";
+ out << f.fields_enclosed_by;
}
return out;
}
+NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
+{
+ return ndbrecattr_print_formatted(out, r, default_print_format);
+}
+
Int64
NdbRecAttr::int64_value() const
{
@@ -427,3 +519,15 @@ NdbRecAttr::double_value() const
memcpy(&val,theRef,sizeof(val));
return val;
}
+
+Int32
+NdbRecAttr::medium_value() const
+{
+ return sint3korr((unsigned char *)theRef);
+}
+
+Uint32
+NdbRecAttr::u_medium_value() const
+{
+ return uint3korr((unsigned char*)theRef);
+}
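The new medium_value()/u_medium_value() accessors decode 3-byte MEDIUMINT storage using the MySQL sint3korr/uint3korr macros. Hand-rolled equivalents, assuming the same little-endian 3-byte layout, just to show what they compute:

  #include <cstdint>

  static uint32_t u_medium(const unsigned char *p)   // like uint3korr
  {
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16);
  }

  static int32_t s_medium(const unsigned char *p)    // like sint3korr
  {
    uint32_t v = u_medium(p);
    if (v & 0x800000)        // sign bit of the 24-bit value
      v |= 0xFF000000;       // sign-extend to 32 bits
    return (int32_t)v;
  }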
diff --git a/ndb/src/ndbapi/NdbReceiver.cpp b/ndb/src/ndbapi/NdbReceiver.cpp
index 9322f88a351..46ca59f2f42 100644
--- a/ndb/src/ndbapi/NdbReceiver.cpp
+++ b/ndb/src/ndbapi/NdbReceiver.cpp
@@ -32,7 +32,7 @@ NdbReceiver::NdbReceiver(Ndb *aNdb) :
{
theCurrentRecAttr = theFirstRecAttr = 0;
m_defined_rows = 0;
- m_rows = new NdbRecAttr*[0];
+ m_rows = NULL;
}
NdbReceiver::~NdbReceiver()
@@ -45,19 +45,26 @@ NdbReceiver::~NdbReceiver()
DBUG_VOID_RETURN;
}
-void
+int
NdbReceiver::init(ReceiverType type, void* owner)
{
theMagicNumber = 0x11223344;
m_type = type;
m_owner = owner;
+ theFirstRecAttr = NULL;
+ theCurrentRecAttr = NULL;
if (m_id == NdbObjectIdMap::InvalidId) {
if (m_ndb)
+ {
m_id = m_ndb->theImpl->theNdbObjectIdMap.map(this);
+ if (m_id == NdbObjectIdMap::InvalidId)
+ {
+ setErrorCode(4000);
+ return -1;
+ }
+ }
}
-
- theFirstRecAttr = NULL;
- theCurrentRecAttr = NULL;
+ return 0;
}
void
@@ -146,7 +153,7 @@ NdbReceiver::calculate_batch_size(Uint32 key_size,
return;
}
-void
+int
NdbReceiver::do_get_value(NdbReceiver * org,
Uint32 rows,
Uint32 key_size,
@@ -154,7 +161,11 @@ NdbReceiver::do_get_value(NdbReceiver * org,
if(rows > m_defined_rows){
delete[] m_rows;
m_defined_rows = rows;
- m_rows = new NdbRecAttr*[rows + 1];
+ if ((m_rows = new NdbRecAttr*[rows + 1]) == NULL)
+ {
+ setErrorCode(4000);
+ return -1;
+ }
}
m_rows[rows] = 0;
@@ -174,7 +185,7 @@ NdbReceiver::do_get_value(NdbReceiver * org,
// Put key-recAttr first on each row
if(key_size && !getValue(&key, (char*)0)){
abort();
- return ; // -1
+ return -1;
}
if(range_no &&
@@ -193,7 +204,7 @@ NdbReceiver::do_get_value(NdbReceiver * org,
if(tRecAttr){
abort();
- return ;// -1;
+ return -1;
}
// Store first recAttr for each row in m_rows[i]
@@ -205,7 +216,7 @@ NdbReceiver::do_get_value(NdbReceiver * org,
}
prepareSend();
- return;
+ return 0;
}
NdbRecAttr*
diff --git a/ndb/src/ndbapi/NdbScanFilter.cpp b/ndb/src/ndbapi/NdbScanFilter.cpp
index 2e9e338d5aa..58e9f180119 100644
--- a/ndb/src/ndbapi/NdbScanFilter.cpp
+++ b/ndb/src/ndbapi/NdbScanFilter.cpp
@@ -14,11 +14,15 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <NdbScanFilter.hpp>
+#include <Ndb.hpp>
#include <NdbOperation.hpp>
#include "NdbDictionaryImpl.hpp"
#include <Vector.hpp>
#include <NdbOut.hpp>
#include <Interpreter.hpp>
+#include <signaldata/AttrInfo.hpp>
+#include "NdbApiSignal.hpp"
+#include "NdbUtil.hpp"
#ifdef VM_TRACE
#include <NdbEnv.h>
@@ -31,6 +35,7 @@
class NdbScanFilterImpl {
public:
+ NdbScanFilterImpl() {}
struct State {
NdbScanFilter::Group m_group;
Uint32 m_popCount;
@@ -41,7 +46,9 @@ public:
int m_label;
State m_current;
+ Uint32 m_negative; //used for translating NAND/NOR to AND/OR, equal 0 or 1
Vector<State> m_stack;
+ Vector<Uint32> m_stack2; //to store info of m_negative
NdbOperation * m_operation;
Uint32 m_latestAttrib;
@@ -49,14 +56,37 @@ public:
int cond_col_const(Interpreter::BinaryCondition, Uint32 attrId,
const void * value, Uint32 len);
+
+ bool m_abort_on_too_large;
+
+ NdbOperation::OperationStatus m_initial_op_status;
+ Uint32 m_initial_AI_size;
+ Uint32 m_max_size;
+
+ Uint32 get_size() {
+ assert(m_operation->theTotalCurrAI_Len >= m_initial_AI_size);
+ return m_operation->theTotalCurrAI_Len - m_initial_AI_size;
+ }
+ bool check_size() {
+ if (get_size() <= m_max_size)
+ return true;
+ handle_filter_too_large();
+ return false;
+ }
+ void handle_filter_too_large();
+
+ NdbError m_error;
};
const Uint32 LabelExit = ~0;
-NdbScanFilter::NdbScanFilter(class NdbOperation * op)
+NdbScanFilter::NdbScanFilter(class NdbOperation * op,
+ bool abort_on_too_large,
+ Uint32 max_size)
: m_impl(* new NdbScanFilterImpl())
{
+ DBUG_ENTER("NdbScanFilter::NdbScanFilter");
m_impl.m_current.m_group = (NdbScanFilter::Group)0;
m_impl.m_current.m_popCount = 0;
m_impl.m_current.m_ownLabel = 0;
@@ -65,6 +95,22 @@ NdbScanFilter::NdbScanFilter(class NdbOperation * op)
m_impl.m_label = 0;
m_impl.m_latestAttrib = ~0;
m_impl.m_operation = op;
+ m_impl.m_negative = 0;
+
+ DBUG_PRINT("info", ("op status: %d tot AI: %u in curr: %u",
+ op->theStatus,
+ op->theTotalCurrAI_Len, op->theAI_LenInCurrAI));
+
+ m_impl.m_abort_on_too_large = abort_on_too_large;
+
+ m_impl.m_initial_op_status = op->theStatus;
+ m_impl.m_initial_AI_size = op->theTotalCurrAI_Len;
+ if (max_size > NDB_MAX_SCANFILTER_SIZE_IN_WORDS)
+ max_size = NDB_MAX_SCANFILTER_SIZE_IN_WORDS;
+ m_impl.m_max_size = max_size;
+
+ m_impl.m_error.code = 0;
+ DBUG_VOID_RETURN;
}
NdbScanFilter::~NdbScanFilter(){
@@ -74,18 +120,43 @@ NdbScanFilter::~NdbScanFilter(){
int
NdbScanFilter::begin(Group group){
+ if (m_impl.m_stack2.push_back(m_impl.m_negative))
+ {
+ m_impl.m_operation->setErrorCodeAbort(4000);
+ return -1;
+ }
switch(group){
case NdbScanFilter::AND:
INT_DEBUG(("Begin(AND)"));
+ if(m_impl.m_negative == 1){
+ group = NdbScanFilter::OR;
+ }
break;
case NdbScanFilter::OR:
INT_DEBUG(("Begin(OR)"));
+ if(m_impl.m_negative == 1){
+ group = NdbScanFilter::AND;
+ }
break;
case NdbScanFilter::NAND:
INT_DEBUG(("Begin(NAND)"));
+ if(m_impl.m_negative == 0){
+ group = NdbScanFilter::OR;
+ m_impl.m_negative = 1;
+ }else{
+ group = NdbScanFilter::AND;
+ m_impl.m_negative = 0;
+ }
break;
case NdbScanFilter::NOR:
INT_DEBUG(("Begin(NOR)"));
+ if(m_impl.m_negative == 0){
+ group = NdbScanFilter::AND;
+ m_impl.m_negative = 1;
+ }else{
+ group = NdbScanFilter::OR;
+ m_impl.m_negative = 0;
+ }
break;
}
@@ -102,7 +173,11 @@ NdbScanFilter::begin(Group group){
}
NdbScanFilterImpl::State tmp = m_impl.m_current;
- m_impl.m_stack.push_back(m_impl.m_current);
+ if (m_impl.m_stack.push_back(m_impl.m_current))
+ {
+ m_impl.m_operation->setErrorCodeAbort(4000);
+ return -1;
+ }
m_impl.m_current.m_group = group;
m_impl.m_current.m_ownLabel = m_impl.m_label++;
m_impl.m_current.m_popCount = 0;
@@ -129,6 +204,13 @@ NdbScanFilter::begin(Group group){
int
NdbScanFilter::end(){
+ if(m_impl.m_stack2.size() == 0){
+ m_impl.m_operation->setErrorCodeAbort(4259);
+ return -1;
+ }
+ m_impl.m_negative = m_impl.m_stack2.back();
+ m_impl.m_stack2.erase(m_impl.m_stack2.size() - 1);
+
switch(m_impl.m_current.m_group){
case NdbScanFilter::AND:
INT_DEBUG(("End(AND pc=%d)", m_impl.m_current.m_popCount));
@@ -150,36 +232,48 @@ NdbScanFilter::end(){
}
NdbScanFilterImpl::State tmp = m_impl.m_current;
+ if(m_impl.m_stack.size() == 0){
+ m_impl.m_operation->setErrorCodeAbort(4259);
+ return -1;
+ }
m_impl.m_current = m_impl.m_stack.back();
m_impl.m_stack.erase(m_impl.m_stack.size() - 1);
switch(tmp.m_group){
case NdbScanFilter::AND:
if(tmp.m_trueLabel == (Uint32)~0){
- m_impl.m_operation->interpret_exit_ok();
+ if (m_impl.m_operation->interpret_exit_ok() == -1)
+ return -1;
} else {
- m_impl.m_operation->branch_label(tmp.m_trueLabel);
+ if (m_impl.m_operation->branch_label(tmp.m_trueLabel) == -1)
+ return -1;
}
break;
case NdbScanFilter::NAND:
if(tmp.m_trueLabel == (Uint32)~0){
- m_impl.m_operation->interpret_exit_nok();
+ if (m_impl.m_operation->interpret_exit_nok() == -1)
+ return -1;
} else {
- m_impl.m_operation->branch_label(tmp.m_falseLabel);
+ if (m_impl.m_operation->branch_label(tmp.m_falseLabel) == -1)
+ return -1;
}
break;
case NdbScanFilter::OR:
if(tmp.m_falseLabel == (Uint32)~0){
- m_impl.m_operation->interpret_exit_nok();
+ if (m_impl.m_operation->interpret_exit_nok() == -1)
+ return -1;
} else {
- m_impl.m_operation->branch_label(tmp.m_falseLabel);
+ if (m_impl.m_operation->branch_label(tmp.m_falseLabel) == -1)
+ return -1;
}
break;
case NdbScanFilter::NOR:
if(tmp.m_falseLabel == (Uint32)~0){
- m_impl.m_operation->interpret_exit_ok();
+ if (m_impl.m_operation->interpret_exit_ok() == -1)
+ return -1;
} else {
- m_impl.m_operation->branch_label(tmp.m_trueLabel);
+ if (m_impl.m_operation->branch_label(tmp.m_trueLabel) == -1)
+ return -1;
}
break;
default:
@@ -187,24 +281,29 @@ NdbScanFilter::end(){
return -1;
}
- m_impl.m_operation->def_label(tmp.m_ownLabel);
+ if (m_impl.m_operation->def_label(tmp.m_ownLabel) == -1)
+ return -1;
if(m_impl.m_stack.size() == 0){
switch(tmp.m_group){
case NdbScanFilter::AND:
case NdbScanFilter::NOR:
- m_impl.m_operation->interpret_exit_nok();
+ if (m_impl.m_operation->interpret_exit_nok() == -1)
+ return -1;
break;
case NdbScanFilter::OR:
case NdbScanFilter::NAND:
- m_impl.m_operation->interpret_exit_ok();
+ if (m_impl.m_operation->interpret_exit_ok() == -1)
+ return -1;
break;
default:
m_impl.m_operation->setErrorCodeAbort(4260);
return -1;
}
}
-
+
+ if (!m_impl.check_size())
+ return -1;
return 0;
}
@@ -217,10 +316,16 @@ NdbScanFilter::istrue(){
}
if(m_impl.m_current.m_trueLabel == (Uint32)~0){
- return m_impl.m_operation->interpret_exit_ok();
+ if (m_impl.m_operation->interpret_exit_ok() == -1)
+ return -1;
} else {
- return m_impl.m_operation->branch_label(m_impl.m_current.m_trueLabel);
+ if (m_impl.m_operation->branch_label(m_impl.m_current.m_trueLabel) == -1)
+ return -1;
}
+
+ if (!m_impl.check_size())
+ return -1;
+ return 0;
}
int
@@ -232,10 +337,16 @@ NdbScanFilter::isfalse(){
}
if(m_impl.m_current.m_falseLabel == (Uint32)~0){
- return m_impl.m_operation->interpret_exit_nok();
+ if (m_impl.m_operation->interpret_exit_nok() == -1)
+ return -1;
} else {
- return m_impl.m_operation->branch_label(m_impl.m_current.m_falseLabel);
+ if (m_impl.m_operation->branch_label(m_impl.m_current.m_falseLabel) == -1)
+ return -1;
}
+
+ if (!m_impl.check_size())
+ return -1;
+ return 0;
}
@@ -286,18 +397,28 @@ NdbScanFilterImpl::cond_col(Interpreter::UnaryCondition op, Uint32 AttrId){
}
Branch1 branch = table2[op].m_branches[m_current.m_group];
- (m_operation->* branch)(AttrId, m_current.m_ownLabel);
+ if ((m_operation->* branch)(AttrId, m_current.m_ownLabel) == -1)
+ return -1;
+
+ if (!check_size())
+ return -1;
return 0;
}
int
NdbScanFilter::isnull(int AttrId){
- return m_impl.cond_col(Interpreter::IS_NULL, AttrId);
+ if(m_impl.m_negative == 1)
+ return m_impl.cond_col(Interpreter::IS_NOT_NULL, AttrId);
+ else
+ return m_impl.cond_col(Interpreter::IS_NULL, AttrId);
}
int
NdbScanFilter::isnotnull(int AttrId){
- return m_impl.cond_col(Interpreter::IS_NOT_NULL, AttrId);
+ if(m_impl.m_negative == 1)
+ return m_impl.cond_col(Interpreter::IS_NULL, AttrId);
+ else
+ return m_impl.cond_col(Interpreter::IS_NOT_NULL, AttrId);
}
struct tab3 {
@@ -394,8 +515,17 @@ NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition op,
m_operation->setErrorCodeAbort(4260);
return -1;
}
+
+ StrBranch2 branch;
+ if(m_negative == 1){ //change NdbOperation to its negative
+ if(m_current.m_group == NdbScanFilter::AND)
+ branch = table3[op].m_branches[(Uint32)(m_current.m_group) + 1];
+ if(m_current.m_group == NdbScanFilter::OR)
+ branch = table3[op].m_branches[(Uint32)(m_current.m_group) - 1];
+ }else{
+ branch = table3[op].m_branches[(Uint32)(m_current.m_group)];
+ }
- StrBranch2 branch = table3[op].m_branches[m_current.m_group];
const NdbDictionary::Column * col =
m_operation->m_currentTable->getColumn(AttrId);
@@ -404,8 +534,12 @@ NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition op,
return -1;
}
- int ret = (m_operation->* branch)(AttrId, value, len, false, m_current.m_ownLabel);
- return ret;
+ if ((m_operation->* branch)(AttrId, value, len, false, m_current.m_ownLabel) == -1)
+ return -1;
+
+ if (!check_size())
+ return -1;
+ return 0;
}
int
@@ -431,7 +565,130 @@ NdbScanFilter::cmp(BinaryCondition cond, int ColId,
return m_impl.cond_col_const(Interpreter::NOT_LIKE, ColId, val, len);
}
return -1;
-}
+}
+
+void
+NdbScanFilterImpl::handle_filter_too_large()
+{
+ DBUG_ENTER("NdbScanFilterImpl::handle_filter_too_large");
+
+ NdbOperation* const op = m_operation;
+ m_error.code = NdbScanFilter::FilterTooLarge;
+ if (m_abort_on_too_large)
+ op->setErrorCodeAbort(m_error.code);
+
+ /*
+ * Possible interpreted parts at this point are:
+ *
+ * 1. initial read
+ * 2. interpreted program
+ *
+ * It is assumed that NdbScanFilter has created all of 2
+ * so that we don't have to save interpreter state.
+ */
+
+ const Uint32 size = get_size();
+ assert(size != 0);
+
+ // new ATTRINFO size
+ const Uint32 new_size = m_initial_AI_size;
+
+ // find last signal for new size
+ assert(op->theFirstATTRINFO != NULL);
+ NdbApiSignal* lastSignal = op->theFirstATTRINFO;
+ Uint32 n = 0;
+ while (n + AttrInfo::DataLength < new_size) {
+ lastSignal = lastSignal->next();
+ assert(lastSignal != NULL);
+ n += AttrInfo::DataLength;
+ }
+ assert(n < size);
+
+ // release remaining signals
+ NdbApiSignal* tSignal = lastSignal->next();
+ op->theNdb->releaseSignalsInList(&tSignal);
+ lastSignal->next(NULL);
+
+ // length of lastSignal
+ const Uint32 new_curr = AttrInfo::HeaderLength + new_size - n;
+ assert(new_curr <= 25);
+
+ DBUG_PRINT("info", ("op status: %d->%d tot AI: %u->%u in curr: %u->%u",
+ op->theStatus, m_initial_op_status,
+ op->theTotalCurrAI_Len, new_size,
+ op->theAI_LenInCurrAI, new_curr));
+
+ // reset op state
+ op->theStatus = m_initial_op_status;
+
+ // reset interpreter state to initial
+
+ NdbBranch* tBranch = op->theFirstBranch;
+ while (tBranch != NULL) {
+ NdbBranch* tmp = tBranch;
+ tBranch = tBranch->theNext;
+ op->theNdb->releaseNdbBranch(tmp);
+ }
+ op->theFirstBranch = NULL;
+ op->theLastBranch = NULL;
+
+ NdbLabel* tLabel = op->theFirstLabel;
+ while (tLabel != NULL) {
+ NdbLabel* tmp = tLabel;
+ tLabel = tLabel->theNext;
+ op->theNdb->releaseNdbLabel(tmp);
+ }
+ op->theFirstLabel = NULL;
+ op->theLastLabel = NULL;
+
+ NdbCall* tCall = op->theFirstCall;
+ while (tCall != NULL) {
+ NdbCall* tmp = tCall;
+ tCall = tCall->theNext;
+ op->theNdb->releaseNdbCall(tmp);
+ }
+ op->theFirstCall = NULL;
+ op->theLastCall = NULL;
+
+ NdbSubroutine* tSubroutine = op->theFirstSubroutine;
+ while (tSubroutine != NULL) {
+ NdbSubroutine* tmp = tSubroutine;
+ tSubroutine = tSubroutine->theNext;
+ op->theNdb->releaseNdbSubroutine(tmp);
+ }
+ op->theFirstSubroutine = NULL;
+ op->theLastSubroutine = NULL;
+
+ op->theNoOfLabels = 0;
+ op->theNoOfSubroutines = 0;
+
+ // reset AI size
+ op->theTotalCurrAI_Len = new_size;
+ op->theAI_LenInCurrAI = new_curr;
+
+ // reset signal pointers
+ op->theCurrentATTRINFO = lastSignal;
+ op->theATTRINFOptr = &lastSignal->getDataPtrSend()[new_curr];
+
+ // interpreter sizes are set later somewhere
+
+ DBUG_VOID_RETURN;
+}
+
+static void
+update(const NdbError & _err){
+ NdbError & error = (NdbError &) _err;
+ ndberror_struct ndberror = (ndberror_struct)error;
+ ndberror_update(&ndberror);
+ error = NdbError(ndberror);
+}
+
+const NdbError &
+NdbScanFilter::getNdbError() const
+{
+ update(m_impl.m_error);
+ return m_impl.m_error;
+}
#if 0
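The NAND/NOR support added to NdbScanFilter is a De Morgan rewrite: begin(NAND) is pushed as an OR group with m_negative set, begin(NOR) as an AND group, and while m_negative is set the nested groups and the individual conditions (isnull/isnotnull, the cmp branch table) are inverted until the matching end() pops the flag from m_stack2. A compact sketch of just the group translation; names are illustrative:

  enum GroupSketch { G_AND, G_OR, G_NAND, G_NOR };

  // Returns the group actually generated, updating the negation flag
  // exactly as NdbScanFilter::begin() does above.
  static GroupSketch translate_group(GroupSketch g, unsigned &negative)
  {
    switch (g) {
    case G_AND:  return negative ? G_OR  : G_AND;
    case G_OR:   return negative ? G_AND : G_OR;
    case G_NAND: negative = !negative; return negative ? G_OR  : G_AND;
    case G_NOR:  negative = !negative; return negative ? G_AND : G_OR;
    }
    return g;
  }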
diff --git a/ndb/src/ndbapi/NdbScanOperation.cpp b/ndb/src/ndbapi/NdbScanOperation.cpp
index 30046978542..9176fb47297 100644
--- a/ndb/src/ndbapi/NdbScanOperation.cpp
+++ b/ndb/src/ndbapi/NdbScanOperation.cpp
@@ -136,31 +136,6 @@ NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
}
theNdbCon->theScanningOp = this;
- theLockMode = lm;
-
- bool lockExcl, lockHoldMode, readCommitted;
- switch(lm){
- case NdbScanOperation::LM_Read:
- lockExcl = false;
- lockHoldMode = true;
- readCommitted = false;
- break;
- case NdbScanOperation::LM_Exclusive:
- lockExcl = true;
- lockHoldMode = true;
- readCommitted = false;
- break;
- case NdbScanOperation::LM_CommittedRead:
- lockExcl = false;
- lockHoldMode = false;
- readCommitted = true;
- break;
- default:
- setErrorCode(4003);
- return -1;
- }
-
- m_keyInfo = ((scan_flags & SF_KeyInfo) || lockExcl) ? 1 : 0;
bool rangeScan = false;
if (m_accessTable->m_indexType == NdbDictionary::Index::OrderedIndex)
@@ -210,13 +185,13 @@ NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
Uint32 reqInfo = 0;
ScanTabReq::setParallelism(reqInfo, parallel);
ScanTabReq::setScanBatch(reqInfo, 0);
- ScanTabReq::setLockMode(reqInfo, lockExcl);
- ScanTabReq::setHoldLockFlag(reqInfo, lockHoldMode);
- ScanTabReq::setReadCommittedFlag(reqInfo, readCommitted);
ScanTabReq::setRangeScanFlag(reqInfo, rangeScan);
ScanTabReq::setTupScanFlag(reqInfo, tupScan);
req->requestInfo = reqInfo;
+ m_keyInfo = (scan_flags & SF_KeyInfo) ? 1 : 0;
+ setReadLockMode(lm);
+
Uint64 transId = theNdbCon->getTransactionId();
req->transId1 = (Uint32) transId;
req->transId2 = (Uint32) (transId >> 32);
@@ -236,6 +211,41 @@ NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
return 0;
}
+void
+NdbScanOperation::setReadLockMode(LockMode lockMode)
+{
+ bool lockExcl, lockHoldMode, readCommitted;
+ switch (lockMode)
+ {
+ case LM_CommittedRead:
+ lockExcl= false;
+ lockHoldMode= false;
+ readCommitted= true;
+ break;
+ case LM_Read:
+ lockExcl= false;
+ lockHoldMode= true;
+ readCommitted= false;
+ break;
+ case LM_Exclusive:
+ lockExcl= true;
+ lockHoldMode= true;
+ readCommitted= false;
+ m_keyInfo= 1;
+ break;
+ default:
+ /* Not supported / invalid. */
+ assert(false);
+ }
+ theLockMode= lockMode;
+ ScanTabReq *req= CAST_PTR(ScanTabReq, theSCAN_TABREQ->getDataPtrSend());
+ Uint32 reqInfo= req->requestInfo;
+ ScanTabReq::setLockMode(reqInfo, lockExcl);
+ ScanTabReq::setHoldLockFlag(reqInfo, lockHoldMode);
+ ScanTabReq::setReadCommittedFlag(reqInfo, readCommitted);
+ req->requestInfo= reqInfo;
+}
+
int
NdbScanOperation::fix_receivers(Uint32 parallel){
assert(parallel > 0);
@@ -797,9 +807,12 @@ int NdbScanOperation::prepareSendScan(Uint32 aTC_ConnectPtr,
req->requestInfo = reqInfo;
for(Uint32 i = 0; i<theParallelism; i++){
- m_receivers[i]->do_get_value(&theReceiver, batch_size,
- key_size,
- m_read_range_no);
+ if (m_receivers[i]->do_get_value(&theReceiver, batch_size,
+ key_size,
+ m_read_range_no))
+ {
+ return -1;
+ }
}
return 0;
}
@@ -829,7 +842,6 @@ NdbScanOperation::doSendScan(int aProcessorId)
tSignal = theSCAN_TABREQ;
Uint32 tupKeyLen = theTupKeyLen;
- Uint32 len = theTotalNrOfKeyWordInSignal;
Uint32 aTC_ConnectPtr = theNdbCon->theTCConPtr;
Uint64 transId = theNdbCon->theTransactionId;
@@ -837,6 +849,10 @@ NdbScanOperation::doSendScan(int aProcessorId)
// sending it. This could not be done in openScan because
// we created the ATTRINFO signals after the SCAN_TABREQ signal.
ScanTabReq * const req = CAST_PTR(ScanTabReq, tSignal->getDataPtrSend());
+ if (unlikely(theTotalCurrAI_Len > ScanTabReq::MaxTotalAttrInfo)) {
+ setErrorCode(4257);
+ return -1;
+ }
req->attrLenKeyLen = (tupKeyLen << 16) | theTotalCurrAI_Len;
Uint32 tmp = req->requestInfo;
ScanTabReq::setDistributionKeyFlag(tmp, theDistrKeyIndicator_);
@@ -1188,7 +1204,7 @@ NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo,
* so it's safe to use [tIndexAttrId]
* (instead of looping as is NdbOperation::equal_impl)
*/
- if(type == BoundEQ && tDistrKey)
+ if(type == BoundEQ && tDistrKey && !m_multi_range)
{
theNoOfTupKeyLeft--;
return handle_distribution_key((Uint64*)aValue, sizeInWords);
@@ -1249,7 +1265,8 @@ NdbIndexScanOperation::readTuples(LockMode lm,
const bool order_by = scan_flags & SF_OrderBy;
const bool order_desc = scan_flags & SF_Descending;
const bool read_range_no = scan_flags & SF_ReadRangeNo;
-
+ m_multi_range = scan_flags & SF_MultiRange;
+
int res = NdbScanOperation::readTuples(lm, scan_flags, parallel, batch);
if(!res && read_range_no)
{
@@ -1299,8 +1316,6 @@ NdbIndexScanOperation::fix_get_values(){
Uint32 cnt = m_accessTable->getNoOfColumns() - 1;
assert(cnt < NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY);
- const NdbIndexImpl * idx = m_accessTable->m_index;
- const NdbTableImpl * tab = m_currentTable;
for(Uint32 i = 0; i<cnt; i++){
Uint32 val = theTupleKeyDefined[i][0];
switch(val){
@@ -1716,6 +1731,12 @@ NdbIndexScanOperation::reset_bounds(bool forceSend){
int
NdbIndexScanOperation::end_of_bound(Uint32 no)
{
+ DBUG_ENTER("end_of_bound");
+ DBUG_PRINT("info", ("Range number %u", no));
+ /* More than one range requires that SF_MultiRange was
+ specified in readTuples(). */
+ if (no > 0 && !m_multi_range)
+ DBUG_RETURN(-1);
if(no < (1 << 13)) // Only 12-bits no of ranges
{
Uint32 bound_head = * m_first_bound_word;
@@ -1724,9 +1745,9 @@ NdbIndexScanOperation::end_of_bound(Uint32 no)
m_first_bound_word = theKEYINFOptr + theTotalNrOfKeyWordInSignal;;
m_this_bound_start = theTupKeyLen;
- return 0;
+ DBUG_RETURN(0);
}
- return -1;
+ DBUG_RETURN(-1);
}
int
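NdbScanOperation::setReadLockMode() collapses the three lock modes into the three ScanTabReq request-info flags, with LM_Exclusive additionally forcing key info so the scanned row can be locked for take-over. The mapping written out as a small table (illustrative, not the real API):

  struct ScanLockFlags { bool lockExcl, holdLock, readCommitted, forceKeyInfo; };
  enum LockModeSketch { S_LM_CommittedRead, S_LM_Read, S_LM_Exclusive };

  static ScanLockFlags flags_for(LockModeSketch lm)
  {
    switch (lm) {
    case S_LM_CommittedRead: return ScanLockFlags{ false, false, true,  false };
    case S_LM_Read:          return ScanLockFlags{ false, true,  false, false };
    case S_LM_Exclusive:     return ScanLockFlags{ true,  true,  false, true  };
    }
    return ScanLockFlags{ false, false, true, false };  // unreachable for valid input
  }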
diff --git a/ndb/src/ndbapi/NdbTransaction.cpp b/ndb/src/ndbapi/NdbTransaction.cpp
index 6ddec39c4fc..7d3281a93d1 100644
--- a/ndb/src/ndbapi/NdbTransaction.cpp
+++ b/ndb/src/ndbapi/NdbTransaction.cpp
@@ -30,6 +30,7 @@
#include <signaldata/TcCommit.hpp>
#include <signaldata/TcKeyFailConf.hpp>
#include <signaldata/TcHbRep.hpp>
+#include <signaldata/TcRollbackRep.hpp>
/*****************************************************************************
NdbTransaction( Ndb* aNdb );
@@ -81,6 +82,7 @@ NdbTransaction::NdbTransaction( Ndb* aNdb ) :
{
theListState = NotInList;
theError.code = 0;
+ //theId = NdbObjectIdMap::InvalidId;
theId = theNdb->theImpl->theNdbObjectIdMap.map(this);
#define CHECK_SZ(mask, sz) assert((sizeof(mask)/sizeof(mask[0])) == sz)
@@ -106,7 +108,7 @@ void init();
Remark: Initialise connection object for new transaction.
*****************************************************************************/
-void
+int
NdbTransaction::init()
{
theListState = NotInList;
@@ -147,6 +149,17 @@ NdbTransaction::init()
//
theBlobFlag = false;
thePendingBlobOps = 0;
+ if (theId == NdbObjectIdMap::InvalidId)
+ {
+ theId = theNdb->theImpl->theNdbObjectIdMap.map(this);
+ if (theId == NdbObjectIdMap::InvalidId)
+ {
+ theError.code = 4000;
+ return -1;
+ }
+ }
+ return 0;
+
}//NdbTransaction::init()
/*****************************************************************************
@@ -469,12 +482,27 @@ NdbTransaction::executeNoBlobs(ExecType aTypeOfExec,
while (1) {
int noOfComp = tNdb->sendPollNdb(3 * timeout, 1, forceSend);
if (noOfComp == 0) {
- /**
- * This timeout situation can occur if NDB crashes.
+ /*
+ * This is only one of two places where we can hit this error;
+ * the other is Ndb::check_send_timeout() in Ndbif.cpp.
+ *
+ * We behave similarly in both places.
+ * Hitting this at all is certainly a bug.
*/
- ndbout << "This timeout should never occur, execute(..)" << endl;
- theError.code = 4012;
- setOperationErrorCodeAbort(4012); // Error code for "Cluster Failure"
+ g_eventLogger.error("WARNING: Timeout in executeNoBlobs() waiting for "
+ "response from NDB data nodes. This should NEVER "
+ "occur. You have likely hit a NDB Bug. Please "
+ "file a bug.");
+ DBUG_PRINT("error",("This timeout should never occure, execute()"));
+ g_eventLogger.error("Forcibly trying to rollback txn (%p"
+ ") to try to clean up data node resources.",
+ this);
+ executeNoBlobs(NdbTransaction::Rollback);
+ theError.code = 4012;
+ theError.status= NdbError::PermanentError;
+ theError.classification= NdbError::TimeoutExpired;
+ setOperationErrorCodeAbort(4012); // ndbd timeout
DBUG_RETURN(-1);
}//if
@@ -538,7 +566,12 @@ NdbTransaction::executeAsynchPrepare( ExecType aTypeOfExec,
*/
if (theError.code != 0)
DBUG_PRINT("enter", ("Resetting error %d on execute", theError.code));
- theError.code = 0;
+ /**
+ * for timeout (4012) we want sendROLLBACK to behave differently.
+ * Else, normal behaviour of reset errcode
+ */
+ if (theError.code != 4012)
+ theError.code = 0;
NdbScanOperation* tcOp = m_theFirstScanOperation;
if (tcOp != 0){
// Execute any cursor operations
@@ -861,6 +894,12 @@ NdbTransaction::sendROLLBACK() // Send a TCROLLBACKREQ signal;
tSignal.setData(theTCConPtr, 1);
tSignal.setData(tTransId1, 2);
tSignal.setData(tTransId2, 3);
+ if(theError.code == 4012)
+ {
+ g_eventLogger.error("Sending TCROLLBACKREQ with Bad flag");
+ tSignal.setLength(tSignal.getLength() + 1); // + flags
+ tSignal.setData(0x1, 4); // potentially bad data
+ }
tReturnCode = tp->sendSignal(&tSignal,theDBnode);
if (tReturnCode != -1) {
theSendStatus = sendTC_ROLLBACK;
@@ -1719,6 +1758,8 @@ Remark: Handles the reception of the ROLLBACKREP signal.
int
NdbTransaction::receiveTCROLLBACKREP( NdbApiSignal* aSignal)
{
+ DBUG_ENTER("NdbTransaction::receiveTCROLLBACKREP");
+
/****************************************************************************
Check that we are expecting signals from this transaction and that it doesn't
belong to a transaction already completed. Simply ignore messages from other
@@ -1726,6 +1767,11 @@ transactions.
****************************************************************************/
if(checkState_TransId(aSignal->getDataPtr() + 1)){
theError.code = aSignal->readData(4);// Override any previous errors
+ if (aSignal->getLength() == TcRollbackRep::SignalLength)
+ {
+ // Signal may contain additional error data
+ theError.details = (char *) aSignal->readData(5);
+ }
/**********************************************************************/
/* A serious error has occured. This could be due to deadlock or */
@@ -1737,14 +1783,14 @@ transactions.
theCompletionStatus = CompletedFailure;
theCommitStatus = Aborted;
theReturnStatus = ReturnFailure;
- return 0;
+ DBUG_RETURN(0);
} else {
#ifdef NDB_NO_DROPPED_SIGNAL
abort();
#endif
}
- return -1;
+ DBUG_RETURN(-1);
}//NdbTransaction::receiveTCROLLBACKREP()
/*******************************************************************************
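The timeout handling above makes error 4012 sticky: executeAsynchPrepare() normally clears any previous error code before preparing the next execute, but 4012 is kept so the forced rollback and sendROLLBACK() can add the extra flag word for the data node. The reset rule in isolation:

  // Sketch of the new reset rule: clear everything except the 4012 timeout,
  // which must survive until the rollback has been sent.
  static void reset_error_before_execute(int &errorCode)
  {
    if (errorCode != 4012)
      errorCode = 0;
  }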
diff --git a/ndb/src/ndbapi/Ndbif.cpp b/ndb/src/ndbapi/Ndbif.cpp
index 75ec5df60cb..d404436be59 100644
--- a/ndb/src/ndbapi/Ndbif.cpp
+++ b/ndb/src/ndbapi/Ndbif.cpp
@@ -816,8 +816,9 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
InvalidSignal:
#ifdef VM_TRACE
ndbout_c("Ndbif: Error Ndb::handleReceivedSignal "
- "(GSN=%d, theImpl->theWaiter.m_state=%d)"
+ "(tFirstDataPtr=%p, GSN=%d, theImpl->theWaiter.m_state=%d)"
" sender = (Block: %d Node: %d)",
+ tFirstDataPtr,
tSignalNumber,
tWaitState,
refToBlock(aSignal->theSendersBlockRef),
diff --git a/ndb/src/ndbapi/Ndblist.cpp b/ndb/src/ndbapi/Ndblist.cpp
index 812410e283f..443f9bb42fc 100644
--- a/ndb/src/ndbapi/Ndblist.cpp
+++ b/ndb/src/ndbapi/Ndblist.cpp
@@ -74,7 +74,10 @@ Ndb::checkFailedNode()
int
Ndb::createConIdleList(int aNrOfCon)
{
- theImpl->theConIdleList.fill(this, aNrOfCon);
+ if (theImpl->theConIdleList.fill(this, aNrOfCon))
+ {
+ return -1;
+ }
return aNrOfCon;
}
@@ -90,7 +93,10 @@ Ndb::createConIdleList(int aNrOfCon)
int
Ndb::createOpIdleList(int aNrOfOp)
{
- theImpl->theOpIdleList.fill(this, aNrOfOp);
+ if (theImpl->theOpIdleList.fill(this, aNrOfOp))
+ {
+ return -1;
+ }
return aNrOfOp;
}
diff --git a/ndb/src/ndbapi/ObjectMap.cpp b/ndb/src/ndbapi/ObjectMap.cpp
new file mode 100644
index 00000000000..c87911a10d4
--- /dev/null
+++ b/ndb/src/ndbapi/ObjectMap.cpp
@@ -0,0 +1,62 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "ObjectMap.hpp"
+
+NdbObjectIdMap::NdbObjectIdMap(NdbMutex* mutex, Uint32 sz, Uint32 eSz)
+{
+ m_size = 0;
+ m_firstFree = InvalidId;
+ m_map = 0;
+ m_mutex = mutex;
+ m_expandSize = eSz;
+ expand(sz);
+#ifdef DEBUG_OBJECTMAP
+ ndbout_c("NdbObjectIdMap:::NdbObjectIdMap(%u)", sz);
+#endif
+}
+
+NdbObjectIdMap::~NdbObjectIdMap()
+{
+ free(m_map);
+}
+
+int NdbObjectIdMap::expand(Uint32 incSize)
+{
+ NdbMutex_Lock(m_mutex);
+ Uint32 newSize = m_size + incSize;
+ MapEntry * tmp = (MapEntry*)realloc(m_map, newSize * sizeof(MapEntry));
+
+ if (likely(tmp != 0))
+ {
+ m_map = tmp;
+
+ for(Uint32 i = m_size; i < newSize; i++){
+ m_map[i].m_next = i + 1;
+ }
+ m_firstFree = m_size;
+ m_map[newSize-1].m_next = InvalidId;
+ m_size = newSize;
+ }
+ else
+ {
+ NdbMutex_Unlock(m_mutex);
+ g_eventLogger.error("NdbObjectIdMap::expand: realloc(%u*%u) failed",
+ newSize, sizeof(MapEntry));
+ return -1;
+ }
+ NdbMutex_Unlock(m_mutex);
+ return 0;
+}
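ObjectMap.cpp moves NdbObjectIdMap::expand() out of line and makes it report realloc() failure instead of logging and carrying on; map() then returns InvalidId when growth fails. The table is a realloc()-grown array whose unused slots are chained into a free list through m_next, so allocation stays O(1). A minimal sketch of that growth step, with illustrative names:

  #include <cstdlib>

  struct IdMapSketch {
    struct Entry { unsigned m_next; void *m_obj; };
    static const unsigned InvalidId = ~0u;
    Entry   *m_map;
    unsigned m_size;
    unsigned m_firstFree;

    int expand(unsigned inc)
    {
      const unsigned newSize = m_size + inc;
      Entry *tmp = (Entry*)realloc(m_map, newSize * sizeof(Entry));
      if (tmp == NULL)
        return -1;                          // caller maps this to InvalidId
      m_map = tmp;
      for (unsigned i = m_size; i < newSize; i++)
        m_map[i].m_next = i + 1;            // chain the new free slots
      m_map[newSize - 1].m_next = InvalidId;
      m_firstFree = m_size;
      m_size = newSize;
      return 0;
    }
  };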
diff --git a/ndb/src/ndbapi/ObjectMap.hpp b/ndb/src/ndbapi/ObjectMap.hpp
index 486ef08abb8..6a8dbcbeef5 100644
--- a/ndb/src/ndbapi/ObjectMap.hpp
+++ b/ndb/src/ndbapi/ObjectMap.hpp
@@ -20,6 +20,9 @@
//#include <NdbMutex.h>
#include <NdbOut.hpp>
+#include <EventLogger.hpp>
+extern EventLogger g_eventLogger;
+
//#define DEBUG_OBJECTMAP
/**
@@ -46,36 +49,17 @@ private:
} * m_map;
NdbMutex * m_mutex;
- void expand(Uint32 newSize);
+ int expand(Uint32 newSize);
};
inline
-NdbObjectIdMap::NdbObjectIdMap(NdbMutex* mutex, Uint32 sz, Uint32 eSz) {
- m_size = 0;
- m_firstFree = InvalidId;
- m_map = 0;
- m_mutex = mutex;
- m_expandSize = eSz;
- expand(sz);
-#ifdef DEBUG_OBJECTMAP
- ndbout_c("NdbObjectIdMap:::NdbObjectIdMap(%u)", sz);
-#endif
-}
-
-inline
-NdbObjectIdMap::~NdbObjectIdMap(){
- free(m_map);
-}
-
-inline
Uint32
NdbObjectIdMap::map(void * object){
// lock();
- if(m_firstFree == InvalidId){
- expand(m_expandSize);
- }
+ if(m_firstFree == InvalidId && expand(m_expandSize))
+ return InvalidId;
Uint32 ff = m_firstFree;
m_firstFree = m_map[ff].m_next;
@@ -103,7 +87,8 @@ NdbObjectIdMap::unmap(Uint32 id, void *object){
m_map[i].m_next = m_firstFree;
m_firstFree = i;
} else {
- ndbout_c("Error: NdbObjectIdMap::::unmap(%u, 0x%x) obj=0x%x", id, object, obj);
+ g_eventLogger.error("NdbObjectIdMap::unmap(%u, 0x%x) obj=0x%x",
+ id, object, obj);
return 0;
}
@@ -129,29 +114,4 @@ NdbObjectIdMap::getObject(Uint32 id){
}
return 0;
}
-
-inline void
-NdbObjectIdMap::expand(Uint32 incSize){
- NdbMutex_Lock(m_mutex);
- Uint32 newSize = m_size + incSize;
- MapEntry * tmp = (MapEntry*)realloc(m_map, newSize * sizeof(MapEntry));
-
- if (likely(tmp != 0))
- {
- m_map = tmp;
-
- for(Uint32 i = m_size; i<newSize; i++){
- m_map[i].m_next = i + 1;
- }
- m_firstFree = m_size;
- m_map[newSize-1].m_next = InvalidId;
- m_size = newSize;
- }
- else
- {
- ndbout_c("NdbObjectIdMap::expand unable to expand!!");
- }
- NdbMutex_Unlock(m_mutex);
-}
-
#endif
diff --git a/ndb/src/ndbapi/SignalSender.cpp b/ndb/src/ndbapi/SignalSender.cpp
index 199c6d6e804..393524bb96e 100644
--- a/ndb/src/ndbapi/SignalSender.cpp
+++ b/ndb/src/ndbapi/SignalSender.cpp
@@ -19,14 +19,6 @@
#include <signaldata/NFCompleteRep.hpp>
#include <signaldata/NodeFailRep.hpp>
-static
-void
-require(bool x)
-{
- if (!x)
- abort();
-}
-
SimpleSignal::SimpleSignal(bool dealloc){
memset(this, 0, sizeof(* this));
deallocSections = dealloc;
@@ -155,7 +147,10 @@ SignalSender::waitFor(Uint32 timeOutMillis, T & t)
{
SimpleSignal * s = t.check(m_jobBuffer);
if(s != 0){
- m_usedBuffer.push_back(s);
+ if (m_usedBuffer.push_back(s))
+ {
+ return 0;
+ }
return s;
}
@@ -170,7 +165,10 @@ SignalSender::waitFor(Uint32 timeOutMillis, T & t)
SimpleSignal * s = t.check(m_jobBuffer);
if(s != 0){
- m_usedBuffer.push_back(s);
+ if (m_usedBuffer.push_back(s))
+ {
+ return 0;
+ }
return s;
}
@@ -183,6 +181,7 @@ SignalSender::waitFor(Uint32 timeOutMillis, T & t)
class WaitForAny {
public:
+ WaitForAny() {}
SimpleSignal * check(Vector<SimpleSignal*> & m_jobBuffer){
if(m_jobBuffer.size() > 0){
SimpleSignal * s = m_jobBuffer[0];
@@ -202,6 +201,7 @@ SignalSender::waitFor(Uint32 timeOutMillis){
class WaitForNode {
public:
+ WaitForNode() {}
Uint32 m_nodeId;
SimpleSignal * check(Vector<SimpleSignal*> & m_jobBuffer){
Uint32 len = m_jobBuffer.size();
diff --git a/ndb/src/ndbapi/TransporterFacade.hpp b/ndb/src/ndbapi/TransporterFacade.hpp
index 2e0f08601e5..5a826bc2309 100644
--- a/ndb/src/ndbapi/TransporterFacade.hpp
+++ b/ndb/src/ndbapi/TransporterFacade.hpp
@@ -315,7 +315,8 @@ inline
bool
TransporterFacade::get_node_stopping(NodeId n) const {
const ClusterMgr::Node & node = theClusterMgr->getNodeInfo(n);
- return ((node.m_state.startLevel == NodeState::SL_STOPPING_1) ||
+ return (!node.m_state.getSingleUserMode() &&
+ (node.m_state.startLevel == NodeState::SL_STOPPING_1) ||
(node.m_state.startLevel == NodeState::SL_STOPPING_2));
}
@@ -326,16 +327,9 @@ TransporterFacade::getIsNodeSendable(NodeId n) const {
const Uint32 startLevel = node.m_state.startLevel;
if (node.m_info.m_type == NodeInfo::DB) {
- if(node.m_state.singleUserMode &&
- ownId() == node.m_state.singleUserApi) {
- return (node.compatible &&
- (node.m_state.startLevel == NodeState::SL_STOPPING_1 ||
- node.m_state.startLevel == NodeState::SL_STARTED ||
- node.m_state.startLevel == NodeState::SL_SINGLEUSER));
- }
- else
- return node.compatible && (startLevel == NodeState::SL_STARTED ||
- startLevel == NodeState::SL_STOPPING_1);
+ return node.compatible && (startLevel == NodeState::SL_STARTED ||
+ startLevel == NodeState::SL_STOPPING_1 ||
+ node.m_state.getSingleUserMode());
} else if (node.m_info.m_type == NodeInfo::REP) {
/**
* @todo Check that REP node actually has received API_REG_REQ
diff --git a/ndb/src/ndbapi/ndb_cluster_connection.cpp b/ndb/src/ndbapi/ndb_cluster_connection.cpp
index 467522aecd8..9eed5db8bad 100644
--- a/ndb/src/ndbapi/ndb_cluster_connection.cpp
+++ b/ndb/src/ndbapi/ndb_cluster_connection.cpp
@@ -353,7 +353,7 @@ Ndb_cluster_connection_impl::set_name(const char *name)
}
}
-void
+int
Ndb_cluster_connection_impl::init_nodes_vector(Uint32 nodeid,
const ndb_mgm_configuration
&config)
@@ -402,7 +402,10 @@ Ndb_cluster_connection_impl::init_nodes_vector(Uint32 nodeid,
break;
}
}
- m_impl.m_all_nodes.push_back(Node(group,remoteNodeId));
+ if (m_impl.m_all_nodes.push_back(Node(group,remoteNodeId)))
+ {
+ DBUG_RETURN(-1);
+ }
DBUG_PRINT("info",("saved %d %d", group,remoteNodeId));
for (int i= m_impl.m_all_nodes.size()-2;
i >= 0 && m_impl.m_all_nodes[i].group > m_impl.m_all_nodes[i+1].group;
@@ -449,7 +452,7 @@ Ndb_cluster_connection_impl::init_nodes_vector(Uint32 nodeid,
do_test();
#endif
- DBUG_VOID_RETURN;
+ DBUG_RETURN(0);
}
void
@@ -515,7 +518,6 @@ int Ndb_cluster_connection::connect(int no_retries, int retry_delay_in_seconds,
struct ndb_mgm_reply mgm_reply;
DBUG_ENTER("Ndb_cluster_connection::connect");
- const char* error = 0;
do {
if (m_impl.m_config_retriever == 0)
DBUG_RETURN(-1);
@@ -533,7 +535,11 @@ int Ndb_cluster_connection::connect(int no_retries, int retry_delay_in_seconds,
break;
m_impl.m_transporter_facade->start_instance(nodeId, props);
- m_impl.init_nodes_vector(nodeId, *props);
+ if (m_impl.init_nodes_vector(nodeId, *props))
+ {
+ ndbout_c("Ndb_cluster_connection::connect: malloc failure");
+ DBUG_RETURN(-1);
+ }
for(unsigned i=0;
i<m_impl.m_transporter_facade->get_registry()->m_transporter_interface.size();
diff --git a/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp b/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp
index 5bb5f0a0fca..d3ff7610e18 100644
--- a/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp
+++ b/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp
@@ -68,7 +68,7 @@ private:
};
Vector<Node> m_all_nodes;
- void init_nodes_vector(Uint32 nodeid, const ndb_mgm_configuration &config);
+ int init_nodes_vector(Uint32 nodeid, const ndb_mgm_configuration &config);
void connect_thread();
void set_name(const char *name);
diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c
index 328b0688857..4c60e384e6c 100644
--- a/ndb/src/ndbapi/ndberror.c
+++ b/ndb/src/ndbapi/ndberror.c
@@ -173,6 +173,8 @@ ErrorBundle ErrorCodes[] = {
{ 4022, TR, "Out of Send Buffer space in NDB API" },
{ 4032, TR, "Out of Send Buffer space in NDB API" },
{ 288, TR, "Out of index operations in transaction coordinator (increase MaxNoOfConcurrentIndexOperations)" },
+ { 289, TR, "Out of transaction buffer memory in TC (increase TransactionBufferMemory)" },
+
/**
* InsufficientSpace
*/
@@ -525,7 +527,8 @@ ErrorBundle ErrorCodes[] = {
{ 4270, IE, "Unknown blob error" },
{ 4335, AE, "Only one autoincrement column allowed per table. Having a table without primary key uses an autoincremented hidden key, i.e. a table without a primary key can not have an autoincremented column" },
{ 4271, AE, "Invalid index object, not retrieved via getIndex()" },
- { 4275, AE, "The blob method is incompatible with operation type or lock mode" }
+ { 4275, AE, "The blob method is incompatible with operation type or lock mode" },
+ { 4294, AE, "Scan filter is too large, discarded" }
};
static
@@ -637,8 +640,6 @@ ndberror_update(ndberror_struct * error){
if(!found){
error->status = ST_U;
}
-
- error->details = 0;
}
int
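Illustrative sketch (not part of the patch): the two new entries above are classified TR (temporary resource error) and AE (application error). A hedged sketch of how client code might react to them; trans is assumed to be an NdbTransaction*, and retry_later()/report_filter_too_large() are placeholder names, not NDB API calls:

    const NdbError &err = trans->getNdbError();
    if (err.code == 289)              // out of TransactionBufferMemory in TC
      retry_later();                  // TR: temporary, the operation may be retried
    else if (err.code == 4294)        // scan filter too large, discarded
      report_filter_too_large();      // AE: application error, retrying will not help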
diff --git a/ndb/test/include/NDBT_Test.hpp b/ndb/test/include/NDBT_Test.hpp
index c102c569933..e476a1a0759 100644
--- a/ndb/test/include/NDBT_Test.hpp
+++ b/ndb/test/include/NDBT_Test.hpp
@@ -325,6 +325,12 @@ public:
// supply argc and argv as parameters
int execute(int, const char**);
+ // NDBT's built-in test tables are fixed, and execute() always creates
+ // and drops them.  executeOneCtx() runs only the test context against a
+ // caller-supplied table, so customized test suites can reuse the
+ // framework with their own tables.
+ int executeOneCtx(Ndb_cluster_connection&,
+ const NdbDictionary::Table* ptab, const char* testname = NULL);
// These function can be used from main in the test program
// to control the behaviour of the testsuite
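Illustrative sketch (not part of the patch): the intended call pattern for the new entry point, mirroring main() of the new testScanFilter.cpp later in this patch (MYTAB1 and TEST_NAME are that file's own table definition and test name):

    Ndb_cluster_connection con;
    if (con.connect(12, 5, 1))
      return NDBT_ProgramExit(NDBT_FAILED);
    return testScanFilter.executeOneCtx(con, &MYTAB1, TEST_NAME);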
diff --git a/ndb/test/include/NdbRestarter.hpp b/ndb/test/include/NdbRestarter.hpp
index 63de32ac038..15da0218a16 100644
--- a/ndb/test/include/NdbRestarter.hpp
+++ b/ndb/test/include/NdbRestarter.hpp
@@ -61,6 +61,8 @@ public:
int dumpStateAllNodes(int * _args, int _num_args);
int getMasterNodeId();
+ int getNextMasterNodeId(int nodeId);
+ int getNodeGroup(int nodeId);
int getRandomNodeSameNodeGroup(int nodeId, int randomNumber);
int getRandomNodeOtherNodeGroup(int nodeId, int randomNumber);
int getRandomNotMasterNodeId(int randomNumber);
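Illustrative sketch (not part of the patch): how the two new helpers are used by runBug26457() in testNodeRestart.cpp, added later in this patch:

    NdbRestarter res;
    int master = res.getMasterNodeId();
    int next   = res.getNextMasterNodeId(master);  // node that would take over as master
    if (res.getNodeGroup(master) == res.getNodeGroup(next))
      ;  // the test restarts "next" and retries until the two are in different node groups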
diff --git a/ndb/test/ndbapi/Makefile.am b/ndb/test/ndbapi/Makefile.am
index 4766e6b83b3..9019d71ada2 100644
--- a/ndb/test/ndbapi/Makefile.am
+++ b/ndb/test/ndbapi/Makefile.am
@@ -39,6 +39,7 @@ testOperations \
testRestartGci \
testScan \
testInterpreter \
+testScanFilter \
testScanInterpreter \
testScanPerf \
testSystemRestart \
@@ -83,6 +84,7 @@ testOperations_SOURCES = testOperations.cpp
testRestartGci_SOURCES = testRestartGci.cpp
testScan_SOURCES = testScan.cpp ScanFunctions.hpp
testInterpreter_SOURCES = testInterpreter.cpp
+testScanFilter_SOURCES = testScanFilter.cpp
testScanInterpreter_SOURCES = testScanInterpreter.cpp ScanFilter.hpp ScanInterpretTest.hpp
testScanPerf_SOURCES = testScanPerf.cpp
testSystemRestart_SOURCES = testSystemRestart.cpp
diff --git a/ndb/test/ndbapi/benchronja.cpp b/ndb/test/ndbapi/benchronja.cpp
index 4973e6e2487..73ee324a888 100644
--- a/ndb/test/ndbapi/benchronja.cpp
+++ b/ndb/test/ndbapi/benchronja.cpp
@@ -41,7 +41,14 @@
#define MAXSTRLEN 16
#define MAXATTR 64
#define MAXTABLES 64
-#define MAXTHREADS 256
+#define NDB_MAXTHREADS 256
+/*
+ NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a
+ #define from <sys/thread.h> on AIX (IBM compiler). We explicitly
+ #undef it here lest someone use it by habit and get really funny
+ results. K&R says we may #undef non-existent symbols, so let's go.
+*/
+#undef MAXTHREADS
#define MAXATTRSIZE 8000
#define START_TIMER NdbTimer timer; timer.doStart();
#define STOP_TIMER timer.doStop();
@@ -56,18 +63,18 @@ struct ThreadNdb
Ndb* NdbRef;
};
-static NdbThread* threadLife[MAXTHREADS];
+static NdbThread* threadLife[NDB_MAXTHREADS];
static unsigned int tNoOfThreads;
static unsigned int tNoOfOpsPerExecute;
static unsigned int tNoOfRecords;
static unsigned int tNoOfOperations;
-static int ThreadReady[MAXTHREADS];
-static int ThreadStart[MAXTHREADS];
+static int ThreadReady[NDB_MAXTHREADS];
+static int ThreadStart[NDB_MAXTHREADS];
NDB_COMMAND(benchronja, "benchronja", "benchronja", "benchronja", 65535){
ndb_init();
- ThreadNdb tabThread[MAXTHREADS];
+ ThreadNdb tabThread[NDB_MAXTHREADS];
int i = 0 ;
int cont = 0 ;
Ndb* pMyNdb = NULL ; //( "TEST_DB" );
@@ -84,7 +91,7 @@ NDB_COMMAND(benchronja, "benchronja", "benchronja", "benchronja", 65535){
{
if (strcmp(argv[i], "-t") == 0){
tNoOfThreads = atoi(argv[i+1]);
- if ((tNoOfThreads < 1) || (tNoOfThreads > MAXTHREADS)) goto error_input;
+ if ((tNoOfThreads < 1) || (tNoOfThreads > NDB_MAXTHREADS)) goto error_input;
}else if (strcmp(argv[i], "-o") == 0){
tNoOfOperations = atoi(argv[i+1]);
if (tNoOfOperations < 1) goto error_input;
diff --git a/ndb/test/ndbapi/flexAsynch.cpp b/ndb/test/ndbapi/flexAsynch.cpp
index 20a157fc2f3..1f52315482f 100644
--- a/ndb/test/ndbapi/flexAsynch.cpp
+++ b/ndb/test/ndbapi/flexAsynch.cpp
@@ -35,7 +35,14 @@
#define MAXSTRLEN 16
#define MAXATTR 64
#define MAXTABLES 64
-#define MAXTHREADS 128
+#define NDB_MAXTHREADS 128
+/*
+ NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a
+ #define from <sys/thread.h> on AIX (IBM compiler). We explicitly
+ #undef it here lest someone use it by habit and get really funny
+ results. K&R says we may #undef non-existent symbols, so let's go.
+*/
+#undef MAXTHREADS
#define MAXPAR 1024
#define MAXATTRSIZE 1000
#define PKSIZE 2
@@ -76,10 +83,10 @@ struct ThreadNdb
int ThreadNo;
};
-static NdbThread* threadLife[MAXTHREADS];
+static NdbThread* threadLife[NDB_MAXTHREADS];
static int tNodeId;
-static int ThreadReady[MAXTHREADS];
-static StartType ThreadStart[MAXTHREADS];
+static int ThreadReady[NDB_MAXTHREADS];
+static StartType ThreadStart[NDB_MAXTHREADS];
static char tableName[MAXTABLES][MAXSTRLEN+1];
static char attrName[MAXATTR][MAXSTRLEN+1];
@@ -160,7 +167,7 @@ NDB_COMMAND(flexAsynch, "flexAsynch", "flexAsynch", "flexAsynch", 65535)
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
- pThreadData = new ThreadNdb[MAXTHREADS];
+ pThreadData = new ThreadNdb[NDB_MAXTHREADS];
ndbout << endl << "FLEXASYNCH - Starting normal mode" << endl;
ndbout << "Perform benchmark of insert, update and delete transactions";
@@ -844,7 +851,7 @@ readArguments(int argc, const char** argv){
while (argc > 1){
if (strcmp(argv[i], "-t") == 0){
tNoOfThreads = atoi(argv[i+1]);
- if ((tNoOfThreads < 1) || (tNoOfThreads > MAXTHREADS)){
+ if ((tNoOfThreads < 1) || (tNoOfThreads > NDB_MAXTHREADS)){
ndbout_c("Invalid no of threads");
return -1;
}
diff --git a/ndb/test/ndbapi/flexHammer.cpp b/ndb/test/ndbapi/flexHammer.cpp
index 9abac905f5a..3847bc38b35 100644
--- a/ndb/test/ndbapi/flexHammer.cpp
+++ b/ndb/test/ndbapi/flexHammer.cpp
@@ -69,7 +69,14 @@ ErrorData * flexHammerErrorData;
#define MAXSTRLEN 16
#define MAXATTR 64
#define MAXTABLES 64
-#define MAXTHREADS 256
+#define NDB_MAXTHREADS 256
+/*
+ NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a
+ #define from <sys/thread.h> on AIX (IBM compiler). We explicitly
+ #undef it here lest someone use it by habit and get really funny
+ results. K&R says we may #undef non-existent symbols, so let's go.
+*/
+#undef MAXTHREADS
#define MAXATTRSIZE 100
// Max number of retries if something fails
#define MaxNoOfAttemptsC 10
@@ -122,8 +129,8 @@ static int tAttributeSize;
static int tNoOfOperations;
static int tNoOfRecords;
static int tNoOfLoops;
-static ReadyType ThreadReady[MAXTHREADS];
-static StartType ThreadStart[MAXTHREADS];
+static ReadyType ThreadReady[NDB_MAXTHREADS];
+static StartType ThreadStart[NDB_MAXTHREADS];
static char tableName[MAXTABLES][MAXSTRLEN];
static char attrName[MAXATTR][MAXSTRLEN];
static int theSimpleFlag = 0;
@@ -643,7 +650,7 @@ readArguments (int argc, const char** argv)
while (argc > 1) {
if (strcmp(argv[i], "-t") == 0) {
tNoOfThreads = atoi(argv[i+1]);
- if ((tNoOfThreads < 1) || (tNoOfThreads > MAXTHREADS))
+ if ((tNoOfThreads < 1) || (tNoOfThreads > NDB_MAXTHREADS))
return(1);
}
else if (strcmp(argv[i], "-o") == 0) {
diff --git a/ndb/test/ndbapi/flexScan.cpp b/ndb/test/ndbapi/flexScan.cpp
index cbea90f44f4..4e3def7fb91 100644
--- a/ndb/test/ndbapi/flexScan.cpp
+++ b/ndb/test/ndbapi/flexScan.cpp
@@ -68,7 +68,14 @@
#define MAXSTRLEN 16
#define MAXATTR 64
#define MAXTABLES 64
-#define MAXTHREADS 256
+#define NDB_MAXTHREADS 256
+/*
+ NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a
+ #define from <sys/thread.h> on AIX (IBM compiler). We explicitly
+ #undef it here lest someone use it by habit and get really funny
+ results. K&R says we may #undef non-existent symbols, so let's go.
+*/
+#undef MAXTHREADS
#define MAXATTRSIZE 64
enum StartType {
@@ -860,7 +867,7 @@ static int readArguments(int argc, const char** argv)
if (strcmp(argv[i], "-t") == 0) {
if (argv[i + 1] != NULL) {
tNoOfThreads = atoi(argv[i + 1]);
- if ((tNoOfThreads < 1) || (tNoOfThreads > MAXTHREADS)) {
+ if ((tNoOfThreads < 1) || (tNoOfThreads > NDB_MAXTHREADS)) {
retValue = -1;
} // if
} // if
diff --git a/ndb/test/ndbapi/flexTT.cpp b/ndb/test/ndbapi/flexTT.cpp
index 71d5b6c096e..4373102f77e 100644
--- a/ndb/test/ndbapi/flexTT.cpp
+++ b/ndb/test/ndbapi/flexTT.cpp
@@ -35,7 +35,14 @@
#define MAXSTRLEN 16
#define MAXATTR 64
#define MAXTABLES 64
-#define MAXTHREADS 128
+#define NDB_MAXTHREADS 128
+/*
+ NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a
+ #define from <sys/thread.h> on AIX (IBM compiler). We explicitly
+ #undef it here lest someone use it by habit and get really funny
+ results. K&R says we may #undef non-existent symbols, so let's go.
+*/
+#undef MAXTHREADS
#define MAXPAR 1024
#define MAXATTRSIZE 1000
#define PKSIZE 1
@@ -101,10 +108,10 @@ static void input_error();
ErrorData * flexTTErrorData;
-static NdbThread* threadLife[MAXTHREADS];
+static NdbThread* threadLife[NDB_MAXTHREADS];
static int tNodeId;
-static int ThreadReady[MAXTHREADS];
-static StartType ThreadStart[MAXTHREADS];
+static int ThreadReady[NDB_MAXTHREADS];
+static StartType ThreadStart[NDB_MAXTHREADS];
static char tableName[1][MAXSTRLEN+1];
static char attrName[5][MAXSTRLEN+1];
@@ -184,7 +191,7 @@ NDB_COMMAND(flexTT, "flexTT", "flexTT", "flexTT", 65535)
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
- pThreadData = new ThreadNdb[MAXTHREADS];
+ pThreadData = new ThreadNdb[NDB_MAXTHREADS];
ndbout << endl << "FLEXTT - Starting normal mode" << endl;
ndbout << "Perform TimesTen benchmark" << endl;
@@ -798,7 +805,7 @@ readArguments(int argc, const char** argv){
while (argc > 1){
if (strcmp(argv[i], "-t") == 0){
tNoOfThreads = atoi(argv[i+1]);
- if ((tNoOfThreads < 1) || (tNoOfThreads > MAXTHREADS)){
+ if ((tNoOfThreads < 1) || (tNoOfThreads > NDB_MAXTHREADS)){
ndbout_c("Invalid no of threads");
return -1;
}
diff --git a/ndb/test/ndbapi/flexTimedAsynch.cpp b/ndb/test/ndbapi/flexTimedAsynch.cpp
index cc44ab8b237..b6301e59df2 100644
--- a/ndb/test/ndbapi/flexTimedAsynch.cpp
+++ b/ndb/test/ndbapi/flexTimedAsynch.cpp
@@ -57,7 +57,14 @@
#define MAXSTRLEN 16
#define MAXATTR 64
#define MAXTABLES 64
-#define MAXTHREADS 256
+#define NDB_MAXTHREADS 256
+/*
+ NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a
+ #define from <sys/thread.h> on AIX (IBM compiler). We explicitly
+ #undef it here lest someone use it by habit and get really funny
+ results. K&R says we may #undef non-existent symbols, so let's go.
+*/
+#undef MAXTHREADS
#define MAXATTRSIZE 1000
#define PKSIZE 1
@@ -95,10 +102,10 @@ static int failed = 0 ; // lame global variable that keeps track of failed trans
// incremented in executeCallback() and reset in main()
/************************************************************* < epaulsa */
-static NdbThread* threadLife[MAXTHREADS];
+static NdbThread* threadLife[NDB_MAXTHREADS];
static int tNodeId;
-static int ThreadReady[MAXTHREADS];
-static StartType ThreadStart[MAXTHREADS];
+static int ThreadReady[NDB_MAXTHREADS];
+static StartType ThreadStart[NDB_MAXTHREADS];
static char tableName[MAXTABLES][MAXSTRLEN+1];
static char attrName[MAXATTR][MAXSTRLEN+1];
static int *getAttrValueTable;
@@ -174,7 +181,7 @@ void deleteAttributeSpace(){
NDB_COMMAND(flexTimedAsynch, "flexTimedAsynch", "flexTimedAsynch [-tpoilcas]", "flexTimedAsynch", 65535)
{
ndb_init();
- ThreadNdb tabThread[MAXTHREADS];
+ ThreadNdb tabThread[NDB_MAXTHREADS];
int tLoops=0;
int returnValue;
//NdbOut flexTimedAsynchNdbOut;
@@ -615,8 +622,8 @@ void readArguments(int argc, const char** argv)
if (strcmp(argv[i], "-t") == 0)
{
tNoOfThreads = atoi(argv[i+1]);
- // if ((tNoOfThreads < 1) || (tNoOfThreads > MAXTHREADS))
- if ((tNoOfThreads < 1) || (tNoOfThreads > MAXTHREADS))
+ // if ((tNoOfThreads < 1) || (tNoOfThreads > NDB_MAXTHREADS))
+ if ((tNoOfThreads < 1) || (tNoOfThreads > NDB_MAXTHREADS))
exit(-1);
}
else if (strcmp(argv[i], "-i") == 0)
@@ -628,7 +635,7 @@ void readArguments(int argc, const char** argv)
else if (strcmp(argv[i], "-p") == 0)
{
tNoOfTransInBatch = atoi(argv[i+1]);
- //if ((tNoOfTransInBatch < 1) || (tNoOfTransInBatch > MAXTHREADS))
+ //if ((tNoOfTransInBatch < 1) || (tNoOfTransInBatch > NDB_MAXTHREADS))
if ((tNoOfTransInBatch < 1) || (tNoOfTransInBatch > 10000))
exit(-1);
}
diff --git a/ndb/test/ndbapi/initronja.cpp b/ndb/test/ndbapi/initronja.cpp
index 63bbc374c62..f48b1c86da3 100644
--- a/ndb/test/ndbapi/initronja.cpp
+++ b/ndb/test/ndbapi/initronja.cpp
@@ -29,7 +29,14 @@
#define MAXSTRLEN 16
#define MAXATTR 64
#define MAXTABLES 64
-#define MAXTHREADS 256
+#define NDB_MAXTHREADS 256
+/*
+ NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a
+ #define from <sys/thread.h> on AIX (IBM compiler). We explicitly
+ #undef it here lest someone use it by habit and get really funny
+ results. K&R says we may #undef non-existent symbols, so let's go.
+*/
+#undef MAXTHREADS
#define MAXATTRSIZE 8000
static unsigned int tNoOfRecords;
diff --git a/ndb/test/ndbapi/testBitfield.cpp b/ndb/test/ndbapi/testBitfield.cpp
index 6c958da693d..40a7a9d4557 100644
--- a/ndb/test/ndbapi/testBitfield.cpp
+++ b/ndb/test/ndbapi/testBitfield.cpp
@@ -10,6 +10,15 @@
static const char* _dbname = "TEST_DB";
static int g_loops = 7;
+
+NDB_STD_OPTS_VARS;
+
+static struct my_option my_long_options[] =
+{
+ NDB_STD_OPTS("ndb_desc"),
+ { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
+};
+
static void usage()
{
ndb_std_print_version();
@@ -39,8 +48,9 @@ main(int argc, char** argv){
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
- argc--;
- argv++;
+ if ((ho_error=handle_options(&argc, &argv, my_long_options,
+ ndb_std_get_one_option)))
+ return NDBT_ProgramExit(NDBT_WRONGARGS);
int res = NDBT_FAILED;
diff --git a/ndb/test/ndbapi/testBlobs.cpp b/ndb/test/ndbapi/testBlobs.cpp
index 81072f6a12a..6b08b7d686e 100644
--- a/ndb/test/ndbapi/testBlobs.cpp
+++ b/ndb/test/ndbapi/testBlobs.cpp
@@ -123,23 +123,25 @@ printusage()
<< "metadata" << endl
<< " -pk2len N length of PK2 [" << d.m_pk2len << "/" << g_max_pk2len <<"]" << endl
<< " -oneblob only 1 blob attribute [default 2]" << endl
- << "testcases for test/skip" << endl
+ << "test cases for test/skip" << endl
<< " k primary key ops" << endl
<< " i hash index ops" << endl
<< " s table scans" << endl
<< " r ordered index scans" << endl
<< " p performance test" << endl
- << "additional flags for test/skip" << endl
+ << "operations for test/skip" << endl
<< " u update existing blob value" << endl
<< " n normal insert and update" << endl
<< " w insert and update using writeTuple" << endl
+ << "blob operation styles for test/skip" << endl
<< " 0 getValue / setValue" << endl
<< " 1 setActiveHook" << endl
<< " 2 readData / writeData" << endl
- << "bug tests (no blob test)" << endl
+ << "example: -test kn0 (need all 3 parts)" << endl
+ << "bug tests" << endl
<< " -bug 4088 ndb api hang with mixed ops on index table" << endl
- << " -bug nnnn delete + write gives 626" << endl
- << " -bug nnnn acc crash on delete and long key" << endl
+ << " -bug 27018 middle partial part write clobbers rest of part" << endl
+ << " -bug 27370 Potential inconsistent blob reads for ReadCommitted reads" << endl
;
}
@@ -1027,6 +1029,32 @@ deletePk()
return 0;
}
+static int
+deleteNoPk()
+{
+ DBG("--- deleteNoPk ---");
+ Tup no_tup; // bug#24028
+ no_tup.m_pk1 = 0xb1ffb1ff;
+ sprintf(no_tup.m_pk2, "%-*.*s", g_opt.m_pk2len, g_opt.m_pk2len, "b1ffb1ff");
+ CHK((g_con = g_ndb->startTransaction()) != 0);
+ Tup& tup = no_tup;
+ DBG("deleteNoPk pk1=" << hex << tup.m_pk1);
+ CHK((g_opr = g_con->getNdbOperation(g_opt.m_tname)) != 0);
+ CHK(g_opr->deleteTuple() == 0);
+ CHK(g_opr->equal("PK1", tup.m_pk1) == 0);
+ if (g_opt.m_pk2len != 0)
+ CHK(g_opr->equal("PK2", tup.m_pk2) == 0);
+ CHK(g_con->execute(Commit) == -1); // fail
+ // BUG: error should be on op but is on con now
+ DBG("con: " << g_con->getNdbError());
+ DBG("opr: " << g_opr->getNdbError());
+ CHK(g_con->getNdbError().code == 626 || g_opr->getNdbError().code == 626);
+ g_ndb->closeTransaction(g_con);
+ g_opr = 0;
+ g_con = 0;
+ return 0;
+}
+
// hash index ops
static int
@@ -1382,6 +1410,7 @@ testmain()
CHK(readPk(style) == 0);
}
CHK(deletePk() == 0);
+ CHK(deleteNoPk() == 0);
CHK(verifyBlob() == 0);
}
if (testcase('w')) {
@@ -1396,6 +1425,7 @@ testmain()
CHK(readPk(style) == 0);
}
CHK(deletePk() == 0);
+ CHK(deleteNoPk() == 0);
CHK(verifyBlob() == 0);
}
}
@@ -1807,14 +1837,249 @@ bugtest_4088()
}
static int
-bugtest_2222()
+bugtest_27018()
{
+ DBG("bug test 27018 - middle partial part write clobbers rest of part");
+
+ // insert rows
+ calcTups(false);
+ CHK(insertPk(false) == 0);
+ // new trans
+ for (unsigned k= 0; k < g_opt.m_rows; k++)
+ {
+ Tup& tup= g_tups[k];
+
+ CHK((g_con= g_ndb->startTransaction()) != 0);
+ CHK((g_opr= g_con->getNdbOperation(g_opt.m_tname)) != 0);
+ CHK(g_opr->updateTuple() == 0);
+ CHK(g_opr->equal("PK1", tup.m_pk1) == 0);
+ if (g_opt.m_pk2len != 0)
+ CHK(g_opr->equal("PK2", tup.m_pk2) == 0);
+ CHK(getBlobHandles(g_opr) == 0);
+ CHK(g_con->execute(NoCommit) == 0);
+
+ /* Update one byte in random position. */
+ Uint32 offset= urandom(tup.m_blob1.m_len);
+ tup.m_blob1.m_buf[0]= 0xff ^ tup.m_blob1.m_val[offset];
+ CHK(g_bh1->setPos(offset) == 0);
+ CHK(g_bh1->writeData(&(tup.m_blob1.m_buf[0]), 1) == 0);
+ CHK(g_con->execute(Commit) == 0);
+ g_ndb->closeTransaction(g_con);
+
+ CHK((g_con= g_ndb->startTransaction()) != 0);
+ CHK((g_opr= g_con->getNdbOperation(g_opt.m_tname)) != 0);
+ CHK(g_opr->readTuple() == 0);
+ CHK(g_opr->equal("PK1", tup.m_pk1) == 0);
+ if (g_opt.m_pk2len != 0)
+ CHK(g_opr->equal("PK2", tup.m_pk2) == 0);
+ CHK(getBlobHandles(g_opr) == 0);
+
+ CHK(g_bh1->getValue(tup.m_blob1.m_buf, tup.m_blob1.m_len) == 0);
+ CHK(g_con->execute(Commit) == 0);
+ Uint64 len= ~0;
+ CHK(g_bh1->getLength(len) == 0 && len == tup.m_blob1.m_len);
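+ /* Flip the modified byte back; if the partial write changed only that
+    byte, the buffer must now equal the original value m_val. */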
+ tup.m_blob1.m_buf[offset]^= 0xff;
+ CHK(memcmp(tup.m_blob1.m_buf, tup.m_blob1.m_val, tup.m_blob1.m_len) == 0);
+ g_ndb->closeTransaction(g_con);
+ }
+
return 0;
}
+
+struct bug27370_data {
+ Ndb *m_ndb;
+ char m_current_write_value;
+ char *m_writebuf;
+ Uint32 m_blob1_size;
+ Uint32 m_pk1;
+ char m_pk2[g_max_pk2len + 1];
+ bool m_thread_stop;
+};
+
+void *bugtest_27370_thread(void *arg)
+{
+ bug27370_data *data= (bug27370_data *)arg;
+
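+ /* Writer thread: keeps rewriting the whole blob with a new fill byte on
+    each round.  A committed read in the main thread must never observe a
+    mix of two different fill values within one blob. */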
+ while (!data->m_thread_stop)
+ {
+ memset(data->m_writebuf, data->m_current_write_value, data->m_blob1_size);
+ data->m_current_write_value++;
+
+ NdbConnection *con;
+ if ((con= data->m_ndb->startTransaction()) == 0)
+ return (void *)"Failed to create transaction";
+ NdbOperation *opr;
+ if ((opr= con->getNdbOperation(g_opt.m_tname)) == 0)
+ return (void *)"Failed to create operation";
+ if (opr->writeTuple() != 0)
+ return (void *)"writeTuple() failed";
+ if (opr->equal("PK1", data->m_pk1) != 0)
+ return (void *)"equal(PK1) failed";
+ if (g_opt.m_pk2len != 0)
+ if (opr->equal("PK2", data->m_pk2) != 0)
+ return (void *)"equal(PK2) failed";
+ NdbBlob *bh;
+ if ((bh= opr->getBlobHandle("BL1")) == 0)
+ return (void *)"getBlobHandle() failed";
+ if (bh->setValue(data->m_writebuf, data->m_blob1_size) != 0)
+ return (void *)"setValue() failed";
+ if (con->execute(Commit, AbortOnError, 1) != 0)
+ return (void *)"execute() failed";
+ data->m_ndb->closeTransaction(con);
+ }
+
+ return NULL; // Success
+}
+
static int
-bugtest_3333()
+bugtest_27370()
{
+ DBG("bug test 27370 - Potential inconsistent blob reads for ReadCommitted reads");
+
+ bug27370_data data;
+
+ data.m_ndb= new Ndb(g_ncc, "TEST_DB");
+ CHK(data.m_ndb->init(20) == 0);
+ CHK(data.m_ndb->waitUntilReady() == 0);
+
+ data.m_current_write_value= 0;
+ data.m_blob1_size= g_opt.m_blob1.m_inline + 10 * g_opt.m_blob1.m_partsize;
+ CHK((data.m_writebuf= new char [data.m_blob1_size]) != 0);
+ data.m_pk1= 27370;
+ memset(data.m_pk2, 'x', g_max_pk2len);
+ data.m_pk2[g_max_pk2len]= '\0';
+ data.m_thread_stop= false;
+
+ memset(data.m_writebuf, data.m_current_write_value, data.m_blob1_size);
+ data.m_current_write_value++;
+
+ CHK((g_con= g_ndb->startTransaction()) != 0);
+ CHK((g_opr= g_con->getNdbOperation(g_opt.m_tname)) != 0);
+ CHK(g_opr->writeTuple() == 0);
+ CHK(g_opr->equal("PK1", data.m_pk1) == 0);
+ if (g_opt.m_pk2len != 0)
+ CHK(g_opr->equal("PK2", data.m_pk2) == 0);
+ CHK((g_bh1= g_opr->getBlobHandle("BL1")) != 0);
+ CHK(g_bh1->setValue(data.m_writebuf, data.m_blob1_size) == 0);
+ CHK(g_con->execute(Commit) == 0);
+ g_ndb->closeTransaction(g_con);
+ g_con= NULL;
+
+ pthread_t thread_handle;
+ CHK(pthread_create(&thread_handle, NULL, bugtest_27370_thread, &data) == 0);
+
+ DBG("bug test 27370 - PK blob reads");
+ Uint32 seen_updates= 0;
+ while (seen_updates < 50)
+ {
+ CHK((g_con= g_ndb->startTransaction()) != 0);
+ CHK((g_opr= g_con->getNdbOperation(g_opt.m_tname)) != 0);
+ CHK(g_opr->readTuple(NdbOperation::LM_CommittedRead) == 0);
+ CHK(g_opr->equal("PK1", data.m_pk1) == 0);
+ if (g_opt.m_pk2len != 0)
+ CHK(g_opr->equal("PK2", data.m_pk2) == 0);
+ CHK((g_bh1= g_opr->getBlobHandle("BL1")) != 0);
+ CHK(g_con->execute(NoCommit, AbortOnError, 1) == 0);
+
+ const Uint32 loop_max= 10;
+ char read_char;
+ char original_read_char= 0;
+ Uint32 readloop;
+ for (readloop= 0;; readloop++)
+ {
+ if (readloop > 0)
+ {
+ if (readloop > 1)
+ {
+ /* Compare against first read. */
+ CHK(read_char == original_read_char);
+ }
+ else
+ {
+ /*
+ We count the number of times we see the other thread had the
+ chance to update, so that we can be sure it had the opportunity
+ to run a reasonable number of times before we stop.
+ */
+ if (original_read_char != read_char)
+ seen_updates++;
+ original_read_char= read_char;
+ }
+ }
+ if (readloop > loop_max)
+ break;
+ Uint32 readSize= 1;
+ CHK(g_bh1->setPos(urandom(data.m_blob1_size)) == 0);
+ CHK(g_bh1->readData(&read_char, readSize) == 0);
+ CHK(readSize == 1);
+ ExecType commitType= readloop == loop_max ? Commit : NoCommit;
+ CHK(g_con->execute(commitType, AbortOnError, 1) == 0);
+ }
+ g_ndb->closeTransaction(g_con);
+ g_con= NULL;
+ }
+
+ DBG("bug test 27370 - table scan blob reads");
+ seen_updates= 0;
+ while (seen_updates < 50)
+ {
+ CHK((g_con= g_ndb->startTransaction()) != 0);
+ CHK((g_ops= g_con->getNdbScanOperation(g_opt.m_tname)) != 0);
+ CHK(g_ops->readTuples(NdbOperation::LM_CommittedRead) == 0);
+ CHK((g_bh1= g_ops->getBlobHandle("BL1")) != 0);
+ CHK(g_con->execute(NoCommit, AbortOnError, 1) == 0);
+ CHK(g_ops->nextResult(true) == 0);
+
+ const Uint32 loop_max= 10;
+ char read_char;
+ char original_read_char= 0;
+ Uint32 readloop;
+ for (readloop= 0;; readloop++)
+ {
+ if (readloop > 0)
+ {
+ if (readloop > 1)
+ {
+ /* Compare against first read. */
+ CHK(read_char == original_read_char);
+ }
+ else
+ {
+ /*
+ We count the number of times we see the other thread had the
+ chance to update, so that we can be sure it had the opportunity
+ to run a reasonable number of times before we stop.
+ */
+ if (original_read_char != read_char)
+ seen_updates++;
+ original_read_char= read_char;
+ }
+ }
+ if (readloop > loop_max)
+ break;
+ Uint32 readSize= 1;
+ CHK(g_bh1->setPos(urandom(data.m_blob1_size)) == 0);
+ CHK(g_bh1->readData(&read_char, readSize) == 0);
+ CHK(readSize == 1);
+ CHK(g_con->execute(NoCommit, AbortOnError, 1) == 0);
+ }
+
+ CHK(g_ops->nextResult(true) == 1);
+ g_ndb->closeTransaction(g_con);
+ g_con= NULL;
+ }
+
+ data.m_thread_stop= true;
+ void *thread_return;
+ CHK(pthread_join(thread_handle, &thread_return) == 0);
+ DBG("bug 27370 - thread return status: " <<
+ (thread_return ? (char *)thread_return : "<null>"));
+ CHK(thread_return == 0);
+
+ g_con= NULL;
+ g_opr= NULL;
+ g_bh1= NULL;
return 0;
}
@@ -1822,7 +2087,9 @@ static struct {
int m_bug;
int (*m_test)();
} g_bugtest[] = {
- { 4088, bugtest_4088 }
+ { 4088, bugtest_4088 },
+ { 27018, bugtest_27018 },
+ { 27370, bugtest_27370 }
};
NDB_COMMAND(testOdbcDriver, "testBlobs", "testBlobs", "testBlobs", 65535)
diff --git a/ndb/test/ndbapi/testIndex.cpp b/ndb/test/ndbapi/testIndex.cpp
index 78672cd519f..f715db1ef8c 100644
--- a/ndb/test/ndbapi/testIndex.cpp
+++ b/ndb/test/ndbapi/testIndex.cpp
@@ -1297,6 +1297,102 @@ runBug25059(NDBT_Context* ctx, NDBT_Step* step)
return res;
}
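+/*
+ * Helper for bug 28804: runs batches of unique-index reads and injects the
+ * given error code on the last operation of each batch, then checks that
+ * execute_NoCommit() fails with error 289 (out of transaction buffer memory
+ * in TC).  Codes 8039 and 8051 inject the failure at two different points
+ * (the latter in saveINDXATTRINFO; see the test case descriptions below).
+ */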
+int tcSaveINDX_test(NDBT_Context* ctx, NDBT_Step* step, int inject_err)
+{
+ int result= NDBT_OK;
+ Ndb* pNdb = GETNDB(step);
+ NdbDictionary::Dictionary * dict = pNdb->getDictionary();
+ const NdbDictionary::Index * idx = dict->getIndex(pkIdxName, *ctx->getTab());
+
+ HugoOperations ops(*ctx->getTab(), idx);
+
+ g_err << "Using INDEX: " << pkIdxName << endl;
+
+ NdbRestarter restarter;
+
+ int loops = ctx->getNumLoops();
+ const int rows = ctx->getNumRecords();
+ const int batchsize = ctx->getProperty("BatchSize", 1);
+
+ for(int bs=1; bs < loops; bs++)
+ {
+ int c= 0;
+ while (c++ < loops)
+ {
+ g_err << "BS " << bs << " LOOP #" << c << endl;
+
+ g_err << "inserting error on op#" << c << endl;
+
+ CHECK(ops.startTransaction(pNdb) == 0);
+ for(int i=1;i<=c;i++)
+ {
+ if(i==c)
+ {
+ if(restarter.insertErrorInAllNodes(inject_err)!=0)
+ {
+ g_err << "**** FAILED to insert error" << endl;
+ result= NDBT_FAILED;
+ break;
+ }
+ }
+ CHECK(ops.indexReadRecords(pNdb, pkIdxName, i,false,1) == 0);
+ if(i%bs==0 || i==c)
+ {
+ if(i<c)
+ {
+ if(ops.execute_NoCommit(pNdb, AO_IgnoreError)!=NDBT_OK)
+ {
+ g_err << "**** executeNoCommit should have succeeded" << endl;
+ result= NDBT_FAILED;
+ }
+ }
+ else
+ {
+ if(ops.execute_NoCommit(pNdb, AO_IgnoreError)!=289)
+ {
+ g_err << "**** executeNoCommit should have failed with 289"
+ << endl;
+ result= NDBT_FAILED;
+ }
+ g_err << "NdbError.code= " <<
+ ops.getTransaction()->getNdbError().code << endl;
+ break;
+ }
+ }
+ }
+
+ CHECK(ops.closeTransaction(pNdb) == 0);
+
+ if(restarter.insertErrorInAllNodes(0) != 0)
+ {
+ g_err << "**** Failed to error insert(0)" << endl;
+ return NDBT_FAILED;
+ }
+
+ CHECK(ops.startTransaction(pNdb) == 0);
+ if (ops.indexReadRecords(pNdb, pkIdxName,0,0,rows) != 0){
+ g_err << "**** Index read failed" << endl;
+ return NDBT_FAILED;
+ }
+ CHECK(ops.closeTransaction(pNdb) == 0);
+ }
+ }
+
+ return result;
+}
+
+int
+runBug28804(NDBT_Context* ctx, NDBT_Step* step)
+{
+ return tcSaveINDX_test(ctx, step, 8039);
+}
+
+int
+runBug28804_ATTRINFO(NDBT_Context* ctx, NDBT_Step* step)
+{
+ return tcSaveINDX_test(ctx, step, 8051);
+}
+
NDBT_TESTSUITE(testIndex);
TESTCASE("CreateAll",
"Test that we can create all various indexes on each table\n"
@@ -1628,6 +1724,27 @@ TESTCASE("Bug25059",
STEP(runBug25059);
FINALIZER(createPkIndex_Drop);
}
+TESTCASE("Bug28804",
+ "Test behaviour on out of TransactionBufferMemory for index lookup"){
+ TC_PROPERTY("LoggedIndexes", (unsigned)0);
+ INITIALIZER(runClearTable);
+ INITIALIZER(createPkIndex);
+ INITIALIZER(runLoadTable);
+ STEP(runBug28804);
+ FINALIZER(createPkIndex_Drop);
+ FINALIZER(runClearTable);
+}
+TESTCASE("Bug28804_ATTRINFO",
+ "Test behaviour on out of TransactionBufferMemory for index lookup"
+ " in saveINDXATTRINFO"){
+ TC_PROPERTY("LoggedIndexes", (unsigned)0);
+ INITIALIZER(runClearTable);
+ INITIALIZER(createPkIndex);
+ INITIALIZER(runLoadTable);
+ STEP(runBug28804_ATTRINFO);
+ FINALIZER(createPkIndex_Drop);
+ FINALIZER(runClearTable);
+}
NDBT_TESTSUITE_END(testIndex);
int main(int argc, const char** argv){
diff --git a/ndb/test/ndbapi/testNdbApi.cpp b/ndb/test/ndbapi/testNdbApi.cpp
index ad16c472229..5307a5bcd1c 100644
--- a/ndb/test/ndbapi/testNdbApi.cpp
+++ b/ndb/test/ndbapi/testNdbApi.cpp
@@ -1430,6 +1430,10 @@ TESTCASE("ExecuteAsynch",
"Check that executeAsync() works (BUG#27495)\n"){
INITIALIZER(runTestExecuteAsynch);
}
+TESTCASE("Bug28443",
+ ""){
+ INITIALIZER(runBug28443);
+}
NDBT_TESTSUITE_END(testNdbApi);
int main(int argc, const char** argv){
diff --git a/ndb/test/ndbapi/testNodeRestart.cpp b/ndb/test/ndbapi/testNodeRestart.cpp
index e5ced961b6f..12b0187b71f 100644
--- a/ndb/test/ndbapi/testNodeRestart.cpp
+++ b/ndb/test/ndbapi/testNodeRestart.cpp
@@ -943,12 +943,62 @@ int runBug24717(NDBT_Context* ctx, NDBT_Step* step){
restarter.startNodes(&nodeId, 1);
- for (Uint32 i = 0; i < 100; i++)
- {
- hugoTrans.pkReadRecords(pNdb, 100, 1, NdbOperation::LM_CommittedRead);
- }
-
+ do {
+ for (Uint32 i = 0; i < 100; i++)
+ {
+ hugoTrans.pkReadRecords(pNdb, 100, 1, NdbOperation::LM_CommittedRead);
+ }
+ } while (restarter.waitClusterStarted(5) != 0);
+ }
+
+ return NDBT_OK;
+}
+
+int
+runBug29364(NDBT_Context* ctx, NDBT_Step* step){
+ int result = NDBT_OK;
+ int loops = ctx->getNumLoops();
+ int records = ctx->getNumRecords();
+ NdbRestarter restarter;
+ Ndb* pNdb = GETNDB(step);
+
+ HugoTransactions hugoTrans(*ctx->getTab());
+
+ if (restarter.getNumDbNodes() < 4)
+ return NDBT_OK;
+
+ int dump0[] = { 9000, 0 } ;
+ int dump1[] = { 9001, 0 } ;
+ Uint32 ownNode = refToNode(pNdb->getReference());
+ dump0[1] = ownNode;
+
+ for (; loops; loops --)
+ {
+ int node0 = restarter.getDbNodeId(rand() % restarter.getNumDbNodes());
+ int node1 = restarter.getRandomNodeOtherNodeGroup(node0, rand());
+
+ restarter.restartOneDbNode(node0, false, true, true);
+ restarter.waitNodesNoStart(&node0, 1);
+ restarter.startNodes(&node0, 1);
restarter.waitClusterStarted();
+
+ restarter.restartOneDbNode(node1, false, true, true);
+ restarter.waitNodesNoStart(&node1, 1);
+ if (restarter.dumpStateOneNode(node1, dump0, 2))
+ return NDBT_FAILED;
+
+ restarter.startNodes(&node1, 1);
+
+ do {
+
+ for (Uint32 i = 0; i < 100; i++)
+ {
+ hugoTrans.pkReadRecords(pNdb, 100, 1, NdbOperation::LM_CommittedRead);
+ }
+ } while (restarter.waitClusterStarted(5) != 0);
+
+ if (restarter.dumpStateOneNode(node1, dump1, 1))
+ return NDBT_FAILED;
}
return NDBT_OK;
@@ -1044,6 +1094,180 @@ int runBug25554(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_OK;
}
+int
+runBug26457(NDBT_Context* ctx, NDBT_Step* step)
+{
+ NdbRestarter res;
+ if (res.getNumDbNodes() < 4)
+ return NDBT_OK;
+
+ int loops = ctx->getNumLoops();
+ while (loops --)
+ {
+retry:
+ int master = res.getMasterNodeId();
+ int next = res.getNextMasterNodeId(master);
+
+ ndbout_c("master: %d next: %d", master, next);
+
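+ /* The test needs master and next master in different node groups; if they
+    share a group, restart "next" so the next-master order changes, and pick
+    again. */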
+ if (res.getNodeGroup(master) == res.getNodeGroup(next))
+ {
+ res.restartOneDbNode(next, false, false, true);
+ if (res.waitClusterStarted())
+ return NDBT_FAILED;
+ goto retry;
+ }
+
+ int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 2 };
+
+ if (res.dumpStateOneNode(next, val2, 2))
+ return NDBT_FAILED;
+
+ if (res.insertErrorInNode(next, 7180))
+ return NDBT_FAILED;
+
+ res.restartOneDbNode(master, false, false, true);
+ if (res.waitClusterStarted())
+ return NDBT_FAILED;
+ }
+
+ return NDBT_OK;
+}
+
+int
+runBug26481(NDBT_Context* ctx, NDBT_Step* step)
+{
+
+ int result = NDBT_OK;
+ int loops = ctx->getNumLoops();
+ int records = ctx->getNumRecords();
+ NdbRestarter res;
+
+ int node = res.getRandomNotMasterNodeId(rand());
+ ndbout_c("node: %d", node);
+ if (res.restartOneDbNode(node, true, true, true))
+ return NDBT_FAILED;
+
+ if (res.waitNodesNoStart(&node, 1))
+ return NDBT_FAILED;
+
+ int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
+ if (res.dumpStateOneNode(node, val2, 2))
+ return NDBT_FAILED;
+
+ if (res.insertErrorInNode(node, 7018))
+ return NDBT_FAILED;
+
+ if (res.startNodes(&node, 1))
+ return NDBT_FAILED;
+
+ res.waitNodesStartPhase(&node, 1, 3);
+
+ if (res.waitNodesNoStart(&node, 1))
+ return NDBT_FAILED;
+
+ res.startNodes(&node, 1);
+
+ if (res.waitClusterStarted())
+ return NDBT_FAILED;
+
+ return NDBT_OK;
+}
+
+int
+runBug27003(NDBT_Context* ctx, NDBT_Step* step)
+{
+ int result = NDBT_OK;
+ int loops = ctx->getNumLoops();
+ int records = ctx->getNumRecords();
+ NdbRestarter res;
+
+ static const int errnos[] = { 4025, 4026, 4027, 4028, 0 };
+
+ int node = res.getRandomNotMasterNodeId(rand());
+ ndbout_c("node: %d", node);
+ if (res.restartOneDbNode(node, false, true, true))
+ return NDBT_FAILED;
+
+ Uint32 pos = 0;
+ for (Uint32 i = 0; i<loops; i++)
+ {
+ while (errnos[pos] != 0)
+ {
+ ndbout_c("Testing err: %d", errnos[pos]);
+
+ if (res.waitNodesNoStart(&node, 1))
+ return NDBT_FAILED;
+
+ if (res.insertErrorInNode(node, 1000))
+ return NDBT_FAILED;
+
+ if (res.insertErrorInNode(node, errnos[pos]))
+ return NDBT_FAILED;
+
+ int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
+ if (res.dumpStateOneNode(node, val2, 2))
+ return NDBT_FAILED;
+
+ res.startNodes(&node, 1);
+ NdbSleep_SecSleep(3);
+ pos++;
+ }
+ pos = 0;
+ }
+
+ if (res.waitNodesNoStart(&node, 1))
+ return NDBT_FAILED;
+
+ res.startNodes(&node, 1);
+ if (res.waitClusterStarted())
+ return NDBT_FAILED;
+
+ return NDBT_OK;
+}
+
+
+int
+runBug27283(NDBT_Context* ctx, NDBT_Step* step)
+{
+ int result = NDBT_OK;
+ int loops = ctx->getNumLoops();
+ int records = ctx->getNumRecords();
+ NdbRestarter res;
+
+ if (res.getNumDbNodes() < 2)
+ {
+ return NDBT_OK;
+ }
+
+ static const int errnos[] = { 7181, 7182, 0 };
+
+ Uint32 pos = 0;
+ for (Uint32 i = 0; i<loops; i++)
+ {
+ while (errnos[pos] != 0)
+ {
+ int master = res.getMasterNodeId();
+ int next = res.getNextMasterNodeId(master);
+ int next2 = res.getNextMasterNodeId(next);
+
+ int node = (i & 1) ? next : next2;
+ ndbout_c("Testing err: %d", errnos[pos]);
+ if (res.insertErrorInNode(next, errnos[pos]))
+ return NDBT_FAILED;
+
+ NdbSleep_SecSleep(3);
+
+ if (res.waitClusterStarted())
+ return NDBT_FAILED;
+
+ pos++;
+ }
+ pos = 0;
+ }
+
+ return NDBT_OK;
+}
int
runBug28717(NDBT_Context* ctx, NDBT_Step* step)
@@ -1123,6 +1347,51 @@ runBug28717(NDBT_Context* ctx, NDBT_Step* step)
return NDBT_OK;
}
+int
+runBug32160(NDBT_Context* ctx, NDBT_Step* step)
+{
+ int result = NDBT_OK;
+ int loops = ctx->getNumLoops();
+ int records = ctx->getNumRecords();
+ Ndb* pNdb = GETNDB(step);
+ NdbRestarter res;
+
+ if (res.getNumDbNodes() < 2)
+ {
+ return NDBT_OK;
+ }
+
+ int master = res.getMasterNodeId();
+ int next = res.getNextMasterNodeId(master);
+
+ if (res.insertErrorInNode(next, 7194))
+ {
+ return NDBT_FAILED;
+ }
+
+ int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
+ if (res.dumpStateOneNode(master, val2, 2))
+ return NDBT_FAILED;
+
+ if (res.insertErrorInNode(master, 7193))
+ return NDBT_FAILED;
+
+ int val3[] = { 7099 };
+ if (res.dumpStateOneNode(master, val3, 1))
+ return NDBT_FAILED;
+
+ if (res.waitNodesNoStart(&master, 1))
+ return NDBT_FAILED;
+
+ if (res.startNodes(&master, 1))
+ return NDBT_FAILED;
+
+ if (res.waitClusterStarted())
+ return NDBT_FAILED;
+
+ return NDBT_OK;
+}
+
NDBT_TESTSUITE(testNodeRestart);
TESTCASE("NoLoad",
"Test that one node at a time can be stopped and then restarted "\
@@ -1444,9 +1713,27 @@ TESTCASE("Bug25364", ""){
TESTCASE("Bug25554", ""){
INITIALIZER(runBug25554);
}
+TESTCASE("Bug26457", ""){
+ INITIALIZER(runBug26457);
+}
+TESTCASE("Bug26481", ""){
+ INITIALIZER(runBug26481);
+}
+TESTCASE("Bug27003", ""){
+ INITIALIZER(runBug27003);
+}
+TESTCASE("Bug27283", ""){
+ INITIALIZER(runBug27283);
+}
TESTCASE("Bug28717", ""){
INITIALIZER(runBug28717);
}
+TESTCASE("Bug29364", ""){
+ INITIALIZER(runBug29364);
+}
+TESTCASE("Bug32160", ""){
+ INITIALIZER(runBug32160);
+}
NDBT_TESTSUITE_END(testNodeRestart);
int main(int argc, const char** argv){
diff --git a/ndb/test/ndbapi/testOperations.cpp b/ndb/test/ndbapi/testOperations.cpp
index 1f610cade4a..21151ab5c7f 100644
--- a/ndb/test/ndbapi/testOperations.cpp
+++ b/ndb/test/ndbapi/testOperations.cpp
@@ -98,11 +98,6 @@ OperationTestCase matrix[] = {
break; }
#define C3(b) if (!(b)) { \
- g_err << "ERR: "<< step->getName() \
- << " failed on line " << __LINE__ << endl; \
- abort(); return NDBT_FAILED; }
-
-#define C3(b) if (!(b)) { \
g_err << "ERR: failed on line " << __LINE__ << endl; \
return NDBT_FAILED; }
diff --git a/ndb/test/ndbapi/testScanFilter.cpp b/ndb/test/ndbapi/testScanFilter.cpp
new file mode 100644
index 00000000000..48a7027eb66
--- /dev/null
+++ b/ndb/test/ndbapi/testScanFilter.cpp
@@ -0,0 +1,860 @@
+/* Copyright (C) 2007 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <NDBT.hpp>
+#include <NDBT_Test.hpp>
+
+#define ERR_EXIT(obj, msg) \
+do \
+{ \
+fprintf(stderr, "%s: %s (%d) in %s:%d\n", \
+msg, obj->getNdbError().message, obj->getNdbError().code, __FILE__, __LINE__); \
+exit(-1); \
+} \
+while (0);
+
+#define PRINT_ERROR(code,msg) \
+do \
+{ \
+fprintf(stderr, "Error in %s, line: %d, code: %d, msg: %s.\n", __FILE__, __LINE__, code, msg); \
+} \
+while (0);
+
+#define MYSQLERROR(mysql) { \
+ PRINT_ERROR(mysql_errno(&mysql),mysql_error(&mysql)); \
+ exit(-1); }
+#define APIERROR(error) { \
+ PRINT_ERROR(error.code,error.message); \
+ exit(-1); }
+
+#define TEST_NAME "TestScanFilter"
+#define TABLE_NAME "TABLE_SCAN"
+
+const char *COL_NAME[] = {"id", "i", "j", "k", "l", "m", "n"};
+const char COL_LEN = 7;
+/*
+* Do not change TUPLE_NUM: the columns of TABLE_NAME are fixed.  There are
+* six data columns, 'i', 'j', 'k', 'l', 'm', 'n', each equal to 0 or 1.
+* Since every tuple must be unique, TUPLE_NUM = 2^6 = 64.
+*/
+#ifdef _AIX
+/*
+ IBM xlC_r breaks on the initialization with pow():
+ "The expression must be an integral constant expression."
+*/
+const int TUPLE_NUM = 64;
+#else
+const int TUPLE_NUM = (int)pow(2, COL_LEN-1);
+#endif
+
+/*
+* The recursion level of the random scan filter.  It can be raised or
+* lowered within the range 1 to 100; a larger value makes each scan take
+* more time.
+*/
+const int RECURSIVE_LEVEL = 10;
+
+const int MAX_STR_LEN = (RECURSIVE_LEVEL * (COL_LEN+1) * 4);
+
+/*
+* Each iteration is one test: it produces a random filter string, fetches
+* the matching tuples once through the NDB API and once by evaluating the
+* filter directly against the tuples' data, and then compares the results.
+* The suite passes only if the two results agree in all TEST_NUM iterations.
+* Increasing TEST_NUM makes the test take longer.
+*/
+const int TEST_NUM = 5000;
+
+
+/* Table definition*/
+static
+const
+NDBT_Attribute MYTAB1Attribs[] = {
+ NDBT_Attribute("id", NdbDictionary::Column::Unsigned, 1, true),
+ NDBT_Attribute("i", NdbDictionary::Column::Unsigned),
+ NDBT_Attribute("j", NdbDictionary::Column::Unsigned),
+ NDBT_Attribute("k", NdbDictionary::Column::Unsigned),
+ NDBT_Attribute("l", NdbDictionary::Column::Unsigned),
+ NDBT_Attribute("m", NdbDictionary::Column::Unsigned),
+ NDBT_Attribute("n", NdbDictionary::Column::Unsigned),
+};
+static
+const
+NDBT_Table MYTAB1(TABLE_NAME, sizeof(MYTAB1Attribs)/sizeof(NDBT_Attribute), MYTAB1Attribs);
+
+
+int createTable(Ndb* pNdb, const NdbDictionary::Table* tab, bool _temp,
+ bool existsOk, NDBT_CreateTableHook f)
+{
+ int r = 0;
+ do{
+ NdbDictionary::Table tmpTab(* tab);
+ tmpTab.setStoredTable(_temp ? 0 : 1);
+ if(f != 0 && f(pNdb, tmpTab, 0))
+ {
+ ndbout << "Failed to create table" << endl;
+ return NDBT_FAILED;
+ }
+ r = pNdb->getDictionary()->createTable(tmpTab);
+ if(r == -1){
+ if(!existsOk){
+ ndbout << "Error: " << pNdb->getDictionary()->getNdbError() << endl;
+ break;
+ }
+ if(pNdb->getDictionary()->getNdbError().code != 721){
+ ndbout << "Error: " << pNdb->getDictionary()->getNdbError() << endl;
+ break;
+ }
+ r = 0;
+ }
+ }while(false);
+
+ return r;
+}
+
+/*
+* Function to produce the tuples' data
+*/
+int runPopulate(NDBT_Context* ctx, NDBT_Step* step)
+{
+ Ndb *myNdb = GETNDB(step);
+ const NdbDictionary::Dictionary* myDict= myNdb->getDictionary();
+ const NdbDictionary::Table *myTable= myDict->getTable(TABLE_NAME);
+ if(myTable == NULL)
+ APIERROR(myDict->getNdbError());
+
+ NdbTransaction* myTrans = myNdb->startTransaction();
+ if (myTrans == NULL)
+ APIERROR(myNdb->getNdbError());
+
+ for(int num = 0; num < TUPLE_NUM; num++)
+ {
+ NdbOperation* myNdbOperation = myTrans->getNdbOperation(myTable);
+ if(myNdbOperation == NULL)
+ {
+ APIERROR(myTrans->getNdbError());
+ }
+
+/* the tuples' data in TABLE_NAME
++----+---+---+---+---+---+---+
+| id | i | j | k | l | m | n |
++----+---+---+---+---+---+---+
+| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| 1 | 0 | 0 | 0 | 0 | 0 | 1 |
+| 2 | 0 | 0 | 0 | 0 | 1 | 0 |
+| 3 | 0 | 0 | 0 | 0 | 1 | 1 |
+| 4 | 0 | 0 | 0 | 1 | 0 | 0 |
+| 5 | 0 | 0 | 0 | 1 | 0 | 1 |
+| 6 | 0 | 0 | 0 | 1 | 1 | 0 |
+| 7 | 0 | 0 | 0 | 1 | 1 | 1 |
+| 8 | 0 | 0 | 1 | 0 | 0 | 0 |
+| 9 | 0 | 0 | 1 | 0 | 0 | 1 |
+| 10 | 0 | 0 | 1 | 0 | 1 | 0 |
+| 11 | 0 | 0 | 1 | 0 | 1 | 1 |
+| 12 | 0 | 0 | 1 | 1 | 0 | 0 |
+| 13 | 0 | 0 | 1 | 1 | 0 | 1 |
+| 14 | 0 | 0 | 1 | 1 | 1 | 0 |
+| 15 | 0 | 0 | 1 | 1 | 1 | 1 |
+| 16 | 0 | 1 | 0 | 0 | 0 | 0 |
+| 17 | 0 | 1 | 0 | 0 | 0 | 1 |
+| 18 | 0 | 1 | 0 | 0 | 1 | 0 |
+| 19 | 0 | 1 | 0 | 0 | 1 | 1 |
+| 20 | 0 | 1 | 0 | 1 | 0 | 0 |
+| 21 | 0 | 1 | 0 | 1 | 0 | 1 |
+| 22 | 0 | 1 | 0 | 1 | 1 | 0 |
+| 23 | 0 | 1 | 0 | 1 | 1 | 1 |
+| 24 | 0 | 1 | 1 | 0 | 0 | 0 |
+| 25 | 0 | 1 | 1 | 0 | 0 | 1 |
+| 26 | 0 | 1 | 1 | 0 | 1 | 0 |
+| 27 | 0 | 1 | 1 | 0 | 1 | 1 |
+| 28 | 0 | 1 | 1 | 1 | 0 | 0 |
+| 29 | 0 | 1 | 1 | 1 | 0 | 1 |
+| 30 | 0 | 1 | 1 | 1 | 1 | 0 |
+| 31 | 0 | 1 | 1 | 1 | 1 | 1 |
+| 32 | 1 | 0 | 0 | 0 | 0 | 0 |
+| 33 | 1 | 0 | 0 | 0 | 0 | 1 |
+| 34 | 1 | 0 | 0 | 0 | 1 | 0 |
+| 35 | 1 | 0 | 0 | 0 | 1 | 1 |
+| 36 | 1 | 0 | 0 | 1 | 0 | 0 |
+| 37 | 1 | 0 | 0 | 1 | 0 | 1 |
+| 38 | 1 | 0 | 0 | 1 | 1 | 0 |
+| 39 | 1 | 0 | 0 | 1 | 1 | 1 |
+| 40 | 1 | 0 | 1 | 0 | 0 | 0 |
+| 41 | 1 | 0 | 1 | 0 | 0 | 1 |
+| 42 | 1 | 0 | 1 | 0 | 1 | 0 |
+| 43 | 1 | 0 | 1 | 0 | 1 | 1 |
+| 44 | 1 | 0 | 1 | 1 | 0 | 0 |
+| 45 | 1 | 0 | 1 | 1 | 0 | 1 |
+| 46 | 1 | 0 | 1 | 1 | 1 | 0 |
+| 47 | 1 | 0 | 1 | 1 | 1 | 1 |
+| 48 | 1 | 1 | 0 | 0 | 0 | 0 |
+| 49 | 1 | 1 | 0 | 0 | 0 | 1 |
+| 50 | 1 | 1 | 0 | 0 | 1 | 0 |
+| 51 | 1 | 1 | 0 | 0 | 1 | 1 |
+| 52 | 1 | 1 | 0 | 1 | 0 | 0 |
+| 53 | 1 | 1 | 0 | 1 | 0 | 1 |
+| 54 | 1 | 1 | 0 | 1 | 1 | 0 |
+| 55 | 1 | 1 | 0 | 1 | 1 | 1 |
+| 56 | 1 | 1 | 1 | 0 | 0 | 0 |
+| 57 | 1 | 1 | 1 | 0 | 0 | 1 |
+| 58 | 1 | 1 | 1 | 0 | 1 | 0 |
+| 59 | 1 | 1 | 1 | 0 | 1 | 1 |
+| 60 | 1 | 1 | 1 | 1 | 0 | 0 |
+| 61 | 1 | 1 | 1 | 1 | 0 | 1 |
+| 62 | 1 | 1 | 1 | 1 | 1 | 0 |
+| 63 | 1 | 1 | 1 | 1 | 1 | 1 |
++----+---+---+---+---+---+---+
+*/
+ myNdbOperation->insertTuple();
+ myNdbOperation->equal(COL_NAME[0], num);
+ for(int col = 1; col < COL_LEN; col++)
+ {
+ myNdbOperation->setValue(COL_NAME[col], (num>>(COL_LEN-1-col))&1);
+ }
+ }
+
+ int check = myTrans->execute(NdbTransaction::Commit);
+
+ myTrans->close();
+
+ if (check == -1)
+ return NDBT_FAILED;
+ else
+ return NDBT_OK;
+
+}
+
+
+
+/*
+* a=AND, o=OR, A=NAND, O=NOR
+*/
+char op_string[] = "aoAO";
+/*
+* the names of the six data columns of the test table
+*/
+char col_string[] = "ijklmn";
+const int op_len = strlen(op_string);
+const int col_len = strlen(col_string);
+
+/*
+* get a random op from "aoAO"
+*/
+int get_rand_op_ch(char *ch)
+{
+ static unsigned int num = 0;
+ if(++num == 0)
+ num = 1;
+ srand(num*time(NULL));
+ *ch = op_string[rand() % op_len];
+ return 1;
+}
+
+/*
+* get a random ordering of "ijklmn" by exchanging letters
+*/
+void change_col_order()
+{
+ int pos1,pos2;
+ char temp;
+ for (int i = 0; i < 10; i++) //exchange for 10 times
+ {
+ srand(time(NULL)/(i+1));
+ pos1 = rand() % col_len;
+ srand((i+1)*time(NULL));
+ pos2 = rand() % col_len;
+ if (pos1 == pos2)
+ continue;
+ temp = col_string[pos1];
+ col_string[pos1] = col_string[pos2];
+ col_string[pos2] = temp;
+ }
+}
+
+/*
+* get a random substring of "ijklmn"
+*/
+int get_rand_col_str(char *str)
+{
+ int len;
+ static unsigned int num = 0;
+ if(++num == 0)
+ num = 1;
+ srand(num*time(NULL));
+ len = rand() % col_len + 1;
+ change_col_order();
+ snprintf(str, len+1, "%s", col_string); //len+1, including '\0'
+ return len;
+}
+
+/*
+* get a random string consisting of an operation and columns,
+* e.g. Alnikx
+*/
+int get_rand_op_str(char *str)
+{
+ char temp[256];
+ int len1, len2, len;
+ len1 = get_rand_op_ch(temp);
+ len2 = get_rand_col_str(temp+len1);
+ len = len1 + len2;
+ temp[len] = 'x';
+ snprintf(str, len+1+1, "%s", temp); //len+1, including '\0'
+ return len+1;
+}
+
+/*
+* replace a letter of source string with a new string
+* e.g., source string: 'Aijkx', replace i with new string 'olmx'
+* then source string is changed to 'Aolmxjkx'
+* source: its format should be produced from get_rand_op_str()
+* pos: range from 1 to strlen(source)-2
+*/
+int replace_a_to_str(char *source, int pos, char *newstr)
+{
+ char temp[MAX_STR_LEN];
+ snprintf(temp, pos+1, "%s", source);
+ snprintf(temp+pos, strlen(newstr)+1, "%s", newstr);
+ snprintf(temp+pos+strlen(newstr), strlen(source)-pos, "%s", source+pos+1);
+ snprintf(source, strlen(temp)+1, "%s", temp);
+ return strlen(source);
+}
+
+/*
+* check whether the input char is an operation
+*/
+bool check_op(char ch)
+{
+ if( ch == 'a' || ch == 'A' || ch == 'o' || ch == 'O')
+ return true;
+ else
+ return false;
+}
+
+/*
+* check whether the input char is the end flag
+*/
+bool check_end(char ch)
+{
+ return (ch == 'x');
+}
+
+/*
+* check whether the input char is a column letter
+*/
+bool check_col(char ch)
+{
+ if( ch == 'i' || ch == 'j' || ch == 'k'
+ || ch == 'l' || ch == 'm' || ch == 'n' )
+ return true;
+ else
+ return false;
+}
+
+/*
+* To ensure we can build a random string of depth RECURSIVE_LEVEL,
+* we need a position where we can replace a letter with a new string.
+*/
+int get_rand_replace_pos(char *str, int len)
+{
+ int pos_op = 0;
+ int pos_x = 0;
+ int pos_col = 0;
+ int span = 0;
+ static int num = 0;
+ char temp;
+
+ for(int i = 0; i < len; i++)
+ {
+ temp = str[i];
+ if(! check_end(temp))
+ {
+ if(check_op(temp))
+ pos_op = i;
+ }
+ else
+ {
+ pos_x = i;
+ break;
+ }
+ }
+
+ if(++num == 0)
+ num = 1;
+
+ span = pos_x - pos_op - 1;
+ if(span <= 1)
+ {
+ pos_col = pos_op + 1;
+ }
+ else
+ {
+ srand(num*time(NULL));
+ pos_col = pos_op + rand() % span + 1;
+ }
+ return pos_col;
+}
+
+/*
+* Check whether the given random string is valid
+* and applicable for this test case
+*/
+bool check_random_str(char *str)
+{
+ char *p;
+ int op_num = 0;
+ int end_num = 0;
+
+ for(p = str; *p; p++)
+ {
+ bool tmp1 = false, tmp2 = false;
+ if(tmp1 = check_op(*p))
+ op_num++;
+ if(tmp2 = check_end(*p))
+ end_num++;
+ if(!(tmp1 || tmp2 || check_col(*p))) //there are illegal letters
+ return false;
+ }
+
+ if(op_num != end_num) //begins are not equal to ends
+ return false;
+
+ return true;
+}
+
+/*
+* Get a random filter string with up to RECURSIVE_LEVEL levels of nesting
+*/
+void get_rand_op_str_compound(char *str)
+{
+ char small_str[256];
+ int pos;
+ int tmp;
+ int level;
+ static int num = 0;
+
+ if(++num == 0)
+ num = 1;
+
+ srand(num*time(NULL));
+ level = 1 + rand() % RECURSIVE_LEVEL;
+
+ get_rand_op_str(str);
+
+ for(int i = 0; i < level; i++)
+ {
+ get_rand_op_str(small_str);
+ tmp = strlen(small_str);
+ get_rand_op_str(small_str + tmp); //get two operations
+ pos = get_rand_replace_pos(str, strlen(str));
+ replace_a_to_str(str, pos, small_str);
+ }
+
+ //check the random string
+ if(!check_random_str(str))
+ {
+ fprintf(stderr, "Error random string! \n");
+ exit(-1);
+ }
+}
+
+/*
+* get column id of i,j,k,l,m,n
+*/
+int get_column_id(char ch)
+{
+ return (ch - 'i' + 1); //from 1 to 6
+}
+
+/*
+* check whether the value of column col_id in tuple number tuple_no is 1
+* col_id: column id, range from 1 to 6
+* tuple_no: record NO., range from 0 to 63
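+* (this extracts bit (6 - col_id) of tuple_no, matching how runPopulate()
+* assigned the column values)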
+*/
+bool check_col_equal_one(int tuple_no, int col_id)
+{
+ int i = (int)pow((double)2, (double)(6 - col_id));
+ int j = tuple_no / i;
+ if(j % 2)
+ return true;
+ else
+ return false;
+}
+
+/*
+* AND together all elements of a bool array
+* value: pointer to a bool array
+* len: length of the bool array
+*/
+bool AND_op(bool *value, int len)
+{
+ for(int i = 0; i < len; i++)
+ {
+ if(! value[i])
+ return false;
+ }
+ return true;
+}
+
+/*
+* OR together all elements of a bool array
+* value: pointer to a bool array
+* len: length of the bool array
+*/
+bool OR_op(bool *value, int len)
+{
+ for(int i = 0; i < len; i++)
+ {
+ if(value[i])
+ return true;
+ }
+ return false;
+}
+
+/*
+* NAND of all elements of a bool array (negation of AND_op)
+* value: pointer to a bool array
+* len: length of the bool array
+*/
+bool NAND_op(bool *value, int len)
+{
+ return (! AND_op(value, len));
+}
+
+/*
+* NOR of all elements of a bool array (negation of OR_op)
+* value: pointer to a bool array
+* len: length of the bool array
+*/
+bool NOR_op(bool *value, int len)
+{
+ return (! OR_op(value, len));
+}
+
+/*
+* AND/NAND/OR/NOR operation for a bool array
+*/
+bool calculate_one_op(char op_type, bool *value, int len)
+{
+ switch(op_type)
+ {
+ case 'a':
+ return AND_op(value, len);
+ break;
+ case 'o':
+ return OR_op(value, len);
+ break;
+ case 'A':
+ return NAND_op(value, len);
+ break;
+ case 'O':
+ return NOR_op(value, len);
+ break;
+ }
+ return false; //make gcc happy
+}
+
+typedef struct _stack_element
+{
+ char type;
+ int num;
+}stack_element;
+
+/*
+* stack_op stores the pending AND/OR/NAND/NOR operations
+* stack_col stores column values (i,j,k,l,m,n) and intermediate results of operations
+*/
+stack_element stack_op[RECURSIVE_LEVEL * COL_LEN];
+bool stack_col[RECURSIVE_LEVEL * COL_LEN * 2];
+
+/*
+* check whether the given tuple satisfies the filter condition
+* tuple_no: the number of the tuple in TABLE_NAME, range 0 to TUPLE_NUM-1
+* str: a random string of scan operations and conditions
+* len: length of str
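+* The string is evaluated with two stacks: operators go on stack_op and
+* column truth values on stack_col; each 'x' applies the innermost operator
+* to its operands and pushes the result back as an operand of the enclosing
+* operator.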
+*/
+bool check_one_tuple(int tuple_no, char *str, int len)
+{
+ int pop_op = 0;
+ int pop_col = 0;
+ for(int i = 0; i < len; i++)
+ {
+ char letter = *(str + i);
+ if(check_op(letter)) //push
+ {
+ stack_op[pop_op].type = letter;
+ stack_op[pop_op].num = 0;
+ pop_op++;
+ }
+ if(check_col(letter)) //push
+ {
+ stack_col[pop_col] = check_col_equal_one(tuple_no, get_column_id(letter));
+ pop_col++;
+ stack_op[pop_op-1].num += 1;
+ }
+ if(check_end(letter))
+ {
+ if(pop_op <= 1)
+ {
+ return calculate_one_op(stack_op[pop_op-1].type,
+ stack_col,
+ stack_op[pop_op-1].num);
+ }
+ else
+ {
+ bool tmp1 = calculate_one_op(stack_op[pop_op-1].type,
+ stack_col + pop_col - stack_op[pop_op-1].num,
+ stack_op[pop_op-1].num);
+ pop_col -= stack_op[pop_op-1].num; //pop
+ pop_op--;
+ stack_col[pop_col] = tmp1; //push
+ pop_col++;
+ stack_op[pop_op-1].num += 1;
+ }
+ }
+ }
+ return false; //make gcc happy
+}
+
+/*
+* compute which tuples match the scan condition by direct calculation
+* str: a random string of scan operations and conditions
+*/
+void check_all_tuples(char *str, bool *res)
+{
+ for (int i = 0; i < TUPLE_NUM; i++)
+ {
+ if(check_one_tuple(i, str, strlen(str)))
+ res[i] = true;
+ }
+}
+
+/*
+* convert an operation letter to the NdbScanFilter::Group value the NDB API needs
+*/
+NdbScanFilter::Group get_api_group(char op_name)
+{
+ switch (op_name) {
+ case 'a': return NdbScanFilter::AND;
+ case 'o': return NdbScanFilter::OR;
+ case 'A': return NdbScanFilter::NAND;
+ case 'O': return NdbScanFilter::NOR;
+ default:
+ fprintf(stderr, "Invalid group name %c !\n", op_name);
+ exit(3);
+ }
+}
+
+/*
+* with ndbapi, call begin, eq/ne/lt/gt/le/ge..., end
+*/
+NdbScanFilter * call_ndbapi(char *str, NdbTransaction *transaction,
+ NdbScanOperation *scan, NdbDictionary::Column const *col[])
+{
+ NdbScanFilter *scanfilter = new NdbScanFilter(scan);
+ char *p;
+
+ for (p = str; *p; p++)
+ {
+ if(check_op(*p))
+ {
+ if(scanfilter->begin(get_api_group(*p)))
+ ERR_EXIT(transaction, "filter begin() failed");
+ }
+ if(check_col(*p))
+ {
+ if(scanfilter->eq(col[*p-'i'+1]->getColumnNo(), (Uint32)1))
+ ERR_EXIT(transaction, "filter eq() failed");
+ }
+ if(check_end(*p))
+ {
+ if(scanfilter->end())
+ ERR_EXIT(transaction, "filter end() failed");
+ }
+ }
+
+ return scanfilter;
+}
+
+/*
+* fetch the matching tuples through the NDB API and record their numbers
+* str: a random string of scan operations and conditions
+*/
+void ndbapi_tuples(Ndb *ndb, char *str, bool *res)
+{
+ const NdbDictionary::Dictionary *dict = ndb->getDictionary();
+ if (!dict)
+ ERR_EXIT(ndb, "Can't get dict");
+
+ const NdbDictionary::Table *table = dict->getTable(TABLE_NAME);
+ if (!table)
+ ERR_EXIT(dict, "Can't get table " TABLE_NAME);
+
+ const NdbDictionary::Column *col[COL_LEN];
+ for(int i = 0; i < COL_LEN; i++)
+ {
+ char tmp[128];
+ col[i] = table->getColumn(COL_NAME[i]);
+ if(!col[i])
+ {
+ snprintf(tmp, 128, "Can't get column %s", COL_NAME[i]);
+ ERR_EXIT(dict, tmp);
+ }
+ }
+
+ NdbTransaction *transaction;
+ NdbScanOperation *scan;
+ NdbScanFilter *filter;
+
+ transaction = ndb->startTransaction();
+ if (!transaction)
+ ERR_EXIT(ndb, "Can't start transaction");
+
+ scan = transaction->getNdbScanOperation(table);
+ if (!scan)
+ ERR_EXIT(transaction, "Can't get scan op");
+
+ if (scan->readTuples(NdbOperation::LM_Exclusive))
+ ERR_EXIT(scan, "Can't set up read");
+
+ NdbRecAttr *rec[COL_LEN];
+ for(int i = 0; i < COL_LEN; i++)
+ {
+ char tmp[128];
+ rec[i] = scan->getValue(COL_NAME[i]);
+ if(!rec[i])
+ {
+ snprintf(tmp, 128, "Can't get rec of %s", COL_NAME[i]);
+ ERR_EXIT(scan, tmp);
+ }
+ }
+
+ filter = call_ndbapi(str, transaction, scan, col);
+
+ if (transaction->execute(NdbTransaction::NoCommit))
+ ERR_EXIT(transaction, "Can't execute");
+
+ int i,j,k,l,m,n;
+ while (scan->nextResult(true) == 0)
+ {
+ do
+ {
+ i = rec[1]->u_32_value();
+ j = rec[2]->u_32_value();
+ k = rec[3]->u_32_value();
+ l = rec[4]->u_32_value();
+ m = rec[5]->u_32_value();
+ n = rec[6]->u_32_value();
+ res[32*i+16*j+8*k+4*l+2*m+n] = true;
+ } while (scan->nextResult(false) == 0);
+ }
+
+ delete filter;
+ transaction->close();
+}
+
+/*
+* compare the result of the direct calculation with the result from the NDB API
+* str: a random string of scan operations and conditions
+* return: true if the NDB API result matches the calculation, false otherwise
+*/
+template class Vector<bool>;
+bool compare_cal_ndb(char *str, Ndb *ndb)
+{
+ Vector<bool> res_cal;
+ Vector<bool> res_ndb;
+
+ for(int i = 0; i < TUPLE_NUM; i++)
+ {
+ res_cal.push_back(false);
+ res_ndb.push_back(false);
+ }
+
+ check_all_tuples(str, res_cal.getBase());
+ ndbapi_tuples(ndb, str, res_ndb.getBase());
+
+ for(int i = 0; i < TUPLE_NUM; i++)
+ {
+ if(res_cal[i] != res_ndb[i])
+ return false;
+ }
+ return true;
+}
+
+
+int runCreateTables(NDBT_Context* ctx, NDBT_Step* step)
+{
+ Ndb *pNdb = GETNDB(step);
+ pNdb->getDictionary()->dropTable(MYTAB1.getName());
+ int ret = createTable(pNdb, &MYTAB1, false, true, 0);
+ if(ret)
+ return ret;
+ return NDBT_OK;
+}
+
+
+int runDropTables(NDBT_Context* ctx, NDBT_Step* step)
+{
+ int ret = GETNDB(step)->getDictionary()->dropTable(MYTAB1.getName());
+ if(ret == -1)
+ return NDBT_FAILED;
+
+ return NDBT_OK;
+}
+
+int runScanRandomFilterTest(NDBT_Context* ctx, NDBT_Step* step)
+{
+ char random_str[MAX_STR_LEN];
+ Ndb *myNdb = GETNDB(step);
+ bool res = true;
+
+ for(int i = 0; i < TEST_NUM; i++)
+ {
+ get_rand_op_str_compound(random_str);
+ if( !compare_cal_ndb(random_str, myNdb))
+ return NDBT_FAILED;
+ }
+
+ return NDBT_OK;
+}
+
+NDBT_TESTSUITE(testScanFilter);
+TESTCASE(TEST_NAME,
+ "Scan table TABLE_NAME for the records which match the \
+ conditions built from the logical scan operations AND/OR/NAND/NOR")
+{
+ INITIALIZER(runCreateTables);
+ INITIALIZER(runPopulate);
+ INITIALIZER(runScanRandomFilterTest);
+ FINALIZER(runDropTables);
+}
+
+NDBT_TESTSUITE_END(testScanFilter);
+
+
+int main(int argc, const char** argv)
+{
+ ndb_init();
+
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1))
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+
+ return testScanFilter.executeOneCtx(con, &MYTAB1, TEST_NAME);
+}
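Illustrative sketch (not part of the patch): how call_ndbapi() above translates a generated string into NdbScanFilter calls. For example, the string "aijx" is issued as, in order:

    NdbScanFilter filter(scan);                    // call_ndbapi() allocates it with new
    filter.begin(NdbScanFilter::AND);              // 'a'
    filter.eq(col[1]->getColumnNo(), (Uint32)1);   // 'i'  (column "i" == 1)
    filter.eq(col[2]->getColumnNo(), (Uint32)1);   // 'j'  (column "j" == 1)
    filter.end();                                  // 'x'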
diff --git a/ndb/test/odbc/SQL99_test/SQL99_test.cpp b/ndb/test/odbc/SQL99_test/SQL99_test.cpp
index 039a77f4d53..fb77220773d 100644
--- a/ndb/test/odbc/SQL99_test/SQL99_test.cpp
+++ b/ndb/test/odbc/SQL99_test/SQL99_test.cpp
@@ -27,7 +27,14 @@ using namespace std; //
#define MAXROW 64
#define DEFROW 8
-#define MAXTHREADS 24
+/*
+ NDB_MAXTHREADS used to be just MAXTHREADS, which collides with a
+ #define from <sys/thread.h> on AIX (IBM compiler). We explicitly
+ #undef it here lest someone use it by habit and get really funny
+ results. K&R says we may #undef non-existent symbols, so let's go.
+*/
+#undef MAXTHREADS
+#define NDB_MAXTHREADS 24
#define DEFTHREADS 2
#define MAXTABLES 16
@@ -83,7 +90,7 @@ int main(int argc, char* argv[]){
char* szTableNames = (char*)malloc(sizeof(char)*nNoOfTables*MAX_TABLE_NAME) ;
memset(szTableNames, 0, sizeof(char)*nNoOfTables*MAX_TABLE_NAME) ;
- UintPtr pThreadHandles[MAXTHREADS] = { NULL } ;
+ UintPtr pThreadHandles[NDB_MAXTHREADS] = { NULL } ;
AssignTableNames(szTableNames, nNoOfTables) ;
@@ -313,7 +320,7 @@ void ParseArguments(int argc, const char** argv){
if (strcmp(argv[i], "-t") == 0)
{
nNoOfThreads = atoi(argv[i+1]);
- if ((nNoOfThreads < 1) || (nNoOfThreads > MAXTHREADS))
+ if ((nNoOfThreads < 1) || (nNoOfThreads > NDB_MAXTHREADS))
nNoOfThreads = DEFTHREADS ;
}
else if (strcmp(argv[i], "-c") == 0)
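
The NDB_MAXTHREADS comment above describes the rename: drop the name that a system header may already define and use a prefixed project macro instead. A minimal sketch of the pattern with hypothetical values (the real collision is with <sys/thread.h> on AIX):

#include <cstdio>

/* Hypothetical system header that already defines MAXTHREADS
   (stands in for <sys/thread.h> on AIX). */
#define MAXTHREADS 512

/* Project code: drop the colliding name and use a prefixed one instead.
   #undef of a name is legal even if it was never defined. */
#undef MAXTHREADS
#define NDB_MAXTHREADS 24

int main()
{
  int handles[NDB_MAXTHREADS];  /* sized by the project macro, not the system one */
  std::printf("%d slots\n", (int)(sizeof(handles) / sizeof(handles[0])));
  return 0;
}
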
diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt
index f4a685299d6..7b4a4ca0e2d 100644
--- a/ndb/test/run-test/daily-basic-tests.txt
+++ b/ndb/test/run-test/daily-basic-tests.txt
@@ -425,6 +425,14 @@ max-time: 500
cmd: testScan
args: -n Bug24447 T1
+max-time: 1000
+cmd: testNodeRestart
+args: -n Bug27003 T1
+
+max-time: 1000
+cmd: testNodeRestart
+args: -n Bug27283 T1
+
max-time: 500
cmd: testNodeRestart
args: -n Bug15587 T1
@@ -471,12 +479,28 @@ args: -n Bug24717 T1
max-time: 1000
cmd: testNodeRestart
+args: -n Bug29364 T1
+
+max-time: 1000
+cmd: testNodeRestart
args: -n Bug25364 T1
max-time: 1000
cmd: testNodeRestart
args: -n Bug25554 T1
+max-time: 1000
+cmd: testNodeRestart
+args: -n Bug26457 T1
+
+max-time: 1000
+cmd: testNodeRestart
+args: -n Bug26481 T1
+
+max-time: 300
+cmd: testNodeRestart
+args: -n Bug32160 T1
+
# OLD FLEX
max-time: 500
cmd: flexBench
@@ -621,6 +645,10 @@ max-time: 1000
cmd: testNdbApi
args: -n BugBug28443
+max-time: 1000
+cmd: testNdbApi
+args: -n Bug28443
+
#max-time: 500
#cmd: testInterpreter
#args: T1
@@ -641,6 +669,14 @@ max-time: 600
cmd: testBlobs
args:
+max-time: 600
+cmd: testBlobs
+args: -bug 27018
+
+max-time: 600
+cmd: testBlobs
+args: -bug 27370
+
max-time: 5000
cmd: testOIBasic
args: -case abcdefz
@@ -747,3 +783,11 @@ cmd: DbAsyncGenerator
args: -time 60 -p 1 -proc 25
type: bench
+max-time: 180
+cmd: testIndex
+args: -n Bug28804 T1 T3
+
+max-time: 180
+cmd: testIndex
+args: -n Bug28804_ATTRINFO T1 T3
+
diff --git a/ndb/test/src/NDBT_Table.cpp b/ndb/test/src/NDBT_Table.cpp
index 1d1896eee7f..34db6ed9822 100644
--- a/ndb/test/src/NDBT_Table.cpp
+++ b/ndb/test/src/NDBT_Table.cpp
@@ -31,7 +31,7 @@ operator <<(class NdbOut& ndbout, const NDBT_Table & tab)
ndbout << "Number of attributes: " << tab.getNoOfColumns() << endl;
ndbout << "Number of primary keys: " << tab.getNoOfPrimaryKeys() << endl;
ndbout << "Length of frm data: " << tab.getFrmLength() << endl;
-
+ ndbout << "SingleUserMode: " << (Uint32) tab.getSingleUserMode() << endl;
//<< ((tab.getTupleKey() == TupleId) ? " tupleid" : "") <<endl;
ndbout << "TableStatus: ";
diff --git a/ndb/test/src/NDBT_Test.cpp b/ndb/test/src/NDBT_Test.cpp
index 37100732eca..391af3e5d95 100644
--- a/ndb/test/src/NDBT_Test.cpp
+++ b/ndb/test/src/NDBT_Test.cpp
@@ -817,6 +817,63 @@ NDBT_TestSuite::executeOne(Ndb_cluster_connection& con,
}
}
+int
+NDBT_TestSuite::executeOneCtx(Ndb_cluster_connection& con,
+ const NdbDictionary::Table *ptab, const char* _testname){
+
+ testSuiteTimer.doStart();
+
+ do{
+ if(tests.size() == 0)
+ break;
+
+ Ndb ndb(&con, "TEST_DB");
+ ndb.init(1024);
+
+ int result = ndb.waitUntilReady(300); // 5 minutes
+ if (result != 0){
+ g_err << name <<": Ndb was not ready" << endl;
+ break;
+ }
+
+ ndbout << name << " started [" << getDate() << "]" << endl;
+ ndbout << "|- " << ptab->getName() << endl;
+
+ for (unsigned t = 0; t < tests.size(); t++){
+
+ if (_testname != NULL &&
+ strcasecmp(tests[t]->getName(), _testname) != 0)
+ continue;
+
+ tests[t]->initBeforeTest();
+
+ ctx = new NDBT_Context(con);
+ ctx->setTab(ptab);
+ ctx->setNumRecords(records);
+ ctx->setNumLoops(loops);
+ if(remote_mgm != NULL)
+ ctx->setRemoteMgm(remote_mgm);
+ ctx->setSuite(this);
+
+ result = tests[t]->execute(ctx);
+ if (result != NDBT_OK)
+ numTestsFail++;
+ else
+ numTestsOk++;
+ numTestsExecuted++;
+
+ delete ctx;
+ }
+
+ if (numTestsFail > 0)
+ break;
+ }while(0);
+
+ testSuiteTimer.doStop();
+ int res = report(_testname);
+ return NDBT_ProgramExit(res);
+}
+
void NDBT_TestSuite::execute(Ndb_cluster_connection& con,
Ndb* ndb, const NdbDictionary::Table* pTab,
const char* _testname){
diff --git a/ndb/test/src/NdbRestarter.cpp b/ndb/test/src/NdbRestarter.cpp
index 6f13a3bfca4..b636ab4d608 100644
--- a/ndb/test/src/NdbRestarter.cpp
+++ b/ndb/test/src/NdbRestarter.cpp
@@ -128,6 +128,68 @@ NdbRestarter::getMasterNodeId(){
}
int
+NdbRestarter::getNodeGroup(int nodeId){
+ if (!isConnected())
+ return -1;
+
+ if (getStatus() != 0)
+ return -1;
+
+ for(size_t i = 0; i < ndbNodes.size(); i++)
+ {
+ if(ndbNodes[i].node_id == nodeId)
+ {
+ return ndbNodes[i].node_group;
+ }
+ }
+
+ return -1;
+}
+
+int
+NdbRestarter::getNextMasterNodeId(int nodeId){
+ if (!isConnected())
+ return -1;
+
+ if (getStatus() != 0)
+ return -1;
+
+ size_t i;
+ for(i = 0; i < ndbNodes.size(); i++)
+ {
+ if(ndbNodes[i].node_id == nodeId)
+ {
+ break;
+ }
+ }
+ assert(i < ndbNodes.size());
+ if (i == ndbNodes.size())
+ return -1;
+
+ int dynid = ndbNodes[i].dynamic_id;
+ int minid = dynid;
+ for (i = 0; i<ndbNodes.size(); i++)
+ if (ndbNodes[i].dynamic_id > minid)
+ minid = ndbNodes[i].dynamic_id;
+
+ for (i = 0; i<ndbNodes.size(); i++)
+ if (ndbNodes[i].dynamic_id > dynid &&
+ ndbNodes[i].dynamic_id < minid)
+ {
+ minid = ndbNodes[i].dynamic_id;
+ }
+
+ if (minid != ~0)
+ {
+ for (i = 0; i<ndbNodes.size(); i++)
+ if (ndbNodes[i].dynamic_id == minid)
+ return ndbNodes[i].node_id;
+ }
+
+ return getMasterNodeId();
+}
+
+int
NdbRestarter::getRandomNotMasterNodeId(int rand){
int master = getMasterNodeId();
if(master == -1)
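
getNextMasterNodeId() above picks the node whose dynamic_id is the smallest value strictly greater than the given node's dynamic_id, i.e. the next node in master succession order. A minimal standalone sketch of that selection with hypothetical types, leaving out the NdbRestarter status plumbing and the fallback to getMasterNodeId():

#include <cstdio>
#include <vector>

/* Hypothetical stand-in for the ndbNodes entries used above. */
struct Node { int node_id; int dynamic_id; };

/* Return the node_id whose dynamic_id is the smallest one strictly
   greater than current_dynamic_id, or -1 if no node is greater. */
static int next_by_dynamic_id(const std::vector<Node>& nodes,
                              int current_dynamic_id)
{
  int best_node = -1;
  int best_dyn = 0;
  for (unsigned i = 0; i < nodes.size(); i++)
  {
    if (nodes[i].dynamic_id > current_dynamic_id &&
        (best_node == -1 || nodes[i].dynamic_id < best_dyn))
    {
      best_node = nodes[i].node_id;
      best_dyn = nodes[i].dynamic_id;
    }
  }
  return best_node;
}

int main()
{
  Node n[] = { {3, 10}, {4, 30}, {5, 20} };
  std::vector<Node> nodes(n, n + 3);
  std::printf("%d\n", next_by_dynamic_id(nodes, 10)); /* prints 5 (dynamic_id 20) */
  return 0;
}
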
diff --git a/ndb/test/src/UtilTransactions.cpp b/ndb/test/src/UtilTransactions.cpp
index 3a166f19c92..011cea24af9 100644
--- a/ndb/test/src/UtilTransactions.cpp
+++ b/ndb/test/src/UtilTransactions.cpp
@@ -1381,6 +1381,7 @@ UtilTransactions::compare(Ndb* pNdb, const char* tab_name2, int flags){
goto error;
}
+ row_count= 0;
{
int eof;
while((eof = pOp->nextResult(true)) == 0)
diff --git a/ndb/tools/delete_all.cpp b/ndb/tools/delete_all.cpp
index 6b50d850d3f..9b6a8cf94c6 100644
--- a/ndb/tools/delete_all.cpp
+++ b/ndb/tools/delete_all.cpp
@@ -43,9 +43,11 @@ static struct my_option my_long_options[] =
};
static void usage()
{
+#ifdef NOT_USED
char desc[] =
"tabname\n"\
"This program will delete all records in the specified table using scan delete.\n";
+#endif
ndb_std_print_version();
print_defaults(MYSQL_CONFIG_NAME,load_default_groups);
puts("");
@@ -65,6 +67,7 @@ int main(int argc, char** argv){
return NDBT_ProgramExit(NDBT_WRONGARGS);
Ndb_cluster_connection con(opt_connect_str);
+ con.set_name("ndb_delete_all");
if(con.connect(12, 5, 1) != 0)
{
ndbout << "Unable to connect to management server." << endl;
diff --git a/ndb/tools/desc.cpp b/ndb/tools/desc.cpp
index fa7b6a750b1..83258515796 100644
--- a/ndb/tools/desc.cpp
+++ b/ndb/tools/desc.cpp
@@ -42,10 +42,12 @@ static struct my_option my_long_options[] =
};
static void usage()
{
+#ifdef NOT_USED
char desc[] =
"tabname\n"\
"This program list all properties of table(s) in NDB Cluster.\n"\
" ex: desc T1 T2 T4\n";
+#endif
ndb_std_print_version();
print_defaults(MYSQL_CONFIG_NAME,load_default_groups);
puts("");
@@ -67,6 +69,7 @@ int main(int argc, char** argv){
return NDBT_ProgramExit(NDBT_WRONGARGS);
Ndb_cluster_connection con(opt_connect_str);
+ con.set_name("ndb_desc");
if(con.connect(12, 5, 1) != 0)
{
ndbout << "Unable to connect to management server." << endl;
diff --git a/ndb/tools/drop_index.cpp b/ndb/tools/drop_index.cpp
index 7cc791dcdb7..256c40e1924 100644
--- a/ndb/tools/drop_index.cpp
+++ b/ndb/tools/drop_index.cpp
@@ -36,9 +36,11 @@ static struct my_option my_long_options[] =
};
static void usage()
{
+#ifdef NOT_USED
char desc[] =
"[<table> <index>]+\n"\
"This program will drop index(es) in Ndb\n";
+#endif
ndb_std_print_version();
print_defaults(MYSQL_CONFIG_NAME,load_default_groups);
puts("");
@@ -59,6 +61,7 @@ int main(int argc, char** argv){
}
Ndb_cluster_connection con(opt_connect_str);
+ con.set_name("ndb_drop_index");
if(con.connect(12, 5, 1) != 0)
{
return NDBT_ProgramExit(NDBT_FAILED);
diff --git a/ndb/tools/drop_tab.cpp b/ndb/tools/drop_tab.cpp
index efbbba73d4b..a7accb904a4 100644
--- a/ndb/tools/drop_tab.cpp
+++ b/ndb/tools/drop_tab.cpp
@@ -36,9 +36,11 @@ static struct my_option my_long_options[] =
};
static void usage()
{
+#ifdef NOT_USED
char desc[] =
"tabname\n"\
"This program will drop one table in Ndb\n";
+#endif
ndb_std_print_version();
print_defaults(MYSQL_CONFIG_NAME,load_default_groups);
puts("");
@@ -59,6 +61,7 @@ int main(int argc, char** argv){
}
Ndb_cluster_connection con(opt_connect_str);
+ con.set_name("ndb_drop_table");
if(con.connect(12, 5, 1) != 0)
{
ndbout << "Unable to connect to management server." << endl;
diff --git a/ndb/tools/listTables.cpp b/ndb/tools/listTables.cpp
index 75bed2e35fe..009789824e8 100644
--- a/ndb/tools/listTables.cpp
+++ b/ndb/tools/listTables.cpp
@@ -189,6 +189,7 @@ static struct my_option my_long_options[] =
};
static void usage()
{
+#ifdef NOT_USED
char desc[] =
"tabname\n"\
"This program list all system objects in NDB Cluster.\n"\
@@ -196,6 +197,7 @@ static void usage()
" ex: ndb_show_tables -t 2 would show all UserTables\n"\
"To show all indexes for a table write table name as final argument\n"\
" ex: ndb_show_tables T1\n";
+#endif
ndb_std_print_version();
print_defaults(MYSQL_CONFIG_NAME,load_default_groups);
puts("");
@@ -217,6 +219,7 @@ int main(int argc, char** argv){
_tabname = argv[0];
ndb_cluster_connection = new Ndb_cluster_connection(opt_connect_str);
+ ndb_cluster_connection->set_name("ndb_show_tables");
if (ndb_cluster_connection->connect(12,5,1))
fatal("Unable to connect to management server.");
if (ndb_cluster_connection->wait_until_ready(30,0) < 0)
diff --git a/ndb/tools/ndb_config.cpp b/ndb/tools/ndb_config.cpp
index 5c842076873..31fc59a8b83 100644
--- a/ndb/tools/ndb_config.cpp
+++ b/ndb/tools/ndb_config.cpp
@@ -97,6 +97,7 @@ static void usage()
{
char desc[] =
"This program will retreive config options for a ndb cluster\n";
+ puts(desc);
ndb_std_print_version();
print_defaults(MYSQL_CONFIG_NAME,load_default_groups);
puts("");
@@ -111,12 +112,14 @@ struct Match
{
int m_key;
BaseString m_value;
+ Match() {}
virtual int eval(const Iter&);
virtual ~Match() {}
};
struct HostMatch : public Match
{
+ HostMatch() {}
virtual int eval(const Iter&);
};
@@ -131,11 +134,13 @@ struct Apply
struct NodeTypeApply : public Apply
{
+ NodeTypeApply() {}
virtual int apply(const Iter&);
};
struct ConnectionTypeApply : public Apply
{
+ ConnectionTypeApply() {}
virtual int apply(const Iter&);
};
@@ -294,10 +299,10 @@ parse_where(Vector<Match*>& where, int &argc, char**& argv)
Match m;
if(g_host)
{
- HostMatch *m = new HostMatch;
- m->m_key = CFG_NODE_HOST;
- m->m_value.assfmt("%s", g_host);
- where.push_back(m);
+ HostMatch *tmp = new HostMatch;
+ tmp->m_key = CFG_NODE_HOST;
+ tmp->m_value.assfmt("%s", g_host);
+ where.push_back(tmp);
}
if(g_type)
diff --git a/ndb/tools/restore/Restore.cpp b/ndb/tools/restore/Restore.cpp
index 8b2e9a799a4..b89d3e239c2 100644
--- a/ndb/tools/restore/Restore.cpp
+++ b/ndb/tools/restore/Restore.cpp
@@ -23,6 +23,8 @@
#include <SimpleProperties.hpp>
#include <signaldata/DictTabInfo.hpp>
+extern NdbRecordPrintFormat g_ndbrecord_print_format;
+
Uint16 Twiddle16(Uint16 in); // Byte shift 16-bit data
Uint32 Twiddle32(Uint32 in); // Byte shift 32-bit data
Uint64 Twiddle64(Uint64 in); // Byte shift 64-bit data
@@ -118,6 +120,8 @@ RestoreMetaData::loadContent()
return 0;
}
}
+ if (! markSysTables())
+ return 0;
if(!readGCPEntry())
return 0;
@@ -176,6 +180,49 @@ RestoreMetaData::readMetaTableDesc() {
}
bool
+RestoreMetaData::markSysTables()
+{
+ Uint32 i;
+ for (i = 0; i < getNoOfTables(); i++) {
+ TableS* table = allTables[i];
+ table->m_local_id = i;
+ const char* tableName = table->getTableName();
+ if ( // XXX should use type
+ strcmp(tableName, "SYSTAB_0") == 0 ||
+ strcmp(tableName, "NDB$EVENTS_0") == 0 ||
+ strcmp(tableName, "sys/def/SYSTAB_0") == 0 ||
+ strcmp(tableName, "sys/def/NDB$EVENTS_0") == 0)
+ table->isSysTable = true;
+ }
+ for (i = 0; i < getNoOfTables(); i++) {
+ TableS* blobTable = allTables[i];
+ const char* blobTableName = blobTable->getTableName();
+ // match blob table names of the form <db>/<schema>/NDB$BLOB_<id1>_<id2>, where id1 is the main table id
+ int cnt, id1, id2;
+ char buf[256];
+ cnt = sscanf(blobTableName, "%[^/]/%[^/]/NDB$BLOB_%d_%d",
+ buf, buf, &id1, &id2);
+ if (cnt == 4) {
+ Uint32 j;
+ for (j = 0; j < getNoOfTables(); j++) {
+ TableS* table = allTables[j];
+ if (table->getTableId() == (Uint32) id1) {
+ if (table->isSysTable)
+ blobTable->isSysTable = true;
+ blobTable->m_main_table = table;
+ break;
+ }
+ }
+ if (j == getNoOfTables()) {
+ err << "Restore: Bad primary table id in " << blobTableName << endl;
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+bool
RestoreMetaData::readGCPEntry() {
Uint32 data[4];
@@ -259,6 +306,8 @@ TableS::TableS(Uint32 version, NdbTableImpl* tableImpl)
m_max_auto_val= 0;
m_noOfRecords= 0;
backupVersion = version;
+ isSysTable = false;
+ m_main_table = NULL;
for (int i = 0; i < tableImpl->getNoOfColumns(); i++)
createAttr(tableImpl->getColumn(i));
@@ -275,7 +324,11 @@ bool
RestoreMetaData::parseTableDescriptor(const Uint32 * data, Uint32 len)
{
NdbTableImpl* tableImpl = 0;
- int ret = NdbDictInterface::parseTableInfo(&tableImpl, data, len, false);
+ int ret = 0;
+ if(!m_hostByteOrder)
+ ret = NdbDictInterface::parseTableInfo(&tableImpl, data, len, false, false);
+ else
+ ret = NdbDictInterface::parseTableInfo(&tableImpl, data, len, false);
if (ret != 0) {
err << "parseTableInfo " << " failed" << endl;
@@ -405,6 +458,7 @@ RestoreDataIterator::getNextTuple(int & res)
attr_data->null = false;
attr_data->void_value = ptr;
+ attr_data->size = 4*sz;
if(!Twiddle(attr_desc, attr_data))
{
@@ -426,6 +480,11 @@ RestoreDataIterator::getNextTuple(int & res)
attr_data->null = false;
attr_data->void_value = ptr;
+ attr_data->size = 4*sz;
+
+ if(!m_hostByteOrder
+ && attr_desc->m_column->getType() == NdbDictionary::Column::Timestamp)
+ attr_data->u_int32_value[0] = Twiddle32(attr_data->u_int32_value[0]);
if(!Twiddle(attr_desc, attr_data))
{
@@ -462,12 +521,36 @@ RestoreDataIterator::getNextTuple(int & res)
attr_data->null = false;
attr_data->void_value = &data->Data[0];
+ attr_data->size = sz*4;
/**
* Compute array size
*/
const Uint32 arraySize = (4 * sz) / (attr_desc->size / 8);
assert(arraySize >= attr_desc->arraySize);
+
+ // convert the 64-bit length header of Blob(v1) and Text(v1) to host byte order
+ if(!m_hostByteOrder
+ && (attr_desc->m_column->getType() == NdbDictionary::Column::Blob
+ || attr_desc->m_column->getType() == NdbDictionary::Column::Text))
+ {
+ char* p = (char*)&attr_data->u_int64_value[0];
+ Uint64 x;
+ memcpy(&x, p, sizeof(Uint64));
+ x = Twiddle64(x);
+ memcpy(p, &x, sizeof(Uint64));
+ }
+
+ if(!m_hostByteOrder
+ && attr_desc->m_column->getType() == NdbDictionary::Column::Datetime)
+ {
+ char* p = (char*)&attr_data->u_int64_value[0];
+ Uint64 x;
+ memcpy(&x, p, sizeof(Uint64));
+ x = Twiddle64(x);
+ memcpy(p, &x, sizeof(Uint64));
+ }
+
if(!Twiddle(attr_desc, attr_data, attr_desc->arraySize))
{
res = -1;
@@ -704,6 +787,7 @@ bool RestoreDataIterator::readFragmentHeader(int & ret)
return false;
}
+ info.setLevel(254);
info << "_____________________________________________________" << endl
<< "Processing data in table: " << m_currentTable->getTableName()
<< "(" << Header.TableId << ") fragment "
@@ -924,13 +1008,14 @@ operator<<(NdbOut& ndbout, const AttributeS& attr){
if (data.null)
{
- ndbout << "<NULL>";
+ ndbout << g_ndbrecord_print_format.null_string;
return ndbout;
}
NdbRecAttr tmprec(0);
- tmprec.setup(desc.m_column, (char *)data.void_value);
- ndbout << tmprec;
+ tmprec.setup(desc.m_column, 0);
+ tmprec.receive_data((Uint32*)data.void_value, (data.size+3)/4);
+ ndbrecattr_print_formatted(ndbout, tmprec, g_ndbrecord_print_format);
return ndbout;
}
@@ -939,17 +1024,15 @@ operator<<(NdbOut& ndbout, const AttributeS& attr){
NdbOut&
operator<<(NdbOut& ndbout, const TupleS& tuple)
{
- ndbout << tuple.getTable()->getTableName() << "; ";
for (int i = 0; i < tuple.getNoOfAttributes(); i++)
{
+ if (i > 0)
+ ndbout << g_ndbrecord_print_format.fields_terminated_by;
AttributeData * attr_data = tuple.getData(i);
const AttributeDesc * attr_desc = tuple.getDesc(i);
const AttributeS attr = {attr_desc, *attr_data};
debug << i << " " << attr_desc->m_column->getName();
ndbout << attr;
-
- if (i != (tuple.getNoOfAttributes() - 1))
- ndbout << delimiter << " ";
} // for
return ndbout;
}
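
Several hunks above copy an eight-byte value out of the row buffer, byte-swap it with Twiddle64() and copy it back whenever the backup was written with the opposite byte order (the Blob/Text v1 length header, Datetime). A minimal sketch of that copy-swap-copy pattern with my own swap helper, not the actual Twiddle64() implementation:

#include <cassert>
#include <cstring>

typedef unsigned long long Uint64;  /* stand-in for the NDB Uint64 typedef */

/* Hypothetical byte-swap helper in the spirit of Twiddle64(). */
static Uint64 swap64(Uint64 in)
{
  Uint64 out = 0;
  for (int i = 0; i < 8; i++)
    out |= ((in >> (8 * i)) & 0xffULL) << (8 * (7 - i));
  return out;
}

int main()
{
  /* Same copy-swap-copy pattern as the Datetime/Blob handling above. */
  unsigned char buf[8] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };
  Uint64 x;
  std::memcpy(&x, buf, sizeof(x));
  x = swap64(x);
  std::memcpy(buf, &x, sizeof(x));
  assert(buf[0] == 0x08 && buf[7] == 0x01);
  return 0;
}
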
diff --git a/ndb/tools/restore/Restore.hpp b/ndb/tools/restore/Restore.hpp
index b132dda374d..c1545159ce4 100644
--- a/ndb/tools/restore/Restore.hpp
+++ b/ndb/tools/restore/Restore.hpp
@@ -25,8 +25,6 @@
#include <ndb_version.h>
#include <version.h>
-static const char * delimiter = ";"; // Delimiter in file dump
-
const int FileNameLenC = 256;
const int TableNameLenC = 256;
const int AttrNameLenC = 256;
@@ -143,6 +141,10 @@ class TableS {
int pos;
+ bool isSysTable;
+ TableS *m_main_table;
+ Uint32 m_local_id;
+
Uint64 m_noOfRecords;
Vector<FragmentInfo *> m_fragmentInfo;
@@ -156,6 +158,9 @@ public:
Uint32 getTableId() const {
return m_dictTable->getTableId();
}
+ Uint32 getLocalId() const {
+ return m_local_id;
+ }
Uint32 getNoOfRecords() const {
return m_noOfRecords;
}
@@ -214,6 +219,9 @@ public:
memcpy(&val.u32,data,4);
v= val.u32;
break;
+ case 24:
+ v= uint3korr((unsigned char*)data);
+ break;
case 16:
memcpy(&val.u16,data,2);
v= val.u16;
@@ -235,6 +243,14 @@ public:
return allAttributesDesc[attributeId];
}
+ bool getSysTable() const {
+ return isSysTable;
+ }
+
+ const TableS *getMainTable() const {
+ return m_main_table;
+ }
+
TableS& operator=(TableS& org) ;
}; // TableS;
@@ -285,6 +301,7 @@ class RestoreMetaData : public BackupFile {
Vector<TableS *> allTables;
bool readMetaFileHeader();
bool readMetaTableDesc();
+ bool markSysTables();
bool readGCPEntry();
bool readFragmentInfo();
@@ -358,6 +375,7 @@ public:
m_values_e.push_back(m_values[i]);
m_values.clear();
}
+ LogEntry() {}
~LogEntry()
{
Uint32 i;
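
The new `case 24` above reads a 3-byte attribute with uint3korr(), the MySQL macro for a 3-byte little-endian unsigned integer. A minimal sketch of an equivalent read, assuming little-endian storage; this is a hypothetical helper, not the macro itself:

#include <cassert>

/* Hypothetical equivalent of uint3korr(): read a 3-byte
   little-endian unsigned integer, as used for the 24-bit case above. */
static unsigned int read_uint24_le(const unsigned char* p)
{
  return (unsigned int) p[0]
       | ((unsigned int) p[1] << 8)
       | ((unsigned int) p[2] << 16);
}

int main()
{
  const unsigned char data[3] = { 0x34, 0x12, 0x7f };  /* 0x7f1234 */
  assert(read_uint24_le(data) == 0x7f1234u);
  return 0;
}
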
diff --git a/ndb/tools/restore/consumer.hpp b/ndb/tools/restore/consumer.hpp
index 14611897f19..a4e8e71012b 100644
--- a/ndb/tools/restore/consumer.hpp
+++ b/ndb/tools/restore/consumer.hpp
@@ -20,6 +20,7 @@
class BackupConsumer {
public:
+ BackupConsumer() {}
virtual ~BackupConsumer() { }
virtual bool init() { return true;}
virtual bool table(const TableS &){return true;}
diff --git a/ndb/tools/restore/consumer_printer.cpp b/ndb/tools/restore/consumer_printer.cpp
index 8fe9805c39c..e0525522284 100644
--- a/ndb/tools/restore/consumer_printer.cpp
+++ b/ndb/tools/restore/consumer_printer.cpp
@@ -14,6 +14,9 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include "consumer_printer.hpp"
+extern FilteredNdbOut info;
+extern NdbRecordPrintFormat g_ndbrecord_print_format;
+extern const char *tab_path;
bool
BackupPrinter::table(const TableS & tab)
@@ -21,7 +24,8 @@ BackupPrinter::table(const TableS & tab)
if (m_print || m_print_meta)
{
m_ndbout << tab;
- ndbout_c("Successfully printed table: %s", tab.m_dictTable->getName());
+ info.setLevel(254);
+ info << "Successfully printed table: ", tab.m_dictTable->getName();
}
return true;
}
@@ -31,7 +35,14 @@ BackupPrinter::tuple(const TupleS & tup)
{
m_dataCount++;
if (m_print || m_print_data)
- m_ndbout << tup << endl;
+ {
+ if (m_ndbout.m_out == info.m_out)
+ {
+ info.setLevel(254);
+ info << tup.getTable()->getTableName() << "; ";
+ }
+ m_ndbout << tup << g_ndbrecord_print_format.lines_terminated_by;
+ }
}
void
@@ -47,8 +58,9 @@ BackupPrinter::endOfLogEntrys()
{
if (m_print || m_print_log)
{
- ndbout << "Printed " << m_dataCount << " tuples and "
- << m_logCount << " log entries"
- << " to stdout." << endl;
+ info.setLevel(254);
+ info << "Printed " << m_dataCount << " tuples and "
+ << m_logCount << " log entries"
+ << " to stdout." << endl;
}
}
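
BackupPrinter::tuple() above now terminates each printed row with g_ndbrecord_print_format.lines_terminated_by, and operator<<(NdbOut&, const TupleS&) separates attributes with fields_terminated_by, with NULLs rendered as null_string. A minimal sketch of rendering one row under such a format, using a hypothetical struct rather than the real NdbRecordPrintFormat:

#include <cstdio>
#include <string>
#include <vector>

/* Hypothetical subset of the print-format fields set up in restore_main. */
struct PrintFormat
{
  const char* fields_terminated_by;
  const char* lines_terminated_by;
  const char* null_string;
};

/* Render one row: NULL fields become null_string, fields and lines get the
   configured terminators (mysqldump-style defaults shown in restore_main). */
static std::string render_row(const std::vector<const char*>& fields,
                              const PrintFormat& fmt)
{
  std::string out;
  for (unsigned i = 0; i < fields.size(); i++)
  {
    if (i > 0)
      out += fmt.fields_terminated_by;
    out += fields[i] ? fields[i] : fmt.null_string;
  }
  out += fmt.lines_terminated_by;
  return out;
}

int main()
{
  PrintFormat fmt = { "\t", "\n", "\\N" };
  const char* row[] = { "1", NULL, "abc" };
  std::fputs(render_row(std::vector<const char*>(row, row + 3), fmt).c_str(),
             stdout);  /* prints "1<TAB>\N<TAB>abc" plus a newline */
  return 0;
}
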
diff --git a/ndb/tools/restore/consumer_restore.cpp b/ndb/tools/restore/consumer_restore.cpp
index c955570eaa3..811868f3e77 100644
--- a/ndb/tools/restore/consumer_restore.cpp
+++ b/ndb/tools/restore/consumer_restore.cpp
@@ -205,7 +205,7 @@ BackupRestore::table(const TableS & table){
BaseString tmp(name);
Vector<BaseString> split;
if(tmp.split(split, "/") != 3){
- err << "Invalid table name format " << name << endl;
+ err << "Invalid table name format `" << name << "`" << endl;
return false;
}
@@ -230,16 +230,17 @@ BackupRestore::table(const TableS & table){
if (dict->createTable(copy) == -1)
{
- err << "Create table " << table.getTableName() << " failed: "
+ err << "Create table `" << table.getTableName() << "` failed: "
<< dict->getNdbError() << endl;
return false;
}
- info << "Successfully restored table " << table.getTableName()<< endl ;
+ info << "Successfully restored table `"
+ << table.getTableName() << "`" << endl;
}
const NdbDictionary::Table* tab = dict->getTable(split[2].c_str());
if(tab == 0){
- err << "Unable to find table: " << split[2].c_str() << endl;
+ err << "Unable to find table: `" << split[2].c_str() << "`" << endl;
return false;
}
const NdbDictionary::Table* null = 0;
@@ -257,12 +258,15 @@ BackupRestore::endOfTables(){
for(size_t i = 0; i<m_indexes.size(); i++){
NdbTableImpl & indtab = NdbTableImpl::getImpl(* m_indexes[i]);
- BaseString tmp(indtab.m_primaryTable.c_str());
Vector<BaseString> split;
- if(tmp.split(split, "/") != 3){
- err << "Invalid table name format " << indtab.m_primaryTable.c_str()
- << endl;
- return false;
+ {
+ BaseString tmp(indtab.m_primaryTable.c_str());
+ if (tmp.split(split, "/") != 3)
+ {
+ err << "Invalid table name format `" << indtab.m_primaryTable.c_str()
+ << "`" << endl;
+ return false;
+ }
}
m_ndb->setDatabaseName(split[0].c_str());
@@ -270,39 +274,41 @@ BackupRestore::endOfTables(){
const NdbDictionary::Table * prim = dict->getTable(split[2].c_str());
if(prim == 0){
- err << "Unable to find base table \"" << split[2].c_str()
- << "\" for index "
- << indtab.getName() << endl;
+ err << "Unable to find base table `" << split[2].c_str()
+ << "` for index `"
+ << indtab.getName() << "`" << endl;
return false;
}
NdbTableImpl& base = NdbTableImpl::getImpl(*prim);
NdbIndexImpl* idx;
- int id;
- char idxName[255], buf[255];
- if(sscanf(indtab.getName(), "%[^/]/%[^/]/%d/%s",
- buf, buf, &id, idxName) != 4){
- err << "Invalid index name format " << indtab.getName() << endl;
- return false;
+ Vector<BaseString> split_idx;
+ {
+ BaseString tmp(indtab.getName());
+ if (tmp.split(split_idx, "/") != 4)
+ {
+ err << "Invalid index name format `" << indtab.getName() << "`" << endl;
+ return false;
+ }
}
if(NdbDictInterface::create_index_obj_from_table(&idx, &indtab, &base))
{
- err << "Failed to create index " << idxName
- << " on " << split[2].c_str() << endl;
+ err << "Failed to create index `" << split_idx[3]
+ << "` on " << split[2].c_str() << endl;
return false;
}
- idx->setName(idxName);
+ idx->setName(split_idx[3].c_str());
if(dict->createIndex(* idx) != 0)
{
delete idx;
- err << "Failed to create index " << idxName
- << " on " << split[2].c_str() << endl
+ err << "Failed to create index `" << split_idx[3].c_str()
+ << "` on `" << split[2].c_str() << "`" << endl
<< dict->getNdbError() << endl;
return false;
}
delete idx;
- info << "Successfully created index " << idxName
- << " on " << split[2].c_str() << endl;
+ info << "Successfully created index `" << split_idx[3].c_str()
+ << "` on `" << split[2].c_str() << "`" << endl;
}
return true;
}
@@ -382,7 +388,7 @@ void BackupRestore::tuple_a(restore_callback_t *cb)
Uint32 length = (size * arraySize) / 8;
if (j == 0 && tup.getTable()->have_auto_inc(i))
- tup.getTable()->update_max_auto_val(dataPtr,size);
+ tup.getTable()->update_max_auto_val(dataPtr,size*arraySize);
if (attr_desc->m_column->getPrimaryKey())
{
@@ -596,7 +602,7 @@ BackupRestore::logEntry(const LogEntry & tup)
const char * dataPtr = attr->Data.string_value;
if (tup.m_table->have_auto_inc(attr->Desc->attrId))
- tup.m_table->update_max_auto_val(dataPtr,size);
+ tup.m_table->update_max_auto_val(dataPtr,size*arraySize);
const Uint32 length = (size / 8) * arraySize;
if (attr->Desc->m_column->getPrimaryKey())
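
The endOfTables() changes above replace sscanf() parsing with BaseString::split() on "/": a table's internal name splits into 3 parts (<db>/<schema>/<table>) and an index's internal name into 4 (<db>/<schema>/<table id>/<index name>). A minimal sketch of such a split using std::string instead of the BaseString API; the helper name and the example names are hypothetical:

#include <cassert>
#include <string>
#include <vector>

/* Hypothetical stand-in for BaseString::split(): break an internal NDB
   name such as "db/schema/table" on '/' into its components. */
static std::vector<std::string> split_name(const std::string& name)
{
  std::vector<std::string> parts;
  std::string::size_type start = 0, pos;
  while ((pos = name.find('/', start)) != std::string::npos)
  {
    parts.push_back(name.substr(start, pos - start));
    start = pos + 1;
  }
  parts.push_back(name.substr(start));
  return parts;
}

int main()
{
  std::vector<std::string> t = split_name("TEST_DB/def/T1");
  assert(t.size() == 3 && t[2] == "T1");        /* table name format */

  std::vector<std::string> i = split_name("sys/def/13/PRIMARY");
  assert(i.size() == 4 && i[3] == "PRIMARY");   /* index name format */
  return 0;
}
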
diff --git a/ndb/tools/restore/restore_main.cpp b/ndb/tools/restore/restore_main.cpp
index 0110782ff39..9887869a0b3 100644
--- a/ndb/tools/restore/restore_main.cpp
+++ b/ndb/tools/restore/restore_main.cpp
@@ -18,7 +18,9 @@
#include <Vector.hpp>
#include <ndb_limits.h>
#include <NdbTCP.h>
+#include <NdbMem.h>
#include <NdbOut.hpp>
+#include <OutputStream.hpp>
#include <NDBT_ReturnCodes.h>
#include "consumer_restore.hpp"
@@ -33,8 +35,18 @@ static int ga_nParallelism = 128;
static int ga_backupId = 0;
static bool ga_dont_ignore_systab_0 = false;
static Vector<class BackupConsumer *> g_consumers;
+static BackupPrinter* g_printer = NULL;
-static const char* ga_backupPath = "." DIR_SEPARATOR;
+static const char* default_backupPath = "." DIR_SEPARATOR;
+static const char* ga_backupPath = default_backupPath;
+
+const char *opt_ndb_database= NULL;
+const char *opt_ndb_table= NULL;
+unsigned int opt_verbose;
+unsigned int opt_hex_format;
+Vector<BaseString> g_databases;
+Vector<BaseString> g_tables;
+NdbRecordPrintFormat g_ndbrecord_print_format;
NDB_STD_OPTS_VARS;
@@ -53,6 +65,28 @@ BaseString g_options("ndb_restore");
const char *load_default_groups[]= { "mysql_cluster","ndb_restore",0 };
+enum ndb_restore_options {
+ OPT_PRINT= NDB_STD_OPTIONS_LAST,
+ OPT_PRINT_DATA,
+ OPT_PRINT_LOG,
+ OPT_PRINT_META,
+ OPT_BACKUP_PATH,
+ OPT_HEX_FORMAT,
+ OPT_FIELDS_ENCLOSED_BY,
+ OPT_FIELDS_TERMINATED_BY,
+ OPT_FIELDS_OPTIONALLY_ENCLOSED_BY,
+ OPT_LINES_TERMINATED_BY,
+ OPT_APPEND,
+ OPT_VERBOSE
+};
+static const char *opt_fields_enclosed_by= NULL;
+static const char *opt_fields_terminated_by= NULL;
+static const char *opt_fields_optionally_enclosed_by= NULL;
+static const char *opt_lines_terminated_by= NULL;
+
+static const char *tab_path= NULL;
+static int opt_append;
+
static struct my_option my_long_options[] =
{
NDB_STD_OPTS("ndb_restore"),
@@ -78,22 +112,56 @@ static struct my_option my_long_options[] =
"(parallelism can be 1 to 1024)",
(gptr*) &ga_nParallelism, (gptr*) &ga_nParallelism, 0,
GET_INT, REQUIRED_ARG, 128, 1, 1024, 0, 1, 0 },
- { "print", 256, "Print data and log to stdout",
+ { "print", OPT_PRINT, "Print metadata, data and log to stdout",
(gptr*) &_print, (gptr*) &_print, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
- { "print_data", 257, "Print data to stdout",
+ { "print_data", OPT_PRINT_DATA, "Print data to stdout",
(gptr*) &_print_data, (gptr*) &_print_data, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
- { "print_meta", 258, "Print meta data to stdout",
+ { "print_meta", OPT_PRINT_META, "Print meta data to stdout",
(gptr*) &_print_meta, (gptr*) &_print_meta, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
- { "print_log", 259, "Print log to stdout",
+ { "print_log", OPT_PRINT_LOG, "Print log to stdout",
(gptr*) &_print_log, (gptr*) &_print_log, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+ { "backup_path", OPT_BACKUP_PATH, "Path to backup files",
+ (gptr*) &ga_backupPath, (gptr*) &ga_backupPath, 0,
+ GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
{ "dont_ignore_systab_0", 'f',
"Experimental. Do not ignore system table during restore.",
(gptr*) &ga_dont_ignore_systab_0, (gptr*) &ga_dont_ignore_systab_0, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+ { "fields-enclosed-by", OPT_FIELDS_ENCLOSED_BY,
+ "Fields are enclosed by ...",
+ (gptr*) &opt_fields_enclosed_by, (gptr*) &opt_fields_enclosed_by, 0,
+ GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
+ { "fields-terminated-by", OPT_FIELDS_TERMINATED_BY,
+ "Fields are terminated by ...",
+ (gptr*) &opt_fields_terminated_by,
+ (gptr*) &opt_fields_terminated_by, 0,
+ GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
+ { "fields-optionally-enclosed-by", OPT_FIELDS_OPTIONALLY_ENCLOSED_BY,
+ "Fields are optionally enclosed by ...",
+ (gptr*) &opt_fields_optionally_enclosed_by,
+ (gptr*) &opt_fields_optionally_enclosed_by, 0,
+ GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
+ { "hex", OPT_HEX_FORMAT, "print binary types in hex format",
+ (gptr*) &opt_hex_format, (gptr*) &opt_hex_format, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+ { "tab", 'T', "Creates tab separated textfile for each table to "
+ "given path. (creates .txt files)",
+ (gptr*) &tab_path, (gptr*) &tab_path, 0,
+ GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ { "append", OPT_APPEND, "for --tab append data to file",
+ (gptr*) &opt_append, (gptr*) &opt_append, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+ { "lines-terminated-by", OPT_LINES_TERMINATED_BY, "",
+ (gptr*) &opt_lines_terminated_by, (gptr*) &opt_lines_terminated_by, 0,
+ GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
+ { "verbose", OPT_VERBOSE,
+ "verbosity",
+ (gptr*) &opt_verbose, (gptr*) &opt_verbose, 0,
+ GET_INT, REQUIRED_ARG, 1, 0, 255, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
@@ -119,19 +187,26 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
#endif
ndb_std_get_one_option(optid, opt, argument);
switch (optid) {
+ case OPT_VERBOSE:
+ info.setThreshold(255-opt_verbose);
+ break;
case 'n':
if (ga_nodeId == 0)
{
- printf("Error in --nodeid,-n setting, see --help\n");
+ err << "Error in --nodeid,-n setting, see --help";
exit(NDBT_ProgramExit(NDBT_WRONGARGS));
}
+ info.setLevel(254);
+ info << "Nodeid = " << ga_nodeId << endl;
break;
case 'b':
if (ga_backupId == 0)
{
- printf("Error in --backupid,-b setting, see --help\n");
+ err << "Error in --backupid,-b setting, see --help";
exit(NDBT_ProgramExit(NDBT_WRONGARGS));
}
+ info.setLevel(254);
+ info << "Backup Id = " << ga_backupId << endl;
break;
}
return 0;
@@ -139,20 +214,26 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
bool
readArguments(int *pargc, char*** pargv)
{
+ Uint32 i;
+ debug << "Load defaults" << endl;
+ const char *load_default_groups[]= { "mysql_cluster","ndb_restore",0 };
+
load_defaults("my",load_default_groups,pargc,pargv);
+ debug << "handle_options" << endl;
if (handle_options(pargc, pargv, my_long_options, get_one_option))
{
exit(NDBT_ProgramExit(NDBT_WRONGARGS));
}
- BackupPrinter* printer = new BackupPrinter();
- if (printer == NULL)
+ g_printer = new BackupPrinter();
+ if (g_printer == NULL)
return false;
BackupRestore* restore = new BackupRestore(ga_nParallelism);
if (restore == NULL)
{
- delete printer;
+ delete g_printer;
+ g_printer = NULL;
return false;
}
@@ -160,22 +241,22 @@ readArguments(int *pargc, char*** pargv)
{
ga_print = true;
ga_restore = true;
- printer->m_print = true;
+ g_printer->m_print = true;
}
if (_print_meta)
{
ga_print = true;
- printer->m_print_meta = true;
+ g_printer->m_print_meta = true;
}
if (_print_data)
{
ga_print = true;
- printer->m_print_data = true;
+ g_printer->m_print_data = true;
}
if (_print_log)
{
ga_print = true;
- printer->m_print_log = true;
+ g_printer->m_print_log = true;
}
if (_restore_data)
@@ -191,19 +272,64 @@ readArguments(int *pargc, char*** pargv)
}
{
- BackupConsumer * c = printer;
+ BackupConsumer * c = g_printer;
g_consumers.push_back(c);
}
{
BackupConsumer * c = restore;
g_consumers.push_back(c);
}
- // Set backup file path
- if (*pargv[0] != NULL)
+ for (;;)
{
- ga_backupPath = *pargv[0];
+ int i= 0;
+ if (ga_backupPath == default_backupPath)
+ {
+ // Set backup file path
+ if ((*pargv)[i] == NULL)
+ break;
+ ga_backupPath = (*pargv)[i++];
+ }
+ if ((*pargv)[i] == NULL)
+ break;
+ g_databases.push_back((*pargv)[i++]);
+ while ((*pargv)[i] != NULL)
+ {
+ g_tables.push_back((*pargv)[i++]);
+ }
+ break;
}
-
+ info.setLevel(254);
+ info << "backup path = " << ga_backupPath << endl;
+ if (g_databases.size() > 0)
+ {
+ info << "Restoring only from database " << g_databases[0].c_str() << endl;
+ if (g_tables.size() > 0)
+ info << "Restoring only tables:";
+ for (unsigned i= 0; i < g_tables.size(); i++)
+ {
+ info << " " << g_tables[i].c_str();
+ }
+ if (g_tables.size() > 0)
+ info << endl;
+ }
+ /*
+ the formatting below follows the formatting used by mysqldump;
+ do not change it except to adapt to changes in mysqldump
+ */
+ g_ndbrecord_print_format.fields_enclosed_by=
+ opt_fields_enclosed_by ? opt_fields_enclosed_by : "";
+ g_ndbrecord_print_format.fields_terminated_by=
+ opt_fields_terminated_by ? opt_fields_terminated_by : "\t";
+ g_ndbrecord_print_format.fields_optionally_enclosed_by=
+ opt_fields_optionally_enclosed_by ? opt_fields_optionally_enclosed_by : "";
+ g_ndbrecord_print_format.lines_terminated_by=
+ opt_lines_terminated_by ? opt_lines_terminated_by : "\n";
+ if (g_ndbrecord_print_format.fields_optionally_enclosed_by[0] == '\0')
+ g_ndbrecord_print_format.null_string= "\\N";
+ else
+ g_ndbrecord_print_format.null_string= "";
+ g_ndbrecord_print_format.hex_prefix= "";
+ g_ndbrecord_print_format.hex_format= opt_hex_format;
return true;
}
@@ -215,14 +341,81 @@ clearConsumers()
g_consumers.clear();
}
-static bool
-checkSysTable(const char *tableName)
+static inline bool
+checkSysTable(const TableS* table)
+{
+ return ga_dont_ignore_systab_0 || ! table->getSysTable();
+}
+
+static inline bool
+checkSysTable(const RestoreMetaData& metaData, uint i)
+{
+ assert(i < metaData.getNoOfTables());
+ return checkSysTable(metaData[i]);
+}
+
+static inline bool
+isBlobTable(const TableS* table)
+{
+ return table->getMainTable() != NULL;
+}
+
+static inline bool
+isIndex(const TableS* table)
+{
+ const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* table->m_dictTable);
+ return (int) tmptab.m_indexType != (int) NdbDictionary::Index::Undefined;
+}
+
+static inline bool
+checkDbAndTableName(const TableS* table)
{
- return ga_dont_ignore_systab_0 ||
- (strcmp(tableName, "SYSTAB_0") != 0 &&
- strcmp(tableName, "NDB$EVENTS_0") != 0 &&
- strcmp(tableName, "sys/def/SYSTAB_0") != 0 &&
- strcmp(tableName, "sys/def/NDB$EVENTS_0") != 0);
+ if (g_tables.size() == 0 &&
+ g_databases.size() == 0)
+ return true;
+ if (g_databases.size() == 0)
+ g_databases.push_back("TEST_DB");
+
+ // Filter on the main table name for indexes and blobs
+ const char *table_name;
+ if (isBlobTable(table))
+ table_name= table->getMainTable()->getTableName();
+ else if (isIndex(table))
+ table_name=
+ NdbTableImpl::getImpl(*table->m_dictTable).m_primaryTable.c_str();
+ else
+ table_name= table->getTableName();
+
+ unsigned i;
+ for (i= 0; i < g_databases.size(); i++)
+ {
+ if (strncmp(table_name, g_databases[i].c_str(),
+ g_databases[i].length()) == 0 &&
+ table_name[g_databases[i].length()] == '/')
+ {
+ // we have a match
+ if (g_databases.size() > 1 || g_tables.size() == 0)
+ return true;
+ break;
+ }
+ }
+ if (i == g_databases.size())
+ return false; // no match found
+
+ while (*table_name != '/') table_name++;
+ table_name++;
+ while (*table_name != '/') table_name++;
+ table_name++;
+
+ for (i= 0; i < g_tables.size(); i++)
+ {
+ if (strcmp(table_name, g_tables[i].c_str()) == 0)
+ {
+ // we have a match
+ return true;
+ }
+ }
+ return false;
}
static void
@@ -247,6 +440,7 @@ main(int argc, char** argv)
{
NDB_INIT(argv[0]);
+ debug << "Start readArguments" << endl;
if (!readArguments(&argc, &argv))
{
exitHandler(NDBT_FAILED);
@@ -265,10 +459,11 @@ main(int argc, char** argv)
/**
* we must always load meta data, even if we will only print it to stdout
*/
+ debug << "Start restoring meta data" << endl;
RestoreMetaData metaData(ga_backupPath, ga_nodeId, ga_backupId);
if (!metaData.readHeader())
{
- ndbout << "Failed to read " << metaData.getFilename() << endl << endl;
+ err << "Failed to read " << metaData.getFilename() << endl << endl;
exitHandler(NDBT_FAILED);
}
@@ -276,66 +471,108 @@ main(int argc, char** argv)
const Uint32 version = tmp.NdbVersion;
char buf[NDB_VERSION_STRING_BUF_SZ];
- ndbout << "Ndb version in backup files: "
- << getVersionString(version, 0, buf, sizeof(buf)) << endl;
+ info.setLevel(254);
+ info << "Ndb version in backup files: "
+ << getVersionString(version, 0, buf, sizeof(buf)) << endl;
/**
* check wheater we can restore the backup (right version).
*/
+ if (version > NDB_VERSION)
+ {
+ err << "Restore program older than backup version. Not supported. "
+ << "Use new restore program" << endl;
+ exitHandler(NDBT_FAILED);
+ }
+
+ debug << "Load content" << endl;
int res = metaData.loadContent();
if (res == 0)
{
- ndbout_c("Restore: Failed to load content");
+ err << "Restore: Failed to load content" << endl;
exitHandler(NDBT_FAILED);
}
-
+ debug << "Get no of Tables" << endl;
if (metaData.getNoOfTables() == 0)
{
- ndbout_c("Restore: The backup contains no tables ");
+ err << "The backup contains no tables" << endl;
exitHandler(NDBT_FAILED);
}
-
+ debug << "Validate Footer" << endl;
if (!metaData.validateFooter())
{
- ndbout_c("Restore: Failed to validate footer.");
+ err << "Restore: Failed to validate footer." << endl;
exitHandler(NDBT_FAILED);
}
-
+ debug << "Init Backup objects" << endl;
Uint32 i;
for(i= 0; i < g_consumers.size(); i++)
{
if (!g_consumers[i]->init())
{
clearConsumers();
+ err << "Failed to initialize consumers" << endl;
exitHandler(NDBT_FAILED);
}
}
+ Vector<OutputStream *> table_output(metaData.getNoOfTables());
+ debug << "Restoring tables" << endl;
for(i = 0; i<metaData.getNoOfTables(); i++)
{
- if (checkSysTable(metaData[i]->getTableName()))
+ const TableS *table= metaData[i];
+ table_output.push_back(NULL);
+ if (!checkDbAndTableName(table))
+ continue;
+ if (checkSysTable(table))
{
+ if (!tab_path || isBlobTable(table) || isIndex(table))
+ {
+ table_output[i]= ndbout.m_out;
+ }
+ else
+ {
+ FILE* res;
+ char filename[FN_REFLEN], tmp_path[FN_REFLEN];
+ const char *table_name;
+ table_name= table->getTableName();
+ while (*table_name != '/') table_name++;
+ table_name++;
+ while (*table_name != '/') table_name++;
+ table_name++;
+ convert_dirname(tmp_path, tab_path, NullS);
+ res= my_fopen(fn_format(filename, table_name, tmp_path, ".txt", 4),
+ opt_append ?
+ O_WRONLY|O_APPEND|O_CREAT :
+ O_WRONLY|O_TRUNC|O_CREAT,
+ MYF(MY_WME));
+ if (res == 0)
+ {
+ exitHandler(NDBT_FAILED);
+ }
+ FileOutputStream *f= new FileOutputStream(res);
+ table_output[i]= f;
+ }
for(Uint32 j= 0; j < g_consumers.size(); j++)
- if (!g_consumers[j]->table(* metaData[i]))
+ if (!g_consumers[j]->table(* table))
{
- ndbout_c("Restore: Failed to restore table: %s. "
- "Exiting...",
- metaData[i]->getTableName());
+ err << "Restore: Failed to restore table: `";
+ err << table->getTableName() << "` ... Exiting " << endl;
exitHandler(NDBT_FAILED);
- }
+ }
}
}
-
+ debug << "Close tables" << endl;
for(i= 0; i < g_consumers.size(); i++)
if (!g_consumers[i]->endOfTables())
{
- ndbout_c("Restore: Failed while closing tables");
+ err << "Restore: Failed while closing tables" << endl;
exitHandler(NDBT_FAILED);
}
-
+ debug << "Iterate over data" << endl;
if (ga_restore || ga_print)
{
if(_restore_data || _print_data)
@@ -345,7 +582,7 @@ main(int argc, char** argv)
// Read data file header
if (!dataIter.readHeader())
{
- ndbout << "Failed to read header of data file. Exiting..." ;
+ err << "Failed to read header of data file. Exiting..." << endl;
exitHandler(NDBT_FAILED);
}
@@ -355,20 +592,26 @@ main(int argc, char** argv)
const TupleS* tuple;
while ((tuple = dataIter.getNextTuple(res= 1)) != 0)
{
- if (checkSysTable(tuple->getTable()->getTableName()))
- for(Uint32 i= 0; i < g_consumers.size(); i++)
- g_consumers[i]->tuple(* tuple);
+ const TableS* table = tuple->getTable();
+ OutputStream *output = table_output[table->getLocalId()];
+ if (!output)
+ continue;
+ OutputStream *tmp = ndbout.m_out;
+ ndbout.m_out = output;
+ for(Uint32 j= 0; j < g_consumers.size(); j++)
+ g_consumers[j]->tuple(* tuple);
+ ndbout.m_out = tmp;
} // while (tuple != NULL);
if (res < 0)
{
- ndbout_c("Restore: An error occured while restoring data. "
- "Exiting...");
+ err <<" Restore: An error occured while restoring data. Exiting...";
+ err << endl;
exitHandler(NDBT_FAILED);
}
if (!dataIter.validateFragmentFooter()) {
- ndbout_c("Restore: Error validating fragment footer. "
- "Exiting...");
+ err << "Restore: Error validating fragment footer. ";
+ err << "Exiting..." << endl;
exitHandler(NDBT_FAILED);
}
} // while (dataIter.readFragmentHeader(res))
@@ -376,7 +619,7 @@ main(int argc, char** argv)
if (res < 0)
{
err << "Restore: An error occured while restoring data. Exiting... "
- << "res=" << res << endl;
+ << "res= " << res << endl;
exitHandler(NDBT_FAILED);
}
@@ -399,9 +642,12 @@ main(int argc, char** argv)
const LogEntry * logEntry = 0;
while ((logEntry = logIter.getNextLogEntry(res= 0)) != 0)
{
- if (checkSysTable(logEntry->m_table->getTableName()))
- for(Uint32 i= 0; i < g_consumers.size(); i++)
- g_consumers[i]->logEntry(* logEntry);
+ const TableS* table = logEntry->m_table;
+ OutputStream *output = table_output[table->getLocalId()];
+ if (!output)
+ continue;
+ for(Uint32 j= 0; j < g_consumers.size(); j++)
+ g_consumers[j]->logEntry(* logEntry);
}
if (res < 0)
{
@@ -418,33 +664,48 @@ main(int argc, char** argv)
{
for(i = 0; i<metaData.getNoOfTables(); i++)
{
- if (checkSysTable(metaData[i]->getTableName()))
- {
- for(Uint32 j= 0; j < g_consumers.size(); j++)
- if (!g_consumers[j]->finalize_table(* metaData[i]))
- {
- ndbout_c("Restore: Failed to finalize restore table: %s. "
- "Exiting...",
- metaData[i]->getTableName());
- exitHandler(NDBT_FAILED);
- }
- }
+ const TableS* table = metaData[i];
+ OutputStream *output = table_output[table->getLocalId()];
+ if (!output)
+ continue;
+ for(Uint32 j= 0; j < g_consumers.size(); j++)
+ if (!g_consumers[j]->finalize_table(*table))
+ {
+ err << "Restore: Failed to finalize restore table: %s. ";
+ err << "Exiting... " << metaData[i]->getTableName() << endl;
+ exitHandler(NDBT_FAILED);
+ }
}
}
}
- for(Uint32 i= 0; i < g_consumers.size(); i++)
+ for(Uint32 j= 0; j < g_consumers.size(); j++)
{
- if (g_consumers[i]->has_temp_error())
+ if (g_consumers[j]->has_temp_error())
{
clearConsumers();
ndbout_c("\nRestore successful, but encountered temporary error, "
"please look at configuration.");
- return NDBT_ProgramExit(NDBT_TEMPORARY);
+ }
+ }
+
+ clearConsumers();
+
+ for(i = 0; i < metaData.getNoOfTables(); i++)
+ {
+ if (table_output[i] &&
+ table_output[i] != ndbout.m_out)
+ {
+ my_fclose(((FileOutputStream *)table_output[i])->getFile(), MYF(MY_WME));
+ delete table_output[i];
+ table_output[i] = NULL;
}
}
- clearConsumers();
- return NDBT_ProgramExit(NDBT_OK);
+ if (opt_verbose)
+ return NDBT_ProgramExit(NDBT_OK);
+ else
+ return 0;
} // main
template class Vector<BackupConsumer*>;
+template class Vector<OutputStream*>;
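
checkDbAndTableName() above matches the requested database as a prefix of the table's internal <db>/<schema>/<table> name and, after skipping the two '/' separators, compares the bare table name against the names given on the command line. A minimal condensation of that matching with plain C strings; the helper name is hypothetical and the sketch assumes a well-formed internal name, as the restore code does:

#include <cassert>
#include <cstring>

/* Hypothetical condensation of the checkDbAndTableName() logic above:
   does the internal name "<db>/<schema>/<table>" refer to the requested
   database and (optionally) table? */
static bool name_matches(const char* internal_name,
                         const char* db, const char* table /* may be NULL */)
{
  size_t db_len = std::strlen(db);
  if (std::strncmp(internal_name, db, db_len) != 0 ||
      internal_name[db_len] != '/')
    return false;                      /* wrong database */

  const char* p = internal_name;
  while (*p != '/') p++;               /* skip "<db>" */
  p++;
  while (*p != '/') p++;               /* skip "<schema>" */
  p++;                                 /* p now points at the bare table name */

  return table == NULL || std::strcmp(p, table) == 0;
}

int main()
{
  assert( name_matches("TEST_DB/def/t1", "TEST_DB", "t1"));
  assert( name_matches("TEST_DB/def/t1", "TEST_DB", NULL));
  assert(!name_matches("TEST_DB/def/t1", "TEST", "t1"));  /* '_' != '/' after "TEST" */
  assert(!name_matches("OTHER/def/t1",  "TEST_DB", "t1"));
  return 0;
}
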
diff --git a/ndb/tools/select_all.cpp b/ndb/tools/select_all.cpp
index 5d70ac32d02..27d96a9c6ed 100644
--- a/ndb/tools/select_all.cpp
+++ b/ndb/tools/select_all.cpp
@@ -39,7 +39,7 @@ NDB_STD_OPTS_VARS;
static const char* _dbname = "TEST_DB";
static const char* _delimiter = "\t";
-static int _unqualified, _header, _parallelism, _useHexFormat, _lock,
+static int _header, _parallelism, _useHexFormat, _lock,
_order, _descending;
const char *load_default_groups[]= { "mysql_cluster",0 };
@@ -75,6 +75,7 @@ static struct my_option my_long_options[] =
};
static void usage()
{
+#ifdef NOT_USED
char desc[] =
"tabname\n"\
"This program reads all records from one table in NDB Cluster\n"\
@@ -82,6 +83,7 @@ static void usage()
"(It only print error messages if it encounters a permanent error.)\n"\
"It can also be used to dump the content of a table to file \n"\
" ex: select_all --no-header --delimiter=';' T4 > T4.data\n";
+#endif
ndb_std_print_version();
print_defaults(MYSQL_CONFIG_NAME,load_default_groups);
puts("");
@@ -106,6 +108,7 @@ int main(int argc, char** argv){
}
Ndb_cluster_connection con(opt_connect_str);
+ con.set_name("ndb_select_all");
if(con.connect(12, 5, 1) != 0)
{
ndbout << "Unable to connect to management server." << endl;
diff --git a/ndb/tools/select_count.cpp b/ndb/tools/select_count.cpp
index a133f7967f8..552d156b665 100644
--- a/ndb/tools/select_count.cpp
+++ b/ndb/tools/select_count.cpp
@@ -55,9 +55,11 @@ static struct my_option my_long_options[] =
};
static void usage()
{
+#ifdef NOT_USED
char desc[] =
"tabname1 ... tabnameN\n"\
"This program will count the number of records in tables\n";
+#endif
ndb_std_print_version();
print_defaults(MYSQL_CONFIG_NAME,load_default_groups);
puts("");
@@ -81,6 +83,7 @@ int main(int argc, char** argv){
}
Ndb_cluster_connection con(opt_connect_str);
+ con.set_name("ndb_select_count");
if(con.connect(12, 5, 1) != 0)
{
ndbout << "Unable to connect to management server." << endl;