Diffstat (limited to 'storage/ndb')
-rw-r--r--  storage/ndb/include/kernel/AttributeHeader.hpp | 10
-rw-r--r--  storage/ndb/include/kernel/GlobalSignalNumbers.h | 8
-rw-r--r--  storage/ndb/include/kernel/signaldata/AccScan.hpp | 1
-rw-r--r--  storage/ndb/include/kernel/signaldata/CopyFrag.hpp | 41
-rw-r--r--  storage/ndb/include/kernel/signaldata/ScanTab.hpp | 1
-rw-r--r--  storage/ndb/include/kernel/signaldata/TcKeyConf.hpp | 2
-rw-r--r--  storage/ndb/include/ndb_version.h.in | 47
-rw-r--r--  storage/ndb/include/ndbapi/Ndb.hpp | 1
-rw-r--r--  storage/ndb/include/ndbapi/NdbOperation.hpp | 5
-rw-r--r--  storage/ndb/include/ndbapi/NdbPool.hpp | 3
-rw-r--r--  storage/ndb/include/ndbapi/NdbScanFilter.hpp | 29
-rw-r--r--  storage/ndb/include/ndbapi/NdbTransaction.hpp | 9
-rw-r--r--  storage/ndb/include/ndbapi/ndbapi_limits.h | 2
-rw-r--r--  storage/ndb/include/util/ndb_rand.h | 33
-rw-r--r--  storage/ndb/src/common/debugger/EventLogger.cpp | 12
-rw-r--r--  storage/ndb/src/common/debugger/signaldata/SignalNames.cpp | 4
-rw-r--r--  storage/ndb/src/common/debugger/signaldata/TcKeyConf.cpp | 4
-rw-r--r--  storage/ndb/src/common/util/Makefile.am | 3
-rw-r--r--  storage/ndb/src/common/util/ndb_rand.c | 40
-rw-r--r--  storage/ndb/src/cw/cpcd/APIService.cpp | 2
-rw-r--r--  storage/ndb/src/kernel/blocks/ERROR_codes.txt | 15
-rw-r--r--  storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 32
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp | 8
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp | 5
-rw-r--r--  storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 150
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp | 2
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp | 3
-rw-r--r--  storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 180
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp | 12
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 203
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 14
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp | 264
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp | 6
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp | 66
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp | 10
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp | 70
-rw-r--r--  storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp | 5
-rw-r--r--  storage/ndb/src/kernel/blocks/lgman.cpp | 12
-rw-r--r--  storage/ndb/src/kernel/blocks/pgman.cpp | 62
-rw-r--r--  storage/ndb/src/kernel/blocks/suma/Suma.cpp | 6
-rw-r--r--  storage/ndb/src/kernel/blocks/tsman.cpp | 62
-rw-r--r--  storage/ndb/src/kernel/blocks/tsman.hpp | 24
-rw-r--r--  storage/ndb/src/mgmclient/CommandInterpreter.cpp | 12
-rw-r--r--  storage/ndb/src/mgmsrv/MgmtSrvr.cpp | 6
-rw-r--r--  storage/ndb/src/ndbapi/NdbBlob.cpp | 2
-rw-r--r--  storage/ndb/src/ndbapi/NdbIndexOperation.cpp | 3
-rw-r--r--  storage/ndb/src/ndbapi/NdbOperation.cpp | 6
-rw-r--r--  storage/ndb/src/ndbapi/NdbOperationDefine.cpp | 76
-rw-r--r--  storage/ndb/src/ndbapi/NdbOperationExec.cpp | 17
-rw-r--r--  storage/ndb/src/ndbapi/NdbReceiver.cpp | 2
-rw-r--r--  storage/ndb/src/ndbapi/NdbScanFilter.cpp | 244
-rw-r--r--  storage/ndb/src/ndbapi/NdbScanOperation.cpp | 5
-rw-r--r--  storage/ndb/src/ndbapi/NdbTransaction.cpp | 6
-rw-r--r--  storage/ndb/src/ndbapi/ndberror.c | 1
-rw-r--r--  storage/ndb/test/include/HugoTransactions.hpp | 18
-rw-r--r--  storage/ndb/test/include/UtilTransactions.hpp | 5
-rwxr-xr-x  storage/ndb/test/include/dbutil.hpp | 97
-rw-r--r--  storage/ndb/test/ndbapi/testBasic.cpp | 35
-rw-r--r--  storage/ndb/test/ndbapi/testDict.cpp | 259
-rw-r--r--  storage/ndb/test/ndbapi/testIndex.cpp | 4
-rw-r--r--  storage/ndb/test/ndbapi/testNodeRestart.cpp | 77
-rw-r--r--  storage/ndb/test/ndbapi/testScan.cpp | 2
-rw-r--r--  storage/ndb/test/ndbapi/testSystemRestart.cpp | 329
-rw-r--r--  storage/ndb/test/ndbapi/test_event.cpp | 90
-rw-r--r--  storage/ndb/test/run-test/daily-basic-tests.txt | 96
-rw-r--r--  storage/ndb/test/src/HugoOperations.cpp | 1
-rw-r--r--  storage/ndb/test/src/HugoTransactions.cpp | 229
-rw-r--r--  storage/ndb/test/src/Makefile.am | 2
-rw-r--r--  storage/ndb/test/src/NDBT_Thread.cpp | 2
-rw-r--r--  storage/ndb/test/src/NdbRestarts.cpp | 1
-rw-r--r--  storage/ndb/test/src/UtilTransactions.cpp | 74
-rwxr-xr-x  storage/ndb/test/src/dbutil.cpp | 176
72 files changed, 2775 insertions, 570 deletions
diff --git a/storage/ndb/include/kernel/AttributeHeader.hpp b/storage/ndb/include/kernel/AttributeHeader.hpp
index 613e3d19d1b..52f93b6cd05 100644
--- a/storage/ndb/include/kernel/AttributeHeader.hpp
+++ b/storage/ndb/include/kernel/AttributeHeader.hpp
@@ -52,8 +52,7 @@ public:
// NOTE: in 5.1 ctors and init take size in bytes
/** Initialize AttributeHeader at location aHeaderPtr */
- static AttributeHeader& init(void* aHeaderPtr, Uint32 anAttributeId,
- Uint32 aByteSize);
+ static void init(Uint32* aHeaderPtr, Uint32 anAttributeId, Uint32 aByteSize);
/** Returns size of AttributeHeader (usually one or two words) */
Uint32 getHeaderSize() const; // In 32-bit words
@@ -113,10 +112,11 @@ public:
*/
inline
-AttributeHeader& AttributeHeader::init(void* aHeaderPtr, Uint32 anAttributeId,
- Uint32 aByteSize)
+void AttributeHeader::init(Uint32* aHeaderPtr, Uint32 anAttributeId,
+ Uint32 aByteSize)
{
- return * new (aHeaderPtr) AttributeHeader(anAttributeId, aByteSize);
+ AttributeHeader ah(anAttributeId, aByteSize);
+ *aHeaderPtr = ah.m_value;
}
inline
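
The rewritten init() above replaces placement new with a plain store of the encoded header word into a caller-supplied Uint32. A minimal standalone sketch of the same pattern, using a simplified stand-in class; the bit layout below is illustrative only, not the real AttributeHeader encoding.

// Minimal sketch of the pattern above: encode attribute id + byte size into a
// single 32-bit header word and store it, instead of placement-new'ing an
// object at the target address. Bit layout is illustrative only.
#include <cstdint>

struct MiniAttributeHeader {
  uint32_t m_value;
  MiniAttributeHeader(uint32_t attrId, uint32_t byteSize)
    : m_value((attrId << 16) | (byteSize & 0xFFFF)) {}
  static void init(uint32_t* aHeaderPtr, uint32_t anAttributeId, uint32_t aByteSize) {
    MiniAttributeHeader ah(anAttributeId, aByteSize);
    *aHeaderPtr = ah.m_value;   // plain store into the target buffer
  }
};

int main() {
  uint32_t word = 0;
  MiniAttributeHeader::init(&word, 7 /*attrId*/, 12 /*bytes*/);
  return word == ((7u << 16) | 12u) ? 0 : 1;
}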
diff --git a/storage/ndb/include/kernel/GlobalSignalNumbers.h b/storage/ndb/include/kernel/GlobalSignalNumbers.h
index aa0596f102a..9653c20260f 100644
--- a/storage/ndb/include/kernel/GlobalSignalNumbers.h
+++ b/storage/ndb/include/kernel/GlobalSignalNumbers.h
@@ -195,9 +195,11 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
/* 132 not unused */
/* 133 not unused */
#define GSN_CM_HEARTBEAT 134 /* distr. */
-/* 135 unused */
-/* 136 unused */
-/* 137 unused */
+
+#define GSN_PREPARE_COPY_FRAG_REQ 135
+#define GSN_PREPARE_COPY_FRAG_REF 136
+#define GSN_PREPARE_COPY_FRAG_CONF 137
+
#define GSN_CM_NODEINFOCONF 138 /* distr. */
#define GSN_CM_NODEINFOREF 139 /* distr. */
#define GSN_CM_NODEINFOREQ 140 /* distr. */
diff --git a/storage/ndb/include/kernel/signaldata/AccScan.hpp b/storage/ndb/include/kernel/signaldata/AccScan.hpp
index 73d69825069..a0aa38c8d8e 100644
--- a/storage/ndb/include/kernel/signaldata/AccScan.hpp
+++ b/storage/ndb/include/kernel/signaldata/AccScan.hpp
@@ -49,6 +49,7 @@ private:
Uint32 savePointId;
Uint32 gci;
};
+ Uint32 maxPage;
/**
* Previously there where also a scan type
diff --git a/storage/ndb/include/kernel/signaldata/CopyFrag.hpp b/storage/ndb/include/kernel/signaldata/CopyFrag.hpp
index 06dd4070264..d985358dce4 100644
--- a/storage/ndb/include/kernel/signaldata/CopyFrag.hpp
+++ b/storage/ndb/include/kernel/signaldata/CopyFrag.hpp
@@ -29,7 +29,7 @@ class CopyFragReq {
*/
friend class Dblqh;
public:
- STATIC_CONST( SignalLength = 9 );
+ STATIC_CONST( SignalLength = 10 );
private:
Uint32 userPtr;
@@ -42,6 +42,7 @@ private:
Uint32 gci;
Uint32 nodeCount;
Uint32 nodeList[1];
+ //Uint32 maxPage; is stored in nodeList[nodeCount]
};
class CopyFragConf {
@@ -95,4 +96,42 @@ struct UpdateFragDistKeyOrd
STATIC_CONST( SignalLength = 3 );
};
+struct PrepareCopyFragReq
+{
+ STATIC_CONST( SignalLength = 6 );
+
+ Uint32 senderRef;
+ Uint32 senderData;
+ Uint32 tableId;
+ Uint32 fragId;
+ Uint32 copyNodeId;
+ Uint32 startingNodeId;
+};
+
+struct PrepareCopyFragRef
+{
+ Uint32 senderRef;
+ Uint32 senderData;
+ Uint32 tableId;
+ Uint32 fragId;
+ Uint32 copyNodeId;
+ Uint32 startingNodeId;
+ Uint32 errorCode;
+
+ STATIC_CONST( SignalLength = 7 );
+};
+
+struct PrepareCopyFragConf
+{
+ STATIC_CONST( SignalLength = 7 );
+
+ Uint32 senderRef;
+ Uint32 senderData;
+ Uint32 tableId;
+ Uint32 fragId;
+ Uint32 copyNodeId;
+ Uint32 startingNodeId;
+ Uint32 maxPageNo;
+};
+
#endif
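
The three structs above are plain sequences of Uint32 words whose SignalLength constants give the on-wire word counts. A quick standalone sanity check, with the structs re-declared locally (plain static const instead of STATIC_CONST) under the assumption that each field occupies exactly one 32-bit word:

// Standalone check only: the real definitions live in CopyFrag.hpp above.
#include <cstdint>
typedef uint32_t Uint32;

struct PrepareCopyFragReq {
  static const Uint32 SignalLength = 6;
  Uint32 senderRef, senderData, tableId, fragId, copyNodeId, startingNodeId;
};

struct PrepareCopyFragConf {
  static const Uint32 SignalLength = 7;
  Uint32 senderRef, senderData, tableId, fragId, copyNodeId, startingNodeId, maxPageNo;
};

// Each request/response should occupy exactly SignalLength 32-bit words.
static_assert(sizeof(PrepareCopyFragReq) ==
              PrepareCopyFragReq::SignalLength * sizeof(Uint32), "req is 6 words");
static_assert(sizeof(PrepareCopyFragConf) ==
              PrepareCopyFragConf::SignalLength * sizeof(Uint32), "conf is 7 words");

int main() { return 0; }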
diff --git a/storage/ndb/include/kernel/signaldata/ScanTab.hpp b/storage/ndb/include/kernel/signaldata/ScanTab.hpp
index 0074078533f..3d2071ca019 100644
--- a/storage/ndb/include/kernel/signaldata/ScanTab.hpp
+++ b/storage/ndb/include/kernel/signaldata/ScanTab.hpp
@@ -46,6 +46,7 @@ public:
* Length of signal
*/
STATIC_CONST( StaticLength = 11 );
+ STATIC_CONST( MaxTotalAttrInfo = 0xFFFF );
private:
diff --git a/storage/ndb/include/kernel/signaldata/TcKeyConf.hpp b/storage/ndb/include/kernel/signaldata/TcKeyConf.hpp
index b8562875ef5..fd8932c3c87 100644
--- a/storage/ndb/include/kernel/signaldata/TcKeyConf.hpp
+++ b/storage/ndb/include/kernel/signaldata/TcKeyConf.hpp
@@ -46,7 +46,7 @@ public:
*/
STATIC_CONST( StaticLength = 5 );
STATIC_CONST( OperationLength = 2 );
- STATIC_CONST( SimpleReadBit = (((Uint32)1) << 31) );
+ STATIC_CONST( DirtyReadBit = (((Uint32)1) << 31) );
private:
diff --git a/storage/ndb/include/ndb_version.h.in b/storage/ndb/include/ndb_version.h.in
index 5405ad4d7aa..6a479433b3b 100644
--- a/storage/ndb/include/ndb_version.h.in
+++ b/storage/ndb/include/ndb_version.h.in
@@ -88,5 +88,52 @@ Uint32 ndbGetOwnVersion();
#define NDBD_NODE_VERSION_REP NDB_MAKE_VERSION(6,1,1)
+#define NDBD_PREPARE_COPY_FRAG_VERSION NDB_MAKE_VERSION(6,2,1)
+#define NDBD_PREPARE_COPY_FRAG_V2_51 NDB_MAKE_VERSION(5,1,23)
+#define NDBD_PREPARE_COPY_FRAG_V2_62 NDB_MAKE_VERSION(6,2,8)
+#define NDBD_PREPARE_COPY_FRAG_V2_63 NDB_MAKE_VERSION(6,3,6)
+
+/**
+ * 0 = NO PREP COPY FRAG SUPPORT
+ * 1 = NO MAX PAGE SUPPORT
+ * 2 = LATEST VERSION
+ */
+static
+inline
+int
+ndb_check_prep_copy_frag_version(Uint32 version)
+{
+ if (version == NDB_VERSION_D)
+ return 2;
+
+ const Uint32 major = (version >> 16) & 0xFF;
+ const Uint32 minor = (version >> 8) & 0xFF;
+ if (major >= 6)
+ {
+ if (minor == 2)
+ {
+ if (version >= NDBD_PREPARE_COPY_FRAG_V2_62)
+ return 2;
+ if (version >= NDBD_PREPARE_COPY_FRAG_VERSION)
+ return 1;
+ return 0;
+ }
+ else if (minor == 3)
+ {
+ if (version >= NDBD_PREPARE_COPY_FRAG_V2_63)
+ return 2;
+ return 1;
+ }
+ return 2;
+ }
+ else if (major == 5 && minor == 1)
+ {
+ if (version >= NDBD_PREPARE_COPY_FRAG_V2_51)
+ return 2;
+ }
+
+ return 0;
+}
+
#endif
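
The helper returns 0, 1 or 2 so callers can gate first the PREPARE_COPY_FRAG signals and then the maxPage extension on the peer node's version. A small standalone sketch of the 6.2 branch only, restating the constants and assuming the usual (major << 16) | (minor << 8) | build encoding of NDB_MAKE_VERSION:

// Standalone sketch of the 6.2 branch of the check above; constants restated.
#include <cstdint>
typedef uint32_t Uint32;

#define NDB_MAKE_VERSION(A,B,C) (((A) << 16) | ((B) << 8) | (C))
#define NDBD_PREPARE_COPY_FRAG_VERSION NDB_MAKE_VERSION(6,2,1)
#define NDBD_PREPARE_COPY_FRAG_V2_62   NDB_MAKE_VERSION(6,2,8)

// 0 = no PREPARE_COPY_FRAG support, 1 = no maxPage support, 2 = full support
static int check_62(Uint32 version)
{
  if (version >= NDBD_PREPARE_COPY_FRAG_V2_62) return 2;
  if (version >= NDBD_PREPARE_COPY_FRAG_VERSION) return 1;
  return 0;
}

int main()
{
  // 6.2.0 predates the protocol, 6.2.5 lacks maxPage, 6.2.8 supports both.
  return (check_62(NDB_MAKE_VERSION(6,2,0)) == 0 &&
          check_62(NDB_MAKE_VERSION(6,2,5)) == 1 &&
          check_62(NDB_MAKE_VERSION(6,2,8)) == 2) ? 0 : 1;
}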
diff --git a/storage/ndb/include/ndbapi/Ndb.hpp b/storage/ndb/include/ndbapi/Ndb.hpp
index 995ce67e61d..dca19c2ead0 100644
--- a/storage/ndb/include/ndbapi/Ndb.hpp
+++ b/storage/ndb/include/ndbapi/Ndb.hpp
@@ -1056,6 +1056,7 @@ class Ndb
friend class NdbBlob;
friend class NdbImpl;
friend class Ndb_internal;
+ friend class NdbScanFilterImpl;
#endif
public:
diff --git a/storage/ndb/include/ndbapi/NdbOperation.hpp b/storage/ndb/include/ndbapi/NdbOperation.hpp
index 06111941df4..78dbadfd7ab 100644
--- a/storage/ndb/include/ndbapi/NdbOperation.hpp
+++ b/storage/ndb/include/ndbapi/NdbOperation.hpp
@@ -93,8 +93,9 @@ public:
,LM_CommittedRead ///< Ignore locks, read last committed value
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
= 2,
- LM_Dirty = 2
+ LM_Dirty = 2,
#endif
+ LM_SimpleRead = 3 ///< Read with shared lock, but release lock directly
};
/**
@@ -842,8 +843,10 @@ protected:
virtual ~NdbOperation();
void next(NdbOperation*); // Set next pointer
NdbOperation* next(); // Get next pointer
+
public:
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ NdbTransaction* getNdbTransaction();
const NdbOperation* next() const;
const NdbRecAttr* getFirstRecAttr() const;
#endif
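
The new LM_SimpleRead mode reads under a shared lock that is released as soon as the read completes, in contrast to LM_Read (lock held to commit) and LM_CommittedRead (no lock). A hedged NDB API usage sketch; the table "T" and columns "pk"/"val" are hypothetical and error handling is largely elided:

// Sketch only: requesting the new lock mode from the NDB API.
// Assumes a connected Ndb* ndb and a table "T" with columns "pk" and "val".
#include <NdbApi.hpp>

int read_with_simple_lock(Ndb* ndb)
{
  NdbTransaction* trans = ndb->startTransaction();
  if (trans == 0) return -1;

  NdbOperation* op = trans->getNdbOperation("T");
  op->readTuple(NdbOperation::LM_SimpleRead); // shared lock, released after the read
  op->equal("pk", 1);
  NdbRecAttr* val = op->getValue("val");

  int rc = trans->execute(NdbTransaction::Commit);
  int result = (rc == 0 && val != 0) ? (int)val->u_32_value() : -1;
  ndb->closeTransaction(trans);
  return result;
}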
diff --git a/storage/ndb/include/ndbapi/NdbPool.hpp b/storage/ndb/include/ndbapi/NdbPool.hpp
index 1963bf26448..44b6d7488f0 100644
--- a/storage/ndb/include/ndbapi/NdbPool.hpp
+++ b/storage/ndb/include/ndbapi/NdbPool.hpp
@@ -17,7 +17,8 @@ class Ndb;
class NdbPool;
bool
-create_instance(Uint32 max_ndb_objects,
+create_instance(Ndb_cluster_connection* cc,
+ Uint32 max_ndb_objects,
Uint32 no_conn_obj,
Uint32 init_no_ndb_objects);
diff --git a/storage/ndb/include/ndbapi/NdbScanFilter.hpp b/storage/ndb/include/ndbapi/NdbScanFilter.hpp
index 1ef62558560..4527012a6c4 100644
--- a/storage/ndb/include/ndbapi/NdbScanFilter.hpp
+++ b/storage/ndb/include/ndbapi/NdbScanFilter.hpp
@@ -17,6 +17,7 @@
#define NDB_SCAN_FILTER_HPP
#include <ndb_types.h>
+#include <ndbapi_limits.h>
/**
* @class NdbScanFilter
@@ -31,8 +32,13 @@ public:
/**
* Constructor
* @param op The NdbOperation that the filter belongs to (is applied to).
+ * @param abort_on_too_large abort transaction on filter too large
+ * default: true
+ * @param max_size Maximum size of generated filter in words
*/
- NdbScanFilter(class NdbOperation * op);
+ NdbScanFilter(class NdbOperation * op,
+ bool abort_on_too_large = true,
+ Uint32 max_size = NDB_MAX_SCANFILTER_SIZE_IN_WORDS);
~NdbScanFilter();
/**
@@ -166,6 +172,27 @@ public:
/** @} *********************************************************************/
#endif
+ enum Error {
+ FilterTooLarge = 4294
+ };
+
+ /**
+ * Get filter level error.
+ *
+ * Most errors are set only on operation level, and they abort the
+ * transaction. The error FilterTooLarge is set on filter level and
+ * by default it propagates to operation level and also aborts the
+ * transaction.
+ *
+ * If option abort_on_too_large is set to false, then FilterTooLarge
+ * does not propagate. One can then either ignore this error (in
+ * which case no filtering is done) or try to define a new filter
+ * immediately.
+ */
+ const class NdbError & getNdbError() const;
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ NdbOperation * getNdbOperation();
+#endif
private:
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
friend class NdbScanFilterImpl;
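
With abort_on_too_large set to false, a filter exceeding max_size no longer aborts the transaction; the caller can test the new filter-level error instead. A sketch of that pattern, assuming an already defined NdbScanOperation* and a numeric column 0; scan definition and execution are omitted:

// Sketch: handle FilterTooLarge locally instead of aborting the transaction.
#include <NdbApi.hpp>

int define_filter(NdbScanOperation* scanOp)
{
  NdbScanFilter filter(scanOp,
                       false /* abort_on_too_large */,
                       4096  /* max_size in words, well below the default cap */);

  filter.begin(NdbScanFilter::AND);
  filter.ge(0, (Uint32)100);        // keep rows where column 0 >= 100
  filter.end();

  if (filter.getNdbError().code == NdbScanFilter::FilterTooLarge)
  {
    // The filter did not fit: scan unfiltered or build a smaller filter.
    return 1;
  }
  return 0;
}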
diff --git a/storage/ndb/include/ndbapi/NdbTransaction.hpp b/storage/ndb/include/ndbapi/NdbTransaction.hpp
index 20c9c709e51..6a057655398 100644
--- a/storage/ndb/include/ndbapi/NdbTransaction.hpp
+++ b/storage/ndb/include/ndbapi/NdbTransaction.hpp
@@ -170,6 +170,15 @@ public:
#endif
};
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ /**
+ * Convenience method to fetch this transaction's Ndb* object
+ */
+ Ndb * getNdb() {
+ return theNdb;
+ }
+#endif
+
#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
/**
* Get an NdbOperation for a table.
diff --git a/storage/ndb/include/ndbapi/ndbapi_limits.h b/storage/ndb/include/ndbapi/ndbapi_limits.h
index 63399e4bd0a..e283913d059 100644
--- a/storage/ndb/include/ndbapi/ndbapi_limits.h
+++ b/storage/ndb/include/ndbapi/ndbapi_limits.h
@@ -26,4 +26,6 @@
#define NDB_MAX_TUPLE_SIZE (NDB_MAX_TUPLE_SIZE_IN_WORDS*4)
#define NDB_MAX_ACTIVE_EVENTS 100
+#define NDB_MAX_SCANFILTER_SIZE_IN_WORDS 50000
+
#endif
diff --git a/storage/ndb/include/util/ndb_rand.h b/storage/ndb/include/util/ndb_rand.h
new file mode 100644
index 00000000000..1521ca9c4ff
--- /dev/null
+++ b/storage/ndb/include/util/ndb_rand.h
@@ -0,0 +1,33 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef NDB_RAND_H
+#define NDB_RAND_H
+
+#define NDB_RAND_MAX 32767
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int ndb_rand(void);
+
+void ndb_srand(unsigned seed);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/storage/ndb/src/common/debugger/EventLogger.cpp b/storage/ndb/src/common/debugger/EventLogger.cpp
index 0964a54f906..068b0c6ac18 100644
--- a/storage/ndb/src/common/debugger/EventLogger.cpp
+++ b/storage/ndb/src/common/debugger/EventLogger.cpp
@@ -498,10 +498,10 @@ void getTextTransReportCounters(QQQQ) {
// -------------------------------------------------------------------
BaseString::snprintf(m_text, m_text_len,
"Trans. Count = %u, Commit Count = %u, "
- "Read Count = %u, Simple Read Count = %u,\n"
+ "Read Count = %u, Simple Read Count = %u, "
"Write Count = %u, AttrInfo Count = %u, "
- "Concurrent Operations = %u, Abort Count = %u\n"
- " Scans: %u Range scans: %u",
+ "Concurrent Operations = %u, Abort Count = %u"
+ " Scans = %u Range scans = %u",
theData[1],
theData[2],
theData[3],
@@ -797,9 +797,9 @@ void getTextBackupFailedToStart(QQQQ) {
}
void getTextBackupCompleted(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
- "Backup %u started from node %u completed\n"
- " StartGCP: %u StopGCP: %u\n"
- " #Records: %u #LogRecords: %u\n"
+ "Backup %u started from node %u completed."
+ " StartGCP: %u StopGCP: %u"
+ " #Records: %u #LogRecords: %u"
" Data: %u bytes Log: %u bytes",
theData[2], refToNode(theData[1]),
theData[3], theData[4], theData[6], theData[8],
diff --git a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
index 0d31cd5de7f..b4221cbec8e 100644
--- a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
@@ -640,5 +640,9 @@ const GsnName SignalNames [] = {
,{ GSN_ROUTE_ORD, "ROUTE_ORD" }
,{ GSN_NODE_VERSION_REP, "NODE_VERSION_REP" }
+
+ ,{ GSN_PREPARE_COPY_FRAG_REQ, "PREPARE_COPY_FRAG_REQ" }
+ ,{ GSN_PREPARE_COPY_FRAG_REF, "PREPARE_COPY_FRAG_REF" }
+ ,{ GSN_PREPARE_COPY_FRAG_CONF, "PREPARE_COPY_FRAG_CONF" }
};
const unsigned short NO_OF_SIGNAL_NAMES = sizeof(SignalNames)/sizeof(GsnName);
diff --git a/storage/ndb/src/common/debugger/signaldata/TcKeyConf.cpp b/storage/ndb/src/common/debugger/signaldata/TcKeyConf.cpp
index 65589f8cd6e..377863f9446 100644
--- a/storage/ndb/src/common/debugger/signaldata/TcKeyConf.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/TcKeyConf.cpp
@@ -51,11 +51,11 @@ printTCKEYCONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receive
(TcKeyConf::getMarkerFlag(confInfo) == 0)?"false":"true");
fprintf(output, "Operations:\n");
for(i = 0; i < noOfOp; i++) {
- if(sig->operations[i].attrInfoLen > TcKeyConf::SimpleReadBit)
+ if(sig->operations[i].attrInfoLen > TcKeyConf::DirtyReadBit)
fprintf(output,
" apiOperationPtr: H'%.8x, simplereadnode: %u\n",
sig->operations[i].apiOperationPtr,
- sig->operations[i].attrInfoLen & (~TcKeyConf::SimpleReadBit));
+ sig->operations[i].attrInfoLen & (~TcKeyConf::DirtyReadBit));
else
fprintf(output,
" apiOperationPtr: H'%.8x, attrInfoLen: %u\n",
diff --git a/storage/ndb/src/common/util/Makefile.am b/storage/ndb/src/common/util/Makefile.am
index d331cce7e5c..5379a425c49 100644
--- a/storage/ndb/src/common/util/Makefile.am
+++ b/storage/ndb/src/common/util/Makefile.am
@@ -24,7 +24,8 @@ libgeneral_la_SOURCES = \
uucode.c random.c version.c \
strdup.c \
ConfigValues.cpp ndb_init.c basestring_vsnprintf.c \
- Bitmask.cpp
+ Bitmask.cpp \
+ ndb_rand.c
EXTRA_PROGRAMS = testBitmask
testBitmask_SOURCES = testBitmask.cpp
diff --git a/storage/ndb/src/common/util/ndb_rand.c b/storage/ndb/src/common/util/ndb_rand.c
new file mode 100644
index 00000000000..4fcc483cd49
--- /dev/null
+++ b/storage/ndb/src/common/util/ndb_rand.c
@@ -0,0 +1,40 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <ndb_rand.h>
+
+static unsigned long next= 1;
+
+/**
+ * ndb_rand
+ *
+ * constant time, cheap, pseudo-random number generator.
+ *
+ * NDB_RAND_MAX assumed to be 32767
+ *
+ * This is the POSIX example for "generating the same sequence on
+ * different machines", although that is not one of our requirements.
+ */
+int ndb_rand(void)
+{
+ next= next * 1103515245 + 12345;
+ return((unsigned)(next/65536) % 32768);
+}
+
+void ndb_srand(unsigned seed)
+{
+ next= seed;
+}
+
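
ndb_rand() is a seedable linear congruential generator returning values in [0, NDB_RAND_MAX]; reseeding with the same value replays the same sequence. A minimal usage sketch built on exactly the two functions above:

// Sketch: reseeding replays the same sequence; values stay within NDB_RAND_MAX.
#include <ndb_rand.h>
#include <cassert>

int main()
{
  ndb_srand(1);
  int a = ndb_rand();
  int b = ndb_rand();

  ndb_srand(1);                 // same seed, same sequence
  assert(ndb_rand() == a);
  assert(ndb_rand() == b);

  for (int i = 0; i < 1000; i++)
  {
    int r = ndb_rand();
    assert(r >= 0 && r <= NDB_RAND_MAX);
  }
  return 0;
}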
diff --git a/storage/ndb/src/cw/cpcd/APIService.cpp b/storage/ndb/src/cw/cpcd/APIService.cpp
index 1c1cfb94cd4..f60abc08817 100644
--- a/storage/ndb/src/cw/cpcd/APIService.cpp
+++ b/storage/ndb/src/cw/cpcd/APIService.cpp
@@ -145,7 +145,7 @@ CPCDAPISession::CPCDAPISession(NDB_SOCKET_TYPE sock,
: SocketServer::Session(sock)
, m_cpcd(cpcd)
{
- m_input = new SocketInputStream(sock);
+ m_input = new SocketInputStream(sock, 7*24*60*60000);
m_output = new SocketOutputStream(sock);
m_parser = new Parser<CPCDAPISession>(commands, *m_input, true, true, true);
}
diff --git a/storage/ndb/src/kernel/blocks/ERROR_codes.txt b/storage/ndb/src/kernel/blocks/ERROR_codes.txt
index acfbf649522..4d4d4fcafc4 100644
--- a/storage/ndb/src/kernel/blocks/ERROR_codes.txt
+++ b/storage/ndb/src/kernel/blocks/ERROR_codes.txt
@@ -3,15 +3,15 @@ Next NDBCNTR 1002
Next NDBFS 2000
Next DBACC 3002
Next DBTUP 4029
-Next DBLQH 5045
+Next DBLQH 5047
Next DBDICT 6008
-Next DBDIH 7186
-Next DBTC 8053
+Next DBDIH 7193
+Next DBTC 8054
Next CMVMI 9000
Next BACKUP 10038
Next DBUTIL 11002
Next DBTUX 12008
-Next SUMA 13001
+Next SUMA 13034
TESTING NODE FAILURE, ARBITRATION
---------------------------------
@@ -157,6 +157,9 @@ And crash when all have "not" been sent
7027: Crash in master when changing state to LCP_TAB_SAVED
7018: Crash in master when changing state to LCP_TAB_SAVED
+7191: Crash when receiving LCP_COMPLETE_REP
7192: Crash in setLcpActiveStatusStart - when a dead node has missed two LCPs
+
ERROR CODES FOR TESTING NODE FAILURE, FAILURE IN COPY FRAGMENT PROCESS:
-----------------------------------------------------------------------
@@ -183,6 +186,8 @@ handling in DBTC to ensure that node failures are also well handled in
time-out handling. They can also be used to test multiple node failure
handling.
+5045: Crash in PREPARE_COPY_FRAG_REQ
+5046: Crash if LQHKEYREQ (NrCopy) comes when frag-state is incorrect
ERROR CODES FOR TESTING TIME-OUT HANDLING IN DBLQH
-------------------------------------------------
@@ -248,6 +253,8 @@ Delay execution of ABORTCONF signal 2 seconds to generate time-out.
8050: Send ZABORT_TIMEOUT_BREAK delayed
+8053: Crash in timeOutFoundLab, state CS_WAIT_COMMIT_CONF
+
ERROR CODES FOR TESTING TIME-OUT HANDLING IN DBTC
-------------------------------------------------
diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
index 7a992587010..3406176d7a8 100644
--- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
@@ -1124,6 +1124,38 @@ Cmvmi::execDUMP_STATE_ORD(Signal* signal)
}
#endif
#endif
+
+ if (arg == 9999)
+ {
+ Uint32 delay = 1000;
+ switch(signal->getLength()){
+ case 1:
+ break;
+ case 2:
+ delay = signal->theData[1];
+ break;
+ default:{
+ Uint32 dmin = signal->theData[1];
+ Uint32 dmax = signal->theData[2];
+ delay = dmin + (rand() % (dmax - dmin));
+ break;
+ }
+ }
+
+ signal->theData[0] = 9999;
+ if (delay == 0)
+ {
+ execNDB_TAMPER(signal);
+ }
+ else if (delay < 10)
+ {
+ sendSignal(reference(), GSN_NDB_TAMPER, signal, 1, JBB);
+ }
+ else
+ {
+ sendSignalWithDelay(reference(), GSN_NDB_TAMPER, signal, delay, 1);
+ }
+ }
}//Cmvmi::execDUMP_STATE_ORD()
void
diff --git a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
index 5bef13cd0b9..21826df28f9 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
@@ -318,6 +318,7 @@ public:
Uint8 noOfStartedChkpt;
MasterLCPConf::State lcpStateAtTakeOver;
+ Uint32 m_remove_node_from_table_lcp_id;
};
typedef Ptr<NodeRecord> NodeRecordPtr;
/**********************************************************************/
@@ -544,7 +545,8 @@ public:
TO_WAIT_ENDING = 21,
ENDING = 22,
- STARTING_LOCAL_FRAGMENTS = 24
+ STARTING_LOCAL_FRAGMENTS = 24,
+ PREPARE_COPY = 25
};
enum ToSlaveStatus {
TO_SLAVE_IDLE = 0,
@@ -555,6 +557,7 @@ public:
TO_SLAVE_COPY_COMPLETED = 5
};
Uint32 startGci;
+ Uint32 maxPage;
Uint32 toCopyNode;
Uint32 toCurrentFragid;
Uint32 toCurrentReplica;
@@ -671,6 +674,8 @@ private:
void execNODE_FAILREP(Signal *);
void execCOPY_FRAGCONF(Signal *);
void execCOPY_FRAGREF(Signal *);
+ void execPREPARE_COPY_FRAG_REF(Signal*);
+ void execPREPARE_COPY_FRAG_CONF(Signal*);
void execDIADDTABREQ(Signal *);
void execDIGETNODESREQ(Signal *);
void execDIRELEASEREQ(Signal *);
@@ -1113,6 +1118,7 @@ private:
void sendStartTo(Signal *, Uint32 takeOverPtr);
void startNextCopyFragment(Signal *, Uint32 takeOverPtr);
void toCopyFragLab(Signal *, Uint32 takeOverPtr);
+ void toStartCopyFrag(Signal *, TakeOverRecordPtr);
void startHsAddFragConfLab(Signal *);
void prepareSendCreateFragReq(Signal *, Uint32 takeOverPtr);
void sendUpdateTo(Signal *, Uint32 takeOverPtr, Uint32 updateState);
diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
index aff31d625f4..6ce281434c2 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp
@@ -259,6 +259,11 @@ Dbdih::Dbdih(Block_context& ctx):
addRecSignal(GSN_START_FRAGREF,
&Dbdih::execSTART_FRAGREF);
+
+ addRecSignal(GSN_PREPARE_COPY_FRAG_REF,
+ &Dbdih::execPREPARE_COPY_FRAG_REF);
+ addRecSignal(GSN_PREPARE_COPY_FRAG_CONF,
+ &Dbdih::execPREPARE_COPY_FRAG_CONF);
apiConnectRecord = 0;
connectRecord = 0;
diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index 762d4ea5141..5403ac5cc38 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -3155,6 +3155,94 @@ void Dbdih::toCopyFragLab(Signal* signal,
TakeOverRecordPtr takeOverPtr;
RETURN_IF_TAKE_OVER_INTERRUPTED(takeOverPtrI, takeOverPtr);
+ /**
+ * Inform starting node that TakeOver is about to start
+ */
+ Uint32 nodeId = takeOverPtr.p->toStartingNode;
+
+ Uint32 version = getNodeInfo(nodeId).m_version;
+ if (ndb_check_prep_copy_frag_version(version))
+ {
+ jam();
+ TabRecordPtr tabPtr;
+ tabPtr.i = takeOverPtr.p->toCurrentTabref;
+ ptrCheckGuard(tabPtr, ctabFileSize, tabRecord);
+
+ FragmentstorePtr fragPtr;
+ getFragstore(tabPtr.p, takeOverPtr.p->toCurrentFragid, fragPtr);
+ Uint32 nodes[MAX_REPLICAS];
+ extractNodeInfo(fragPtr.p, nodes);
+
+ PrepareCopyFragReq* req= (PrepareCopyFragReq*)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = takeOverPtrI;
+ req->tableId = takeOverPtr.p->toCurrentTabref;
+ req->fragId = takeOverPtr.p->toCurrentFragid;
+ req->copyNodeId = nodes[0]; // Src
+ req->startingNodeId = takeOverPtr.p->toStartingNode; // Dst
+ Uint32 ref = calcLqhBlockRef(takeOverPtr.p->toStartingNode);
+
+ sendSignal(ref, GSN_PREPARE_COPY_FRAG_REQ, signal,
+ PrepareCopyFragReq::SignalLength, JBB);
+
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::PREPARE_COPY;
+ return;
+ }
+
+ takeOverPtr.p->maxPage = RNIL;
+ toStartCopyFrag(signal, takeOverPtr);
+}
+
+void
+Dbdih::execPREPARE_COPY_FRAG_REF(Signal* signal)
+{
+ jamEntry();
+ PrepareCopyFragRef ref = *(PrepareCopyFragRef*)signal->getDataPtr();
+
+ TakeOverRecordPtr takeOverPtr;
+ RETURN_IF_TAKE_OVER_INTERRUPTED(ref.senderData, takeOverPtr);
+
+ ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::PREPARE_COPY);
+
+ /**
+ * Treat this as copy frag ref
+ */
+ CopyFragRef * cfref = (CopyFragRef*)signal->getDataPtrSend();
+ cfref->userPtr = ref.senderData;
+ cfref->startingNodeId = ref.startingNodeId;
+ cfref->errorCode = ref.errorCode;
+ cfref->tableId = ref.tableId;
+ cfref->fragId = ref.fragId;
+ cfref->sendingNodeId = ref.copyNodeId;
+ takeOverPtr.p->toMasterStatus = TakeOverRecord::COPY_FRAG;
+ execCOPY_FRAGREF(signal);
+}
+
+void
+Dbdih::execPREPARE_COPY_FRAG_CONF(Signal* signal)
+{
+ PrepareCopyFragConf conf = *(PrepareCopyFragConf*)signal->getDataPtr();
+
+ TakeOverRecordPtr takeOverPtr;
+ RETURN_IF_TAKE_OVER_INTERRUPTED(conf.senderData, takeOverPtr);
+
+ Uint32 version = getNodeInfo(refToNode(conf.senderRef)).m_version;
+ if (ndb_check_prep_copy_frag_version(version) >= 2)
+ {
+ jam();
+ takeOverPtr.p->maxPage = conf.maxPageNo;
+ }
+ else
+ {
+ jam();
+ takeOverPtr.p->maxPage = RNIL;
+ }
+ toStartCopyFrag(signal, takeOverPtr);
+}
+
+void
+Dbdih::toStartCopyFrag(Signal* signal, TakeOverRecordPtr takeOverPtr)
+{
CreateReplicaRecordPtr createReplicaPtr;
createReplicaPtr.i = 0;
ptrAss(createReplicaPtr, createReplicaRecord);
@@ -3178,8 +3266,8 @@ void Dbdih::toCopyFragLab(Signal* signal,
createReplicaPtr.p->hotSpareUse = true;
createReplicaPtr.p->dataNodeId = takeOverPtr.p->toStartingNode;
- prepareSendCreateFragReq(signal, takeOverPtrI);
-}//Dbdih::toCopyFragLab()
+ prepareSendCreateFragReq(signal, takeOverPtr.i);
+}//Dbdih::toStartCopyFrag()
void Dbdih::prepareSendCreateFragReq(Signal* signal, Uint32 takeOverPtrI)
{
@@ -3412,10 +3500,12 @@ void Dbdih::execCREATE_FRAGCONF(Signal* signal)
copyFragReq->schemaVersion = tabPtr.p->schemaVersion;
copyFragReq->distributionKey = fragPtr.p->distributionKey;
copyFragReq->gci = gci;
- copyFragReq->nodeCount = extractNodeInfo(fragPtr.p,
- copyFragReq->nodeList);
+ Uint32 len = copyFragReq->nodeCount =
+ extractNodeInfo(fragPtr.p,
+ copyFragReq->nodeList);
+ copyFragReq->nodeList[len] = takeOverPtr.p->maxPage;
sendSignal(ref, GSN_COPY_FRAGREQ, signal,
- CopyFragReq::SignalLength + copyFragReq->nodeCount, JBB);
+ CopyFragReq::SignalLength + len, JBB);
} else {
ndbrequire(takeOverPtr.p->toMasterStatus == TakeOverRecord::COMMIT_CREATE);
jam();
@@ -4576,13 +4666,22 @@ void Dbdih::checkTakeOverInMasterStartNodeFailure(Signal* signal,
ok = true;
jam();
//-----------------------------------------------------------------------
- // The starting node will discover the problem. We will receive either
+ // The copying node will discover the problem. We will receive either
// COPY_FRAGREQ or COPY_FRAGCONF and then we can release the take over
// record and end the process. If the copying node should also die then
// we will try to send prepare create fragment and will then discover
// that the starting node has failed.
//-----------------------------------------------------------------------
break;
+ case TakeOverRecord::PREPARE_COPY:
+ ok = true;
+ jam();
+ /**
+ * We're waiting for the starting node...which just died...
+ * endTakeOver
+ */
+ endTakeOver(takeOverPtr.i);
+ break;
case TakeOverRecord::COPY_ACTIVE:
ok = true;
jam();
@@ -5069,6 +5168,18 @@ void Dbdih::startRemoveFailedNode(Signal* signal, NodeRecordPtr failedNodePtr)
return;
}
+ /**
+ * If the node has not completed the LCP,
+ * we need to remove it as undo might not be complete
+ * bug#31257
+ */
+ failedNodePtr.p->m_remove_node_from_table_lcp_id = RNIL;
+ if (c_lcpState.m_LCP_COMPLETE_REP_Counter_LQH.isWaitingFor(failedNodePtr.i))
+ {
+ jam();
+ failedNodePtr.p->m_remove_node_from_table_lcp_id = SYSFILE->latestLCP_ID;
+ }
+
jam();
signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
signal->theData[1] = failedNodePtr.i;
@@ -5710,6 +5821,11 @@ void Dbdih::removeNodeFromTable(Signal* signal,
return;
}//if
+ NodeRecordPtr nodePtr;
+ nodePtr.i = nodeId;
+ ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+ const Uint32 lcpId = nodePtr.p->m_remove_node_from_table_lcp_id;
+
/**
* For each fragment
*/
@@ -5717,7 +5833,6 @@ void Dbdih::removeNodeFromTable(Signal* signal,
Uint32 noOfRemovedLcpReplicas = 0; // No of replicas in LCP removed
Uint32 noOfRemainingLcpReplicas = 0;// No of replicas in LCP remaining
- //const Uint32 lcpId = SYSFILE->latestLCP_ID;
const bool lcpOngoingFlag = (tabPtr.p->tabLcpStatus== TabRecord::TLS_ACTIVE);
const bool unlogged = (tabPtr.p->tabStorage != TabRecord::ST_NORMAL);
@@ -5752,6 +5867,23 @@ void Dbdih::removeNodeFromTable(Signal* signal,
noOfRemovedLcpReplicas ++;
replicaPtr.p->lcpOngoingFlag = false;
}
+
+ if (lcpId != RNIL)
+ {
+ jam();
+ Uint32 lcpNo = prevLcpNo(replicaPtr.p->nextLcp);
+ if (replicaPtr.p->lcpStatus[lcpNo] == ZVALID &&
+ replicaPtr.p->lcpId[lcpNo] == SYSFILE->latestLCP_ID)
+ {
+ jam();
+ replicaPtr.p->lcpStatus[lcpNo] = ZINVALID;
+ replicaPtr.p->lcpId[lcpNo] = 0;
+ replicaPtr.p->nextLcp = lcpNo;
+ ndbout_c("REMOVING lcp: %u from table: %u frag: %u node: %u",
+ SYSFILE->latestLCP_ID,
+ tabPtr.i, fragNo, nodeId);
+ }
+ }
}
}
if (!found)
@@ -10898,6 +11030,8 @@ void Dbdih::execLCP_COMPLETE_REP(Signal* signal)
{
jamEntry();
+ CRASH_INSERTION(7191);
+
#if 0
g_eventLogger.info("LCP_COMPLETE_REP");
printLCP_COMPLETE_REP(stdout,
@@ -13657,6 +13791,7 @@ void Dbdih::setLcpActiveStatusStart(Signal* signal)
// It must be taken over with the copy fragment process after a system
// crash. We indicate this by setting the active status to TAKE_OVER.
/*-------------------------------------------------------------------*/
+ c_lcpState.m_participatingLQH.set(nodePtr.i);
nodePtr.p->activeStatus = Sysfile::NS_TakeOver;
//break; // Fall through
case Sysfile::NS_TakeOver:{
@@ -13699,6 +13834,7 @@ void Dbdih::setLcpActiveStatusStart(Signal* signal)
break;
case Sysfile::NS_ActiveMissed_2:
jam();
+ CRASH_INSERTION(7192);
if ((nodePtr.p->nodeStatus == NodeRecord::ALIVE) &&
(!nodePtr.p->copyCompleted)) {
jam();
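
Taken together, the DIH hunks above insert a prepare round-trip in front of the existing copy-fragment protocol. The summary below is illustrative only (a plain program printing the message order for one fragment), not block code:

// Illustrative only: the message order for one fragment during take-over.
#include <cstdio>

int main()
{
  const char* steps[] = {
    "toCopyFragLab:           master DIH picks the next fragment to copy",
    "PREPARE_COPY_FRAG_REQ:   master -> starting node LQH",
    "PREPARE_COPY_FRAG_CONF:  starting node -> master (maxPageNo when version check >= 2)",
    "toStartCopyFrag:         master resumes the existing CREATE_FRAGREQ bookkeeping",
    "COPY_FRAGREQ:            master -> copying node LQH, maxPage appended after nodeList"
  };
  for (unsigned i = 0; i < sizeof(steps) / sizeof(steps[0]); i++)
    printf("%u. %s\n", i + 1u, steps[i]);
  return 0;
}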
diff --git a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
index 6f8e5569831..95cad98b81c 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
@@ -2025,7 +2025,6 @@ public:
Uint8 reclenAiLqhkey;
Uint8 m_offset_current_keybuf;
Uint8 replicaType;
- Uint8 simpleRead;
Uint8 seqNoReplica;
Uint8 tcNodeFailrec;
Uint8 m_disk_table;
@@ -2145,6 +2144,7 @@ private:
void execSTORED_PROCCONF(Signal* signal);
void execSTORED_PROCREF(Signal* signal);
void execCOPY_FRAGREQ(Signal* signal);
+ void execPREPARE_COPY_FRAG_REQ(Signal* signal);
void execUPDATE_FRAG_DIST_KEY_ORD(Signal*);
void execCOPY_ACTIVEREQ(Signal* signal);
void execCOPY_STATEREQ(Signal* signal);
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
index d6411ee1cb9..db6d201575f 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
@@ -310,6 +310,9 @@ Dblqh::Dblqh(Block_context& ctx):
addRecSignal(GSN_UPDATE_FRAG_DIST_KEY_ORD,
&Dblqh::execUPDATE_FRAG_DIST_KEY_ORD);
+ addRecSignal(GSN_PREPARE_COPY_FRAG_REQ,
+ &Dblqh::execPREPARE_COPY_FRAG_REQ);
+
initData();
#ifdef VM_TRACE
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index efb88bfccd2..e0449e08ddd 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -3496,7 +3496,6 @@ void Dblqh::execLQHKEYREQ(Signal* signal)
regTcPtr->dirtyOp = LqhKeyReq::getDirtyFlag(Treqinfo);
regTcPtr->opExec = LqhKeyReq::getInterpretedFlag(Treqinfo);
regTcPtr->opSimple = LqhKeyReq::getSimpleFlag(Treqinfo);
- regTcPtr->simpleRead = op == ZREAD && regTcPtr->opSimple;
regTcPtr->seqNoReplica = LqhKeyReq::getSeqNoReplica(Treqinfo);
UintR TreclenAiLqhkey = LqhKeyReq::getAIInLqhKeyReq(Treqinfo);
regTcPtr->apiVersionNo = 0;
@@ -3513,9 +3512,15 @@ void Dblqh::execLQHKEYREQ(Signal* signal)
regTcPtr->lockType =
op == ZREAD_EX ? ZUPDATE : (Operation_t) op == ZWRITE ? ZINSERT : (Operation_t) op;
}
+
+ if (regTcPtr->dirtyOp)
+ {
+ ndbrequire(regTcPtr->opSimple);
+ }
- CRASH_INSERTION2(5041, regTcPtr->simpleRead &&
- refToNode(signal->senderBlockRef()) != cownNodeid);
+ CRASH_INSERTION2(5041, (op == ZREAD &&
+ (regTcPtr->opSimple || regTcPtr->dirtyOp) &&
+ refToNode(signal->senderBlockRef()) != cownNodeid));
regTcPtr->reclenAiLqhkey = TreclenAiLqhkey;
regTcPtr->currReclenAi = TreclenAiLqhkey;
@@ -3665,6 +3670,7 @@ void Dblqh::execLQHKEYREQ(Signal* signal)
{
ndbout_c("fragptr.p->fragStatus: %d",
fragptr.p->fragStatus);
+ CRASH_INSERTION(5046);
}
ndbassert(fragptr.p->fragStatus == Fragrecord::ACTIVE_CREATION);
fragptr.p->m_copy_started_state = Fragrecord::AC_NR_COPY;
@@ -3687,8 +3693,8 @@ void Dblqh::execLQHKEYREQ(Signal* signal)
Uint8 TdistKey = LqhKeyReq::getDistributionKey(TtotReclenAi);
if ((tfragDistKey != TdistKey) &&
(regTcPtr->seqNoReplica == 0) &&
- (regTcPtr->dirtyOp == ZFALSE) &&
- (regTcPtr->simpleRead == ZFALSE)) {
+ (regTcPtr->dirtyOp == ZFALSE))
+ {
/* ----------------------------------------------------------------------
* WE HAVE DIFFERENT OPINION THAN THE DIH THAT STARTED THE TRANSACTION.
* THE REASON COULD BE THAT THIS IS AN OLD DISTRIBUTION WHICH IS NO LONGER
@@ -4778,7 +4784,18 @@ void Dblqh::tupkeyConfLab(Signal* signal)
TRACE_OP(regTcPtr, "TUPKEYCONF");
- if (regTcPtr->simpleRead) {
+ if (readLen != 0)
+ {
+ jam();
+
+ /* SET BIT 15 IN REQINFO */
+ LqhKeyReq::setApplicationAddressFlag(regTcPtr->reqinfo, 1);
+ regTcPtr->readlenAi = readLen;
+ }//if
+
+ if (regTcPtr->operation == ZREAD &&
+ (regTcPtr->opSimple || regTcPtr->dirtyOp))
+ {
jam();
/* ----------------------------------------------------------------------
* THE OPERATION IS A SIMPLE READ.
@@ -4792,14 +4809,6 @@ void Dblqh::tupkeyConfLab(Signal* signal)
commitContinueAfterBlockedLab(signal);
return;
}//if
- if (readLen != 0)
- {
- jam();
-
- /* SET BIT 15 IN REQINFO */
- LqhKeyReq::setApplicationAddressFlag(regTcPtr->reqinfo, 1);
- regTcPtr->readlenAi = readLen;
- }//if
regTcPtr->totSendlenAi = writeLen;
ndbrequire(regTcPtr->totSendlenAi == regTcPtr->currTupAiLen);
@@ -5178,12 +5187,15 @@ void Dblqh::packLqhkeyreqLab(Signal* signal)
/* */
/* ------------------------------------------------------------------------- */
sendLqhkeyconfTc(signal, regTcPtr->tcBlockref);
- if (regTcPtr->dirtyOp != ZTRUE) {
+ if (! (regTcPtr->dirtyOp ||
+ (regTcPtr->operation == ZREAD && regTcPtr->opSimple)))
+ {
jam();
regTcPtr->transactionState = TcConnectionrec::PREPARED;
releaseOprec(signal);
} else {
jam();
+
/*************************************************************>*/
/* DIRTY WRITES ARE USED IN TWO SITUATIONS. THE FIRST */
/* SITUATION IS WHEN THEY ARE USED TO UPDATE COUNTERS AND*/
@@ -6406,8 +6418,8 @@ void Dblqh::commitContinueAfterBlockedLab(Signal* signal)
Ptr<TcConnectionrec> regTcPtr = tcConnectptr;
Ptr<Fragrecord> regFragptr = fragptr;
Uint32 operation = regTcPtr.p->operation;
- Uint32 simpleRead = regTcPtr.p->simpleRead;
Uint32 dirtyOp = regTcPtr.p->dirtyOp;
+ Uint32 opSimple = regTcPtr.p->opSimple;
if (regTcPtr.p->activeCreat != Fragrecord::AC_IGNORED) {
if (operation != ZREAD) {
TupCommitReq * const tupCommitReq =
@@ -6465,20 +6477,29 @@ void Dblqh::commitContinueAfterBlockedLab(Signal* signal)
EXECUTE_DIRECT(acc, GSN_ACC_COMMITREQ, signal, 1);
}
- if (simpleRead) {
+ if (dirtyOp)
+ {
jam();
-/* ------------------------------------------------------------------------- */
-/*THE OPERATION WAS A SIMPLE READ THUS THE COMMIT PHASE IS ONLY NEEDED TO */
-/*RELEASE THE LOCKS. AT THIS POINT IN THE CODE THE LOCKS ARE RELEASED AND WE */
-/*ARE IN A POSITION TO SEND LQHKEYCONF TO TC. WE WILL ALSO RELEASE ALL */
-/*RESOURCES BELONGING TO THIS OPERATION SINCE NO MORE WORK WILL BE */
-/*PERFORMED. */
-/* ------------------------------------------------------------------------- */
+ /**
+ * The dirtyRead does not send anything but TRANSID_AI from LDM
+ */
fragptr = regFragptr;
tcConnectptr = regTcPtr;
cleanUp(signal);
return;
- }//if
+ }
+
+ /**
+ * The simpleRead will send a LQHKEYCONF
+ * but have already released the locks
+ */
+ if (opSimple)
+ {
+ fragptr = regFragptr;
+ tcConnectptr = regTcPtr;
+ packLqhkeyreqLab(signal);
+ return;
+ }
}
}//if
jamEntry();
@@ -7088,7 +7109,7 @@ void Dblqh::abortStateHandlerLab(Signal* signal)
/* ------------------------------------------------------------------------- */
return;
}//if
- if (regTcPtr->simpleRead) {
+ if (regTcPtr->opSimple) {
jam();
/* ------------------------------------------------------------------------- */
/*A SIMPLE READ IS CURRENTLY RELEASING THE LOCKS OR WAITING FOR ACCESS TO */
@@ -7356,7 +7377,8 @@ void Dblqh::continueAbortLab(Signal* signal)
void Dblqh::continueAfterLogAbortWriteLab(Signal* signal)
{
TcConnectionrec * const regTcPtr = tcConnectptr.p;
- if (regTcPtr->simpleRead) {
+ if (regTcPtr->operation == ZREAD && regTcPtr->dirtyOp)
+ {
jam();
TcKeyRef * const tcKeyRef = (TcKeyRef *) signal->getDataPtrSend();
@@ -10062,6 +10084,86 @@ Dblqh::calculateHash(Uint32 tableId, const Uint32* src)
return md5_hash(Tmp, keyLen);
}//Dblqh::calculateHash()
+/**
+ * PREPARE COPY FRAG REQ
+ */
+void
+Dblqh::execPREPARE_COPY_FRAG_REQ(Signal* signal)
+{
+ jamEntry();
+ PrepareCopyFragReq req = *(PrepareCopyFragReq*)signal->getDataPtr();
+
+ CRASH_INSERTION(5045);
+
+ tabptr.i = req.tableId;
+ ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
+
+ Uint32 max_page = RNIL;
+
+ if (getOwnNodeId() != req.startingNodeId)
+ {
+ jam();
+ /**
+ * This is currently dead code...
+ * but is provided so we can impl. a better scan+delete on
+ * starting node wo/ having to change running node
+ */
+ ndbrequire(getOwnNodeId() == req.copyNodeId);
+ c_tup->get_frag_info(req.tableId, req.fragId, &max_page);
+
+ PrepareCopyFragConf* conf = (PrepareCopyFragConf*)signal->getDataPtrSend();
+ conf->senderData = req.senderData;
+ conf->senderRef = reference();
+ conf->tableId = req.tableId;
+ conf->fragId = req.fragId;
+ conf->copyNodeId = req.copyNodeId;
+ conf->startingNodeId = req.startingNodeId;
+ conf->maxPageNo = max_page;
+ sendSignal(req.senderRef, GSN_PREPARE_COPY_FRAG_CONF,
+ signal, PrepareCopyFragConf::SignalLength, JBB);
+
+ return;
+ }
+
+ if (! DictTabInfo::isOrderedIndex(tabptr.p->tableType))
+ {
+ jam();
+ ndbrequire(getFragmentrec(signal, req.fragId));
+
+ /**
+ *
+ */
+ if (cstartType == NodeState::ST_SYSTEM_RESTART)
+ {
+ jam();
+ signal->theData[0] = fragptr.p->tabRef;
+ signal->theData[1] = fragptr.p->fragId;
+ sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB);
+ }
+
+
+ /**
+ *
+ */
+ fragptr.p->m_copy_started_state = Fragrecord::AC_IGNORED;
+ fragptr.p->fragStatus = Fragrecord::ACTIVE_CREATION;
+ fragptr.p->logFlag = Fragrecord::STATE_FALSE;
+
+ c_tup->get_frag_info(req.tableId, req.fragId, &max_page);
+ }
+
+ PrepareCopyFragConf* conf = (PrepareCopyFragConf*)signal->getDataPtrSend();
+ conf->senderData = req.senderData;
+ conf->senderRef = reference();
+ conf->tableId = req.tableId;
+ conf->fragId = req.fragId;
+ conf->copyNodeId = req.copyNodeId;
+ conf->startingNodeId = req.startingNodeId;
+ conf->maxPageNo = max_page;
+ sendSignal(req.senderRef, GSN_PREPARE_COPY_FRAG_CONF,
+ signal, PrepareCopyFragConf::SignalLength, JBB);
+}
+
/* *************************************** */
/* COPY_FRAGREQ: Start copying a fragment */
/* *************************************** */
@@ -10097,6 +10199,13 @@ void Dblqh::execCOPY_FRAGREQ(Signal* signal)
for (i = 0; i<nodeCount; i++)
nodemask.set(copyFragReq->nodeList[i]);
}
+ Uint32 maxPage = copyFragReq->nodeList[nodeCount];
+ Uint32 version = getNodeInfo(refToNode(userRef)).m_version;
+ if (ndb_check_prep_copy_frag_version(version) < 2)
+ {
+ jam();
+ maxPage = RNIL;
+ }
if (DictTabInfo::isOrderedIndex(tabptr.p->tableType)) {
jam();
@@ -10172,14 +10281,15 @@ void Dblqh::execCOPY_FRAGREQ(Signal* signal)
req->requestInfo = 0;
AccScanReq::setLockMode(req->requestInfo, 0);
AccScanReq::setReadCommittedFlag(req->requestInfo, 0);
- AccScanReq::setNRScanFlag(req->requestInfo, gci ? 1 : 0);
+ AccScanReq::setNRScanFlag(req->requestInfo, 1);
AccScanReq::setNoDiskScanFlag(req->requestInfo, 1);
req->transId1 = tcConnectptr.p->transid[0];
req->transId2 = tcConnectptr.p->transid[1];
req->savePointId = tcConnectptr.p->savePointId;
+ req->maxPage = maxPage;
sendSignal(scanptr.p->scanBlockref, GSN_ACC_SCANREQ, signal,
- AccScanReq::SignalLength, JBB);
+ AccScanReq::SignalLength + 1, JBB);
if (! nodemask.isclear())
{
@@ -14084,11 +14194,16 @@ void Dblqh::execSTART_FRAGREQ(Signal* signal)
fragptr.p->fragStatus = Fragrecord::ACTIVE_CREATION;
}
- c_tup->disk_restart_mark_no_lcp(tabptr.i, fragId);
+ c_tup->disk_restart_lcp_id(tabptr.i, fragId, RNIL);
jamEntry();
-
return;
- }//if
+ }
+ else
+ {
+ jam();
+ c_tup->disk_restart_lcp_id(tabptr.i, fragId, lcpId);
+ jamEntry();
+ }
c_lcpId = (c_lcpId == 0 ? lcpId : c_lcpId);
c_lcpId = (c_lcpId < lcpId ? c_lcpId : lcpId);
@@ -19022,7 +19137,6 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal)
ndbout << " operation = " << tcRec.p->operation<<endl;
ndbout << " tcNodeFailrec = " << tcRec.p->tcNodeFailrec
<< " seqNoReplica = " << tcRec.p->seqNoReplica
- << " simpleRead = " << tcRec.p->simpleRead
<< endl;
ndbout << " replicaType = " << tcRec.p->replicaType
<< " reclenAiLqhkey = " << tcRec.p->reclenAiLqhkey
diff --git a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
index 75d79ba737f..3d5e52a525d 100644
--- a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
@@ -786,6 +786,7 @@ public:
UintR apiConnect; /* POINTER TO API CONNECT RECORD */
UintR nextTcConnect; /* NEXT TC RECORD*/
Uint8 dirtyOp;
+ Uint8 opSimple;
Uint8 lastReplicaNo; /* NUMBER OF THE LAST REPLICA IN THE OPERATION */
Uint8 noOfNodes; /* TOTAL NUMBER OF NODES IN OPERATION */
Uint8 operation; /* OPERATION TYPE */
@@ -886,13 +887,8 @@ public:
Uint8 opExec;
Uint8 unused;
+ Uint8 unused1;
- /**
- * IS THE OPERATION A SIMPLE TRANSACTION
- * 0 = NO, 1 = YES
- */
- Uint8 opSimple;
-
//---------------------------------------------------
// Second 16 byte cache line in second 64 byte cache
// line. Diverse use.
@@ -1464,7 +1460,7 @@ private:
void releaseAttrinfo();
void releaseGcp(Signal* signal);
void releaseKeys();
- void releaseSimpleRead(Signal*, ApiConnectRecordPtr, TcConnectRecord*);
+ void releaseDirtyRead(Signal*, ApiConnectRecordPtr, TcConnectRecord*);
void releaseDirtyWrite(Signal* signal);
void releaseTcCon();
void releaseTcConnectFail(Signal* signal);
@@ -1620,7 +1616,7 @@ private:
void startphase1x010Lab(Signal* signal);
void lqhKeyConf_checkTransactionState(Signal * signal,
- ApiConnectRecord * const regApiPtr);
+ Ptr<ApiConnectRecord> regApiPtr);
void checkDropTab(Signal* signal);
diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
index 887e6f848b1..ce20059e663 100644
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -20,6 +20,7 @@
#include <RefConvert.hpp>
#include <ndb_limits.h>
#include <my_sys.h>
+#include <ndb_rand.h>
#include <signaldata/EventReport.hpp>
#include <signaldata/TcKeyReq.hpp>
@@ -2790,9 +2791,9 @@ void Dbtc::execTCKEYREQ(Signal* signal)
Uint8 TNoDiskFlag = TcKeyReq::getNoDiskFlag(Treqinfo);
Uint8 TexecuteFlag = TexecFlag;
- regCachePtr->opSimple = TSimpleFlag;
- regCachePtr->opExec = TInterpretedFlag;
regTcPtr->dirtyOp = TDirtyFlag;
+ regTcPtr->opSimple = TSimpleFlag;
+ regCachePtr->opExec = TInterpretedFlag;
regCachePtr->distributionKeyIndicator = TDistrKeyFlag;
regCachePtr->m_no_disk_flag = TNoDiskFlag;
@@ -3246,9 +3247,10 @@ void Dbtc::sendlqhkeyreq(Signal* signal,
LqhKeyReq::setScanTakeOverFlag(tslrAttrLen, regCachePtr->scanTakeOverInd);
Tdata10 = 0;
- sig0 = regCachePtr->opSimple;
+ sig0 = regTcPtr->opSimple;
sig1 = regTcPtr->operation;
- bool simpleRead = (sig1 == ZREAD && sig0 == ZTRUE);
+ sig2 = regTcPtr->dirtyOp;
+ bool dirtyRead = (sig1 == ZREAD && sig2 == ZTRUE);
LqhKeyReq::setKeyLen(Tdata10, regCachePtr->keylen);
LqhKeyReq::setLastReplicaNo(Tdata10, regTcPtr->lastReplicaNo);
if (unlikely(version < NDBD_ROWID_VERSION))
@@ -3261,7 +3263,7 @@ void Dbtc::sendlqhkeyreq(Signal* signal,
// Indicate Application Reference is present in bit 15
/* ---------------------------------------------------------------------- */
LqhKeyReq::setApplicationAddressFlag(Tdata10, 1);
- LqhKeyReq::setDirtyFlag(Tdata10, regTcPtr->dirtyOp);
+ LqhKeyReq::setDirtyFlag(Tdata10, sig2);
LqhKeyReq::setInterpretedFlag(Tdata10, regCachePtr->opExec);
LqhKeyReq::setSimpleFlag(Tdata10, sig0);
LqhKeyReq::setOperation(Tdata10, sig1);
@@ -3322,7 +3324,7 @@ void Dbtc::sendlqhkeyreq(Signal* signal,
sig5 = regTcPtr->clientData;
sig6 = regCachePtr->scanInfo;
- if (! simpleRead)
+ if (! dirtyRead)
{
regApiPtr->m_transaction_nodes.set(regTcPtr->tcNodedata[0]);
regApiPtr->m_transaction_nodes.set(regTcPtr->tcNodedata[1]);
@@ -3395,7 +3397,6 @@ void Dbtc::packLqhkeyreq040Lab(Signal* signal,
BlockReference TBRef)
{
TcConnectRecord * const regTcPtr = tcConnectptr.p;
- CacheRecord * const regCachePtr = cachePtr.p;
#ifdef ERROR_INSERT
ApiConnectRecord * const regApiPtr = apiConnectptr.p;
if (ERROR_INSERTED(8009)) {
@@ -3420,8 +3421,8 @@ void Dbtc::packLqhkeyreq040Lab(Signal* signal,
if (anAttrBufIndex == RNIL) {
UintR TtcTimer = ctcTimer;
UintR Tread = (regTcPtr->operation == ZREAD);
- UintR Tsimple = (regCachePtr->opSimple == ZTRUE);
- UintR Tboth = Tread & Tsimple;
+ UintR Tdirty = (regTcPtr->dirtyOp == ZTRUE);
+ UintR Tboth = Tread & Tdirty;
setApiConTimer(apiConnectptr.i, TtcTimer, __LINE__);
jam();
/*--------------------------------------------------------------------
@@ -3430,7 +3431,7 @@ void Dbtc::packLqhkeyreq040Lab(Signal* signal,
releaseAttrinfo();
if (Tboth) {
jam();
- releaseSimpleRead(signal, apiConnectptr, tcConnectptr.p);
+ releaseDirtyRead(signal, apiConnectptr, tcConnectptr.p);
return;
}//if
regTcPtr->tcConnectstate = OS_OPERATING;
@@ -3490,11 +3491,11 @@ void Dbtc::releaseAttrinfo()
}//Dbtc::releaseAttrinfo()
/* ========================================================================= */
-/* ------- RELEASE ALL RECORDS CONNECTED TO A SIMPLE OPERATION ------- */
+/* ------- RELEASE ALL RECORDS CONNECTED TO A DIRTY OPERATION ------- */
/* ========================================================================= */
-void Dbtc::releaseSimpleRead(Signal* signal,
- ApiConnectRecordPtr regApiPtr,
- TcConnectRecord* regTcPtr)
+void Dbtc::releaseDirtyRead(Signal* signal,
+ ApiConnectRecordPtr regApiPtr,
+ TcConnectRecord* regTcPtr)
{
Uint32 Ttckeyrec = regApiPtr.p->tckeyrec;
Uint32 TclientData = regTcPtr->clientData;
@@ -3504,7 +3505,7 @@ void Dbtc::releaseSimpleRead(Signal* signal,
ConnectionState state = regApiPtr.p->apiConnectstate;
regApiPtr.p->tcSendArray[Ttckeyrec] = TclientData;
- regApiPtr.p->tcSendArray[Ttckeyrec + 1] = TcKeyConf::SimpleReadBit | Tnode;
+ regApiPtr.p->tcSendArray[Ttckeyrec + 1] = TcKeyConf::DirtyReadBit | Tnode;
regApiPtr.p->tckeyrec = Ttckeyrec + 2;
unlinkReadyTcCon(signal);
@@ -3534,8 +3535,8 @@ void Dbtc::releaseSimpleRead(Signal* signal,
/**
* Emulate LQHKEYCONF
*/
- lqhKeyConf_checkTransactionState(signal, regApiPtr.p);
-}//Dbtc::releaseSimpleRead()
+ lqhKeyConf_checkTransactionState(signal, regApiPtr);
+}//Dbtc::releaseDirtyRead()
/* ------------------------------------------------------------------------- */
/* ------- CHECK IF ALL TC CONNECTIONS ARE COMPLETED ------- */
@@ -3717,12 +3718,13 @@ void Dbtc::execLQHKEYCONF(Signal* signal)
TCKEY_abort(signal, 29);
return;
}//if
- ApiConnectRecord * const regApiPtr =
- &localApiConnectRecord[TapiConnectptrIndex];
+ Ptr<ApiConnectRecord> regApiPtr;
+ regApiPtr.i = TapiConnectptrIndex;
+ regApiPtr.p = &localApiConnectRecord[TapiConnectptrIndex];
apiConnectptr.i = TapiConnectptrIndex;
- apiConnectptr.p = regApiPtr;
- compare_transid1 = regApiPtr->transid[0] ^ Ttrans1;
- compare_transid2 = regApiPtr->transid[1] ^ Ttrans2;
+ apiConnectptr.p = regApiPtr.p;
+ compare_transid1 = regApiPtr.p->transid[0] ^ Ttrans1;
+ compare_transid2 = regApiPtr.p->transid[1] ^ Ttrans2;
compare_transid1 = compare_transid1 | compare_transid2;
if (compare_transid1 != 0) {
warningReport(signal, 24);
@@ -3734,25 +3736,25 @@ void Dbtc::execLQHKEYCONF(Signal* signal)
systemErrorLab(signal, __LINE__);
}//if
if (ERROR_INSERTED(8003)) {
- if (regApiPtr->apiConnectstate == CS_STARTED) {
+ if (regApiPtr.p->apiConnectstate == CS_STARTED) {
CLEAR_ERROR_INSERT_VALUE;
return;
}//if
}//if
if (ERROR_INSERTED(8004)) {
- if (regApiPtr->apiConnectstate == CS_RECEIVING) {
+ if (regApiPtr.p->apiConnectstate == CS_RECEIVING) {
CLEAR_ERROR_INSERT_VALUE;
return;
}//if
}//if
if (ERROR_INSERTED(8005)) {
- if (regApiPtr->apiConnectstate == CS_REC_COMMITTING) {
+ if (regApiPtr.p->apiConnectstate == CS_REC_COMMITTING) {
CLEAR_ERROR_INSERT_VALUE;
return;
}//if
}//if
if (ERROR_INSERTED(8006)) {
- if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
+ if (regApiPtr.p->apiConnectstate == CS_START_COMMITTING) {
CLEAR_ERROR_INSERT_VALUE;
return;
}//if
@@ -3767,10 +3769,12 @@ void Dbtc::execLQHKEYCONF(Signal* signal)
regTcPtr->lastLqhNodeId = refToNode(tlastLqhBlockref);
regTcPtr->noFiredTriggers = noFired;
- UintR Ttckeyrec = (UintR)regApiPtr->tckeyrec;
+ UintR Ttckeyrec = (UintR)regApiPtr.p->tckeyrec;
UintR TclientData = regTcPtr->clientData;
UintR TdirtyOp = regTcPtr->dirtyOp;
- ConnectionState TapiConnectstate = regApiPtr->apiConnectstate;
+ Uint32 TopSimple = regTcPtr->opSimple;
+ Uint32 Toperation = regTcPtr->operation;
+ ConnectionState TapiConnectstate = regApiPtr.p->apiConnectstate;
if (Ttckeyrec > (ZTCOPCONF_SIZE - 2)) {
TCKEY_abort(signal, 30);
return;
@@ -3795,23 +3799,34 @@ void Dbtc::execLQHKEYCONF(Signal* signal)
* since they will enter execLQHKEYCONF a second time
* Skip counting internally generated TcKeyReq
*/
- regApiPtr->tcSendArray[Ttckeyrec] = TclientData;
- regApiPtr->tcSendArray[Ttckeyrec + 1] = treadlenAi;
- regApiPtr->tckeyrec = Ttckeyrec + 2;
+ regApiPtr.p->tcSendArray[Ttckeyrec] = TclientData;
+ regApiPtr.p->tcSendArray[Ttckeyrec + 1] = treadlenAi;
+ regApiPtr.p->tckeyrec = Ttckeyrec + 2;
}//if
}//if
- if (TdirtyOp == ZTRUE) {
- UintR Tlqhkeyreqrec = regApiPtr->lqhkeyreqrec;
+ if (TdirtyOp == ZTRUE)
+ {
+ UintR Tlqhkeyreqrec = regApiPtr.p->lqhkeyreqrec;
jam();
releaseDirtyWrite(signal);
- regApiPtr->lqhkeyreqrec = Tlqhkeyreqrec - 1;
- } else {
+ regApiPtr.p->lqhkeyreqrec = Tlqhkeyreqrec - 1;
+ }
+ else if (Toperation == ZREAD && TopSimple)
+ {
+ UintR Tlqhkeyreqrec = regApiPtr.p->lqhkeyreqrec;
+ jam();
+ unlinkReadyTcCon(signal);
+ releaseTcCon();
+ regApiPtr.p->lqhkeyreqrec = Tlqhkeyreqrec - 1;
+ }
+ else
+ {
jam();
if (noFired == 0) {
jam();
// No triggers to execute
- UintR Tlqhkeyconfrec = regApiPtr->lqhkeyconfrec;
- regApiPtr->lqhkeyconfrec = Tlqhkeyconfrec + 1;
+ UintR Tlqhkeyconfrec = regApiPtr.p->lqhkeyconfrec;
+ regApiPtr.p->lqhkeyconfrec = Tlqhkeyconfrec + 1;
regTcPtr->tcConnectstate = OS_PREPARED;
}
}//if
@@ -3841,21 +3856,18 @@ void Dbtc::execLQHKEYCONF(Signal* signal)
jam();
if (regTcPtr->isIndexOp) {
jam();
- setupIndexOpReturn(regApiPtr, regTcPtr);
+ setupIndexOpReturn(regApiPtr.p, regTcPtr);
}
lqhKeyConf_checkTransactionState(signal, regApiPtr);
} else {
// We have fired triggers
jam();
saveTriggeringOpState(signal, regTcPtr);
- if (regTcPtr->noReceivedTriggers == noFired) {
- ApiConnectRecordPtr transPtr;
-
+ if (regTcPtr->noReceivedTriggers == noFired)
+ {
// We have received all data
jam();
- transPtr.i = TapiConnectptrIndex;
- transPtr.p = regApiPtr;
- executeTriggers(signal, &transPtr);
+ executeTriggers(signal, &regApiPtr);
}
// else wait for more trigger data
}
@@ -3879,7 +3891,7 @@ void Dbtc::setupIndexOpReturn(ApiConnectRecord* regApiPtr,
*/
void
Dbtc::lqhKeyConf_checkTransactionState(Signal * signal,
- ApiConnectRecord * const apiConnectPtrP)
+ Ptr<ApiConnectRecord> regApiPtr)
{
/*---------------------------------------------------------------*/
/* IF THE COMMIT FLAG IS SET IN SIGNAL TCKEYREQ THEN DBTC HAS TO */
@@ -3890,9 +3902,9 @@ Dbtc::lqhKeyConf_checkTransactionState(Signal * signal,
/* FOR ALL OPERATIONS, AND THEN WAIT FOR THE API TO CONCLUDE THE */
/* TRANSACTION */
/*---------------------------------------------------------------*/
- ConnectionState TapiConnectstate = apiConnectPtrP->apiConnectstate;
- UintR Tlqhkeyconfrec = apiConnectPtrP->lqhkeyconfrec;
- UintR Tlqhkeyreqrec = apiConnectPtrP->lqhkeyreqrec;
+ ConnectionState TapiConnectstate = regApiPtr.p->apiConnectstate;
+ UintR Tlqhkeyconfrec = regApiPtr.p->lqhkeyconfrec;
+ UintR Tlqhkeyreqrec = regApiPtr.p->lqhkeyreqrec;
int TnoOfOutStanding = Tlqhkeyreqrec - Tlqhkeyconfrec;
switch (TapiConnectstate) {
@@ -3902,11 +3914,11 @@ Dbtc::lqhKeyConf_checkTransactionState(Signal * signal,
diverify010Lab(signal);
return;
} else if (TnoOfOutStanding > 0) {
- if (apiConnectPtrP->tckeyrec == ZTCOPCONF_SIZE) {
+ if (regApiPtr.p->tckeyrec == ZTCOPCONF_SIZE) {
jam();
sendtckeyconf(signal, 0);
return;
- } else if (apiConnectPtrP->indexOpReturn) {
+ } else if (regApiPtr.p->indexOpReturn) {
jam();
sendtckeyconf(signal, 0);
return;
@@ -3925,11 +3937,11 @@ Dbtc::lqhKeyConf_checkTransactionState(Signal * signal,
sendtckeyconf(signal, 2);
return;
} else {
- if (apiConnectPtrP->tckeyrec == ZTCOPCONF_SIZE) {
+ if (regApiPtr.p->tckeyrec == ZTCOPCONF_SIZE) {
jam();
sendtckeyconf(signal, 0);
return;
- } else if (apiConnectPtrP->indexOpReturn) {
+ } else if (regApiPtr.p->indexOpReturn) {
jam();
sendtckeyconf(signal, 0);
return;
@@ -3939,11 +3951,11 @@ Dbtc::lqhKeyConf_checkTransactionState(Signal * signal,
return;
case CS_REC_COMMITTING:
if (TnoOfOutStanding > 0) {
- if (apiConnectPtrP->tckeyrec == ZTCOPCONF_SIZE) {
+ if (regApiPtr.p->tckeyrec == ZTCOPCONF_SIZE) {
jam();
sendtckeyconf(signal, 0);
return;
- } else if (apiConnectPtrP->indexOpReturn) {
+ } else if (regApiPtr.p->indexOpReturn) {
jam();
sendtckeyconf(signal, 0);
return;
@@ -3960,7 +3972,7 @@ Dbtc::lqhKeyConf_checkTransactionState(Signal * signal,
/* CONSISTING OF DIRTY WRITES AND ALL OF THOSE WERE */
/* COMPLETED. ENSURE TCKEYREC IS ZERO TO PREVENT ERRORS. */
/*---------------------------------------------------------------*/
- apiConnectPtrP->tckeyrec = 0;
+ regApiPtr.p->tckeyrec = 0;
return;
default:
TCKEY_abort(signal, 46);
@@ -4218,34 +4230,46 @@ void Dbtc::diverify010Lab(Signal* signal)
jam();
systemErrorLab(signal, __LINE__);
}//if
- if (TfirstfreeApiConnectCopy != RNIL) {
- seizeApiConnectCopy(signal);
- regApiPtr->apiConnectstate = CS_PREPARE_TO_COMMIT;
- /*-----------------------------------------------------------------------
- * WE COME HERE ONLY IF THE TRANSACTION IS PREPARED ON ALL TC CONNECTIONS.
- * THUS WE CAN START THE COMMIT PHASE BY SENDING DIVERIFY ON ALL TC
- * CONNECTIONS AND THEN WHEN ALL DIVERIFYCONF HAVE BEEN RECEIVED THE
- * COMMIT MESSAGE CAN BE SENT TO ALL INVOLVED PARTS.
- *-----------------------------------------------------------------------*/
- EXECUTE_DIRECT(DBDIH, GSN_DIVERIFYREQ, signal, 1);
- if (signal->theData[2] == 0) {
- execDIVERIFYCONF(signal);
+
+ if (regApiPtr->lqhkeyreqrec)
+ {
+ if (TfirstfreeApiConnectCopy != RNIL) {
+ seizeApiConnectCopy(signal);
+ regApiPtr->apiConnectstate = CS_PREPARE_TO_COMMIT;
+ /*-----------------------------------------------------------------------
+ * WE COME HERE ONLY IF THE TRANSACTION IS PREPARED ON ALL TC CONNECTIONS
+ * THUS WE CAN START THE COMMIT PHASE BY SENDING DIVERIFY ON ALL TC
+ * CONNECTIONS AND THEN WHEN ALL DIVERIFYCONF HAVE BEEN RECEIVED THE
+ * COMMIT MESSAGE CAN BE SENT TO ALL INVOLVED PARTS.
+ *---------------------------------------------------------------------*/
+ EXECUTE_DIRECT(DBDIH, GSN_DIVERIFYREQ, signal, 1);
+ if (signal->theData[2] == 0) {
+ execDIVERIFYCONF(signal);
+ }
+ return;
+ } else {
+ /*-----------------------------------------------------------------------
+ * There were no free copy connections available. We must abort the
+ * transaction since otherwise we will have a problem with the report
+ * to the application.
+ * This should more or less not happen but if it happens we do
+ * not want to crash and we do not want to create code to handle it
+ * properly since it is difficult to test it and will be complex to
+ * handle a problem more or less not occurring.
+ *---------------------------------------------------------------------*/
+ terrorCode = ZSEIZE_API_COPY_ERROR;
+ abortErrorLab(signal);
+ return;
}
- return;
- } else {
- /*-----------------------------------------------------------------------
- * There were no free copy connections available. We must abort the
- * transaction since otherwise we will have a problem with the report
- * to the application.
- * This should more or less not happen but if it happens we do not want to
- * crash and we do not want to create code to handle it properly since
- * it is difficult to test it and will be complex to handle a problem
- * more or less not occurring.
- *-----------------------------------------------------------------------*/
- terrorCode = ZSEIZE_API_COPY_ERROR;
- abortErrorLab(signal);
- return;
- }//if
+ }
+ else
+ {
+ jam();
+ sendtckeyconf(signal, 1);
+ regApiPtr->apiConnectstate = CS_CONNECTED;
+ regApiPtr->m_transaction_nodes.clear();
+ setApiConTimer(apiConnectptr.i, 0,__LINE__);
+ }
}//Dbtc::diverify010Lab()
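
The reworked diverify010Lab() above distinguishes three outcomes. A minimal standalone sketch of the branch order (illustrative names, not kernel code; lqhkeyreqrec is assumed to count LQHKEYREQs still owned by the transaction):

enum DiverifyAction { CONFIRM_ONLY, START_COMMIT, ABORT_NO_COPY };

// A transaction whose operations were all released early (e.g. dirty writes or
// simple reads) has nothing left to commit and is confirmed directly; otherwise
// a free copy connection is required for the DIVERIFY round, and its absence
// aborts with ZSEIZE_API_COPY_ERROR.
DiverifyAction diverify_decision(unsigned lqhkeyreqrec, bool free_copy_connection)
{
  if (lqhkeyreqrec == 0)
    return CONFIRM_ONLY;     // sendtckeyconf() and back to CS_CONNECTED
  if (!free_copy_connection)
    return ABORT_NO_COPY;    // terrorCode = ZSEIZE_API_COPY_ERROR
  return START_COMMIT;       // EXECUTE_DIRECT(DBDIH, GSN_DIVERIFYREQ, ...)
}
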
/* ------------------------------------------------------------------------- */
@@ -5260,16 +5284,8 @@ void Dbtc::execLQHKEYREF(Signal* signal)
regApiPtr->lqhkeyreqrec--;
if (regApiPtr->lqhkeyconfrec == regApiPtr->lqhkeyreqrec) {
if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
- if(regApiPtr->lqhkeyconfrec) {
- jam();
- diverify010Lab(signal);
- } else {
- jam();
- sendtckeyconf(signal, 1);
- regApiPtr->apiConnectstate = CS_CONNECTED;
- regApiPtr->m_transaction_nodes.clear();
- setApiConTimer(apiConnectptr.i, 0,__LINE__);
- }
+ jam();
+ diverify010Lab(signal);
return;
} else if (regApiPtr->tckeyrec > 0 || regApiPtr->m_exec_flag) {
jam();
@@ -6278,7 +6294,7 @@ void Dbtc::timeOutLoopStartLab(Signal* signal, Uint32 api_con_ptr)
jam();
if (api_timer != 0) {
Uint32 error= ZTIME_OUT_ERROR;
- time_out_value= time_out_param + (api_con_ptr & mask_value);
+ time_out_value= time_out_param + (ndb_rand() & mask_value);
if (unlikely(old_mask_value)) // abort during single user mode
{
apiConnectptr.i = api_con_ptr;
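
The timeout scan now adds a pseudo-random offset from the new ndb_rand() helper instead of an offset derived from the connection index. A minimal sketch of the jitter computation, assuming mask_value has the form 2^k - 1:

#include <cstdlib>

// Spread the next timeout check of a connection by a bounded random amount
// (illustrative only; the kernel uses ndb_rand(), not std::rand()).
static unsigned jittered_timeout(unsigned time_out_param, unsigned mask_value)
{
  return time_out_param + ((unsigned)std::rand() & mask_value);
}
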
@@ -6481,6 +6497,7 @@ void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr, Uint32 errCode)
return;
case CS_WAIT_COMMIT_CONF:
jam();
+ CRASH_INSERTION(8053);
tcConnectptr.i = apiConnectptr.p->currentTcConnect;
ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
arrGuard(apiConnectptr.p->currentReplicaNo, MAX_REPLICAS);
diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
index f28687dca0d..45d124b8d7d 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -518,6 +518,7 @@ typedef Ptr<Fragoperrec> FragoperrecPtr;
Uint32 m_savePointId;
Uint32 m_scanGCI;
};
+ Uint32 m_endPage;
// lock waited for or obtained and not yet passed to LQH
Uint32 m_accLockOp;
@@ -641,6 +642,8 @@ typedef Ptr<Fragoperrec> FragoperrecPtr;
*/
Page_request_list::Head m_page_requests[MAX_FREE_LIST];
+ DLList<Page>::Head m_unmap_pages;
+
/**
* Current extent
*/
@@ -702,7 +705,8 @@ struct Fragrecord {
DLList<ScanOp>::Head m_scanList;
- enum { UC_LCP = 1, UC_CREATE = 2 };
+ enum { UC_LCP = 1, UC_CREATE = 2, UC_SET_LCP = 3 };
+ Uint32 m_restore_lcp_id;
Uint32 m_undo_complete;
Uint32 m_tablespace_id;
Uint32 m_logfile_group_id;
@@ -1573,6 +1577,8 @@ public:
void nr_delete_page_callback(Signal*, Uint32 op, Uint32 page);
void nr_delete_log_buffer_callback(Signal*, Uint32 op, Uint32 page);
+
+ bool get_frag_info(Uint32 tableId, Uint32 fragId, Uint32* maxPage);
private:
BLOCK_DEFINES(Dbtup);
@@ -2830,7 +2836,7 @@ private:
public:
int disk_page_load_hook(Uint32 page_id);
- void disk_page_unmap_callback(Uint32 page_id, Uint32 dirty_count);
+ void disk_page_unmap_callback(Uint32 when, Uint32 page, Uint32 dirty_count);
int disk_restart_alloc_extent(Uint32 tableId, Uint32 fragId,
const Local_key* key, Uint32 pages);
@@ -2851,11 +2857,11 @@ public:
Local_key m_key;
};
- void disk_restart_mark_no_lcp(Uint32 table, Uint32 frag);
+ void disk_restart_lcp_id(Uint32 table, Uint32 frag, Uint32 lcpId);
private:
void disk_restart_undo_next(Signal*);
- void disk_restart_undo_lcp(Uint32, Uint32, Uint32 flag);
+ void disk_restart_undo_lcp(Uint32, Uint32, Uint32 flag, Uint32 lcpId);
void disk_restart_undo_callback(Signal* signal, Uint32, Uint32);
void disk_restart_undo_alloc(Apply_undo*);
void disk_restart_undo_update(Apply_undo*);
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp
index 87705232de2..8420e7f2bde 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp
@@ -903,8 +903,10 @@ Dbtup::disk_page_set_dirty(PagePtr pagePtr)
}
void
-Dbtup::disk_page_unmap_callback(Uint32 page_id, Uint32 dirty_count)
+Dbtup::disk_page_unmap_callback(Uint32 when,
+ Uint32 page_id, Uint32 dirty_count)
{
+ jamEntry();
Ptr<GlobalPage> gpage;
m_global_page_pool.getPtr(gpage, page_id);
PagePtr pagePtr;
@@ -918,17 +920,9 @@ Dbtup::disk_page_unmap_callback(Uint32 page_id, Uint32 dirty_count)
{
return ;
}
-
- Local_key key;
- key.m_page_no = pagePtr.p->m_page_no;
- key.m_file_no = pagePtr.p->m_file_no;
- Uint32 idx = pagePtr.p->list_index;
- ndbassert((idx & 0x8000) == 0);
+ Uint32 idx = pagePtr.p->list_index;
- if (DBG_DISK)
- ndbout << "disk_page_unmap_callback " << key << endl;
-
Ptr<Tablerec> tabPtr;
tabPtr.i= pagePtr.p->m_table_id;
ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
@@ -938,26 +932,89 @@ Dbtup::disk_page_unmap_callback(Uint32 page_id, Uint32 dirty_count)
Disk_alloc_info& alloc= fragPtr.p->m_disk_alloc_info;
- if (dirty_count == 0)
+ if (when == 0)
{
- Uint32 free = pagePtr.p->free_space;
- Uint32 used = pagePtr.p->uncommitted_used_space;
- ddassert(free >= used);
- ddassert(alloc.calc_page_free_bits(free - used) == idx);
+ /**
+ * Before pageout
+ */
+ jam();
+
+ if (DBG_DISK)
+ {
+ Local_key key;
+ key.m_page_no = pagePtr.p->m_page_no;
+ key.m_file_no = pagePtr.p->m_file_no;
+ ndbout << "disk_page_unmap_callback(before) " << key
+ << " cnt: " << dirty_count << " " << (idx & ~0x8000) << endl;
+ }
+
+ ndbassert((idx & 0x8000) == 0);
+
+ ArrayPool<Page> *pool= (ArrayPool<Page>*)&m_global_page_pool;
+ LocalDLList<Page> list(*pool, alloc.m_dirty_pages[idx]);
+ LocalDLList<Page> list2(*pool, alloc.m_unmap_pages);
+ list.remove(pagePtr);
+ list2.add(pagePtr);
+
+ if (dirty_count == 0)
+ {
+ jam();
+ pagePtr.p->list_index = idx | 0x8000;
+
+ Local_key key;
+ key.m_page_no = pagePtr.p->m_page_no;
+ key.m_file_no = pagePtr.p->m_file_no;
+
+ Uint32 free = pagePtr.p->free_space;
+ Uint32 used = pagePtr.p->uncommitted_used_space;
+ ddassert(free >= used);
+ ddassert(alloc.calc_page_free_bits(free - used) == idx);
+
+ Tablespace_client tsman(0, c_tsman,
+ fragPtr.p->fragTableId,
+ fragPtr.p->fragmentId,
+ fragPtr.p->m_tablespace_id);
+
+ tsman.unmap_page(&key, idx);
+ jamEntry();
+ }
+ }
+ else if (when == 1)
+ {
+ /**
+ * After page out
+ */
+ jam();
+
+ Local_key key;
+ key.m_page_no = pagePtr.p->m_page_no;
+ key.m_file_no = pagePtr.p->m_file_no;
+ Uint32 real_free = pagePtr.p->free_space;
+ if (DBG_DISK)
+ {
+ ndbout << "disk_page_unmap_callback(after) " << key
+ << " cnt: " << dirty_count << " " << (idx & ~0x8000) << endl;
+ }
+
+ ArrayPool<Page> *pool= (ArrayPool<Page>*)&m_global_page_pool;
+ LocalDLList<Page> list(*pool, alloc.m_unmap_pages);
+ list.remove(pagePtr);
+
Tablespace_client tsman(0, c_tsman,
fragPtr.p->fragTableId,
fragPtr.p->fragmentId,
fragPtr.p->m_tablespace_id);
- tsman.unmap_page(&key, idx);
- jamEntry();
- pagePtr.p->list_index = idx | 0x8000;
+ if (DBG_DISK && alloc.calc_page_free_bits(real_free) != (idx & ~0x8000))
+ {
+ ndbout << key
+ << " calc: " << alloc.calc_page_free_bits(real_free)
+ << " idx: " << (idx & ~0x8000)
+ << endl;
+ }
+ tsman.update_page_free_bits(&key, alloc.calc_page_free_bits(real_free));
}
-
- ArrayPool<Page> *pool= (ArrayPool<Page>*)&m_global_page_pool;
- LocalDLList<Page> list(*pool, alloc.m_dirty_pages[idx]);
- list.remove(pagePtr);
}
void
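
disk_page_unmap_callback() is now called in two phases: when == 0 before pageout moves the page from its dirty list onto the new m_unmap_pages list (and unmaps fully clean pages in TSMAN), and when == 1 after the write completes removes it from m_unmap_pages and publishes the committed free bits. A standalone sketch of the list movement only, with std::list standing in for the kernel's DLList:

#include <cstdint>
#include <list>

struct PageModel { uint32_t id; };

struct AllocModel {
  std::list<PageModel> dirty_pages;   // stand-in for m_dirty_pages[idx]
  std::list<PageModel> unmap_pages;   // stand-in for m_unmap_pages
};

// phase 0 = before pageout, phase 1 = after pageout (illustrative only)
void unmap_callback(AllocModel& alloc, uint32_t page_id, int phase)
{
  if (phase == 0) {
    for (std::list<PageModel>::iterator it = alloc.dirty_pages.begin();
         it != alloc.dirty_pages.end(); ++it) {
      if (it->id == page_id) {
        alloc.unmap_pages.splice(alloc.unmap_pages.end(), alloc.dirty_pages, it);
        break;
      }
    }
  } else {
    // after the write has reached disk the real code also calls
    // tsman.update_page_free_bits() with the page's current free space
    for (std::list<PageModel>::iterator it = alloc.unmap_pages.begin();
         it != alloc.unmap_pages.end(); ++it) {
      if (it->id == page_id) { alloc.unmap_pages.erase(it); break; }
    }
  }
}
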
@@ -969,8 +1026,6 @@ Dbtup::disk_page_alloc(Signal* signal,
Disk_alloc_info& alloc= fragPtrP->m_disk_alloc_info;
Uint64 lsn;
- Uint32 old_free = pagePtr.p->free_space;
- Uint32 old_bits= alloc.calc_page_free_bits(old_free);
if (tabPtrP->m_attributes[DD].m_no_of_varsize == 0)
{
ddassert(pagePtr.p->uncommitted_used_space > 0);
@@ -988,20 +1043,6 @@ Dbtup::disk_page_alloc(Signal* signal,
lsn= disk_page_undo_alloc(pagePtr.p, key, sz, gci, logfile_group_id);
}
-
- Uint32 new_free = pagePtr.p->free_space;
- Uint32 new_bits= alloc.calc_page_free_bits(new_free);
-
- if (old_bits != new_bits)
- {
- Tablespace_client tsman(signal, c_tsman,
- fragPtrP->fragTableId,
- fragPtrP->fragmentId,
- fragPtrP->m_tablespace_id);
-
- tsman.update_page_free_bits(key, new_bits, lsn);
- jamEntry();
- }
}
void
@@ -1016,7 +1057,6 @@ Dbtup::disk_page_free(Signal *signal,
Uint32 logfile_group_id= fragPtrP->m_logfile_group_id;
Disk_alloc_info& alloc= fragPtrP->m_disk_alloc_info;
Uint32 old_free= pagePtr.p->free_space;
- Uint32 old_bits= alloc.calc_page_free_bits(old_free);
Uint32 sz;
Uint64 lsn;
@@ -1043,19 +1083,7 @@ Dbtup::disk_page_free(Signal *signal,
}
Uint32 new_free = pagePtr.p->free_space;
- Uint32 new_bits = alloc.calc_page_free_bits(new_free);
- if (old_bits != new_bits)
- {
- Tablespace_client tsman(signal, c_tsman,
- fragPtrP->fragTableId,
- fragPtrP->fragmentId,
- fragPtrP->m_tablespace_id);
-
- tsman.update_page_free_bits(key, new_bits, lsn);
- jamEntry();
- }
-
Uint32 ext = pagePtr.p->m_extent_info_ptr;
Uint32 used = pagePtr.p->uncommitted_used_space;
Uint32 old_idx = pagePtr.p->list_index;
@@ -1341,15 +1369,23 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
case File_formats::Undofile::UNDO_LCP_FIRST:
case File_formats::Undofile::UNDO_LCP:
{
+ jam();
ndbrequire(len == 3);
+ Uint32 lcp = ptr[0];
Uint32 tableId = ptr[1] >> 16;
Uint32 fragId = ptr[1] & 0xFFFF;
- disk_restart_undo_lcp(tableId, fragId, Fragrecord::UC_LCP);
+ disk_restart_undo_lcp(tableId, fragId, Fragrecord::UC_LCP, lcp);
disk_restart_undo_next(signal);
+
+ if (DBG_UNDO)
+ {
+ ndbout_c("UNDO LCP %u (%u, %u)", lcp, tableId, fragId);
+ }
return;
}
case File_formats::Undofile::UNDO_TUP_ALLOC:
{
+ jam();
Disk_undo::Alloc* rec= (Disk_undo::Alloc*)ptr;
preq.m_page.m_page_no = rec->m_page_no;
preq.m_page.m_file_no = rec->m_file_no_page_idx >> 16;
@@ -1358,6 +1394,7 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
}
case File_formats::Undofile::UNDO_TUP_UPDATE:
{
+ jam();
Disk_undo::Update* rec= (Disk_undo::Update*)ptr;
preq.m_page.m_page_no = rec->m_page_no;
preq.m_page.m_file_no = rec->m_file_no_page_idx >> 16;
@@ -1366,6 +1403,7 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
}
case File_formats::Undofile::UNDO_TUP_FREE:
{
+ jam();
Disk_undo::Free* rec= (Disk_undo::Free*)ptr;
preq.m_page.m_page_no = rec->m_page_no;
preq.m_page.m_file_no = rec->m_file_no_page_idx >> 16;
@@ -1377,6 +1415,7 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
*
*/
{
+ jam();
Disk_undo::Create* rec= (Disk_undo::Create*)ptr;
Ptr<Tablerec> tabPtr;
tabPtr.i= rec->m_table;
@@ -1384,12 +1423,34 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
for(Uint32 i = 0; i<MAX_FRAG_PER_NODE; i++)
if (tabPtr.p->fragrec[i] != RNIL)
disk_restart_undo_lcp(tabPtr.i, tabPtr.p->fragid[i],
- Fragrecord::UC_CREATE);
+ Fragrecord::UC_CREATE, 0);
disk_restart_undo_next(signal);
+
+ if (DBG_UNDO)
+ {
+ ndbout_c("UNDO CREATE (%u)", tabPtr.i);
+ }
return;
}
case File_formats::Undofile::UNDO_TUP_DROP:
+ {
jam();
+ Disk_undo::Drop* rec = (Disk_undo::Drop*)ptr;
+ Ptr<Tablerec> tabPtr;
+ tabPtr.i= rec->m_table;
+ ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
+ for(Uint32 i = 0; i<MAX_FRAG_PER_NODE; i++)
+ if (tabPtr.p->fragrec[i] != RNIL)
+ disk_restart_undo_lcp(tabPtr.i, tabPtr.p->fragid[i],
+ Fragrecord::UC_CREATE, 0);
+ disk_restart_undo_next(signal);
+
+ if (DBG_UNDO)
+ {
+ ndbout_c("UNDO DROP (%u)", tabPtr.i);
+ }
+ return;
+ }
case File_formats::Undofile::UNDO_TUP_ALLOC_EXTENT:
jam();
case File_formats::Undofile::UNDO_TUP_FREE_EXTENT:
@@ -1398,6 +1459,7 @@ Dbtup::disk_restart_undo(Signal* signal, Uint64 lsn,
return;
case File_formats::Undofile::UNDO_END:
+ jam();
f_undo_done = true;
return;
default:
@@ -1431,14 +1493,32 @@ Dbtup::disk_restart_undo_next(Signal* signal)
}
void
-Dbtup::disk_restart_mark_no_lcp(Uint32 tableId, Uint32 fragId)
+Dbtup::disk_restart_lcp_id(Uint32 tableId, Uint32 fragId, Uint32 lcpId)
{
jamEntry();
- disk_restart_undo_lcp(tableId, fragId, Fragrecord::UC_CREATE);
+
+ if (lcpId == RNIL)
+ {
+ disk_restart_undo_lcp(tableId, fragId, Fragrecord::UC_CREATE, 0);
+ if (DBG_UNDO)
+ {
+ ndbout_c("mark_no_lcp (%u, %u)", tableId, fragId);
+ }
+ }
+ else
+ {
+ disk_restart_undo_lcp(tableId, fragId, Fragrecord::UC_SET_LCP, lcpId);
+ if (DBG_UNDO)
+ {
+ ndbout_c("mark_no_lcp (%u, %u)", tableId, fragId);
+ }
+
+ }
}
void
-Dbtup::disk_restart_undo_lcp(Uint32 tableId, Uint32 fragId, Uint32 flag)
+Dbtup::disk_restart_undo_lcp(Uint32 tableId, Uint32 fragId, Uint32 flag,
+ Uint32 lcpId)
{
Ptr<Tablerec> tabPtr;
tabPtr.i= tableId;
@@ -1446,11 +1526,43 @@ Dbtup::disk_restart_undo_lcp(Uint32 tableId, Uint32 fragId, Uint32 flag)
if (tabPtr.p->tableStatus == DEFINED)
{
+ jam();
FragrecordPtr fragPtr;
getFragmentrec(fragPtr, fragId, tabPtr.p);
if (!fragPtr.isNull())
{
- fragPtr.p->m_undo_complete |= flag;
+ jam();
+ switch(flag){
+ case Fragrecord::UC_CREATE:
+ jam();
+ fragPtr.p->m_undo_complete |= flag;
+ return;
+ case Fragrecord::UC_LCP:
+ jam();
+ if (fragPtr.p->m_undo_complete == 0 &&
+ fragPtr.p->m_restore_lcp_id == lcpId)
+ {
+ jam();
+ fragPtr.p->m_undo_complete |= flag;
+ if (DBG_UNDO)
+ ndbout_c("table: %u fragment: %u lcp: %u -> done",
+ tableId, fragId, lcpId);
+ }
+ return;
+ case Fragrecord::UC_SET_LCP:
+ {
+ jam();
+ if (DBG_UNDO)
+ ndbout_c("table: %u fragment: %u restore to lcp: %u",
+ tableId, fragId, lcpId);
+ ndbrequire(fragPtr.p->m_undo_complete == 0);
+ ndbrequire(fragPtr.p->m_restore_lcp_id == RNIL);
+ fragPtr.p->m_restore_lcp_id = lcpId;
+ return;
+ }
+ }
+ jamLine(flag);
+ ndbrequire(false);
}
}
}
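
The new UC_SET_LCP flag records which local checkpoint a fragment is being restored from (m_restore_lcp_id); only an UNDO_LCP record carrying exactly that LCP id then marks undo as complete for the fragment. A compact model of the rule, with RNIL represented as 0xFFFFFFFF:

#include <cstdint>

struct FragUndoState {
  uint32_t restore_lcp_id;   // 0xFFFFFFFF (RNIL) until UC_SET_LCP arrives
  bool undo_complete;
};

// UC_SET_LCP: remember the LCP id the fragment is restored to.
void on_set_lcp(FragUndoState& f, uint32_t lcpId) { f.restore_lcp_id = lcpId; }

// UC_LCP: only the matching LCP record terminates undo for this fragment.
void on_undo_lcp(FragUndoState& f, uint32_t lcpId)
{
  if (!f.undo_complete && f.restore_lcp_id == lcpId)
    f.undo_complete = true;
}
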
@@ -1474,6 +1586,7 @@ Dbtup::disk_restart_undo_callback(Signal* signal,
pagePtr.p->nextList != RNIL ||
pagePtr.p->prevList != RNIL)
{
+ jam();
update = true;
pagePtr.p->list_index |= 0x8000;
pagePtr.p->nextList = pagePtr.p->prevList = RNIL;
@@ -1484,6 +1597,9 @@ Dbtup::disk_restart_undo_callback(Signal* signal,
if (tableId >= cnoOfTablerec)
{
+ jam();
+ if (DBG_UNDO)
+ ndbout_c("UNDO table> %u", tableId);
disk_restart_undo_next(signal);
return;
}
@@ -1492,6 +1608,9 @@ Dbtup::disk_restart_undo_callback(Signal* signal,
if (undo->m_table_ptr.p->tableStatus != DEFINED)
{
+ jam();
+ if (DBG_UNDO)
+ ndbout_c("UNDO !defined (%u) ", tableId);
disk_restart_undo_next(signal);
return;
}
@@ -1499,19 +1618,25 @@ Dbtup::disk_restart_undo_callback(Signal* signal,
getFragmentrec(undo->m_fragment_ptr, fragId, undo->m_table_ptr.p);
if(undo->m_fragment_ptr.isNull())
{
+ jam();
+ if (DBG_UNDO)
+ ndbout_c("UNDO fragment null %u/%u", tableId, fragId);
disk_restart_undo_next(signal);
return;
}
if (undo->m_fragment_ptr.p->m_undo_complete)
{
+ jam();
+ if (DBG_UNDO)
+ ndbout_c("UNDO undo complete %u/%u", tableId, fragId);
disk_restart_undo_next(signal);
return;
}
- Local_key key;
- key.m_page_no = pagePtr.p->m_page_no;
- key.m_file_no = pagePtr.p->m_file_no;
+ Local_key key = undo->m_key;
+// key.m_page_no = pagePtr.p->m_page_no;
+// key.m_file_no = pagePtr.p->m_file_no;
Uint64 lsn = 0;
lsn += pagePtr.p->m_page_header.m_page_lsn_hi; lsn <<= 32;
@@ -1521,6 +1646,7 @@ Dbtup::disk_restart_undo_callback(Signal* signal,
if (undo->m_lsn <= lsn)
{
+ jam();
if (DBG_UNDO)
{
ndbout << "apply: " << undo->m_lsn << "(" << lsn << " )"
@@ -1535,12 +1661,15 @@ Dbtup::disk_restart_undo_callback(Signal* signal,
*/
switch(undo->m_type){
case File_formats::Undofile::UNDO_TUP_ALLOC:
+ jam();
disk_restart_undo_alloc(undo);
break;
case File_formats::Undofile::UNDO_TUP_UPDATE:
+ jam();
disk_restart_undo_update(undo);
break;
case File_formats::Undofile::UNDO_TUP_FREE:
+ jam();
disk_restart_undo_free(undo);
break;
default:
@@ -1555,14 +1684,17 @@ Dbtup::disk_restart_undo_callback(Signal* signal,
m_pgman.update_lsn(undo->m_key, lsn);
jamEntry();
+
+ disk_restart_undo_page_bits(signal, undo);
}
else if (DBG_UNDO)
{
+ jam();
ndbout << "ignore: " << undo->m_lsn << "(" << lsn << " )"
- << key << " type: " << undo->m_type << endl;
+ << key << " type: " << undo->m_type
+ << " tab: " << tableId << endl;
}
- disk_restart_undo_page_bits(signal, undo);
disk_restart_undo_next(signal);
}
@@ -1637,16 +1769,12 @@ Dbtup::disk_restart_undo_page_bits(Signal* signal, Apply_undo* undo)
Uint32 new_bits = alloc.calc_page_free_bits(free);
pageP->list_index = 0x8000 | new_bits;
- Uint64 lsn = 0;
- lsn += pageP->m_page_header.m_page_lsn_hi; lsn <<= 32;
- lsn += pageP->m_page_header.m_page_lsn_lo;
-
Tablespace_client tsman(signal, c_tsman,
fragPtrP->fragTableId,
fragPtrP->fragmentId,
fragPtrP->m_tablespace_id);
- tsman.restart_undo_page_free_bits(&undo->m_key, new_bits, undo->m_lsn, lsn);
+ tsman.restart_undo_page_free_bits(&undo->m_key, new_bits);
jamEntry();
}
@@ -1683,6 +1811,7 @@ Dbtup::disk_restart_alloc_extent(Uint32 tableId, Uint32 fragId,
if (alloc.m_curr_extent_info_ptr_i != RNIL)
{
+ jam();
Ptr<Extent_info> old;
c_extent_pool.getPtr(old, alloc.m_curr_extent_info_ptr_i);
ndbassert(old.p->m_free_matrix_pos == RNIL);
@@ -1709,6 +1838,7 @@ void
Dbtup::disk_restart_page_bits(Uint32 tableId, Uint32 fragId,
const Local_key*, Uint32 bits)
{
+ jam();
TablerecPtr tabPtr;
FragrecordPtr fragPtr;
tabPtr.i = tableId;
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
index 45766e5e9c4..a642d704eb9 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
@@ -1957,9 +1957,8 @@ int Dbtup::interpreterNextLab(Signal* signal,
Uint32 TdataForUpdate[3];
Uint32 Tlen;
- AttributeHeader& ah= AttributeHeader::init(&TdataForUpdate[0],
- TattrId,
- TattrNoOfWords << 2);
+ AttributeHeader ah(TattrId, TattrNoOfWords << 2);
+ TdataForUpdate[0]= ah.m_value;
TdataForUpdate[1]= TregMemBuffer[theRegister + 2];
TdataForUpdate[2]= TregMemBuffer[theRegister + 3];
Tlen= TattrNoOfWords + 1;
@@ -1975,6 +1974,7 @@ int Dbtup::interpreterNextLab(Signal* signal,
// Write a NULL value into the attribute
/* --------------------------------------------------------- */
ah.setNULL();
+ TdataForUpdate[0]= ah.m_value;
Tlen= 1;
}
int TnoDataRW= updateAttributes(req_struct,
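
The same mechanical change recurs in several files of this patch: instead of AttributeHeader::init() writing directly into a caller-supplied word, an AttributeHeader is constructed by value and its packed word copied out. The pattern in isolation (attrId and noOfWords are placeholders):

Uint32 buf[3];
AttributeHeader ah(attrId, noOfWords << 2);  // attribute id + size in bytes
buf[0] = ah.m_value;                         // packed header word
// buf[1], buf[2] carry the attribute data exactly as before
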
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
index 6866236f15e..176efac8058 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
@@ -143,6 +143,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
regFragPtr.p->m_lcp_scan_op = RNIL;
regFragPtr.p->m_lcp_keep_list = RNIL;
regFragPtr.p->m_var_page_chunks = RNIL;
+ regFragPtr.p->m_restore_lcp_id = RNIL;
if (ERROR_INSERTED(4007) && regTabPtr.p->fragid[0] == fragId ||
ERROR_INSERTED(4008) && regTabPtr.p->fragid[1] == fragId) {
@@ -674,11 +675,11 @@ Dbtup::undo_createtable_callback(Signal* signal, Uint32 opPtrI, Uint32 unused)
switch(ret){
case 0:
return;
+ case -1:
+ warningEvent("Failed to sync log for create of table: %u", regTabPtr.i);
default:
- ndbout_c("ret: %d", ret);
- ndbrequire(false);
+ execute(signal, req.m_callback, regFragPtr.p->m_logfile_group_id);
}
-
}
void
@@ -959,8 +960,6 @@ void Dbtup::releaseFragment(Signal* signal, Uint32 tableId,
return;
}
-#if NOT_YET_UNDO_DROP_TABLE
-#error "This code is complete, but I prefer not to enable it until I need it"
if (logfile_group_id != RNIL)
{
Callback cb;
@@ -968,8 +967,15 @@ void Dbtup::releaseFragment(Signal* signal, Uint32 tableId,
cb.m_callbackFunction =
safe_cast(&Dbtup::drop_table_log_buffer_callback);
Uint32 sz= sizeof(Disk_undo::Drop) >> 2;
- (void) c_lgman->alloc_log_space(logfile_group_id, sz);
-
+ int r0 = c_lgman->alloc_log_space(logfile_group_id, sz);
+ if (r0)
+ {
+ jam();
+ warningEvent("Failed to alloc log space for drop table: %u",
+ tabPtr.i);
+ goto done;
+ }
+
Logfile_client lgman(this, c_lgman, logfile_group_id);
int res= lgman.get_log_buffer(signal, sz, &cb);
switch(res){
@@ -977,15 +983,18 @@ void Dbtup::releaseFragment(Signal* signal, Uint32 tableId,
jam();
return;
case -1:
- ndbrequire("NOT YET IMPLEMENTED" == 0);
+ warningEvent("Failed to get log buffer for drop table: %u",
+ tabPtr.i);
+ c_lgman->free_log_space(logfile_group_id, sz);
+ goto done;
break;
default:
execute(signal, cb, logfile_group_id);
return;
}
}
-#endif
-
+
+done:
drop_table_logsync_callback(signal, tabPtr.i, RNIL);
}
@@ -997,7 +1006,20 @@ Dbtup::drop_fragment_unmap_pages(Signal *signal,
{
if (tabPtr.p->m_no_of_disk_attributes)
{
+ jam();
Disk_alloc_info& alloc_info= fragPtr.p->m_disk_alloc_info;
+
+ if (!alloc_info.m_unmap_pages.isEmpty())
+ {
+ jam();
+ ndbout_c("waiting for unmape pages");
+ signal->theData[0] = ZUNMAP_PAGES;
+ signal->theData[1] = tabPtr.i;
+ signal->theData[2] = fragPtr.i;
+ signal->theData[3] = pos;
+ sendSignal(cownref, GSN_CONTINUEB, signal, 4, JBB);
+ return;
+ }
while(alloc_info.m_dirty_pages[pos].isEmpty() && pos < MAX_FREE_LIST)
pos++;
@@ -1164,9 +1186,10 @@ Dbtup::drop_table_log_buffer_callback(Signal* signal, Uint32 tablePtrI,
switch(ret){
case 0:
return;
+ case -1:
+ warningEvent("Failed to syn log for drop of table: %u", tablePtrI);
default:
- ndbout_c("ret: %d", ret);
- ndbrequire(false);
+ execute(signal, req.m_callback, logfile_group_id);
}
}
@@ -1441,3 +1464,22 @@ Dbtup::complete_restore_lcp(Uint32 tableId, Uint32 fragId)
tabDesc += 2;
}
}
+
+bool
+Dbtup::get_frag_info(Uint32 tableId, Uint32 fragId, Uint32* maxPage)
+{
+ jamEntry();
+ TablerecPtr tabPtr;
+ tabPtr.i= tableId;
+ ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
+
+ FragrecordPtr fragPtr;
+ getFragmentrec(fragPtr, fragId, tabPtr.p);
+
+ if (maxPage)
+ {
+ * maxPage = fragPtr.p->noOfPages;
+ }
+
+ return true;
+}
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
index b0b0cec6b76..64f81dc11ab 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
@@ -818,9 +818,7 @@ Dbtup::checkUpdateOfPrimaryKey(KeyReqStruct* req_struct,
Tablerec* const regTabPtr)
{
Uint32 keyReadBuffer[MAX_KEY_SIZE_IN_WORDS];
- Uint32 attributeHeader;
TableDescriptor* attr_descr = req_struct->attr_descr;
- AttributeHeader* ahOut = (AttributeHeader*)&attributeHeader;
AttributeHeader ahIn(*updateBuffer);
Uint32 attributeId = ahIn.getAttributeId();
Uint32 attrDescriptorIndex = attributeId << ZAD_LOG_SIZE;
@@ -843,7 +841,7 @@ Dbtup::checkUpdateOfPrimaryKey(KeyReqStruct* req_struct,
ReadFunction f = regTabPtr->readFunctionArray[attributeId];
- AttributeHeader::init(&attributeHeader, attributeId, 0);
+ AttributeHeader attributeHeader(attributeId, 0);
req_struct->out_buf_index = 0;
req_struct->max_read = MAX_KEY_SIZE_IN_WORDS;
req_struct->attr_descriptor = attrDescriptor;
@@ -852,12 +850,12 @@ Dbtup::checkUpdateOfPrimaryKey(KeyReqStruct* req_struct,
req_struct->xfrm_flag = true;
ndbrequire((this->*f)(&keyReadBuffer[0],
req_struct,
- ahOut,
+ &attributeHeader,
attributeOffset));
req_struct->xfrm_flag = tmp;
- ndbrequire(req_struct->out_buf_index == ahOut->getDataSize());
- if (ahIn.getDataSize() != ahOut->getDataSize()) {
+ ndbrequire(req_struct->out_buf_index == attributeHeader.getDataSize());
+ if (ahIn.getDataSize() != attributeHeader.getDataSize()) {
jam();
return true;
}
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
index 6e53531e118..5e9306909b4 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
@@ -95,7 +95,23 @@ Dbtup::execACC_SCANREQ(Signal* signal)
}
}
- bits |= AccScanReq::getNRScanFlag(req->requestInfo) ? ScanOp::SCAN_NR : 0;
+ if (AccScanReq::getNRScanFlag(req->requestInfo))
+ {
+ jam();
+ bits |= ScanOp::SCAN_NR;
+ scanPtr.p->m_endPage = req->maxPage;
+ if (req->maxPage != RNIL && req->maxPage > frag.noOfPages)
+ {
+ ndbout_c("%u %u endPage: %u (noOfPages: %u)",
+ tablePtr.i, fragId,
+ req->maxPage, fragPtr.p->noOfPages);
+ }
+ }
+ else
+ {
+ jam();
+ scanPtr.p->m_endPage = RNIL;
+ }
// set up scan op
new (scanPtr.p) ScanOp();
@@ -540,7 +556,7 @@ Dbtup::scanFirst(Signal*, ScanOpPtr scanPtr)
ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
Fragrecord& frag = *fragPtr.p;
// in the future should not pre-allocate pages
- if (frag.noOfPages == 0) {
+ if (frag.noOfPages == 0 && ((bits & ScanOp::SCAN_NR) == 0)) {
jam();
scan.m_state = ScanOp::Last;
return;
@@ -632,11 +648,23 @@ Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
key.m_page_no++;
if (key.m_page_no >= frag.noOfPages) {
jam();
+
+ if ((bits & ScanOp::SCAN_NR) && (scan.m_endPage != RNIL))
+ {
+ jam();
+ if (key.m_page_no < scan.m_endPage)
+ {
+ jam();
+ ndbout_c("scanning page %u", key.m_page_no);
+ goto cont;
+ }
+ }
// no more pages, scan ends
pos.m_get = ScanPos::Get_undef;
scan.m_state = ScanOp::Last;
return true;
}
+ cont:
key.m_page_idx = 0;
pos.m_get = ScanPos::Get_page_mm;
// clear cached value
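
For node-restart scans (SCAN_NR) the tuple scan may now continue past the fragment's currently allocated pages, up to the starting node's m_endPage, so deleted rowids can still be reported. The effective page limit, sketched with RNIL as 0xFFFFFFFF:

#include <cstdint>

// Illustrative only: how far an NR scan walks compared with a normal scan.
uint32_t scan_page_limit(uint32_t noOfPages, uint32_t endPage, bool nr_scan)
{
  const uint32_t RNIL_WORD = 0xFFFFFFFF;
  if (nr_scan && endPage != RNIL_WORD && endPage > noOfPages)
    return endPage;
  return noOfPages;
}
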
@@ -649,7 +677,13 @@ Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
{
if (pos.m_realpid_mm == RNIL) {
jam();
- pos.m_realpid_mm = getRealpid(fragPtr.p, key.m_page_no);
+ if (key.m_page_no < frag.noOfPages)
+ pos.m_realpid_mm = getRealpid(fragPtr.p, key.m_page_no);
+ else
+ {
+ ndbassert(bits & ScanOp::SCAN_NR);
+ goto nopage;
+ }
}
PagePtr pagePtr;
c_page_pool.getPtr(pagePtr, pos.m_realpid_mm);
@@ -657,9 +691,18 @@ Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
if (pagePtr.p->page_state == ZEMPTY_MM) {
// skip empty page
jam();
- pos.m_get = ScanPos::Get_next_page_mm;
- break; // incr loop count
+ if (! (bits & ScanOp::SCAN_NR))
+ {
+ pos.m_get = ScanPos::Get_next_page_mm;
+ break; // incr loop count
+ }
+ else
+ {
+ jam();
+ pos.m_realpid_mm = RNIL;
+ }
}
+ nopage:
pos.m_page = pagePtr.p;
pos.m_get = ScanPos::Get_tuple;
}
@@ -772,7 +815,7 @@ Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
uncommitted = committed = ~(unsigned)0;
int ret = tsman.get_page_free_bits(&key, &uncommitted, &committed);
ndbrequire(ret == 0);
- if (committed == 0) {
+ if (committed == 0 && uncommitted == 0) {
// skip empty page
jam();
pos.m_get = ScanPos::Get_next_page_dd;
@@ -820,11 +863,11 @@ Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
{
pos.m_get = ScanPos::Get_next_tuple_fs;
th = (Tuple_header*)&page->m_data[key.m_page_idx];
- thbits = th->m_header_bits;
if (likely(! (bits & ScanOp::SCAN_NR)))
{
jam();
+ thbits = th->m_header_bits;
if (! (thbits & Tuple_header::FREE))
{
goto found_tuple;
@@ -832,7 +875,15 @@ Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
}
else
{
- if ((foundGCI = *th->get_mm_gci(tablePtr.p)) > scanGCI)
+ if (pos.m_realpid_mm == RNIL)
+ {
+ jam();
+ foundGCI = 0;
+ goto found_deleted_rowid;
+ }
+ thbits = th->m_header_bits;
+ if ((foundGCI = *th->get_mm_gci(tablePtr.p)) > scanGCI ||
+ foundGCI == 0)
{
if (! (thbits & Tuple_header::FREE))
{
@@ -904,7 +955,8 @@ Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
Fix_page *mmpage = (Fix_page*)c_page_pool.getPtr(pos.m_realpid_mm);
th = (Tuple_header*)(mmpage->m_data + key_mm.m_page_idx);
- if ((foundGCI = *th->get_mm_gci(tablePtr.p)) > scanGCI)
+ if ((foundGCI = *th->get_mm_gci(tablePtr.p)) > scanGCI ||
+ foundGCI == 0)
{
if (! (thbits & Tuple_header::FREE))
break;
diff --git a/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
index 550e255061f..f34bc41af06 100644
--- a/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
+++ b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
@@ -1169,9 +1169,7 @@ DbUtil::prepareOperation(Signal* signal, PreparePtr prepPtr)
/**************************************************************
* Attribute found - store in mapping (AttributeId, Position)
**************************************************************/
- AttributeHeader & attrMap =
- AttributeHeader::init(attrMappingIt.data,
- attrDesc.AttributeId, // 1. Store AttrId
+ AttributeHeader attrMap(attrDesc.AttributeId, // 1. Store AttrId
0);
if (attrDesc.AttributeKeyFlag) {
@@ -1200,6 +1198,7 @@ DbUtil::prepareOperation(Signal* signal, PreparePtr prepPtr)
return;
}
}
+ *(attrMappingIt.data) = attrMap.m_value;
#if 0
ndbout << "BEFORE: attrLength: " << attrLength << endl;
#endif
diff --git a/storage/ndb/src/kernel/blocks/lgman.cpp b/storage/ndb/src/kernel/blocks/lgman.cpp
index 23738717580..0481f7b399b 100644
--- a/storage/ndb/src/kernel/blocks/lgman.cpp
+++ b/storage/ndb/src/kernel/blocks/lgman.cpp
@@ -2701,8 +2701,16 @@ Lgman::execute_undo_record(Signal* signal)
Uint32 lcp = * (ptr - len + 1);
if(m_latest_lcp && lcp > m_latest_lcp)
{
- // Just ignore
- break;
+ if (0)
+ {
+ const Uint32 * base = ptr - len + 1;
+ Uint32 lcp = base[0];
+ Uint32 tableId = base[1] >> 16;
+ Uint32 fragId = base[1] & 0xFFFF;
+
+ ndbout_c("NOT! ignoring lcp: %u tab: %u frag: %u",
+ lcp, tableId, fragId);
+ }
}
if(m_latest_lcp == 0 ||
diff --git a/storage/ndb/src/kernel/blocks/pgman.cpp b/storage/ndb/src/kernel/blocks/pgman.cpp
index d8e0c053984..aa1f04c720c 100644
--- a/storage/ndb/src/kernel/blocks/pgman.cpp
+++ b/storage/ndb/src/kernel/blocks/pgman.cpp
@@ -238,6 +238,13 @@ Pgman::execCONTINUEB(Signal* signal)
}
else
{
+ if (ERROR_INSERTED(11007))
+ {
+ ndbout << "No more writes..." << endl;
+ SET_ERROR_INSERT_VALUE(11008);
+ signal->theData[0] = 9999;
+ sendSignalWithDelay(CMVMI_REF, GSN_NDB_TAMPER, signal, 10000, 1);
+ }
signal->theData[0] = m_end_lcp_req.senderData;
sendSignal(m_end_lcp_req.senderRef, GSN_END_LCP_CONF, signal, 1, JBB);
}
@@ -493,6 +500,11 @@ Pgman::release_page_entry(Ptr<Page_entry>& ptr)
if (! (state & Page_entry::LOCKED))
ndbrequire(! (state & Page_entry::REQUEST));
+
+ if (ptr.p->m_copy_page_i != RNIL)
+ {
+ m_global_page_pool.release(ptr.p->m_copy_page_i);
+ }
set_page_state(ptr, 0);
m_page_hashlist.remove(ptr);
@@ -1142,7 +1154,8 @@ Pgman::process_cleanup(Signal* signal)
#ifdef VM_TRACE
debugOut << "PGMAN: " << ptr << " : process_cleanup" << endl;
#endif
- c_tup->disk_page_unmap_callback(ptr.p->m_real_page_i,
+ c_tup->disk_page_unmap_callback(0,
+ ptr.p->m_real_page_i,
ptr.p->m_dirty_count);
pageout(signal, ptr);
max_count--;
@@ -1180,6 +1193,11 @@ Pgman::move_cleanup_ptr(Ptr<Page_entry> ptr)
void
Pgman::execLCP_FRAG_ORD(Signal* signal)
{
+ if (ERROR_INSERTED(11008))
+ {
+ ndbout_c("Ignore LCP_FRAG_ORD");
+ return;
+ }
LcpFragOrd* ord = (LcpFragOrd*)signal->getDataPtr();
ndbrequire(ord->lcpId >= m_last_lcp_complete + 1 || m_last_lcp_complete == 0);
m_last_lcp = ord->lcpId;
@@ -1196,6 +1214,12 @@ Pgman::execLCP_FRAG_ORD(Signal* signal)
void
Pgman::execEND_LCP_REQ(Signal* signal)
{
+ if (ERROR_INSERTED(11008))
+ {
+ ndbout_c("Ignore END_LCP");
+ return;
+ }
+
EndLcpReq* req = (EndLcpReq*)signal->getDataPtr();
m_end_lcp_req = *req;
@@ -1274,7 +1298,8 @@ Pgman::process_lcp(Signal* signal)
{
DBG_LCP(" pageout()" << endl);
ptr.p->m_state |= Page_entry::LCP;
- c_tup->disk_page_unmap_callback(ptr.p->m_real_page_i,
+ c_tup->disk_page_unmap_callback(0,
+ ptr.p->m_real_page_i,
ptr.p->m_dirty_count);
pageout(signal, ptr);
}
@@ -1301,6 +1326,13 @@ Pgman::process_lcp(Signal* signal)
}
else
{
+ if (ERROR_INSERTED(11007))
+ {
+ ndbout << "No more writes..." << endl;
+ signal->theData[0] = 9999;
+ sendSignalWithDelay(CMVMI_REF, GSN_NDB_TAMPER, signal, 10000, 1);
+ SET_ERROR_INSERT_VALUE(11008);
+ }
signal->theData[0] = m_end_lcp_req.senderData;
sendSignal(m_end_lcp_req.senderRef, GSN_END_LCP_CONF, signal, 1, JBB);
}
@@ -1489,6 +1521,10 @@ Pgman::fswriteconf(Signal* signal, Ptr<Page_entry> ptr)
Page_state state = ptr.p->m_state;
ndbrequire(state & Page_entry::PAGEOUT);
+ c_tup->disk_page_unmap_callback(1,
+ ptr.p->m_real_page_i,
+ ptr.p->m_dirty_count);
+
state &= ~ Page_entry::PAGEOUT;
state &= ~ Page_entry::EMPTY;
state &= ~ Page_entry::DIRTY;
@@ -1588,8 +1624,11 @@ Pgman::fswritereq(Signal* signal, Ptr<Page_entry> ptr)
}
#endif
- sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal,
- FsReadWriteReq::FixedLength + 1, JBA);
+ if (!ERROR_INSERTED(11008))
+ {
+ sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal,
+ FsReadWriteReq::FixedLength + 1, JBA);
+ }
}
void
@@ -1739,7 +1778,7 @@ Pgman::get_page(Signal* signal, Ptr<Page_entry> ptr, Page_request page_req)
#endif
state |= Page_entry::REQUEST;
- if (only_request && req_flags & Page_request::EMPTY_PAGE)
+ if (only_request && (req_flags & Page_request::EMPTY_PAGE))
{
state |= Page_entry::EMPTY;
}
@@ -2401,7 +2440,8 @@ Pgman::execDUMP_STATE_ORD(Signal* signal)
if (pl_hash.find(ptr, key))
{
ndbout << "pageout " << ptr << endl;
- c_tup->disk_page_unmap_callback(ptr.p->m_real_page_i,
+ c_tup->disk_page_unmap_callback(0,
+ ptr.p->m_real_page_i,
ptr.p->m_dirty_count);
pageout(signal, ptr);
}
@@ -2452,6 +2492,16 @@ Pgman::execDUMP_STATE_ORD(Signal* signal)
{
SET_ERROR_INSERT_VALUE(11006);
}
+
+ if (signal->theData[0] == 11007)
+ {
+ SET_ERROR_INSERT_VALUE(11007);
+ }
+
+ if (signal->theData[0] == 11008)
+ {
+ SET_ERROR_INSERT_VALUE(11008);
+ }
}
// page cache client
diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
index b8cc438f514..7845b83693c 100644
--- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
@@ -3655,6 +3655,8 @@ Suma::execSUB_GCP_COMPLETE_REP(Signal* signal)
if(m_gcp_complete_rep_count && !c_subscriber_nodes.isclear())
{
+ CRASH_INSERTION(13033);
+
NodeReceiverGroup rg(API_CLUSTERMGR, c_subscriber_nodes);
sendSignal(rg, GSN_SUB_GCP_COMPLETE_REP, signal,
SubGcpCompleteRep::SignalLength, JBB);
@@ -3674,8 +3676,8 @@ Suma::execSUB_GCP_COMPLETE_REP(Signal* signal)
{
if(m_active_buckets.get(i))
continue;
-
- if(c_buckets[i].m_buffer_tail != RNIL)
+
+ if (!c_subscriber_nodes.isclear())
{
//Uint32* dst;
get_buffer_ptr(signal, i, gci, 0);
diff --git a/storage/ndb/src/kernel/blocks/tsman.cpp b/storage/ndb/src/kernel/blocks/tsman.cpp
index 8f61ec0cf7b..3a7003d56c8 100644
--- a/storage/ndb/src/kernel/blocks/tsman.cpp
+++ b/storage/ndb/src/kernel/blocks/tsman.cpp
@@ -299,7 +299,7 @@ Tsman::execDUMP_STATE_ORD(Signal* signal){
Uint32 new_bits = curr_bits ^ rand();
Local_key key = chunks[chunk].start_page;
key.m_page_no += page;
- ndbrequire(update_page_free_bits(signal, &key, new_bits, 0) == 0);
+ ndbrequire(update_page_free_bits(signal, &key, new_bits) == 0);
}
}
}
@@ -366,6 +366,20 @@ Tsman::execCREATE_FILEGROUP_REQ(Signal* signal){
CreateFilegroupImplRef::SignalLength, JBB);
}
+NdbOut&
+operator<<(NdbOut& out, const File_formats::Datafile::Extent_header & obj)
+{
+ out << "table: " << obj.m_table
+ << " fragment: " << obj.m_fragment_id << " ";
+ for(Uint32 i = 0; i<32; i++)
+ {
+ char t[2];
+ BaseString::snprintf(t, sizeof(t), "%x", obj.get_free_bits(i));
+ out << t;
+ }
+ return out;
+}
+
void
Tsman::execDROP_FILEGROUP_REQ(Signal* signal){
jamEntry();
@@ -1590,8 +1604,7 @@ Tsman::execFREE_EXTENT_REQ(Signal* signal)
int
Tsman::update_page_free_bits(Signal* signal,
Local_key *key,
- unsigned committed_bits,
- Uint64 lsn)
+ unsigned committed_bits)
{
jamEntry();
@@ -1626,6 +1639,18 @@ Tsman::update_page_free_bits(Signal* signal,
File_formats::Datafile::Extent_header* header =
page->get_header(val.m_extent_no, val.m_extent_size);
+ if (header->m_table == RNIL)
+ {
+ ndbout << "update page free bits page: " << *key
+ << " " << *header << endl;
+ }
+
+ if (0)
+ {
+ ndbout << "update page free bits page(" << committed_bits << ") "
+ << *key << " " << *header << endl;
+ }
+
ndbrequire(header->m_table != RNIL);
Uint32 page_no_in_extent = calc_page_no_in_extent(key->m_page_no, &val);
@@ -1637,7 +1662,7 @@ Tsman::update_page_free_bits(Signal* signal,
Uint32 src = header->get_free_bits(page_no_in_extent) & UNCOMMITTED_MASK;
header->update_free_bits(page_no_in_extent, src | committed_bits);
- m_page_cache_client.update_lsn(preq.m_page, lsn);
+ m_page_cache_client.update_lsn(preq.m_page, 0);
return 0;
}
@@ -1725,6 +1750,11 @@ Tsman::unmap_page(Signal* signal, Local_key *key, Uint32 uncommitted_bits)
File_formats::Datafile::Extent_header* header =
page->get_header(val.m_extent_no, val.m_extent_size);
+ if (header->m_table == RNIL)
+ {
+ ndbout << "trying to unmap page: " << *key
+ << " " << *header << endl;
+ }
ndbrequire(header->m_table != RNIL);
Uint32 page_no_in_extent = calc_page_no_in_extent(key->m_page_no, &val);
@@ -1746,9 +1776,7 @@ Tsman::restart_undo_page_free_bits(Signal* signal,
Uint32 tableId,
Uint32 fragId,
Local_key *key,
- unsigned bits,
- Uint64 undo_lsn,
- Uint64 page_lsn)
+ unsigned bits)
{
jamEntry();
@@ -1782,21 +1810,7 @@ Tsman::restart_undo_page_free_bits(Signal* signal,
(File_formats::Datafile::Extent_page*)ptr_p;
File_formats::Datafile::Extent_header* header =
page->get_header(val.m_extent_no, val.m_extent_size);
-
- Uint64 lsn = 0;
- lsn += page->m_page_header.m_page_lsn_hi; lsn <<= 32;
- lsn += page->m_page_header.m_page_lsn_lo;
-
- if (undo_lsn > lsn && undo_lsn > page_lsn)
- {
- if (DBG_UNDO)
- ndbout << "tsman: ignore " << undo_lsn << "(" << lsn << ", "
- << page_lsn << ") "
- << *key << " "
- << " -> " << bits << endl;
- return 0;
- }
-
+
if (header->m_table == RNIL)
{
if (DBG_UNDO)
@@ -1815,7 +1829,7 @@ Tsman::restart_undo_page_free_bits(Signal* signal,
*/
if (DBG_UNDO)
{
- ndbout << "tsman: apply " << undo_lsn << "(" << lsn << ") "
+ ndbout << "tsman: apply "
<< *key << " " << (src & COMMITTED_MASK)
<< " -> " << bits << endl;
}
@@ -1863,7 +1877,7 @@ Tsman::execALLOC_PAGE_REQ(Signal* signal)
/**
* Handling of unmapped extent header pages is not implemented
*/
- int flags = 0;
+ int flags = Page_cache_client::DIRTY_REQ;
int real_page_id;
Uint32 page_no;
Uint32 src_bits;
diff --git a/storage/ndb/src/kernel/blocks/tsman.hpp b/storage/ndb/src/kernel/blocks/tsman.hpp
index 1293cc54141..20019e6d4d1 100644
--- a/storage/ndb/src/kernel/blocks/tsman.hpp
+++ b/storage/ndb/src/kernel/blocks/tsman.hpp
@@ -209,12 +209,12 @@ private:
void load_extent_page_callback(Signal*, Uint32, Uint32);
void create_file_ref(Signal*, Ptr<Tablespace>, Ptr<Datafile>,
Uint32,Uint32,Uint32);
- int update_page_free_bits(Signal*, Local_key*, unsigned committed_bits,
- Uint64 lsn);
+ int update_page_free_bits(Signal*, Local_key*, unsigned committed_bits);
+
int get_page_free_bits(Signal*, Local_key*, unsigned*, unsigned*);
int unmap_page(Signal*, Local_key*, unsigned uncommitted_bits);
int restart_undo_page_free_bits(Signal*, Uint32, Uint32, Local_key*,
- unsigned committed_bits, Uint64, Uint64);
+ unsigned committed_bits);
int alloc_extent(Signal* signal, Uint32 tablespace, Local_key* key);
int alloc_page_from_extent(Signal*, Uint32, Local_key*, Uint32 bits);
@@ -320,7 +320,7 @@ public:
/**
* Update page free bits
*/
- int update_page_free_bits(Local_key*, unsigned bits, Uint64 lsn);
+ int update_page_free_bits(Local_key*, unsigned bits);
/**
* Get page free bits
@@ -336,8 +336,7 @@ public:
/**
* Undo handling of page bits
*/
- int restart_undo_page_free_bits(Local_key*, unsigned bits,
- Uint64 lsn, Uint64 page_lsn);
+ int restart_undo_page_free_bits(Local_key*, unsigned bits);
/**
* Get tablespace info
@@ -417,10 +416,9 @@ Tablespace_client::free_extent(Local_key* key, Uint64 lsn)
inline
int
Tablespace_client::update_page_free_bits(Local_key *key,
- unsigned committed_bits,
- Uint64 lsn)
+ unsigned committed_bits)
{
- return m_tsman->update_page_free_bits(m_signal, key, committed_bits, lsn);
+ return m_tsman->update_page_free_bits(m_signal, key, committed_bits);
}
inline
@@ -442,17 +440,13 @@ Tablespace_client::unmap_page(Local_key *key, unsigned uncommitted_bits)
inline
int
Tablespace_client::restart_undo_page_free_bits(Local_key* key,
- unsigned committed_bits,
- Uint64 lsn,
- Uint64 page_lsn)
+ unsigned committed_bits)
{
return m_tsman->restart_undo_page_free_bits(m_signal,
m_table_id,
m_fragment_id,
key,
- committed_bits,
- lsn,
- page_lsn);
+ committed_bits);
}
#endif
diff --git a/storage/ndb/src/mgmclient/CommandInterpreter.cpp b/storage/ndb/src/mgmclient/CommandInterpreter.cpp
index 875cc2771ae..9e8910c9649 100644
--- a/storage/ndb/src/mgmclient/CommandInterpreter.cpp
+++ b/storage/ndb/src/mgmclient/CommandInterpreter.cpp
@@ -269,8 +269,8 @@ static const char* helpText =
"CLUSTERLOG TOGGLE [<severity>] ... Toggle severity filter on/off\n"
"CLUSTERLOG INFO Print cluster log information\n"
"<id> START Start data node (started with -n)\n"
-"<id> RESTART [-n] [-i] Restart data or management server node\n"
-"<id> STOP Stop data or management server node\n"
+"<id> RESTART [-n] [-i] [-a] Restart data or management server node\n"
+"<id> STOP [-a] Stop data or management server node\n"
"ENTER SINGLE USER MODE <id> Enter single user mode\n"
"EXIT SINGLE USER MODE Exit single user mode\n"
"<id> STATUS Print status\n"
@@ -434,7 +434,7 @@ static const char* helpTextRestart =
" NDB Cluster -- Management Client -- Help for RESTART command\n"
"---------------------------------------------------------------------------\n"
"RESTART Restart data or management server node\n\n"
-"<id> RESTART [-n] [-i] \n"
+"<id> RESTART [-n] [-i] [-a]\n"
" Restart the data or management node <id>(or All data nodes).\n\n"
" -n (--nostart) restarts the node but does not\n"
" make it join the cluster. Use '<id> START' to\n"
@@ -445,6 +445,7 @@ static const char* helpTextRestart =
" in the same node group during start up.\n\n"
" Consult the documentation before using -i.\n\n"
" INCORRECT USE OF -i WILL CAUSE DATA LOSS!\n"
+" -a Aborts the node, not syncing GCP.\n"
;
static const char* helpTextStop =
@@ -452,10 +453,11 @@ static const char* helpTextStop =
" NDB Cluster -- Management Client -- Help for STOP command\n"
"---------------------------------------------------------------------------\n"
"STOP Stop data or management server node\n\n"
-"<id> STOP Stop the data or management server node <id>.\n\n"
+"<id> STOP [-a] Stop the data or management server node <id>.\n\n"
" ALL STOP will just stop all data nodes.\n\n"
" If you desire to also shut down management servers,\n"
-" use SHUTDOWN instead.\n"
+" use SHUTDOWN instead.\n"
+" -a Aborts the node, not syncing GCP.\n"
;
static const char* helpTextEnterSingleUserMode =
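
The new -a flag requests an abort-style stop or restart from the management client, for example (node id 3 is made up):

ndb_mgm> 3 STOP -a
ndb_mgm> 3 RESTART -a
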
diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
index af708664a69..ee5bb5103d8 100644
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
@@ -234,10 +234,10 @@ MgmtSrvr::startEventLog()
}
}
-void
-MgmtSrvr::stopEventLog()
+void
+MgmtSrvr::stopEventLog()
{
- // Nothing yet
+ g_eventLogger.close();
}
bool
diff --git a/storage/ndb/src/ndbapi/NdbBlob.cpp b/storage/ndb/src/ndbapi/NdbBlob.cpp
index 0fc96add084..f3d1dbe3dd1 100644
--- a/storage/ndb/src/ndbapi/NdbBlob.cpp
+++ b/storage/ndb/src/ndbapi/NdbBlob.cpp
@@ -1141,7 +1141,7 @@ NdbBlob::readTableParts(char* buf, Uint32 part, Uint32 count)
* table tuple does not fully protect blob parts since DBTUP
* commits each tuple separately.
*/
- tOp->readTuple() == -1 ||
+ tOp->readTuple(NdbOperation::LM_SimpleRead) == -1 ||
setPartKeyValue(tOp, part + n) == -1 ||
tOp->getValue((Uint32)3, buf) == NULL) {
setErrorCode(tOp);
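
Blob part reads now use the LM_SimpleRead lock mode that is re-enabled elsewhere in this patch. A hedged NDB API sketch of a simple read against a user table (table, column and key are made up; error handling trimmed):

NdbTransaction* trans = ndb->startTransaction();        // 'ndb' assumed connected
NdbOperation* op = trans->getNdbOperation("my_table");   // hypothetical table
op->readTuple(NdbOperation::LM_SimpleRead);               // lock held only while reading
op->equal("pk", (Uint32)42);                              // hypothetical primary key
NdbRecAttr* val = op->getValue("payload");                // hypothetical column
if (trans->execute(NdbTransaction::Commit) == -1) {
  // inspect trans->getNdbError()
}
ndb->closeTransaction(trans);
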
diff --git a/storage/ndb/src/ndbapi/NdbIndexOperation.cpp b/storage/ndb/src/ndbapi/NdbIndexOperation.cpp
index fc19bd251d4..921769f09e3 100644
--- a/storage/ndb/src/ndbapi/NdbIndexOperation.cpp
+++ b/storage/ndb/src/ndbapi/NdbIndexOperation.cpp
@@ -85,6 +85,9 @@ int NdbIndexOperation::readTuple(NdbOperation::LockMode lm)
case LM_CommittedRead:
return readTuple();
break;
+ case LM_SimpleRead:
+ return readTuple();
+ break;
default:
return -1;
};
diff --git a/storage/ndb/src/ndbapi/NdbOperation.cpp b/storage/ndb/src/ndbapi/NdbOperation.cpp
index 50531292e40..ddaf5d0b233 100644
--- a/storage/ndb/src/ndbapi/NdbOperation.cpp
+++ b/storage/ndb/src/ndbapi/NdbOperation.cpp
@@ -429,3 +429,9 @@ NdbOperation::getTable() const
{
return m_currentTable;
}
+
+NdbTransaction*
+NdbOperation::getNdbTransaction()
+{
+ return theNdbCon;
+}
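
The new accessor makes it possible to get back to the owning transaction from an operation handle, e.g. (sketch):

NdbTransaction* trans = op->getNdbTransaction();
if (trans != NULL) {
  // inspect transaction-level state such as trans->getNdbError()
}
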
diff --git a/storage/ndb/src/ndbapi/NdbOperationDefine.cpp b/storage/ndb/src/ndbapi/NdbOperationDefine.cpp
index c9459ff911c..21a6185347e 100644
--- a/storage/ndb/src/ndbapi/NdbOperationDefine.cpp
+++ b/storage/ndb/src/ndbapi/NdbOperationDefine.cpp
@@ -131,6 +131,8 @@ NdbOperation::readTuple(NdbOperation::LockMode lm)
case LM_CommittedRead:
return committedRead();
break;
+ case LM_SimpleRead:
+ return simpleRead();
default:
return -1;
};
@@ -185,24 +187,22 @@ NdbOperation::readTupleExclusive()
int
NdbOperation::simpleRead()
{
- /**
- * Currently/still disabled
- */
- return readTuple();
-#if 0
+ NdbTransaction* tNdbCon = theNdbCon;
int tErrorLine = theErrorLine;
if (theStatus == Init) {
theStatus = OperationDefined;
theOperationType = ReadRequest;
theSimpleIndicator = 1;
+ theDirtyIndicator = 0;
theErrorLine = tErrorLine++;
- theLockMode = LM_Read;
+ theLockMode = LM_SimpleRead;
+ m_abortOption = AO_IgnoreError;
+ tNdbCon->theSimpleState = 0;
return 0;
} else {
setErrorCode(4200);
return -1;
}//if
-#endif
}//NdbOperation::simpleRead()
/*****************************************************************************
@@ -338,28 +338,32 @@ NdbOperation::setReadLockMode(LockMode lockMode)
{
/* We only support changing lock mode for read operations at this time. */
assert(theOperationType == ReadRequest || theOperationType == ReadExclusive);
- switch (lockMode)
- {
- case LM_CommittedRead:
- theOperationType= ReadRequest;
- theSimpleIndicator= 1;
- theDirtyIndicator= 1;
- break;
- case LM_Read:
- theNdbCon->theSimpleState= 0;
- theOperationType= ReadRequest;
- theSimpleIndicator= 0;
- theDirtyIndicator= 0;
- break;
- case LM_Exclusive:
- theNdbCon->theSimpleState= 0;
- theOperationType= ReadExclusive;
- theSimpleIndicator= 0;
- theDirtyIndicator= 0;
- break;
- default:
- /* Not supported / invalid. */
- assert(false);
+ switch (lockMode) {
+ case LM_CommittedRead: /* TODO, check theNdbCon->theSimpleState */
+ theOperationType= ReadRequest;
+ theSimpleIndicator= 1;
+ theDirtyIndicator= 1;
+ break;
+ case LM_SimpleRead: /* TODO, check theNdbCon->theSimpleState */
+ theOperationType= ReadRequest;
+ theSimpleIndicator= 1;
+ theDirtyIndicator= 0;
+ break;
+ case LM_Read:
+ theNdbCon->theSimpleState= 0;
+ theOperationType= ReadRequest;
+ theSimpleIndicator= 0;
+ theDirtyIndicator= 0;
+ break;
+ case LM_Exclusive:
+ theNdbCon->theSimpleState= 0;
+ theOperationType= ReadExclusive;
+ theSimpleIndicator= 0;
+ theDirtyIndicator= 0;
+ break;
+ default:
+ /* Not supported / invalid. */
+ assert(false);
}
theLockMode= lockMode;
}
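
After this change setReadLockMode() drives the simple/dirty indicators per lock mode as follows; a reference sketch of the mapping (not library code):

struct ReadIndicators { bool simpleIndicator; bool dirtyIndicator; };

ReadIndicators indicators_for(NdbOperation::LockMode lm)
{
  ReadIndicators r = { false, false };                   // LM_Read / LM_Exclusive
  switch (lm) {
  case NdbOperation::LM_CommittedRead:
    r.simpleIndicator = true; r.dirtyIndicator = true;   break;
  case NdbOperation::LM_SimpleRead:
    r.simpleIndicator = true; r.dirtyIndicator = false;  break;
  default:
    break;
  }
  return r;
}
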
@@ -404,9 +408,8 @@ NdbOperation::getValue_impl(const NdbColumnImpl* tAttrInfo, char* aValue)
return NULL;
}//if
}//if
- Uint32 ah;
- AttributeHeader::init(&ah, tAttrInfo->m_attrId, 0);
- if (insertATTRINFO(ah) != -1) {
+ AttributeHeader ah(tAttrInfo->m_attrId, 0);
+ if (insertATTRINFO(ah.m_value) != -1) {
// Insert Attribute Id into ATTRINFO part.
/************************************************************************
@@ -532,12 +535,11 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
tAttrId = tAttrInfo->m_attrId;
m_no_disk_flag &= (tAttrInfo->m_storageType == NDB_STORAGETYPE_DISK ? 0:1);
const char *aValue = aValuePassed;
- Uint32 ahValue;
if (aValue == NULL) {
if (tAttrInfo->m_nullable) {
- AttributeHeader& ah = AttributeHeader::init(&ahValue, tAttrId, 0);
+ AttributeHeader ah(tAttrId, 0);
ah.setNULL();
- insertATTRINFO(ahValue);
+ insertATTRINFO(ah.m_value);
// Insert Attribute Id with the value
// NULL into ATTRINFO part.
DBUG_RETURN(0);
@@ -573,8 +575,8 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
// Excluding bits in last word
const Uint32 sizeInWords = sizeInBytes / 4;
- (void) AttributeHeader::init(&ahValue, tAttrId, sizeInBytes);
- insertATTRINFO( ahValue );
+ AttributeHeader ah(tAttrId, sizeInBytes);
+ insertATTRINFO( ah.m_value );
/***********************************************************************
* Check if the pointer of the value passed is aligned on a 4 byte boundary.
diff --git a/storage/ndb/src/ndbapi/NdbOperationExec.cpp b/storage/ndb/src/ndbapi/NdbOperationExec.cpp
index 9fe85265a0c..27672e0458c 100644
--- a/storage/ndb/src/ndbapi/NdbOperationExec.cpp
+++ b/storage/ndb/src/ndbapi/NdbOperationExec.cpp
@@ -175,12 +175,11 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr,
Uint8 tInterpretIndicator = theInterpretIndicator;
Uint8 tNoDisk = m_no_disk_flag;
-//-------------------------------------------------------------
-// Simple state is set if start and commit is set and it is
-// a read request. Otherwise it is set to zero.
-//-------------------------------------------------------------
+ /**
+ * A dirty read, can not abort the transaction
+ */
Uint8 tReadInd = (theOperationType == ReadRequest);
- Uint8 tSimpleState = tReadInd & tSimpleIndicator;
+ Uint8 tDirtyState = tReadInd & tDirtyIndicator;
tcKeyReq->transId1 = tTransId1;
tcKeyReq->transId2 = tTransId2;
@@ -206,8 +205,8 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr,
tcKeyReq->setOperationType(tReqInfo, tOperationType);
tcKeyReq->setKeyLength(tReqInfo, tTupKeyLen);
- // A simple read is always ignore error
- abortOption = tSimpleState ? (Uint8) AO_IgnoreError : (Uint8) abortOption;
+ // A dirty read is always ignore error
+ abortOption = tDirtyState ? (Uint8) AO_IgnoreError : (Uint8) abortOption;
tcKeyReq->setAbortOption(tReqInfo, abortOption);
m_abortOption = abortOption;
@@ -549,8 +548,8 @@ NdbOperation::receiveTCKEYREF( NdbApiSignal* aSignal)
theStatus = Finished;
theReceiver.m_received_result_length = ~0;
- // not simple read
- if(! (theOperationType == ReadRequest && theSimpleIndicator))
+ // not dirty read
+ if(! (theOperationType == ReadRequest && theDirtyIndicator))
{
theNdbCon->OpCompleteFailure(this);
return -1;
diff --git a/storage/ndb/src/ndbapi/NdbReceiver.cpp b/storage/ndb/src/ndbapi/NdbReceiver.cpp
index 34b3a14ac6e..5a311bcbefe 100644
--- a/storage/ndb/src/ndbapi/NdbReceiver.cpp
+++ b/storage/ndb/src/ndbapi/NdbReceiver.cpp
@@ -283,7 +283,7 @@ NdbReceiver::execTRANSID_AI(const Uint32* aDataPtr, Uint32 aLength)
Uint32 tmp = m_received_result_length + aLength;
m_received_result_length = tmp;
- return (tmp == exp || (exp > TcKeyConf::SimpleReadBit) ? 1 : 0);
+ return (tmp == exp || (exp > TcKeyConf::DirtyReadBit) ? 1 : 0);
}
int
diff --git a/storage/ndb/src/ndbapi/NdbScanFilter.cpp b/storage/ndb/src/ndbapi/NdbScanFilter.cpp
index fb47772fdea..25f74ce71a4 100644
--- a/storage/ndb/src/ndbapi/NdbScanFilter.cpp
+++ b/storage/ndb/src/ndbapi/NdbScanFilter.cpp
@@ -14,11 +14,15 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <NdbScanFilter.hpp>
+#include <Ndb.hpp>
#include <NdbOperation.hpp>
#include "NdbDictionaryImpl.hpp"
#include <Vector.hpp>
#include <NdbOut.hpp>
#include <Interpreter.hpp>
+#include <signaldata/AttrInfo.hpp>
+#include "NdbApiSignal.hpp"
+#include "NdbUtil.hpp"
#ifdef VM_TRACE
#include <NdbEnv.h>
@@ -52,14 +56,37 @@ public:
int cond_col_const(Interpreter::BinaryCondition, Uint32 attrId,
const void * value, Uint32 len);
+
+ bool m_abort_on_too_large;
+
+ NdbOperation::OperationStatus m_initial_op_status;
+ Uint32 m_initial_AI_size;
+ Uint32 m_max_size;
+
+ Uint32 get_size() {
+ assert(m_operation->theTotalCurrAI_Len >= m_initial_AI_size);
+ return m_operation->theTotalCurrAI_Len - m_initial_AI_size;
+ }
+ bool check_size() {
+ if (get_size() <= m_max_size)
+ return true;
+ handle_filter_too_large();
+ return false;
+ }
+ void handle_filter_too_large();
+
+ NdbError m_error;
};
const Uint32 LabelExit = ~0;
-NdbScanFilter::NdbScanFilter(class NdbOperation * op)
+NdbScanFilter::NdbScanFilter(class NdbOperation * op,
+ bool abort_on_too_large,
+ Uint32 max_size)
: m_impl(* new NdbScanFilterImpl())
{
+ DBUG_ENTER("NdbScanFilter::NdbScanFilter");
m_impl.m_current.m_group = (NdbScanFilter::Group)0;
m_impl.m_current.m_popCount = 0;
m_impl.m_current.m_ownLabel = 0;
@@ -69,6 +96,21 @@ NdbScanFilter::NdbScanFilter(class NdbOperation * op)
m_impl.m_latestAttrib = ~0;
m_impl.m_operation = op;
m_impl.m_negative = 0;
+
+ DBUG_PRINT("info", ("op status: %d tot AI: %u in curr: %u",
+ op->theStatus,
+ op->theTotalCurrAI_Len, op->theAI_LenInCurrAI));
+
+ m_impl.m_abort_on_too_large = abort_on_too_large;
+
+ m_impl.m_initial_op_status = op->theStatus;
+ m_impl.m_initial_AI_size = op->theTotalCurrAI_Len;
+ if (max_size > NDB_MAX_SCANFILTER_SIZE_IN_WORDS)
+ max_size = NDB_MAX_SCANFILTER_SIZE_IN_WORDS;
+ m_impl.m_max_size = max_size;
+
+ m_impl.m_error.code = 0;
+ DBUG_VOID_RETURN;
}
NdbScanFilter::~NdbScanFilter(){
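
The filter-building methods now propagate errors from the interpreter calls and enforce a size limit (NDB_MAX_SCANFILTER_SIZE_IN_WORDS), so each step of a filter program should be checked. A hedged usage sketch on an existing scan operation scanOp (column id and value are illustrative):

NdbScanFilter filter(scanOp);                  // optional new args: abort_on_too_large, max_size
if (filter.begin(NdbScanFilter::AND) == -1 ||
    filter.ge(0, (Uint32)10) == -1 ||           // column 0 >= 10
    filter.end() == -1)
{
  // e.g. FilterTooLarge when the generated program exceeds max_size;
  // check scanOp->getNdbError() and abandon the scan definition
}
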
@@ -200,30 +242,38 @@ NdbScanFilter::end(){
switch(tmp.m_group){
case NdbScanFilter::AND:
if(tmp.m_trueLabel == (Uint32)~0){
- m_impl.m_operation->interpret_exit_ok();
+ if (m_impl.m_operation->interpret_exit_ok() == -1)
+ return -1;
} else {
- m_impl.m_operation->branch_label(tmp.m_trueLabel);
+ if (m_impl.m_operation->branch_label(tmp.m_trueLabel) == -1)
+ return -1;
}
break;
case NdbScanFilter::NAND:
if(tmp.m_trueLabel == (Uint32)~0){
- m_impl.m_operation->interpret_exit_nok();
+ if (m_impl.m_operation->interpret_exit_nok() == -1)
+ return -1;
} else {
- m_impl.m_operation->branch_label(tmp.m_falseLabel);
+ if (m_impl.m_operation->branch_label(tmp.m_falseLabel) == -1)
+ return -1;
}
break;
case NdbScanFilter::OR:
if(tmp.m_falseLabel == (Uint32)~0){
- m_impl.m_operation->interpret_exit_nok();
+ if (m_impl.m_operation->interpret_exit_nok() == -1)
+ return -1;
} else {
- m_impl.m_operation->branch_label(tmp.m_falseLabel);
+ if (m_impl.m_operation->branch_label(tmp.m_falseLabel) == -1)
+ return -1;
}
break;
case NdbScanFilter::NOR:
if(tmp.m_falseLabel == (Uint32)~0){
- m_impl.m_operation->interpret_exit_ok();
+ if (m_impl.m_operation->interpret_exit_ok() == -1)
+ return -1;
} else {
- m_impl.m_operation->branch_label(tmp.m_trueLabel);
+ if (m_impl.m_operation->branch_label(tmp.m_trueLabel) == -1)
+ return -1;
}
break;
default:
@@ -231,24 +281,29 @@ NdbScanFilter::end(){
return -1;
}
- m_impl.m_operation->def_label(tmp.m_ownLabel);
+ if (m_impl.m_operation->def_label(tmp.m_ownLabel) == -1)
+ return -1;
if(m_impl.m_stack.size() == 0){
switch(tmp.m_group){
case NdbScanFilter::AND:
case NdbScanFilter::NOR:
- m_impl.m_operation->interpret_exit_nok();
+ if (m_impl.m_operation->interpret_exit_nok() == -1)
+ return -1;
break;
case NdbScanFilter::OR:
case NdbScanFilter::NAND:
- m_impl.m_operation->interpret_exit_ok();
+ if (m_impl.m_operation->interpret_exit_ok() == -1)
+ return -1;
break;
default:
m_impl.m_operation->setErrorCodeAbort(4260);
return -1;
}
}
-
+
+ if (!m_impl.check_size())
+ return -1;
return 0;
}
@@ -261,10 +316,16 @@ NdbScanFilter::istrue(){
}
if(m_impl.m_current.m_trueLabel == (Uint32)~0){
- return m_impl.m_operation->interpret_exit_ok();
+ if (m_impl.m_operation->interpret_exit_ok() == -1)
+ return -1;
} else {
- return m_impl.m_operation->branch_label(m_impl.m_current.m_trueLabel);
+ if (m_impl.m_operation->branch_label(m_impl.m_current.m_trueLabel) == -1)
+ return -1;
}
+
+ if (!m_impl.check_size())
+ return -1;
+ return 0;
}
int
@@ -276,12 +337,22 @@ NdbScanFilter::isfalse(){
}
if(m_impl.m_current.m_falseLabel == (Uint32)~0){
- return m_impl.m_operation->interpret_exit_nok();
+ if (m_impl.m_operation->interpret_exit_nok() == -1)
+ return -1;
} else {
- return m_impl.m_operation->branch_label(m_impl.m_current.m_falseLabel);
+ if (m_impl.m_operation->branch_label(m_impl.m_current.m_falseLabel) == -1)
+ return -1;
}
+
+ if (!m_impl.check_size())
+ return -1;
+ return 0;
}
+NdbOperation *
+NdbScanFilter::getNdbOperation(){
+ return m_impl.m_operation;
+}
#define action(x, y, z)
@@ -330,7 +401,11 @@ NdbScanFilterImpl::cond_col(Interpreter::UnaryCondition op, Uint32 AttrId){
}
Branch1 branch = table2[op].m_branches[m_current.m_group];
- (m_operation->* branch)(AttrId, m_current.m_ownLabel);
+ if ((m_operation->* branch)(AttrId, m_current.m_ownLabel) == -1)
+ return -1;
+
+ if (!check_size())
+ return -1;
return 0;
}
@@ -463,8 +538,12 @@ NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition op,
return -1;
}
- int ret = (m_operation->* branch)(AttrId, value, len, false, m_current.m_ownLabel);
- return ret;
+ if ((m_operation->* branch)(AttrId, value, len, false, m_current.m_ownLabel) == -1)
+ return -1;
+
+ if (!check_size())
+ return -1;
+ return 0;
}
int
@@ -490,7 +569,130 @@ NdbScanFilter::cmp(BinaryCondition cond, int ColId,
return m_impl.cond_col_const(Interpreter::NOT_LIKE, ColId, val, len);
}
return -1;
-}
+}
+
+void
+NdbScanFilterImpl::handle_filter_too_large()
+{
+ DBUG_ENTER("NdbScanFilterImpl::handle_filter_too_large");
+
+ NdbOperation* const op = m_operation;
+ m_error.code = NdbScanFilter::FilterTooLarge;
+ if (m_abort_on_too_large)
+ op->setErrorCodeAbort(m_error.code);
+
+ /*
+ * Possible interpreted parts at this point are:
+ *
+ * 1. initial read
+ * 2. interpreted program
+ *
+ * It is assumed that NdbScanFilter has created all of 2
+ * so that we don't have to save interpreter state.
+ */
+
+ const Uint32 size = get_size();
+ assert(size != 0);
+
+ // new ATTRINFO size
+ const Uint32 new_size = m_initial_AI_size;
+
+ // find last signal for new size
+ assert(op->theFirstATTRINFO != NULL);
+ NdbApiSignal* lastSignal = op->theFirstATTRINFO;
+ Uint32 n = 0;
+ while (n + AttrInfo::DataLength < new_size) {
+ lastSignal = lastSignal->next();
+ assert(lastSignal != NULL);
+ n += AttrInfo::DataLength;
+ }
+ assert(n < size);
+
+ // release remaining signals
+ NdbApiSignal* tSignal = lastSignal->next();
+ op->theNdb->releaseSignalsInList(&tSignal);
+ lastSignal->next(NULL);
+
+ // length of lastSignal
+ const Uint32 new_curr = AttrInfo::HeaderLength + new_size - n;
+ assert(new_curr <= 25);
+
+ DBUG_PRINT("info", ("op status: %d->%d tot AI: %u->%u in curr: %u->%u",
+ op->theStatus, m_initial_op_status,
+ op->theTotalCurrAI_Len, new_size,
+ op->theAI_LenInCurrAI, new_curr));
+
+ // reset op state
+ op->theStatus = m_initial_op_status;
+
+ // reset interpreter state to initial
+
+ NdbBranch* tBranch = op->theFirstBranch;
+ while (tBranch != NULL) {
+ NdbBranch* tmp = tBranch;
+ tBranch = tBranch->theNext;
+ op->theNdb->releaseNdbBranch(tmp);
+ }
+ op->theFirstBranch = NULL;
+ op->theLastBranch = NULL;
+
+ NdbLabel* tLabel = op->theFirstLabel;
+ while (tLabel != NULL) {
+ NdbLabel* tmp = tLabel;
+ tLabel = tLabel->theNext;
+ op->theNdb->releaseNdbLabel(tmp);
+ }
+ op->theFirstLabel = NULL;
+ op->theLastLabel = NULL;
+
+ NdbCall* tCall = op->theFirstCall;
+ while (tCall != NULL) {
+ NdbCall* tmp = tCall;
+ tCall = tCall->theNext;
+ op->theNdb->releaseNdbCall(tmp);
+ }
+ op->theFirstCall = NULL;
+ op->theLastCall = NULL;
+
+ NdbSubroutine* tSubroutine = op->theFirstSubroutine;
+ while (tSubroutine != NULL) {
+ NdbSubroutine* tmp = tSubroutine;
+ tSubroutine = tSubroutine->theNext;
+ op->theNdb->releaseNdbSubroutine(tmp);
+ }
+ op->theFirstSubroutine = NULL;
+ op->theLastSubroutine = NULL;
+
+ op->theNoOfLabels = 0;
+ op->theNoOfSubroutines = 0;
+
+ // reset AI size
+ op->theTotalCurrAI_Len = new_size;
+ op->theAI_LenInCurrAI = new_curr;
+
+ // reset signal pointers
+ op->theCurrentATTRINFO = lastSignal;
+ op->theATTRINFOptr = &lastSignal->getDataPtrSend()[new_curr];
+
+ // interpreter sizes are set later somewhere
+
+ DBUG_VOID_RETURN;
+}
+
+static void
+update(const NdbError & _err){
+ NdbError & error = (NdbError &) _err;
+ ndberror_struct ndberror = (ndberror_struct)error;
+ ndberror_update(&ndberror);
+ error = NdbError(ndberror);
+}
+
+const NdbError &
+NdbScanFilter::getNdbError() const
+{
+ update(m_impl.m_error);
+ return m_impl.m_error;
+}
#if 0
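
The constructor and end()/istrue()/isfalse() changes above give NdbScanFilter an explicit word budget and a recorded error instead of silent over-growth. A minimal sketch of how a caller might use the extended interface, assuming an already defined scan operation and an unsigned 32-bit column at attribute id 2 (both placeholders, not part of this patch):

    #include <NdbApi.hpp>

    // Build a filter capped at 5000 interpreted words; do not abort the
    // operation if the generated program grows too large.
    static int
    defineBoundedFilter(NdbScanOperation* scanOp, Uint32 lowVal)
    {
      NdbScanFilter filter(scanOp, false /* abort_on_too_large */, 5000);

      if (filter.begin(NdbScanFilter::AND) == -1 ||
          filter.cmp(NdbScanFilter::COND_GE, 2, &lowVal, sizeof(lowVal)) == -1 ||
          filter.end() == -1)
      {
        if (filter.getNdbError().code == NdbScanFilter::FilterTooLarge)
          return 0;   // filter discarded (error 4294); scan simply runs unfiltered
        return -1;    // genuine error, already set on the operation
      }
      return 0;
    }
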
diff --git a/storage/ndb/src/ndbapi/NdbScanOperation.cpp b/storage/ndb/src/ndbapi/NdbScanOperation.cpp
index 89782453a72..afbec070ac8 100644
--- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp
+++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp
@@ -237,6 +237,7 @@ NdbScanOperation::setReadLockMode(LockMode lockMode)
lockHoldMode= false;
readCommitted= true;
break;
+ case LM_SimpleRead:
case LM_Read:
lockExcl= false;
lockHoldMode= true;
@@ -872,6 +873,10 @@ NdbScanOperation::doSendScan(int aProcessorId)
// sending it. This could not be done in openScan because
// we created the ATTRINFO signals after the SCAN_TABREQ signal.
ScanTabReq * const req = CAST_PTR(ScanTabReq, tSignal->getDataPtrSend());
+ if (unlikely(theTotalCurrAI_Len > ScanTabReq::MaxTotalAttrInfo)) {
+ setErrorCode(4257);
+ return -1;
+ }
req->attrLenKeyLen = (tupKeyLen << 16) | theTotalCurrAI_Len;
Uint32 tmp = req->requestInfo;
ScanTabReq::setDistributionKeyFlag(tmp, theDistrKeyIndicator_);
diff --git a/storage/ndb/src/ndbapi/NdbTransaction.cpp b/storage/ndb/src/ndbapi/NdbTransaction.cpp
index 55c6f0f4b99..bc59df722aa 100644
--- a/storage/ndb/src/ndbapi/NdbTransaction.cpp
+++ b/storage/ndb/src/ndbapi/NdbTransaction.cpp
@@ -1787,8 +1787,8 @@ from other transactions.
const Uint32 tAttrInfoLen = *tPtr++;
if (tOp && tOp->checkMagicNumber()) {
Uint32 done = tOp->execTCOPCONF(tAttrInfoLen);
- if(tAttrInfoLen > TcKeyConf::SimpleReadBit){
- Uint32 node = tAttrInfoLen & (~TcKeyConf::SimpleReadBit);
+ if(tAttrInfoLen > TcKeyConf::DirtyReadBit){
+ Uint32 node = tAttrInfoLen & (~TcKeyConf::DirtyReadBit);
NdbNodeBitmask::set(m_db_nodes, node);
if(NdbNodeBitmask::get(m_failed_db_nodes, node) && !done)
{
@@ -2182,7 +2182,7 @@ NdbTransaction::report_node_failure(Uint32 id){
* 4) X X
*/
NdbOperation* tmp = theFirstExecOpInList;
- const Uint32 len = TcKeyConf::SimpleReadBit | id;
+ const Uint32 len = TcKeyConf::DirtyReadBit | id;
Uint32 tNoComp = theNoOfOpCompleted;
Uint32 tNoSent = theNoOfOpSent;
Uint32 count = 0;
diff --git a/storage/ndb/src/ndbapi/ndberror.c b/storage/ndb/src/ndbapi/ndberror.c
index a0417e5b118..0ad2faff76a 100644
--- a/storage/ndb/src/ndbapi/ndberror.c
+++ b/storage/ndb/src/ndbapi/ndberror.c
@@ -624,6 +624,7 @@ ErrorBundle ErrorCodes[] = {
{ 4273, DMEC, IE, "No blob table in dict cache" },
{ 4274, DMEC, IE, "Corrupted main table PK in blob operation" },
{ 4275, DMEC, AE, "The blob method is incompatible with operation type or lock mode" },
+ { 4294, DMEC, AE, "Scan filter is too large, discarded" },
{ NO_CONTACT_WITH_PROCESS, DMEC, AE,
"No contact with the process (dead ?)."},
diff --git a/storage/ndb/test/include/HugoTransactions.hpp b/storage/ndb/test/include/HugoTransactions.hpp
index e2b12f261a8..e8f7b33e0ed 100644
--- a/storage/ndb/test/include/HugoTransactions.hpp
+++ b/storage/ndb/test/include/HugoTransactions.hpp
@@ -36,6 +36,16 @@ public:
int updateValue = 0,
bool abort = false);
+ int loadTableStartFrom(Ndb*,
+ int startFrom,
+ int records,
+ int batch = 512,
+ bool allowConstraintViolation = true,
+ int doSleep = 0,
+ bool oneTrans = false,
+ int updateValue = 0,
+ bool abort = false);
+
int scanReadRecords(Ndb*,
int records,
int abort = 0,
@@ -56,6 +66,11 @@ public:
int batchsize = 1,
NdbOperation::LockMode = NdbOperation::LM_Read);
+ int scanUpdateRecords(Ndb*, NdbScanOperation::ScanFlag,
+ int records,
+ int abort = 0,
+ int parallelism = 0);
+
int scanUpdateRecords(Ndb*,
int records,
int abort = 0,
@@ -90,9 +105,12 @@ public:
int records,
int percentToLock = 1,
int lockTime = 1000);
+
int fillTable(Ndb*,
int batch=512);
+ int fillTableStartFrom(Ndb*, int startFrom, int batch=512);
+
/**
* Reading using UniqHashIndex with key = pk
*/
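
The new *StartFrom variants let the disk-data restart tests keep appending rows across crashes instead of reloading from row 0. A short sketch of the intended call pattern (pNdb, pTab and the row counts are placeholders):

    HugoTransactions hugoTrans(*pTab);

    // load rows [0,1000) and then append rows [1000,2000)
    if (hugoTrans.loadTable(pNdb, 1000) != 0)
      return NDBT_FAILED;
    if (hugoTrans.loadTableStartFrom(pNdb, 1000, 1000) != 0)
      return NDBT_FAILED;

    // exclusive scan-update forced to a TUP scan via the new ScanFlag overload;
    // records == 0 means the row count is not verified afterwards
    if (hugoTrans.scanUpdateRecords(pNdb, NdbScanOperation::SF_TupScan, 0) != 0)
      return NDBT_FAILED;
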
diff --git a/storage/ndb/test/include/UtilTransactions.hpp b/storage/ndb/test/include/UtilTransactions.hpp
index 75bbcd9c776..193398c3da2 100644
--- a/storage/ndb/test/include/UtilTransactions.hpp
+++ b/storage/ndb/test/include/UtilTransactions.hpp
@@ -30,6 +30,11 @@ public:
int closeTransaction(Ndb*);
int clearTable(Ndb*,
+ NdbScanOperation::ScanFlag,
+ int records = 0,
+ int parallelism = 0);
+
+ int clearTable(Ndb*,
int records = 0,
int parallelism = 0);
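
Similarly, the new clearTable() overload lets a test force the scan type used for the delete; the SR_DD_* tests below use it to clear via a TUP scan (pNdb and pTab are placeholders):

    UtilTransactions utilTrans(*pTab);
    if (utilTrans.clearTable(pNdb, NdbScanOperation::SF_TupScan,
                             0 /* records, as in the existing overload */) != 0)
      return NDBT_FAILED;
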
diff --git a/storage/ndb/test/include/dbutil.hpp b/storage/ndb/test/include/dbutil.hpp
new file mode 100755
index 00000000000..2b36965715f
--- /dev/null
+++ b/storage/ndb/test/include/dbutil.hpp
@@ -0,0 +1,97 @@
+// dbutil.hpp: interface for the database utilities class.
+//////////////////////////////////////////////////////////////////////
+// Supplies a MySQL database connection to the test applications
+//////////////////////////////////////////////////////////////////////
+
+#ifndef DBUTIL_HPP
+#define DBUTIL_HPP
+
+#if _MSC_VER > 1000
+#pragma once
+#endif // _MSC_VER > 1000
+#include <time.h>
+#include <stdio.h>
+#include <string.h>
+#include <mysql.h>
+//include "rand.h"
+#include <stdlib.h>
+
+//#define DEBUG
+#define DIE_UNLESS(expr) \
+ ((void) ((expr) ? 0 : (Die(__FILE__, __LINE__, #expr), 0)))
+#define DIE(expr) \
+ Die(__FILE__, __LINE__, #expr)
+#define myerror(msg) PrintError(msg)
+#define mysterror(stmt, msg) PrintStError(stmt, msg)
+#define CheckStmt(stmt) \
+{ \
+if ( stmt == 0) \
+ myerror(NULL); \
+DIE_UNLESS(stmt != 0); \
+}
+
+#define check_execute(stmt, r) \
+{ \
+if (r) \
+ mysterror(stmt, NULL); \
+DIE_UNLESS(r == 0);\
+}
+
+#define TRUE 1
+#define FALSE 0
+
+
+class dbutil
+{
+public:
+
+ dbutil(const char * databaseName);
+ ~dbutil();
+
+ void DatabaseLogin(const char * system,
+ const char * usr,
+ const char * password,
+ unsigned int portIn,
+ const char * sockIn,
+ bool transactional);
+ char * GetDbName(){return dbs;};
+ char * GetUser(){return user;};
+ char * GetPassword(){return pass;};
+ char * GetHost(){return host;};
+ char * GetSocket(){return socket;};
+ const char * GetServerType(){return mysql_get_server_info(myDbHandel);};
+ MYSQL* GetDbHandel(){return myDbHandel;};
+ MYSQL_STMT *STDCALL MysqlSimplePrepare(const char *query);
+ int Select_DB();
+ int Do_Query(char * stm);
+ const char * GetError();
+ int GetErrorNumber();
+ unsigned long SelectCountTable(const char * table);
+
+private:
+
+ //Connect variables
+ char * databaseName; // Database name
+ char host[256]; // Computer to connect to
+ char user[256]; // MySQL User
+ char pass[256]; // MySQL User Password
+ char dbs[256]; // Database to use
+ unsigned int port; // MySQL Server port
+ char socket[256]; // MySQL Server Unix Socket
+ MYSQL *myDbHandel;
+
+ void DatabaseLogout();
+
+ void SetDbName(const char * name){strcpy((char *)dbs, name);};
+ void SetUser(const char * userName){strcpy((char *)user, userName);};
+ void SetPassword(const char * password){strcpy((char *)pass,password);};
+ void SetHost(const char * system){strcpy((char*)host, system);};
+ void SetPort(unsigned int portIn){port=portIn;};
+ void SetSocket(const char * sockIn){strcpy((char *)socket, sockIn);};
+ void PrintError(const char *msg);
+ void PrintStError(MYSQL_STMT *stmt, const char *msg);
+ void Die(const char *file, int line, const char *expr); // stop program
+
+};
+#endif
+
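
For orientation, a sketch of how the dbutil class above is meant to be driven from a test program; the connection parameters and the query are placeholders:

    #include "dbutil.hpp"

    int main()
    {
      dbutil db("test");                          // database name
      db.DatabaseLogin("localhost", "root", "",   // host, user, password
                       3306, "/tmp/mysql.sock",   // port, unix socket
                       false);                    // false => autocommit on
      db.Select_DB();

      if (db.Do_Query((char*)"CREATE TABLE IF NOT EXISTS t1 (a INT) ENGINE=NDBCLUSTER"))
        printf("query failed: %s\n", db.GetError());

      printf("rows in t1: %lu\n", db.SelectCountTable("t1"));
      return 0;                                   // ~dbutil() closes the connection
    }
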
diff --git a/storage/ndb/test/ndbapi/testBasic.cpp b/storage/ndb/test/ndbapi/testBasic.cpp
index 952b5a50dc5..ac23ceaad18 100644
--- a/storage/ndb/test/ndbapi/testBasic.cpp
+++ b/storage/ndb/test/ndbapi/testBasic.cpp
@@ -136,31 +136,13 @@ int runPkRead(NDBT_Context* ctx, NDBT_Step* step){
int loops = ctx->getNumLoops();
int records = ctx->getNumRecords();
int batchSize = ctx->getProperty("BatchSize", 1);
+ int lm = ctx->getProperty("LockMode", NdbOperation::LM_Read);
int i = 0;
HugoTransactions hugoTrans(*ctx->getTab());
while (i<loops) {
g_info << i << ": ";
- if (hugoTrans.pkReadRecords(GETNDB(step), records, batchSize) != NDBT_OK){
- g_info << endl;
- return NDBT_FAILED;
- }
- i++;
- }
- g_info << endl;
- return NDBT_OK;
-}
-
-int runPkDirtyRead(NDBT_Context* ctx, NDBT_Step* step){
- int loops = ctx->getNumLoops();
- int records = ctx->getNumRecords();
- int batchSize = ctx->getProperty("BatchSize", 1);
- int i = 0;
- bool dirty = true;
- HugoTransactions hugoTrans(*ctx->getTab());
- while (i<loops) {
- g_info << i << ": ";
- if (hugoTrans.pkReadRecords(GETNDB(step), records, batchSize,
- NdbOperation::LM_CommittedRead) != NDBT_OK){
+ if (hugoTrans.pkReadRecords(GETNDB(step), records, batchSize,
+ (NdbOperation::LockMode)lm) != NDBT_OK){
g_info << endl;
return NDBT_FAILED;
}
@@ -1552,14 +1534,23 @@ TESTCASE("PkInsert",
}
TESTCASE("PkRead",
"Verify that we can insert, read and delete from this table using PK"){
+ TC_PROPERTY("LockMode", NdbOperation::LM_Read);
INITIALIZER(runLoadTable);
STEP(runPkRead);
FINALIZER(runClearTable);
}
TESTCASE("PkDirtyRead",
"Verify that we can insert, dirty read and delete from this table using PK"){
+ TC_PROPERTY("LockMode", NdbOperation::LM_Dirty);
INITIALIZER(runLoadTable);
- STEP(runPkDirtyRead);
+ STEP(runPkRead);
+ FINALIZER(runClearTable);
+}
+TESTCASE("PkSimpleRead",
+ "Verify that we can insert, simple read and delete from this table using PK"){
+ TC_PROPERTY("LockMode", NdbOperation::LM_SimpleRead);
+ INITIALIZER(runLoadTable);
+ STEP(runPkRead);
FINALIZER(runClearTable);
}
TESTCASE("PkUpdate",
diff --git a/storage/ndb/test/ndbapi/testDict.cpp b/storage/ndb/test/ndbapi/testDict.cpp
index 13c071f968e..16b6e129605 100644
--- a/storage/ndb/test/ndbapi/testDict.cpp
+++ b/storage/ndb/test/ndbapi/testDict.cpp
@@ -684,7 +684,7 @@ int runTestFragmentTypes(NDBT_Context* ctx, NDBT_Step* step){
CHECK(utilTrans.selectCount(pNdb, 64, &count) == 0);
CHECK(count == records);
CHECK(hugoTrans.pkDelRecords(pNdb, records/2) == 0);
- CHECK(hugoTrans.scanUpdateRecords(pNdb, records) == 0);
+ CHECK(hugoTrans.scanUpdateRecords(pNdb, records/2) == 0);
CHECK(utilTrans.selectCount(pNdb, 64, &count) == 0);
CHECK(count == (records/2));
@@ -862,7 +862,7 @@ int runPkSizes(NDBT_Context* ctx, NDBT_Step* step){
CHECK(utilTrans.selectCount(pNdb, 64, &count) == 0);
CHECK(count == records);
CHECK(hugoTrans.pkDelRecords(pNdb, records/2) == 0);
- CHECK(hugoTrans.scanUpdateRecords(pNdb, records) == 0);
+ CHECK(hugoTrans.scanUpdateRecords(pNdb, records/2) == 0);
CHECK(utilTrans.selectCount(pNdb, 64, &count) == 0);
CHECK(count == (records/2));
CHECK(utilTrans.clearTable(pNdb, records) == 0);
@@ -2706,7 +2706,262 @@ runDictRestart(NDBT_Context* ctx, NDBT_Step* step)
return NDBT_OK;
}
+int
+runBug29501(NDBT_Context* ctx, NDBT_Step* step) {
+ NdbRestarter res;
+ NdbDictionary::LogfileGroup lg;
+ lg.setName("DEFAULT-LG");
+ lg.setUndoBufferSize(8*1024*1024);
+
+ if (res.getNumDbNodes() < 2)
+ return NDBT_OK;
+
+ Ndb* pNdb = GETNDB(step);
+ NdbDictionary::Dictionary* pDict = pNdb->getDictionary();
+
+ int node = res.getRandomNotMasterNodeId(rand());
+ res.restartOneDbNode(node, true, true, false);
+
+ if(pDict->createLogfileGroup(lg) != 0){
+ g_err << "Failed to create logfilegroup:"
+ << endl << pDict->getNdbError() << endl;
+ return NDBT_FAILED;
+ }
+
+ NdbDictionary::Undofile uf;
+ uf.setPath("undofile01.dat");
+ uf.setSize(5*1024*1024);
+ uf.setLogfileGroup("DEFAULT-LG");
+
+ if(pDict->createUndofile(uf) != 0){
+ g_err << "Failed to create undofile:"
+ << endl << pDict->getNdbError() << endl;
+ return NDBT_FAILED;
+ }
+
+ res.waitNodesNoStart(&node, 1);
+ res.startNodes(&node, 1);
+
+ if (res.waitClusterStarted()){
+ g_err << "Node restart failed"
+ << endl << pDict->getNdbError() << endl;
+ return NDBT_FAILED;
+ }
+
+ if (pDict->dropLogfileGroup(pDict->getLogfileGroup(lg.getName())) != 0){
+ g_err << "Drop of LFG Failed"
+ << endl << pDict->getNdbError() << endl;
+ return NDBT_FAILED;
+ }
+
+ return NDBT_OK;
+}
+
+int
+runDropDDObjects(NDBT_Context* ctx, NDBT_Step* step){
+ //Purpose is to drop all tables, datafiles, tablespaces and logfile groups (LFGs)
+ Uint32 i = 0;
+
+ Ndb* pNdb = GETNDB(step);
+ NdbDictionary::Dictionary* pDict = pNdb->getDictionary();
+
+ NdbDictionary::Dictionary::List list;
+ if (pDict->listObjects(list) == -1)
+ return NDBT_FAILED;
+
+ //Search the list and drop all tables found
+ const char * tableFound = 0;
+ for (i = 0; i < list.count; i++){
+ switch(list.elements[i].type){
+ case NdbDictionary::Object::UserTable:
+ tableFound = list.elements[i].name;
+ if(tableFound != 0){
+ if(pDict->dropTable(tableFound) != 0){
+ g_err << "Failed to drop table: " << pDict->getNdbError() << endl;
+ return NDBT_FAILED;
+ }
+ }
+ tableFound = 0;
+ break;
+ default:
+ break;
+ }
+ }
+
+ //Search the list and drop all datafiles found
+ const char * dfFound = 0;
+ for (i = 0; i < list.count; i++){
+ switch(list.elements[i].type){
+ case NdbDictionary::Object::Datafile:
+ dfFound = list.elements[i].name;
+ if(dfFound != 0){
+ if(pDict->dropDatafile(pDict->getDatafile(0, dfFound)) != 0){
+ g_err << "Failed to drop datafile: " << pDict->getNdbError() << endl;
+ return NDBT_FAILED;
+ }
+ }
+ dfFound = 0;
+ break;
+ default:
+ break;
+ }
+ }
+
+ //Search the list and drop all tablespaces found
+ const char * tsFound = 0;
+ for (i = 0; i <list.count; i++){
+ switch(list.elements[i].type){
+ case NdbDictionary::Object::Tablespace:
+ tsFound = list.elements[i].name;
+ if(tsFound != 0){
+ if(pDict->dropTablespace(pDict->getTablespace(tsFound)) != 0){
+ g_err << "Failed to drop tablespace: " << pDict->getNdbError() << endl;
+ return NDBT_FAILED;
+ }
+ }
+ tsFound = 0;
+ break;
+ default:
+ break;
+ }
+ }
+
+ //Search the list and drop all LFGs found
+ //Currently only 1 LFG is supported, but this is written for the future
+ //when more than one is supported.
+ const char * lgFound = 0;
+ for (i = 0; i < list.count; i++){
+ switch(list.elements[i].type){
+ case NdbDictionary::Object::LogfileGroup:
+ lgFound = list.elements[i].name;
+ if(lgFound != 0){
+ if (pDict->dropLogfileGroup(pDict->getLogfileGroup(lgFound)) != 0){
+ g_err << "Failed to drop tablespace: " << pDict->getNdbError() << endl;
+ return NDBT_FAILED;
+ }
+ }
+ lgFound = 0;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return NDBT_OK;
+}
+
+int
+runWaitStarted(NDBT_Context* ctx, NDBT_Step* step){
+
+ NdbRestarter restarter;
+ restarter.waitClusterStarted(300);
+
+ NdbSleep_SecSleep(3);
+ return NDBT_OK;
+}
+
+int
+testDropDDObjectsSetup(NDBT_Context* ctx, NDBT_Step* step){
+ //Purpose is to set up the DropDDObjects test
+ char tsname[256];
+ char dfname[256];
+
+ Ndb* pNdb = GETNDB(step);
+ NdbDictionary::Dictionary* pDict = pNdb->getDictionary();
+
+ NdbDictionary::LogfileGroup lg;
+ lg.setName("DEFAULT-LG");
+ lg.setUndoBufferSize(8*1024*1024);
+
+
+ if(pDict->createLogfileGroup(lg) != 0){
+ g_err << "Failed to create logfilegroup:"
+ << endl << pDict->getNdbError() << endl;
+ return NDBT_FAILED;
+ }
+
+ NdbDictionary::Undofile uf;
+ uf.setPath("undofile01.dat");
+ uf.setSize(5*1024*1024);
+ uf.setLogfileGroup("DEFAULT-LG");
+
+ if(pDict->createUndofile(uf) != 0){
+ g_err << "Failed to create undofile:"
+ << endl << pDict->getNdbError() << endl;
+ return NDBT_FAILED;
+ }
+
+ BaseString::snprintf(tsname, sizeof(tsname), "TS-%u", rand());
+ BaseString::snprintf(dfname, sizeof(dfname), "%s-%u.dat", tsname, rand());
+
+ if (create_tablespace(pDict, lg.getName(), tsname, dfname)){
+ g_err << "Failed to create undofile:"
+ << endl << pDict->getNdbError() << endl;
+ return NDBT_FAILED;
+ }
+
+ return NDBT_OK;
+}
+
+int
+DropDDObjectsVerify(NDBT_Context* ctx, NDBT_Step* step){
+ //Purpose is to verify that the DropDDObjects test worked
+ Uint32 i = 0;
+
+ Ndb* pNdb = GETNDB(step);
+ NdbDictionary::Dictionary* pDict = pNdb->getDictionary();
+
+ NdbDictionary::Dictionary::List list;
+ if (pDict->listObjects(list) == -1)
+ return NDBT_FAILED;
+
+ bool ddFound = false;
+ for (i = 0; i <list.count; i++){
+ switch(list.elements[i].type){
+ case NdbDictionary::Object::Tablespace:
+ ddFound = true;
+ break;
+ case NdbDictionary::Object::LogfileGroup:
+ ddFound = true;
+ break;
+ default:
+ break;
+ }
+ if(ddFound == true){
+ g_err << "DropDDObjects Failed: DD found:"
+ << endl;
+ return NDBT_FAILED;
+ }
+ }
+ return NDBT_OK;
+}
+
NDBT_TESTSUITE(testDict);
+TESTCASE("testDropDDObjects",
+ "* 1. start cluster\n"
+ "* 2. Create LFG\n"
+ "* 3. create TS\n"
+ "* 4. run DropDDObjects\n"
+ "* 5. Verify DropDDObjectsRestart worked\n"){
+ INITIALIZER(runWaitStarted);
+ INITIALIZER(runDropDDObjects);
+ INITIALIZER(testDropDDObjectsSetup);
+ STEP(runDropDDObjects);
+ FINALIZER(DropDDObjectsVerify);
+}
+
+TESTCASE("Bug29501",
+ "* 1. start cluster\n"
+ "* 2. Restart 1 node -abort -nostart\n"
+ "* 3. create LFG\n"
+ "* 4. Restart data node\n"
+ "* 5. Restart 1 node -nostart\n"
+ "* 6. Drop LFG\n"){
+ INITIALIZER(runWaitStarted);
+ INITIALIZER(runDropDDObjects);
+ STEP(runBug29501);
+ FINALIZER(runDropDDObjects);
+}
TESTCASE("CreateAndDrop",
"Try to create and drop the table loop number of times\n"){
INITIALIZER(runCreateAndDrop);
diff --git a/storage/ndb/test/ndbapi/testIndex.cpp b/storage/ndb/test/ndbapi/testIndex.cpp
index 00e559c7a0f..bd9ff7ac607 100644
--- a/storage/ndb/test/ndbapi/testIndex.cpp
+++ b/storage/ndb/test/ndbapi/testIndex.cpp
@@ -809,7 +809,7 @@ int runSystemRestart1(NDBT_Context* ctx, NDBT_Step* step){
CHECK(hugoTrans.pkUpdateRecords(pNdb, records) == 0);
CHECK(utilTrans.verifyIndex(pNdb, idxName, 16, false) == 0);
CHECK(hugoTrans.pkDelRecords(pNdb, records/2) == 0);
- CHECK(hugoTrans.scanUpdateRecords(pNdb, records) == 0);
+ CHECK(hugoTrans.scanUpdateRecords(pNdb, records/2) == 0);
CHECK(utilTrans.verifyIndex(pNdb, idxName, 16, false) == 0);
CHECK(utilTrans.clearTable(pNdb, records) == 0);
CHECK(hugoTrans.loadTable(pNdb, records, 1) == 0);
@@ -834,7 +834,7 @@ int runSystemRestart1(NDBT_Context* ctx, NDBT_Step* step){
CHECK(utilTrans.verifyIndex(pNdb, idxName, 16, false) == 0);
CHECK(hugoTrans.pkDelRecords(pNdb, records/2) == 0);
CHECK(utilTrans.verifyIndex(pNdb, idxName, 16, false) == 0);
- CHECK(hugoTrans.scanUpdateRecords(pNdb, records) == 0);
+ CHECK(hugoTrans.scanUpdateRecords(pNdb, records/2) == 0);
CHECK(utilTrans.verifyIndex(pNdb, idxName, 16, false) == 0);
CHECK(utilTrans.clearTable(pNdb, records) == 0);
CHECK(hugoTrans.loadTable(pNdb, records, 1) == 0);
diff --git a/storage/ndb/test/ndbapi/testNodeRestart.cpp b/storage/ndb/test/ndbapi/testNodeRestart.cpp
index 99b72699762..419196e00eb 100644
--- a/storage/ndb/test/ndbapi/testNodeRestart.cpp
+++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp
@@ -1762,6 +1762,80 @@ runBug28717(NDBT_Context* ctx, NDBT_Step* step)
return NDBT_OK;
}
+int
+runBug31525(NDBT_Context* ctx, NDBT_Step* step)
+{
+ int result = NDBT_OK;
+ int loops = ctx->getNumLoops();
+ int records = ctx->getNumRecords();
+ Ndb* pNdb = GETNDB(step);
+ NdbRestarter res;
+
+ if (res.getNumDbNodes() < 2)
+ {
+ return NDBT_OK;
+ }
+
+ int nodes[2];
+ nodes[0] = res.getMasterNodeId();
+ nodes[1] = res.getNextMasterNodeId(nodes[0]);
+
+ while (res.getNodeGroup(nodes[0]) != res.getNodeGroup(nodes[1]))
+ {
+ ndbout_c("Restarting %u as it not in same node group as %u",
+ nodes[1], nodes[0]);
+ if (res.restartOneDbNode(nodes[1], false, true, true))
+ return NDBT_FAILED;
+
+ if (res.waitNodesNoStart(nodes+1, 1))
+ return NDBT_FAILED;
+
+ if (res.startNodes(nodes+1, 1))
+ return NDBT_FAILED;
+
+ if (res.waitClusterStarted())
+ return NDBT_FAILED;
+
+ nodes[1] = res.getNextMasterNodeId(nodes[0]);
+ }
+
+ ndbout_c("nodes[0]: %u nodes[1]: %u", nodes[0], nodes[1]);
+
+ int val = DumpStateOrd::DihMinTimeBetweenLCP;
+ if (res.dumpStateAllNodes(&val, 1))
+ return NDBT_FAILED;
+
+ int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
+ if (res.dumpStateAllNodes(val2, 2))
+ return NDBT_FAILED;
+
+ if (res.insertErrorInAllNodes(932))
+ return NDBT_FAILED;
+
+ if (res.insertErrorInNode(nodes[1], 7192))
+ return NDBT_FAILED;
+
+ if (res.insertErrorInNode(nodes[0], 7191))
+ return NDBT_FAILED;
+
+ if (res.waitClusterNoStart())
+ return NDBT_FAILED;
+
+ if (res.startAll())
+ return NDBT_FAILED;
+
+ if (res.waitClusterStarted())
+ return NDBT_FAILED;
+
+ if (res.restartOneDbNode(nodes[1], false, false, true))
+ return NDBT_FAILED;
+
+ if (res.waitClusterStarted())
+ return NDBT_FAILED;
+
+ return NDBT_OK;
+}
+
NDBT_TESTSUITE(testNodeRestart);
TESTCASE("NoLoad",
"Test that one node at a time can be stopped and then restarted "\
@@ -2085,6 +2159,9 @@ TESTCASE("Bug21271",
STEP(runPkUpdateUntilStopped);
FINALIZER(runClearTable);
}
+TESTCASE("Bug31525", ""){
+ INITIALIZER(runBug31525);
+}
TESTCASE("Bug24717", ""){
INITIALIZER(runBug24717);
}
diff --git a/storage/ndb/test/ndbapi/testScan.cpp b/storage/ndb/test/ndbapi/testScan.cpp
index 2561869fa5f..df6dbe2e550 100644
--- a/storage/ndb/test/ndbapi/testScan.cpp
+++ b/storage/ndb/test/ndbapi/testScan.cpp
@@ -579,7 +579,7 @@ int runScanUpdateUntilStopped(NDBT_Context* ctx, NDBT_Step* step){
para = myRandom48(239)+1;
g_info << i << ": ";
- if (hugoTrans.scanUpdateRecords(GETNDB(step), records, 0, para) == NDBT_FAILED){
+ if (hugoTrans.scanUpdateRecords(GETNDB(step), 0, 0, para) == NDBT_FAILED){
return NDBT_FAILED;
}
i++;
diff --git a/storage/ndb/test/ndbapi/testSystemRestart.cpp b/storage/ndb/test/ndbapi/testSystemRestart.cpp
index 901c0e35568..89580c0cef8 100644
--- a/storage/ndb/test/ndbapi/testSystemRestart.cpp
+++ b/storage/ndb/test/ndbapi/testSystemRestart.cpp
@@ -20,6 +20,7 @@
#include <NdbRestarter.hpp>
#include <Vector.hpp>
#include <signaldata/DumpStateOrd.hpp>
+#include <NdbBackup.hpp>
int runLoadTable(NDBT_Context* ctx, NDBT_Step* step){
@@ -121,7 +122,7 @@ int runSystemRestart1(NDBT_Context* ctx, NDBT_Step* step){
CHECK(hugoTrans.loadTable(pNdb, records) == 0);
CHECK(hugoTrans.pkUpdateRecords(pNdb, records) == 0);
CHECK(hugoTrans.pkDelRecords(pNdb, records/2) == 0);
- CHECK(hugoTrans.scanUpdateRecords(pNdb, records) == 0);
+ CHECK(hugoTrans.scanUpdateRecords(pNdb, records/2) == 0);
CHECK(utilTrans.clearTable(pNdb, records) == 0);
CHECK(hugoTrans.loadTable(pNdb, records) == 0);
CHECK(utilTrans.clearTable(pNdb, records) == 0);
@@ -142,7 +143,7 @@ int runSystemRestart1(NDBT_Context* ctx, NDBT_Step* step){
CHECK(hugoTrans.loadTable(pNdb, records) == 0);
CHECK(hugoTrans.pkUpdateRecords(pNdb, records) == 0);
CHECK(hugoTrans.pkDelRecords(pNdb, records/2) == 0);
- CHECK(hugoTrans.scanUpdateRecords(pNdb, records) == 0);
+ CHECK(hugoTrans.scanUpdateRecords(pNdb, records/2) == 0);
CHECK(utilTrans.clearTable(pNdb, records) == 0);
CHECK(hugoTrans.loadTable(pNdb, records) == 0);
CHECK(utilTrans.clearTable(pNdb, records) == 0);
@@ -265,7 +266,7 @@ int runSystemRestart2(NDBT_Context* ctx, NDBT_Step* step){
CHECK(hugoTrans.loadTable(pNdb, records) == 0);
CHECK(hugoTrans.pkUpdateRecords(pNdb, records) == 0);
CHECK(hugoTrans.pkDelRecords(pNdb, records/2) == 0);
- CHECK(hugoTrans.scanUpdateRecords(pNdb, records) == 0);
+ CHECK(hugoTrans.scanUpdateRecords(pNdb, records/2) == 0);
CHECK(utilTrans.clearTable(pNdb, records) == 0);
CHECK(hugoTrans.loadTable(pNdb, records) == 0);
CHECK(utilTrans.clearTable(pNdb, records) == 0);
@@ -329,7 +330,7 @@ int runSystemRestartTestUndoLog(NDBT_Context* ctx, NDBT_Step* step){
CHECK(hugoTrans.loadTable(pNdb, records) == 0);
CHECK(hugoTrans.pkUpdateRecords(pNdb, records) == 0);
CHECK(hugoTrans.pkDelRecords(pNdb, records/2) == 0);
- CHECK(hugoTrans.scanUpdateRecords(pNdb, records) == 0);
+ CHECK(hugoTrans.scanUpdateRecords(pNdb, records/2) == 0);
CHECK(utilTrans.clearTable(pNdb, records) == 0);
CHECK(hugoTrans.loadTable(pNdb, records) == 0);
CHECK(utilTrans.clearTable(pNdb, records) == 0);
@@ -1293,6 +1294,260 @@ runBug28770(NDBT_Context* ctx, NDBT_Step* step) {
return result;
}
+int
+runStopper(NDBT_Context* ctx, NDBT_Step* step)
+{
+ NdbRestarter restarter;
+ Uint32 stop = 0;
+loop:
+ while (!ctx->isTestStopped() &&
+ ((stop = ctx->getProperty("StopAbort", Uint32(0))) == 0))
+ {
+ NdbSleep_MilliSleep(30);
+ }
+
+ if (ctx->isTestStopped())
+ {
+ return NDBT_OK;
+ }
+
+ ndbout << "Killing in " << stop << "ms..." << flush;
+ NdbSleep_MilliSleep(stop);
+ restarter.restartAll(false, true, true);
+ ctx->setProperty("StopAbort", Uint32(0));
+ goto loop;
+}
+
+int runSR_DD_1(NDBT_Context* ctx, NDBT_Step* step)
+{
+ Ndb* pNdb = GETNDB(step);
+ int result = NDBT_OK;
+ Uint32 loops = ctx->getNumLoops();
+ int count;
+ NdbRestarter restarter;
+ NdbBackup backup(GETNDB(step)->getNodeId()+1);
+ bool lcploop = ctx->getProperty("LCP", (unsigned)0);
+ bool all = ctx->getProperty("ALL", (unsigned)0);
+
+ Uint32 i = 1;
+ Uint32 backupId;
+
+ int val[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
+ int lcp = DumpStateOrd::DihMinTimeBetweenLCP;
+
+ int startFrom = 0;
+
+ HugoTransactions hugoTrans(*ctx->getTab());
+ while(i<=loops && result != NDBT_FAILED)
+ {
+
+ if (lcploop)
+ {
+ CHECK(restarter.dumpStateAllNodes(&lcp, 1) == 0);
+ }
+
+ int nodeId = restarter.getDbNodeId(rand() % restarter.getNumDbNodes());
+ //CHECK(restarter.dumpStateAllNodes(&val, 1) == 0);
+
+ ndbout << "Loop " << i << "/"<< loops <<" started" << endl;
+ ndbout << "Loading records..." << startFrom << endl;
+ CHECK(hugoTrans.loadTable(pNdb, startFrom) == 0);
+
+ if (!all)
+ {
+ ndbout << "Making " << nodeId << " crash" << endl;
+ int kill[] = { 9999, 1000, 3000 };
+ CHECK(restarter.dumpStateOneNode(nodeId, val, 2) == 0);
+ CHECK(restarter.dumpStateOneNode(nodeId, kill, 3) == 0);
+ }
+ else
+ {
+ ndbout << "Crashing cluster" << endl;
+ ctx->setProperty("StopAbort", 1000 + rand() % (3000 - 1000));
+ }
+ Uint64 end = NdbTick_CurrentMillisecond() + 4000;
+ Uint32 row = startFrom;
+ do {
+ ndbout << "Loading from " << row << " to " << row + 1000 << endl;
+ if (hugoTrans.loadTableStartFrom(pNdb, row, 1000) != 0)
+ break;
+ row += 1000;
+ } while (NdbTick_CurrentMillisecond() < end);
+
+ if (!all)
+ {
+ ndbout << "Waiting for " << nodeId << " to restart" << endl;
+ CHECK(restarter.waitNodesNoStart(&nodeId, 1) == 0);
+ ndbout << "Restarting cluster" << endl;
+ CHECK(restarter.restartAll(false, true, true) == 0);
+ }
+ else
+ {
+ ndbout << "Waiting for cluster to restart" << endl;
+ }
+ CHECK(restarter.waitClusterNoStart() == 0);
+ CHECK(restarter.startAll() == 0);
+ CHECK(restarter.waitClusterStarted() == 0);
+
+ ndbout << "Starting backup..." << flush;
+ CHECK(backup.start(backupId) == 0);
+ ndbout << "done" << endl;
+
+ int cnt = 0;
+ CHECK(hugoTrans.selectCount(pNdb, 0, &cnt) == 0);
+ ndbout << "Found " << cnt << " records..." << endl;
+ ndbout << "Clearing..." << endl;
+ CHECK(hugoTrans.clearTable(pNdb,
+ NdbScanOperation::SF_TupScan, cnt) == 0);
+
+ if (cnt > startFrom)
+ {
+ startFrom = cnt;
+ }
+ startFrom += 1000;
+ i++;
+ }
+
+ ndbout << "runSR_DD_1 finished" << endl;
+ ctx->stopTest();
+ return result;
+}
+
+int runSR_DD_2(NDBT_Context* ctx, NDBT_Step* step)
+{
+ Ndb* pNdb = GETNDB(step);
+ int result = NDBT_OK;
+ Uint32 loops = ctx->getNumLoops();
+ Uint32 rows = ctx->getNumRecords();
+ int count;
+ NdbRestarter restarter;
+ NdbBackup backup(GETNDB(step)->getNodeId()+1);
+ bool lcploop = ctx->getProperty("LCP", (unsigned)0);
+ bool all = ctx->getProperty("ALL", (unsigned)0);
+
+ Uint32 i = 1;
+ Uint32 backupId;
+
+ int val[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
+ int lcp = DumpStateOrd::DihMinTimeBetweenLCP;
+
+ int startFrom = 0;
+
+ HugoTransactions hugoTrans(*ctx->getTab());
+ while(i<=loops && result != NDBT_FAILED)
+ {
+
+ if (lcploop)
+ {
+ CHECK(restarter.dumpStateAllNodes(&lcp, 1) == 0);
+ }
+
+ int nodeId = restarter.getDbNodeId(rand() % restarter.getNumDbNodes());
+
+ if (!all)
+ {
+ ndbout << "Making " << nodeId << " crash" << endl;
+ int kill[] = { 9999, 3000, 10000 };
+ CHECK(restarter.dumpStateOneNode(nodeId, val, 2) == 0);
+ CHECK(restarter.dumpStateOneNode(nodeId, kill, 3) == 0);
+ }
+ else
+ {
+ ndbout << "Crashing cluster" << endl;
+ ctx->setProperty("StopAbort", 1000 + rand() % (3000 - 1000));
+ }
+
+ Uint64 end = NdbTick_CurrentMillisecond() + 11000;
+ Uint32 row = startFrom;
+ do {
+ if (hugoTrans.loadTable(pNdb, rows) != 0)
+ break;
+
+ if (hugoTrans.clearTable(pNdb, NdbScanOperation::SF_TupScan, rows) != 0)
+ break;
+ } while (NdbTick_CurrentMillisecond() < end);
+
+ if (!all)
+ {
+ ndbout << "Waiting for " << nodeId << " to restart" << endl;
+ CHECK(restarter.waitNodesNoStart(&nodeId, 1) == 0);
+ ndbout << "Restarting cluster" << endl;
+ CHECK(restarter.restartAll(false, true, true) == 0);
+ }
+ else
+ {
+ ndbout << "Waiting for cluster to restart" << endl;
+ }
+
+ CHECK(restarter.waitClusterNoStart() == 0);
+ CHECK(restarter.startAll() == 0);
+ CHECK(restarter.waitClusterStarted() == 0);
+
+ ndbout << "Starting backup..." << flush;
+ CHECK(backup.start(backupId) == 0);
+ ndbout << "done" << endl;
+
+ int cnt = 0;
+ CHECK(hugoTrans.selectCount(pNdb, 0, &cnt) == 0);
+ ndbout << "Found " << cnt << " records..." << endl;
+ ndbout << "Clearing..." << endl;
+ CHECK(hugoTrans.clearTable(pNdb,
+ NdbScanOperation::SF_TupScan, cnt) == 0);
+ i++;
+ }
+
+ ndbout << "runSR_DD_2 finished" << endl;
+ ctx->stopTest();
+ return result;
+}
+
+int
+runBug27434(NDBT_Context* ctx, NDBT_Step* step)
+{
+ int result = NDBT_OK;
+ NdbRestarter restarter;
+ Ndb* pNdb = GETNDB(step);
+ const Uint32 nodeCount = restarter.getNumDbNodes();
+
+ if (nodeCount < 2)
+ return NDBT_OK;
+
+ int args[] = { DumpStateOrd::DihMaxTimeBetweenLCP };
+ int dump[] = { DumpStateOrd::DihStartLcpImmediately };
+
+ int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_CHECKPOINT, 0 };
+ NdbLogEventHandle handle =
+ ndb_mgm_create_logevent_handle(restarter.handle, filter);
+
+ struct ndb_logevent event;
+
+ do {
+ int node1 = restarter.getDbNodeId(rand() % nodeCount);
+ CHECK(restarter.restartOneDbNode(node1, false, true, true) == 0);
+ NdbSleep_SecSleep(3);
+ CHECK(restarter.waitNodesNoStart(&node1, 1) == 0);
+
+ CHECK(restarter.dumpStateAllNodes(args, 1) == 0);
+
+ for (Uint32 i = 0; i<3; i++)
+ {
+ CHECK(restarter.dumpStateAllNodes(dump, 1) == 0);
+ while(ndb_logevent_get_next(handle, &event, 0) >= 0 &&
+ event.type != NDB_LE_LocalCheckpointStarted);
+ while(ndb_logevent_get_next(handle, &event, 0) >= 0 &&
+ event.type != NDB_LE_LocalCheckpointCompleted);
+ }
+
+ restarter.restartAll(false, true, true);
+ NdbSleep_SecSleep(3);
+ CHECK(restarter.waitClusterNoStart() == 0);
+ restarter.insertErrorInNode(node1, 5046);
+ restarter.startAll();
+ CHECK(restarter.waitClusterStarted() == 0);
+ } while(false);
+
+ return result;
+}
NDBT_TESTSUITE(testSystemRestart);
TESTCASE("SR1",
@@ -1474,6 +1729,72 @@ TESTCASE("Bug24664",
STEP(runBug24664);
FINALIZER(runClearTable);
}
+TESTCASE("Bug27434",
+ "")
+{
+ INITIALIZER(runWaitStarted);
+ STEP(runBug27434);
+}
+TESTCASE("SR_DD_1", "")
+{
+ TC_PROPERTY("ALL", 1);
+ INITIALIZER(runWaitStarted);
+ STEP(runStopper);
+ STEP(runSR_DD_1);
+ FINALIZER(runClearTable);
+}
+TESTCASE("SR_DD_1b", "")
+{
+ INITIALIZER(runWaitStarted);
+ STEP(runSR_DD_1);
+ FINALIZER(runClearTable);
+}
+TESTCASE("SR_DD_1_LCP", "")
+{
+ TC_PROPERTY("ALL", 1);
+ TC_PROPERTY("LCP", 1);
+ INITIALIZER(runWaitStarted);
+ STEP(runStopper);
+ STEP(runSR_DD_1);
+ FINALIZER(runClearTable);
+}
+TESTCASE("SR_DD_1b_LCP", "")
+{
+ TC_PROPERTY("LCP", 1);
+ INITIALIZER(runWaitStarted);
+ STEP(runSR_DD_1);
+ FINALIZER(runClearTable);
+}
+TESTCASE("SR_DD_2", "")
+{
+ TC_PROPERTY("ALL", 1);
+ INITIALIZER(runWaitStarted);
+ STEP(runStopper);
+ STEP(runSR_DD_2);
+ FINALIZER(runClearTable);
+}
+TESTCASE("SR_DD_2b", "")
+{
+ INITIALIZER(runWaitStarted);
+ STEP(runSR_DD_2);
+ FINALIZER(runClearTable);
+}
+TESTCASE("SR_DD_2_LCP", "")
+{
+ TC_PROPERTY("ALL", 1);
+ TC_PROPERTY("LCP", 1);
+ INITIALIZER(runWaitStarted);
+ STEP(runStopper);
+ STEP(runSR_DD_2);
+ FINALIZER(runClearTable);
+}
+TESTCASE("SR_DD_2b_LCP", "")
+{
+ TC_PROPERTY("LCP", 1);
+ INITIALIZER(runWaitStarted);
+ STEP(runSR_DD_2);
+ FINALIZER(runClearTable);
+}
TESTCASE("Bug29167", "")
{
INITIALIZER(runWaitStarted);
diff --git a/storage/ndb/test/ndbapi/test_event.cpp b/storage/ndb/test/ndbapi/test_event.cpp
index e1e0012d0d8..18825d734a4 100644
--- a/storage/ndb/test/ndbapi/test_event.cpp
+++ b/storage/ndb/test/ndbapi/test_event.cpp
@@ -21,6 +21,7 @@
#include <NdbAutoPtr.hpp>
#include <NdbRestarter.hpp>
#include <NdbRestarts.hpp>
+#include <signaldata/DumpStateOrd.hpp>
#define GETNDB(ps) ((NDBT_NdbApiStep*)ps)->getNdb()
@@ -1730,7 +1731,7 @@ runScanUpdateUntilStopped(NDBT_Context* ctx, NDBT_Step* step){
HugoTransactions hugoTrans(*ctx->getTab());
while (ctx->isTestStopped() == false)
{
- if (hugoTrans.scanUpdateRecords(GETNDB(step), records, abort,
+ if (hugoTrans.scanUpdateRecords(GETNDB(step), 0, abort,
parallelism) == NDBT_FAILED){
return NDBT_FAILED;
}
@@ -1758,6 +1759,85 @@ runInsertDeleteUntilStopped(NDBT_Context* ctx, NDBT_Step* step)
return NDBT_OK;
}
+int
+runBug31701(NDBT_Context* ctx, NDBT_Step* step)
+{
+ int result = NDBT_OK;
+
+ NdbRestarter restarter;
+
+ if (restarter.getNumDbNodes() < 2){
+ ctx->stopTest();
+ return NDBT_OK;
+ }
+ // This should really wait for applier to start...10s is likely enough
+ NdbSleep_SecSleep(10);
+
+ int nodeId = restarter.getDbNodeId(rand() % restarter.getNumDbNodes());
+
+ int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
+ if (restarter.dumpStateOneNode(nodeId, val2, 2))
+ return NDBT_FAILED;
+
+ restarter.insertErrorInNode(nodeId, 13033);
+ if (restarter.waitNodesNoStart(&nodeId, 1))
+ return NDBT_FAILED;
+
+ if (restarter.startNodes(&nodeId, 1))
+ return NDBT_FAILED;
+
+ if (restarter.waitClusterStarted())
+ return NDBT_FAILED;
+
+
+ int records = ctx->getNumRecords();
+ HugoTransactions hugoTrans(*ctx->getTab());
+
+ if(ctx->getPropertyWait("LastGCI", ~(Uint32)0))
+ {
+ g_err << "FAIL " << __LINE__ << endl;
+ return NDBT_FAILED;
+ }
+
+ hugoTrans.clearTable(GETNDB(step), 0);
+
+ if (hugoTrans.loadTable(GETNDB(step), 3*records, 1, true, 1) != 0){
+ g_err << "FAIL " << __LINE__ << endl;
+ return NDBT_FAILED;
+ }
+
+ if (hugoTrans.pkDelRecords(GETNDB(step), 3*records, 1, true, 1) != 0){
+ g_err << "FAIL " << __LINE__ << endl;
+ return NDBT_FAILED;
+ }
+ if (hugoTrans.loadTable(GETNDB(step), records, 1, true, 1) != 0){
+ g_err << "FAIL " << __LINE__ << endl;
+ return NDBT_FAILED;
+ }
+ if (hugoTrans.pkUpdateRecords(GETNDB(step), records, 1, 1) != 0){
+ g_err << "FAIL " << __LINE__ << endl;
+ return NDBT_FAILED;
+ }
+ if (hugoTrans.pkUpdateRecords(GETNDB(step), records, 1, 1) != 0){
+ g_err << "FAIL " << __LINE__ << endl;
+ return NDBT_FAILED;
+ }
+ if (hugoTrans.pkUpdateRecords(GETNDB(step), records, 1, 1) != 0){
+ g_err << "FAIL " << __LINE__ << endl;
+ return NDBT_FAILED;
+ }
+
+ ctx->setProperty("LastGCI", hugoTrans.m_latest_gci);
+ if(ctx->getPropertyWait("LastGCI", ~(Uint32)0))
+ {
+ g_err << "FAIL " << __LINE__ << endl;
+ return NDBT_FAILED;
+ }
+
+ ctx->stopTest();
+ return NDBT_OK;
+}
+
NDBT_TESTSUITE(test_event);
TESTCASE("BasicEventOperation",
"Verify that we can listen to Events"
@@ -1887,6 +1967,14 @@ TESTCASE("Bug27169", ""){
STEP(runRestarterLoop);
FINALIZER(runDropEvent);
}
+TESTCASE("Bug31701", ""){
+ INITIALIZER(runCreateEvent);
+ INITIALIZER(runCreateShadowTable);
+ STEP(runEventApplier);
+ STEP(runBug31701);
+ FINALIZER(runDropEvent);
+ FINALIZER(runDropShadowTable);
+}
NDBT_TESTSUITE_END(test_event);
int main(int argc, const char** argv){
diff --git a/storage/ndb/test/run-test/daily-basic-tests.txt b/storage/ndb/test/run-test/daily-basic-tests.txt
index b7a3a15dae7..103675d8e35 100644
--- a/storage/ndb/test/run-test/daily-basic-tests.txt
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt
@@ -65,6 +65,14 @@ args: -n PkRead
max-time: 500
cmd: testBasic
+args: -n PkSimpleRead
+
+max-time: 500
+cmd: testBasic
+args: -n PkDirtyRead
+
+max-time: 500
+cmd: testBasic
args: -n PkUpdate
max-time: 500
@@ -555,7 +563,7 @@ args: -n Bug25554 T1
max-time: 3000
cmd: testNodeRestart
-args: -n Bug25984
+args: -n Bug25984 T1
max-time: 1000
cmd: testNodeRestart
@@ -575,6 +583,14 @@ args: -n Bug29364 T1
#
# DICT TESTS
+max-time: 500
+cmd: testDict
+args: -n Bug29501 T1
+
+max-time: 500
+cmd: testDict
+args: -n testDropDDObjects T1
+
max-time: 1500
cmd: testDict
args: -n CreateAndDrop
@@ -864,6 +880,10 @@ max-time: 1000
cmd: testNodeRestart
args: -n Bug27466 T1
+max-time: 1500
+cmd: testSystemRestart
+args: -n Bug27434 T1
+
max-time: 1000
cmd: test_event
args: -l 10 -n Bug27169 T1
@@ -945,3 +965,77 @@ args: -n Bug28804 T1 T3
max-time: 180
cmd: testIndex
args: -n Bug28804_ATTRINFO T1 T3
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_DD_1 D1
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_DD_1b D1
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_DD_1 D2
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_DD_1b D2
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_DD_1_LCP D1
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_DD_1b_LCP D1
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_DD_1_LCP D2
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_DD_1b_LCP D2
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_DD_2 D1
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_DD_2b D1
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_DD_2 D2
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_DD_2b D2
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_DD_2_LCP D1
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_DD_2b_LCP D1
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_DD_2_LCP D2
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_DD_2b_LCP D2
+
+max-time: 600
+cmd: testNodeRestart
+args: -n Bug31525 T1
+
+max-time: 300
+cmd: test_event
+args: -n Bug31701 T1
+
+
diff --git a/storage/ndb/test/src/HugoOperations.cpp b/storage/ndb/test/src/HugoOperations.cpp
index 1a2e5180f1f..93a9eaf435a 100644
--- a/storage/ndb/test/src/HugoOperations.cpp
+++ b/storage/ndb/test/src/HugoOperations.cpp
@@ -93,6 +93,7 @@ rand_lock_mode:
case NdbOperation::LM_Read:
case NdbOperation::LM_Exclusive:
case NdbOperation::LM_CommittedRead:
+ case NdbOperation::LM_SimpleRead:
if(idx && idx->getType() == NdbDictionary::Index::OrderedIndex &&
pIndexScanOp == 0)
{
diff --git a/storage/ndb/test/src/HugoTransactions.cpp b/storage/ndb/test/src/HugoTransactions.cpp
index 3a1600815e0..0e5f7cd8115 100644
--- a/storage/ndb/test/src/HugoTransactions.cpp
+++ b/storage/ndb/test/src/HugoTransactions.cpp
@@ -341,50 +341,14 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
int
HugoTransactions::scanUpdateRecords(Ndb* pNdb,
- int records,
- int abortPercent,
- int parallelism){
- if(m_defaultScanUpdateMethod == 1){
- return scanUpdateRecords1(pNdb, records, abortPercent, parallelism);
- } else if(m_defaultScanUpdateMethod == 2){
- return scanUpdateRecords2(pNdb, records, abortPercent, parallelism);
- } else {
- return scanUpdateRecords3(pNdb, records, abortPercent, parallelism);
- }
-}
-
-// Scan all records exclusive and update
-// them one by one
-int
-HugoTransactions::scanUpdateRecords1(Ndb* pNdb,
- int records,
- int abortPercent,
- int parallelism){
- return scanUpdateRecords3(pNdb, records, abortPercent, 1);
-}
-
-// Scan all records exclusive and update
-// them batched by asking nextScanResult to
-// give us all cached records before fetching new
-// records from db
-int
-HugoTransactions::scanUpdateRecords2(Ndb* pNdb,
- int records,
- int abortPercent,
- int parallelism){
- return scanUpdateRecords3(pNdb, records, abortPercent, parallelism);
-}
-
-int
-HugoTransactions::scanUpdateRecords3(Ndb* pNdb,
- int records,
- int abortPercent,
- int parallelism){
- int retryAttempt = 0;
+ NdbScanOperation::ScanFlag flags,
+ int records,
+ int abortPercent,
+ int parallelism){
+ int retryAttempt = 0;
int check, a;
NdbScanOperation *pOp;
-
while (true){
restart:
if (retryAttempt++ >= m_retryMax){
@@ -411,8 +375,9 @@ restart:
return NDBT_FAILED;
}
- if( pOp->readTuplesExclusive(parallelism) ) {
- ERR(pTrans->getNdbError());
+ if( pOp->readTuples(NdbOperation::LM_Exclusive, flags,
+ parallelism))
+ {
closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -429,15 +394,18 @@ restart:
check = pTrans->execute(NoCommit, AbortOnError);
if( check == -1 ) {
const NdbError err = pTrans->getNdbError();
- ERR(err);
- closeTransaction(pNdb);
if (err.status == NdbError::TemporaryError){
+ ERR(err);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
+ retryAttempt++;
continue;
}
+ ERR(err);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
-
+
// Abort after 1-100 or 1-records rows
int ranVal = rand();
int abortCount = ranVal % (records == 0 ? 100 : records);
@@ -448,75 +416,114 @@ restart:
abortTrans = true;
}
+ int eof;
int rows = 0;
- while((check = pOp->nextResult(true)) == 0){
- do {
- rows++;
- NdbOperation* pUp = pOp->updateCurrentTuple();
- if(pUp == 0){
+ while((eof = pOp->nextResult(true)) == 0){
+ rows++;
+ if (calc.verifyRowValues(&row) != 0){
+ closeTransaction(pNdb);
+ return NDBT_FAILED;
+ }
+
+ if (abortCount == rows && abortTrans == true){
+ ndbout << "Scan is aborted" << endl;
+ g_info << "Scan is aborted" << endl;
+ pOp->close();
+ if( check == -1 ) {
ERR(pTrans->getNdbError());
closeTransaction(pNdb);
return NDBT_FAILED;
}
- const int updates = calc.getUpdatesValue(&row) + 1;
- const int r = calc.getIdValue(&row);
- for(a = 0; a<tab.getNoOfColumns(); a++){
- if (tab.getColumn(a)->getPrimaryKey() == false){
- if(setValueForAttr(pUp, a, r, updates ) != 0){
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
- }
- }
-
- if (rows == abortCount && abortTrans == true){
- g_info << "Scan is aborted" << endl;
- // This scan should be aborted
- closeTransaction(pNdb);
- return NDBT_OK;
- }
- } while((check = pOp->nextResult(false)) == 0);
-
- if(check != -1){
- check = pTrans->execute(Commit, AbortOnError);
- if(check != -1)
- m_latest_gci = pTrans->getGCI();
- pTrans->restart();
- }
-
- const NdbError err = pTrans->getNdbError();
- if( check == -1 ) {
+
closeTransaction(pNdb);
- ERR(err);
- if (err.status == NdbError::TemporaryError){
- NdbSleep_MilliSleep(50);
- goto restart;
- }
- return NDBT_FAILED;
+ return NDBT_OK;
}
}
-
- const NdbError err = pTrans->getNdbError();
- if( check == -1 ) {
- closeTransaction(pNdb);
- ERR(err);
+ if (eof == -1) {
+ const NdbError err = pTrans->getNdbError();
+
if (err.status == NdbError::TemporaryError){
+ ERR_INFO(err);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
- goto restart;
+ switch (err.code){
+ case 488:
+ case 245:
+ case 490:
+ // Too many active scans, no limit on number of retry attempts
+ break;
+ default:
+ retryAttempt++;
+ }
+ continue;
}
+ ERR(err);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
-
+
closeTransaction(pNdb);
+
+ g_info << rows << " rows have been read" << endl;
+ if (records != 0 && rows != records){
+ g_err << "Check expected number of records failed" << endl
+ << " expected=" << records <<", " << endl
+ << " read=" << rows << endl;
+ return NDBT_FAILED;
+ }
- g_info << rows << " rows have been updated" << endl;
return NDBT_OK;
}
return NDBT_FAILED;
}
int
+HugoTransactions::scanUpdateRecords(Ndb* pNdb,
+ int records,
+ int abortPercent,
+ int parallelism){
+
+ return scanUpdateRecords(pNdb,
+ (NdbScanOperation::ScanFlag)0,
+ records, abortPercent, parallelism);
+}
+
+// Scan all records exclusive and update
+// them one by one
+int
+HugoTransactions::scanUpdateRecords1(Ndb* pNdb,
+ int records,
+ int abortPercent,
+ int parallelism){
+ return scanUpdateRecords(pNdb,
+ (NdbScanOperation::ScanFlag)0,
+ records, abortPercent, 1);
+}
+
+// Scan all records exclusive and update
+// them batched by asking nextScanResult to
+// give us all cached records before fetching new
+// records from db
+int
+HugoTransactions::scanUpdateRecords2(Ndb* pNdb,
+ int records,
+ int abortPercent,
+ int parallelism){
+ return scanUpdateRecords(pNdb, (NdbScanOperation::ScanFlag)0,
+ records, abortPercent, parallelism);
+}
+
+int
+HugoTransactions::scanUpdateRecords3(Ndb* pNdb,
+ int records,
+ int abortPercent,
+ int parallelism)
+{
+ return scanUpdateRecords(pNdb, (NdbScanOperation::ScanFlag)0,
+ records, abortPercent, parallelism);
+}
+
+int
HugoTransactions::loadTable(Ndb* pNdb,
int records,
int batch,
@@ -524,7 +531,22 @@ HugoTransactions::loadTable(Ndb* pNdb,
int doSleep,
bool oneTrans,
int value,
- bool abort){
+ bool abort)
+{
+ return loadTableStartFrom(pNdb, 0, records, batch, allowConstraintViolation,
+ doSleep, oneTrans, value, abort);
+}
+
+int
+HugoTransactions::loadTableStartFrom(Ndb* pNdb,
+ int startFrom,
+ int records,
+ int batch,
+ bool allowConstraintViolation,
+ int doSleep,
+ bool oneTrans,
+ int value,
+ bool abort){
int check;
int retryAttempt = 0;
int retryMax = 5;
@@ -543,8 +565,9 @@ HugoTransactions::loadTable(Ndb* pNdb,
<< " -> rows/commit = " << batch << endl;
}
+ Uint32 orgbatch = batch;
g_info << "|- Inserting records..." << endl;
- for (int c=0 ; c<records ; ){
+ for (int c=0 ; c<records; ){
bool closeTrans = true;
if(c + batch > records)
@@ -578,7 +601,7 @@ HugoTransactions::loadTable(Ndb* pNdb,
}
}
- if(pkInsertRecord(pNdb, c, batch, value) != NDBT_OK)
+ if(pkInsertRecord(pNdb, c + startFrom, batch, value) != NDBT_OK)
{
ERR(pTrans->getNdbError());
closeTransaction(pNdb);
@@ -625,6 +648,7 @@ HugoTransactions::loadTable(Ndb* pNdb,
ERR(err);
NdbSleep_MilliSleep(50);
retryAttempt++;
+ batch = 1;
continue;
break;
@@ -670,7 +694,14 @@ HugoTransactions::loadTable(Ndb* pNdb,
int
HugoTransactions::fillTable(Ndb* pNdb,
- int batch){
+ int batch){
+ return fillTableStartFrom(pNdb, 0, batch);
+}
+
+int
+HugoTransactions::fillTableStartFrom(Ndb* pNdb,
+ int startFrom,
+ int batch){
int check;
int retryAttempt = 0;
int retryMax = 5;
@@ -688,7 +719,7 @@ HugoTransactions::fillTable(Ndb* pNdb,
<< " -> rows/commit = " << batch << endl;
}
- for (int c=0 ; ; ){
+ for (int c=startFrom ; ; ){
if (retryAttempt >= retryMax){
g_info << "Record " << c << " could not be inserted, has retried "
diff --git a/storage/ndb/test/src/Makefile.am b/storage/ndb/test/src/Makefile.am
index a025579cb72..aa486108235 100644
--- a/storage/ndb/test/src/Makefile.am
+++ b/storage/ndb/test/src/Makefile.am
@@ -24,7 +24,7 @@ libNDBT_a_SOURCES = \
NdbRestarter.cpp NdbRestarts.cpp NDBT_Output.cpp \
NdbBackup.cpp NdbConfig.cpp NdbGrep.cpp NDBT_Table.cpp \
NdbSchemaCon.cpp NdbSchemaOp.cpp getarg.c \
- CpcClient.cpp NdbMixRestarter.cpp NDBT_Thread.cpp
+ CpcClient.cpp NdbMixRestarter.cpp NDBT_Thread.cpp dbutil.cpp
INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/src/common/mgmcommon -I$(top_srcdir)/storage/ndb/include/mgmcommon -I$(top_srcdir)/storage/ndb/include/kernel -I$(top_srcdir)/storage/ndb/src/mgmapi
diff --git a/storage/ndb/test/src/NDBT_Thread.cpp b/storage/ndb/test/src/NDBT_Thread.cpp
index 56cf2f6815b..ff6785724ba 100644
--- a/storage/ndb/test/src/NDBT_Thread.cpp
+++ b/storage/ndb/test/src/NDBT_Thread.cpp
@@ -131,7 +131,7 @@ NDBT_Thread::exit()
m_state = Exit;
signal();
unlock();
-};
+}
void
NDBT_Thread::join()
diff --git a/storage/ndb/test/src/NdbRestarts.cpp b/storage/ndb/test/src/NdbRestarts.cpp
index 6ec520887b5..86e71f4b3fc 100644
--- a/storage/ndb/test/src/NdbRestarts.cpp
+++ b/storage/ndb/test/src/NdbRestarts.cpp
@@ -607,6 +607,7 @@ NFDuringNR_codes[] = {
5026,
7139,
7132,
+ 5045,
//LCP
8000,
diff --git a/storage/ndb/test/src/UtilTransactions.cpp b/storage/ndb/test/src/UtilTransactions.cpp
index 5a408140c8e..776ffd176b3 100644
--- a/storage/ndb/test/src/UtilTransactions.cpp
+++ b/storage/ndb/test/src/UtilTransactions.cpp
@@ -42,38 +42,9 @@ UtilTransactions::UtilTransactions(Ndb* ndb,
int
UtilTransactions::clearTable(Ndb* pNdb,
- int records,
- int parallelism){
- if(m_defaultClearMethod == 1){
- return clearTable1(pNdb, records, parallelism);
- } else if(m_defaultClearMethod == 2){
- return clearTable2(pNdb, records, parallelism);
- } else {
- return clearTable3(pNdb, records, parallelism);
- }
-}
-
-
-int
-UtilTransactions::clearTable1(Ndb* pNdb,
- int records,
- int parallelism)
-{
- return clearTable3(pNdb, records, 1);
-}
-
-int
-UtilTransactions::clearTable2(Ndb* pNdb,
- int records,
- int parallelism)
-{
- return clearTable3(pNdb, records, parallelism);
-}
-
-int
-UtilTransactions::clearTable3(Ndb* pNdb,
- int records,
- int parallelism){
+ NdbScanOperation::ScanFlag flags,
+ int records,
+ int parallelism){
// Scan all records exclusive and delete
// them one by one
int retryAttempt = 0;
@@ -116,7 +87,7 @@ UtilTransactions::clearTable3(Ndb* pNdb,
goto failed;
}
- if( pOp->readTuplesExclusive(par) ) {
+ if( pOp->readTuples(NdbOperation::LM_Exclusive, flags, par) ) {
err = pTrans->getNdbError();
goto failed;
}
@@ -180,6 +151,43 @@ UtilTransactions::clearTable3(Ndb* pNdb,
}
int
+UtilTransactions::clearTable(Ndb* pNdb,
+ int records,
+ int parallelism){
+
+ return clearTable(pNdb, (NdbScanOperation::ScanFlag)0,
+ records, parallelism);
+}
+
+
+int
+UtilTransactions::clearTable1(Ndb* pNdb,
+ int records,
+ int parallelism)
+{
+ return clearTable(pNdb, (NdbScanOperation::ScanFlag)0,
+ records, 1);
+}
+
+int
+UtilTransactions::clearTable2(Ndb* pNdb,
+ int records,
+ int parallelism)
+{
+ return clearTable(pNdb, (NdbScanOperation::ScanFlag)0,
+ records, parallelism);
+}
+
+int
+UtilTransactions::clearTable3(Ndb* pNdb,
+ int records,
+ int parallelism)
+{
+ return clearTable(pNdb, (NdbScanOperation::ScanFlag)0,
+ records, parallelism);
+}
+
+int
UtilTransactions::copyTableData(Ndb* pNdb,
const char* destName){
// Scan all records and copy
diff --git a/storage/ndb/test/src/dbutil.cpp b/storage/ndb/test/src/dbutil.cpp
new file mode 100755
index 00000000000..0c936f53182
--- /dev/null
+++ b/storage/ndb/test/src/dbutil.cpp
@@ -0,0 +1,176 @@
+// dbutil.cpp: implementation of the database utilities class.
+//
+//////////////////////////////////////////////////////////////////////
+
+#include "dbutil.hpp"
+
+//////////////////////////////////////////////////////////////////////
+// Construction/Destruction
+//////////////////////////////////////////////////////////////////////
+dbutil::dbutil(const char * dbname)
+{
+ memset(host,' ',sizeof(host));
+ memset(user,' ',sizeof(user));
+ memset(dbs,' ',sizeof(dbs));
+ port = 0;
+ memset(socket,' ',sizeof(socket));
+ this->SetDbName(dbname);
+}
+
+dbutil::~dbutil()
+{
+ this->DatabaseLogout();
+}
+
+//////////////////////////////////////////////////////////////////////
+// Database Login
+//////////////////////////////////////////////////////////////////////
+void dbutil::DatabaseLogin(const char* system,
+ const char* usr,
+ const char* password,
+ unsigned int portIn,
+ const char* sockIn,
+ bool transactional
+ ){
+ if (!(myDbHandel = mysql_init(NULL))){
+ myerror("mysql_init() failed");
+ exit(1);
+ }
+ this->SetUser(usr);
+ this->SetHost(system);
+ this->SetPassword(password);
+ this->SetPort(portIn);
+ this->SetSocket(sockIn);
+
+ if (!(mysql_real_connect(myDbHandel, host, user, pass, "test", port, socket, 0))){
+ myerror("connection failed");
+ mysql_close(myDbHandel);
+ fprintf(stdout, "\n Check the connection options using --help or -?\n");
+ exit(1);
+ }
+
+ myDbHandel->reconnect= 1;
+
+ /* set AUTOCOMMIT */
+ if(!transactional){
+ mysql_autocommit(myDbHandel, TRUE);
+ }
+ else{
+ mysql_autocommit(myDbHandel, FALSE);
+ }
+
+ fprintf(stdout, "\n\tConnected to MySQL server version: %s (%lu)\n\n",
+ mysql_get_server_info(myDbHandel),
+ (unsigned long) mysql_get_server_version(myDbHandel));
+}
+
+//////////////////////////////////////////////////////////////////////
+// Database Logout
+//////////////////////////////////////////////////////////////////////
+void dbutil::DatabaseLogout(){
+ if (myDbHandel){
+ fprintf(stdout, "\n\tClosing the MySQL database connection ...\n\n");
+ mysql_close(myDbHandel);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////
+// Prepare MySQL Statements Cont
+//////////////////////////////////////////////////////////////////////
+MYSQL_STMT *STDCALL dbutil::MysqlSimplePrepare(const char *query){
+#ifdef DEBUG
+  printf("Inside dbutil::MysqlSimplePrepare\n");
+#endif
+  int result = 0;
+ MYSQL_STMT *my_stmt= mysql_stmt_init(this->GetDbHandel());
+ if (my_stmt && (result = mysql_stmt_prepare(my_stmt, query, strlen(query)))){
+ printf("res = %s\n",mysql_stmt_error(my_stmt));
+ mysql_stmt_close(my_stmt);
+ return 0;
+ }
+ return my_stmt;
+}
+//////////////////////////////////////////////////////////////////////
+// Error Printing
+//////////////////////////////////////////////////////////////////////
+void dbutil::PrintError(const char *msg){
+ if (this->GetDbHandel()
+ && mysql_errno(this->GetDbHandel())){
+ if (this->GetDbHandel()->server_version){
+ fprintf(stdout, "\n [MySQL-%s]",
+ this->GetDbHandel()->server_version);
+ }
+ else
+ fprintf(stdout, "\n [MySQL]");
+ fprintf(stdout, "[%d] %s\n",
+ mysql_errno(this->GetDbHandel()),
+ mysql_error(this->GetDbHandel()));
+ }
+ else if (msg)
+ fprintf(stderr, " [MySQL] %s\n", msg);
+}
+
+void dbutil::PrintStError(MYSQL_STMT *stmt, const char *msg)
+{
+ if (stmt && mysql_stmt_errno(stmt))
+ {
+ if (this->GetDbHandel()
+ && this->GetDbHandel()->server_version)
+ fprintf(stdout, "\n [MySQL-%s]",
+ this->GetDbHandel()->server_version);
+ else
+ fprintf(stdout, "\n [MySQL]");
+
+ fprintf(stdout, "[%d] %s\n", mysql_stmt_errno(stmt),
+ mysql_stmt_error(stmt));
+ }
+ else if (msg)
+ fprintf(stderr, " [MySQL] %s\n", msg);
+}
+/////////////////////////////////////////////////////
+int dbutil::Select_DB()
+{
+ return mysql_select_db(this->GetDbHandel(),
+ this->GetDbName());
+}
+////////////////////////////////////////////////////
+int dbutil::Do_Query(char * stm)
+{
+ return mysql_query(this->GetDbHandel(), stm);
+}
+////////////////////////////////////////////////////
+const char * dbutil::GetError()
+{
+ return mysql_error(this->GetDbHandel());
+}
+////////////////////////////////////////////////////
+int dbutil::GetErrorNumber()
+{
+ return mysql_errno(this->GetDbHandel());
+}
+////////////////////////////////////////////////////
+unsigned long dbutil::SelectCountTable(const char * table)
+{
+ unsigned long count = 0;
+ MYSQL_RES *result;
+ char query[1024];
+ MYSQL_ROW row;
+
+ sprintf(query,"select count(*) from `%s`", table);
+ if (mysql_query(this->GetDbHandel(),query) || !(result=mysql_store_result(this->GetDbHandel())))
+ {
+ printf("error\n");
+ return 1;
+ }
+ row= mysql_fetch_row(result);
+ count= (ulong) strtoull(row[0], (char**) 0, 10);
+ mysql_free_result(result);
+
+ return count;
+}
+void dbutil::Die(const char *file, int line, const char *expr){
+ fprintf(stderr, "%s:%d: check failed: '%s'\n", file, line, expr);
+ abort();
+}
+
+