author     msvensson@neptunus.(none) <>   2005-09-15 14:19:56 +0200
committer  msvensson@neptunus.(none) <>   2005-09-15 14:19:56 +0200
commit     441c70cd9c4fc36637bab9f0c0753241f39512f8 (patch)
tree       7f34954cdbe9ac7c4ba1d51dbb4d5b0b9e446077 /storage
parent     10eee793638b981cc66921a6c39ecd6e79001836 (diff)
parent     e7e414cded22969d44d504ff56f7fe2a01230cd1 (diff)
download   mariadb-git-441c70cd9c4fc36637bab9f0c0753241f39512f8.tar.gz
Merge bk-internal.mysql.com:/home/bk/mysql-5.1
into neptunus.(none):/home/msvensson/mysql/mysql-5.1
Diffstat (limited to 'storage')
-rw-r--r-- storage/ndb/include/debugger/EventLogger.hpp | 1
-rw-r--r-- storage/ndb/include/kernel/GlobalSignalNumbers.h | 152
-rw-r--r-- storage/ndb/include/kernel/NodeInfo.hpp | 4
-rw-r--r-- storage/ndb/include/kernel/signaldata/AlterTab.hpp | 1
-rw-r--r-- storage/ndb/include/kernel/signaldata/CreateEvnt.hpp | 69
-rw-r--r-- storage/ndb/include/kernel/signaldata/CreateTab.hpp | 1
-rw-r--r-- storage/ndb/include/kernel/signaldata/DropTab.hpp | 1
-rw-r--r-- storage/ndb/include/kernel/signaldata/EventReport.hpp | 18
-rw-r--r-- storage/ndb/include/kernel/signaldata/SumaImpl.hpp | 424
-rw-r--r-- storage/ndb/include/mgmapi/mgmapi.h | 1
-rw-r--r-- storage/ndb/include/mgmapi/mgmapi_config_parameters.h | 5
-rw-r--r-- storage/ndb/include/mgmapi/ndb_logevent.h | 16
-rw-r--r-- storage/ndb/include/mgmapi/ndb_logevent.txt | 56
-rw-r--r-- storage/ndb/include/ndbapi/Ndb.hpp | 29
-rw-r--r-- storage/ndb/include/ndbapi/NdbDictionary.hpp | 41
-rw-r--r-- storage/ndb/include/ndbapi/NdbError.hpp | 8
-rw-r--r-- storage/ndb/include/ndbapi/NdbEventOperation.hpp | 71
-rw-r--r-- storage/ndb/include/ndbapi/ndb_cluster_connection.hpp | 1
-rw-r--r-- storage/ndb/include/ndbapi/ndberror.h | 5
-rw-r--r-- storage/ndb/ndbapi-examples/Makefile | 16
-rw-r--r-- storage/ndb/ndbapi-examples/mgmapi_logevent/Makefile (renamed from storage/ndb/ndbapi-examples/mgmapi_logevent_example/Makefile) | 0
-rw-r--r-- storage/ndb/ndbapi-examples/mgmapi_logevent/mgmapi_logevent.cpp (renamed from storage/ndb/ndbapi-examples/mgmapi_logevent_example/mgmapi_logevent.cpp) | 0
-rw-r--r-- storage/ndb/ndbapi-examples/ndbapi_async/Makefile (renamed from storage/ndb/ndbapi-examples/ndbapi_async_example/Makefile) | 0
-rw-r--r-- storage/ndb/ndbapi-examples/ndbapi_async/ndbapi_async.cpp (renamed from storage/ndb/ndbapi-examples/ndbapi_async_example/ndbapi_async.cpp) | 0
-rw-r--r-- storage/ndb/ndbapi-examples/ndbapi_async/readme.txt (renamed from storage/ndb/ndbapi-examples/ndbapi_async_example/readme.txt) | 0
-rw-r--r-- storage/ndb/ndbapi-examples/ndbapi_async1/Makefile (renamed from storage/ndb/ndbapi-examples/ndbapi_async_example1/Makefile) | 0
-rw-r--r-- storage/ndb/ndbapi-examples/ndbapi_async1/ndbapi_async1.cpp (renamed from storage/ndb/ndbapi-examples/ndbapi_async_example1/ndbapi_async1.cpp) | 0
-rw-r--r-- storage/ndb/ndbapi-examples/ndbapi_event/Makefile (renamed from storage/ndb/ndbapi-examples/ndbapi_event_example/Makefile) | 0
-rw-r--r-- storage/ndb/ndbapi-examples/ndbapi_event/ndbapi_event.cpp (renamed from storage/ndb/ndbapi-examples/ndbapi_event_example/ndbapi_event.cpp) | 11
-rw-r--r-- storage/ndb/ndbapi-examples/ndbapi_retries/Makefile (renamed from storage/ndb/ndbapi-examples/ndbapi_retries_example/Makefile) | 0
-rw-r--r-- storage/ndb/ndbapi-examples/ndbapi_retries/ndbapi_retries.cpp (renamed from storage/ndb/ndbapi-examples/ndbapi_retries_example/ndbapi_retries.cpp) | 0
-rw-r--r-- storage/ndb/ndbapi-examples/ndbapi_scan/Makefile (renamed from storage/ndb/ndbapi-examples/ndbapi_scan_example/Makefile) | 0
-rw-r--r-- storage/ndb/ndbapi-examples/ndbapi_scan/ndbapi_scan.cpp (renamed from storage/ndb/ndbapi-examples/ndbapi_scan_example/ndbapi_scan.cpp) | 0
-rw-r--r-- storage/ndb/ndbapi-examples/ndbapi_scan/readme.txt (renamed from storage/ndb/ndbapi-examples/ndbapi_scan_example/readme.txt) | 0
-rw-r--r-- storage/ndb/ndbapi-examples/ndbapi_simple/Makefile (renamed from storage/ndb/ndbapi-examples/ndbapi_simple_example/Makefile) | 0
-rw-r--r-- storage/ndb/ndbapi-examples/ndbapi_simple/ndbapi_simple.cpp (renamed from storage/ndb/ndbapi-examples/ndbapi_simple_example/ndbapi_simple.cpp) | 0
-rw-r--r-- storage/ndb/ndbapi-examples/ndbapi_simple_index/Makefile (renamed from storage/ndb/ndbapi-examples/ndbapi_simple_index_example/Makefile) | 0
-rw-r--r-- storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index.cpp (renamed from storage/ndb/ndbapi-examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp) | 0
-rw-r--r-- storage/ndb/src/common/debugger/EventLogger.cpp | 41
-rw-r--r-- storage/ndb/src/common/debugger/Makefile.am | 2
-rw-r--r-- storage/ndb/src/common/debugger/signaldata/SignalNames.cpp | 30
-rw-r--r-- storage/ndb/src/common/debugger/signaldata/SumaImpl.cpp | 52
-rw-r--r-- storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp | 15
-rw-r--r-- storage/ndb/src/kernel/SimBlockList.cpp | 5
-rw-r--r-- storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 14
-rw-r--r-- storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 464
-rw-r--r-- storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp | 18
-rw-r--r-- storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 1
-rw-r--r-- storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 5
-rw-r--r-- storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 20
-rw-r--r-- storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 4
-rw-r--r-- storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp | 2
-rw-r--r-- storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp | 1
-rw-r--r-- storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp | 40
-rw-r--r-- storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp | 1
-rw-r--r-- storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp | 1
-rw-r--r-- storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp | 25
-rw-r--r-- storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp | 18
-rw-r--r-- storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp | 6
-rw-r--r-- storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 17
-rw-r--r-- storage/ndb/src/kernel/blocks/suma/Suma.cpp | 5134
-rw-r--r-- storage/ndb/src/kernel/blocks/suma/Suma.hpp | 541
-rw-r--r-- storage/ndb/src/kernel/blocks/suma/SumaInit.cpp | 204
-rw-r--r-- storage/ndb/src/kernel/blocks/trix/Trix.cpp | 134
-rw-r--r-- storage/ndb/src/kernel/blocks/trix/Trix.hpp | 2
-rw-r--r-- storage/ndb/src/kernel/vm/Configuration.cpp | 4
-rw-r--r-- storage/ndb/src/kernel/vm/SimulatedBlock.cpp | 12
-rw-r--r-- storage/ndb/src/kernel/vm/SimulatedBlock.hpp | 4
-rw-r--r-- storage/ndb/src/mgmapi/mgmapi.cpp | 2
-rw-r--r-- storage/ndb/src/mgmapi/ndb_logevent.cpp | 8
-rw-r--r-- storage/ndb/src/mgmclient/CommandInterpreter.cpp | 295
-rw-r--r-- storage/ndb/src/mgmsrv/MgmtSrvr.cpp | 34
-rw-r--r-- storage/ndb/src/mgmsrv/MgmtSrvr.hpp | 7
-rw-r--r-- storage/ndb/src/mgmsrv/Services.cpp | 30
-rw-r--r-- storage/ndb/src/mgmsrv/Services.hpp | 2
-rw-r--r-- storage/ndb/src/ndbapi/ClusterMgr.cpp | 28
-rw-r--r-- storage/ndb/src/ndbapi/ClusterMgr.hpp | 6
-rw-r--r-- storage/ndb/src/ndbapi/Ndb.cpp | 108
-rw-r--r-- storage/ndb/src/ndbapi/NdbDictionary.cpp | 11
-rw-r--r-- storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp | 416
-rw-r--r-- storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp | 42
-rw-r--r-- storage/ndb/src/ndbapi/NdbEventOperation.cpp | 93
-rw-r--r-- storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp | 1783
-rw-r--r-- storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp | 384
-rw-r--r-- storage/ndb/src/ndbapi/NdbImpl.hpp | 14
-rw-r--r-- storage/ndb/src/ndbapi/NdbWaiter.hpp | 4
-rw-r--r-- storage/ndb/src/ndbapi/Ndberr.cpp | 7
-rw-r--r-- storage/ndb/src/ndbapi/Ndbif.cpp | 77
-rw-r--r-- storage/ndb/src/ndbapi/Ndbinit.cpp | 33
-rw-r--r-- storage/ndb/src/ndbapi/ObjectMap.hpp | 13
-rw-r--r-- storage/ndb/src/ndbapi/TransporterFacade.cpp | 43
-rw-r--r-- storage/ndb/src/ndbapi/TransporterFacade.hpp | 12
-rw-r--r-- storage/ndb/src/ndbapi/ndb_cluster_connection.cpp | 46
-rw-r--r-- storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp | 1
-rw-r--r-- storage/ndb/src/ndbapi/ndberror.c | 711
-rw-r--r-- storage/ndb/test/include/HugoOperations.hpp | 2
-rw-r--r-- storage/ndb/test/include/HugoTransactions.hpp | 7
-rw-r--r-- storage/ndb/test/include/NDBT_Table.hpp | 7
-rw-r--r-- storage/ndb/test/include/NDBT_Test.hpp | 13
-rw-r--r-- storage/ndb/test/ndbapi/Makefile.am | 2
-rw-r--r-- storage/ndb/test/ndbapi/bank/Bank.cpp | 4
-rw-r--r-- storage/ndb/test/ndbapi/bank/Bank.hpp | 2
-rw-r--r-- storage/ndb/test/ndbapi/bank/bankCreator.cpp | 4
-rw-r--r-- storage/ndb/test/ndbapi/bank/bankMakeGL.cpp | 6
-rw-r--r-- storage/ndb/test/ndbapi/bank/bankSumAccounts.cpp | 6
-rw-r--r-- storage/ndb/test/ndbapi/bank/bankTimer.cpp | 4
-rw-r--r-- storage/ndb/test/ndbapi/bank/bankTransactionMaker.cpp | 4
-rw-r--r-- storage/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp | 6
-rw-r--r-- storage/ndb/test/ndbapi/bank/testBank.cpp | 18
-rw-r--r-- storage/ndb/test/ndbapi/bench/asyncGenerator.cpp | 9
-rw-r--r-- storage/ndb/test/ndbapi/bench/ndb_schema.hpp | 2
-rw-r--r-- storage/ndb/test/ndbapi/test_event.cpp | 1253
-rw-r--r-- storage/ndb/test/ndbapi/test_event_multi_table.cpp | 398
-rw-r--r-- storage/ndb/test/run-test/Makefile.am | 5
-rw-r--r-- storage/ndb/test/run-test/conf-daily-basic-ndb08.txt | 19
-rw-r--r-- storage/ndb/test/run-test/conf-daily-devel-ndb08.txt | 19
-rw-r--r-- storage/ndb/test/run-test/conf-daily-sql-ndb08.txt | 20
-rw-r--r-- storage/ndb/test/run-test/daily-devel-tests.txt | 14
-rw-r--r-- storage/ndb/test/sql/BANK.sql | 44
-rw-r--r-- storage/ndb/test/sql/T1.sql | 9
-rw-r--r-- storage/ndb/test/src/HugoOperations.cpp | 4
-rw-r--r-- storage/ndb/test/src/HugoTransactions.cpp | 322
-rw-r--r-- storage/ndb/test/src/NDBT_ResultRow.cpp | 3
-rw-r--r-- storage/ndb/test/src/NDBT_Tables.cpp | 2
-rw-r--r-- storage/ndb/test/src/NDBT_Test.cpp | 238
-rw-r--r-- storage/ndb/test/src/UtilTransactions.cpp | 21
-rw-r--r-- storage/ndb/test/tools/Makefile.am | 3
-rw-r--r-- storage/ndb/test/tools/listen.cpp | 169
128 files changed, 8271 insertions, 6326 deletions
diff --git a/storage/ndb/include/debugger/EventLogger.hpp b/storage/ndb/include/debugger/EventLogger.hpp
index 6308cf25465..6d09be70fe0 100644
--- a/storage/ndb/include/debugger/EventLogger.hpp
+++ b/storage/ndb/include/debugger/EventLogger.hpp
@@ -19,7 +19,6 @@
#include <logger/Logger.hpp>
#include <logger/FileLogHandler.hpp>
-#include "GrepError.hpp"
#include <kernel/kernel_types.h>
#include <kernel/LogLevel.hpp>
#include <kernel/signaldata/EventReport.hpp>
diff --git a/storage/ndb/include/kernel/GlobalSignalNumbers.h b/storage/ndb/include/kernel/GlobalSignalNumbers.h
index adaa33b09d8..141e7392250 100644
--- a/storage/ndb/include/kernel/GlobalSignalNumbers.h
+++ b/storage/ndb/include/kernel/GlobalSignalNumbers.h
@@ -169,6 +169,7 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_ADD_FRAGREF 110
#define GSN_ADD_FRAGREQ 111
+#define GSN_API_START_REP 120
#define GSN_API_FAILCONF 113
#define GSN_API_FAILREQ 114
#define GSN_CNTR_START_REQ 115
@@ -176,7 +177,7 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_CNTR_START_REF 117
#define GSN_CNTR_START_CONF 118
#define GSN_CNTR_START_REP 119
-/* 120 unused */
+/* 120 not unused */
/* 121 unused */
/* 122 unused */
/* 123 unused */
@@ -592,6 +593,7 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_BLOCK_COMMIT_ORD 485
#define GSN_UNBLOCK_COMMIT_ORD 486
+#define GSN_NODE_START_REP 502
#define GSN_NODE_STATE_REP 487
#define GSN_CHANGE_NODE_STATE_REQ 488
#define GSN_CHANGE_NODE_STATE_CONF 489
@@ -612,7 +614,7 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_WAIT_GCP_REF 500
#define GSN_WAIT_GCP_CONF 501
-/* 502 not used */
+/* 502 used */
/**
* Trigger and index signals
@@ -774,66 +776,59 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_ALTER_INDX_REF 604
#define GSN_ALTER_INDX_CONF 605
-/**
- * Grep signals
- */
#define GSN_ALTER_TABLE_REP 606
#define GSN_API_BROADCAST_REP 607
-#define GSN_GREP_SUB_CREATE_CONF 608
-#define GSN_GREP_CREATE_REQ 609
-#define GSN_GREP_CREATE_REF 610
-#define GSN_GREP_CREATE_CONF 611
-
-#define GSN_GREP_SUB_START_REQ 612
-#define GSN_GREP_SUB_START_REF 613
-#define GSN_GREP_SUB_START_CONF 614
-#define GSN_GREP_START_REQ 615
-#define GSN_GREP_START_REF 616
-#define GSN_GREP_START_CONF 617
-
-#define GSN_GREP_SUB_SYNC_REQ 618
-#define GSN_GREP_SUB_SYNC_REF 619
-#define GSN_GREP_SUB_SYNC_CONF 620
-#define GSN_GREP_SYNC_REQ 621
-#define GSN_GREP_SYNC_REF 622
-#define GSN_GREP_SYNC_CONF 623
-
-/**
- * REP signals
- */
-#define GSN_REP_WAITGCP_REQ 627
-#define GSN_REP_WAITGCP_REF 628
-#define GSN_REP_WAITGCP_CONF 629
-#define GSN_GREP_WAITGCP_REQ 630
-#define GSN_GREP_WAITGCP_REF 631
-#define GSN_GREP_WAITGCP_CONF 632
-#define GSN_REP_GET_GCI_REQ 633
-#define GSN_REP_GET_GCI_REF 634
-#define GSN_REP_GET_GCI_CONF 635
-#define GSN_REP_GET_GCIBUFFER_REQ 636
-#define GSN_REP_GET_GCIBUFFER_REF 637
-#define GSN_REP_GET_GCIBUFFER_CONF 638
-#define GSN_REP_INSERT_GCIBUFFER_REQ 639
-#define GSN_REP_INSERT_GCIBUFFER_REF 640
-#define GSN_REP_INSERT_GCIBUFFER_CONF 641
-#define GSN_REP_CLEAR_PS_GCIBUFFER_REQ 642
-#define GSN_REP_CLEAR_PS_GCIBUFFER_REF 643
-#define GSN_REP_CLEAR_PS_GCIBUFFER_CONF 644
-#define GSN_REP_CLEAR_SS_GCIBUFFER_REQ 645
-#define GSN_REP_CLEAR_SS_GCIBUFFER_REF 646
-#define GSN_REP_CLEAR_SS_GCIBUFFER_CONF 647
-#define GSN_REP_DATA_PAGE 648
-#define GSN_REP_GCIBUFFER_ACC_REP 649
-
-#define GSN_GREP_SUB_REMOVE_REQ 650
-#define GSN_GREP_SUB_REMOVE_REF 651
-#define GSN_GREP_SUB_REMOVE_CONF 652
-#define GSN_GREP_REMOVE_REQ 653
-#define GSN_GREP_REMOVE_REF 654
-#define GSN_GREP_REMOVE_CONF 655
-
-/* Start Global Replication */
-#define GSN_GREP_REQ 656
+#define GSN_608
+#define GSN_609
+#define GSN_610
+#define GSN_611
+
+#define GSN_612
+#define GSN_613
+#define GSN_614
+#define GSN_615
+#define GSN_616
+#define GSN_617
+
+#define GSN_618
+#define GSN_619
+#define GSN_620
+#define GSN_621
+#define GSN_622
+#define GSN_623
+
+#define GSN_627
+#define GSN_628
+#define GSN_629
+#define GSN_630
+#define GSN_631
+#define GSN_632
+#define GSN_633
+#define GSN_634
+#define GSN_635
+#define GSN_636
+#define GSN_637
+#define GSN_638
+#define GSN_639
+#define GSN_640
+#define GSN_641
+#define GSN_642
+#define GSN_643
+#define GSN_644
+#define GSN_645
+#define GSN_646
+#define GSN_647
+#define GSN_648
+#define GSN_649
+
+#define GSN_650
+#define GSN_651
+#define GSN_652
+#define GSN_653
+#define GSN_654
+#define GSN_655
+
+#define GSN_656
/**
* Management server
@@ -864,13 +859,12 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
#define GSN_CREATE_SUBID_REF 662
#define GSN_CREATE_SUBID_CONF 663
-/* GREP */
-#define GSN_GREP_CREATE_SUBID_REQ 664
-#define GSN_GREP_CREATE_SUBID_REF 665
-#define GSN_GREP_CREATE_SUBID_CONF 666
-#define GSN_REP_DROP_TABLE_REQ 667
-#define GSN_REP_DROP_TABLE_REF 668
-#define GSN_REP_DROP_TABLE_CONF 669
+#define GSN_664
+#define GSN_665
+#define GSN_666
+#define GSN_667
+#define GSN_668
+#define GSN_669
/*
* TUX
@@ -911,27 +905,27 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES;
/**
* SUMA restart protocol
*/
-#define GSN_SUMA_START_ME 691
+#define GSN_SUMA_START_ME_REQ 691
+#define GSN_SUMA_START_ME_REF 694
+#define GSN_SUMA_START_ME_CONF 695
#define GSN_SUMA_HANDOVER_REQ 692
+#define GSN_SUMA_HANDOVER_REF 696
#define GSN_SUMA_HANDOVER_CONF 693
-/* not used 694 */
-/* not used 695 */
-/* not used 696 */
+/* used 694 */
+/* used 695 */
+/* used 696 */
-/**
- * GREP restart protocol
- */
-#define GSN_GREP_START_ME 706
-#define GSN_GREP_ADD_SUB_REQ 707
-#define GSN_GREP_ADD_SUB_REF 708
-#define GSN_GREP_ADD_SUB_CONF 709
+#define GSN_706
+#define GSN_707
+#define GSN_708
+#define GSN_709
/*
* EVENT Signals
*/
-#define GSN_SUB_GCP_COMPLETE_ACC 699
+#define GSN_SUB_GCP_COMPLETE_ACK 699
#define GSN_CREATE_EVNT_REQ 700
#define GSN_CREATE_EVNT_CONF 701
diff --git a/storage/ndb/include/kernel/NodeInfo.hpp b/storage/ndb/include/kernel/NodeInfo.hpp
index 622185323a3..fffd94b5258 100644
--- a/storage/ndb/include/kernel/NodeInfo.hpp
+++ b/storage/ndb/include/kernel/NodeInfo.hpp
@@ -31,7 +31,6 @@ public:
DB = NODE_TYPE_DB, ///< Database node
API = NODE_TYPE_API, ///< NDB API node
MGM = NODE_TYPE_MGM, ///< Management node (incl. NDB API)
- REP = NODE_TYPE_REP, ///< Replication node (incl. NDB API)
INVALID = 255 ///< Invalid type
};
NodeType getType() const;
@@ -76,9 +75,6 @@ operator<<(NdbOut& ndbout, const NodeInfo & info){
case NodeInfo::MGM:
ndbout << "MGM";
break;
- case NodeInfo::REP:
- ndbout << "REP";
- break;
case NodeInfo::INVALID:
ndbout << "INVALID";
break;
diff --git a/storage/ndb/include/kernel/signaldata/AlterTab.hpp b/storage/ndb/include/kernel/signaldata/AlterTab.hpp
index 02d4eb95d2e..506ba94031a 100644
--- a/storage/ndb/include/kernel/signaldata/AlterTab.hpp
+++ b/storage/ndb/include/kernel/signaldata/AlterTab.hpp
@@ -103,6 +103,7 @@ class AlterTabConf {
friend class Dbtc;
friend class Dblqh;
friend class Dbtup;
+ friend class Suma;
/**
* For printing
diff --git a/storage/ndb/include/kernel/signaldata/CreateEvnt.hpp b/storage/ndb/include/kernel/signaldata/CreateEvnt.hpp
index 8712ce8890c..a0109506af1 100644
--- a/storage/ndb/include/kernel/signaldata/CreateEvnt.hpp
+++ b/storage/ndb/include/kernel/signaldata/CreateEvnt.hpp
@@ -97,6 +97,13 @@ public:
enum ErrorCode {
NoError = 0,
Undefined = 1,
+ NF_FakeErrorREF = 11,
+ Busy = 701
+ };
+#if 0
+ enum ErrorCode {
+ NoError = 0,
+ Undefined = 1,
UndefinedTCError = 2,
NF_FakeErrorREF = 11,
Busy = 701,
@@ -115,7 +122,7 @@ public:
InvalidAttributeOrder = 4255,
Temporary = 0x1 << 16
};
-
+#endif
STATIC_CONST( SignalLength = 5 );
union { // user block reference
@@ -132,7 +139,7 @@ public:
};
Uint32 m_errorLine;
Uint32 m_errorNode;
-
+#if 0
bool isTemporary() const
{ return (errorCode & Temporary) > 0; }
@@ -141,7 +148,7 @@ public:
ErrorCode setTemporary(ErrorCode ec)
{ return (ErrorCode) (errorCode = ((Uint32) ec | (Uint32)Temporary)); }
-
+#endif
Uint32 getUserRef() const {
return m_userRef;
}
@@ -154,11 +161,11 @@ public:
void setUserData(Uint32 val) {
m_userData = val;
}
- DropEvntRef::ErrorCode getErrorCode() const {
- return (DropEvntRef::ErrorCode)m_errorCode;
+ Uint32 getErrorCode() const {
+ return m_errorCode;
}
- void setErrorCode(DropEvntRef::ErrorCode val) {
- m_errorCode = (Uint32)val;
+ void setErrorCode(Uint32 val) {
+ m_errorCode = val;
}
Uint32 getErrorLine() const {
return m_errorLine;
@@ -193,8 +200,8 @@ struct CreateEvntReq {
// RT_TC = 5 << 8
};
STATIC_CONST( SignalLengthGet = 3 );
- STATIC_CONST( SignalLengthCreate = 5+MAXNROFATTRIBUTESINWORDS );
- STATIC_CONST( SignalLength = 7+MAXNROFATTRIBUTESINWORDS );
+ STATIC_CONST( SignalLengthCreate = 6+MAXNROFATTRIBUTESINWORDS );
+ STATIC_CONST( SignalLength = 8+MAXNROFATTRIBUTESINWORDS );
// SECTION( ATTRIBUTE_LIST_SECTION = 0 );
SECTION( EVENT_NAME_SECTION = 0 );
@@ -208,6 +215,7 @@ struct CreateEvntReq {
};
Uint32 m_requestInfo;
Uint32 m_tableId; // table to event
+ Uint32 m_tableVersion; // table version
AttributeMask::Data m_attrListBitmask;
Uint32 m_eventType; // from DictTabInfo::TableType
Uint32 m_eventId; // event table id set by DICT/SUMA
@@ -245,6 +253,12 @@ struct CreateEvntReq {
void setTableId(Uint32 val) {
m_tableId = val;
}
+ Uint32 getTableVersion() const {
+ return m_tableVersion;
+ }
+ void setTableVersion(Uint32 val) {
+ m_tableVersion = val;
+ }
AttributeMask getAttrListBitmask() const {
AttributeMask tmp;
tmp.assign(m_attrListBitmask);
@@ -281,7 +295,7 @@ class CreateEvntConf {
public:
// STATIC_CONST( InternalLength = 3 );
- STATIC_CONST( SignalLength = 7+MAXNROFATTRIBUTESINWORDS );
+ STATIC_CONST( SignalLength = 8+MAXNROFATTRIBUTESINWORDS );
union {
Uint32 m_userRef; // user block reference
@@ -293,6 +307,7 @@ public:
};
Uint32 m_requestInfo;
Uint32 m_tableId;
+ Uint32 m_tableVersion; // table version
AttributeMask m_attrListBitmask;
Uint32 m_eventType;
Uint32 m_eventId;
@@ -322,6 +337,12 @@ public:
void setTableId(Uint32 val) {
m_tableId = val;
}
+ Uint32 getTableVersion() const {
+ return m_tableVersion;
+ }
+ void setTableVersion(Uint32 val) {
+ m_tableVersion = val;
+ }
AttributeMask getAttrListBitmask() const {
return m_attrListBitmask;
}
@@ -355,7 +376,14 @@ struct CreateEvntRef {
friend class SafeCounter;
friend bool printCREATE_EVNT_REF(FILE*, const Uint32*, Uint32, Uint16);
- STATIC_CONST( SignalLength = 10 );
+ STATIC_CONST( SignalLength = 11 );
+ enum ErrorCode {
+ NoError = 0,
+ Undefined = 1,
+ NF_FakeErrorREF = 11,
+ Busy = 701
+ };
+#if 0
enum ErrorCode {
NoError = 0,
Undefined = 1,
@@ -383,6 +411,7 @@ struct CreateEvntRef {
void setTemporary();
ErrorCode setTemporary(ErrorCode ec);
static ErrorCode makeTemporary(ErrorCode ec);
+#endif
union {
Uint32 m_userRef; // user block reference
@@ -395,6 +424,7 @@ struct CreateEvntRef {
Uint32 m_requestInfo;
Uint32 m_tableId;
+ Uint32 m_tableVersion; // table version
Uint32 m_eventType;
Uint32 m_eventId;
Uint32 m_eventKey;
@@ -434,6 +464,12 @@ struct CreateEvntRef {
void setTableId(Uint32 val) {
m_tableId = val;
}
+ Uint32 getTableVersion() const {
+ return m_tableVersion;
+ }
+ void setTableVersion(Uint32 val) {
+ m_tableVersion = val;
+ }
Uint32 getEventType() const {
return m_eventType;
@@ -454,11 +490,11 @@ struct CreateEvntRef {
m_eventKey = val;
}
- CreateEvntRef::ErrorCode getErrorCode() const {
- return (CreateEvntRef::ErrorCode)errorCode;
+ Uint32 getErrorCode() const {
+ return errorCode;
}
- void setErrorCode(CreateEvntRef::ErrorCode val) {
- errorCode = (Uint32)val;
+ void setErrorCode(Uint32 val) {
+ errorCode = val;
}
Uint32 getErrorLine() const {
return m_errorLine;
@@ -473,6 +509,7 @@ struct CreateEvntRef {
m_errorNode = val;
}
};
+#if 0
inline bool CreateEvntRef::isTemporary() const
{ return (errorCode & CreateEvntRef::Temporary) > 0; }
inline void CreateEvntRef::setTemporary()
@@ -483,5 +520,5 @@ inline CreateEvntRef::ErrorCode CreateEvntRef::setTemporary(ErrorCode ec)
inline CreateEvntRef::ErrorCode CreateEvntRef::makeTemporary(ErrorCode ec)
{ return (CreateEvntRef::ErrorCode)
( (Uint32) ec | (Uint32)CreateEvntRef::Temporary ); }
-
+#endif
#endif
diff --git a/storage/ndb/include/kernel/signaldata/CreateTab.hpp b/storage/ndb/include/kernel/signaldata/CreateTab.hpp
index b2ef52a6bf7..9095be2ea23 100644
--- a/storage/ndb/include/kernel/signaldata/CreateTab.hpp
+++ b/storage/ndb/include/kernel/signaldata/CreateTab.hpp
@@ -91,6 +91,7 @@ class CreateTabConf {
* Sender(s) / Reciver(s)
*/
friend class Dbdict;
+ friend class Suma;
/**
* For printing
diff --git a/storage/ndb/include/kernel/signaldata/DropTab.hpp b/storage/ndb/include/kernel/signaldata/DropTab.hpp
index dd3946d8cc0..7e200c9d5db 100644
--- a/storage/ndb/include/kernel/signaldata/DropTab.hpp
+++ b/storage/ndb/include/kernel/signaldata/DropTab.hpp
@@ -61,6 +61,7 @@ class DropTabConf {
friend class Dbtup;
friend class Dbtux;
friend class Dbdih;
+ friend class Suma;
/**
* Receiver(s)
diff --git a/storage/ndb/include/kernel/signaldata/EventReport.hpp b/storage/ndb/include/kernel/signaldata/EventReport.hpp
index 9822a0539cf..e1cdbcfd753 100644
--- a/storage/ndb/include/kernel/signaldata/EventReport.hpp
+++ b/storage/ndb/include/kernel/signaldata/EventReport.hpp
@@ -68,6 +68,8 @@ public:
4) Add SentHeartbeat in EventLogger::getText()
*/
+ void setNodeId(Uint32 nodeId);
+ Uint32 getNodeId() const;
void setEventType(Ndb_logevent_type type);
Ndb_logevent_type getEventType() const;
UintR eventType; // DATA 0
@@ -75,14 +77,26 @@ public:
inline
void
+EventReport::setNodeId(Uint32 nodeId){
+ eventType = (nodeId << 16) | (eventType & 0xFFFF);
+}
+
+inline
+Uint32
+EventReport::getNodeId() const {
+ return eventType >> 16;
+}
+
+inline
+void
EventReport::setEventType(Ndb_logevent_type type){
- eventType = (UintR) type;
+ eventType = (eventType & 0xFFFF0000) | (((UintR) type) & 0xFFFF);
}
inline
Ndb_logevent_type
EventReport::getEventType() const {
- return (Ndb_logevent_type)eventType;
+ return (Ndb_logevent_type)(eventType & 0xFFFF);
}
#endif
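
[Editor's note] The setNodeId()/getNodeId() pair added above packs the reporting node id into the upper 16 bits of eventType, leaving the lower 16 bits for the Ndb_logevent_type. A quick round-trip illustration (ours, not part of the patch), assuming a 32-bit UintR:

    EventReport rep;
    rep.eventType = 0;
    rep.setEventType((Ndb_logevent_type)58);  // e.g. NDB_LE_EventBufferStatus, low 16 bits
    rep.setNodeId(3);                         // eventType is now 0x0003003A
    // getNodeId() returns 3 and getEventType() returns 58 -- the two fields do not clobber each other.
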
diff --git a/storage/ndb/include/kernel/signaldata/SumaImpl.hpp b/storage/ndb/include/kernel/signaldata/SumaImpl.hpp
index 75fb65e1ad2..930d1c7c079 100644
--- a/storage/ndb/include/kernel/signaldata/SumaImpl.hpp
+++ b/storage/ndb/include/kernel/signaldata/SumaImpl.hpp
@@ -21,16 +21,15 @@
#include <NodeBitmask.hpp>
-class SubCreateReq {
+struct SubCreateReq {
/**
* Sender(s)/Reciver(s)
*/
- friend class Grep;
- friend class SumaParticipant;
+ friend struct Grep;
+ friend struct SumaParticipant;
friend bool printSUB_CREATE_REQ(FILE *, const Uint32 *, Uint32, Uint16);
-public:
- STATIC_CONST( SignalLength = 5 );
+ STATIC_CONST( SignalLength = 6 );
enum SubscriptionType {
SingleTableScan = 1, //
@@ -43,75 +42,57 @@ public:
RestartFlag = 0x2 << 16
};
- Uint32 subscriberRef;
- Uint32 subscriberData;
+ Uint32 senderRef;
+ Uint32 senderData;
Uint32 subscriptionId;
Uint32 subscriptionKey;
Uint32 subscriptionType;
- union {
- Uint32 tableId; // Used when doing SingelTableScan
- };
- SECTION( ATTRIBUTE_LIST = 0); // Used when doing SingelTableScan
- SECTION( TABLE_LIST = 1 );
-
+ Uint32 tableId;
};
-class SubCreateRef {
+struct SubCreateRef {
/**
* Sender(s)/Reciver(s)
*/
- friend class Grep;
- friend class SumaParticipant;
+ friend struct Grep;
+ friend struct SumaParticipant;
friend bool printSUB_CREATE_REF(FILE *, const Uint32 *, Uint32, Uint16);
-public:
- STATIC_CONST( SignalLength = 6 );
-
- Uint32 subscriberRef;
- Uint32 subscriberData;
- Uint32 subscriptionId;
- Uint32 subscriptionKey;
- Uint32 subscriptionType;
- Uint32 err;
+ STATIC_CONST( SignalLength = 3 );
- SECTION( ATTRIBUTE_LIST = 0); // Used when doing SingelTableScan
- union {
- Uint32 tableId; // Used when doing SingelTableScan
- };
+ Uint32 senderRef;
+ Uint32 senderData;
+ Uint32 errorCode;
};
-class SubCreateConf {
+struct SubCreateConf {
/**
* Sender(s)/Reciver(s)
*/
- friend class Grep;
- friend class SumaParticipant;
+ friend struct Grep;
+ friend struct SumaParticipant;
friend bool printSUB_CREATE_CONF(FILE *, const Uint32 *, Uint32, Uint16);
-public:
- STATIC_CONST( SignalLength = 3 );
+ STATIC_CONST( SignalLength = 2 );
- Uint32 subscriberData;
- Uint32 subscriptionId;
- Uint32 subscriptionKey;
+ Uint32 senderRef;
+ Uint32 senderData;
};
-class SubscriptionData {
-public:
+struct SubscriptionData {
enum Part {
MetaData = 1,
TableData = 2
};
};
-class SubStartReq {
+struct SubStartReq {
/**
* Sender(s)/Reciver(s)
*/
- friend class Suma;
+ friend struct Suma;
friend bool printSUB_START_REQ(FILE *, const Uint32 *, Uint32, Uint16);
-public:
STATIC_CONST( SignalLength = 6 );
STATIC_CONST( SignalLength2 = SignalLength+1 );
@@ -124,23 +105,18 @@ public:
Uint32 subscriberRef;
};
-class SubStartRef {
+struct SubStartRef {
/**
* Sender(s)/Reciver(s)
*/
- friend class Suma;
+ friend struct Suma;
friend bool printSUB_START_REF(FILE *, const Uint32 *, Uint32, Uint16);
-public:
enum ErrorCode {
- Undefined = 0,
+ Undefined = 1,
NF_FakeErrorREF = 11,
- Busy = 701,
- Temporary = 0x1 << 16
+ Busy = 701
};
- bool isTemporary() const;
- void setTemporary();
- ErrorCode setTemporary(ErrorCode ec);
STATIC_CONST( SignalLength = 7 );
STATIC_CONST( SignalLength2 = SignalLength+1 );
@@ -151,29 +127,19 @@ public:
Uint32 subscriptionKey;
Uint32 part; // SubscriptionData::Part
Uint32 subscriberData;
- union { // do not change the order here!
- Uint32 err;
- Uint32 errorCode;
- };
+ // do not change the order here!
+ Uint32 errorCode;
// with SignalLength2
Uint32 subscriberRef;
};
-inline bool SubStartRef::isTemporary() const
-{ return (errorCode & SubStartRef::Temporary) > 0; }
-inline void SubStartRef::setTemporary()
-{ errorCode |= SubStartRef::Temporary; }
-inline SubStartRef::ErrorCode SubStartRef::setTemporary(ErrorCode ec)
-{ return (SubStartRef::ErrorCode)
- (errorCode = ((Uint32) ec | (Uint32)SubStartRef::Temporary)); }
-class SubStartConf {
+struct SubStartConf {
/**
* Sender(s)/Reciver(s)
*/
- friend class Grep;
+ friend struct Grep;
friend bool printSUB_START_CONF(FILE *, const Uint32 *, Uint32, Uint16);
-public:
STATIC_CONST( SignalLength = 7 );
STATIC_CONST( SignalLength2 = SignalLength+1 );
@@ -188,14 +154,13 @@ public:
Uint32 subscriberRef;
};
-class SubStopReq {
+struct SubStopReq {
/**
* Sender(s)/Reciver(s)
*/
- friend class Suma;
+ friend struct Suma;
friend bool printSUB_STOP_REQ(FILE *, const Uint32 *, Uint32, Uint16);
-public:
STATIC_CONST( SignalLength = 7 );
Uint32 senderRef;
Uint32 senderData;
@@ -206,23 +171,18 @@ public:
Uint32 subscriberRef;
};
-class SubStopRef {
+struct SubStopRef {
/**
* Sender(s)/Reciver(s)
*/
- friend class Suma;
+ friend struct Suma;
friend bool printSUB_STOP_REF(FILE *, const Uint32 *, Uint32, Uint16);
-public:
enum ErrorCode {
- Undefined = 0,
+ Undefined = 1,
NF_FakeErrorREF = 11,
- Busy = 701,
- Temporary = 0x1 << 16
+ Busy = 701
};
- bool isTemporary() const;
- void setTemporary();
- ErrorCode setTemporary(ErrorCode ec);
STATIC_CONST( SignalLength = 8 );
@@ -233,27 +193,16 @@ public:
Uint32 part; // SubscriptionData::Part
Uint32 subscriberData;
Uint32 subscriberRef;
- union {
- Uint32 err;
- Uint32 errorCode;
- };
+ Uint32 errorCode;
};
-inline bool SubStopRef::isTemporary() const
-{ return (errorCode & SubStopRef::Temporary) > 0; }
-inline void SubStopRef::setTemporary()
-{ errorCode |= SubStopRef::Temporary; }
-inline SubStopRef::ErrorCode SubStopRef::setTemporary(ErrorCode ec)
-{ return (SubStopRef::ErrorCode)
- (errorCode = ((Uint32) ec | (Uint32)SubStopRef::Temporary)); }
-class SubStopConf {
+struct SubStopConf {
/**
* Sender(s)/Reciver(s)
*/
- friend class Grep;
+ friend struct Grep;
friend bool printSUB_STOP_CONF(FILE *, const Uint32 *, Uint32, Uint16);
-public:
STATIC_CONST( SignalLength = 7 );
Uint32 senderRef;
@@ -265,215 +214,181 @@ public:
Uint32 subscriberRef;
};
-class SubSyncReq {
+struct SubSyncReq {
/**
* Sender(s)/Reciver(s)
*/
- friend class Suma;
- friend class Grep;
+ friend struct Suma;
+ friend struct Grep;
friend bool printSUB_SYNC_REQ(FILE *, const Uint32 *, Uint32, Uint16);
-public:
- STATIC_CONST( SignalLength = 4 );
+ STATIC_CONST( SignalLength = 5 );
-public:
+ Uint32 senderRef;
+ Uint32 senderData;
Uint32 subscriptionId;
Uint32 subscriptionKey;
- Uint32 subscriberData;
Uint32 part; // SubscriptionData::Part
+
+ SECTION( ATTRIBUTE_LIST = 0); // Used when doing SingelTableScan
+ SECTION( TABLE_LIST = 1 );
};
-class SubSyncRef {
+struct SubSyncRef {
/**
* Sender(s)/Reciver(s)
*/
- friend class Suma;
- friend class Grep;
+ friend struct Suma;
+ friend struct Grep;
friend bool printSUB_SYNC_REF(FILE *, const Uint32 *, Uint32, Uint16);
-public:
enum ErrorCode {
- Undefined = 0,
- Temporary = 0x1 << 16
+ Undefined = 1
};
- STATIC_CONST( SignalLength = 5 );
+ STATIC_CONST( SignalLength = 3 );
- Uint32 subscriptionId;
- Uint32 subscriptionKey;
- Uint32 part; // SubscriptionData::Part
- Uint32 subscriberData;
- union {
- Uint32 errorCode;
- Uint32 err;
- };
+ Uint32 senderRef;
+ Uint32 senderData;
+ Uint32 errorCode;
};
-class SubSyncConf {
+struct SubSyncConf {
/**
* Sender(s)/Reciver(s)
*/
- friend class Suma;
- friend class Grep;
+ friend struct Suma;
+ friend struct Grep;
friend bool printSUB_SYNC_CONF(FILE *, const Uint32 *, Uint32, Uint16);
-public:
- STATIC_CONST( SignalLength = 4 );
+ STATIC_CONST( SignalLength = 2 );
- Uint32 subscriptionId;
- Uint32 subscriptionKey;
- Uint32 part; // SubscriptionData::Part
- Uint32 subscriberData;
+ Uint32 senderRef;
+ Uint32 senderData;
};
-class SubMetaData {
+struct SubMetaData {
/**
* Sender(s)/Reciver(s)
*/
- friend class SumaParticipant;
- friend class Grep;
+ friend struct SumaParticipant;
+ friend struct Grep;
friend bool printSUB_META_DATA(FILE *, const Uint32 *, Uint32, Uint16);
-public:
STATIC_CONST( SignalLength = 3 );
SECTION( DICT_TAB_INFO = 0 );
Uint32 gci;
- union { // Haven't decide what to call it
- Uint32 senderData;
- Uint32 subscriberData;
- };
- union {
- Uint32 tableId;
- };
+ Uint32 senderData;
+ Uint32 tableId;
};
-class SubTableData {
+struct SubTableData {
/**
* Sender(s)/Reciver(s)
*/
- friend class SumaParticipant;
- friend class Grep;
+ friend struct SumaParticipant;
+ friend struct Grep;
friend bool printSUB_TABLE_DATA(FILE *, const Uint32 *, Uint32, Uint16);
-public:
- STATIC_CONST( SignalLength = 7 );
+ STATIC_CONST( SignalLength = 5 );
enum LogType {
SCAN = 1,
LOG = 2,
- REMOVE_FLAGS = 0xff,
- GCINOTCONSISTENT = 0x1 << 16
+ REMOVE_FLAGS = 0xff
};
- void setGCINotConsistent() { logType |= (Uint32)GCINOTCONSISTENT; };
- bool isGCIConsistent()
- { return (logType & (Uint32)GCINOTCONSISTENT) == 0 ? true : false; };
-
- union { // Haven't decide what to call it
- Uint32 senderData;
- Uint32 subscriberData;
- };
+ Uint32 senderData;
Uint32 gci;
Uint32 tableId;
- Uint32 operation;
- Uint32 noOfAttributes;
- Uint32 dataSize;
+ Uint8 operation;
+ Uint8 req_nodeid;
+ Uint8 not_used2;
+ Uint8 not_used3;
Uint32 logType;
};
-class SubSyncContinueReq {
+struct SubSyncContinueReq {
/**
* Sender(s)/Reciver(s)
*/
- friend class SumaParticipant;
- friend class Grep;
- friend class Trix;
+ friend struct SumaParticipant;
+ friend struct Grep;
+ friend struct Trix;
friend bool printSUB_SYNC_CONTINUE_REQ(FILE *, const Uint32 *, Uint32, Uint16);
-public:
STATIC_CONST( SignalLength = 2 );
- union { // Haven't decide what to call it
- Uint32 senderData;
- Uint32 subscriberData;
- };
+ Uint32 subscriberData;
Uint32 noOfRowsSent;
};
-class SubSyncContinueRef {
+struct SubSyncContinueRef {
/**
* Sender(s)/Reciver(s)
*/
- friend class SumaParticipant;
- friend class Grep;
- friend class Trix;
+ friend struct SumaParticipant;
+ friend struct Grep;
+ friend struct Trix;
friend bool printSUB_SYNC_CONTINUE_REF(FILE *, const Uint32 *, Uint32, Uint16);
-public:
STATIC_CONST( SignalLength = 2 );
Uint32 subscriptionId;
Uint32 subscriptionKey;
};
-class SubSyncContinueConf {
+struct SubSyncContinueConf {
/**
* Sender(s)/Reciver(s)
*/
- friend class SumaParticipant;
- friend class Grep;
- friend class Trix;
+ friend struct SumaParticipant;
+ friend struct Grep;
+ friend struct Trix;
friend bool printSUB_SYNC_CONTINUE_CONF(FILE *, const Uint32 *, Uint32, Uint16);
-public:
STATIC_CONST( SignalLength = 2 );
Uint32 subscriptionId;
Uint32 subscriptionKey;
};
-class SubGcpCompleteRep {
+struct SubGcpCompleteRep {
/**
* Sender(s)/Reciver(s)
*/
- friend class Dbdih;
- friend class SumaParticipant;
- friend class Grep;
- friend class Trix;
+ friend struct Dbdih;
+ friend struct SumaParticipant;
+ friend struct Grep;
+ friend struct Trix;
friend bool printSUB_GCP_COMPLETE_REP(FILE *, const Uint32 *, Uint32, Uint16);
-public:
STATIC_CONST( SignalLength = 3 );
Uint32 gci;
Uint32 senderRef;
- union { // Haven't decide what to call it
- Uint32 senderData;
- Uint32 subscriberData;
- };
+ Uint32 gcp_complete_rep_count;
};
-class SubGcpCompleteAcc {
+struct SubGcpCompleteAck {
/**
* Sender(s)/Reciver(s)
*/
-public:
STATIC_CONST( SignalLength = SubGcpCompleteRep::SignalLength );
SubGcpCompleteRep rep;
};
-class SubRemoveReq {
+struct SubRemoveReq {
/**
* Sender(s)/Reciver(s)
*/
- friend class Grep;
- friend class SumaParticipant;
+ friend struct Grep;
+ friend struct SumaParticipant;
friend bool printSUB_REMOVE_REQ(FILE *, const Uint32 *, Uint32, Uint16);
-public:
STATIC_CONST( SignalLength = 4 );
Uint32 senderRef;
@@ -482,138 +397,127 @@ public:
Uint32 subscriptionKey;
};
-class SubRemoveRef {
+struct SubRemoveRef {
/**
* Sender(s)/Reciver(s)
*/
- friend class Grep;
- friend class SumaParticipant;
+ friend struct Grep;
+ friend struct SumaParticipant;
friend bool printSUB_REMOVE_REF(FILE *, const Uint32 *, Uint32, Uint16);
-public:
STATIC_CONST( SignalLength = 5 );
enum ErrorCode {
- Undefined = 0,
+ Undefined = 1,
NF_FakeErrorREF = 11,
- Busy = 701,
- Temporary = 0x1 << 16
+ Busy = 701
};
- bool isTemporary() const;
- void setTemporary();
- ErrorCode setTemporary(ErrorCode ec);
Uint32 senderRef;
Uint32 subscriptionId;
Uint32 subscriptionKey;
- union {
- Uint32 err;
- Uint32 errorCode;
- };
- union { // Haven't decide what to call it
- Uint32 senderData;
- Uint32 subscriberData;
- };
+ Uint32 errorCode;
+ Uint32 senderData;
};
-inline bool SubRemoveRef::isTemporary() const
-{ return (err & SubRemoveRef::Temporary) > 0; }
-inline void SubRemoveRef::setTemporary()
-{ err |= SubRemoveRef::Temporary; }
-inline SubRemoveRef::ErrorCode SubRemoveRef::setTemporary(ErrorCode ec)
-{ return (SubRemoveRef::ErrorCode)
- (errorCode = ((Uint32) ec | (Uint32)SubRemoveRef::Temporary)); }
-class SubRemoveConf {
+struct SubRemoveConf {
/**
* Sender(s)/Reciver(s)
*/
- friend class Grep;
- friend class SumaParticipant;
+ friend struct Grep;
+ friend struct SumaParticipant;
friend bool printSUB_REMOVE_CONF(FILE *, const Uint32 *, Uint32, Uint16);
-public:
STATIC_CONST( SignalLength = 5 );
Uint32 senderRef;
Uint32 subscriptionId;
Uint32 subscriptionKey;
- Uint32 err;
- union { // Haven't decide what to call it
- Uint32 senderData;
- Uint32 subscriberData;
- };
-
+ Uint32 errorCode;
+ Uint32 senderData;
};
-class CreateSubscriptionIdReq {
- friend class Grep;
- friend class SumaParticipant;
+struct CreateSubscriptionIdReq {
+ friend struct Grep;
+ friend struct SumaParticipant;
friend bool printCREATE_SUBSCRIPTION_ID_REQ(FILE *, const Uint32 *,
Uint32, Uint16);
-public:
- STATIC_CONST( SignalLength = 3 );
+ STATIC_CONST( SignalLength = 2 );
- Uint32 subscriptionId;
- Uint32 subscriptionKey;
- union { // Haven't decide what to call it
- Uint32 senderData;
- Uint32 subscriberData;
- };
+ Uint32 senderRef;
+ Uint32 senderData;
};
-class CreateSubscriptionIdConf {
- friend class Grep;
- friend class SumaParticipant;
+struct CreateSubscriptionIdConf {
+ friend struct Grep;
+ friend struct SumaParticipant;
friend bool printCREATE_SUBSCRIPTION_ID_CONF(FILE *, const Uint32 *,
Uint32, Uint16);
-public:
- STATIC_CONST( SignalLength = 3 );
+ STATIC_CONST( SignalLength = 4 );
+ Uint32 senderRef;
+ Uint32 senderData;
Uint32 subscriptionId;
Uint32 subscriptionKey;
- union { // Haven't decide what to call it
- Uint32 senderData;
- Uint32 subscriberData;
- };
};
-class CreateSubscriptionIdRef {
- friend class Grep;
- friend class SumaParticipant;
+struct CreateSubscriptionIdRef {
+ friend struct Grep;
+ friend struct SumaParticipant;
friend bool printCREATE_SUBSCRIPTION_ID_REF(FILE *, const Uint32 *,
Uint32, Uint16);
-public:
- STATIC_CONST( SignalLength = 4 );
+ STATIC_CONST( SignalLength = 3 );
- Uint32 subscriptionId;
- Uint32 subscriptionKey;
- union { // Haven't decide what to call it
- Uint32 senderData;
- Uint32 subscriberData;
- };
- Uint32 err;
+ Uint32 senderRef;
+ Uint32 senderData;
+ Uint32 errorCode;
};
-class SumaStartMe {
-public:
+struct SumaStartMeReq {
STATIC_CONST( SignalLength = 1 );
Uint32 unused;
};
-class SumaHandoverReq {
-public:
+struct SumaStartMeRef {
STATIC_CONST( SignalLength = 1 );
- Uint32 gci;
+ Uint32 errorCode;
+ enum {
+ Busy = 0x1
+ };
};
-class SumaHandoverConf {
-public:
+struct SumaStartMeConf {
STATIC_CONST( SignalLength = 1 );
+ Uint32 unused;
+};
+
+struct SumaHandoverReq {
+ STATIC_CONST( SignalLength = 3 );
Uint32 gci;
+ Uint32 nodeId;
+ Uint32 theBucketMask[1];
};
+
+struct SumaHandoverConf {
+ STATIC_CONST( SignalLength = 3 );
+ Uint32 gci;
+ Uint32 nodeId;
+ Uint32 theBucketMask[1];
+};
+
+struct SumaContinueB
+{
+ enum
+ {
+ RESEND_BUCKET = 1
+ ,RELEASE_GCI = 2
+ ,OUT_OF_BUFFER_RELEASE = 3
+ };
+};
+
#endif
diff --git a/storage/ndb/include/mgmapi/mgmapi.h b/storage/ndb/include/mgmapi/mgmapi.h
index 924d65c2847..888599286e9 100644
--- a/storage/ndb/include/mgmapi/mgmapi.h
+++ b/storage/ndb/include/mgmapi/mgmapi.h
@@ -171,7 +171,6 @@ extern "C" {
= NODE_TYPE_MGM
#endif
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- ,NDB_MGM_NODE_TYPE_REP = NODE_TYPE_REP /** A replication node */
,NDB_MGM_NODE_TYPE_MIN = 0 /** Min valid value*/
,NDB_MGM_NODE_TYPE_MAX = 3 /** Max valid value*/
#endif
diff --git a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h
index 8f95e159b38..48edc6eaaa0 100644
--- a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h
+++ b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h
@@ -4,7 +4,6 @@
#define CFG_SYS_NAME 3
#define CFG_SYS_PRIMARY_MGM_NODE 1
#define CFG_SYS_CONFIG_GENERATION 2
-#define CFG_SYS_REPLICATION_ROLE 7
#define CFG_SYS_PORT_BASE 8
#define CFG_NODE_ID 3
@@ -139,8 +138,6 @@
#define CFG_OSE_PRIO_B_SIZE 603
#define CFG_OSE_RECEIVE_ARRAY_SIZE 604
-#define CFG_REP_HEARTBEAT_INTERVAL 700
-
/**
* API Config variables
*
@@ -162,8 +159,6 @@
#define NODE_TYPE_DB 0
#define NODE_TYPE_API 1
#define NODE_TYPE_MGM 2
-#define NODE_TYPE_REP 3
-#define NODE_TYPE_EXT_REP 4
#define CONNECTION_TYPE_TCP 0
#define CONNECTION_TYPE_SHM 1
diff --git a/storage/ndb/include/mgmapi/ndb_logevent.h b/storage/ndb/include/mgmapi/ndb_logevent.h
index d5744b0fffe..f3f9d04e6d0 100644
--- a/storage/ndb/include/mgmapi/ndb_logevent.h
+++ b/storage/ndb/include/mgmapi/ndb_logevent.h
@@ -159,7 +159,11 @@ extern "C" {
/** NDB_MGM_EVENT_CATEGORY_BACKUP */
NDB_LE_BackupCompleted = 56,
/** NDB_MGM_EVENT_CATEGORY_BACKUP */
- NDB_LE_BackupAborted = 57
+ NDB_LE_BackupAborted = 57,
+
+ /** NDB_MGM_EVENT_CATEGORY_INFO */
+ NDB_LE_EventBufferStatus = 58
+
};
/**
@@ -565,6 +569,16 @@ extern "C" {
struct {
/* TODO */
} InfoEvent;
+ /** Log event specific data for for corresponding NDB_LE_ log event */
+ struct {
+ unsigned usage;
+ unsigned alloc;
+ unsigned max;
+ unsigned apply_gci_l;
+ unsigned apply_gci_h;
+ unsigned latest_gci_l;
+ unsigned latest_gci_h;
+ } EventBufferStatus;
/** Log event data for @ref NDB_LE_BackupStarted */
struct {
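
[Editor's note] The new NDB_LE_EventBufferStatus type and its EventBufferStatus member expose event-buffer usage to MGM API clients. A rough consumer sketch, assuming the usual mgmapi log-event calls (ndb_mgm_create_logevent_handle(), ndb_logevent_get_next()) and an already connected NdbMgmHandle h; the filter and printout are illustrative only:

    int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_INFO, 0 };   // level/category pairs, 0-terminated
    NdbLogEventHandle le = ndb_mgm_create_logevent_handle(h, filter);
    struct ndb_logevent ev;
    if (ndb_logevent_get_next(le, &ev, 2000) > 0 &&          // wait up to 2000 ms for an event
        ev.type == NDB_LE_EventBufferStatus)
      printf("event buffer: %u used, %u allocated, %u max\n",
             ev.EventBufferStatus.usage,
             ev.EventBufferStatus.alloc,
             ev.EventBufferStatus.max);
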
diff --git a/storage/ndb/include/mgmapi/ndb_logevent.txt b/storage/ndb/include/mgmapi/ndb_logevent.txt
new file mode 100644
index 00000000000..6fe2d3f87b7
--- /dev/null
+++ b/storage/ndb/include/mgmapi/ndb_logevent.txt
@@ -0,0 +1,56 @@
+To add a new event edit the following 3 files in totally 5 places:
+
+example shows EventBufferUsage added.
+
+Public interface:
+
+*** ndb/include/mgmapi/ndb_logevent.h
+
+ /** NDB_MGM_EVENT_CATEGORY_INFO */
+ NDB_LE_EventBufferUsage = 58
+...
+
+
+ /** Log event specific data for for corresponding NDB_LE_ log event */
+ struct {
+ unsigned usage;
+ unsigned avail;
+ } EventBufferUsage;
+
+*** ndb/src/mgmapi/ndb_logevent.cpp
+
+ ROW( EventBufferUsage, "usage", 1, usage),
+ ROW( EventBufferUsage, "avail", 2, avail),
+
+
+
+Internal impl:
+
+*** ndb/src/common/debugger/EventLogger.cpp
+
+void getTextEventBufferUsage(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Event buffer usage: %d(%d\%)",
+ theData[1],
+ theData[2] ? (theData[1]*100)/theData[2] : 0);
+}
+
+...
+
+ ROW(EventBufferUsage, LogLevel::llInfo, 7, Logger::LL_INFO ),
+
+*** sending event from ndbd kernel
+
+ Uint32 *data= signal->getDataPtrSend();
+ data[0]= NDB_LE_EventBufferUsage;
+ data[1]= 0;
+ data[2]= 0;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
+
+*** sending event from ndbapi (internal)
+
+ Uint32 data[3];
+ data[0]= NDB_LE_EventBufferUsage;
+ data[1]= 0;
+ data[2]= 0;
+ m_ndb->theImpl->send_event_report(data,3);
diff --git a/storage/ndb/include/ndbapi/Ndb.hpp b/storage/ndb/include/ndbapi/Ndb.hpp
index 1c9d5ed8ad0..cae2c4653e6 100644
--- a/storage/ndb/include/ndbapi/Ndb.hpp
+++ b/storage/ndb/include/ndbapi/Ndb.hpp
@@ -1051,6 +1051,7 @@ class Ndb
friend class NdbReceiver;
friend class NdbOperation;
friend class NdbEventOperationImpl;
+ friend class NdbEventBuffer;
friend class NdbTransaction;
friend class Table;
friend class NdbApiSignal;
@@ -1060,6 +1061,7 @@ class Ndb
friend class NdbDictionaryImpl;
friend class NdbDictInterface;
friend class NdbBlob;
+ friend class NdbImpl;
#endif
public:
@@ -1206,13 +1208,10 @@ public:
*
* @param eventName
* unique identifier of the event
- * @param bufferLength
- * circular buffer size for storing event data
*
* @return Object representing an event, NULL on failure
*/
- NdbEventOperation* createEventOperation(const char* eventName,
- const int bufferLength);
+ NdbEventOperation* createEventOperation(const char* eventName);
/**
* Drop a subscription to an event
*
@@ -1230,9 +1229,24 @@ public:
* @param aMillisecondNumber
* maximum time to wait
*
- * @return the number of events that has occured, -1 on failure
+ * @return > 0 if events available, 0 if no events available, < 0 on failure
*/
- int pollEvents(int aMillisecondNumber);
+ int pollEvents(int aMillisecondNumber, Uint64 *latestGCI= 0);
+
+ /**
+ * Returns an event operation that has data after a pollEvents
+ *
+ * @return an event operations that has data, NULL if no events left with data.
+ */
+ NdbEventOperation *nextEvent();
+
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ NdbEventOperation *getEventOperation(NdbEventOperation* eventOp= 0);
+ Uint64 getLatestGCI();
+ void forceGCP();
+ void setReportThreshEventGCISlip(unsigned thresh);
+ void setReportThreshEventFreeMem(unsigned thresh);
+#endif
/** @} *********************************************************************/
@@ -1496,7 +1510,6 @@ private:
NdbIndexScanOperation* getScanOperation(); // Get a scan operation from idle
NdbIndexOperation* getIndexOperation();// Get an index operation from idle
- class NdbGlobalEventBufferHandle* getGlobalEventBufferHandle();
NdbBlob* getNdbBlob();// Get a blob handle etc
void releaseSignal(NdbApiSignal* anApiSignal);
@@ -1635,7 +1648,7 @@ private:
class NdbImpl * theImpl;
class NdbDictionaryImpl* theDictionary;
- class NdbGlobalEventBufferHandle* theGlobalEventBufferHandle;
+ class NdbEventBuffer* theEventBuffer;
NdbTransaction* theConIdleList; // First connection in idle list.
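
[Editor's note] The Ndb.hpp changes replace the per-operation next()/bufferLength model with a shared NdbEventBuffer: createEventOperation() drops its bufferLength argument, pollEvents() optionally reports the latest GCI and returns >0 / 0 / <0, and nextEvent() hands out the next operation that has data. A minimal consumer loop sketched against these signatures (the event name, column name and handle_event() helper are placeholders of ours; setup and error handling are omitted):

    NdbEventOperation *op = myNdb->createEventOperation("MY_EVENT");  // no buffer length any more
    NdbRecAttr *val = op->getValue("pk");    // getValue() still goes before execute()
    op->execute();

    Uint64 latestGCI = 0;
    for (;;) {
      int r = myNdb->pollEvents(1000, &latestGCI);   // wait up to 1000 ms
      if (r < 0) break;                              // failure
      if (r == 0) continue;                          // timeout, nothing buffered
      while (NdbEventOperation *ev = myNdb->nextEvent())
        handle_event(ev->getEventType(), val);       // placeholder handler
    }
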
diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp
index 00fe709677f..d820759b935 100644
--- a/storage/ndb/include/ndbapi/NdbDictionary.hpp
+++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp
@@ -737,6 +737,8 @@ public:
/** @} *******************************************************************/
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ const char *getMysqlName() const;
+
void setStoredTable(bool x) { setLogging(x); }
bool getStoredTable() const { return getLogging(); }
@@ -949,12 +951,37 @@ public:
/** TableEvent must match 1 << TriggerEvent */
#endif
enum TableEvent {
- TE_INSERT=1, ///< Insert event on table
- TE_DELETE=2, ///< Delete event on table
- TE_UPDATE=4, ///< Update event on table
- TE_ALL=7 ///< Any/all event on table (not relevant when
- ///< events are received)
+ TE_INSERT =1<<0, ///< Insert event on table
+ TE_DELETE =1<<1, ///< Delete event on table
+ TE_UPDATE =1<<2, ///< Update event on table
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ TE_SCAN =1<<3, ///< Scan event on table
+ TE_FIRST_NON_DATA_EVENT =1<<4,
+#endif
+ TE_DROP =1<<4, ///< Drop of table
+ TE_ALTER =1<<5, ///< Alter of table
+ TE_CREATE =1<<6, ///< Create of table
+ TE_GCP_COMPLETE=1<<7, ///< GCP is complete
+ TE_CLUSTER_FAILURE=1<<8, ///< Cluster is unavailable
+ TE_STOP =1<<9, ///< Stop of event operation
+ TE_ALL=0xFFFF ///< Any/all event on table (not relevant when
+ ///< events are received)
+ };
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ enum _TableEvent {
+ _TE_INSERT=0,
+ _TE_DELETE=1,
+ _TE_UPDATE=2,
+ _TE_SCAN=3,
+ _TE_FIRST_NON_DATA_EVENT=4,
+ _TE_DROP=4,
+ _TE_ALTER=5,
+ _TE_CREATE=6,
+ _TE_GCP_COMPLETE=7,
+ _TE_CLUSTER_FAILURE=8,
+ _TE_STOP=9
};
+#endif
/**
* Specifies the durability of an event
* (future version may supply other types)
@@ -1321,6 +1348,10 @@ public:
*/
void invalidateIndex(const char * indexName,
const char * tableName);
+ /**
+ * Force gcp and wait for gcp complete
+ */
+ int forceGCPWait();
#endif
/** @} *******************************************************************/
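
[Editor's note] With TableEvent now defined as distinct bit flags, several event kinds can be combined in one mask and TE_ALL is simply every bit set. A tiny illustration of the flag arithmetic (the mask variable is ours, not an API from the patch):

    typedef NdbDictionary::Event Ev;
    Uint32 mask = Ev::TE_INSERT | Ev::TE_UPDATE | Ev::TE_DROP;   // 1 | 4 | 16 = 0x15
    bool wants_update = (mask & Ev::TE_UPDATE) != 0;             // true
    // TE_ALL (0xFFFF) keeps the old "any event" meaning, now expressed as all bits set.
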
diff --git a/storage/ndb/include/ndbapi/NdbError.hpp b/storage/ndb/include/ndbapi/NdbError.hpp
index f67b3c4ccaa..da322897dc6 100644
--- a/storage/ndb/include/ndbapi/NdbError.hpp
+++ b/storage/ndb/include/ndbapi/NdbError.hpp
@@ -192,6 +192,11 @@ struct NdbError {
int code;
/**
+ * Mysql error code
+ */
+ int mysql_code;
+
+ /**
* Error message
*/
const char * message;
@@ -209,6 +214,7 @@ struct NdbError {
status = UnknownResult;
classification = NoError;
code = 0;
+ mysql_code = 0;
message = 0;
details = 0;
}
@@ -216,6 +222,7 @@ struct NdbError {
status = (NdbError::Status) ndberror.status;
classification = (NdbError::Classification) ndberror.classification;
code = ndberror.code;
+ mysql_code = ndberror.mysql_code;
message = ndberror.message;
details = ndberror.details;
}
@@ -224,6 +231,7 @@ struct NdbError {
ndberror.status = (ndberror_status_enum) status;
ndberror.classification = (ndberror_classification_enum) classification;
ndberror.code = code;
+ ndberror.mysql_code = mysql_code;
ndberror.message = message;
ndberror.details = details;
return ndberror;
diff --git a/storage/ndb/include/ndbapi/NdbEventOperation.hpp b/storage/ndb/include/ndbapi/NdbEventOperation.hpp
index 55ee96b3144..6fe3573eb6a 100644
--- a/storage/ndb/include/ndbapi/NdbEventOperation.hpp
+++ b/storage/ndb/include/ndbapi/NdbEventOperation.hpp
@@ -33,7 +33,7 @@ class NdbEventOperationImpl;
* - To listen to events, an NdbEventOperation object is instantiated by
* Ndb::createEventOperation()
* - execute() starts the event flow. Use Ndb::pollEvents() to wait
- * for an event to occur. Use next() to iterate
+ * for an event to occur. Use Ndb::nextEvent() to iterate
* through the events that have occured.
* - The instance is removed by Ndb::dropEventOperation()
*
@@ -56,9 +56,9 @@ class NdbEventOperationImpl;
* - Today all events INSERT/DELETE/UPDATE and all changed attributes are
* sent to the API, even if only specific attributes have been specified.
* These are however hidden from the user and only relevant data is shown
- * after next().
+ * after Ndb::nextEvent().
* - "False" exits from Ndb::pollEvents() may occur and thus
- * the subsequent next() will return zero,
+ * the subsequent Ndb::nextEvent() will return NULL,
* since there was no available data. Just do Ndb::pollEvents() again.
* - Event code does not check table schema version. Make sure to drop events
* after table is dropped. Will be fixed in later
@@ -86,6 +86,7 @@ public:
enum State {
EO_CREATED, ///< Created but execute() not called
EO_EXECUTING, ///< execute() called
+ EO_DROPPED, ///< Waiting to be deleted, Object unusable.
EO_ERROR ///< An error has occurred. Object unusable.
};
/**
@@ -95,18 +96,14 @@ public:
/**
* Activates the NdbEventOperation to start receiving events. The
- * changed attribute values may be retrieved after next() has returned
- * a value greater than zero. The getValue() methods must be called
+ * changed attribute values may be retrieved after Ndb::nextEvent()
+ * has returned not NULL. The getValue() methods must be called
* prior to execute().
*
* @return 0 if successful otherwise -1.
*/
int execute();
- // about the event operation
- // getting data
- // NdbResultSet* getResultSet();
-
/**
* Defines a retrieval operation of an attribute value.
* The NDB API allocate memory for the NdbRecAttr object that
@@ -129,8 +126,8 @@ public:
* the database! The NdbRecAttr object returned by this method
* is <em>not</em> readable/printable before the
* execute() has been made and
- * next() has returned a value greater than
- * zero. If a specific attribute has not changed the corresponding
+ * Ndb::nextEvent() has returned not NULL.
+ * If a specific attribute has not changed the corresponding
* NdbRecAttr will be in state UNDEFINED. This is checked by
* NdbRecAttr::isNULL() which then returns -1.
*
@@ -149,43 +146,30 @@ public:
*/
NdbRecAttr *getPreValue(const char *anAttrName, char *aValue = 0);
- /**
- * Retrieves event resultset if available, inserted into the NdbRecAttrs
- * specified in getValue() and getPreValue(). To avoid polling for
- * a resultset, one can use Ndb::pollEvents()
- * which will wait on a mutex until an event occurs or the specified
- * timeout occurs.
- *
- * @return >=0 if successful otherwise -1. Return value indicates number
- * of available events. By sending pOverRun one may query for buffer
- * overflow and *pOverRun will indicate the number of events that have
- * overwritten.
- *
- * @return number of available events, -1 on failure
- */
- int next(int *pOverRun=0);
+ int isOverrun() const;
/**
* In the current implementation a nodefailiure may cause loss of events,
* in which case isConsistent() will return false
*/
- bool isConsistent();
+ bool isConsistent() const;
/**
* Query for occured event type.
*
- * @note Only valid after next() has been called and returned value >= 0
+ * @note Only valid after Ndb::nextEvent() has been called and
+ * returned a not NULL value
*
* @return type of event
*/
- NdbDictionary::Event::TableEvent getEventType();
+ NdbDictionary::Event::TableEvent getEventType() const;
/**
* Retrieve the GCI of the latest retrieved event
*
* @return GCI number
*/
- Uint32 getGCI();
+ Uint64 getGCI() const;
/**
* Retrieve the complete GCI in the cluster (not necessarily
@@ -193,7 +177,7 @@ public:
*
* @return GCI number
*/
- Uint32 getLatestGCI();
+ Uint64 getLatestGCI() const;
/**
* Get the latest error
@@ -203,6 +187,26 @@ public:
const struct NdbError & getNdbError() const;
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ /** these are subject to change at any time */
+ const NdbDictionary::Table *getTable() const;
+ const NdbDictionary::Event *getEvent() const;
+ const NdbRecAttr *getFirstPkAttr() const;
+ const NdbRecAttr *getFirstPkPreAttr() const;
+ const NdbRecAttr *getFirstDataAttr() const;
+ const NdbRecAttr *getFirstDataPreAttr() const;
+
+ bool validateTable(NdbDictionary::Table &table) const;
+
+ void setCustomData(void * data);
+ void * getCustomData() const;
+
+ void clearError();
+ int hasError() const;
+
+ int getReqNodeId() const;
+#endif
+
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/*
*
*/
@@ -212,11 +216,10 @@ public:
private:
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
friend class NdbEventOperationImpl;
- friend class Ndb;
+ friend class NdbEventBuffer;
#endif
- NdbEventOperation(Ndb *theNdb, const char* eventName,int bufferLength);
+ NdbEventOperation(Ndb *theNdb, const char* eventName);
~NdbEventOperation();
- static int wait(void *p, int aMillisecondNumber);
class NdbEventOperationImpl &m_impl;
NdbEventOperation(NdbEventOperationImpl& impl);
};
diff --git a/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp b/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp
index 97db76563aa..bf7f14a5d44 100644
--- a/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp
+++ b/storage/ndb/include/ndbapi/ndb_cluster_connection.hpp
@@ -79,6 +79,7 @@ public:
int timeout_after_first_alive);
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ int get_no_ready();
const char *get_connectstring(char *buf, int buf_sz) const;
int get_connected_port() const;
const char *get_connected_host() const;
diff --git a/storage/ndb/include/ndbapi/ndberror.h b/storage/ndb/include/ndbapi/ndberror.h
index 2225f68f08d..0088bbdcc63 100644
--- a/storage/ndb/include/ndbapi/ndberror.h
+++ b/storage/ndb/include/ndbapi/ndberror.h
@@ -72,6 +72,11 @@ typedef struct {
int code;
/**
+ * Mysql error code
+ */
+ int mysql_code;
+
+ /**
* Error message
*/
const char * message;
diff --git a/storage/ndb/ndbapi-examples/Makefile b/storage/ndb/ndbapi-examples/Makefile
index 965dc3ec29f..8e60857dc81 100644
--- a/storage/ndb/ndbapi-examples/Makefile
+++ b/storage/ndb/ndbapi-examples/Makefile
@@ -1,11 +1,11 @@
-BIN_DIRS := ndbapi_simple_example \
- ndbapi_async_example \
- ndbapi_async_example1 \
- ndbapi_retries_example \
- ndbapi_simple_index_example \
- ndbapi_event_example \
- ndbapi_scan_example \
- mgmapi_logevent_example
+BIN_DIRS := ndbapi_simple \
+ ndbapi_async \
+ ndbapi_async1 \
+ ndbapi_retries \
+ ndbapi_simple_index \
+ ndbapi_event \
+ ndbapi_scan \
+ mgmapi_logevent
bins: $(patsubst %, _bins_%, $(BIN_DIRS))
diff --git a/storage/ndb/ndbapi-examples/mgmapi_logevent_example/Makefile b/storage/ndb/ndbapi-examples/mgmapi_logevent/Makefile
index f96989c885c..f96989c885c 100644
--- a/storage/ndb/ndbapi-examples/mgmapi_logevent_example/Makefile
+++ b/storage/ndb/ndbapi-examples/mgmapi_logevent/Makefile
diff --git a/storage/ndb/ndbapi-examples/mgmapi_logevent_example/mgmapi_logevent.cpp b/storage/ndb/ndbapi-examples/mgmapi_logevent/mgmapi_logevent.cpp
index 5ec1fba6314..5ec1fba6314 100644
--- a/storage/ndb/ndbapi-examples/mgmapi_logevent_example/mgmapi_logevent.cpp
+++ b/storage/ndb/ndbapi-examples/mgmapi_logevent/mgmapi_logevent.cpp
diff --git a/storage/ndb/ndbapi-examples/ndbapi_async_example/Makefile b/storage/ndb/ndbapi-examples/ndbapi_async/Makefile
index c5de3b06fc7..c5de3b06fc7 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_async_example/Makefile
+++ b/storage/ndb/ndbapi-examples/ndbapi_async/Makefile
diff --git a/storage/ndb/ndbapi-examples/ndbapi_async_example/ndbapi_async.cpp b/storage/ndb/ndbapi-examples/ndbapi_async/ndbapi_async.cpp
index aa745f4d28d..aa745f4d28d 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_async_example/ndbapi_async.cpp
+++ b/storage/ndb/ndbapi-examples/ndbapi_async/ndbapi_async.cpp
diff --git a/storage/ndb/ndbapi-examples/ndbapi_async_example/readme.txt b/storage/ndb/ndbapi-examples/ndbapi_async/readme.txt
index 47cb4bf9ffa..47cb4bf9ffa 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_async_example/readme.txt
+++ b/storage/ndb/ndbapi-examples/ndbapi_async/readme.txt
diff --git a/storage/ndb/ndbapi-examples/ndbapi_async_example1/Makefile b/storage/ndb/ndbapi-examples/ndbapi_async1/Makefile
index cc6bcebb71b..cc6bcebb71b 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_async_example1/Makefile
+++ b/storage/ndb/ndbapi-examples/ndbapi_async1/Makefile
diff --git a/storage/ndb/ndbapi-examples/ndbapi_async_example1/ndbapi_async1.cpp b/storage/ndb/ndbapi-examples/ndbapi_async1/ndbapi_async1.cpp
index e8bc19e267b..e8bc19e267b 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_async_example1/ndbapi_async1.cpp
+++ b/storage/ndb/ndbapi-examples/ndbapi_async1/ndbapi_async1.cpp
diff --git a/storage/ndb/ndbapi-examples/ndbapi_event_example/Makefile b/storage/ndb/ndbapi-examples/ndbapi_event/Makefile
index d8f7a03aac6..d8f7a03aac6 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_event_example/Makefile
+++ b/storage/ndb/ndbapi-examples/ndbapi_event/Makefile
diff --git a/storage/ndb/ndbapi-examples/ndbapi_event_example/ndbapi_event.cpp b/storage/ndb/ndbapi-examples/ndbapi_event/ndbapi_event.cpp
index 286f6fafbab..6c1ba0c067d 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_event_example/ndbapi_event.cpp
+++ b/storage/ndb/ndbapi-examples/ndbapi_event/ndbapi_event.cpp
@@ -29,6 +29,7 @@
* createEventOperation()
* dropEventOperation()
* pollEvents()
+ * nextEvent()
*
* NdbDictionary
* createEvent()
@@ -43,8 +44,6 @@
* getValue()
* getPreValue()
* execute()
- * next()
- * isConsistent()
* getEventType()
*
*/
@@ -148,7 +147,7 @@ int main()
// Start "transaction" for handling events
NdbEventOperation* op;
printf("create EventOperation\n");
- if ((op = myNdb->createEventOperation(eventName,100)) == NULL)
+ if ((op = myNdb->createEventOperation(eventName)) == NULL)
APIERROR(myNdb->getNdbError());
printf("get values\n");
@@ -172,11 +171,8 @@ int main()
int r= myNdb->pollEvents(1000); // wait for event or 1000 ms
if (r > 0) {
// printf("got data! %d\n", r);
- int overrun;
- while (op->next(&overrun) > 0) {
+ while ((op= myNdb->nextEvent())) {
i++;
- if (!op->isConsistent())
- printf("A node failure has occured and events might be missing\n");
switch (op->getEventType()) {
case NdbDictionary::Event::TE_INSERT:
printf("%u INSERT: ", i);
@@ -190,7 +186,6 @@ int main()
default:
abort(); // should not happen
}
- printf("overrun %u pk %u: ", overrun, recAttr[0]->u_32_value());
for (int i = 1; i < noEventColumnName; i++) {
if (recAttr[i]->isNULL() >= 0) { // we have a value
printf(" post[%u]=", i);
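The example now uses the reworked event API: createEventOperation() no longer takes a buffer-length argument, pollEvents() on the Ndb object waits for data, and Ndb::nextEvent() replaces NdbEventOperation::next()/isConsistent() for draining buffered events. Stripped of setup and column handling, the consumption loop shown above reduces to roughly this sketch:

// Minimal event-consumption skeleton matching the updated example;
// getValue()/getPreValue() registration and error handling are omitted.
NdbEventOperation *op = myNdb->createEventOperation(eventName);
if (op == NULL) APIERROR(myNdb->getNdbError());
// ... register recAttr values and call op->execute() here ...
for (;;)
{
  if (myNdb->pollEvents(1000) > 0)              // wait up to 1000 ms
  {
    while ((op = myNdb->nextEvent()) != NULL)   // drain buffered events
    {
      switch (op->getEventType())
      {
      case NdbDictionary::Event::TE_INSERT:
      case NdbDictionary::Event::TE_UPDATE:
      case NdbDictionary::Event::TE_DELETE:
        /* read the registered NdbRecAttr values here */
        break;
      default:
        break;
      }
    }
  }
}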
diff --git a/storage/ndb/ndbapi-examples/ndbapi_retries_example/Makefile b/storage/ndb/ndbapi-examples/ndbapi_retries/Makefile
index 3dee4f77e35..3dee4f77e35 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_retries_example/Makefile
+++ b/storage/ndb/ndbapi-examples/ndbapi_retries/Makefile
diff --git a/storage/ndb/ndbapi-examples/ndbapi_retries_example/ndbapi_retries.cpp b/storage/ndb/ndbapi-examples/ndbapi_retries/ndbapi_retries.cpp
index 8c29fe31446..8c29fe31446 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_retries_example/ndbapi_retries.cpp
+++ b/storage/ndb/ndbapi-examples/ndbapi_retries/ndbapi_retries.cpp
diff --git a/storage/ndb/ndbapi-examples/ndbapi_scan_example/Makefile b/storage/ndb/ndbapi-examples/ndbapi_scan/Makefile
index e3a7d9c97b0..e3a7d9c97b0 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_scan_example/Makefile
+++ b/storage/ndb/ndbapi-examples/ndbapi_scan/Makefile
diff --git a/storage/ndb/ndbapi-examples/ndbapi_scan_example/ndbapi_scan.cpp b/storage/ndb/ndbapi-examples/ndbapi_scan/ndbapi_scan.cpp
index 69ffd99b8ca..69ffd99b8ca 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_scan_example/ndbapi_scan.cpp
+++ b/storage/ndb/ndbapi-examples/ndbapi_scan/ndbapi_scan.cpp
diff --git a/storage/ndb/ndbapi-examples/ndbapi_scan_example/readme.txt b/storage/ndb/ndbapi-examples/ndbapi_scan/readme.txt
index 47cb4bf9ffa..47cb4bf9ffa 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_scan_example/readme.txt
+++ b/storage/ndb/ndbapi-examples/ndbapi_scan/readme.txt
diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_example/Makefile b/storage/ndb/ndbapi-examples/ndbapi_simple/Makefile
index b792c4c4a47..b792c4c4a47 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_simple_example/Makefile
+++ b/storage/ndb/ndbapi-examples/ndbapi_simple/Makefile
diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_example/ndbapi_simple.cpp b/storage/ndb/ndbapi-examples/ndbapi_simple/ndbapi_simple.cpp
index 152d4fa44af..152d4fa44af 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_simple_example/ndbapi_simple.cpp
+++ b/storage/ndb/ndbapi-examples/ndbapi_simple/ndbapi_simple.cpp
diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_index_example/Makefile b/storage/ndb/ndbapi-examples/ndbapi_simple_index/Makefile
index 3b3ac7f484a..3b3ac7f484a 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_simple_index_example/Makefile
+++ b/storage/ndb/ndbapi-examples/ndbapi_simple_index/Makefile
diff --git a/storage/ndb/ndbapi-examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp b/storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index.cpp
index 5afaf6078d1..5afaf6078d1 100644
--- a/storage/ndb/ndbapi-examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp
+++ b/storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index.cpp
diff --git a/storage/ndb/src/common/debugger/EventLogger.cpp b/storage/ndb/src/common/debugger/EventLogger.cpp
index d18b0feb1ad..06345695107 100644
--- a/storage/ndb/src/common/debugger/EventLogger.cpp
+++ b/storage/ndb/src/common/debugger/EventLogger.cpp
@@ -16,12 +16,11 @@
#include <ndb_global.h>
-#include "EventLogger.hpp"
+#include <EventLogger.hpp>
#include <NdbConfig.h>
#include <kernel/BlockNumbers.h>
#include <signaldata/ArbitSignalData.hpp>
-#include <GrepEvent.hpp>
#include <NodeState.hpp>
#include <version.h>
@@ -571,6 +570,43 @@ void getTextUNDORecordsExecuted(QQQQ) {
void getTextInfoEvent(QQQQ) {
BaseString::snprintf(m_text, m_text_len, (char *)&theData[1]);
}
+const char bytes_unit[]= "B";
+const char kbytes_unit[]= "KB";
+const char mbytes_unit[]= "MB";
+static void convert_unit(unsigned &data, const char *&unit)
+{
+ if (data < 16*1024)
+ {
+ unit= bytes_unit;
+ return;
+ }
+ if (data < 16*1024*1024)
+ {
+ data= (data+1023)/1024;
+ unit= kbytes_unit;
+ return;
+ }
+ data= (data+1024*1024-1)/(1024*1024);
+ unit= mbytes_unit;
+}
+
+void getTextEventBufferStatus(QQQQ) {
+ unsigned used= theData[1], alloc= theData[2], max_= theData[3];
+ const char *used_unit, *alloc_unit, *max_unit;
+ convert_unit(used, used_unit);
+ convert_unit(alloc, alloc_unit);
+ convert_unit(max_, max_unit);
+ BaseString::snprintf(m_text, m_text_len,
+		       "Event buffer status: used=%d%s(%d%%) alloc=%d%s(%d%%) "
+ "max=%d%s apply_gci=%lld latest_gci=%lld",
+ used, used_unit,
+ theData[2] ? (theData[1]*100)/theData[2] : 0,
+ alloc, alloc_unit,
+ theData[3] ? (theData[2]*100)/theData[3] : 0,
+ max_, max_unit,
+ theData[4]+(((Uint64)theData[5])<<32),
+ theData[6]+(((Uint64)theData[7])<<32));
+}
void getTextWarningEvent(QQQQ) {
BaseString::snprintf(m_text, m_text_len, (char *)&theData[1]);
}
@@ -715,6 +751,7 @@ const EventLoggerBase::EventRepLogLevelMatrix EventLoggerBase::matrix[] = {
ROW(SentHeartbeat, LogLevel::llInfo, 12, Logger::LL_INFO ),
ROW(CreateLogBytes, LogLevel::llInfo, 11, Logger::LL_INFO ),
ROW(InfoEvent, LogLevel::llInfo, 2, Logger::LL_INFO ),
+ ROW(EventBufferStatus, LogLevel::llInfo, 7, Logger::LL_INFO ),
// Backup
ROW(BackupStarted, LogLevel::llBackup, 7, Logger::LL_INFO ),
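convert_unit() rounds a byte count up to whole KB once it reaches 16 KB and to whole MB once it reaches 16 MB, so the used/alloc/max figures in the EventBufferStatus line are ceilings in the chosen unit. A worked check of the rounding, using the thresholds above:

// Rounding behaviour of convert_unit() (illustrative values):
//   data = 10000    -> stays 10000, unit "B"   (below 16*1024)
//   data = 20000    -> (20000+1023)/1024 = 20, unit "KB"
//   data = 20000000 -> (20000000+1048575)/1048576 = 20, unit "MB"
unsigned used = 20000;
const char *unit;
convert_unit(used, unit);   // used == 20, unit == "KB" (within EventLogger.cpp)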
diff --git a/storage/ndb/src/common/debugger/Makefile.am b/storage/ndb/src/common/debugger/Makefile.am
index 71b8ed55561..fae820e5bd7 100644
--- a/storage/ndb/src/common/debugger/Makefile.am
+++ b/storage/ndb/src/common/debugger/Makefile.am
@@ -2,7 +2,7 @@ SUBDIRS = signaldata
noinst_LTLIBRARIES = libtrace.la
-libtrace_la_SOURCES = SignalLoggerManager.cpp DebuggerNames.cpp BlockNames.cpp EventLogger.cpp GrepError.cpp
+libtrace_la_SOURCES = SignalLoggerManager.cpp DebuggerNames.cpp BlockNames.cpp EventLogger.cpp
include $(top_srcdir)/storage/ndb/config/common.mk.am
include $(top_srcdir)/storage/ndb/config/type_kernel.mk.am
diff --git a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
index 984d28819c0..96424098086 100644
--- a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
@@ -14,7 +14,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#include "GlobalSignalNumbers.h"
+#include <GlobalSignalNumbers.h>
const GsnName SignalNames [] = {
{ GSN_API_REGCONF, "API_REGCONF" }
@@ -506,10 +506,13 @@ const GsnName SignalNames [] = {
,{ GSN_CREATE_EVNT_CONF, "CREATE_EVNT_CONF" }
,{ GSN_CREATE_EVNT_REF, "CREATE_EVNT_REF" }
- ,{ GSN_SUMA_START_ME, "SUMA_START_ME" }
+ ,{ GSN_SUMA_START_ME_REQ, "SUMA_START_ME_REQ" }
+ ,{ GSN_SUMA_START_ME_REF, "SUMA_START_ME_REF" }
+ ,{ GSN_SUMA_START_ME_CONF, "SUMA_START_ME_CONF" }
,{ GSN_SUMA_HANDOVER_REQ, "SUMA_HANDOVER_REQ"}
+ ,{ GSN_SUMA_HANDOVER_REF, "SUMA_HANDOVER_REF"}
,{ GSN_SUMA_HANDOVER_CONF, "SUMA_HANDOVER_CONF"}
-
+
,{ GSN_DROP_EVNT_REQ, "DROP_EVNT_REQ" }
,{ GSN_DROP_EVNT_CONF, "DROP_EVNT_CONF" }
,{ GSN_DROP_EVNT_REF, "DROP_EVNT_REF" }
@@ -555,25 +558,6 @@ const GsnName SignalNames [] = {
,{ GSN_UTIL_RELEASE_CONF, "UTIL_RELEASE_CONF" }
,{ GSN_UTIL_RELEASE_REF, "UTIL_RELASE_REF" }
- ,{ GSN_GREP_CREATE_REQ, "GREP_CREATE_REQ" },
- { GSN_GREP_CREATE_REF, "GREP_CREATE_REF" },
- { GSN_GREP_CREATE_CONF, "GREP_CREATE_CONF" },
- { GSN_GREP_START_REQ, "GREP_START_REQ" },
- { GSN_GREP_START_REF, "GREP_START_REF" },
- { GSN_GREP_START_CONF, "GREP_START_CONF" },
- { GSN_GREP_SYNC_REQ, "GREP_SYNC_REQ" },
- { GSN_GREP_SYNC_REF, "GREP_SYNC_REF" },
- { GSN_GREP_SYNC_CONF, "GREP_SYNC_CONF" },
- //{ GSN_REP_CONNECT_REQ, "REP_CONNECT_REQ" }, Not used
- //{ GSN_REP_CONNECT_REF, "REP_CONNECT_REF" }, Not used
- //{ GSN_REP_CONNECT_CONF, "REP_CONNECT_CONF" }, Not used
- { GSN_REP_WAITGCP_REQ, "REP_WAIT_GCP_REQ" },
- { GSN_REP_WAITGCP_REF, "REP_WAIT_GCP_REF" },
- { GSN_REP_WAITGCP_CONF, "REP_WAIT_GCP_CONF" },
- { GSN_GREP_WAITGCP_REQ, "GREP_WAIT_GCP_REQ" },
- { GSN_GREP_WAITGCP_REF, "GREP_WAIT_GCP_REF" },
- { GSN_GREP_WAITGCP_CONF, "GREP_WAIT_GCP_CONF" }
-
/* Suma Block Services **/
,{ GSN_SUB_CREATE_REQ, "SUB_CREATE_REQ" }
,{ GSN_SUB_CREATE_REF, "SUB_CREATE_REF" }
@@ -596,7 +580,7 @@ const GsnName SignalNames [] = {
,{ GSN_SUB_SYNC_CONTINUE_REF, "SUB_SYNC_CONTINUE_REF" }
,{ GSN_SUB_SYNC_CONTINUE_CONF, "SUB_SYNC_CONTINUE_CONF" }
,{ GSN_SUB_GCP_COMPLETE_REP, "SUB_GCP_COMPLETE_REP" }
- ,{ GSN_SUB_GCP_COMPLETE_ACC, "SUB_GCP_COMPLETE_ACC" }
+ ,{ GSN_SUB_GCP_COMPLETE_ACK, "SUB_GCP_COMPLETE_ACK" }
,{ GSN_CREATE_SUBID_REQ, "CREATE_SUBID_REQ" }
,{ GSN_CREATE_SUBID_REF, "CREATE_SUBID_REF" }
diff --git a/storage/ndb/src/common/debugger/signaldata/SumaImpl.cpp b/storage/ndb/src/common/debugger/signaldata/SumaImpl.cpp
index e50a3040fe3..c582af7d5d3 100644
--- a/storage/ndb/src/common/debugger/signaldata/SumaImpl.cpp
+++ b/storage/ndb/src/common/debugger/signaldata/SumaImpl.cpp
@@ -20,8 +20,8 @@ bool
printSUB_CREATE_REQ(FILE * output, const Uint32 * theData,
Uint32 len, Uint16 receiverBlockNo) {
const SubCreateReq * const sig = (SubCreateReq *)theData;
- fprintf(output, " subscriberRef: %x\n", sig->subscriberRef);
- fprintf(output, " subscriberData: %x\n", sig->subscriberData);
+ fprintf(output, " senderRef: %x\n", sig->senderRef);
+ fprintf(output, " senderData: %x\n", sig->senderData);
fprintf(output, " subscriptionId: %x\n", sig->subscriptionId);
fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey);
fprintf(output, " subscriptionType: %x\n", sig->subscriptionType);
@@ -33,9 +33,7 @@ bool
printSUB_CREATE_CONF(FILE * output, const Uint32 * theData,
Uint32 len, Uint16 receiverBlockNo) {
const SubCreateConf * const sig = (SubCreateConf *)theData;
- fprintf(output, " subscriptionId: %x\n", sig->subscriptionId);
- fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey);
- fprintf(output, " subscriberData: %x\n", sig->subscriberData);
+ fprintf(output, " senderData: %x\n", sig->senderData);
return false;
}
@@ -43,9 +41,7 @@ bool
printSUB_CREATE_REF(FILE * output, const Uint32 * theData,
Uint32 len, Uint16 receiverBlockNo) {
const SubCreateRef * const sig = (SubCreateRef *)theData;
- fprintf(output, " subscriptionId: %x\n", sig->subscriptionId);
- fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey);
- fprintf(output, " subscriberData: %x\n", sig->subscriberData);
+ fprintf(output, " senderData: %x\n", sig->senderData);
return false;
}
@@ -66,7 +62,7 @@ printSUB_REMOVE_CONF(FILE * output, const Uint32 * theData,
const SubRemoveConf * const sig = (SubRemoveConf *)theData;
fprintf(output, " subscriptionId: %x\n", sig->subscriptionId);
fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey);
- fprintf(output, " subscriberData: %x\n", sig->subscriberData);
+ fprintf(output, " senderData: %x\n", sig->senderData);
return false;
}
@@ -77,8 +73,8 @@ printSUB_REMOVE_REF(FILE * output, const Uint32 * theData,
const SubRemoveRef * const sig = (SubRemoveRef *)theData;
fprintf(output, " subscriptionId: %x\n", sig->subscriptionId);
fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey);
- fprintf(output, " subscriberData: %x\n", sig->subscriberData);
- fprintf(output, " err: %x\n", sig->err);
+ fprintf(output, " senderData: %x\n", sig->senderData);
+ fprintf(output, " errorCode: %x\n", sig->errorCode);
return false;
}
@@ -88,7 +84,7 @@ printSUB_START_REQ(FILE * output, const Uint32 * theData,
const SubStartReq * const sig = (SubStartReq *)theData;
fprintf(output, " subscriptionId: %x\n", sig->subscriptionId);
fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey);
- fprintf(output, " subscriberData: %x\n", sig->subscriberData);
+ fprintf(output, " senderData: %x\n", sig->senderData);
return false;
}
@@ -99,8 +95,8 @@ printSUB_START_REF(FILE * output, const Uint32 * theData,
fprintf(output, " subscriptionId: %x\n", sig->subscriptionId);
fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey);
fprintf(output, " startPart: %x\n", sig->part);
- fprintf(output, " subscriberData: %x\n", sig->subscriberData);
- fprintf(output, " err: %x\n", sig->err);
+ fprintf(output, " senderData: %x\n", sig->senderData);
+ fprintf(output, " errorCode: %x\n", sig->errorCode);
return false;
}
@@ -111,7 +107,7 @@ printSUB_START_CONF(FILE * output, const Uint32 * theData,
fprintf(output, " subscriptionId: %x\n", sig->subscriptionId);
fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey);
fprintf(output, " startPart: %x\n", sig->part);
- fprintf(output, " subscriberData: %x\n", sig->subscriberData);
+ fprintf(output, " senderData: %x\n", sig->senderData);
return false;
}
@@ -121,7 +117,7 @@ printSUB_STOP_REQ(FILE * output, const Uint32 * theData,
const SubStopReq * const sig = (SubStopReq *)theData;
fprintf(output, " subscriptionId: %x\n", sig->subscriptionId);
fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey);
- fprintf(output, " subscriberData: %x\n", sig->subscriberData);
+ fprintf(output, " senderData: %x\n", sig->senderData);
return false;
}
@@ -131,8 +127,8 @@ printSUB_STOP_REF(FILE * output, const Uint32 * theData,
const SubStopRef * const sig = (SubStopRef *)theData;
fprintf(output, " subscriptionId: %x\n", sig->subscriptionId);
fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey);
- fprintf(output, " subscriberData: %x\n", sig->subscriberData);
- fprintf(output, " err: %x\n", sig->err);
+ fprintf(output, " senderData: %x\n", sig->senderData);
+ fprintf(output, " errorCode: %x\n", sig->errorCode);
return false;
}
@@ -142,7 +138,7 @@ printSUB_STOP_CONF(FILE * output, const Uint32 * theData,
const SubStopConf * const sig = (SubStopConf *)theData;
fprintf(output, " subscriptionId: %x\n", sig->subscriptionId);
fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey);
- fprintf(output, " subscriberData: %x\n", sig->subscriberData);
+ fprintf(output, " senderData: %x\n", sig->senderData);
return false;
}
@@ -160,11 +156,8 @@ bool
printSUB_SYNC_REF(FILE * output, const Uint32 * theData,
Uint32 len, Uint16 receiverBlockNo) {
const SubSyncRef * const sig = (SubSyncRef *)theData;
- fprintf(output, " subscriptionId: %x\n", sig->subscriptionId);
- fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey);
- fprintf(output, " syncPart: %x\n", sig->part);
- fprintf(output, " subscriberData: %x\n", sig->subscriberData);
- fprintf(output, " err: %x\n", sig->err);
+ fprintf(output, " senderData: %x\n", sig->senderData);
+ fprintf(output, " errorCode: %x\n", sig->errorCode);
return false;
}
@@ -172,10 +165,7 @@ bool
printSUB_SYNC_CONF(FILE * output, const Uint32 * theData,
Uint32 len, Uint16 receiverBlockNo) {
const SubSyncConf * const sig = (SubSyncConf *)theData;
- fprintf(output, " subscriptionId: %x\n", sig->subscriptionId);
- fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey);
- fprintf(output, " syncPart: %x\n", sig->part);
- fprintf(output, " subscriberData: %x\n", sig->subscriberData);
+ fprintf(output, " senderData: %x\n", sig->senderData);
return false;
}
@@ -185,7 +175,7 @@ printSUB_META_DATA(FILE * output, const Uint32 * theData,
const SubMetaData * const sig = (SubMetaData *)theData;
fprintf(output, " gci: %x\n", sig->gci);
fprintf(output, " senderData: %x\n", sig->senderData);
- fprintf(output, " subscriberData: %x\n", sig->subscriberData);
+ fprintf(output, " senderData: %x\n", sig->senderData);
fprintf(output, " tableId: %x\n", sig->tableId);
return false;
}
@@ -195,12 +185,10 @@ printSUB_TABLE_DATA(FILE * output, const Uint32 * theData,
Uint32 len, Uint16 receiverBlockNo) {
const SubTableData * const sig = (SubTableData *)theData;
fprintf(output, " senderData: %x\n", sig->senderData);
- fprintf(output, " subscriberData: %x\n", sig->subscriberData);
+ fprintf(output, " senderData: %x\n", sig->senderData);
fprintf(output, " gci: %x\n", sig->gci);
fprintf(output, " tableId: %x\n", sig->tableId);
fprintf(output, " operation: %x\n", sig->operation);
- fprintf(output, " noOfAttributes: %x\n", sig->noOfAttributes);
- fprintf(output, " dataSize: %x\n", sig->dataSize);
return false;
}
diff --git a/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp b/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp
index b3d0221fedb..1f23bd7543c 100644
--- a/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp
+++ b/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp
@@ -62,7 +62,10 @@ ConfigRetriever::ConfigRetriever(const char * _connect_string,
if (ndb_mgm_set_connectstring(m_handle, _connect_string))
{
- setError(CR_ERROR, ndb_mgm_get_latest_error_desc(m_handle));
+ BaseString tmp(ndb_mgm_get_latest_error_msg(m_handle));
+ tmp.append(" : ");
+ tmp.append(ndb_mgm_get_latest_error_desc(m_handle));
+ setError(CR_ERROR, tmp.c_str());
DBUG_VOID_RETURN;
}
resetError();
@@ -147,7 +150,10 @@ ConfigRetriever::getConfig(NdbMgmHandle m_handle)
ndb_mgm_configuration * conf = ndb_mgm_get_configuration(m_handle,m_version);
if(conf == 0)
{
- setError(CR_ERROR, ndb_mgm_get_latest_error_desc(m_handle));
+ BaseString tmp(ndb_mgm_get_latest_error_msg(m_handle));
+ tmp.append(" : ");
+ tmp.append(ndb_mgm_get_latest_error_desc(m_handle));
+ setError(CR_ERROR, tmp.c_str());
return 0;
}
return conf;
@@ -349,7 +355,10 @@ ConfigRetriever::allocNodeId(int no_retries, int retry_delay_in_seconds)
no_retries--;
NdbSleep_SecSleep(retry_delay_in_seconds);
}
- setError(CR_ERROR, ndb_mgm_get_latest_error_desc(m_handle));
+ BaseString tmp(ndb_mgm_get_latest_error_msg(m_handle));
+ tmp.append(" : ");
+ tmp.append(ndb_mgm_get_latest_error_desc(m_handle));
+ setError(CR_ERROR, tmp.c_str());
} else
setError(CR_ERROR, "management server handle not initialized");
return 0;
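All three error paths in ConfigRetriever now build the same combined string: the short text from ndb_mgm_get_latest_error_msg() joined to the longer ndb_mgm_get_latest_error_desc() with " : " before the setError() call. A hypothetical helper that factors out the repeated pattern (not part of this patch) would look like:

// Hypothetical helper, not in the patch: builds the combined message used
// by the three setError(CR_ERROR, ...) call sites above.
static BaseString mgm_error_string(NdbMgmHandle handle)
{
  BaseString tmp(ndb_mgm_get_latest_error_msg(handle));
  tmp.append(" : ");
  tmp.append(ndb_mgm_get_latest_error_desc(handle));
  return tmp;
}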
diff --git a/storage/ndb/src/kernel/SimBlockList.cpp b/storage/ndb/src/kernel/SimBlockList.cpp
index 6029fc7e225..e2c342af34a 100644
--- a/storage/ndb/src/kernel/SimBlockList.cpp
+++ b/storage/ndb/src/kernel/SimBlockList.cpp
@@ -66,7 +66,7 @@ void * operator new (size_t sz, SIMBLOCKLIST_DUMMY dummy){
void
SimBlockList::load(const Configuration & conf){
- noOfBlocks = 16;
+ noOfBlocks = 15;
theList = new SimulatedBlock * [noOfBlocks];
Dbdict* dbdict = 0;
Dbdih* dbdih = 0;
@@ -96,8 +96,7 @@ SimBlockList::load(const Configuration & conf){
theList[11] = NEW_BLOCK(Backup)(conf);
theList[12] = NEW_BLOCK(DbUtil)(conf);
theList[13] = NEW_BLOCK(Suma)(conf);
- theList[14] = 0; //NEW_BLOCK(Grep)(conf);
- theList[15] = NEW_BLOCK(Dbtux)(conf);
+ theList[14] = NEW_BLOCK(Dbtux)(conf);
// Metadata common part shared by block instances
ptrMetaDataCommon = new MetaData::Common(*dbdict, *dbdih);
diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
index 6f6aee6a7f7..4b06ccc2ba4 100644
--- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
@@ -117,7 +117,6 @@ Cmvmi::Cmvmi(const Configuration & conf) :
break;
case NodeInfo::API:
case NodeInfo::MGM:
- case NodeInfo::REP:
break;
default:
ndbrequire(false);
@@ -189,6 +188,12 @@ void Cmvmi::execEVENT_REP(Signal* signal)
//-----------------------------------------------------------------------
EventReport * const eventReport = (EventReport *)&signal->theData[0];
Ndb_logevent_type eventType = eventReport->getEventType();
+ Uint32 nodeId= eventReport->getNodeId();
+ if (nodeId == 0)
+ {
+ nodeId= refToNode(signal->getSendersBlockRef());
+ eventReport->setNodeId(nodeId);
+ }
jamEntry();
@@ -322,10 +327,6 @@ void Cmvmi::execSTTOR(Signal* signal)
signal->theData[1] = 0; // no id
signal->theData[2] = NodeInfo::API;
execOPEN_COMREQ(signal);
- signal->theData[0] = 0; // no answer
- signal->theData[1] = 0; // no id
- signal->theData[2] = NodeInfo::REP;
- execOPEN_COMREQ(signal);
globalData.theStartLevel = NodeState::SL_STARTED;
sendSTTORRY(signal);
} else {
@@ -1024,9 +1025,6 @@ Cmvmi::execDUMP_STATE_ORD(Signal* signal)
case NodeInfo::MGM:
nodeTypeStr = "MGM";
break;
- case NodeInfo::REP:
- nodeTypeStr = "REP";
- break;
case NodeInfo::INVALID:
nodeTypeStr = 0;
break;
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index 2059e992051..66df7f0d3c7 100644
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -60,8 +60,6 @@
#include <signaldata/UtilExecute.hpp>
#include <signaldata/UtilRelease.hpp>
#include <signaldata/SumaImpl.hpp>
-#include <GrepError.hpp>
-//#include <signaldata/DropEvnt.hpp>
#include <signaldata/LqhFrag.hpp>
@@ -1290,9 +1288,6 @@ Dbdict::Dbdict(const class Configuration & conf):
addRecSignal(GSN_SUB_STOP_CONF, &Dbdict::execSUB_STOP_CONF);
addRecSignal(GSN_SUB_STOP_REF, &Dbdict::execSUB_STOP_REF);
- addRecSignal(GSN_SUB_SYNC_CONF, &Dbdict::execSUB_SYNC_CONF);
- addRecSignal(GSN_SUB_SYNC_REF, &Dbdict::execSUB_SYNC_REF);
-
addRecSignal(GSN_DROP_EVNT_REQ, &Dbdict::execDROP_EVNT_REQ);
addRecSignal(GSN_SUB_REMOVE_REQ, &Dbdict::execSUB_REMOVE_REQ);
@@ -2690,13 +2685,27 @@ Dbdict::restartCreateTab_activateComplete(Signal* signal,
c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
tabPtr.p->tabState = TableRecord::DEFINED;
- c_opCreateTable.release(createTabPtr);
+ releaseCreateTableOp(signal,createTabPtr);
c_restartRecord.activeTable++;
checkSchemaStatus(signal);
}
void
+Dbdict::releaseCreateTableOp(Signal* signal, CreateTableRecordPtr createTabPtr)
+{
+ if (createTabPtr.p->m_tabInfoPtrI != RNIL)
+ {
+ jam();
+ SegmentedSectionPtr tabInfoPtr;
+ getSection(tabInfoPtr, createTabPtr.p->m_tabInfoPtrI);
+ signal->setSection(tabInfoPtr, 0);
+ releaseSections(signal);
+ }
+ c_opCreateTable.release(createTabPtr);
+}
+
+void
Dbdict::restartDropTab(Signal* signal, Uint32 tableId){
const Uint32 key = ++c_opRecordSequence;
@@ -3777,7 +3786,7 @@ Dbdict::execALTER_TAB_CONF(Signal * signal){
TableRecordPtr tabPtr;
c_tableRecordPool.getPtr(tabPtr, alterTabPtr.p->m_tablePtrI);
releaseTableObject(tabPtr.i, false);
- c_opCreateTable.release(alterTabPtr);
+ releaseCreateTableOp(signal,alterTabPtr);
c_blockState = BS_IDLE;
}
else {
@@ -3891,6 +3900,7 @@ Dbdict::alterTab_writeSchemaConf(Signal* signal,
writeTableFile(signal, tableId, tabInfoPtr, &callback);
+ alterTabPtr.p->m_tabInfoPtrI = RNIL;
signal->setSection(tabInfoPtr, 0);
releaseSections(signal);
}
@@ -3915,6 +3925,17 @@ Dbdict::alterTab_writeTableConf(Signal* signal,
conf->tableVersion = tabPtr.p->tableVersion;
conf->gci = tabPtr.p->gciTableCreated;
conf->requestType = AlterTabReq::AlterTableCommit;
+ {
+ AlterTabConf tmp= *conf;
+ if (coordinatorRef == reference())
+ conf->senderRef = alterTabPtr.p->m_senderRef;
+ else
+ conf->senderRef = 0;
+ EXECUTE_DIRECT(SUMA, GSN_ALTER_TAB_CONF, signal,
+ AlterTabConf::SignalLength);
+ jamEntry();
+ *conf= tmp;
+ }
sendSignal(coordinatorRef, GSN_ALTER_TAB_CONF, signal,
AlterTabConf::SignalLength, JBB);
@@ -3943,7 +3964,7 @@ Dbdict::alterTab_writeTableConf(Signal* signal,
// Release resources
c_tableRecordPool.getPtr(tabPtr, alterTabPtr.p->m_tablePtrI);
releaseTableObject(tabPtr.i, false);
- c_opCreateTable.release(alterTabPtr);
+ releaseCreateTableOp(signal,alterTabPtr);
c_blockState = BS_IDLE;
}
}
@@ -4133,7 +4154,7 @@ Dbdict::createTab_reply(Signal* signal,
//@todo check api failed
sendSignal(createTabPtr.p->m_senderRef, GSN_CREATE_TABLE_REF, signal,
CreateTableRef::SignalLength, JBB);
- c_opCreateTable.release(createTabPtr);
+ releaseCreateTableOp(signal,createTabPtr);
c_blockState = BS_IDLE;
return;
}
@@ -4192,7 +4213,7 @@ Dbdict::createTab_startLcpMutex_unlocked(Signal* signal,
//@todo check api failed
sendSignal(createTabPtr.p->m_senderRef, GSN_CREATE_TABLE_CONF, signal,
CreateTableConf::SignalLength, JBB);
- c_opCreateTable.release(createTabPtr);
+ releaseCreateTableOp(signal,createTabPtr);
c_blockState = BS_IDLE;
return;
}
@@ -4323,10 +4344,11 @@ Dbdict::createTab_writeSchemaConf1(Signal* signal,
SegmentedSectionPtr tabInfoPtr;
getSection(tabInfoPtr, createTabPtr.p->m_tabInfoPtrI);
writeTableFile(signal, createTabPtr.p->m_tablePtrI, tabInfoPtr, &callback);
-
+#if 0
createTabPtr.p->m_tabInfoPtrI = RNIL;
signal->setSection(tabInfoPtr, 0);
releaseSections(signal);
+#endif
}
void
@@ -4814,12 +4836,28 @@ Dbdict::createTab_alterComplete(Signal* signal,
CreateTabConf * const conf = (CreateTabConf*)signal->getDataPtr();
conf->senderRef = reference();
conf->senderData = createTabPtr.p->key;
+ {
+ CreateTabConf tmp= *conf;
+ conf->senderData = createTabPtr.p->m_tablePtrI;
+#if 0
+ signal->header.m_noOfSections = 1;
+ SegmentedSectionPtr tabInfoPtr;
+ getSection(tabInfoPtr, createTabPtr.p->m_tabInfoPtrI);
+ signal->setSection(tabInfoPtr, 0);
+#endif
+ sendSignal(SUMA_REF, GSN_CREATE_TAB_CONF, signal,
+ CreateTabConf::SignalLength, JBB);
+ *conf= tmp;
+#if 0
+ signal->header.m_noOfSections = 0;
+#endif
+ }
sendSignal(createTabPtr.p->m_coordinatorRef, GSN_CREATE_TAB_CONF,
signal, CreateTabConf::SignalLength, JBB);
if(createTabPtr.p->m_coordinatorRef != reference()){
jam();
- c_opCreateTable.release(createTabPtr);
+ releaseCreateTableOp(signal,createTabPtr);
}
}
@@ -4887,7 +4925,7 @@ Dbdict::createTab_dropComplete(Signal* signal,
if(createTabPtr.p->m_coordinatorRef != reference()){
jam();
- c_opCreateTable.release(createTabPtr);
+ releaseCreateTableOp(signal,createTabPtr);
}
c_opDropTable.release(dropTabPtr);
@@ -5056,7 +5094,8 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
if (parseP->requestType != DictTabInfo::AlterTableFromAPI) {
jam();
#ifdef VM_TRACE
- ndbout_c("Dbdict: name=%s,id=%u", tablePtr.p->tableName, tablePtr.i);
+ ndbout_c("Dbdict: name=%s, id=%u, version=%u",
+ tablePtr.p->tableName, tablePtr.i, tablePtr.p->tableVersion);
TableRecordPtr tmp;
ndbrequire(!c_tableRecordHash.find(tmp, * tablePtr.p));
#endif
@@ -5705,7 +5744,6 @@ Dbdict::execDROP_TAB_CONF(Signal* signal){
conf->senderData = dropTabPtr.p->m_request.senderData;
conf->tableId = dropTabPtr.p->m_request.tableId;
conf->tableVersion = dropTabPtr.p->m_request.tableVersion;
-
Uint32 ref = dropTabPtr.p->m_request.senderRef;
sendSignal(ref, GSN_DROP_TABLE_CONF, signal,
DropTableConf::SignalLength, JBB);
@@ -5966,7 +6004,17 @@ Dbdict::dropTab_writeSchemaConf(Signal* signal,
conf->senderRef = reference();
conf->senderData = dropTabPtrI;
conf->tableId = dropTabPtr.p->m_request.tableId;
-
+ {
+ DropTabConf tmp= *conf;
+ if (dropTabPtr.p->m_coordinatorRef == reference())
+ conf->senderRef = dropTabPtr.p->m_request.senderRef;
+ else
+ conf->senderRef = 0;
+ EXECUTE_DIRECT(SUMA, GSN_DROP_TAB_CONF, signal,
+ DropTabConf::SignalLength);
+ jamEntry();
+ *conf= tmp;
+ }
dropTabPtr.p->m_participantData.m_gsn = GSN_DROP_TAB_CONF;
sendSignal(dropTabPtr.p->m_coordinatorRef, GSN_DROP_TAB_CONF, signal,
DropTabConf::SignalLength, JBB);
@@ -6113,7 +6161,7 @@ void Dbdict::sendGET_TABLEID_REF(Signal* signal,
* The format of GetTabInfo Req/Ref is the same
*/
BlockReference retRef = req->senderRef;
- ref->err = errorCode;
+ ref->err = errorCode;
sendSignal(retRef, GSN_GET_TABLEID_REF, signal,
GetTableIdRef::SignalLength, JBB);
}//sendGET_TABINFOREF()
@@ -7538,6 +7586,8 @@ void Dbdict::execUTIL_RELEASE_REF(Signal *signal)
const Uint32 Dbdict::sysTab_NDBEVENTS_0_szs[EVENT_SYSTEM_TABLE_LENGTH] = {
sizeof(((sysTab_NDBEVENTS_0*)0)->NAME),
sizeof(((sysTab_NDBEVENTS_0*)0)->EVENT_TYPE),
+ sizeof(((sysTab_NDBEVENTS_0*)0)->TABLEID),
+ sizeof(((sysTab_NDBEVENTS_0*)0)->TABLEVERSION),
sizeof(((sysTab_NDBEVENTS_0*)0)->TABLE_NAME),
sizeof(((sysTab_NDBEVENTS_0*)0)->ATTRIBUTE_MASK),
sizeof(((sysTab_NDBEVENTS_0*)0)->SUBID),
@@ -7755,7 +7805,7 @@ Dbdict::execCREATE_EVNT_REQ(Signal* signal)
CreateEvntRef * ret = (CreateEvntRef *)signal->getDataPtrSend();
ret->senderRef = reference();
- ret->setErrorCode(CreateEvntRef::SeizeError);
+ ret->setErrorCode(747);
ret->setErrorLine(__LINE__);
ret->setErrorNode(reference());
sendSignal(signal->senderBlockRef(), GSN_CREATE_EVNT_REF, signal,
@@ -7796,7 +7846,7 @@ Dbdict::execCREATE_EVNT_REQ(Signal* signal)
jam();
releaseSections(signal);
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorCode = 1;
evntRecPtr.p->m_errorLine = __LINE__;
evntRecPtr.p->m_errorNode = reference();
@@ -7810,8 +7860,10 @@ Dbdict::execCREATE_EVNT_REQ(Signal* signal)
*****************************************************************/
void
-Dbdict::createEvent_RT_USER_CREATE(Signal* signal, OpCreateEventPtr evntRecPtr){
+Dbdict::createEvent_RT_USER_CREATE(Signal* signal, OpCreateEventPtr evntRecPtr)
+{
jam();
+ DBUG_ENTER("Dbdict::createEvent_RT_USER_CREATE");
evntRecPtr.p->m_request.setUserRef(signal->senderBlockRef());
#ifdef EVENT_DEBUG
@@ -7839,12 +7891,12 @@ Dbdict::createEvent_RT_USER_CREATE(Signal* signal, OpCreateEventPtr evntRecPtr){
jam();
releaseSections(signal);
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorCode = 1;
evntRecPtr.p->m_errorLine = __LINE__;
evntRecPtr.p->m_errorNode = reference();
createEvent_sendReply(signal, evntRecPtr);
- return;
+ DBUG_VOID_RETURN;
}
r0.getString(evntRecPtr.p->m_eventRec.NAME);
{
@@ -7865,24 +7917,19 @@ Dbdict::createEvent_RT_USER_CREATE(Signal* signal, OpCreateEventPtr evntRecPtr){
jam();
releaseSections(signal);
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorCode = 1;
evntRecPtr.p->m_errorLine = __LINE__;
evntRecPtr.p->m_errorNode = reference();
createEvent_sendReply(signal, evntRecPtr);
- return;
+ DBUG_VOID_RETURN;
}
r0.getString(evntRecPtr.p->m_eventRec.TABLE_NAME);
{
int len = strlen(evntRecPtr.p->m_eventRec.TABLE_NAME);
memset(evntRecPtr.p->m_eventRec.TABLE_NAME+len, 0, MAX_TAB_NAME_SIZE-len);
}
-
-#ifdef EVENT_DEBUG
- ndbout_c("event name: %s",evntRecPtr.p->m_eventRec.NAME);
- ndbout_c("table name: %s",evntRecPtr.p->m_eventRec.TABLE_NAME);
-#endif
-
+
releaseSections(signal);
// Send request to SUMA
@@ -7891,6 +7938,7 @@ Dbdict::createEvent_RT_USER_CREATE(Signal* signal, OpCreateEventPtr evntRecPtr){
(CreateSubscriptionIdReq *)signal->getDataPtrSend();
// make sure we save the original sender for later
+ sumaIdReq->senderRef = reference();
sumaIdReq->senderData = evntRecPtr.i;
#ifdef EVENT_DEBUG
ndbout << "sumaIdReq->senderData = " << sumaIdReq->senderData << endl;
@@ -7899,12 +7947,13 @@ Dbdict::createEvent_RT_USER_CREATE(Signal* signal, OpCreateEventPtr evntRecPtr){
CreateSubscriptionIdReq::SignalLength, JBB);
// we should now return in either execCREATE_SUBID_CONF
// or execCREATE_SUBID_REF
+ DBUG_VOID_RETURN;
}
void Dbdict::execCREATE_SUBID_REF(Signal* signal)
{
- jamEntry();
- EVENT_TRACE;
+ jamEntry();
+ DBUG_ENTER("Dbdict::execCREATE_SUBID_REF");
CreateSubscriptionIdRef * const ref =
(CreateSubscriptionIdRef *)signal->getDataPtr();
OpCreateEventPtr evntRecPtr;
@@ -7912,17 +7961,26 @@ void Dbdict::execCREATE_SUBID_REF(Signal* signal)
evntRecPtr.i = ref->senderData;
ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
- evntRecPtr.p->m_errorLine = __LINE__;
+ if (ref->errorCode)
+ {
+ evntRecPtr.p->m_errorCode = ref->errorCode;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ }
+ else
+ {
+ evntRecPtr.p->m_errorCode = 1;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ }
evntRecPtr.p->m_errorNode = reference();
createEvent_sendReply(signal, evntRecPtr);
+ DBUG_VOID_RETURN;
}
void Dbdict::execCREATE_SUBID_CONF(Signal* signal)
{
jamEntry();
- EVENT_TRACE;
+ DBUG_ENTER("Dbdict::execCREATE_SUBID_CONF");
CreateSubscriptionIdConf const * sumaIdConf =
(CreateSubscriptionIdConf *)signal->getDataPtr();
@@ -7941,6 +7999,7 @@ void Dbdict::execCREATE_SUBID_CONF(Signal* signal)
prepareTransactionEventSysTable(&c, signal, evntRecId,
UtilPrepareReq::Insert);
+ DBUG_VOID_RETURN;
}
void
@@ -7958,46 +8017,47 @@ Dbdict::createEventComplete_RT_USER_CREATE(Signal* signal,
*/
void interpretUtilPrepareErrorCode(UtilPrepareRef::ErrorCode errorCode,
- bool& temporary, Uint32& line)
+ Uint32& error, Uint32& line)
{
+ DBUG_ENTER("interpretUtilPrepareErrorCode");
switch (errorCode) {
case UtilPrepareRef::NO_ERROR:
jam();
+ error = 1;
line = __LINE__;
- EVENT_TRACE;
- break;
+ DBUG_VOID_RETURN;
case UtilPrepareRef::PREPARE_SEIZE_ERROR:
jam();
- temporary = true;
+ error = 748;
line = __LINE__;
- EVENT_TRACE;
- break;
+ DBUG_VOID_RETURN;
case UtilPrepareRef::PREPARE_PAGES_SEIZE_ERROR:
jam();
+ error = 1;
line = __LINE__;
- EVENT_TRACE;
- break;
+ DBUG_VOID_RETURN;
case UtilPrepareRef::PREPARED_OPERATION_SEIZE_ERROR:
jam();
+ error = 1;
line = __LINE__;
- EVENT_TRACE;
- break;
+ DBUG_VOID_RETURN;
case UtilPrepareRef::DICT_TAB_INFO_ERROR:
jam();
+ error = 1;
line = __LINE__;
- EVENT_TRACE;
- break;
+ DBUG_VOID_RETURN;
case UtilPrepareRef::MISSING_PROPERTIES_SECTION:
jam();
+ error = 1;
line = __LINE__;
- EVENT_TRACE;
- break;
+ DBUG_VOID_RETURN;
default:
jam();
+ error = 1;
line = __LINE__;
- EVENT_TRACE;
- break;
+ DBUG_VOID_RETURN;
}
+ DBUG_VOID_RETURN;
}
void
@@ -8020,25 +8080,28 @@ Dbdict::createEventUTIL_PREPARE(Signal* signal,
switch (evntRecPtr.p->m_requestType) {
case CreateEvntReq::RT_USER_GET:
-#ifdef EVENT_DEBUG
- printf("get type = %d\n", CreateEvntReq::RT_USER_GET);
-#endif
jam();
executeTransEventSysTable(&c, signal,
evntRecPtr.i, evntRecPtr.p->m_eventRec,
prepareId, UtilPrepareReq::Read);
break;
case CreateEvntReq::RT_USER_CREATE:
-#ifdef EVENT_DEBUG
- printf("create type = %d\n", CreateEvntReq::RT_USER_CREATE);
-#endif
{
evntRecPtr.p->m_eventRec.EVENT_TYPE = evntRecPtr.p->m_request.getEventType();
+ evntRecPtr.p->m_eventRec.TABLEID = evntRecPtr.p->m_request.getTableId();
+ evntRecPtr.p->m_eventRec.TABLEVERSION=evntRecPtr.p->m_request.getTableVersion();
AttributeMask m = evntRecPtr.p->m_request.getAttrListBitmask();
memcpy(evntRecPtr.p->m_eventRec.ATTRIBUTE_MASK, &m,
sizeof(evntRecPtr.p->m_eventRec.ATTRIBUTE_MASK));
evntRecPtr.p->m_eventRec.SUBID = evntRecPtr.p->m_request.getEventId();
evntRecPtr.p->m_eventRec.SUBKEY = evntRecPtr.p->m_request.getEventKey();
+ DBUG_PRINT("info",
+ ("CREATE: event name: %s table name: %s table id: %u table version: %u",
+ evntRecPtr.p->m_eventRec.NAME,
+ evntRecPtr.p->m_eventRec.TABLE_NAME,
+ evntRecPtr.p->m_eventRec.TABLEID,
+ evntRecPtr.p->m_eventRec.TABLEVERSION));
+
}
jam();
executeTransEventSysTable(&c, signal,
@@ -8063,17 +8126,9 @@ Dbdict::createEventUTIL_PREPARE(Signal* signal,
evntRecPtr.i = ref->getSenderData();
ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
- bool temporary = false;
- interpretUtilPrepareErrorCode(errorCode,
- temporary, evntRecPtr.p->m_errorLine);
- if (temporary) {
- evntRecPtr.p->m_errorCode =
- CreateEvntRef::makeTemporary(CreateEvntRef::Undefined);
- }
-
- if (evntRecPtr.p->m_errorCode == 0) {
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
- }
+ Uint32 err;
+ interpretUtilPrepareErrorCode(errorCode, evntRecPtr.p->m_errorCode,
+ evntRecPtr.p->m_errorLine);
evntRecPtr.p->m_errorNode = reference();
createEvent_sendReply(signal, evntRecPtr);
@@ -8244,20 +8299,22 @@ void Dbdict::createEventUTIL_EXECUTE(Signal *signal,
switch (evntRec->m_requestType) {
case CreateEvntReq::RT_USER_GET: {
-#ifdef EVENT_DEBUG
- printf("get type = %d\n", CreateEvntReq::RT_USER_GET);
-#endif
parseReadEventSys(signal, evntRecPtr.p->m_eventRec);
evntRec->m_request.setEventType(evntRecPtr.p->m_eventRec.EVENT_TYPE);
- evntRec->m_request.setAttrListBitmask(*(AttributeMask*)evntRecPtr.p->m_eventRec.ATTRIBUTE_MASK);
+ evntRec->m_request.setTableId(evntRecPtr.p->m_eventRec.TABLEID);
+ evntRec->m_request.setTableVersion(evntRecPtr.p->m_eventRec.TABLEVERSION);
+ evntRec->m_request.setAttrListBitmask(*(AttributeMask*)
+ evntRecPtr.p->m_eventRec.ATTRIBUTE_MASK);
evntRec->m_request.setEventId(evntRecPtr.p->m_eventRec.SUBID);
evntRec->m_request.setEventKey(evntRecPtr.p->m_eventRec.SUBKEY);
-#ifdef EVENT_DEBUG
- printf("EventName: %s\n", evntRec->m_eventRec.NAME);
- printf("TableName: %s\n", evntRec->m_eventRec.TABLE_NAME);
-#endif
+ DBUG_PRINT("info",
+ ("GET: event name: %s table name: %s table id: %u table version: %u",
+ evntRecPtr.p->m_eventRec.NAME,
+ evntRecPtr.p->m_eventRec.TABLE_NAME,
+ evntRecPtr.p->m_eventRec.TABLEID,
+ evntRecPtr.p->m_eventRec.TABLEVERSION));
// find table id for event table
TableRecord keyRecord;
@@ -8268,7 +8325,7 @@ void Dbdict::createEventUTIL_EXECUTE(Signal *signal,
if (tablePtr.i == RNIL) {
jam();
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorCode = 723;
evntRecPtr.p->m_errorLine = __LINE__;
evntRecPtr.p->m_errorNode = reference();
@@ -8277,6 +8334,7 @@ void Dbdict::createEventUTIL_EXECUTE(Signal *signal,
}
evntRec->m_request.setTableId(tablePtr.p->tableId);
+ evntRec->m_request.setTableVersion(tablePtr.p->tableVersion);
createEventComplete_RT_USER_GET(signal, evntRecPtr);
return;
@@ -8307,21 +8365,21 @@ void Dbdict::createEventUTIL_EXECUTE(Signal *signal,
switch (ref->getTCErrorCode()) {
case ZNOT_FOUND:
jam();
- evntRecPtr.p->m_errorCode = CreateEvntRef::EventNotFound;
+ evntRecPtr.p->m_errorCode = 4710;
break;
case ZALREADYEXIST:
jam();
- evntRecPtr.p->m_errorCode = CreateEvntRef::EventNameExists;
+ evntRecPtr.p->m_errorCode = 746;
break;
default:
jam();
- evntRecPtr.p->m_errorCode = CreateEvntRef::UndefinedTCError;
+ evntRecPtr.p->m_errorCode = ref->getTCErrorCode();
break;
}
break;
default:
jam();
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorCode = ref->getErrorCode();
break;
}
@@ -8357,7 +8415,7 @@ Dbdict::createEvent_RT_USER_GET(Signal* signal, OpCreateEventPtr evntRecPtr){
jam();
releaseSections(signal);
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorCode = 1;
evntRecPtr.p->m_errorLine = __LINE__;
evntRecPtr.p->m_errorNode = reference();
@@ -8493,8 +8551,8 @@ Dbdict::createEvent_RT_DICT_AFTER_GET(Signal* signal, OpCreateEventPtr evntRecPt
SubCreateReq * sumaReq = (SubCreateReq *)signal->getDataPtrSend();
- sumaReq->subscriberRef = reference(); // reference to DICT
- sumaReq->subscriberData = evntRecPtr.i;
+ sumaReq->senderRef = reference(); // reference to DICT
+ sumaReq->senderData = evntRecPtr.i;
sumaReq->subscriptionId = evntRecPtr.p->m_request.getEventId();
sumaReq->subscriptionKey = evntRecPtr.p->m_request.getEventKey();
sumaReq->subscriptionType = SubCreateReq::TableEvent;
@@ -8505,106 +8563,56 @@ Dbdict::createEvent_RT_DICT_AFTER_GET(Signal* signal, OpCreateEventPtr evntRecPt
#endif
sendSignal(SUMA_REF, GSN_SUB_CREATE_REQ, signal,
- SubCreateReq::SignalLength+1 /*to get table Id*/, JBB);
+ SubCreateReq::SignalLength, JBB);
}
void Dbdict::execSUB_CREATE_REF(Signal* signal)
{
jamEntry();
- EVENT_TRACE;
+ DBUG_ENTER("Dbdict::execSUB_CREATE_REF");
+
SubCreateRef * const ref = (SubCreateRef *)signal->getDataPtr();
OpCreateEventPtr evntRecPtr;
- evntRecPtr.i = ref->subscriberData;
+ evntRecPtr.i = ref->senderData;
ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
-#ifdef EVENT_PH2_DEBUG
- ndbout_c("DBDICT(Participant) got SUB_CREATE_REF evntRecPtr.i = (%d)", evntRecPtr.i);
-#endif
-
- if (ref->err == GrepError::SUBSCRIPTION_ID_NOT_UNIQUE) {
+ if (ref->errorCode == 1415) {
jam();
-#ifdef EVENT_PH2_DEBUG
- ndbout_c("SUBSCRIPTION_ID_NOT_UNIQUE");
-#endif
createEvent_sendReply(signal, evntRecPtr);
- return;
+ DBUG_VOID_RETURN;
}
-#ifdef EVENT_PH2_DEBUG
- ndbout_c("Other error");
-#endif
-
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
- evntRecPtr.p->m_errorLine = __LINE__;
+ if (ref->errorCode)
+ {
+ evntRecPtr.p->m_errorCode = ref->errorCode;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ }
+ else
+ {
+ evntRecPtr.p->m_errorCode = 1;
+ evntRecPtr.p->m_errorLine = __LINE__;
+ }
evntRecPtr.p->m_errorNode = reference();
createEvent_sendReply(signal, evntRecPtr);
+ DBUG_VOID_RETURN;
}
void Dbdict::execSUB_CREATE_CONF(Signal* signal)
{
jamEntry();
+ DBUG_ENTER("Dbdict::execSUB_CREATE_CONF");
EVENT_TRACE;
SubCreateConf * const sumaConf = (SubCreateConf *)signal->getDataPtr();
-
- const Uint32 subscriptionId = sumaConf->subscriptionId;
- const Uint32 subscriptionKey = sumaConf->subscriptionKey;
- const Uint32 evntRecId = sumaConf->subscriberData;
-
- OpCreateEvent *evntRec;
- ndbrequire((evntRec = c_opCreateEvent.getPtr(evntRecId)) != NULL);
-
-#ifdef EVENT_PH2_DEBUG
- ndbout_c("DBDICT(Participant) got SUB_CREATE_CONF evntRecPtr.i = (%d)", evntRecId);
-#endif
-
- SubSyncReq *sumaSync = (SubSyncReq *)signal->getDataPtrSend();
-
- sumaSync->subscriptionId = subscriptionId;
- sumaSync->subscriptionKey = subscriptionKey;
- sumaSync->part = (Uint32) SubscriptionData::MetaData;
- sumaSync->subscriberData = evntRecId;
-
- sendSignal(SUMA_REF, GSN_SUB_SYNC_REQ, signal,
- SubSyncReq::SignalLength, JBB);
-}
-
-void Dbdict::execSUB_SYNC_REF(Signal* signal)
-{
- jamEntry();
- EVENT_TRACE;
- SubSyncRef * const ref = (SubSyncRef *)signal->getDataPtr();
OpCreateEventPtr evntRecPtr;
-
- evntRecPtr.i = ref->subscriberData;
+ evntRecPtr.i = sumaConf->senderData;
ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
- evntRecPtr.p->m_errorLine = __LINE__;
- evntRecPtr.p->m_errorNode = reference();
-
createEvent_sendReply(signal, evntRecPtr);
-}
-
-void Dbdict::execSUB_SYNC_CONF(Signal* signal)
-{
- jamEntry();
- EVENT_TRACE;
-
- SubSyncConf * const sumaSyncConf = (SubSyncConf *)signal->getDataPtr();
-
- // Uint32 subscriptionId = sumaSyncConf->subscriptionId;
- // Uint32 subscriptionKey = sumaSyncConf->subscriptionKey;
- OpCreateEventPtr evntRecPtr;
- evntRecPtr.i = sumaSyncConf->subscriberData;
- ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL);
-
- ndbrequire(sumaSyncConf->part == (Uint32)SubscriptionData::MetaData);
-
- createEvent_sendReply(signal, evntRecPtr);
+ DBUG_VOID_RETURN;
}
/****************************************************
@@ -8631,7 +8639,7 @@ void Dbdict::createEvent_sendReply(Signal* signal,
if (evntRecPtr.p->m_reqTracker.hasRef()) {
ptr = NULL; // we don't want to return anything if there's an error
if (!evntRecPtr.p->hasError()) {
- evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined;
+ evntRecPtr.p->m_errorCode = 1;
evntRecPtr.p->m_errorLine = __LINE__;
evntRecPtr.p->m_errorNode = reference();
jam();
@@ -8655,6 +8663,7 @@ void Dbdict::createEvent_sendReply(Signal* signal,
ret->setUserData(evntRecPtr.p->m_request.getUserData());
ret->senderRef = reference();
ret->setTableId(evntRecPtr.p->m_request.getTableId());
+ ret->setTableVersion(evntRecPtr.p->m_request.getTableVersion());
ret->setEventType(evntRecPtr.p->m_request.getEventType());
ret->setRequestType(evntRecPtr.p->m_request.getRequestType());
@@ -8680,6 +8689,7 @@ void Dbdict::createEvent_sendReply(Signal* signal,
evntConf->setUserData(evntRecPtr.p->m_request.getUserData());
evntConf->senderRef = reference();
evntConf->setTableId(evntRecPtr.p->m_request.getTableId());
+ evntConf->setTableVersion(evntRecPtr.p->m_request.getTableVersion());
evntConf->setAttrListBitmask(evntRecPtr.p->m_request.getAttrListBitmask());
evntConf->setEventType(evntRecPtr.p->m_request.getEventType());
evntConf->setRequestType(evntRecPtr.p->m_request.getRequestType());
@@ -8728,7 +8738,7 @@ void Dbdict::execSUB_START_REQ(Signal* signal)
// ret->setErrorLine(__LINE__);
// ret->setErrorNode(reference());
ref->senderRef = reference();
- ref->setTemporary(SubStartRef::Busy);
+ ref->errorCode = SubStartRef::Busy;
sendSignal(origSenderRef, GSN_SUB_START_REF, signal,
SubStartRef::SignalLength2, JBB);
@@ -8789,6 +8799,7 @@ void Dbdict::execSUB_START_REF(Signal* signal)
const SubStartRef* ref = (SubStartRef*) signal->getDataPtr();
Uint32 senderRef = ref->senderRef;
+ Uint32 err = ref->errorCode;
OpSubEventPtr subbPtr;
c_opSubEvent.getPtr(subbPtr, ref->senderData);
@@ -8803,27 +8814,13 @@ void Dbdict::execSUB_START_REF(Signal* signal)
ndbout_c("DBDICT(Participant) got GSN_SUB_START_REF = (%d)", subbPtr.i);
#endif
- if (ref->isTemporary()){
- jam();
- SubStartReq* req = (SubStartReq*)signal->getDataPtrSend();
- { // fix
- Uint32 subscriberRef = ref->subscriberRef;
- req->subscriberRef = subscriberRef;
- }
- req->senderRef = reference();
- req->senderData = subbPtr.i;
- sendSignal(SUMA_REF, GSN_SUB_START_REQ,
- signal, SubStartReq::SignalLength2, JBB);
- } else {
- jam();
-
- SubStartRef* ref = (SubStartRef*) signal->getDataPtrSend();
- ref->senderRef = reference();
- ref->senderData = subbPtr.p->m_senderData;
- sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_REF,
- signal, SubStartRef::SignalLength2, JBB);
- c_opSubEvent.release(subbPtr);
- }
+ jam();
+ SubStartRef* ref = (SubStartRef*) signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = subbPtr.p->m_senderData;
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_REF,
+ signal, SubStartRef::SignalLength2, JBB);
+ c_opSubEvent.release(subbPtr);
return;
}
/*
@@ -8833,11 +8830,15 @@ void Dbdict::execSUB_START_REF(Signal* signal)
#ifdef EVENT_PH3_DEBUG
ndbout_c("DBDICT(Coordinator) got GSN_SUB_START_REF = (%d)", subbPtr.i);
#endif
- if (ref->errorCode == SubStartRef::NF_FakeErrorREF){
+ if (err == SubStartRef::NF_FakeErrorREF){
jam();
subbPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(senderRef));
} else {
jam();
+ if (subbPtr.p->m_errorCode == 0)
+ {
+ subbPtr.p->m_errorCode= err ? err : 1;
+ }
subbPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(senderRef));
}
completeSubStartReq(signal,subbPtr.i,0);
@@ -8940,7 +8941,7 @@ void Dbdict::execSUB_STOP_REQ(Signal* signal)
// ret->setErrorLine(__LINE__);
// ret->setErrorNode(reference());
ref->senderRef = reference();
- ref->setTemporary(SubStopRef::Busy);
+ ref->errorCode = SubStopRef::Busy;
sendSignal(origSenderRef, GSN_SUB_STOP_REF, signal,
SubStopRef::SignalLength, JBB);
@@ -8997,6 +8998,7 @@ void Dbdict::execSUB_STOP_REF(Signal* signal)
jamEntry();
const SubStopRef* ref = (SubStopRef*) signal->getDataPtr();
Uint32 senderRef = ref->senderRef;
+ Uint32 err = ref->errorCode;
OpSubEventPtr subbPtr;
c_opSubEvent.getPtr(subbPtr, ref->senderData);
@@ -9006,33 +9008,27 @@ void Dbdict::execSUB_STOP_REF(Signal* signal)
* Participant
*/
jam();
- if (ref->isTemporary()){
- jam();
- SubStopReq* req = (SubStopReq*)signal->getDataPtrSend();
- req->senderRef = reference();
- req->senderData = subbPtr.i;
- sendSignal(SUMA_REF, GSN_SUB_STOP_REQ,
- signal, SubStopReq::SignalLength, JBB);
- } else {
- jam();
- SubStopRef* ref = (SubStopRef*) signal->getDataPtrSend();
- ref->senderRef = reference();
- ref->senderData = subbPtr.p->m_senderData;
- sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_REF,
- signal, SubStopRef::SignalLength, JBB);
- c_opSubEvent.release(subbPtr);
- }
+ SubStopRef* ref = (SubStopRef*) signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = subbPtr.p->m_senderData;
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_REF,
+ signal, SubStopRef::SignalLength, JBB);
+ c_opSubEvent.release(subbPtr);
return;
}
/*
* Coordinator
*/
ndbrequire(refToBlock(senderRef) == DBDICT);
- if (ref->errorCode == SubStopRef::NF_FakeErrorREF){
+ if (err == SubStopRef::NF_FakeErrorREF){
jam();
subbPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(senderRef));
} else {
jam();
+ if (subbPtr.p->m_errorCode == 0)
+ {
+ subbPtr.p->m_errorCode= err ? err : 1;
+ }
subbPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(senderRef));
}
completeSubStopReq(signal,subbPtr.i,0);
@@ -9092,17 +9088,9 @@ void Dbdict::completeSubStopReq(Signal* signal,
#endif
SubStopRef* ref = (SubStopRef*)signal->getDataPtrSend();
- ref->senderRef = reference();
- ref->senderData = subbPtr.p->m_senderData;
- /*
- ref->subscriptionId = subbPtr.p->m_senderData;
- ref->subscriptionKey = subbPtr.p->m_senderData;
- ref->part = subbPtr.p->m_part; // SubscriptionData::Part
- ref->subscriberData = subbPtr.p->m_subscriberData;
- ref->subscriberRef = subbPtr.p->m_subscriberRef;
- */
- ref->errorCode = subbPtr.p->m_errorCode;
-
+ ref->senderRef = reference();
+ ref->senderData = subbPtr.p->m_senderData;
+ ref->errorCode = subbPtr.p->m_errorCode;
sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_REF,
signal, SubStopRef::SignalLength, JBB);
@@ -9132,7 +9120,7 @@ void
Dbdict::execDROP_EVNT_REQ(Signal* signal)
{
jamEntry();
- EVENT_TRACE;
+ DBUG_ENTER("Dbdict::execDROP_EVNT_REQ");
DropEvntReq *req = (DropEvntReq*)signal->getDataPtr();
const Uint32 senderRef = signal->senderBlockRef();
@@ -9145,12 +9133,12 @@ Dbdict::execDROP_EVNT_REQ(Signal* signal)
releaseSections(signal);
DropEvntRef * ret = (DropEvntRef *)signal->getDataPtrSend();
- ret->setErrorCode(DropEvntRef::SeizeError);
+ ret->setErrorCode(747);
ret->setErrorLine(__LINE__);
ret->setErrorNode(reference());
sendSignal(senderRef, GSN_DROP_EVNT_REF, signal,
DropEvntRef::SignalLength, JBB);
- return;
+ DBUG_VOID_RETURN;
}
#ifdef EVENT_DEBUG
@@ -9175,12 +9163,12 @@ Dbdict::execDROP_EVNT_REQ(Signal* signal)
jam();
releaseSections(signal);
- evntRecPtr.p->m_errorCode = DropEvntRef::Undefined;
+ evntRecPtr.p->m_errorCode = 1;
evntRecPtr.p->m_errorLine = __LINE__;
evntRecPtr.p->m_errorNode = reference();
dropEvent_sendReply(signal, evntRecPtr);
- return;
+ DBUG_VOID_RETURN;
}
r0.getString(evntRecPtr.p->m_eventRec.NAME);
{
@@ -9201,6 +9189,7 @@ Dbdict::execDROP_EVNT_REQ(Signal* signal)
prepareTransactionEventSysTable(&c, signal, evntRecPtr.i,
UtilPrepareReq::Read);
+ DBUG_VOID_RETURN;
}
void
@@ -9274,6 +9263,7 @@ void
Dbdict::execSUB_REMOVE_REQ(Signal* signal)
{
jamEntry();
+ DBUG_ENTER("Dbdict::execSUB_REMOVE_REQ");
Uint32 origSenderRef = signal->senderBlockRef();
@@ -9282,11 +9272,11 @@ Dbdict::execSUB_REMOVE_REQ(Signal* signal)
SubRemoveRef * ref = (SubRemoveRef *)signal->getDataPtrSend();
jam();
ref->senderRef = reference();
- ref->setTemporary(SubRemoveRef::Busy);
+ ref->errorCode = SubRemoveRef::Busy;
sendSignal(origSenderRef, GSN_SUB_REMOVE_REF, signal,
SubRemoveRef::SignalLength, JBB);
- return;
+ DBUG_VOID_RETURN;
}
{
@@ -9301,6 +9291,7 @@ Dbdict::execSUB_REMOVE_REQ(Signal* signal)
req->senderData = subbPtr.i;
sendSignal(SUMA_REF, GSN_SUB_REMOVE_REQ, signal, SubRemoveReq::SignalLength, JBB);
+ DBUG_VOID_RETURN;
}
/*
@@ -9311,8 +9302,11 @@ void
Dbdict::execSUB_REMOVE_REF(Signal* signal)
{
jamEntry();
+ DBUG_ENTER("Dbdict::execSUB_REMOVE_REF");
+
const SubRemoveRef* ref = (SubRemoveRef*) signal->getDataPtr();
Uint32 senderRef = ref->senderRef;
+ Uint32 err= ref->errorCode;
if (refToBlock(senderRef) == SUMA) {
/*
@@ -9321,8 +9315,8 @@ Dbdict::execSUB_REMOVE_REF(Signal* signal)
jam();
OpSubEventPtr subbPtr;
c_opSubEvent.getPtr(subbPtr, ref->senderData);
- if (ref->errorCode == (Uint32) GrepError::SUBSCRIPTION_ID_NOT_FOUND) {
- // conf this since this may occur if a nodefailiure has occured
+ if (err == 1407) {
+    // conf this since this may occur if a node failure has occurred
// earlier so that the systable was not cleared
SubRemoveConf* conf = (SubRemoveConf*) signal->getDataPtrSend();
conf->senderRef = reference();
@@ -9337,7 +9331,7 @@ Dbdict::execSUB_REMOVE_REF(Signal* signal)
signal, SubRemoveRef::SignalLength, JBB);
}
c_opSubEvent.release(subbPtr);
- return;
+ DBUG_VOID_RETURN;
}
/*
* Coordinator
@@ -9345,14 +9339,21 @@ Dbdict::execSUB_REMOVE_REF(Signal* signal)
ndbrequire(refToBlock(senderRef) == DBDICT);
OpDropEventPtr eventRecPtr;
c_opDropEvent.getPtr(eventRecPtr, ref->senderData);
- if (ref->errorCode == SubRemoveRef::NF_FakeErrorREF){
+ if (err == SubRemoveRef::NF_FakeErrorREF){
jam();
eventRecPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(senderRef));
} else {
jam();
+ if (eventRecPtr.p->m_errorCode == 0)
+ {
+ eventRecPtr.p->m_errorCode= err ? err : 1;
+ eventRecPtr.p->m_errorLine= __LINE__;
+ eventRecPtr.p->m_errorNode= reference();
+ }
eventRecPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(senderRef));
}
completeSubRemoveReq(signal,eventRecPtr.i,0);
+ DBUG_VOID_RETURN;
}
void
@@ -9400,9 +9401,12 @@ Dbdict::completeSubRemoveReq(Signal* signal, Uint32 ptrI, Uint32 xxx)
if (evntRecPtr.p->m_reqTracker.hasRef()) {
jam();
- evntRecPtr.p->m_errorNode = reference();
- evntRecPtr.p->m_errorLine = __LINE__;
- evntRecPtr.p->m_errorCode = DropEvntRef::Undefined;
+ if ( evntRecPtr.p->m_errorCode == 0 )
+ {
+ evntRecPtr.p->m_errorNode = reference();
+ evntRecPtr.p->m_errorLine = __LINE__;
+ evntRecPtr.p->m_errorCode = 1;
+ }
dropEvent_sendReply(signal, evntRecPtr);
return;
}
@@ -9478,18 +9482,8 @@ Dbdict::dropEventUtilPrepareRef(Signal* signal,
evntRecPtr.i = ref->getSenderData();
ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL);
- bool temporary = false;
interpretUtilPrepareErrorCode((UtilPrepareRef::ErrorCode)ref->getErrorCode(),
- temporary, evntRecPtr.p->m_errorLine);
- if (temporary) {
- evntRecPtr.p->m_errorCode = (DropEvntRef::ErrorCode)
- ((Uint32) DropEvntRef::Undefined | (Uint32) DropEvntRef::Temporary);
- }
-
- if (evntRecPtr.p->m_errorCode == 0) {
- evntRecPtr.p->m_errorCode = DropEvntRef::Undefined;
- evntRecPtr.p->m_errorLine = __LINE__;
- }
+ evntRecPtr.p->m_errorCode, evntRecPtr.p->m_errorLine);
evntRecPtr.p->m_errorNode = reference();
dropEvent_sendReply(signal, evntRecPtr);
@@ -9516,17 +9510,17 @@ Dbdict::dropEventUtilExecuteRef(Signal* signal,
switch (ref->getTCErrorCode()) {
case ZNOT_FOUND:
jam();
- evntRecPtr.p->m_errorCode = DropEvntRef::EventNotFound;
+ evntRecPtr.p->m_errorCode = 4710;
break;
default:
jam();
- evntRecPtr.p->m_errorCode = DropEvntRef::UndefinedTCError;
+ evntRecPtr.p->m_errorCode = ref->getTCErrorCode();
break;
}
break;
default:
jam();
- evntRecPtr.p->m_errorCode = DropEvntRef::Undefined;
+ evntRecPtr.p->m_errorCode = ref->getErrorCode();
break;
}
dropEvent_sendReply(signal, evntRecPtr);
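Throughout Dbdict the symbolic CreateEvntRef/DropEvntRef error enums give way to plain NDB error numbers (723 when the event's table cannot be found, 746/4710 from the ZALREADYEXIST/ZNOT_FOUND TC codes, 747 when an operation record cannot be seized, and 1 as a generic fallback), and the REF handlers now forward whatever errorCode the incoming REF carried. The recurring fallback reduces to this small pattern, with rec and ref standing in for the various operation records and REF signals:

// Keep the first real error; record a generic non-zero code (1) when the
// incoming REF did not carry one.  'rec' and 'ref' are placeholders here.
if (rec->m_errorCode == 0)
{
  rec->m_errorCode = ref->errorCode ? ref->errorCode : 1;
  rec->m_errorLine = __LINE__;
  rec->m_errorNode = reference();
}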
diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
index 632bff0c8f7..f75f5c904c0 100644
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
@@ -107,11 +107,13 @@
*/
#define EVENT_SYSTEM_TABLE_NAME "sys/def/NDB$EVENTS_0"
-#define EVENT_SYSTEM_TABLE_LENGTH 6
+#define EVENT_SYSTEM_TABLE_LENGTH 8
struct sysTab_NDBEVENTS_0 {
char NAME[MAX_TAB_NAME_SIZE];
Uint32 EVENT_TYPE;
+ Uint32 TABLEID;
+ Uint32 TABLEVERSION;
char TABLE_NAME[MAX_TAB_NAME_SIZE];
Uint32 ATTRIBUTE_MASK[MAXNROFATTRIBUTESINWORDS];
Uint32 SUBID;
@@ -546,9 +548,6 @@ private:
void execSUB_CREATE_CONF(Signal* signal);
void execSUB_CREATE_REF (Signal* signal);
- void execSUB_SYNC_CONF(Signal* signal);
- void execSUB_SYNC_REF (Signal* signal);
-
void execSUB_REMOVE_REQ(Signal* signal);
void execSUB_REMOVE_CONF(Signal* signal);
void execSUB_REMOVE_REF(Signal* signal);
@@ -1388,7 +1387,7 @@ private:
CreateEvntReq::RequestType m_requestType;
Uint32 m_requestFlag;
// error info
- CreateEvntRef::ErrorCode m_errorCode;
+ Uint32 m_errorCode;
Uint32 m_errorLine;
Uint32 m_errorNode;
// ctor
@@ -1432,24 +1431,24 @@ private:
sysTab_NDBEVENTS_0 m_eventRec;
RequestTracker m_reqTracker;
// error info
- DropEvntRef::ErrorCode m_errorCode;
+ Uint32 m_errorCode;
Uint32 m_errorLine;
Uint32 m_errorNode;
// ctor
OpDropEvent() {
memset(&m_request, 0, sizeof(m_request));
- m_errorCode = DropEvntRef::NoError;
+ m_errorCode = 0;
m_errorLine = 0;
m_errorNode = 0;
}
void init(const DropEvntReq* req) {
m_request = *req;
- m_errorCode = DropEvntRef::NoError;
+ m_errorCode = 0;
m_errorLine = 0;
m_errorNode = 0;
}
bool hasError() {
- return m_errorCode != DropEvntRef::NoError;
+ return m_errorCode != 0;
}
void setError(const DropEvntRef* ref) {
if (ref != 0 && ! hasError()) {
@@ -1782,6 +1781,7 @@ private:
/* ------------------------------------------------------------ */
// Add Table Handling
/* ------------------------------------------------------------ */
+ void releaseCreateTableOp(Signal* signal, CreateTableRecordPtr createTabPtr);
/* ------------------------------------------------------------ */
// Add Fragment Handling
diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index 21d6a1182be..af76a74a133 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -7903,7 +7903,6 @@ void Dbdih::writingCopyGciLab(Signal* signal, FileRecordPtr filePtr)
SubGcpCompleteRep * const rep = (SubGcpCompleteRep*)signal->getDataPtr();
rep->gci = coldgcp;
- rep->senderData = 0;
sendSignal(SUMA_REF, GSN_SUB_GCP_COMPLETE_REP, signal,
SubGcpCompleteRep::SignalLength, JBB);
}
diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index 42688796801..eb4e7a6bb83 100644
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -3272,6 +3272,7 @@ void Dblqh::seizeTcrec()
locTcConnectptr.p->tcTimer = cLqhTimeOutCount;
locTcConnectptr.p->tableref = RNIL;
locTcConnectptr.p->savePointId = 0;
+ locTcConnectptr.p->gci = 0;
cfirstfreeTcConrec = nextTc;
tcConnectptr = locTcConnectptr;
locTcConnectptr.p->connectState = TcConnectionrec::CONNECTED;
@@ -5422,6 +5423,7 @@ void Dblqh::commitReqLab(Signal* signal, Uint32 gci)
TcConnectionrec * const regTcPtr = tcConnectptr.p;
TcConnectionrec::LogWriteState logWriteState = regTcPtr->logWriteState;
TcConnectionrec::TransactionState transState = regTcPtr->transactionState;
+ ndbrequire(regTcPtr->gci == gci || regTcPtr->gci == 0);
regTcPtr->gci = gci;
if (transState == TcConnectionrec::PREPARED) {
if (logWriteState == TcConnectionrec::WRITTEN) {
@@ -5491,6 +5493,7 @@ void Dblqh::execLQH_WRITELOG_REQ(Signal* signal)
Uint32 newestGci = cnewestGci;
TcConnectionrec::LogWriteState logWriteState = regTcPtr->logWriteState;
TcConnectionrec::TransactionState transState = regTcPtr->transactionState;
+ ndbrequire(regTcPtr->gci == gci || regTcPtr->gci == 0);
regTcPtr->gci = gci;
if (gci > newestGci) {
jam();
@@ -8890,7 +8893,7 @@ Uint32 Dblqh::sendKeyinfo20(Signal* signal,
const Uint32 nodeId = refToNode(ref);
const bool connectedToNode = getNodeInfo(nodeId).m_connected;
const Uint32 type = getNodeInfo(nodeId).m_type;
- const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::REP);
+ const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::MGM);
const bool old_dest = (getNodeInfo(nodeId).m_version < MAKE_VERSION(3,5,0));
const bool longable = true; // TODO is_api && !old_dest;
diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
index c79f686f78c..c3a6e95719a 100644
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -3908,7 +3908,7 @@ void Dbtc::sendtckeyconf(Signal* signal, UintR TcommitFlag)
const UintR TopWords = (UintR)regApiPtr->tckeyrec;
localHostptr.i = refToNode(regApiPtr->ndbapiBlockref);
const Uint32 type = getNodeInfo(localHostptr.i).m_type;
- const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::REP);
+ const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::MGM);
const BlockNumber TblockNum = refToBlock(regApiPtr->ndbapiBlockref);
const Uint32 Tmarker = (regApiPtr->commitAckMarker == RNIL) ? 0 : 1;
ptrAss(localHostptr, hostRecord);
@@ -4590,7 +4590,8 @@ void Dbtc::sendApiCommit(Signal* signal)
commitConf->transId1 = regApiPtr->transid[0];
commitConf->transId2 = regApiPtr->transid[1];
commitConf->gci = regApiPtr->globalcheckpointid;
- sendSignal(regApiPtr->ndbapiBlockref, GSN_TC_COMMITCONF, signal,
+
+ sendSignal(regApiPtr->ndbapiBlockref, GSN_TC_COMMITCONF, signal,
TcCommitConf::SignalLength, JBB);
} else if (regApiPtr->returnsignal == RS_NO_RETURN) {
jam();
@@ -5127,6 +5128,19 @@ void Dbtc::execLQHKEYREF(Signal* signal)
return;
}
+ /* Only ref in certain situations */
+ {
+ const Uint32 opType = regTcPtr->operation;
+ if ( (opType == ZDELETE && errCode != ZNOT_FOUND)
+ || (opType == ZINSERT && errCode != ZALREADYEXIST)
+ || (opType == ZUPDATE && errCode != ZNOT_FOUND)
+ || (opType == ZWRITE && errCode != 839 && errCode != 840))
+ {
+ TCKEY_abort(signal, 49);
+ return;
+ }
+ }
+
/* *************** */
/* TCKEYREF < */
/* *************** */
@@ -11336,7 +11350,7 @@ void Dbtc::sendTcIndxConf(Signal* signal, UintR TcommitFlag)
const UintR TopWords = (UintR)regApiPtr->tcindxrec;
localHostptr.i = refToNode(regApiPtr->ndbapiBlockref);
const Uint32 type = getNodeInfo(localHostptr.i).m_type;
- const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::REP);
+ const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::MGM);
const BlockNumber TblockNum = refToBlock(regApiPtr->ndbapiBlockref);
const Uint32 Tmarker = (regApiPtr->commitAckMarker == RNIL ? 0 : 1);
ptrAss(localHostptr, hostRecord);
diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
index f0480853c1a..bf07a39f00d 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -345,6 +345,7 @@
#endif
class Dbtup: public SimulatedBlock {
+ friend class Suma;
public:
typedef bool (Dbtup::* ReadFunction)(Uint32*,
@@ -420,7 +421,8 @@ enum State {
SAME_PAGE = 64,
DEFINING = 65,
TUPLE_BLOCKED = 66,
- ERROR_WAIT_STORED_PROCREQ = 67
+ ERROR_WAIT_STORED_PROCREQ = 67,
+ DROPPING = 68
};
// Records
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp
index 6a478bea917..0c825af9f4b 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp
@@ -146,7 +146,7 @@ void Dbtup::sendReadAttrinfo(Signal* signal,
bool connectedToNode = getNodeInfo(nodeId).m_connected;
const Uint32 type = getNodeInfo(nodeId).m_type;
- bool is_api = (type >= NodeInfo::API && type <= NodeInfo::REP);
+ bool is_api = (type >= NodeInfo::API && type <= NodeInfo::MGM);
bool old_dest = (getNodeInfo(nodeId).m_version < MAKE_VERSION(3,5,0));
const Uint32 TpacketTA = hostBuffer[nodeId].noOfPacketsTA;
const Uint32 TpacketLen = hostBuffer[nodeId].packetLenTA;
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
index bacba2a880c..91bdd58e406 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
@@ -590,6 +590,7 @@ Dbtup::execDROP_TAB_REQ(Signal* signal)
tabPtr.p->m_dropTable.tabUserRef = req->senderRef;
tabPtr.p->m_dropTable.tabUserPtr = req->senderData;
+ tabPtr.p->tableStatus = DROPPING;
signal->theData[0] = ZREL_FRAG;
signal->theData[1] = tabPtr.i;
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
index de3e6434c95..0cdfe97ecb5 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
@@ -123,14 +123,21 @@ Dbtup::execCREATE_TRIG_REQ(Signal* signal)
BlockReference senderRef = signal->getSendersBlockRef();
const CreateTrigReq reqCopy = *(const CreateTrigReq*)signal->getDataPtr();
const CreateTrigReq* const req = &reqCopy;
+ CreateTrigRef::ErrorCode error= CreateTrigRef::NoError;
// Find table
TablerecPtr tabPtr;
tabPtr.i = req->getTableId();
ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);
+ if (tabPtr.p->tableStatus != DEFINED )
+ {
+ ljam();
+ error= CreateTrigRef::InvalidTable;
+ }
// Create trigger and associate it with the table
- if (createTrigger(tabPtr.p, req)) {
+ else if (createTrigger(tabPtr.p, req))
+ {
ljam();
// Send conf
CreateTrigConf* const conf = (CreateTrigConf*)signal->getDataPtrSend();
@@ -143,21 +150,26 @@ Dbtup::execCREATE_TRIG_REQ(Signal* signal)
conf->setTriggerInfo(req->getTriggerInfo());
sendSignal(senderRef, GSN_CREATE_TRIG_CONF,
signal, CreateTrigConf::SignalLength, JBB);
- } else {
+ return;
+ }
+ else
+ {
ljam();
- // Send ref
- CreateTrigRef* const ref = (CreateTrigRef*)signal->getDataPtrSend();
- ref->setUserRef(reference());
- ref->setConnectionPtr(req->getConnectionPtr());
- ref->setRequestType(req->getRequestType());
- ref->setTableId(req->getTableId());
- ref->setIndexId(req->getIndexId());
- ref->setTriggerId(req->getTriggerId());
- ref->setTriggerInfo(req->getTriggerInfo());
- ref->setErrorCode(CreateTrigRef::TooManyTriggers);
- sendSignal(senderRef, GSN_CREATE_TRIG_REF,
- signal, CreateTrigRef::SignalLength, JBB);
+ error= CreateTrigRef::TooManyTriggers;
}
+ ndbassert(error != CreateTrigRef::NoError);
+ // Send ref
+ CreateTrigRef* const ref = (CreateTrigRef*)signal->getDataPtrSend();
+ ref->setUserRef(reference());
+ ref->setConnectionPtr(req->getConnectionPtr());
+ ref->setRequestType(req->getRequestType());
+ ref->setTableId(req->getTableId());
+ ref->setIndexId(req->getIndexId());
+ ref->setTriggerId(req->getTriggerId());
+ ref->setTriggerInfo(req->getTriggerInfo());
+ ref->setErrorCode(error);
+ sendSignal(senderRef, GSN_CREATE_TRIG_REF,
+ signal, CreateTrigRef::SignalLength, JBB);
}//Dbtup::execCREATE_TRIG_REQ()
void
diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp b/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
index 657133bda36..c88779792dc 100644
--- a/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp
@@ -159,6 +159,7 @@ private:
BLOCK_DEFINES(Ndbcntr);
// Transit signals
+ void execAPI_START_REP(Signal*);
void execCONTINUEB(Signal* signal);
void execREAD_NODESCONF(Signal* signal);
void execREAD_NODESREF(Signal* signal);
diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
index 97ca3f44b3a..18f4b9fe0bf 100644
--- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
@@ -55,6 +55,7 @@ Ndbcntr::Ndbcntr(const class Configuration & conf):
addRecSignal(GSN_CNTR_START_CONF, &Ndbcntr::execCNTR_START_CONF);
addRecSignal(GSN_CNTR_WAITREP, &Ndbcntr::execCNTR_WAITREP);
addRecSignal(GSN_CNTR_START_REP, &Ndbcntr::execCNTR_START_REP);
+ addRecSignal(GSN_API_START_REP, &Ndbcntr::execAPI_START_REP, true);
addRecSignal(GSN_NODE_FAILREP, &Ndbcntr::execNODE_FAILREP);
addRecSignal(GSN_SYSTEM_ERROR , &Ndbcntr::execSYSTEM_ERROR);
diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
index 97b16f1dbc4..60461b3d46e 100644
--- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
@@ -136,6 +136,16 @@ void Ndbcntr::execCONTINUEB(Signal* signal)
}//switch
}//Ndbcntr::execCONTINUEB()
+void
+Ndbcntr::execAPI_START_REP(Signal* signal)
+{
+ if(refToBlock(signal->getSendersBlockRef()) == QMGR)
+ {
+ for(Uint32 i = 0; i<ALL_BLOCKS_SZ; i++){
+ sendSignal(ALL_BLOCKS[i].Ref, GSN_API_START_REP, signal, 1, JBB);
+ }
+ }
+}
/*******************************/
/* SYSTEM_ERROR */
/*******************************/
@@ -202,10 +212,6 @@ void Ndbcntr::execSTTOR(Signal* signal)
jamEntry();
cstartPhase = signal->theData[1];
- NodeState newState(NodeState::SL_STARTING, cstartPhase,
- (NodeState::StartType)ctypeOfStart);
- updateNodeState(signal, newState);
-
cndbBlocksCount = 0;
cinternalStartphase = cstartPhase - 1;
@@ -566,6 +572,13 @@ Ndbcntr::execCNTR_START_REP(Signal* signal){
Uint32 nodeId = signal->theData[0];
c_startedNodes.set(nodeId);
c_start.m_starting.clear(nodeId);
+
+ /**
+ * Inform all interested blocks that the node has started
+ */
+ for(Uint32 i = 0; i<ALL_BLOCKS_SZ; i++){
+ sendSignal(ALL_BLOCKS[i].Ref, GSN_NODE_START_REP, signal, 1, JBB);
+ }
if(!c_start.m_starting.isclear()){
jam();
@@ -2532,6 +2545,10 @@ void Ndbcntr::Missra::sendNextSTTOR(Signal* signal){
currentBlockIndex = 0;
+ NodeState newState(NodeState::SL_STARTING, currentStartPhase,
+ (NodeState::StartType)cntr.ctypeOfStart);
+ cntr.updateNodeState(signal, newState);
+
if(start != 0){
/**
* At least one wanted this start phase, report it
diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp
index 2a65271a32a..abfc0973a78 100644
--- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp
@@ -53,22 +53,30 @@ column_NDBEVENTS_0[] = {
DictTabInfo::ExtUnsigned, 1,
false, false
},
- { 2, "TABLE_NAME",
+ { 2, "TABLEID",
+ DictTabInfo::ExtUnsigned, 1,
+ false, false
+ },
+ { 3, "TABLEVERSION",
+ DictTabInfo::ExtUnsigned, 1,
+ false, false
+ },
+ { 4, "TABLE_NAME",
DictTabInfo::ExtBinary, MAX_TAB_NAME_SIZE,
false, false
},
- { 3, "ATTRIBUTE_MASK",
+ { 5, "ATTRIBUTE_MASK",
DictTabInfo::ExtUnsigned, MAXNROFATTRIBUTESINWORDS,
false, false
},
- { 4, "SUBID",
+ { 6, "SUBID",
DictTabInfo::ExtUnsigned, 1,
false, false
},
- { 5, "SUBKEY",
+ { 7, "SUBKEY",
DictTabInfo::ExtUnsigned, 1,
false, false
- }
+ },
};
const Ndbcntr::SysTable
diff --git a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
index 45073b63a5d..adbef0c7404 100644
--- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
+++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
@@ -367,7 +367,11 @@ void AsyncFile::openReq(Request* request)
break;
return;
}
- const int mode = S_IRUSR | S_IWUSR | S_IRGRP;
+ // allow the user to choose any permissions with umask
+ const int mode =
+ S_IRUSR | S_IWUSR |
+ S_IRGRP | S_IWGRP |
+ S_IROTH | S_IWOTH;
if (-1 == (theFd = ::open(theFileName.c_str(), new_flags, mode))) {
PRINT_ERRORANDFLAGS(new_flags);
diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
index 7052e0da98a..75cb92b98c1 100644
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
@@ -1931,9 +1931,6 @@ void Qmgr::execAPI_REGREQ(Signal* signal)
case NodeInfo::MGM:
compatability_check = ndbCompatible_ndb_mgmt(NDB_VERSION, version);
break;
- case NodeInfo::REP:
- // compatability_check = ndbCompatible_ndb_api(NDB_VERSION, version);
- // break;
case NodeInfo::DB:
case NodeInfo::INVALID:
default:
@@ -1964,7 +1961,7 @@ void Qmgr::execAPI_REGREQ(Signal* signal)
apiRegConf->qmgrRef = reference();
apiRegConf->apiHeartbeatFrequency = (chbApiDelay / 10);
apiRegConf->version = NDB_VERSION;
- apiRegConf->nodeState = getNodeState();
+ NodeState state= apiRegConf->nodeState = getNodeState();
{
NodeRecPtr nodePtr;
nodePtr.i = getOwnNodeId();
@@ -1982,9 +1979,12 @@ void Qmgr::execAPI_REGREQ(Signal* signal)
sendSignal(ref, GSN_API_REGCONF, signal, ApiRegConf::SignalLength, JBB);
- if ((getNodeState().startLevel == NodeState::SL_STARTED ||
- getNodeState().getSingleUserMode())
- && apiNodePtr.p->phase == ZAPI_INACTIVE) {
+ if (apiNodePtr.p->phase == ZAPI_INACTIVE &&
+ (state.startLevel == NodeState::SL_STARTED ||
+ state.getSingleUserMode() ||
+ (state.startLevel == NodeState::SL_STARTING &&
+ state.starting.startPhase >= 100)))
+ {
jam();
/**----------------------------------------------------------------------
* THE API NODE IS REGISTERING. WE WILL ACCEPT IT BY CHANGING STATE AND
@@ -1994,6 +1994,9 @@ void Qmgr::execAPI_REGREQ(Signal* signal)
apiNodePtr.p->blockRef = ref;
signal->theData[0] = apiNodePtr.i;
sendSignal(CMVMI_REF, GSN_ENABLE_COMORD, signal, 1, JBA);
+
+ signal->theData[0] = apiNodePtr.i;
+ EXECUTE_DIRECT(NDBCNTR, GSN_API_START_REP, signal, 1);
}
return;
}//Qmgr::execAPI_REGREQ()
diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
index c4225ad2a4c..fb830354798 100644
--- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
@@ -39,9 +39,14 @@
#include <signaldata/TrigAttrInfo.hpp>
#include <signaldata/CheckNodeGroups.hpp>
#include <signaldata/GCPSave.hpp>
-#include <GrepError.hpp>
+#include <signaldata/CreateTab.hpp>
+#include <signaldata/DropTab.hpp>
+#include <signaldata/AlterTab.hpp>
+
+#include <ndbapi/NdbDictionary.hpp>
#include <DebuggerNames.hpp>
+#include <../dbtup/Dbtup.hpp>
//#define HANDOVER_DEBUG
//#define NODEFAIL_DEBUG
@@ -72,6 +77,7 @@
Uint32 g_subPtrI = RNIL;
static const Uint32 SUMA_SEQUENCE = 0xBABEBABE;
+static const Uint32 MAX_CONCURRENT_GCP = 2;
/**************************************************************
*
@@ -80,11 +86,12 @@ static const Uint32 SUMA_SEQUENCE = 0xBABEBABE;
*/
#define PRINT_ONLY 0
-static Uint32 g_TypeOfStart = NodeState::ST_ILLEGAL_TYPE;
void
-Suma::getNodeGroupMembers(Signal* signal) {
+Suma::getNodeGroupMembers(Signal* signal)
+{
jam();
+ DBUG_ENTER("Suma::getNodeGroupMembers");
/**
* Ask DIH for nodeGroupMembers
*/
@@ -99,25 +106,45 @@ Suma::getNodeGroupMembers(Signal* signal) {
jamEntry();
c_nodeGroup = sd->output;
- c_noNodesInGroup = 0;
- for (int i = 0; i < MAX_NDB_NODES; i++) {
- if (sd->mask.get(i)) {
- if (i == getOwnNodeId()) c_idInNodeGroup = c_noNodesInGroup;
- c_nodesInGroup[c_noNodesInGroup] = i;
- c_noNodesInGroup++;
+ c_nodes_in_nodegroup_mask.assign(sd->mask);
+ c_noNodesInGroup = c_nodes_in_nodegroup_mask.count();
+ Uint32 i, pos= 0;
+
+ for (i = 0; i < MAX_NDB_NODES; i++) {
+ if (sd->mask.get(i))
+ {
+ c_nodesInGroup[pos++] = i;
}
}
+
+ const Uint32 replicas= c_noNodesInGroup;
- // ndbout_c("c_noNodesInGroup=%d", c_noNodesInGroup);
+ Uint32 buckets= 1;
+ for(i = 1; i <= replicas; i++)
+ buckets *= i;
+
+ for(i = 0; i<buckets; i++)
+ {
+ Bucket* ptr= c_buckets+i;
+ for(Uint32 j= 0; j< replicas; j++)
+ {
+ ptr->m_nodes[j] = c_nodesInGroup[(i + j) % replicas];
+ }
+ }
+
+ c_no_of_buckets= buckets;
ndbrequire(c_noNodesInGroup > 0); // at least 1 node in the nodegroup
-#ifdef NODEFAIL_DEBUG
+#ifndef DBUG_OFF
for (Uint32 i = 0; i < c_noNodesInGroup; i++) {
- ndbout_c ("Suma: NodeGroup %u, me %u, me in group %u, member[%u] %u",
- c_nodeGroup, getOwnNodeId(), c_idInNodeGroup,
- i, c_nodesInGroup[i]);
+ DBUG_PRINT("exit",("Suma: NodeGroup %u, me %u, "
+ "member[%u] %u",
+ c_nodeGroup, getOwnNodeId(),
+ i, c_nodesInGroup[i]));
}
#endif
+
+ DBUG_VOID_RETURN;
}
void
@@ -128,85 +155,134 @@ Suma::execSTTOR(Signal* signal) {
const Uint32 startphase = signal->theData[1];
const Uint32 typeOfStart = signal->theData[7];
- DBUG_PRINT("info",("startphase = %u, typeOfStart = %u", startphase, typeOfStart));
+ DBUG_PRINT("info",("startphase = %u, typeOfStart = %u",
+ startphase, typeOfStart));
- if(startphase == 1){
- jam();
- c_restartLock = true;
- }
-
- if(startphase == 3){
+ if(startphase == 3)
+ {
jam();
- g_TypeOfStart = typeOfStart;
+ ndbrequire((m_tup = (Dbtup*)globalData.getBlock(DBTUP)) != 0);
signal->theData[0] = reference();
sendSignal(NDBCNTR_REF, GSN_READ_NODESREQ, signal, 1, JBB);
-
-#if 0
-
- /**
- * Debug
- */
-
-
- SubscriptionPtr subPtr;
- Ptr<SyncRecord> syncPtr;
- ndbrequire(c_subscriptions.seize(subPtr));
- ndbrequire(c_syncPool.seize(syncPtr));
-
-
- ndbout_c("Suma: subPtr.i = %d syncPtr.i = %d", subPtr.i, syncPtr.i);
-
- subPtr.p->m_syncPtrI = syncPtr.i;
- subPtr.p->m_subscriptionType = SubCreateReq::DatabaseSnapshot;
- syncPtr.p->m_subscriptionPtrI = subPtr.i;
- syncPtr.p->ptrI = syncPtr.i;
- g_subPtrI = subPtr.i;
- // sendSTTORRY(signal);
-#endif
DBUG_VOID_RETURN;
}
- if(startphase == 5) {
+ if(startphase == 5)
+ {
getNodeGroupMembers(signal);
- if (g_TypeOfStart == NodeState::ST_NODE_RESTART) {
+ if (typeOfStart == NodeState::ST_NODE_RESTART ||
+ typeOfStart == NodeState::ST_INITIAL_NODE_RESTART)
+ {
jam();
- for (Uint32 i = 0; i < c_noNodesInGroup; i++) {
- Uint32 ref = calcSumaBlockRef(c_nodesInGroup[i]);
- if (ref != reference())
- sendSignal(ref, GSN_SUMA_START_ME, signal,
- 1 /*SumaStartMe::SignalLength*/, JBB);
- }
+
+ send_start_me_req(signal);
+ return;
}
}
- if(startphase == 7) {
- c_restartLock = false; // may be set false earlier with HANDOVER_REQ
-
- if (g_TypeOfStart != NodeState::ST_NODE_RESTART) {
- for( int i = 0; i < NO_OF_BUCKETS; i++) {
- if (getResponsibleSumaNodeId(i) == refToNode(reference())) {
+ if(startphase == 7)
+ {
+ if (typeOfStart != NodeState::ST_NODE_RESTART &&
+ typeOfStart != NodeState::ST_INITIAL_NODE_RESTART)
+ {
+ for( Uint32 i = 0; i < c_no_of_buckets; i++)
+ {
+ if (get_responsible_node(i) == getOwnNodeId())
+ {
// I'm running this bucket
DBUG_PRINT("info",("bucket %u set to true", i));
- c_buckets[i].active = true;
+ m_active_buckets.set(i);
+ ndbout_c("m_active_buckets.set(%d)", i);
}
}
}
-
- if(g_TypeOfStart == NodeState::ST_INITIAL_START &&
- c_masterNodeId == getOwnNodeId()) {
+
+ if(!m_active_buckets.isclear())
+ m_gcp_complete_rep_count = 1; // I contribute 1 gcp complete rep
+ else
+ m_gcp_complete_rep_count = 0; // I contribute 0 gcp complete reps
+
+ if(typeOfStart == NodeState::ST_INITIAL_START &&
+ c_masterNodeId == getOwnNodeId())
+ {
jam();
createSequence(signal);
DBUG_VOID_RETURN;
}//if
}//if
+ if(startphase == 100)
+ {
+ /**
+ * Allow APIs to connect
+ */
+ sendSTTORRY(signal);
+ return;
+ }
+ if(startphase == 101)
+ {
+ if (typeOfStart == NodeState::ST_NODE_RESTART ||
+ typeOfStart == NodeState::ST_INITIAL_NODE_RESTART)
+ {
+ /**
+ * Handover code here
+ */
+ c_startup.m_wait_handover= true;
+ check_start_handover(signal);
+ return;
+ }
+ }
sendSTTORRY(signal);
DBUG_VOID_RETURN;
}
void
+Suma::send_start_me_req(Signal* signal)
+{
+ Uint32 nodeId= c_startup.m_restart_server_node_id;
+ do {
+ nodeId = c_alive_nodes.find(nodeId + 1);
+
+ if(nodeId == getOwnNodeId())
+ continue;
+ if(nodeId == NdbNodeBitmask::NotFound)
+ {
+ nodeId = 0;
+ continue;
+ }
+ break;
+ } while(true);
+
+
+ infoEvent("Suma: asking node %d to recreate subscriptions on me", nodeId);
+ c_startup.m_restart_server_node_id= nodeId;
+ sendSignal(calcSumaBlockRef(nodeId),
+ GSN_SUMA_START_ME_REQ, signal, 1, JBB);
+}
+
+void
+Suma::execSUMA_START_ME_REF(Signal* signal)
+{
+ const SumaStartMeRef* ref= (SumaStartMeRef*)signal->getDataPtr();
+ ndbrequire(ref->errorCode == SumaStartMeRef::Busy);
+
+ infoEvent("Suma: node %d refused %d",
+ c_startup.m_restart_server_node_id, ref->errorCode);
+ send_start_me_req(signal);
+}
+
+void
+Suma::execSUMA_START_ME_CONF(Signal* signal)
+{
+ infoEvent("Suma: node %d has completed restoring me",
+ c_startup.m_restart_server_node_id);
+ sendSTTORRY(signal);
+ c_startup.m_restart_server_node_id= 0;
+}
+
+void
Suma::createSequence(Signal* signal)
{
jam();
@@ -241,35 +317,71 @@ Suma::execREAD_NODESCONF(Signal* signal){
jamEntry();
ReadNodesConf * const conf = (ReadNodesConf *)signal->getDataPtr();
- c_aliveNodes.clear();
- c_preparingNodes.clear();
+ if(getNodeState().getNodeRestartInProgress())
+ {
+ c_alive_nodes.assign(NdbNodeBitmask::Size, conf->startedNodes);
+ c_alive_nodes.set(getOwnNodeId());
+ }
+ else
+ {
+ c_alive_nodes.assign(NdbNodeBitmask::Size, conf->startingNodes);
+ NdbNodeBitmask tmp;
+ tmp.assign(NdbNodeBitmask::Size, conf->startedNodes);
+ ndbrequire(tmp.isclear()); // No nodes can be started during SR
+ }
+
+ c_masterNodeId = conf->masterNodeId;
+
+ sendSTTORRY(signal);
+}
- Uint32 count = 0;
- for(Uint32 i = 0; i < MAX_NDB_NODES; i++){
- if(NodeBitmask::get(conf->allNodes, i)){
- jam();
-
- count++;
+void
+Suma::execAPI_START_REP(Signal* signal)
+{
+ Uint32 nodeId = signal->theData[0];
+ c_connected_nodes.set(nodeId);
+
+ check_start_handover(signal);
+}
- NodePtr node;
- ndbrequire(c_nodes.seize(node));
-
- node.p->nodeId = i;
- if(NodeBitmask::get(conf->inactiveNodes, i)){
- jam();
- node.p->alive = 0;
- } else {
- jam();
- node.p->alive = 1;
- c_aliveNodes.set(i);
- }
- } else
- jam();
+void
+Suma::check_start_handover(Signal* signal)
+{
+ if(c_startup.m_wait_handover)
+ {
+ NodeBitmask tmp;
+ tmp.assign(c_connected_nodes);
+ tmp.bitAND(c_subscriber_nodes);
+ if(!c_subscriber_nodes.equal(tmp))
+ {
+ return;
+ }
+
+ c_startup.m_wait_handover= false;
+ send_handover_req(signal);
}
- c_masterNodeId = conf->masterNodeId;
- ndbrequire(count == conf->noOfNodes);
+}
- sendSTTORRY(signal);
+void
+Suma::send_handover_req(Signal* signal)
+{
+ c_startup.m_handover_nodes.assign(c_alive_nodes);
+ c_startup.m_handover_nodes.bitAND(c_nodes_in_nodegroup_mask);
+ c_startup.m_handover_nodes.clear(getOwnNodeId());
+ Uint32 gci= m_last_complete_gci + 3;
+
+ SumaHandoverReq* req= (SumaHandoverReq*)signal->getDataPtrSend();
+ char buf[255];
+ c_startup.m_handover_nodes.getText(buf);
+ infoEvent("Suma: initiate handover with nodes %s GCI: %d",
+ buf, gci);
+
+ req->gci = gci;
+ req->nodeId = getOwnNodeId();
+
+ NodeReceiverGroup rg(SUMA, c_startup.m_handover_nodes);
+ sendSignal(rg, GSN_SUMA_HANDOVER_REQ, signal,
+ SumaHandoverReq::SignalLength, JBB);
}
#if 0
@@ -313,8 +425,10 @@ Suma::sendSTTORRY(Signal* signal){
signal->theData[4] = 3;
signal->theData[5] = 5;
signal->theData[6] = 7;
- signal->theData[7] = 255; // No more start phases from missra
- sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 8, JBB);
+ signal->theData[7] = 100;
+ signal->theData[8] = 101;
+ signal->theData[9] = 255; // No more start phases from missra
+ sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 10, JBB);
}
void
@@ -326,12 +440,22 @@ Suma::execNDB_STTOR(Signal* signal)
void
Suma::execCONTINUEB(Signal* signal){
jamEntry();
-}
-
-void
-SumaParticipant::execCONTINUEB(Signal* signal)
-{
- jamEntry();
+ Uint32 type= signal->theData[0];
+ switch(type){
+ case SumaContinueB::RELEASE_GCI:
+ release_gci(signal, signal->theData[1], signal->theData[2]);
+ return;
+ case SumaContinueB::RESEND_BUCKET:
+ resend_bucket(signal,
+ signal->theData[1],
+ signal->theData[2],
+ signal->theData[3],
+ signal->theData[4]);
+ return;
+ case SumaContinueB::OUT_OF_BUFFER_RELEASE:
+ out_of_buffer_release(signal, signal->theData[1]);
+ return;
+ }
}
/*****************************************************************************
@@ -348,32 +472,60 @@ void Suma::execAPI_FAILREQ(Signal* signal)
//BlockReference retRef = signal->theData[1];
c_failedApiNodes.set(failedApiNode);
+ c_connected_nodes.clear(failedApiNode);
bool found = removeSubscribersOnNode(signal, failedApiNode);
if(!found){
jam();
c_failedApiNodes.clear(failedApiNode);
}
+
+ SubGcpCompleteAck * const ack = (SubGcpCompleteAck*)signal->getDataPtr();
+ Ptr<Gcp_record> gcp;
+ for(c_gcp_list.first(gcp); !gcp.isNull(); c_gcp_list.next(gcp))
+ {
+ ack->rep.gci = gcp.p->m_gci;
+ if(gcp.p->m_subscribers.get(failedApiNode))
+ {
+ ack->rep.senderRef = numberToRef(0, failedApiNode);
+ sendSignal(SUMA_REF, GSN_SUB_GCP_COMPLETE_ACK, signal,
+ SubGcpCompleteAck::SignalLength, JBB);
+ }
+ }
+
+ c_subscriber_nodes.clear(failedApiNode);
+
+ check_start_handover(signal);
+
DBUG_VOID_RETURN;
}//execAPI_FAILREQ()
bool
-SumaParticipant::removeSubscribersOnNode(Signal *signal, Uint32 nodeId)
+Suma::removeSubscribersOnNode(Signal *signal, Uint32 nodeId)
{
- DBUG_ENTER("SumaParticipant::removeSubscribersOnNode");
+ DBUG_ENTER("Suma::removeSubscribersOnNode");
bool found = false;
- SubscriberPtr i_subbPtr;
- c_dataSubscribers.first(i_subbPtr);
- while(!i_subbPtr.isNull()){
- SubscriberPtr subbPtr = i_subbPtr;
- c_dataSubscribers.next(i_subbPtr);
- jam();
- if (refToNode(subbPtr.p->m_subscriberRef) == nodeId) {
+ KeyTable<Table>::Iterator it;
+ for(c_tables.first(it);!it.isNull();c_tables.next(it))
+ {
+ LocalDLList<Subscriber> subbs(c_subscriberPool,it.curr.p->c_subscribers);
+ SubscriberPtr i_subbPtr;
+ for(subbs.first(i_subbPtr);!i_subbPtr.isNull();)
+ {
+ SubscriberPtr subbPtr = i_subbPtr;
+ subbs.next(i_subbPtr);
jam();
- c_dataSubscribers.remove(subbPtr);
- c_removeDataSubscribers.add(subbPtr);
- found = true;
+ if (refToNode(subbPtr.p->m_senderRef) == nodeId) {
+ jam();
+ subbs.remove(subbPtr);
+ c_removeDataSubscribers.add(subbPtr);
+ found = true;
+ }
+ }
+ if (subbs.isEmpty())
+ {
+ // ToDo handle this
}
}
if(found){
@@ -384,10 +536,10 @@ SumaParticipant::removeSubscribersOnNode(Signal *signal, Uint32 nodeId)
}
void
-SumaParticipant::sendSubStopReq(Signal *signal, bool unlock){
- DBUG_ENTER("SumaParticipant::sendSubStopReq");
+Suma::sendSubStopReq(Signal *signal, bool unlock){
static bool remove_lock = false;
jam();
+ DBUG_ENTER("Suma::sendSubStopReq");
SubscriberPtr subbPtr;
c_removeDataSubscribers.first(subbPtr);
@@ -416,75 +568,54 @@ SumaParticipant::sendSubStopReq(Signal *signal, bool unlock){
SubStopReq * const req = (SubStopReq*)signal->getDataPtrSend();
req->senderRef = reference();
req->senderData = subbPtr.i;
- req->subscriberRef = subbPtr.p->m_subscriberRef;
- req->subscriberData = subbPtr.p->m_subscriberData;
+ req->subscriberRef = subbPtr.p->m_senderRef;
+ req->subscriberData = subbPtr.p->m_senderData;
req->subscriptionId = subPtr.p->m_subscriptionId;
req->subscriptionKey = subPtr.p->m_subscriptionKey;
req->part = SubscriptionData::TableData;
- sendSignal(SUMA_REF, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB);
+ sendSignal(SUMA_REF,GSN_SUB_STOP_REQ,signal,SubStopReq::SignalLength,JBB);
DBUG_VOID_RETURN;
}
void
-SumaParticipant::execSUB_STOP_CONF(Signal* signal){
+Suma::execSUB_STOP_CONF(Signal* signal){
jamEntry();
- DBUG_ENTER("SumaParticipant::execSUB_STOP_CONF");
-
- SubStopConf * const conf = (SubStopConf*)signal->getDataPtr();
-
- // Uint32 subscriberData = conf->subscriberData;
- // Uint32 subscriberRef = conf->subscriberRef;
-
- Subscription key;
- key.m_subscriptionId = conf->subscriptionId;
- key.m_subscriptionKey = conf->subscriptionKey;
-
- SubscriptionPtr subPtr;
- if(c_subscriptions.find(subPtr, key)) {
- jam();
- if (subPtr.p->m_markRemove) {
- jam();
- ndbrequire(false);
- ndbrequire(subPtr.p->m_nSubscribers > 0);
- subPtr.p->m_nSubscribers--;
- if (subPtr.p->m_nSubscribers == 0){
- jam();
- completeSubRemoveReq(signal, subPtr);
- }
- }
- }
-
+ DBUG_ENTER("Suma::execSUB_STOP_CONF");
+ ndbassert(signal->getNoOfSections() == 0);
sendSubStopReq(signal,true);
DBUG_VOID_RETURN;
}
void
-SumaParticipant::execSUB_STOP_REF(Signal* signal){
+Suma::execSUB_STOP_REF(Signal* signal){
jamEntry();
- DBUG_ENTER("SumaParticipant::execSUB_STOP_REF");
+ DBUG_ENTER("Suma::execSUB_STOP_REF");
+ ndbassert(signal->getNoOfSections() == 0);
SubStopRef * const ref = (SubStopRef*)signal->getDataPtr();
- Uint32 subscriptionId = ref->subscriptionId;
+ Uint32 senderData = ref->senderData;
+ Uint32 subscriptionId = ref->subscriptionId;
Uint32 subscriptionKey = ref->subscriptionKey;
- Uint32 part = ref->part;
- Uint32 subscriberData = ref->subscriberData;
- Uint32 subscriberRef = ref->subscriberRef;
- // Uint32 err = ref->err;
+ Uint32 part = ref->part;
+ Uint32 subscriberData = ref->subscriberData;
+ Uint32 subscriberRef = ref->subscriberRef;
- if(!ref->isTemporary()){
+ if(ref->errorCode != 1411){
ndbrequire(false);
}
SubStopReq * const req = (SubStopReq*)signal->getDataPtrSend();
- req->subscriberRef = subscriberRef;
- req->subscriberData = subscriberData;
- req->subscriptionId = subscriptionId;
+ req->senderRef = reference();
+ req->senderData = senderData;
+ req->subscriberRef = subscriberRef;
+ req->subscriberData = subscriberData;
+ req->subscriptionId = subscriptionId;
req->subscriptionKey = subscriptionKey;
req->part = part;
- sendSignal(SUMA_REF, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB);
+ sendSignal(SUMA_REF,GSN_SUB_STOP_REQ,signal,SubStopReq::SignalLength,JBB);
DBUG_VOID_RETURN;
}
@@ -493,71 +624,55 @@ void
Suma::execNODE_FAILREP(Signal* signal){
jamEntry();
DBUG_ENTER("Suma::execNODE_FAILREP");
+ ndbassert(signal->getNoOfSections() == 0);
- NodeFailRep * const rep = (NodeFailRep*)signal->getDataPtr();
+ const NodeFailRep * rep = (NodeFailRep*)signal->getDataPtr();
+ NdbNodeBitmask failed; failed.assign(NdbNodeBitmask::Size, rep->theNodes);
- bool changed = false;
+ if(failed.get(Restart.nodeId))
+ {
+ Restart.nodeId = 0;
+ }
- NodePtr nodePtr;
-#ifdef NODEFAIL_DEBUG
- ndbout_c("Suma: nodefailrep");
-#endif
- c_nodeFailGCI = getFirstGCI(signal);
+ signal->theData[0] = SumaContinueB::RESEND_BUCKET;
- for(c_nodes.first(nodePtr); nodePtr.i != RNIL; c_nodes.next(nodePtr)){
- if(NodeBitmask::get(rep->theNodes, nodePtr.p->nodeId)){
- if(nodePtr.p->alive){
- ndbassert(c_aliveNodes.get(nodePtr.p->nodeId));
- changed = true;
- jam();
- } else {
- ndbassert(!c_aliveNodes.get(nodePtr.p->nodeId));
- jam();
- }
-
- if (c_preparingNodes.get(nodePtr.p->nodeId)) {
- jam();
- // we are currently preparing this node that died
- // it's ok just to clear and go back to waiting for it to start up
- Restart.resetNode(calcSumaBlockRef(nodePtr.p->nodeId));
- c_preparingNodes.clear(nodePtr.p->nodeId);
- } else if (c_handoverToDo) {
- jam();
- // TODO what if I'm a SUMA that is currently restarting and the SUMA
- // responsible for restarting me is the one that died?
-
- // a node has failed whilst handover is going on
- // let's check if we're in the process of handover with that node
- c_handoverToDo = false;
- for( int i = 0; i < NO_OF_BUCKETS; i++) {
- if (c_buckets[i].handover) {
- // I'm doing handover, but is it with the dead node?
- if (getResponsibleSumaNodeId(i) == nodePtr.p->nodeId) {
- // so it was the dead node, has handover started?
- if (c_buckets[i].handover_started) {
- jam();
- // we're not ok and will have lost data!
- // set not active to indicate this -
- // this will generate takeover behaviour
- c_buckets[i].active = false;
- c_buckets[i].handover_started = false;
- } // else we're ok to revert back to state before
- c_buckets[i].handover = false;
- } else {
- jam();
- // ok, we're doing handover with a different node
- c_handoverToDo = true;
- }
- }
- }
- }
+ NdbNodeBitmask tmp;
+ tmp.assign(c_alive_nodes);
+ tmp.bitANDC(failed);
- c_failoverBuffer.nodeFailRep();
+ NdbNodeBitmask takeover_nodes;
- nodePtr.p->alive = 0;
- c_aliveNodes.clear(nodePtr.p->nodeId); // this has to be done after the loop above
+ if(c_nodes_in_nodegroup_mask.overlaps(failed))
+ {
+ for( Uint32 i = 0; i < c_no_of_buckets; i++)
+ {
+ if(m_active_buckets.get(i))
+ continue;
+ else if(m_switchover_buckets.get(i))
+ {
+ Uint32 state= c_buckets[i].m_state;
+ if((state & Bucket::BUCKET_HANDOVER) &&
+ failed.get(get_responsible_node(i)))
+ {
+ m_active_buckets.set(i);
+ m_switchover_buckets.clear(i);
+ ndbout_c("aborting handover");
+ }
+ else if(state & Bucket::BUCKET_STARTING)
+ {
+ progError(__LINE__, ERR_SYSTEM_ERROR,
+ "Nodefailure during SUMA takeover");
+ }
+ }
+ else if(get_responsible_node(i, tmp) == getOwnNodeId())
+ {
+ start_resend(signal, i);
+ }
}
}
+
+ c_alive_nodes.assign(tmp);
+
DBUG_VOID_RETURN;
}
@@ -566,49 +681,11 @@ Suma::execINCL_NODEREQ(Signal* signal){
jamEntry();
//const Uint32 senderRef = signal->theData[0];
- const Uint32 inclNode = signal->theData[1];
-
- NodePtr node;
- for(c_nodes.first(node); node.i != RNIL; c_nodes.next(node)){
- jam();
- const Uint32 nodeId = node.p->nodeId;
- if(inclNode == nodeId){
- jam();
-
- ndbrequire(node.p->alive == 0);
- ndbrequire(!c_aliveNodes.get(nodeId));
-
- for (Uint32 j = 0; j < c_noNodesInGroup; j++) {
- jam();
- if (c_nodesInGroup[j] == nodeId) {
- // the starting node is part of my node group
- jam();
- c_preparingNodes.set(nodeId); // set as being prepared
- for (Uint32 i = 0; i < c_noNodesInGroup; i++) {
- jam();
- if (i == c_idInNodeGroup) {
- jam();
- // I'm responsible for restarting this SUMA
- // ALL dict's should have meta data info so it is ok to start
- Restart.startNode(signal, calcSumaBlockRef(nodeId));
- break;
- }//if
- if (c_aliveNodes.get(c_nodesInGroup[i])) {
- jam();
- break; // another Suma takes care of this
- }//if
- }//for
- break;
- }//if
- }//for
-
- node.p->alive = 1;
- c_aliveNodes.set(nodeId);
-
- break;
- }//if
- }//for
+ const Uint32 nodeId = signal->theData[1];
+ ndbrequire(!c_alive_nodes.get(nodeId));
+ c_alive_nodes.set(nodeId);
+
#if 0 // if we include this DIH's got to be prepared, later if needed...
signal->theData[0] = reference();
@@ -629,10 +706,10 @@ Suma::execSIGNAL_DROPPED_REP(Signal* signal){
*/
static unsigned
-count_subscribers(const DLList<SumaParticipant::Subscriber> &subs)
+count_subscribers(const DLList<Suma::Subscriber> &subs)
{
unsigned n= 0;
- SumaParticipant::SubscriberPtr i_subbPtr;
+ Suma::SubscriberPtr i_subbPtr;
subs.first(i_subbPtr);
while(!i_subbPtr.isNull()){
n++;
@@ -646,13 +723,14 @@ Suma::execDUMP_STATE_ORD(Signal* signal){
jamEntry();
Uint32 tCase = signal->theData[0];
+#if 0
if(tCase >= 8000 && tCase <= 8003){
SubscriptionPtr subPtr;
c_subscriptions.getPtr(subPtr, g_subPtrI);
Ptr<SyncRecord> syncPtr;
c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
-
+
if(tCase == 8000){
syncPtr.p->startMeta(signal);
}
@@ -674,15 +752,15 @@ Suma::execDUMP_STATE_ORD(Signal* signal){
attrs.append(att, 3);
}
}
-
+#endif
if(tCase == 8004){
infoEvent("Suma: c_subscriberPool size: %d free: %d",
c_subscriberPool.getSize(),
c_subscriberPool.getNoOfFree());
infoEvent("Suma: c_tablePool size: %d free: %d",
- c_tablePool_.getSize(),
- c_tablePool_.getNoOfFree());
+ c_tablePool.getSize(),
+ c_tablePool.getNoOfFree());
infoEvent("Suma: c_subscriptionPool size: %d free: %d",
c_subscriptionPool.getSize(),
@@ -698,119 +776,34 @@ Suma::execDUMP_STATE_ORD(Signal* signal){
infoEvent("Suma: c_metaSubscribers count: %d",
count_subscribers(c_metaSubscribers));
+#if 0
infoEvent("Suma: c_dataSubscribers count: %d",
count_subscribers(c_dataSubscribers));
infoEvent("Suma: c_prepDataSubscribers count: %d",
count_subscribers(c_prepDataSubscribers));
+#endif
infoEvent("Suma: c_removeDataSubscribers count: %d",
count_subscribers(c_removeDataSubscribers));
}
-}
-/********************************************************************
- *
- * Convert a table name (db+schema+tablename) to tableId
- *
- */
-
-#if 0
-void
-SumaParticipant::convertNameToId(SubscriptionPtr subPtr, Signal * signal)
-{
- jam();
- if(subPtr.p->m_currentTable < subPtr.p->m_maxTables) {
- jam();
-
- GetTableIdReq * req = (GetTableIdReq *)signal->getDataPtrSend();
- char * tableName = subPtr.p->m_tableNames[subPtr.p->m_currentTable];
- const Uint32 strLen = strlen(tableName) + 1; // NULL Terminated
- req->senderRef = reference();
- req->senderData = subPtr.i;
- req->len = strLen;
-
- LinearSectionPtr ptr[1];
- ptr[0].p = (Uint32*)tableName;
- ptr[0].sz = strLen;
-
- sendSignal(DBDICT_REF,
- GSN_GET_TABLEID_REQ,
- signal,
- GetTableIdReq::SignalLength,
- JBB,
- ptr,
- 1);
- } else {
- jam();
- sendSubCreateConf(signal, subPtr.p->m_subscriberRef, subPtr);
- }
-}
-#endif
-
-
-void
-SumaParticipant::addTableId(Uint32 tableId,
- SubscriptionPtr subPtr, SyncRecord *psyncRec)
-{
-#ifdef NODEFAIL_DEBUG
- ndbout_c("SumaParticipant::addTableId(%u,%u,%u), current_table=%u",
- tableId, subPtr.i, psyncRec, subPtr.p->m_currentTable);
-#endif
- subPtr.p->m_tables[tableId] = 1;
- subPtr.p->m_currentTable++;
- if(psyncRec != NULL)
- psyncRec->m_tableList.append(&tableId, 1);
-}
-
-#if 0
-void
-SumaParticipant::execGET_TABLEID_CONF(Signal * signal)
-{
- jamEntry();
-
- GetTableIdConf* conf = (GetTableIdConf *)signal->getDataPtr();
- Uint32 tableId = conf->tableId;
- //Uint32 schemaVersion = conf->schemaVersion;
- Uint32 senderData = conf->senderData;
-
- SubscriptionPtr subPtr;
- Ptr<SyncRecord> syncPtr;
-
- c_subscriptions.getPtr(subPtr, senderData);
- c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
-
- /*
- * add to m_tableList
- */
- addTableId(tableId, subPtr, syncPtr.p);
-
- convertNameToId(subPtr, signal);
-}
-
-void
-SumaParticipant::execGET_TABLEID_REF(Signal * signal)
-{
- jamEntry();
- GetTableIdRef const * ref = (GetTableIdRef *)signal->getDataPtr();
- Uint32 senderData = ref->senderData;
- // Uint32 err = ref->err;
-
- SubscriptionPtr subPtr;
- c_subscriptions.getPtr(subPtr, senderData);
- Uint32 subData = subPtr.p->m_subscriberData;
- SubCreateRef * reff = (SubCreateRef*)ref;
- /**
- * @todo: map ref->err to GrepError.
- */
- reff->err = GrepError::SELECTED_TABLE_NOT_FOUND;
- reff->subscriberData = subData;
- sendSignal(subPtr.p->m_subscriberRef,
- GSN_SUB_CREATE_REF,
- signal,
- SubCreateRef::SignalLength,
- JBB);
+ if(tCase == 8005)
+ {
+ for(Uint32 i = 0; i<c_no_of_buckets; i++)
+ {
+ Bucket* ptr= c_buckets + i;
+ infoEvent("Bucket %d %d%d-%x switch gci: %d max_acked_gci: %d max_gci: %d tail: %d head: %d",
+ i,
+ m_active_buckets.get(i),
+ m_switchover_buckets.get(i),
+ ptr->m_state,
+ ptr->m_switchover_gci,
+ ptr->m_max_acked_gci,
+ ptr->m_buffer_head.m_max_gci,
+ ptr->m_buffer_tail,
+ ptr->m_buffer_head.m_page_id);
+ }
+ }
}
-#endif
-
/*************************************************************
*
@@ -822,7 +815,8 @@ void
Suma::execCREATE_SUBID_REQ(Signal* signal)
{
jamEntry();
-
+ DBUG_ENTER("Suma::execCREATE_SUBID_REQ");
+ ndbassert(signal->getNoOfSections() == 0);
CRASH_INSERTION(13001);
CreateSubscriptionIdReq const * req =
@@ -830,29 +824,32 @@ Suma::execCREATE_SUBID_REQ(Signal* signal)
SubscriberPtr subbPtr;
if(!c_subscriberPool.seize(subbPtr)){
jam();
- sendSubIdRef(signal, GrepError::SUBSCRIPTION_ID_NOMEM);
- return;
+ sendSubIdRef(signal, req->senderRef, req->senderData, 1412);
+ DBUG_VOID_RETURN;
}
+ DBUG_PRINT("info",("c_subscriberPool size: %d free: %d",
+ c_subscriberPool.getSize(),
+ c_subscriberPool.getNoOfFree()));
- subbPtr.p->m_subscriberRef = signal->getSendersBlockRef();
- subbPtr.p->m_senderData = req->senderData;
- subbPtr.p->m_subscriberData = subbPtr.i;
+ subbPtr.p->m_senderRef = req->senderRef;
+ subbPtr.p->m_senderData = req->senderData;
UtilSequenceReq * utilReq = (UtilSequenceReq*)signal->getDataPtrSend();
-
- utilReq->senderData = subbPtr.p->m_subscriberData;
+ utilReq->senderData = subbPtr.i;
utilReq->sequenceId = SUMA_SEQUENCE;
utilReq->requestType = UtilSequenceReq::NextVal;
sendSignal(DBUTIL_REF, GSN_UTIL_SEQUENCE_REQ,
signal, UtilSequenceReq::SignalLength, JBB);
+
+ DBUG_VOID_RETURN;
}
void
Suma::execUTIL_SEQUENCE_CONF(Signal* signal)
{
jamEntry();
-
DBUG_ENTER("Suma::execUTIL_SEQUENCE_CONF");
+ ndbassert(signal->getNoOfSections() == 0);
CRASH_INSERTION(13002);
UtilSequenceConf * conf = (UtilSequenceConf*)signal->getDataPtr();
@@ -864,22 +861,22 @@ Suma::execUTIL_SEQUENCE_CONF(Signal* signal)
Uint64 subId;
memcpy(&subId,conf->sequenceValue,8);
- Uint32 subData = conf->senderData;
-
SubscriberPtr subbPtr;
- c_subscriberPool.getPtr(subbPtr,subData);
-
+ c_subscriberPool.getPtr(subbPtr,conf->senderData);
CreateSubscriptionIdConf * subconf = (CreateSubscriptionIdConf*)conf;
+ subconf->senderRef = reference();
+ subconf->senderData = subbPtr.p->m_senderData;
subconf->subscriptionId = (Uint32)subId;
subconf->subscriptionKey =(getOwnNodeId() << 16) | (Uint32)(subId & 0xFFFF);
- subconf->subscriberData = subbPtr.p->m_senderData;
- sendSignal(subbPtr.p->m_subscriberRef, GSN_CREATE_SUBID_CONF, signal,
+ sendSignal(subbPtr.p->m_senderRef, GSN_CREATE_SUBID_CONF, signal,
CreateSubscriptionIdConf::SignalLength, JBB);
c_subscriberPool.release(subbPtr);
-
+ DBUG_PRINT("info",("c_subscriberPool size: %d free: %d",
+ c_subscriberPool.getSize(),
+ c_subscriberPool.getNoOfFree()));
DBUG_VOID_RETURN;
}
@@ -888,7 +885,9 @@ Suma::execUTIL_SEQUENCE_REF(Signal* signal)
{
jamEntry();
DBUG_ENTER("Suma::execUTIL_SEQUENCE_REF");
+ ndbassert(signal->getNoOfSections() == 0);
UtilSequenceRef * ref = (UtilSequenceRef*)signal->getDataPtr();
+ Uint32 err= ref->errorCode;
if(ref->requestType == UtilSequenceReq::Create) {
jam();
@@ -900,27 +899,35 @@ Suma::execUTIL_SEQUENCE_REF(Signal* signal)
SubscriberPtr subbPtr;
c_subscriberPool.getPtr(subbPtr,subData);
- sendSubIdRef(signal, GrepError::SEQUENCE_ERROR);
+ sendSubIdRef(signal, subbPtr.p->m_senderRef, subbPtr.p->m_senderData, err);
c_subscriberPool.release(subbPtr);
+ DBUG_PRINT("info",("c_subscriberPool size: %d free: %d",
+ c_subscriberPool.getSize(),
+ c_subscriberPool.getNoOfFree()));
DBUG_VOID_RETURN;
}//execUTIL_SEQUENCE_REF()
void
-SumaParticipant::sendSubIdRef(Signal* signal, Uint32 errCode){
+Suma::sendSubIdRef(Signal* signal,
+ Uint32 senderRef, Uint32 senderData, Uint32 errCode)
+{
jam();
+ DBUG_ENTER("Suma::sendSubIdRef");
CreateSubscriptionIdRef * ref =
(CreateSubscriptionIdRef *)signal->getDataPtrSend();
- ref->err = errCode;
- sendSignal(signal->getSendersBlockRef(),
+ ref->senderRef = reference();
+ ref->senderData = senderData;
+ ref->errorCode = errCode;
+ sendSignal(senderRef,
GSN_CREATE_SUBID_REF,
signal,
CreateSubscriptionIdRef::SignalLength,
JBB);
- releaseSections(signal);
- return;
+ releaseSections(signal);
+ DBUG_VOID_RETURN;
}
/**********************************************************
@@ -929,227 +936,113 @@ SumaParticipant::sendSubIdRef(Signal* signal, Uint32 errCode){
* Creation of subscriptions
*/
+void
+Suma::addTableId(Uint32 tableId,
+ SubscriptionPtr subPtr, SyncRecord *psyncRec)
+{
+ DBUG_ENTER("Suma::addTableId");
+ DBUG_PRINT("enter",("tableId: %u subPtr.i: %u", tableId, subPtr.i));
+ subPtr.p->m_tableId= tableId;
+ if(psyncRec != NULL)
+ psyncRec->m_tableList.append(&tableId, 1);
+ DBUG_VOID_RETURN;
+}
+
void
-SumaParticipant::execSUB_CREATE_REQ(Signal* signal) {
-#ifdef NODEFAIL_DEBUG
- ndbout_c("SumaParticipant::execSUB_CREATE_REQ");
-#endif
+Suma::execSUB_CREATE_REQ(Signal* signal)
+{
jamEntry();
-
+ DBUG_ENTER("Suma::execSUB_CREATE_REQ");
+ ndbassert(signal->getNoOfSections() == 0);
CRASH_INSERTION(13003);
const SubCreateReq req = *(SubCreateReq*)signal->getDataPtr();
+ const Uint32 subRef = req.senderRef;
+ const Uint32 subData = req.senderData;
const Uint32 subId = req.subscriptionId;
const Uint32 subKey = req.subscriptionKey;
- const Uint32 subRef = req.subscriberRef;
- const Uint32 subData = req.subscriberData;
const Uint32 type = req.subscriptionType & SubCreateReq::RemoveFlags;
const Uint32 flags = req.subscriptionType & SubCreateReq::GetFlags;
const bool addTableFlag = (flags & SubCreateReq::AddTableFlag) != 0;
const bool restartFlag = (flags & SubCreateReq::RestartFlag) != 0;
-
- const Uint32 sender = signal->getSendersBlockRef();
+ const Uint32 tableId = req.tableId;
Subscription key;
key.m_subscriptionId = subId;
key.m_subscriptionKey = subKey;
+ DBUG_PRINT("enter",("key.m_subscriptionId: %u, key.m_subscriptionKey: %u",
+ key.m_subscriptionId, key.m_subscriptionKey));
+
SubscriptionPtr subPtr;
- Ptr<SyncRecord> syncPtr;
-
+
if (addTableFlag) {
ndbrequire(restartFlag); //TODO remove this
if(!c_subscriptions.find(subPtr, key)) {
jam();
- sendSubCreateRef(signal, req, GrepError::SUBSCRIPTION_NOT_FOUND);
- return;
+ sendSubCreateRef(signal, 1407);
+ DBUG_VOID_RETURN;
}
jam();
- c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
+ if (restartFlag)
+ {
+ ndbrequire(type != SubCreateReq::SingleTableScan);
+ ndbrequire(req.tableId != subPtr.p->m_tableId);
+ ndbrequire(type != SubCreateReq::TableEvent);
+ addTableId(req.tableId, subPtr, 0);
+ }
} else {
// Check that id/key is unique
if(c_subscriptions.find(subPtr, key)) {
jam();
- sendSubCreateRef(signal, req, GrepError::SUBSCRIPTION_ID_NOT_UNIQUE);
- return;
+ sendSubCreateRef(signal, 1415);
+ DBUG_VOID_RETURN;
}
if(!c_subscriptions.seize(subPtr)) {
jam();
- sendSubCreateRef(signal, req, GrepError::NOSPACE_IN_POOL);
- return;
- }
- if(!c_syncPool.seize(syncPtr)) {
- jam();
- sendSubCreateRef(signal, req, GrepError::NOSPACE_IN_POOL);
- return;
+ sendSubCreateRef(signal, 1412);
+ DBUG_VOID_RETURN;
}
+ DBUG_PRINT("info",("c_subscriptionPool size: %d free: %d",
+ c_subscriptionPool.getSize(),
+ c_subscriptionPool.getNoOfFree()));
jam();
- subPtr.p->m_subscriberRef = subRef;
- subPtr.p->m_subscriberData = subData;
+ subPtr.p->m_senderRef = subRef;
+ subPtr.p->m_senderData = subData;
subPtr.p->m_subscriptionId = subId;
subPtr.p->m_subscriptionKey = subKey;
subPtr.p->m_subscriptionType = type;
-
- /**
- * ok to memset? Support on all compilers
- * @todo find out if memset is supported by all compilers
- */
- memset(subPtr.p->m_tables,0,MAX_TABLES);
- subPtr.p->m_maxTables = 0;
- subPtr.p->m_currentTable = 0;
- subPtr.p->m_syncPtrI = syncPtr.i;
- subPtr.p->m_markRemove = false;
- subPtr.p->m_nSubscribers = 0;
-
- c_subscriptions.add(subPtr);
-
- syncPtr.p->m_subscriptionPtrI = subPtr.i;
- syncPtr.p->m_doSendSyncData = true;
- syncPtr.p->ptrI = syncPtr.i;
- syncPtr.p->m_locked = false;
- syncPtr.p->m_error = false;
- }
-
- if (restartFlag ||
- type == SubCreateReq::TableEvent) {
-
- syncPtr.p->m_doSendSyncData = false;
-
- ndbrequire(type != SubCreateReq::SingleTableScan);
- jam();
+ subPtr.p->m_tableId = tableId;
+ subPtr.p->m_table_ptrI = RNIL;
+ subPtr.p->m_state = Subscription::DEFINED;
+ subPtr.p->n_subscribers = 0;
- if (subPtr.p->m_tables[req.tableId] != 0) {
- ndbrequire(false); //TODO remove
- jam();
- sendSubCreateRef(signal, req, GrepError::SELECTED_TABLE_ALREADY_ADDED);
- return;
- }
- if (addTableFlag) {
- ndbrequire(type != SubCreateReq::TableEvent);
- jam();
- }
- subPtr.p->m_maxTables++;
- addTableId(req.tableId, subPtr, syncPtr.p);
- } else {
- switch(type){
- case SubCreateReq::SingleTableScan:
- {
- jam();
- syncPtr.p->m_tableList.append(&req.tableId, 1);
- if(signal->getNoOfSections() > 0){
- SegmentedSectionPtr ptr;
- signal->getSection(ptr, SubCreateReq::ATTRIBUTE_LIST);
- LocalDataBuffer<15> attrBuf(c_dataBufferPool,syncPtr.p->m_attributeList);
- append(attrBuf, ptr, getSectionSegmentPool());
- }
- }
- break;
-#if 0
- case SubCreateReq::SelectiveTableSnapshot:
- /**
- * Tables specified by the user that does not exist
- * in the database are just ignored. No error message
- * is given, nor does the db nodes crash
- * @todo: Memory is not release here (used tableBuf)
- */
- {
- if(signal->getNoOfSections() == 0 ){
- jam();
- sendSubCreateRef(signal, req, GrepError::WRONG_NO_OF_SECTIONS);
- return;
- }
+ DBUG_PRINT("info",("Added: key.m_subscriptionId: %u, key.m_subscriptionKey: %u",
+ key.m_subscriptionId, key.m_subscriptionKey));
- jam();
- SegmentedSectionPtr ptr;
- signal->getSection(ptr,0);// SubCreateReq::TABLE_LIST);
- SimplePropertiesSectionReader r0(ptr, getSectionSegmentPool());
- Uint32 i=0;
- char table[MAX_TAB_NAME_SIZE];
- r0.reset();
- r0.first();
- while(true){
- if ((r0.getValueType() != SimpleProperties::StringValue) ||
- (r0.getValueLen() <= 0)) {
- releaseSections(signal);
- ndbrequire(false);
- }
- r0.getString(table);
- strcpy(subPtr.p->m_tableNames[i],table);
- i++;
- if(!r0.next())
- break;
- }
- releaseSections(signal);
- subPtr.p->m_maxTables = i;
- subPtr.p->m_currentTable = 0;
- releaseSections(signal);
- convertNameToId(subPtr, signal);
- return;
- }
- break;
-#endif
- case SubCreateReq::DatabaseSnapshot:
- {
- jam();
- }
- break;
- default:
- ndbrequire(false);
- }
+ c_subscriptions.add(subPtr);
}
- sendSubCreateConf(signal, sender, subPtr);
-
- return;
+ SubCreateConf * const conf = (SubCreateConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = subPtr.p->m_senderData;
+ sendSignal(subRef, GSN_SUB_CREATE_CONF, signal, SubCreateConf::SignalLength, JBB);
+ DBUG_VOID_RETURN;
}
void
-SumaParticipant::sendSubCreateConf(Signal* signal, Uint32 sender,
- SubscriptionPtr subPtr)
+Suma::sendSubCreateRef(Signal* signal, Uint32 errCode)
{
- SubCreateConf * const conf = (SubCreateConf*)signal->getDataPtrSend();
- conf->subscriptionId = subPtr.p->m_subscriptionId;
- conf->subscriptionKey = subPtr.p->m_subscriptionKey;
- conf->subscriberData = subPtr.p->m_subscriberData;
- sendSignal(sender, GSN_SUB_CREATE_CONF, signal,
- SubCreateConf::SignalLength, JBB);
-}
-
-void
-SumaParticipant::sendSubCreateRef(Signal* signal, const SubCreateReq& req, Uint32 errCode){
jam();
SubCreateRef * ref = (SubCreateRef *)signal->getDataPtrSend();
- ref->subscriberRef = reference();
- ref->subscriberData = req.subscriberData;
- ref->err = errCode;
- releaseSections(signal);
+ ref->errorCode = errCode;
sendSignal(signal->getSendersBlockRef(), GSN_SUB_CREATE_REF, signal,
SubCreateRef::SignalLength, JBB);
return;
}
-
-
-
-
-
-
-
-
-
-
-
-Uint32
-SumaParticipant::getFirstGCI(Signal* signal) {
- if (c_lastCompleteGCI == RNIL) {
- ndbout_c("WARNING: c_lastCompleteGCI == RNIL");
- return 0;
- }
- return c_lastCompleteGCI+3;
-}
-
/**********************************************************
*
* Setting upp trigger for subscription
@@ -1157,13 +1050,12 @@ SumaParticipant::getFirstGCI(Signal* signal) {
*/
void
-SumaParticipant::execSUB_SYNC_REQ(Signal* signal) {
+Suma::execSUB_SYNC_REQ(Signal* signal)
+{
jamEntry();
-
+ DBUG_ENTER("Suma::execSUB_SYNC_REQ");
+ ndbassert(signal->getNoOfSections() <= 1);
CRASH_INSERTION(13004);
-#ifdef EVENT_PH3_DEBUG
- ndbout_c("SumaParticipant::execSUB_SYNC_REQ");
-#endif
SubSyncReq * const req = (SubSyncReq*)signal->getDataPtr();
@@ -1171,28 +1063,61 @@ SumaParticipant::execSUB_SYNC_REQ(Signal* signal) {
Subscription key;
key.m_subscriptionId = req->subscriptionId;
key.m_subscriptionKey = req->subscriptionKey;
-
- if(!c_subscriptions.find(subPtr, key)){
- jam();
- sendSubSyncRef(signal, GrepError::SUBSCRIPTION_ID_NOT_FOUND);
- return;
- }
- /**
- * @todo Tomas, do you really need to do this?
- */
- if(subPtr.p->m_subscriptionType == SubCreateReq::TableEvent) {
+ DBUG_PRINT("enter",("key.m_subscriptionId: %u, key.m_subscriptionKey: %u",
+ key.m_subscriptionId, key.m_subscriptionKey));
+
+ if(!c_subscriptions.find(subPtr, key))
+ {
jam();
- subPtr.p->m_subscriberData = req->subscriberData;
+ DBUG_PRINT("info",("Not found"));
+ sendSubSyncRef(signal, 1407);
+ DBUG_VOID_RETURN;
}
bool ok = false;
SubscriptionData::Part part = (SubscriptionData::Part)req->part;
Ptr<SyncRecord> syncPtr;
- c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
+ if(!c_syncPool.seize(syncPtr))
+ {
+ jam();
+ sendSubSyncRef(signal, 1416);
+ DBUG_VOID_RETURN;
+ }
+ DBUG_PRINT("info",("c_syncPool size: %d free: %d",
+ c_syncPool.getSize(),
+ c_syncPool.getNoOfFree()));
+ new (syncPtr.p) Ptr<SyncRecord>;
+ syncPtr.p->m_senderRef = req->senderRef;
+ syncPtr.p->m_senderData = req->senderData;
+ syncPtr.p->m_subscriptionPtrI = subPtr.i;
+ syncPtr.p->ptrI = syncPtr.i;
+ syncPtr.p->m_error = 0;
+
+ {
+ jam();
+ syncPtr.p->m_tableList.append(&subPtr.p->m_tableId, 1);
+ if(signal->getNoOfSections() > 0){
+ SegmentedSectionPtr ptr;
+ signal->getSection(ptr, SubSyncReq::ATTRIBUTE_LIST);
+ LocalDataBuffer<15> attrBuf(c_dataBufferPool,syncPtr.p->m_attributeList);
+ append(attrBuf, ptr, getSectionSegmentPool());
+ releaseSections(signal);
+ }
+ }
+
+ TablePtr tabPtr;
+ initTable(signal,subPtr.p->m_tableId,tabPtr,syncPtr);
+ tabPtr.p->n_subscribers++;
+ DBUG_PRINT("info",("Suma::Table[%u]::n_subscribers: %u",
+ tabPtr.p->m_tableId, tabPtr.p->n_subscribers));
+ DBUG_VOID_RETURN;
+
switch(part){
case SubscriptionData::MetaData:
+ ndbrequire(false);
+#if 0
ok = true;
jam();
if (subPtr.p->m_subscriptionType == SubCreateReq::DatabaseSnapshot) {
@@ -1218,6 +1143,7 @@ SumaParticipant::execSUB_SYNC_REQ(Signal* signal) {
}
syncPtr.p->startMeta(signal);
+#endif
break;
case SubscriptionData::TableData: {
ok = true;
@@ -1227,21 +1153,20 @@ SumaParticipant::execSUB_SYNC_REQ(Signal* signal) {
}
}
ndbrequire(ok);
+ DBUG_VOID_RETURN;
}
void
-SumaParticipant::sendSubSyncRef(Signal* signal, Uint32 errCode){
+Suma::sendSubSyncRef(Signal* signal, Uint32 errCode){
jam();
- SubSyncRef * ref =
- (SubSyncRef *)signal->getDataPtrSend();
- ref->err = errCode;
+ SubSyncRef * ref= (SubSyncRef *)signal->getDataPtrSend();
+ ref->errorCode = errCode;
+ releaseSections(signal);
sendSignal(signal->getSendersBlockRef(),
GSN_SUB_SYNC_REF,
signal,
SubSyncRef::SignalLength,
JBB);
-
- releaseSections(signal);
return;
}
@@ -1249,26 +1174,287 @@ SumaParticipant::sendSubSyncRef(Signal* signal, Uint32 errCode){
* Dict interface
*/
+#if 0
void
-SumaParticipant::execLIST_TABLES_CONF(Signal* signal){
+Suma::execLIST_TABLES_CONF(Signal* signal){
jamEntry();
CRASH_INSERTION(13005);
ListTablesConf* const conf = (ListTablesConf*)signal->getDataPtr();
SyncRecord* tmp = c_syncPool.getPtr(conf->senderData);
tmp->runLIST_TABLES_CONF(signal);
}
+#endif
+
+
+/*************************************************************************
+ *
+ *
+ */
+#if 0
+void
+Suma::Table::runLIST_TABLES_CONF(Signal* signal){
+ jam();
+
+ ListTablesConf * const conf = (ListTablesConf*)signal->getDataPtr();
+ const Uint32 len = signal->length() - ListTablesConf::HeaderLength;
+
+ SubscriptionPtr subPtr;
+ suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
+
+ for (unsigned i = 0; i < len; i++) {
+ subPtr.p->m_maxTables++;
+ suma.addTableId(ListTablesConf::getTableId(conf->tableData[i]), subPtr, this);
+ }
+
+ // for (unsigned i = 0; i < len; i++)
+ // conf->tableData[i] = ListTablesConf::getTableId(conf->tableData[i]);
+ // m_tableList.append(&conf->tableData[0], len);
+
+#if 0
+ TableList::DataBufferIterator it;
+ int i = 0;
+ for(m_tableList.first(it);!it.isNull();m_tableList.next(it)) {
+ ndbout_c("%u listtableconf tableid %d", i++, *it.data);
+ }
+#endif
+
+ if(len == ListTablesConf::DataLength){
+ jam();
+ // we expect more LIST_TABLE_CONF
+ return;
+ }
+
+#if 0
+ subPtr.p->m_currentTable = 0;
+ subPtr.p->m_maxTables = 0;
+
+ TableList::DataBufferIterator it;
+ for(m_tableList.first(it); !it.isNull(); m_tableList.next(it)) {
+ subPtr.p->m_maxTables++;
+ suma.addTableId(*it.data, subPtr, NULL);
+#ifdef NODEFAIL_DEBUG
+ ndbout_c(" listtableconf tableid %d",*it.data);
+#endif
+ }
+#endif
+
+ startMeta(signal);
+}
+#endif
+
+
+int
+Suma::initTable(Signal *signal, Uint32 tableId, TablePtr &tabPtr,
+ SubscriberPtr subbPtr)
+{
+ DBUG_ENTER("Suma::initTable SubscriberPtr");
+ DBUG_PRINT("enter",("tableId: %d", tableId));
+
+ int r= initTable(signal,tableId,tabPtr);
+ {
+ LocalDLList<Subscriber> subscribers(c_subscriberPool,
+ tabPtr.p->c_subscribers);
+ subscribers.add(subbPtr);
+ }
+
+ DBUG_PRINT("info",("added subscriber: %i", subbPtr.i));
+
+ if (r)
+ {
+    // we have to wait for the tab info
+ DBUG_RETURN(1);
+ }
+
+ if (tabPtr.p->setupTrigger(signal, *this))
+ {
+    // we have to wait for the triggers to be set up
+ DBUG_RETURN(1);
+ }
+
+ completeOneSubscriber(signal, tabPtr, subbPtr);
+ completeInitTable(signal, tabPtr);
+ DBUG_RETURN(0);
+}
+
+int
+Suma::initTable(Signal *signal, Uint32 tableId, TablePtr &tabPtr,
+ Ptr<SyncRecord> syncPtr)
+{
+ jam();
+ DBUG_ENTER("Suma::initTable Ptr<SyncRecord>");
+ DBUG_PRINT("enter",("tableId: %d", tableId));
+
+ int r= initTable(signal,tableId,tabPtr);
+
+ {
+ LocalDLList<SyncRecord> syncRecords(c_syncPool,tabPtr.p->c_syncRecords);
+ syncRecords.add(syncPtr);
+ }
+
+ if (r)
+ {
+    // we have to wait for the tab info
+ DBUG_RETURN(1);
+ }
+ completeInitTable(signal, tabPtr);
+ DBUG_RETURN(0);
+}
+
+int
+Suma::initTable(Signal *signal, Uint32 tableId, TablePtr &tabPtr)
+{
+ jam();
+ DBUG_ENTER("Suma::initTable");
+
+ if (!c_tables.find(tabPtr, tableId) ||
+ tabPtr.p->m_state == Table::DROPPED ||
+ tabPtr.p->m_state == Table::ALTERED)
+ {
+    // table is not being prepared
+ // seize a new table, initialize and add to c_tables
+ ndbrequire(c_tablePool.seize(tabPtr));
+ DBUG_PRINT("info",("c_tablePool size: %d free: %d",
+ c_tablePool.getSize(),
+ c_tablePool.getNoOfFree()));
+ new (tabPtr.p) Table;
+
+ tabPtr.p->m_tableId= tableId;
+ tabPtr.p->m_ptrI= tabPtr.i;
+ tabPtr.p->n_subscribers = 0;
+ DBUG_PRINT("info",("Suma::Table[%u,i=%u]::n_subscribers: %u",
+ tabPtr.p->m_tableId, tabPtr.i, tabPtr.p->n_subscribers));
+
+ tabPtr.p->m_error = 0;
+ tabPtr.p->m_schemaVersion = RNIL;
+ tabPtr.p->m_state = Table::DEFINING;
+ tabPtr.p->m_hasTriggerDefined[0] = 0;
+ tabPtr.p->m_hasTriggerDefined[1] = 0;
+ tabPtr.p->m_hasTriggerDefined[2] = 0;
+ tabPtr.p->m_triggerIds[0] = ILLEGAL_TRIGGER_ID;
+ tabPtr.p->m_triggerIds[1] = ILLEGAL_TRIGGER_ID;
+ tabPtr.p->m_triggerIds[2] = ILLEGAL_TRIGGER_ID;
+
+ c_tables.add(tabPtr);
+
+ GetTabInfoReq * req = (GetTabInfoReq *)signal->getDataPtrSend();
+ req->senderRef = reference();
+ req->senderData = tabPtr.i;
+ req->requestType =
+ GetTabInfoReq::RequestById | GetTabInfoReq::LongSignalConf;
+ req->tableId = tableId;
+
+ DBUG_PRINT("info",("GET_TABINFOREQ id %d", req->tableId));
+ sendSignal(DBDICT_REF, GSN_GET_TABINFOREQ, signal,
+ GetTabInfoReq::SignalLength, JBB);
+ DBUG_RETURN(1);
+ }
+ if (tabPtr.p->m_state == Table::DEFINING)
+ {
+ DBUG_RETURN(1);
+ }
+ // ToDo should be a ref signal instead
+ ndbrequire(tabPtr.p->m_state == Table::DEFINED);
+ DBUG_RETURN(0);
+}
void
-SumaParticipant::execGET_TABINFOREF(Signal* signal){
+Suma::completeOneSubscriber(Signal *signal, TablePtr tabPtr, SubscriberPtr subbPtr)
+{
+ jam();
+ DBUG_ENTER("Suma::completeOneSubscriber");
+
+ if (tabPtr.p->m_error)
+ {
+ sendSubStartRef(signal,subbPtr,tabPtr.p->m_error,
+ SubscriptionData::TableData);
+ tabPtr.p->n_subscribers--;
+ }
+ else
+ {
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI);
+ subPtr.p->m_table_ptrI= tabPtr.i;
+ sendSubStartComplete(signal,subbPtr, m_last_complete_gci + 3,
+ SubscriptionData::TableData);
+ }
+ DBUG_VOID_RETURN;
+}
+
+void
+Suma::completeAllSubscribers(Signal *signal, TablePtr tabPtr)
+{
+ jam();
+ DBUG_ENTER("Suma::completeAllSubscribers");
+ // handle all subscribers
+ {
+ LocalDLList<Subscriber> subscribers(c_subscriberPool,
+ tabPtr.p->c_subscribers);
+ SubscriberPtr subbPtr;
+ for(subscribers.first(subbPtr);
+ !subbPtr.isNull();
+ subscribers.next(subbPtr))
+ {
+ completeOneSubscriber(signal, tabPtr, subbPtr);
+ }
+ }
+ DBUG_VOID_RETURN;
+}
+
+void
+Suma::completeInitTable(Signal *signal, TablePtr tabPtr)
+{
+ jam();
+ DBUG_ENTER("Suma::completeInitTable");
+
+ // handle all syncRecords
+ while (!tabPtr.p->c_syncRecords.isEmpty())
+ {
+ Ptr<SyncRecord> syncPtr;
+ {
+ LocalDLList<SyncRecord> syncRecords(c_syncPool,
+ tabPtr.p->c_syncRecords);
+ syncRecords.first(syncPtr);
+ syncRecords.remove(syncPtr);
+ }
+ syncPtr.p->ptrI = syncPtr.i;
+ if (tabPtr.p->m_error == 0)
+ {
+ jam();
+ syncPtr.p->startScan(signal);
+ }
+ else
+ {
+ jam();
+ syncPtr.p->completeScan(signal, tabPtr.p->m_error);
+ tabPtr.p->n_subscribers--;
+ }
+ }
+
+ if (tabPtr.p->m_error)
+ {
+ DBUG_PRINT("info",("Suma::Table[%u]::n_subscribers: %u",
+ tabPtr.p->m_tableId, tabPtr.p->n_subscribers));
+ tabPtr.p->checkRelease(*this);
+ }
+ else
+ {
+ tabPtr.p->m_state = Table::DEFINED;
+ }
+
+ DBUG_VOID_RETURN;
+}
+
+
+void
+Suma::execGET_TABINFOREF(Signal* signal){
jamEntry();
- GetTabInfoRef* const ref = (GetTabInfoRef*)signal->getDataPtr();
- SyncRecord* tmp = c_syncPool.getPtr(ref->senderData);
- tmp->runGET_TABINFOREF(signal);
+ /* ToDo handle this */
+ ndbrequire(false);
}
void
-SumaParticipant::execGET_TABINFO_CONF(Signal* signal){
+Suma::execGET_TABINFO_CONF(Signal* signal){
jamEntry();
CRASH_INSERTION(13006);
@@ -1278,23 +1464,30 @@ SumaParticipant::execGET_TABINFO_CONF(Signal* signal){
}
GetTabInfoConf* conf = (GetTabInfoConf*)signal->getDataPtr();
-
Uint32 tableId = conf->tableId;
- Uint32 senderData = conf->senderData;
-
- SyncRecord* tmp = c_syncPool.getPtr(senderData);
- ndbrequire(parseTable(signal, conf, tableId, tmp));
- tmp->runGET_TABINFO_CONF(signal);
+ TablePtr tabPtr;
+ c_tablePool.getPtr(tabPtr, conf->senderData);
+ SegmentedSectionPtr ptr;
+ signal->getSection(ptr, GetTabInfoConf::DICT_TAB_INFO);
+ ndbrequire(tabPtr.p->parseTable(ptr, *this));
+ releaseSections(signal);
+ /**
+ * We need to gather fragment info
+ */
+ jam();
+ signal->theData[0] = RNIL;
+ signal->theData[1] = tableId;
+ signal->theData[2] = tabPtr.i;
+ sendSignal(DBDIH_REF, GSN_DI_FCOUNTREQ, signal, 3, JBB);
}
bool
-SumaParticipant::parseTable(Signal* signal, GetTabInfoConf* conf, Uint32 tableId,
- SyncRecord* syncPtr_p){
-
- SegmentedSectionPtr ptr;
- signal->getSection(ptr, GetTabInfoConf::DICT_TAB_INFO);
+Suma::Table::parseTable(SegmentedSectionPtr ptr,
+ Suma &suma)
+{
+ DBUG_ENTER("Suma::Table::parseTable");
- SimplePropertiesSectionReader it(ptr, getSectionSegmentPool());
+ SimplePropertiesSectionReader it(ptr, suma.getSectionSegmentPool());
SimpleProperties::UnpackStatus s;
DictTabInfo::Table tableDesc; tableDesc.init();
@@ -1302,25 +1495,27 @@ SumaParticipant::parseTable(Signal* signal, GetTabInfoConf* conf, Uint32 tableId
DictTabInfo::TableMapping,
DictTabInfo::TableMappingSize,
true, true);
-
- ndbrequire(s == SimpleProperties::Break);
- TablePtr tabPtr;
- c_tables.find(tabPtr, tableId);
-
- if(!tabPtr.isNull() &&
- tabPtr.p->m_schemaVersion != tableDesc.TableVersion){
+ jam();
+ suma.suma_ndbrequire(s == SimpleProperties::Break);
+
+#if 0
+ToDo handle this
+ if(m_schemaVersion != tableDesc.TableVersion){
jam();
- tabPtr.p->release(* this);
+ release(* this);
// oops wrong schema version in stored tabledesc
// we need to find all subscriptions with old table desc
// and all subscribers to this
// hopefully none
c_tables.release(tabPtr);
+ DBUG_PRINT("info",("c_tablePool size: %d free: %d",
+ suma.c_tablePool.getSize(),
+ suma.c_tablePool.getNoOfFree()));
tabPtr.setNull();
- DLHashTable<SumaParticipant::Subscription>::Iterator i_subPtr;
+ DLHashTable<Suma::Subscription>::Iterator i_subPtr;
c_subscriptions.first(i_subPtr);
SubscriptionPtr subPtr;
for(;!i_subPtr.isNull();c_subscriptions.next(i_subPtr)){
@@ -1331,9 +1526,9 @@ SumaParticipant::parseTable(Signal* signal, GetTabInfoConf* conf, Uint32 tableId
jam();
continue;
}
- if (subPtr.p->m_tables[tableId]) {
+ if (subPtr.p->m_tables.get(tableId)) {
jam();
- subPtr.p->m_tables[tableId] = 0; // remove this old table reference
+ subPtr.p->m_tables.clear(tableId); // remove this old table reference
TableList::DataBufferIterator it;
for(tmp->m_tableList.first(it);!it.isNull();tmp->m_tableList.next(it)) {
jam();
@@ -1353,31 +1548,11 @@ SumaParticipant::parseTable(Signal* signal, GetTabInfoConf* conf, Uint32 tableId
}
}
}
-
- if (tabPtr.isNull()) {
- jam();
- /**
- * Uninitialized table record
- */
- ndbrequire(c_tables.seize(tabPtr));
- new (tabPtr.p) Table;
- tabPtr.p->m_schemaVersion = RNIL;
- tabPtr.p->m_tableId = tableId;
- tabPtr.p->m_hasTriggerDefined[0] = 0;
- tabPtr.p->m_hasTriggerDefined[1] = 0;
- tabPtr.p->m_hasTriggerDefined[2] = 0;
- tabPtr.p->m_triggerIds[0] = ILLEGAL_TRIGGER_ID;
- tabPtr.p->m_triggerIds[1] = ILLEGAL_TRIGGER_ID;
- tabPtr.p->m_triggerIds[2] = ILLEGAL_TRIGGER_ID;
-#if 0
- ndbout_c("Get tab info conf %d", tableId);
#endif
- c_tables.add(tabPtr);
- }
- if(tabPtr.p->m_attributes.getSize() != 0){
+ if(m_attributes.getSize() != 0){
jam();
- return true;
+ DBUG_RETURN(true);
}
/**
@@ -1385,22 +1560,24 @@ SumaParticipant::parseTable(Signal* signal, GetTabInfoConf* conf, Uint32 tableId
*/
Uint32 noAttribs = tableDesc.NoOfAttributes;
Uint32 notFixed = (tableDesc.NoOfNullable+tableDesc.NoOfVariable);
- tabPtr.p->m_schemaVersion = tableDesc.TableVersion;
+ m_schemaVersion = tableDesc.TableVersion;
// The attribute buffer
- LocalDataBuffer<15> attrBuf(c_dataBufferPool, tabPtr.p->m_attributes);
+ LocalDataBuffer<15> attrBuf(suma.c_dataBufferPool, m_attributes);
// Temporary buffer
- DataBuffer<15> theRest(c_dataBufferPool);
+ DataBuffer<15> theRest(suma.c_dataBufferPool);
if(!attrBuf.seize(noAttribs)){
- ndbrequire(false);
- return false;
+ jam();
+ suma.suma_ndbrequire(false);
+ DBUG_RETURN(false);
}
if(!theRest.seize(notFixed)){
- ndbrequire(false);
- return false;
+ jam();
+ suma.suma_ndbrequire(false);
+ DBUG_RETURN(false);
}
DataBuffer<15>::DataBufferIterator attrIt; // Fixed not nullable
@@ -1414,7 +1591,8 @@ SumaParticipant::parseTable(Signal* signal, GetTabInfoConf* conf, Uint32 tableId
DictTabInfo::AttributeMapping,
DictTabInfo::AttributeMappingSize,
true, true);
- ndbrequire(s == SimpleProperties::Break);
+ jam();
+ suma.suma_ndbrequire(s == SimpleProperties::Break);
if (!attrDesc.AttributeNullableFlag
/* && !attrDesc.AttributeVariableFlag */) {
@@ -1442,302 +1620,15 @@ SumaParticipant::parseTable(Signal* signal, GetTabInfoConf* conf, Uint32 tableId
theRest.release();
- return true;
-}
-
-void
-SumaParticipant::execDI_FCOUNTCONF(Signal* signal){
- jamEntry();
-
- CRASH_INSERTION(13007);
-
- const Uint32 senderData = signal->theData[3];
- SyncRecord* tmp = c_syncPool.getPtr(senderData);
- tmp->runDI_FCOUNTCONF(signal);
+ DBUG_RETURN(true);
}
void
-SumaParticipant::execDIGETPRIMCONF(Signal* signal){
- jamEntry();
-
- CRASH_INSERTION(13008);
-
- const Uint32 senderData = signal->theData[1];
- SyncRecord* tmp = c_syncPool.getPtr(senderData);
- tmp->runDIGETPRIMCONF(signal);
-}
-
-void
-SumaParticipant::execCREATE_TRIG_CONF(Signal* signal){
- jamEntry();
- DBUG_ENTER("SumaParticipant::execCREATE_TRIG_CONF");
- CRASH_INSERTION(13009);
-
- CreateTrigConf * const conf = (CreateTrigConf*)signal->getDataPtr();
-
- const Uint32 senderData = conf->getConnectionPtr();
- SyncRecord* tmp = c_syncPool.getPtr(senderData);
- tmp->runCREATE_TRIG_CONF(signal);
-
- /**
- * dodido
- * @todo: I (Johan) dont know what to do here. Jonas, what do you mean?
- */
- DBUG_VOID_RETURN;
-}
-
-void
-SumaParticipant::execCREATE_TRIG_REF(Signal* signal){
- jamEntry();
- ndbrequire(false);
-}
-
-void
-SumaParticipant::execDROP_TRIG_CONF(Signal* signal){
- jamEntry();
- DBUG_ENTER("SumaParticipant::execDROP_TRIG_CONF");
- CRASH_INSERTION(13010);
-
- DropTrigConf * const conf = (DropTrigConf*)signal->getDataPtr();
-
- const Uint32 senderData = conf->getConnectionPtr();
- SyncRecord* tmp = c_syncPool.getPtr(senderData);
- tmp->runDROP_TRIG_CONF(signal);
- DBUG_VOID_RETURN;
-}
-
-void
-SumaParticipant::execDROP_TRIG_REF(Signal* signal){
- jamEntry();
- DBUG_ENTER("SumaParticipant::execDROP_TRIG_CONF");
- DropTrigRef * const ref = (DropTrigRef*)signal->getDataPtr();
-
- const Uint32 senderData = ref->getConnectionPtr();
- SyncRecord* tmp = c_syncPool.getPtr(senderData);
- tmp->runDROP_TRIG_CONF(signal);
- DBUG_VOID_RETURN;
-}
-
-/*************************************************************************
- *
- *
- */
-
-void
-SumaParticipant::SyncRecord::runLIST_TABLES_CONF(Signal* signal){
- jam();
-
- ListTablesConf * const conf = (ListTablesConf*)signal->getDataPtr();
- const Uint32 len = signal->length() - ListTablesConf::HeaderLength;
-
- SubscriptionPtr subPtr;
- suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
-
- for (unsigned i = 0; i < len; i++) {
- subPtr.p->m_maxTables++;
- suma.addTableId(ListTablesConf::getTableId(conf->tableData[i]), subPtr, this);
- }
-
- // for (unsigned i = 0; i < len; i++)
- // conf->tableData[i] = ListTablesConf::getTableId(conf->tableData[i]);
- // m_tableList.append(&conf->tableData[0], len);
-
-#if 0
- TableList::DataBufferIterator it;
- int i = 0;
- for(m_tableList.first(it);!it.isNull();m_tableList.next(it)) {
- ndbout_c("%u listtableconf tableid %d", i++, *it.data);
- }
-#endif
-
- if(len == ListTablesConf::DataLength){
- jam();
- // we expect more LIST_TABLE_CONF
- return;
- }
-
-#if 0
- subPtr.p->m_currentTable = 0;
- subPtr.p->m_maxTables = 0;
-
- TableList::DataBufferIterator it;
- for(m_tableList.first(it); !it.isNull(); m_tableList.next(it)) {
- subPtr.p->m_maxTables++;
- suma.addTableId(*it.data, subPtr, NULL);
-#ifdef NODEFAIL_DEBUG
- ndbout_c(" listtableconf tableid %d",*it.data);
-#endif
- }
-#endif
-
- startMeta(signal);
-}
-
-void
-SumaParticipant::SyncRecord::startMeta(Signal* signal){
- jam();
- m_currentTable = 0;
- nextMeta(signal);
-}
-
-/**
- * m_tableList only contains UserTables
- */
-void
-SumaParticipant::SyncRecord::nextMeta(Signal* signal){
- jam();
-
- TableList::DataBufferIterator it;
- if(!m_tableList.position(it, m_currentTable)){
- completeMeta(signal);
- return;
- }
-
- GetTabInfoReq * req = (GetTabInfoReq *)signal->getDataPtrSend();
- req->senderRef = suma.reference();
- req->senderData = ptrI;
- req->requestType =
- GetTabInfoReq::RequestById | GetTabInfoReq::LongSignalConf;
- req->tableId = * it.data;
-
-#if 0
- ndbout_c("GET_TABINFOREQ id %d", req->tableId);
-#endif
- suma.sendSignal(DBDICT_REF, GSN_GET_TABINFOREQ, signal,
- GetTabInfoReq::SignalLength, JBB);
-}
-
-void
-SumaParticipant::SyncRecord::runGET_TABINFOREF(Signal* signal)
+Suma::execDI_FCOUNTCONF(Signal* signal)
{
- jam();
-
- SubscriptionPtr subPtr;
- suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
- ndbrequire(subPtr.p->m_syncPtrI == ptrI);
-
- Uint32 type = subPtr.p->m_subscriptionType;
-
- bool do_continue = false;
- switch (type) {
- case SubCreateReq::TableEvent:
- jam();
- break;
- case SubCreateReq::DatabaseSnapshot:
- jam();
- do_continue = true;
- break;
- case SubCreateReq::SelectiveTableSnapshot:
- jam();
- do_continue = true;
- break;
- case SubCreateReq::SingleTableScan:
- jam();
- break;
- default:
- ndbrequire(false);
- break;
- }
-
- if (! do_continue) {
- m_error = true;
- completeMeta(signal);
- return;
- }
-
- m_currentTable++;
- nextMeta(signal);
- return;
-
- // now we need to clean-up
-}
-
-
-void
-SumaParticipant::SyncRecord::runGET_TABINFO_CONF(Signal* signal){
- jam();
-
- GetTabInfoConf * const conf = (GetTabInfoConf*)signal->getDataPtr();
- // const Uint32 gci = conf->gci;
- const Uint32 tableId = conf->tableId;
- TableList::DataBufferIterator it;
-
- ndbrequire(m_tableList.position(it, m_currentTable));
- ndbrequire(* it.data == tableId);
-
- SubscriptionPtr subPtr;
- suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
- ndbrequire(subPtr.p->m_syncPtrI == ptrI);
-
- SegmentedSectionPtr ptr;
- signal->getSection(ptr, GetTabInfoConf::DICT_TAB_INFO);
-
- SubMetaData * data = (SubMetaData*)signal->getDataPtrSend();
- /**
- * sending lastCompleteGCI. Used by Lars in interval calculations
- * incremenet by one, since last_CompleteGCI is the not the current gci.
- */
- data->gci = suma.c_lastCompleteGCI + 1;
- data->tableId = tableId;
- data->senderData = subPtr.p->m_subscriberData;
-#if PRINT_ONLY
- ndbout_c("GSN_SUB_META_DATA Table %d", tableId);
-#else
-
- bool okToSend = m_doSendSyncData;
-
- /*
- * If it is a selectivetablesnapshot and the table is not part of the
- * subscription, then do not send anything, just continue.
- * If it is a tablevent, don't send regardless since the APIs are not
- * interested in meta data.
- */
- if(subPtr.p->m_subscriptionType == SubCreateReq::SelectiveTableSnapshot)
- if(!subPtr.p->m_tables[tableId])
- okToSend = false;
-
- if(okToSend) {
- if(refToNode(subPtr.p->m_subscriberRef) == 0){
- jam();
- suma.EXECUTE_DIRECT(refToBlock(subPtr.p->m_subscriberRef),
- GSN_SUB_META_DATA,
- signal,
- SubMetaData::SignalLength);
- jamEntry();
- suma.releaseSections(signal);
- } else {
- jam();
- suma.sendSignal(subPtr.p->m_subscriberRef,
- GSN_SUB_META_DATA,
- signal,
- SubMetaData::SignalLength, JBB);
- }
- }
-#endif
-
- TablePtr tabPtr;
- ndbrequire(suma.c_tables.find(tabPtr, tableId));
-
- LocalDataBuffer<15> fragBuf(suma.c_dataBufferPool, tabPtr.p->m_fragments);
- if(fragBuf.getSize() == 0){
- /**
- * We need to gather fragment info
- */
- jam();
- signal->theData[0] = RNIL;
- signal->theData[1] = tableId;
- signal->theData[2] = ptrI;
- suma.sendSignal(DBDIH_REF, GSN_DI_FCOUNTREQ, signal, 3, JBB);
- return;
- }
-
- m_currentTable++;
- nextMeta(signal);
-}
-
-void
-SumaParticipant::SyncRecord::runDI_FCOUNTCONF(Signal* signal){
- jam();
+ jamEntry();
+ DBUG_ENTER("Suma::execDI_FCOUNTCONF");
+ ndbassert(signal->getNoOfSections() == 0);
const Uint32 userPtr = signal->theData[0];
const Uint32 fragCount = signal->theData[1];
@@ -1746,25 +1637,31 @@ SumaParticipant::SyncRecord::runDI_FCOUNTCONF(Signal* signal){
ndbrequire(userPtr == RNIL && signal->length() == 5);
TablePtr tabPtr;
- ndbrequire(suma.c_tables.find(tabPtr, tableId));
-
- LocalDataBuffer<15> fragBuf(suma.c_dataBufferPool, tabPtr.p->m_fragments);
+ tabPtr.i= signal->theData[3];
+ ndbrequire(tabPtr.p= c_tablePool.getPtr(tabPtr.i));
+ ndbrequire(tabPtr.p->m_tableId == tableId);
+
+ LocalDataBuffer<15> fragBuf(c_dataBufferPool, tabPtr.p->m_fragments);
ndbrequire(fragBuf.getSize() == 0);
- m_currentFragment = fragCount;
+ tabPtr.p->m_fragCount = fragCount;
+
signal->theData[0] = RNIL;
- signal->theData[1] = ptrI;
+ signal->theData[1] = tabPtr.i;
signal->theData[2] = tableId;
signal->theData[3] = 0; // Frag no
- suma.sendSignal(DBDIH_REF, GSN_DIGETPRIMREQ, signal, 4, JBB);
+ sendSignal(DBDIH_REF, GSN_DIGETPRIMREQ, signal, 4, JBB);
+
+ DBUG_VOID_RETURN;
}
void
-SumaParticipant::SyncRecord::runDIGETPRIMCONF(Signal* signal){
- jam();
+Suma::execDIGETPRIMCONF(Signal* signal){
+ jamEntry();
+ DBUG_ENTER("Suma::execDIGETPRIMCONF");
+ ndbassert(signal->getNoOfSections() == 0);
const Uint32 userPtr = signal->theData[0];
- //const Uint32 senderData = signal->theData[1];
const Uint32 nodeCount = signal->theData[6];
const Uint32 tableId = signal->theData[7];
const Uint32 fragNo = signal->theData[8];
@@ -1773,40 +1670,55 @@ SumaParticipant::SyncRecord::runDIGETPRIMCONF(Signal* signal){
ndbrequire(nodeCount > 0 && nodeCount <= MAX_REPLICAS);
TablePtr tabPtr;
- ndbrequire(suma.c_tables.find(tabPtr, tableId));
- LocalDataBuffer<15> fragBuf(suma.c_dataBufferPool, tabPtr.p->m_fragments);
+ tabPtr.i= signal->theData[1];
+ ndbrequire(tabPtr.p= c_tablePool.getPtr(tabPtr.i));
+ ndbrequire(tabPtr.p->m_tableId == tableId);
- /**
- * Add primary node for fragment to list
- */
- FragmentDescriptor fd;
- fd.m_fragDesc.m_nodeId = signal->theData[2];
- fd.m_fragDesc.m_fragmentNo = fragNo;
- signal->theData[2] = fd.m_dummy;
- fragBuf.append(&signal->theData[2], 1);
+ {
+ LocalDataBuffer<15> fragBuf(c_dataBufferPool,tabPtr.p->m_fragments);
+
+ /**
+ * Add primary node for fragment to list
+ */
+ FragmentDescriptor fd;
+ fd.m_fragDesc.m_nodeId = signal->theData[2];
+ fd.m_fragDesc.m_fragmentNo = fragNo;
+ signal->theData[2] = fd.m_dummy;
+ fragBuf.append(&signal->theData[2], 1);
+ }
const Uint32 nextFrag = fragNo + 1;
- if(nextFrag == m_currentFragment){
+ if(nextFrag == tabPtr.p->m_fragCount)
+ {
/**
* Complete frag info for table
+ * table is not up to date
*/
- m_currentTable++;
- nextMeta(signal);
- return;
+
+ if (tabPtr.p->c_subscribers.isEmpty())
+ {
+ completeInitTable(signal,tabPtr);
+ DBUG_VOID_RETURN;
+ }
+ tabPtr.p->setupTrigger(signal, *this);
+ DBUG_VOID_RETURN;
}
signal->theData[0] = RNIL;
- signal->theData[1] = ptrI;
+ signal->theData[1] = tabPtr.i;
signal->theData[2] = tableId;
signal->theData[3] = nextFrag; // Frag no
- suma.sendSignal(DBDIH_REF, GSN_DIGETPRIMREQ, signal, 4, JBB);
+ sendSignal(DBDIH_REF, GSN_DIGETPRIMREQ, signal, 4, JBB);
+
+ DBUG_VOID_RETURN;
}
+#if 0
void
-SumaParticipant::SyncRecord::completeMeta(Signal* signal){
+Suma::SyncRecord::completeTableInit(Signal* signal)
+{
jam();
SubscriptionPtr subPtr;
suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
- ndbrequire(subPtr.p->m_syncPtrI == ptrI);
#if PRINT_ONLY
ndbout_c("GSN_SUB_SYNC_CONF (meta)");
@@ -1816,24 +1728,21 @@ SumaParticipant::SyncRecord::completeMeta(Signal* signal){
if (m_error) {
SubSyncRef * const ref = (SubSyncRef*)signal->getDataPtrSend();
- ref->subscriptionId = subPtr.p->m_subscriptionId;
- ref->subscriptionKey = subPtr.p->m_subscriptionKey;
- ref->part = SubscriptionData::MetaData;
- ref->subscriberData = subPtr.p->m_subscriberData;
+ ref->senderRef = suma.reference();
+ ref->senderData = subPtr.p->m_senderData;
ref->errorCode = SubSyncRef::Undefined;
- suma.sendSignal(subPtr.p->m_subscriberRef, GSN_SUB_SYNC_REF, signal,
+ suma.sendSignal(subPtr.p->m_senderRef, GSN_SUB_SYNC_REF, signal,
SubSyncRef::SignalLength, JBB);
} else {
SubSyncConf * const conf = (SubSyncConf*)signal->getDataPtrSend();
- conf->subscriptionId = subPtr.p->m_subscriptionId;
- conf->subscriptionKey = subPtr.p->m_subscriptionKey;
- conf->part = SubscriptionData::MetaData;
- conf->subscriberData = subPtr.p->m_subscriberData;
- suma.sendSignal(subPtr.p->m_subscriberRef, GSN_SUB_SYNC_CONF, signal,
+ conf->senderRef = suma.reference();
+ conf->senderData = subPtr.p->m_senderData;
+ suma.sendSignal(subPtr.p->m_senderRef, GSN_SUB_SYNC_CONF, signal,
SubSyncConf::SignalLength, JBB);
}
#endif
}
+#endif
/**********************************************************
*
@@ -1842,21 +1751,24 @@ SumaParticipant::SyncRecord::completeMeta(Signal* signal){
*/
void
-SumaParticipant::SyncRecord::startScan(Signal* signal){
+Suma::SyncRecord::startScan(Signal* signal)
+{
jam();
+ DBUG_ENTER("Suma::SyncRecord::startScan");
/**
* Get fraginfo
*/
m_currentTable = 0;
m_currentFragment = 0;
-
nextScan(signal);
+ DBUG_VOID_RETURN;
}
bool
-SumaParticipant::SyncRecord::getNextFragment(TablePtr * tab,
- FragmentDescriptor * fd){
+Suma::SyncRecord::getNextFragment(TablePtr * tab,
+ FragmentDescriptor * fd)
+{
jam();
SubscriptionPtr subPtr;
suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
@@ -1864,20 +1776,15 @@ SumaParticipant::SyncRecord::getNextFragment(TablePtr * tab,
DataBuffer<15>::DataBufferIterator fragIt;
m_tableList.position(tabIt, m_currentTable);
- for(; !tabIt.curr.isNull(); m_tableList.next(tabIt), m_currentTable++){
+ for(; !tabIt.curr.isNull(); m_tableList.next(tabIt), m_currentTable++)
+ {
TablePtr tabPtr;
ndbrequire(suma.c_tables.find(tabPtr, * tabIt.data));
- if(subPtr.p->m_subscriptionType == SubCreateReq::SelectiveTableSnapshot)
- {
- if(!subPtr.p->m_tables[tabPtr.p->m_tableId]) {
- *tab = tabPtr;
- return true;
- }
- }
LocalDataBuffer<15> fragBuf(suma.c_dataBufferPool, tabPtr.p->m_fragments);
fragBuf.position(fragIt, m_currentFragment);
- for(; !fragIt.curr.isNull(); fragBuf.next(fragIt), m_currentFragment++){
+ for(; !fragIt.curr.isNull(); fragBuf.next(fragIt), m_currentFragment++)
+ {
FragmentDescriptor tmp;
tmp.m_dummy = * fragIt.data;
if(tmp.m_fragDesc.m_nodeId == suma.getOwnNodeId()){
@@ -1887,36 +1794,30 @@ SumaParticipant::SyncRecord::getNextFragment(TablePtr * tab,
}
}
m_currentFragment = 0;
+
+ tabPtr.p->n_subscribers--;
+ DBUG_PRINT("info",("Suma::Table[%u]::n_subscribers: %u",
+ tabPtr.p->m_tableId, tabPtr.p->n_subscribers));
+ tabPtr.p->checkRelease(suma);
}
return false;
}
void
-SumaParticipant::SyncRecord::nextScan(Signal* signal){
+Suma::SyncRecord::nextScan(Signal* signal)
+{
jam();
+ DBUG_ENTER("Suma::SyncRecord::nextScan");
TablePtr tabPtr;
FragmentDescriptor fd;
SubscriptionPtr subPtr;
if(!getNextFragment(&tabPtr, &fd)){
jam();
completeScan(signal);
- return;
+ DBUG_VOID_RETURN;
}
suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
- ndbrequire(subPtr.p->m_syncPtrI == ptrI);
- if(subPtr.p->m_subscriptionType == SubCreateReq::SelectiveTableSnapshot) {
- jam();
- if(!subPtr.p->m_tables[tabPtr.p->m_tableId]) {
- /*
- * table is not part of the subscription. Check next table
- */
- m_currentTable++;
- nextScan(signal);
- return;
- }
- }
-
DataBuffer<15>::Head head = m_attributeList;
if(head.getSize() == 0){
head = tabPtr.p->m_attributes;
@@ -1927,7 +1828,7 @@ SumaParticipant::SyncRecord::nextScan(Signal* signal){
const Uint32 parallelism = 16;
const Uint32 attrLen = 5 + attrBuf.getSize();
- req->senderData = m_subscriptionPtrI;
+ req->senderData = ptrI;
req->resultRef = suma.reference();
req->tableId = tabPtr.p->m_tableId;
req->requestInfo = 0;
@@ -1941,7 +1842,7 @@ SumaParticipant::SyncRecord::nextScan(Signal* signal){
req->transId1 = 0;
req->transId2 = (SUMA << 20) + (suma.getOwnNodeId() << 8);
req->clientOpPtr = (ptrI << 16);
- req->batch_size_rows= 16;
+ req->batch_size_rows= parallelism;
req->batch_size_bytes= 0;
suma.sendSignal(DBLQH_REF, GSN_SCAN_FRAGREQ, signal,
ScanFragReq::SignalLength, JBB);
@@ -1963,7 +1864,7 @@ SumaParticipant::SyncRecord::nextScan(Signal* signal){
AttributeHeader::init(&signal->theData[dataPos++], * it.data, 0);
if(dataPos == 25){
suma.sendSignal(DBLQH_REF, GSN_ATTRINFO, signal, 25, JBB);
- dataPos = 3;
+ dataPos = 3;
}
}
if(dataPos != 3){
@@ -1972,11 +1873,13 @@ SumaParticipant::SyncRecord::nextScan(Signal* signal){
m_currentTableId = tabPtr.p->m_tableId;
m_currentNoOfAttributes = attrBuf.getSize();
+
+ DBUG_VOID_RETURN;
}
void
-SumaParticipant::execSCAN_FRAGREF(Signal* signal){
+Suma::execSCAN_FRAGREF(Signal* signal){
jamEntry();
// ScanFragRef * const ref = (ScanFragRef*)signal->getDataPtr();
@@ -1984,9 +1887,10 @@ SumaParticipant::execSCAN_FRAGREF(Signal* signal){
}
void
-SumaParticipant::execSCAN_FRAGCONF(Signal* signal){
+Suma::execSCAN_FRAGCONF(Signal* signal){
jamEntry();
-
+ DBUG_ENTER("Suma::execSCAN_FRAGCONF");
+ ndbassert(signal->getNoOfSections() == 0);
CRASH_INSERTION(13011);
ScanFragConf * const conf = (ScanFragConf*)signal->getDataPtr();
@@ -1995,8 +1899,8 @@ SumaParticipant::execSCAN_FRAGCONF(Signal* signal){
const Uint32 senderData = conf->senderData;
const Uint32 completedOps = conf->completedOps;
- SubscriptionPtr subPtr;
- c_subscriptions.getPtr(subPtr, senderData);
+ Ptr<SyncRecord> syncPtr;
+ c_syncPool.getPtr(syncPtr, senderData);
if(completed != 2){
jam();
@@ -2009,25 +1913,25 @@ SumaParticipant::execSCAN_FRAGCONF(Signal* signal){
execSUB_SYNC_CONTINUE_CONF(signal);
#else
SubSyncContinueReq * const req = (SubSyncContinueReq*)signal->getDataPtrSend();
- req->subscriberData = subPtr.p->m_subscriberData;
+ req->subscriberData = syncPtr.p->m_senderData;
req->noOfRowsSent = completedOps;
- sendSignal(subPtr.p->m_subscriberRef, GSN_SUB_SYNC_CONTINUE_REQ, signal,
+ sendSignal(syncPtr.p->m_senderRef, GSN_SUB_SYNC_CONTINUE_REQ, signal,
SubSyncContinueReq::SignalLength, JBB);
#endif
- return;
+ DBUG_VOID_RETURN;
}
ndbrequire(completedOps == 0);
- SyncRecord* tmp = c_syncPool.getPtr(subPtr.p->m_syncPtrI);
-
- tmp->m_currentFragment++;
- tmp->nextScan(signal);
+ syncPtr.p->m_currentFragment++;
+ syncPtr.p->nextScan(signal);
+ DBUG_VOID_RETURN;
}
void
-SumaParticipant::execSUB_SYNC_CONTINUE_CONF(Signal* signal){
+Suma::execSUB_SYNC_CONTINUE_CONF(Signal* signal){
jamEntry();
+ ndbassert(signal->getNoOfSections() == 0);
CRASH_INSERTION(13012);
@@ -2053,29 +1957,43 @@ SumaParticipant::execSUB_SYNC_CONTINUE_CONF(Signal* signal){
}
void
-SumaParticipant::SyncRecord::completeScan(Signal* signal){
+Suma::SyncRecord::completeScan(Signal* signal, int error)
+{
jam();
+ DBUG_ENTER("Suma::SyncRecord::completeScan");
// m_tableList.release();
- SubscriptionPtr subPtr;
- suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
- ndbrequire(subPtr.p->m_syncPtrI == ptrI);
-
#if PRINT_ONLY
ndbout_c("GSN_SUB_SYNC_CONF (data)");
#else
- SubSyncConf * const conf = (SubSyncConf*)signal->getDataPtrSend();
- conf->subscriptionId = subPtr.p->m_subscriptionId;
- conf->subscriptionKey = subPtr.p->m_subscriptionKey;
- conf->part = SubscriptionData::TableData;
- conf->subscriberData = subPtr.p->m_subscriberData;
- suma.sendSignal(subPtr.p->m_subscriberRef, GSN_SUB_SYNC_CONF, signal,
- SubSyncConf::SignalLength, JBB);
+ if (error == 0)
+ {
+ SubSyncConf * const conf = (SubSyncConf*)signal->getDataPtrSend();
+ conf->senderRef = suma.reference();
+ conf->senderData = m_senderData;
+ suma.sendSignal(m_senderRef, GSN_SUB_SYNC_CONF, signal,
+ SubSyncConf::SignalLength, JBB);
+ }
+ else
+ {
+ SubSyncRef * const ref = (SubSyncRef*)signal->getDataPtrSend();
+ ref->senderRef = suma.reference();
+ ref->senderData = m_senderData;
+ suma.sendSignal(m_senderRef, GSN_SUB_SYNC_REF, signal,
+ SubSyncRef::SignalLength, JBB);
+ }
#endif
+
+ release();
+ suma.c_syncPool.release(ptrI);
+ DBUG_PRINT("info",("c_syncPool size: %d free: %d",
+ suma.c_syncPool.getSize(),
+ suma.c_syncPool.getNoOfFree()));
+ DBUG_VOID_RETURN;
}
void
-SumaParticipant::execSCAN_HBREP(Signal* signal){
+Suma::execSCAN_HBREP(Signal* signal){
jamEntry();
#if 0
ndbout << "execSCAN_HBREP" << endl << hex;
@@ -2097,502 +2015,714 @@ SumaParticipant::execSCAN_HBREP(Signal* signal){
*/
void
-SumaParticipant::execSUB_START_REQ(Signal* signal){
+Suma::execSUB_START_REQ(Signal* signal){
jamEntry();
- DBUG_ENTER("SumaParticipant::execSUB_START_REQ");
-
- CRASH_INSERTION(13013);
-
- if (c_restartLock) {
- jam();
- // ndbout_c("c_restartLock");
- if (RtoI(signal->getSendersBlockRef(), false) == RNIL) {
- jam();
- sendSubStartRef(signal, /** Error Code */ 0, true);
- DBUG_VOID_RETURN;
- }
- // only allow other Suma's in the nodegroup to come through for restart purposes
- }
-
- Subscription key;
-
+ ndbassert(signal->getNoOfSections() == 0);
+ DBUG_ENTER("Suma::execSUB_START_REQ");
SubStartReq * const req = (SubStartReq*)signal->getDataPtr();
+ CRASH_INSERTION(13013);
Uint32 senderRef = req->senderRef;
Uint32 senderData = req->senderData;
Uint32 subscriberData = req->subscriberData;
Uint32 subscriberRef = req->subscriberRef;
SubscriptionData::Part part = (SubscriptionData::Part)req->part;
+
+ Subscription key;
key.m_subscriptionId = req->subscriptionId;
key.m_subscriptionKey = req->subscriptionKey;
-
+
+ if (c_startup.m_restart_server_node_id &&
+ refToNode(senderRef) != c_startup.m_restart_server_node_id)
+ {
+ /**
+     * only allow the "restart_server" SUMA to come through
+ * for restart purposes
+ */
+ jam();
+ sendSubStartRef(signal, 1405);
+ DBUG_VOID_RETURN;
+ }
+
SubscriptionPtr subPtr;
if(!c_subscriptions.find(subPtr, key)){
jam();
- sendSubStartRef(signal, /** Error Code */ 0);
+ sendSubStartRef(signal, 1407);
DBUG_VOID_RETURN;
}
- Ptr<SyncRecord> syncPtr;
- c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
- if (syncPtr.p->m_locked) {
+ if (subPtr.p->m_state != Subscription::DEFINED) {
jam();
-#if 0
- ndbout_c("Locked");
-#endif
- sendSubStartRef(signal, /** Error Code */ 0, true);
+ DBUG_PRINT("info",("Locked"));
+ sendSubStartRef(signal, 1411);
DBUG_VOID_RETURN;
}
- syncPtr.p->m_locked = true;
SubscriberPtr subbPtr;
if(!c_subscriberPool.seize(subbPtr)){
jam();
- syncPtr.p->m_locked = false;
- sendSubStartRef(signal, /** Error Code */ 0);
+ sendSubStartRef(signal, 1412);
DBUG_VOID_RETURN;
}
+ DBUG_PRINT("info",("c_subscriberPool size: %d free: %d",
+ c_subscriberPool.getSize(),
+ c_subscriberPool.getNoOfFree()));
- Uint32 type = subPtr.p->m_subscriptionType;
+ c_subscriber_nodes.set(refToNode(subscriberRef));
- subbPtr.p->m_senderRef = senderRef;
- subbPtr.p->m_senderData = senderData;
+ // setup subscription record
+ subPtr.p->m_state = Subscription::LOCKED;
+ // store these here for later use
+ subPtr.p->m_senderRef = senderRef;
+ subPtr.p->m_senderData = senderData;
- switch (type) {
- case SubCreateReq::TableEvent:
- jam();
- // we want the data to return to the API not DICT
- subbPtr.p->m_subscriberRef = subscriberRef;
- // ndbout_c("start ref = %u", signal->getSendersBlockRef());
- // ndbout_c("ref = %u", subbPtr.p->m_subscriberRef);
- // we use the subscription id for now, should really be API choice
- subbPtr.p->m_subscriberData = subscriberData;
+ // setup subscriber record
+ subbPtr.p->m_senderRef = subscriberRef;
+ subbPtr.p->m_senderData = subscriberData;
+ subbPtr.p->m_subPtrI= subPtr.i;
-#if 0
- if (RtoI(signal->getSendersBlockRef(), false) == RNIL) {
- jam();
- for (Uint32 i = 0; i < c_noNodesInGroup; i++) {
- Uint32 ref = calcSumaBlockRef(c_nodesInGroup[i]);
- if (ref != reference()) {
- jam();
- sendSubStartReq(subPtr, subbPtr, signal, ref);
- } else
- jam();
- }
- }
-#endif
- break;
- case SubCreateReq::DatabaseSnapshot:
- case SubCreateReq::SelectiveTableSnapshot:
- jam();
- ndbrequire(false);
- //subbPtr.p->m_subscriberRef = GREP_REF;
- subbPtr.p->m_subscriberData = subPtr.p->m_subscriberData;
- break;
- case SubCreateReq::SingleTableScan:
- jam();
- subbPtr.p->m_subscriberRef = subPtr.p->m_subscriberRef;
- subbPtr.p->m_subscriberData = subPtr.p->m_subscriberData;
- }
-
- subbPtr.p->m_subPtrI = subPtr.i;
- subbPtr.p->m_firstGCI = RNIL;
- if (type == SubCreateReq::TableEvent)
- subbPtr.p->m_lastGCI = 0;
- else
- subbPtr.p->m_lastGCI = RNIL; // disable usage of m_lastGCI
- bool ok = false;
-
+ DBUG_PRINT("info",("subscriber: %u[%u,%u] subscription: %u[%u,%u] "
+ "tableId: %u id: %u key: %u",
+ subbPtr.i, subbPtr.p->m_senderRef, subbPtr.p->m_senderData,
+ subPtr.i, subPtr.p->m_senderRef, subPtr.p->m_senderData,
+ subPtr.p->m_tableId,
+ subPtr.p->m_subscriptionId,subPtr.p->m_subscriptionKey));
+
+ TablePtr tabPtr;
switch(part){
case SubscriptionData::MetaData:
- ok = true;
jam();
c_metaSubscribers.add(subbPtr);
sendSubStartComplete(signal, subbPtr, 0, part);
- break;
+ DBUG_VOID_RETURN;
case SubscriptionData::TableData:
- ok = true;
jam();
- c_prepDataSubscribers.add(subbPtr);
- syncPtr.p->startTrigger(signal);
- break;
+ initTable(signal,subPtr.p->m_tableId,tabPtr,subbPtr);
+ tabPtr.p->n_subscribers++;
+ DBUG_PRINT("info",("Suma::Table[%u]::n_subscribers: %u",
+ tabPtr.p->m_tableId, tabPtr.p->n_subscribers));
+ DBUG_VOID_RETURN;
}
- ndbrequire(ok);
- DBUG_VOID_RETURN;
+ ndbrequire(false);
}
void
-SumaParticipant::sendSubStartComplete(Signal* signal,
- SubscriberPtr subbPtr,
- Uint32 firstGCI,
- SubscriptionData::Part part){
+Suma::sendSubStartComplete(Signal* signal,
+ SubscriberPtr subbPtr,
+ Uint32 firstGCI,
+ SubscriptionData::Part part)
+{
jam();
+ DBUG_ENTER("Suma::sendSubStartComplete");
SubscriptionPtr subPtr;
c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI);
-
- Ptr<SyncRecord> syncPtr;
- c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
- syncPtr.p->m_locked = false;
-
- SubStartConf * const conf = (SubStartConf*)signal->getDataPtrSend();
+  ndbrequire( subPtr.p->m_state == Subscription::LOCKED );
+ subPtr.p->m_state = Subscription::DEFINED;
+ subPtr.p->n_subscribers++;
+
+ DBUG_PRINT("info",("subscriber: %u[%u,%u] subscription: %u[%u,%u] "
+ "tableId: %u[i=%u] id: %u key: %u",
+ subbPtr.i, subbPtr.p->m_senderRef, subbPtr.p->m_senderData,
+ subPtr.i, subPtr.p->m_senderRef, subPtr.p->m_senderData,
+ subPtr.p->m_tableId, subPtr.p->m_table_ptrI,
+ subPtr.p->m_subscriptionId,subPtr.p->m_subscriptionKey));
+
+ SubStartConf * const conf = (SubStartConf*)signal->getDataPtrSend();
conf->senderRef = reference();
- conf->senderData = subbPtr.p->m_senderData;
+ conf->senderData = subPtr.p->m_senderData;
conf->subscriptionId = subPtr.p->m_subscriptionId;
conf->subscriptionKey = subPtr.p->m_subscriptionKey;
- conf->firstGCI = firstGCI;
- conf->part = (Uint32) part;
-
- conf->subscriberData = subPtr.p->m_subscriberData;
- sendSignal(subPtr.p->m_subscriberRef, GSN_SUB_START_CONF, signal,
+ conf->firstGCI = firstGCI;
+ conf->part = (Uint32) part;
+
+ DBUG_PRINT("info",("subscriber: %u id: %u key: %u", subbPtr.i,
+ subPtr.p->m_subscriptionId,subPtr.p->m_subscriptionKey));
+ sendSignal(subPtr.p->m_senderRef, GSN_SUB_START_CONF, signal,
SubStartConf::SignalLength, JBB);
+ DBUG_VOID_RETURN;
}
-#if 0
void
-SumaParticipant::sendSubStartRef(SubscriptionPtr subPtr,
- Signal* signal, Uint32 errCode,
- bool temporary){
+Suma::sendSubStartRef(Signal* signal, Uint32 errCode)
+{
jam();
SubStartRef * ref = (SubStartRef *)signal->getDataPtrSend();
- xxx ref->senderRef = reference();
- xxx ref->senderData = subPtr.p->m_senderData;
- ref->subscriptionId = subPtr.p->m_subscriptionId;
- ref->subscriptionKey = subPtr.p->m_subscriptionKey;
- ref->part = (Uint32) subPtr.p->m_subscriptionType;
- ref->subscriberData = subPtr.p->m_subscriberData;
- ref->err = errCode;
- if (temporary) {
- jam();
- ref->setTemporary();
- }
+ ref->senderRef = reference();
+ ref->errorCode = errCode;
releaseSections(signal);
- sendSignal(subPtr.p->m_subscriberRef, GSN_SUB_START_REF, signal,
+ sendSignal(signal->getSendersBlockRef(), GSN_SUB_START_REF, signal,
SubStartRef::SignalLength, JBB);
}
-#endif
void
-SumaParticipant::sendSubStartRef(Signal* signal, Uint32 errCode,
- bool temporary){
+Suma::sendSubStartRef(Signal* signal,
+ SubscriberPtr subbPtr, Uint32 error,
+ SubscriptionData::Part part)
+{
jam();
- SubStartRef * ref = (SubStartRef *)signal->getDataPtrSend();
- ref->senderRef = reference();
- ref->err = errCode;
- if (temporary) {
- jam();
- ref->setTemporary();
- }
- releaseSections(signal);
- sendSignal(signal->getSendersBlockRef(), GSN_SUB_START_REF, signal,
+
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI);
+
+ ndbrequire( subPtr.p->m_state == Subscription::LOCKED );
+ subPtr.p->m_state = Subscription::DEFINED;
+
+ SubStartRef * ref= (SubStartRef *)signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = subPtr.p->m_senderData;
+ ref->subscriptionId = subPtr.p->m_subscriptionId;
+ ref->subscriptionKey = subPtr.p->m_subscriptionKey;
+ ref->part = (Uint32) part;
+ ref->errorCode = error;
+
+ sendSignal(subPtr.p->m_senderRef, GSN_SUB_START_REF, signal,
SubStartRef::SignalLength, JBB);
}
/**********************************************************
+ * Suma participant interface
*
- * Trigger admin interface
+ * Stopping and removing of subscriber
*
*/
void
-SumaParticipant::SyncRecord::startTrigger(Signal* signal){
- jam();
- m_currentTable = 0;
- m_latestTriggerId = RNIL;
- nextTrigger(signal);
+Suma::execSUB_STOP_REQ(Signal* signal){
+ jamEntry();
+ ndbassert(signal->getNoOfSections() == 0);
+ DBUG_ENTER("Suma::execSUB_STOP_REQ");
+
+ CRASH_INSERTION(13019);
+
+ SubStopReq * const req = (SubStopReq*)signal->getDataPtr();
+ Uint32 senderRef = req->senderRef;
+ Uint32 senderData = req->senderData;
+ Uint32 subscriberRef = req->subscriberRef;
+ Uint32 subscriberData = req->subscriberData;
+ SubscriptionPtr subPtr;
+ Subscription key;
+ key.m_subscriptionId = req->subscriptionId;
+ key.m_subscriptionKey = req->subscriptionKey;
+ Uint32 part = req->part;
+
+ if (key.m_subscriptionKey == 0 &&
+ key.m_subscriptionId == 0 &&
+ subscriberData == 0)
+ {
+ SubStopConf* conf = (SubStopConf*)signal->getDataPtrSend();
+
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ conf->subscriptionId = key.m_subscriptionId;
+ conf->subscriptionKey = key.m_subscriptionKey;
+ conf->subscriberData = subscriberData;
+
+ sendSignal(senderRef, GSN_SUB_STOP_CONF, signal,
+ SubStopConf::SignalLength, JBB);
+
+ removeSubscribersOnNode(signal, refToNode(senderRef));
+ DBUG_VOID_RETURN;
+ }
+
+ if(!c_subscriptions.find(subPtr, key)){
+ jam();
+ DBUG_PRINT("error", ("not found"));
+ sendSubStopRef(signal, 1407);
+ DBUG_VOID_RETURN;
+ }
+
+ if (subPtr.p->m_state == Subscription::LOCKED) {
+ jam();
+ DBUG_PRINT("error", ("locked"));
+ sendSubStopRef(signal, 1411);
+ DBUG_VOID_RETURN;
+ }
+
+ ndbrequire(part == SubscriptionData::TableData);
+
+ TablePtr tabPtr;
+ tabPtr.i = subPtr.p->m_table_ptrI;
+ if (!(tabPtr.p = c_tables.getPtr(tabPtr.i)) ||
+ tabPtr.p->m_tableId != subPtr.p->m_tableId)
+ {
+ jam();
+ DBUG_PRINT("error", ("no such table id %u[i=%u]",
+ subPtr.p->m_tableId, subPtr.p->m_table_ptrI));
+ sendSubStopRef(signal, 1417);
+ DBUG_VOID_RETURN;
+ }
+
+ DBUG_PRINT("info",("subscription: %u tableId: %u[i=%u] id: %u key: %u",
+ subPtr.i, subPtr.p->m_tableId, tabPtr.i,
+ subPtr.p->m_subscriptionId,subPtr.p->m_subscriptionKey));
+
+ SubscriberPtr subbPtr;
+ if (senderRef == reference()){
+ jam();
+ c_subscriberPool.getPtr(subbPtr, senderData);
+ ndbrequire(subbPtr.p->m_subPtrI == subPtr.i &&
+ subbPtr.p->m_senderRef == subscriberRef &&
+ subbPtr.p->m_senderData == subscriberData);
+ c_removeDataSubscribers.remove(subbPtr);
+ }
+ else
+ {
+ jam();
+ LocalDLList<Subscriber>
+ subscribers(c_subscriberPool,tabPtr.p->c_subscribers);
+
+ DBUG_PRINT("info",("search: subscription: %u, ref: %u, data: %d",
+ subPtr.i, subscriberRef, subscriberData));
+ for (subscribers.first(subbPtr);!subbPtr.isNull();subscribers.next(subbPtr))
+ {
+ jam();
+ DBUG_PRINT("info",
+ ("search: subscription: %u, ref: %u, data: %u, subscriber %u",
+ subbPtr.p->m_subPtrI, subbPtr.p->m_senderRef,
+ subbPtr.p->m_senderData, subbPtr.i));
+ if (subbPtr.p->m_subPtrI == subPtr.i &&
+ subbPtr.p->m_senderRef == subscriberRef &&
+ subbPtr.p->m_senderData == subscriberData)
+ {
+ jam();
+ DBUG_PRINT("info",("found"));
+ break;
+ }
+ }
+ /**
+ * If we didn't find anyone, send ref
+ */
+ if (subbPtr.isNull()) {
+ jam();
+ DBUG_PRINT("error", ("subscriber not found"));
+ sendSubStopRef(signal, 1407);
+ DBUG_VOID_RETURN;
+ }
+ subscribers.remove(subbPtr);
+ }
+
+ subPtr.p->m_senderRef = senderRef; // store ref to requestor
+ subPtr.p->m_senderData = senderData; // store ref to requestor
+
+ tabPtr.p->m_drop_subbPtr= subbPtr;
+
+ if (subPtr.p->m_state == Subscription::DEFINED)
+ {
+ jam();
+ subPtr.p->m_state = Subscription::LOCKED;
+ }
+
+ if (tabPtr.p->m_state == Table::DROPPED)
+ // not ALTERED here since trigger must be removed
+ {
+ jam();
+ tabPtr.p->n_subscribers--;
+ DBUG_PRINT("info",("Suma::Table[%u]::n_subscribers: %u",
+ tabPtr.p->m_tableId, tabPtr.p->n_subscribers));
+ tabPtr.p->checkRelease(*this);
+ sendSubStopComplete(signal, tabPtr.p->m_drop_subbPtr);
+ }
+ else
+ {
+ jam();
+ tabPtr.p->dropTrigger(signal,*this);
+ }
+ DBUG_VOID_RETURN;
}
void
-SumaParticipant::SyncRecord::nextTrigger(Signal* signal){
+Suma::sendSubStopComplete(Signal* signal, SubscriberPtr subbPtr)
+{
jam();
+ DBUG_ENTER("Suma::sendSubStopComplete");
+ CRASH_INSERTION(13020);
- TableList::DataBufferIterator it;
-
- if(!m_tableList.position(it, m_currentTable)){
- completeTrigger(signal);
- return;
- }
+ DBUG_PRINT("info",("removed subscriber: %i", subbPtr.i));
SubscriptionPtr subPtr;
- suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
- ndbrequire(subPtr.p->m_syncPtrI == ptrI);
- const Uint32 RT_BREAK = 48;
- Uint32 latestTriggerId = 0;
- for(Uint32 i = 0; i<RT_BREAK && !it.isNull(); i++, m_tableList.next(it)){
- TablePtr tabPtr;
-#if 0
- ndbout_c("nextTrigger tableid %u", *it.data);
-#endif
- ndbrequire(suma.c_tables.find(tabPtr, *it.data));
+ c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI);
- AttributeMask attrMask;
- createAttributeMask(attrMask, tabPtr.p);
+ Uint32 senderRef= subPtr.p->m_senderRef;
+ Uint32 senderData= subPtr.p->m_senderData;
- for(Uint32 j = 0; j<3; j++){
- i++;
- latestTriggerId = (tabPtr.p->m_schemaVersion << 18) |
- (j << 16) | tabPtr.p->m_tableId;
- if(tabPtr.p->m_hasTriggerDefined[j] == 0) {
- ndbrequire(tabPtr.p->m_triggerIds[j] == ILLEGAL_TRIGGER_ID);
-#if 0
- ndbout_c("DEFINING trigger on table %u[%u]", tabPtr.p->m_tableId, j);
-#endif
- CreateTrigReq * const req = (CreateTrigReq*)signal->getDataPtrSend();
- req->setUserRef(SUMA_REF);
- req->setConnectionPtr(ptrI);
- req->setTriggerType(TriggerType::SUBSCRIPTION_BEFORE);
- req->setTriggerActionTime(TriggerActionTime::TA_DETACHED);
- req->setMonitorReplicas(true);
- req->setMonitorAllAttributes(false);
- req->setReceiverRef(SUMA_REF);
- req->setTriggerId(latestTriggerId);
- req->setTriggerEvent((TriggerEvent::Value)j);
- req->setTableId(tabPtr.p->m_tableId);
- req->setAttributeMask(attrMask);
- suma.sendSignal(DBTUP_REF, GSN_CREATE_TRIG_REQ,
- signal, CreateTrigReq::SignalLength, JBB);
-
- } else {
- /**
- * Faking that a trigger has been created in order to
- * simulate the proper behaviour.
- * Perhaps this should be a dummy signal instead of
- * (ab)using CREATE_TRIG_CONF.
- */
- CreateTrigConf * conf = (CreateTrigConf*)signal->getDataPtrSend();
- conf->setConnectionPtr(ptrI);
- conf->setTableId(tabPtr.p->m_tableId);
- conf->setTriggerId(latestTriggerId);
- suma.sendSignal(SUMA_REF,GSN_CREATE_TRIG_CONF,
- signal, CreateTrigConf::SignalLength, JBB);
-
+ subPtr.p->n_subscribers--;
+ ndbassert( subPtr.p->m_state == Subscription::LOCKED ||
+ subPtr.p->m_state == Subscription::DROPPED );
+ if ( subPtr.p->m_state == Subscription::LOCKED )
+ {
+ jam();
+ subPtr.p->m_state = Subscription::DEFINED;
+ if (subPtr.p->n_subscribers == 0)
+ {
+ jam();
+#if 1
+ subPtr.p->m_table_ptrI = RNIL;
+#else
+ TablePtr tabPtr;
+ tabPtr.i = subPtr.p->m_table_ptrI;
+ if ((tabPtr.p= c_tablePool.getPtr(tabPtr.i)) &&
+ (tabPtr.p->m_state == Table::DROPPED ||
+ tabPtr.p->m_state == Table::ALTERED) &&
+ false)
+ {
+ // last subscriber, and table is dropped
+ // safe to drop subscription
+ c_subscriptions.release(subPtr);
+ DBUG_PRINT("info",("c_subscriptionPool size: %d free: %d",
+ c_subscriptionPool.getSize(),
+ c_subscriptionPool.getNoOfFree()));
+ }
+ else
+ {
+ subPtr.p->m_table_ptrI = RNIL;
}
+ ndbassert(tabPtr.p != 0);
+#endif
+ }
+ }
+ else if ( subPtr.p->n_subscribers == 0 )
+ {
+ // subscription is marked to be removed
+ // and there are no subscribers left
+ jam();
+ ndbassert(subPtr.p->m_state == Subscription::DROPPED);
+ completeSubRemove(subPtr);
+ }
+
+  // let the subscriber know that it has been stopped
+ {
+ SubTableData * data = (SubTableData*)signal->getDataPtrSend();
+ data->gci = m_last_complete_gci + 1; // XXX ???
+ data->tableId = 0;
+ data->operation = NdbDictionary::Event::_TE_STOP;
+ data->senderData = subbPtr.p->m_senderData;
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_TABLE_DATA, signal,
+ SubTableData::SignalLength, JBB);
+ }
+
+ SubStopConf * const conf = (SubStopConf*)signal->getDataPtrSend();
+
+ conf->senderRef= reference();
+ conf->senderData= senderData;
+
+ sendSignal(senderRef, GSN_SUB_STOP_CONF, signal,
+ SubStopConf::SignalLength, JBB);
+
+ c_subscriberPool.release(subbPtr);
+ DBUG_PRINT("info",("c_subscriberPool size: %d free: %d",
+ c_subscriberPool.getSize(),
+ c_subscriberPool.getNoOfFree()));
+ DBUG_VOID_RETURN;
+}
+
+void
+Suma::sendSubStopRef(Signal* signal, Uint32 errCode)
+{
+ jam();
+ DBUG_ENTER("Suma::sendSubStopRef");
+ SubStopRef * ref = (SubStopRef *)signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->errorCode = errCode;
+ sendSignal(signal->getSendersBlockRef(),
+ GSN_SUB_STOP_REF,
+ signal,
+ SubStopRef::SignalLength,
+ JBB);
+ DBUG_VOID_RETURN;
+}
+
+/**********************************************************
+ *
+ * Trigger admin interface
+ *
+ */
+
+int
+Suma::Table::setupTrigger(Signal* signal,
+ Suma &suma)
+{
+ jam();
+ DBUG_ENTER("Suma::Table::setupTrigger");
+
+ int ret= 0;
+
+ AttributeMask attrMask;
+ createAttributeMask(attrMask, suma);
+ for(Uint32 j = 0; j<3; j++)
+ {
+ Uint32 triggerId = (m_schemaVersion << 18) | (j << 16) | m_ptrI;
+ if(m_hasTriggerDefined[j] == 0)
+ {
+ suma.suma_ndbrequire(m_triggerIds[j] == ILLEGAL_TRIGGER_ID);
+ DBUG_PRINT("info",("DEFINING trigger on table %u[%u]", m_tableId, j));
+ CreateTrigReq * const req = (CreateTrigReq*)signal->getDataPtrSend();
+ req->setUserRef(SUMA_REF);
+ req->setConnectionPtr(m_ptrI);
+ req->setTriggerType(TriggerType::SUBSCRIPTION_BEFORE);
+ req->setTriggerActionTime(TriggerActionTime::TA_DETACHED);
+ req->setMonitorReplicas(true);
+ req->setMonitorAllAttributes(false);
+ req->setReceiverRef(SUMA_REF);
+ req->setTriggerId(triggerId);
+ req->setTriggerEvent((TriggerEvent::Value)j);
+ req->setTableId(m_tableId);
+ req->setAttributeMask(attrMask);
+ suma.sendSignal(DBTUP_REF, GSN_CREATE_TRIG_REQ,
+ signal, CreateTrigReq::SignalLength, JBB);
+ ret= 1;
+ }
+ else
+ {
+ m_hasTriggerDefined[j]++;
+ DBUG_PRINT("info",("REFCOUNT trigger on table %u[%u] %u",
+ m_tableId, j, m_hasTriggerDefined[j]));
}
- m_currentTable++;
}
- m_latestTriggerId = latestTriggerId;
+ DBUG_RETURN(ret);
}
void
-SumaParticipant::SyncRecord::createAttributeMask(AttributeMask& mask,
- Table * table){
+Suma::Table::createAttributeMask(AttributeMask& mask,
+ Suma &suma)
+{
jam();
mask.clear();
DataBuffer<15>::DataBufferIterator it;
- LocalDataBuffer<15> attrBuf(suma.c_dataBufferPool, table->m_attributes);
+ LocalDataBuffer<15> attrBuf(suma.c_dataBufferPool, m_attributes);
for(attrBuf.first(it); !it.curr.isNull(); attrBuf.next(it)){
mask.set(* it.data);
}
}
void
-SumaParticipant::SyncRecord::runCREATE_TRIG_CONF(Signal* signal){
- jam();
-
+Suma::execCREATE_TRIG_CONF(Signal* signal){
+ jamEntry();
+ DBUG_ENTER("Suma::execCREATE_TRIG_CONF");
+ ndbassert(signal->getNoOfSections() == 0);
CreateTrigConf * const conf = (CreateTrigConf*)signal->getDataPtr();
const Uint32 triggerId = conf->getTriggerId();
Uint32 type = (triggerId >> 16) & 0x3;
Uint32 tableId = conf->getTableId();
-
+
+
+ DBUG_PRINT("enter", ("type: %u tableId: %u[i=%u==%u]",
+ type, tableId,conf->getConnectionPtr(),triggerId & 0xFFFF));
+
TablePtr tabPtr;
- ndbrequire(suma.c_tables.find(tabPtr, tableId));
+ c_tables.getPtr(tabPtr, conf->getConnectionPtr());
+ ndbrequire(tabPtr.p->m_tableId == tableId);
+ ndbrequire(tabPtr.p->m_state == Table::DEFINING);
ndbrequire(type < 3);
tabPtr.p->m_triggerIds[type] = triggerId;
- tabPtr.p->m_hasTriggerDefined[type]++;
+ ndbrequire(tabPtr.p->m_hasTriggerDefined[type] == 0);
+ tabPtr.p->m_hasTriggerDefined[type] = 1;
- if(triggerId == m_latestTriggerId){
- jam();
- nextTrigger(signal);
+ if (type == 2)
+ {
+ completeAllSubscribers(signal, tabPtr);
+ completeInitTable(signal,tabPtr);
+ DBUG_VOID_RETURN;
}
+ DBUG_VOID_RETURN;
}
void
-SumaParticipant::SyncRecord::completeTrigger(Signal* signal){
- jam();
- SubscriptionPtr subPtr;
- CRASH_INSERTION(13013);
-#ifdef EVENT_PH3_DEBUG
- ndbout_c("SumaParticipant: trigger completed");
-#endif
- Uint32 gci;
- suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
- ndbrequire(subPtr.p->m_syncPtrI == ptrI);
+Suma::execCREATE_TRIG_REF(Signal* signal){
+ jamEntry();
+ DBUG_ENTER("Suma::execCREATE_TRIG_REF");
+ ndbassert(signal->getNoOfSections() == 0);
+ CreateTrigRef * const ref = (CreateTrigRef*)signal->getDataPtr();
+ const Uint32 triggerId = ref->getTriggerId();
+ Uint32 type = (triggerId >> 16) & 0x3;
+ Uint32 tableId = ref->getTableId();
+
+ DBUG_PRINT("enter", ("type: %u tableId: %u[i=%u==%u]",
+ type, tableId,ref->getConnectionPtr(),triggerId & 0xFFFF));
+
+ TablePtr tabPtr;
+ c_tables.getPtr(tabPtr, ref->getConnectionPtr());
+ ndbrequire(tabPtr.p->m_tableId == tableId);
+ ndbrequire(tabPtr.p->m_state == Table::DEFINING);
- SubscriberPtr subbPtr;
- {
- bool found = false;
+ tabPtr.p->m_error= ref->getErrorCode();
- for(suma.c_prepDataSubscribers.first(subbPtr);
- !subbPtr.isNull(); suma.c_prepDataSubscribers.next(subbPtr)) {
- jam();
- if(subbPtr.p->m_subPtrI == subPtr.i) {
- jam();
- found = true;
- break;
- }
- }
- ndbrequire(found);
- gci = suma.getFirstGCI(signal);
- subbPtr.p->m_firstGCI = gci;
- suma.c_prepDataSubscribers.remove(subbPtr);
- suma.c_dataSubscribers.add(subbPtr);
+ ndbrequire(type < 3);
+
+ if (type == 2)
+ {
+ completeAllSubscribers(signal, tabPtr);
+ completeInitTable(signal,tabPtr);
+ DBUG_VOID_RETURN;
}
- suma.sendSubStartComplete(signal, subbPtr, gci, SubscriptionData::TableData);
-}
-void
-SumaParticipant::SyncRecord::startDropTrigger(Signal* signal){
- jam();
- m_currentTable = 0;
- m_latestTriggerId = RNIL;
- nextDropTrigger(signal);
+ DBUG_VOID_RETURN;
}
void
-SumaParticipant::SyncRecord::nextDropTrigger(Signal* signal){
+Suma::Table::dropTrigger(Signal* signal,Suma& suma)
+{
jam();
-
- TableList::DataBufferIterator it;
+ DBUG_ENTER("Suma::dropTrigger");
- if(!m_tableList.position(it, m_currentTable)){
- completeDropTrigger(signal);
- return;
- }
-
- SubscriptionPtr subPtr;
- suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
- ndbrequire(subPtr.p->m_syncPtrI == ptrI);
-
- const Uint32 RT_BREAK = 48;
- Uint32 latestTriggerId = 0;
- for(Uint32 i = 0; i<RT_BREAK && !it.isNull(); i++, m_tableList.next(it)){
+ for(Uint32 j = 0; j<3; j++){
jam();
- TablePtr tabPtr;
-#if 0
- ndbout_c("nextDropTrigger tableid %u", *it.data);
-#endif
- ndbrequire(suma.c_tables.find(tabPtr, * it.data));
-
- for(Uint32 j = 0; j<3; j++){
+ suma.suma_ndbrequire(m_triggerIds[j] != ILLEGAL_TRIGGER_ID);
+ if(m_hasTriggerDefined[j] == 1) {
jam();
- ndbrequire(tabPtr.p->m_triggerIds[j] != ILLEGAL_TRIGGER_ID);
- i++;
- latestTriggerId = tabPtr.p->m_triggerIds[j];
- if(tabPtr.p->m_hasTriggerDefined[j] == 1) {
- jam();
- DropTrigReq * const req = (DropTrigReq*)signal->getDataPtrSend();
- req->setConnectionPtr(ptrI);
- req->setUserRef(SUMA_REF); // Sending to myself
- req->setRequestType(DropTrigReq::RT_USER);
- req->setTriggerType(TriggerType::SUBSCRIPTION_BEFORE);
- req->setTriggerActionTime(TriggerActionTime::TA_DETACHED);
- req->setIndexId(RNIL);
-
- req->setTableId(tabPtr.p->m_tableId);
- req->setTriggerId(latestTriggerId);
- req->setTriggerEvent((TriggerEvent::Value)j);
-
-#if 0
- ndbout_c("DROPPING trigger %u = %u %u %u on table %u[%u]",
- latestTriggerId,TriggerType::SUBSCRIPTION_BEFORE,
- TriggerActionTime::TA_DETACHED, j, tabPtr.p->m_tableId, j);
-#endif
- suma.sendSignal(DBTUP_REF, GSN_DROP_TRIG_REQ,
- signal, DropTrigReq::SignalLength, JBB);
- } else {
- jam();
- ndbrequire(tabPtr.p->m_hasTriggerDefined[j] > 1);
- /**
- * Faking that a trigger has been dropped in order to
- * simulate the proper behaviour.
- * Perhaps this should be a dummy signal instead of
- * (ab)using DROP_TRIG_CONF.
- */
- DropTrigConf * conf = (DropTrigConf*)signal->getDataPtrSend();
- conf->setConnectionPtr(ptrI);
- conf->setTableId(tabPtr.p->m_tableId);
- conf->setTriggerId(latestTriggerId);
- suma.sendSignal(SUMA_REF,GSN_DROP_TRIG_CONF,
- signal, DropTrigConf::SignalLength, JBB);
- }
+ DropTrigReq * const req = (DropTrigReq*)signal->getDataPtrSend();
+ req->setConnectionPtr(m_ptrI);
+ req->setUserRef(SUMA_REF); // Sending to myself
+ req->setRequestType(DropTrigReq::RT_USER);
+ req->setTriggerType(TriggerType::SUBSCRIPTION_BEFORE);
+ req->setTriggerActionTime(TriggerActionTime::TA_DETACHED);
+ req->setIndexId(RNIL);
+
+ req->setTableId(m_tableId);
+ req->setTriggerId(m_triggerIds[j]);
+ req->setTriggerEvent((TriggerEvent::Value)j);
+
+ DBUG_PRINT("info",("DROPPING trigger %u = %u %u %u on table %u[%u]",
+ m_triggerIds[j],
+ TriggerType::SUBSCRIPTION_BEFORE,
+ TriggerActionTime::TA_DETACHED,
+ j,
+ m_tableId, j));
+ suma.sendSignal(DBTUP_REF, GSN_DROP_TRIG_REQ,
+ signal, DropTrigReq::SignalLength, JBB);
+ } else {
+ jam();
+ suma.suma_ndbrequire(m_hasTriggerDefined[j] > 1);
+ runDropTrigger(signal,m_triggerIds[j],suma);
}
- m_currentTable++;
}
- m_latestTriggerId = latestTriggerId;
+ DBUG_VOID_RETURN;
}
void
-SumaParticipant::SyncRecord::runDROP_TRIG_REF(Signal* signal){
- jam();
+Suma::execDROP_TRIG_REF(Signal* signal){
+ jamEntry();
+ DBUG_ENTER("Suma::execDROP_TRIG_REF");
+ ndbassert(signal->getNoOfSections() == 0);
DropTrigRef * const ref = (DropTrigRef*)signal->getDataPtr();
- if (ref->getErrorCode() != DropTrigRef::TriggerNotFound){
+ if (ref->getErrorCode() != DropTrigRef::TriggerNotFound)
+ {
ndbrequire(false);
}
- const Uint32 triggerId = ref->getTriggerId();
- Uint32 tableId = ref->getTableId();
- runDropTrig(signal, triggerId, tableId);
+ TablePtr tabPtr;
+ c_tables.getPtr(tabPtr, ref->getConnectionPtr());
+ ndbrequire(ref->getTableId() == tabPtr.p->m_tableId);
+
+ tabPtr.p->runDropTrigger(signal, ref->getTriggerId(), *this);
+ DBUG_VOID_RETURN;
}
void
-SumaParticipant::SyncRecord::runDROP_TRIG_CONF(Signal* signal){
- jam();
-
+Suma::execDROP_TRIG_CONF(Signal* signal){
+ jamEntry();
+ DBUG_ENTER("Suma::execDROP_TRIG_CONF");
+ ndbassert(signal->getNoOfSections() == 0);
+
DropTrigConf * const conf = (DropTrigConf*)signal->getDataPtr();
- const Uint32 triggerId = conf->getTriggerId();
- Uint32 tableId = conf->getTableId();
- runDropTrig(signal, triggerId, tableId);
+ TablePtr tabPtr;
+ c_tables.getPtr(tabPtr, conf->getConnectionPtr());
+ ndbrequire(conf->getTableId() == tabPtr.p->m_tableId);
+
+ tabPtr.p->runDropTrigger(signal, conf->getTriggerId(),*this);
+ DBUG_VOID_RETURN;
}
void
-SumaParticipant::SyncRecord::runDropTrig(Signal* signal,
- Uint32 triggerId,
- Uint32 tableId){
+Suma::Table::runDropTrigger(Signal* signal,
+ Uint32 triggerId,
+ Suma &suma)
+{
+ jam();
Uint32 type = (triggerId >> 16) & 0x3;
-
- TablePtr tabPtr;
- ndbrequire(suma.c_tables.find(tabPtr, tableId));
- ndbrequire(type < 3);
- ndbrequire(tabPtr.p->m_triggerIds[type] == triggerId);
- tabPtr.p->m_hasTriggerDefined[type]--;
- if (tabPtr.p->m_hasTriggerDefined[type] == 0) {
+ suma.suma_ndbrequire(type < 3);
+ suma.suma_ndbrequire(m_triggerIds[type] == triggerId);
+ m_hasTriggerDefined[type]--;
+ if (m_hasTriggerDefined[type] == 0)
+ {
jam();
- tabPtr.p->m_triggerIds[type] = ILLEGAL_TRIGGER_ID;
+ m_triggerIds[type] = ILLEGAL_TRIGGER_ID;
}
- if(triggerId == m_latestTriggerId){
+ if( m_hasTriggerDefined[0] != m_hasTriggerDefined[1] ||
+ m_hasTriggerDefined[0] != m_hasTriggerDefined[2])
+ {
+ // more to come
jam();
- nextDropTrigger(signal);
+ return;
}
-}
-void
-SumaParticipant::SyncRecord::completeDropTrigger(Signal* signal){
- jam();
- SubscriptionPtr subPtr;
- CRASH_INSERTION(13014);
#if 0
ndbout_c("trigger completed");
#endif
- suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI);
- ndbrequire(subPtr.p->m_syncPtrI == ptrI);
- bool found = false;
- SubscriberPtr subbPtr;
- for(suma.c_prepDataSubscribers.first(subbPtr);
- !subbPtr.isNull(); suma.c_prepDataSubscribers.next(subbPtr)) {
+ n_subscribers--;
+ DBUG_PRINT("info",("Suma::Table[%u]::n_subscribers: %u",
+ m_tableId, n_subscribers));
+ checkRelease(suma);
+
+ suma.sendSubStopComplete(signal, m_drop_subbPtr);
+}
+
+void Suma::suma_ndbrequire(bool v) { ndbrequire(v); }
+
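+/**
+ * Release the Table record once the last subscriber is gone: verify that
+ * no triggers, subscribers or sync records remain, then return the record
+ * to c_tablePool and remove it from c_tables.
+ */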
+void
+Suma::Table::checkRelease(Suma &suma)
+{
+ jam();
+ DBUG_ENTER("Suma::Table::checkRelease");
+ if (n_subscribers == 0)
+ {
jam();
- if(subbPtr.p->m_subPtrI == subPtr.i) {
- jam();
- found = true;
- break;
+ suma.suma_ndbrequire(m_hasTriggerDefined[0] == 0);
+ suma.suma_ndbrequire(m_hasTriggerDefined[1] == 0);
+ suma.suma_ndbrequire(m_hasTriggerDefined[2] == 0);
+ if (!c_subscribers.isEmpty())
+ {
+ LocalDLList<Subscriber>
+ subscribers(suma.c_subscriberPool,c_subscribers);
+ SubscriberPtr subbPtr;
+ for (subscribers.first(subbPtr);!subbPtr.isNull();
+ subscribers.next(subbPtr))
+ {
+ jam();
+ DBUG_PRINT("info",("subscriber: %u", subbPtr.i));
+ }
+ suma.suma_ndbrequire(false);
+ }
+ if (!c_syncRecords.isEmpty())
+ {
+ LocalDLList<SyncRecord>
+ syncRecords(suma.c_syncPool,c_syncRecords);
+ Ptr<SyncRecord> syncPtr;
+ for (syncRecords.first(syncPtr);!syncPtr.isNull();
+ syncRecords.next(syncPtr))
+ {
+ jam();
+ DBUG_PRINT("info",("syncRecord: %u", syncPtr.i));
+ }
+ suma.suma_ndbrequire(false);
}
+ release(suma);
+ suma.c_tables.remove(m_ptrI);
+ suma.c_tablePool.release(m_ptrI);
+ DBUG_PRINT("info",("c_tablePool size: %d free: %d",
+ suma.c_tablePool.getSize(),
+ suma.c_tablePool.getNoOfFree()));
+ }
+ else
+ {
+ DBUG_PRINT("info",("n_subscribers: %d", n_subscribers));
}
- ndbrequire(found);
- suma.sendSubStopComplete(signal, subbPtr);
+ DBUG_VOID_RETURN;
}
/**********************************************************
@@ -2613,8 +2743,10 @@ static Uint32 b_buffer[SUMA_BUF_SZ];
static Uint32 b_trigBufferSize = 0;
void
-SumaParticipant::execTRANSID_AI(Signal* signal){
+Suma::execTRANSID_AI(Signal* signal)
+{
jamEntry();
+ DBUG_ENTER("Suma::execTRANSID_AI");
CRASH_INSERTION(13015);
TransIdAI * const data = (TransIdAI*)signal->getDataPtr();
@@ -2667,11 +2799,11 @@ SumaParticipant::execTRANSID_AI(Signal* signal){
* Initialize signal
*/
SubTableData * sdata = (SubTableData*)signal->getDataPtrSend();
- Uint32 ref = subPtr.p->m_subscriberRef;
+ Uint32 ref = subPtr.p->m_senderRef;
sdata->tableId = syncPtr.p->m_currentTableId;
- sdata->senderData = subPtr.p->m_subscriberData;
- sdata->operation = 3; // Scan
- sdata->gci = 1; // Undefined
+ sdata->senderData = subPtr.p->m_senderData;
+ sdata->operation = NdbDictionary::Event::_TE_SCAN; // Scan
+ sdata->gci = 0; // Undefined
#if PRINT_ONLY
ndbout_c("GSN_SUB_TABLE_DATA (scan) #attr: %d len: %d", attribs, sum);
#else
@@ -2686,6 +2818,8 @@ SumaParticipant::execTRANSID_AI(Signal* signal){
* Reset f_bufferLock
*/
f_bufferLock = 0;
+
+ DBUG_VOID_RETURN;
}
/**********************************************************
@@ -2695,9 +2829,11 @@ SumaParticipant::execTRANSID_AI(Signal* signal){
*/
void
-SumaParticipant::execTRIG_ATTRINFO(Signal* signal){
+Suma::execTRIG_ATTRINFO(Signal* signal)
+{
jamEntry();
-
+ DBUG_ENTER("Suma::execTRIG_ATTRINFO");
+
CRASH_INSERTION(13016);
TrigAttrInfo* const trg = (TrigAttrInfo*)signal->getDataPtr();
const Uint32 trigId = trg->getTriggerId();
@@ -2711,6 +2847,7 @@ SumaParticipant::execTRIG_ATTRINFO(Signal* signal){
memcpy(b_buffer + b_trigBufferSize, trg->getData(), 4 * dataLen);
b_trigBufferSize += dataLen;
+
// printf("before values %u %u %u\n",trigId, dataLen, b_trigBufferSize);
} else {
jam();
@@ -2727,6 +2864,9 @@ SumaParticipant::execTRIG_ATTRINFO(Signal* signal){
memcpy(f_buffer + f_trigBufferSize, trg->getData(), 4 * dataLen);
f_trigBufferSize += dataLen;
}
+
+
+ DBUG_VOID_RETURN;
}
#ifdef NODEFAIL_DEBUG2
@@ -2734,662 +2874,527 @@ static int theCounts[64] = {0};
#endif
Uint32
-Suma::getStoreBucket(Uint32 v)
-{
- // id will contain id to responsible suma or
- // RNIL if we don't have nodegroup info yet
-
- const Uint32 N = NO_OF_BUCKETS;
- const Uint32 D = v % N; // Distibution key
- return D;
-}
-
-Uint32
-Suma::getResponsibleSumaNodeId(Uint32 D)
+Suma::get_responsible_node(Uint32 bucket) const
{
// id will contain id to responsible suma or
// RNIL if we don't have nodegroup info yet
- Uint32 id;
-
- if (c_restartLock) {
- jam();
- // ndbout_c("c_restartLock");
- id = RNIL;
- } else {
- jam();
- id = RNIL;
- const Uint32 n = c_noNodesInGroup; // Number nodes in node group
- const Uint32 C1 = D / n;
- const Uint32 C2 = D - C1*n; // = D % n;
- const Uint32 C = C2 + C1 % n;
- for (Uint32 i = 0; i < n; i++) {
- jam();
- id = c_nodesInGroup[(C + i) % n];
- if (c_aliveNodes.get(id) &&
- !c_preparingNodes.get(id)) {
- jam();
- break;
- }//if
+ jam();
+ Uint32 node;
+ const Bucket* ptr= c_buckets + bucket;
+ for(Uint32 i = 0; i<MAX_REPLICAS; i++)
+ {
+ node= ptr->m_nodes[i];
+ if(c_alive_nodes.get(node))
+ {
+ break;
}
+ }
+
+
#ifdef NODEFAIL_DEBUG2
- theCounts[id]++;
+ if(node != 0)
+ {
+ theCounts[node]++;
ndbout_c("Suma:responsible n=%u, D=%u, id = %u, count=%u",
- n,D, id, theCounts[id]);
-#endif
+             MAX_REPLICAS, bucket, node, theCounts[node]);
}
- return id;
+#endif
+ return node;
}
-Uint32
-SumaParticipant::decideWhoToSend(Uint32 nBucket, Uint32 gci){
- bool replicaFlag = true;
- Uint32 nId = RNIL;
-
- // bucket active/not active set by GCP_COMPLETE
- if (c_buckets[nBucket].active) {
- if (c_buckets[nBucket].handover && c_buckets[nBucket].handoverGCI <= gci) {
- jam();
- replicaFlag = true; // let the other node send this
- nId = RNIL;
- // mark this as started, if we get a node failiure now we have some lost stuff
- c_buckets[nBucket].handover_started = true;
- } else {
- jam();
- replicaFlag = false;
- nId = refToNode(reference());
- }
- } else {
- nId = getResponsibleSumaNodeId(nBucket);
- replicaFlag = !(nId == refToNode(reference()));
-
- if (!replicaFlag) {
- if (!c_buckets[nBucket].handover) {
- jam();
- // appearently a node has failed and we are taking over sending
- // from that bucket. Now we need to go back to latest completed
- // GCI. Handling will depend on Subscriber and Subscription
-
- // TODO, for now we make an easy takeover
- if (gci < c_nodeFailGCI)
- c_lastInconsistentGCI = gci;
-
- // we now have responsability for this bucket and we're actively
- // sending from that
- c_buckets[nBucket].active = true;
-#ifdef HANDOVER_DEBUG
- ndbout_c("Takeover Bucket %u", nBucket);
-#endif
- } else if (c_buckets[nBucket].handoverGCI > gci) {
- jam();
- replicaFlag = true; // handover going on, but don't start sending yet
- nId = RNIL;
- } else {
- jam();
-#ifdef HANDOVER_DEBUG
- ndbout_c("Possible error: Will send from GCI = %u", gci);
-#endif
- }
+Uint32
+Suma::get_responsible_node(Uint32 bucket, const NdbNodeBitmask& mask) const
+{
+ jam();
+ Uint32 node;
+ const Bucket* ptr= c_buckets + bucket;
+ for(Uint32 i = 0; i<MAX_REPLICAS; i++)
+ {
+ node= ptr->m_nodes[i];
+ if(mask.get(node))
+ {
+ return node;
}
}
-#ifdef NODEFAIL_DEBUG2
- ndbout_c("Suma:bucket %u, responsible id = %u, replicaFlag = %u",
- nBucket, nId, (Uint32)replicaFlag);
-#endif
- return replicaFlag;
+ return 0;
}
-void
-SumaParticipant::execFIRE_TRIG_ORD(Signal* signal){
- jamEntry();
- DBUG_ENTER("SumaParticipant::execFIRE_TRIG_ORD");
- CRASH_INSERTION(13016);
- FireTrigOrd* const trg = (FireTrigOrd*)signal->getDataPtr();
- const Uint32 trigId = trg->getTriggerId();
- const Uint32 hashValue = trg->getHashValue();
- const Uint32 gci = trg->getGCI();
- const Uint32 event = trg->getTriggerEvent();
- const Uint32 triggerId = trg->getTriggerId();
- Uint32 tableId = triggerId & 0xFFFF;
-
- ndbrequire(f_bufferLock == trigId);
-
-#ifdef EVENT_DEBUG2
- ndbout_c("SumaParticipant::execFIRE_TRIG_ORD");
-#endif
-
- Uint32 sz = trg->getNoOfPrimaryKeyWords()+trg->getNoOfAfterValueWords();
- ndbrequire(sz == f_trigBufferSize);
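+/**
+ * For a bucket in switchover, decide whether this node sends the data for
+ * the given gci: a bucket this node is starting or taking over is sent by
+ * this node only from the switchover gci onwards, a bucket being handed
+ * away only before it.
+ */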
+bool
+Suma::check_switchover(Uint32 bucket, Uint32 gci)
+{
+ const Uint32 send_mask = (Bucket::BUCKET_STARTING | Bucket::BUCKET_TAKEOVER);
+ bool send = c_buckets[bucket].m_state & send_mask;
+ ndbassert(m_switchover_buckets.get(bucket));
+ if(unlikely(gci >= c_buckets[bucket].m_switchover_gci))
+ {
+ return send;
+ }
+ return !send;
+}
- /**
- * Reformat as "all headers" + "all data"
- */
- Uint32 dataLen = 0;
- Uint32 noOfAttrs = 0;
- Uint32 * src = f_buffer;
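+/**
+ * Repack the interleaved "attribute header + data" words in src_1 into two
+ * linear sections (all headers, then all data) built in signal memory, and
+ * attach src_2 (the before values) as a third section when non-empty.
+ * Returns the number of sections used (2 or 3).
+ */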
+static
+Uint32
+reformat(Signal* signal, LinearSectionPtr ptr[3],
+ Uint32 * src_1, Uint32 sz_1,
+ Uint32 * src_2, Uint32 sz_2)
+{
+ Uint32 noOfAttrs = 0, dataLen = 0;
Uint32 * headers = signal->theData + 25;
Uint32 * dst = signal->theData + 25 + MAX_ATTRIBUTES_IN_TABLE;
-
- LinearSectionPtr ptr[3];
- int nptr;
-
+
ptr[0].p = headers;
ptr[1].p = dst;
-
- while(sz > 0){
+
+ while(sz_1 > 0){
jam();
- Uint32 tmp = * src ++;
+ Uint32 tmp = * src_1 ++;
* headers ++ = tmp;
Uint32 len = AttributeHeader::getDataSize(tmp);
- memcpy(dst, src, 4 * len);
+ memcpy(dst, src_1, 4 * len);
dst += len;
- src += len;
-
+ src_1 += len;
+
noOfAttrs++;
dataLen += len;
- sz -= (1 + len);
+ sz_1 -= (1 + len);
}
- ndbrequire(sz == 0);
-
+ assert(sz_1 == 0);
+
ptr[0].sz = noOfAttrs;
ptr[1].sz = dataLen;
+
+ ptr[2].p = src_2;
+ ptr[2].sz = sz_2;
+
+ return sz_2 > 0 ? 3 : 2;
+}
- if (b_trigBufferSize > 0) {
- jam();
- ptr[2].p = b_buffer;
- ptr[2].sz = b_trigBufferSize;
- nptr = 3;
- } else {
- jam();
- nptr = 2;
- }
-
- // right now only for tableEvent
- bool replicaFlag = decideWhoToSend(getStoreBucket(hashValue), gci);
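+/**
+ * A subscription trigger fired in TUP.  If this node is responsible for the
+ * row's bucket, the event is reformatted into header/data sections and sent
+ * to all subscribers of the table; otherwise it is written to the bucket's
+ * event buffer.
+ */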
+void
+Suma::execFIRE_TRIG_ORD(Signal* signal)
+{
+ jamEntry();
+ DBUG_ENTER("Suma::execFIRE_TRIG_ORD");
+ ndbassert(signal->getNoOfSections() == 0);
+
+ CRASH_INSERTION(13016);
+ FireTrigOrd* const trg = (FireTrigOrd*)signal->getDataPtr();
+ const Uint32 trigId = trg->getTriggerId();
+ const Uint32 hashValue = trg->getHashValue();
+ const Uint32 gci = trg->getGCI();
+ const Uint32 event = trg->getTriggerEvent();
+ TablePtr tabPtr;
+ tabPtr.i = trigId & 0xFFFF;
+ DBUG_PRINT("enter",("tabPtr.i=%u", tabPtr.i));
+ ndbrequire(f_bufferLock == trigId);
/**
- * Signal to subscriber(s)
+ * Reset f_bufferLock
*/
- SubTableData * data = (SubTableData*)signal->getDataPtrSend();//trg;
- data->gci = gci;
- data->tableId = tableId;
- data->operation = event;
- data->noOfAttributes = noOfAttrs;
- data->dataSize = dataLen;
-
- SubscriberPtr subbPtr;
- for(c_dataSubscribers.first(subbPtr); !subbPtr.isNull();
- c_dataSubscribers.next(subbPtr)){
- if (subbPtr.p->m_firstGCI > gci) {
-#ifdef EVENT_DEBUG
- ndbout_c("m_firstGCI = %u, gci = %u", subbPtr.p->m_firstGCI, gci);
-#endif
- jam();
- // we're either restarting or it's a newly created subscriber
- // and waiting for the right gci
- continue;
- }
-
- jam();
+ f_bufferLock = 0;
+ b_bufferLock = 0;
- const Uint32 ref = subbPtr.p->m_subscriberRef;
- // ndbout_c("ref = %u", ref);
- const Uint32 subdata = subbPtr.p->m_subscriberData;
- data->senderData = subdata;
- /*
- * get subscription ptr for this subscriber
+ Uint32 bucket= hashValue % c_no_of_buckets;
+ m_max_seen_gci = (gci > m_max_seen_gci ? gci : m_max_seen_gci);
+ if(m_active_buckets.get(bucket) ||
+ (m_switchover_buckets.get(bucket) && (check_switchover(bucket, gci))))
+ {
+ m_max_sent_gci = (gci > m_max_sent_gci ? gci : m_max_sent_gci);
+ Uint32 sz = trg->getNoOfPrimaryKeyWords()+trg->getNoOfAfterValueWords();
+ ndbrequire(sz == f_trigBufferSize);
+
+ LinearSectionPtr ptr[3];
+ const Uint32 nptr= reformat(signal, ptr,
+ f_buffer, sz, b_buffer, b_trigBufferSize);
+
+ /**
+ * Signal to subscriber(s)
*/
- SubscriptionPtr subPtr;
- c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI);
-
- if(!subPtr.p->m_tables[tableId]) {
- jam();
- continue;
- //continue in for-loop if the table is not part of
- //the subscription. Otherwise, send data to subscriber.
- }
-
- if (subPtr.p->m_subscriptionType == SubCreateReq::TableEvent) {
- if (replicaFlag) {
- jam();
- c_failoverBuffer.subTableData(gci,NULL,0);
- continue;
- }
- jam();
- Uint32 tmp = data->logType;
- if (c_lastInconsistentGCI == data->gci) {
- data->setGCINotConsistent();
- }
-
-#ifdef HANDOVER_DEBUG
- {
- static int aLongGCIName = 0;
- if (data->gci != aLongGCIName) {
- aLongGCIName = data->gci;
- ndbout_c("sent from GCI = %u", aLongGCIName);
- }
- }
-#endif
- DBUG_PRINT("info",("GSN_SUB_TABLE_DATA to node %d", refToNode(ref)));
- sendSignal(ref, GSN_SUB_TABLE_DATA, signal,
- SubTableData::SignalLength, JBB, ptr, nptr);
- data->logType = tmp;
- } else {
- ndbassert(refToNode(ref) == 0 || refToNode(ref) == getOwnNodeId());
- jam();
-#if PRINT_ONLY
- ndbout_c("GSN_SUB_TABLE_DATA to %s: op: %d #attr: %d len: %d",
- getBlockName(refToBlock(ref)),
- noOfAttrs, dataLen);
+ ndbrequire(tabPtr.p = c_tablePool.getPtr(tabPtr.i));
-#else
-#ifdef HANDOVER_DEBUG
+ SubTableData * data = (SubTableData*)signal->getDataPtrSend();//trg;
+ data->gci = gci;
+ data->tableId = tabPtr.p->m_tableId;
+ data->operation = event;
+ data->logType = 0;
+
+ {
+ LocalDLList<Subscriber> list(c_subscriberPool,tabPtr.p->c_subscribers);
+ SubscriberPtr subbPtr;
+ for(list.first(subbPtr); !subbPtr.isNull(); list.next(subbPtr))
{
- static int aLongGCIName2 = 0;
- if (data->gci != aLongGCIName2) {
- aLongGCIName2 = data->gci;
- ndbout_c("(EXECUTE_DIRECT) sent from GCI = %u to %u", aLongGCIName2, ref);
- }
+ DBUG_PRINT("info",("GSN_SUB_TABLE_DATA to node %d",
+ refToNode(subbPtr.p->m_senderRef)));
+ data->senderData = subbPtr.p->m_senderData;
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_TABLE_DATA, signal,
+ SubTableData::SignalLength, JBB, ptr, nptr);
}
-#endif
- EXECUTE_DIRECT(refToBlock(ref), GSN_SUB_TABLE_DATA, signal,
- SubTableData::SignalLength);
- jamEntry();
-#endif
+ }
+ }
+ else
+ {
+ Uint32* dst;
+ Uint32 sz = f_trigBufferSize + b_trigBufferSize + 2;
+ if((dst = get_buffer_ptr(signal, bucket, gci, sz)))
+ {
+ * dst++ = tabPtr.i;
+ * dst++ = (event << 16) | f_trigBufferSize;
+ memcpy(dst, f_buffer, f_trigBufferSize << 2);
+ dst += f_trigBufferSize;
+ memcpy(dst, b_buffer, b_trigBufferSize << 2);
}
}
- /**
- * Reset f_bufferLock
- */
- f_bufferLock = 0;
- b_bufferLock = 0;
-
DBUG_VOID_RETURN;
}
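+/**
+ * A global checkpoint has completed.  Finish any bucket switchovers
+ * scheduled for this gci, forward SUB_GCP_COMPLETE_REP to all subscriber
+ * API nodes and keep a Gcp_record until every node has acknowledged it;
+ * inactive buckets with buffered data also get a GCP marker.
+ */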
void
-SumaParticipant::execSUB_GCP_COMPLETE_REP(Signal* signal){
+Suma::execSUB_GCP_COMPLETE_REP(Signal* signal)
+{
jamEntry();
+ ndbassert(signal->getNoOfSections() == 0);
SubGcpCompleteRep * rep = (SubGcpCompleteRep*)signal->getDataPtrSend();
-
- Uint32 gci = rep->gci;
- c_lastCompleteGCI = gci;
+ Uint32 gci = m_last_complete_gci = rep->gci;
+ m_max_seen_gci = (gci > m_max_seen_gci ? gci : m_max_seen_gci);
/**
- * Signal to subscriber(s)
+ *
*/
+ if(!m_switchover_buckets.isclear())
+ {
+ NdbNodeBitmask takeover_nodes;
+ NdbNodeBitmask handover_nodes;
+ Uint32 i = m_switchover_buckets.find(0);
+ for(; i != Bucket_mask::NotFound; i = m_switchover_buckets.find(i + 1))
+ {
+ if(c_buckets[i].m_switchover_gci == gci)
+ {
+ Uint32 state = c_buckets[i].m_state;
+ m_switchover_buckets.clear(i);
+ printf("switchover complete bucket %d state: %x", i, state);
+ if(state & Bucket::BUCKET_STARTING)
+ {
+ /**
+ * NR case
+ */
+ m_active_buckets.set(i);
+ c_buckets[i].m_state &= ~(Uint32)Bucket::BUCKET_STARTING;
+ ndbout_c("starting");
+ m_gcp_complete_rep_count = 1;
+ }
+ else if(state & Bucket::BUCKET_TAKEOVER)
+ {
+ /**
+ * NF case
+ */
+ Bucket* bucket= c_buckets + i;
+ Page_pos pos= bucket->m_buffer_head;
+ ndbrequire(pos.m_max_gci < gci);
+
+ Buffer_page* page= (Buffer_page*)(m_tup->page+pos.m_page_id);
+ ndbout_c("takeover %d", pos.m_page_id);
+ page->m_max_gci = pos.m_max_gci;
+ page->m_words_used = pos.m_page_pos;
+ page->m_next_page = RNIL;
+ memset(&bucket->m_buffer_head, 0, sizeof(bucket->m_buffer_head));
+ bucket->m_buffer_head.m_page_id = RNIL;
+ bucket->m_buffer_head.m_page_pos = Buffer_page::DATA_WORDS + 1;
+
+ m_active_buckets.set(i);
+ c_buckets[i].m_state &= ~(Uint32)Bucket::BUCKET_TAKEOVER;
+ takeover_nodes.set(c_buckets[i].m_switchover_node);
+ }
+ else
+ {
+ /**
+ * NR, living node
+ */
+ ndbrequire(state & Bucket::BUCKET_HANDOVER);
+ c_buckets[i].m_state &= ~(Uint32)Bucket::BUCKET_HANDOVER;
+ handover_nodes.set(c_buckets[i].m_switchover_node);
+ ndbout_c("handover");
+ }
+ }
+ }
+ ndbassert(handover_nodes.count() == 0 ||
+ m_gcp_complete_rep_count > handover_nodes.count());
+ m_gcp_complete_rep_count -= handover_nodes.count();
+ m_gcp_complete_rep_count += takeover_nodes.count();
+
+ if(getNodeState().startLevel == NodeState::SL_STARTING &&
+ m_switchover_buckets.isclear() &&
+ c_startup.m_handover_nodes.isclear())
+ {
+ sendSTTORRY(signal);
+ }
+ }
- SubscriberPtr subbPtr;
- SubscriptionPtr subPtr;
- c_dataSubscribers.first(subbPtr);
- for(; !subbPtr.isNull(); c_dataSubscribers.next(subbPtr)){
+ if(ERROR_INSERTED(13010))
+ {
+ CLEAR_ERROR_INSERT_VALUE;
+ ndbout_c("Don't send GCP_COMPLETE_REP(%d)", gci);
+ return;
+ }
- if (subbPtr.p->m_firstGCI > gci) {
- jam();
- // we don't send SUB_GCP_COMPLETE_REP for incomplete GCI's
+ /**
+ * Signal to subscribers
+ */
+ rep->gci = gci;
+ rep->senderRef = reference();
+ rep->gcp_complete_rep_count = m_gcp_complete_rep_count;
+
+ if(m_gcp_complete_rep_count && !c_subscriber_nodes.isclear())
+ {
+ NodeReceiverGroup rg(API_CLUSTERMGR, c_subscriber_nodes);
+ sendSignal(rg, GSN_SUB_GCP_COMPLETE_REP, signal,
+ SubGcpCompleteRep::SignalLength, JBB);
+
+ Ptr<Gcp_record> gcp;
+ if(c_gcp_list.seize(gcp))
+ {
+ gcp.p->m_gci = gci;
+ gcp.p->m_subscribers = c_subscriber_nodes;
+ }
+ }
+
+ /**
+ * Add GCP COMPLETE REP to buffer
+ */
+ for(Uint32 i = 0; i<c_no_of_buckets; i++)
+ {
+ if(m_active_buckets.get(i))
continue;
+
+ if(c_buckets[i].m_buffer_tail != RNIL)
+ {
+ Uint32* dst;
+ get_buffer_ptr(signal, i, gci, 0);
}
-
- const Uint32 ref = subbPtr.p->m_subscriberRef;
- rep->senderRef = ref;
- rep->senderData = subbPtr.p->m_subscriberData;
-
- c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI);
-#if PRINT_ONLY
- ndbout_c("GSN_SUB_GCP_COMPLETE_REP to %s:",
- getBlockName(refToBlock(ref)));
-#else
-
- CRASH_INSERTION(13018);
-
- if (subPtr.p->m_subscriptionType == SubCreateReq::TableEvent)
- {
- jam();
- sendSignal(ref, GSN_SUB_GCP_COMPLETE_REP, signal,
- SubGcpCompleteRep::SignalLength, JBB);
- }
- else
- {
- jam();
- ndbassert(refToNode(ref) == 0 || refToNode(ref) == getOwnNodeId());
- EXECUTE_DIRECT(refToBlock(ref), GSN_SUB_GCP_COMPLETE_REP, signal,
- SubGcpCompleteRep::SignalLength);
- jamEntry();
- }
-#endif
}
- if (c_handoverToDo) {
- jam();
- c_handoverToDo = false;
- for( int i = 0; i < NO_OF_BUCKETS; i++) {
- if (c_buckets[i].handover) {
- if (c_buckets[i].handoverGCI > gci) {
- jam();
- c_handoverToDo = true; // still waiting for the right GCI
- break; /* since all handover should happen at the same time
- * we can break here
- */
- } else {
- c_buckets[i].handover = false;
-#ifdef HANDOVER_DEBUG
- ndbout_c("Handover Bucket %u", i);
-#endif
- if (getResponsibleSumaNodeId(i) == refToNode(reference())) {
- // my bucket to be handed over to me
- ndbrequire(!c_buckets[i].active);
- jam();
- c_buckets[i].active = true;
- } else {
- // someone else's bucket to handover to
- ndbrequire(c_buckets[i].active);
- jam();
- c_buckets[i].active = false;
- }
- }
- }
- }
+ if(gci == m_out_of_buffer_gci)
+ {
+ infoEvent("Reenable event buffer");
+ m_out_of_buffer_gci = 0;
}
}
-/***********************************************************
- *
- * Embryo to syncronize the Suma's so as to know if a subscriber
- * has received a GCP_COMPLETE from all suma's or not
- *
- */
-
void
-SumaParticipant::runSUB_GCP_COMPLETE_ACC(Signal* signal){
- jam();
-
- SubGcpCompleteAcc * const acc = (SubGcpCompleteAcc*)signal->getDataPtr();
+Suma::execCREATE_TAB_CONF(Signal *signal)
+{
+ jamEntry();
+ DBUG_ENTER("Suma::execCREATE_TAB_CONF");
- Uint32 gci = acc->rep.gci;
+#if 0
+ CreateTabConf * const conf = (CreateTabConf*)signal->getDataPtr();
+ Uint32 tableId = conf->senderData;
-#ifdef EVENT_DEBUG
- ndbout_c("SumaParticipant::runSUB_GCP_COMPLETE_ACC gci = %u", gci);
+ TablePtr tabPtr;
+ initTable(signal,tableId,tabPtr);
#endif
-
- c_failoverBuffer.subGcpCompleteRep(gci);
+ DBUG_VOID_RETURN;
}
void
-Suma::execSUB_GCP_COMPLETE_ACC(Signal* signal){
+Suma::execDROP_TAB_CONF(Signal *signal)
+{
jamEntry();
+ DBUG_ENTER("Suma::execDROP_TAB_CONF");
+ ndbassert(signal->getNoOfSections() == 0);
- if (RtoI(signal->getSendersBlockRef(), false) != RNIL) {
- jam();
- // Ack from other SUMA
- runSUB_GCP_COMPLETE_ACC(signal);
- return;
+ DropTabConf * const conf = (DropTabConf*)signal->getDataPtr();
+ Uint32 senderRef= conf->senderRef;
+ Uint32 tableId= conf->tableId;
+
+ TablePtr tabPtr;
+ if (!c_tables.find(tabPtr, tableId) ||
+ tabPtr.p->m_state == Table::DROPPED ||
+ tabPtr.p->m_state == Table::ALTERED)
+ {
+ DBUG_VOID_RETURN;
}
- jam();
- // Ack from User and not an acc from other SUMA, redistribute in nodegroup
+ DBUG_PRINT("info",("drop table id: %d[i=%u]", tableId, tabPtr.i));
- SubGcpCompleteAcc * const acc = (SubGcpCompleteAcc*)signal->getDataPtr();
- Uint32 gci = acc->rep.gci;
- Uint32 senderRef = acc->rep.senderRef;
- Uint32 subscriberData = acc->rep.subscriberData;
-
-#ifdef EVENT_DEBUG
- ndbout_c("Suma::execSUB_GCP_COMPLETE_ACC gci = %u", gci);
-#endif
- bool moreToCome = false;
+ tabPtr.p->m_state = Table::DROPPED;
+ tabPtr.p->m_hasTriggerDefined[0] = 0;
+ tabPtr.p->m_hasTriggerDefined[1] = 0;
+ tabPtr.p->m_hasTriggerDefined[2] = 0;
+ tabPtr.p->m_triggerIds[0] = ILLEGAL_TRIGGER_ID;
+ tabPtr.p->m_triggerIds[1] = ILLEGAL_TRIGGER_ID;
+ tabPtr.p->m_triggerIds[2] = ILLEGAL_TRIGGER_ID;
- SubscriberPtr subbPtr;
- for(c_dataSubscribers.first(subbPtr);
- !subbPtr.isNull(); c_dataSubscribers.next(subbPtr)){
-#ifdef EVENT_DEBUG
- ndbout_c("Suma::execSUB_GCP_COMPLETE_ACC %u == %u && %u == %u",
- subbPtr.p->m_subscriberRef,
- senderRef,
- subbPtr.p->m_subscriberData,
- subscriberData);
-#endif
- if (subbPtr.p->m_subscriberRef == senderRef &&
- subbPtr.p->m_subscriberData == subscriberData) {
- jam();
-#ifdef EVENT_DEBUG
- ndbout_c("Suma::execSUB_GCP_COMPLETE_ACC gci = FOUND SUBSCRIBER");
-#endif
- subbPtr.p->m_lastGCI = gci;
- } else if (subbPtr.p->m_lastGCI < gci) {
- jam();
- if (subbPtr.p->m_firstGCI <= gci)
- moreToCome = true;
- } else
- jam();
+ if (senderRef == 0)
+ {
+ DBUG_VOID_RETURN;
}
+ // dict coordinator sends info to API
- if (!moreToCome) {
- // tell the other SUMA's that I'm done with this GCI
- jam();
- for (Uint32 i = 0; i < c_noNodesInGroup; i++) {
- Uint32 id = c_nodesInGroup[i];
- Uint32 ref = calcSumaBlockRef(id);
- if ((ref != reference()) && c_aliveNodes.get(id)) {
- jam();
- sendSignal(ref, GSN_SUB_GCP_COMPLETE_ACC, signal,
- SubGcpCompleteAcc::SignalLength, JBB);
- } else
+ SubTableData * data = (SubTableData*)signal->getDataPtrSend();
+ data->gci = m_last_complete_gci+1;
+ data->tableId = tableId;
+ data->operation = NdbDictionary::Event::_TE_DROP;
+ data->req_nodeid = refToNode(senderRef);
+
+ {
+ LocalDLList<Subscriber> subbs(c_subscriberPool,tabPtr.p->c_subscribers);
+ SubscriberPtr subbPtr;
+ for(subbs.first(subbPtr);!subbPtr.isNull();subbs.next(subbPtr))
+ {
+ jam();
+ /*
+ * get subscription ptr for this subscriber
+ */
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI);
+ if(subPtr.p->m_subscriptionType != SubCreateReq::TableEvent) {
jam();
+ continue;
+        // skip this subscriber if its subscription is not a table-event
+        // subscription; otherwise send the drop notification to it.
+ }
+ data->senderData= subbPtr.p->m_senderData;
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_TABLE_DATA, signal,
+ SubTableData::SignalLength, JBB);
+ DBUG_PRINT("info",("sent to subscriber %d", subbPtr.i));
}
}
+ DBUG_VOID_RETURN;
}
-static Uint32 tmpFailoverBuffer[512];
-//SumaParticipant::FailoverBuffer::FailoverBuffer(DataBuffer<15>::DataBufferPool & p)
-// : m_dataList(p),
-SumaParticipant::FailoverBuffer::FailoverBuffer()
- :
- c_gcis(tmpFailoverBuffer), c_sz(512), c_first(0), c_next(0), c_full(false)
-{
-}
-
-bool SumaParticipant::FailoverBuffer::subTableData(Uint32 gci, Uint32 *src, int sz)
-{
- bool ok = true;
-
- if (c_full) {
- ok = false;
-#ifdef EVENT_DEBUG
- ndbout_c("Suma::FailoverBuffer::SubTableData buffer full gci=%u");
-#endif
- } else {
- c_gcis[c_next] = gci;
- c_next++;
- if (c_next == c_sz) c_next = 0;
- if (c_next == c_first)
- c_full = true;
- // ndbout_c("%u %u %u",c_first,c_next,c_sz);
- }
- return ok;
-}
-bool SumaParticipant::FailoverBuffer::subGcpCompleteRep(Uint32 gci)
+void
+Suma::execALTER_TAB_CONF(Signal *signal)
{
- bool ok = true;
+ jamEntry();
+ DBUG_ENTER("Suma::execALTER_TAB_CONF");
+ ndbassert(signal->getNoOfSections() == 0);
- // ndbout_c("Empty");
- while (true) {
- if (c_first == c_next && !c_full)
- break;
- if (c_gcis[c_first] > gci)
- break;
- c_full = false;
- c_first++;
- if (c_first == c_sz) c_first = 0;
- // ndbout_c("%u %u %u : ",c_first,c_next,c_sz);
- }
+ AlterTabConf * const conf = (AlterTabConf*)signal->getDataPtr();
+ Uint32 senderRef= conf->senderRef;
+ Uint32 tableId= conf->tableId;
- return ok;
-}
-bool SumaParticipant::FailoverBuffer::nodeFailRep()
-{
- bool ok = true;
- while (true) {
- if (c_first == c_next && !c_full)
- break;
-
-#ifdef EVENT_DEBUG
- ndbout_c("Suma::FailoverBuffer::NodeFailRep resending gci=%u", c_gcis[c_first]);
-#endif
- c_full = false;
- c_first++;
- if (c_first == c_sz) c_first = 0;
+ TablePtr tabPtr;
+ if (!c_tables.find(tabPtr, tableId) ||
+ tabPtr.p->m_state == Table::DROPPED ||
+ tabPtr.p->m_state == Table::ALTERED)
+ {
+ DBUG_VOID_RETURN;
}
- return ok;
-}
-
-/**********************************************************
- * Suma participant interface
- *
- * Stopping and removing of subscriber
- *
- */
-void
-SumaParticipant::execSUB_STOP_REQ(Signal* signal){
- jamEntry();
- DBUG_ENTER("SumaParticipant::execSUB_STOP_REQ");
-
- CRASH_INSERTION(13019);
+ DBUG_PRINT("info",("alter table id: %d[i=%u]", tableId, tabPtr.i));
- SubStopReq * const req = (SubStopReq*)signal->getDataPtr();
- Uint32 senderRef = signal->getSendersBlockRef();
- Uint32 senderData = req->senderData;
- Uint32 subscriberRef = req->subscriberRef;
- Uint32 subscriberData = req->subscriberData;
- SubscriptionPtr subPtr;
- Subscription key;
- key.m_subscriptionId = req->subscriptionId;
- key.m_subscriptionKey = req->subscriptionKey;
- Uint32 part = req->part;
-
- if (key.m_subscriptionKey == 0 &&
- key.m_subscriptionId == 0 &&
- subscriberData == 0) {
- SubStopConf* conf = (SubStopConf*)signal->getDataPtrSend();
-
- conf->senderRef = reference();
- conf->senderData = senderData;
- conf->subscriptionId = key.m_subscriptionId;
- conf->subscriptionKey = key.m_subscriptionKey;
- conf->subscriberData = subscriberData;
-
- sendSignal(senderRef, GSN_SUB_STOP_CONF, signal,
- SubStopConf::SignalLength, JBB);
+ tabPtr.p->m_state = Table::ALTERED;
+ // triggers must be removed, waiting for sub stop req for that
- removeSubscribersOnNode(signal, refToNode(subscriberRef));
+ if (senderRef == 0)
+ {
DBUG_VOID_RETURN;
}
-
- if(!c_subscriptions.find(subPtr, key)){
- jam();
- sendSubStopRef(signal, GrepError::SUBSCRIPTION_ID_NOT_FOUND);
- return;
- }
+ // dict coordinator sends info to API
- ndbrequire(part == SubscriptionData::TableData);
-
- SubscriberPtr subbPtr;
- if (senderRef == reference()){
- jam();
- c_subscriberPool.getPtr(subbPtr, senderData);
- ndbrequire(subbPtr.p->m_subPtrI == subPtr.i &&
- subbPtr.p->m_subscriberRef == subscriberRef &&
- subbPtr.p->m_subscriberData == subscriberData);
- c_removeDataSubscribers.remove(subbPtr);
- } else {
- bool found = false;
- jam();
- c_dataSubscribers.first(subbPtr);
- for (;!subbPtr.isNull(); c_dataSubscribers.next(subbPtr)){
+ SubTableData * data = (SubTableData*)signal->getDataPtrSend();
+ data->gci = m_last_complete_gci+1;
+ data->tableId = tableId;
+ data->operation = NdbDictionary::Event::_TE_ALTER;
+ data->req_nodeid = refToNode(senderRef);
+
+ {
+ LocalDLList<Subscriber> subbs(c_subscriberPool,tabPtr.p->c_subscribers);
+ SubscriberPtr subbPtr;
+ for(subbs.first(subbPtr);!subbPtr.isNull();subbs.next(subbPtr))
+ {
jam();
- if (subbPtr.p->m_subPtrI == subPtr.i &&
- refToNode(subbPtr.p->m_subscriberRef) == refToNode(subscriberRef) &&
- subbPtr.p->m_subscriberData == subscriberData){
- // ndbout_c("STOP_REQ: before c_dataSubscribers.release");
+ /*
+ * get subscription ptr for this subscriber
+ */
+ SubscriptionPtr subPtr;
+ c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI);
+ if(subPtr.p->m_subscriptionType != SubCreateReq::TableEvent) {
jam();
- c_dataSubscribers.remove(subbPtr);
- found = true;
- break;
+ continue;
+        // skip this subscriber if its subscription is not a table-event
+        // subscription; otherwise send the alter notification to it.
}
- }
- /**
- * If we didn't find anyone, send ref
- */
- if (!found) {
- jam();
- sendSubStopRef(signal, GrepError::SUBSCRIBER_NOT_FOUND);
- DBUG_VOID_RETURN;
- }
- }
-
- subbPtr.p->m_senderRef = senderRef; // store ref to requestor
- subbPtr.p->m_senderData = senderData; // store ref to requestor
- c_prepDataSubscribers.add(subbPtr);
- Ptr<SyncRecord> syncPtr;
- c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
- if (syncPtr.p->m_locked) {
- jam();
- sendSubStopRef(signal, /** Error Code */ 0, true);
- DBUG_VOID_RETURN;
+ data->senderData= subbPtr.p->m_senderData;
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_TABLE_DATA, signal,
+ SubTableData::SignalLength, JBB);
+ DBUG_PRINT("info",("sent to subscriber %d", subbPtr.i));
+ }
}
- syncPtr.p->m_locked = true;
-
- syncPtr.p->startDropTrigger(signal);
DBUG_VOID_RETURN;
}
void
-SumaParticipant::sendSubStopComplete(Signal* signal, SubscriberPtr subbPtr){
- jam();
-
- CRASH_INSERTION(13020);
+Suma::execSUB_GCP_COMPLETE_ACK(Signal* signal)
+{
+ jamEntry();
+ ndbassert(signal->getNoOfSections() == 0);
- SubscriptionPtr subPtr;
- c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI);
+ SubGcpCompleteAck * const ack = (SubGcpCompleteAck*)signal->getDataPtr();
+ Uint32 gci = ack->rep.gci;
+ Uint32 senderRef = ack->rep.senderRef;
+ m_max_seen_gci = (gci > m_max_seen_gci ? gci : m_max_seen_gci);
- Ptr<SyncRecord> syncPtr;
- c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
- syncPtr.p->m_locked = false;
+ if (refToBlock(senderRef) == SUMA) {
+ jam();
+ // Ack from other SUMA
+ Uint32 nodeId= refToNode(senderRef);
+ for(Uint32 i = 0; i<c_no_of_buckets; i++)
+ {
+ if(m_active_buckets.get(i) ||
+ (m_switchover_buckets.get(i) && (check_switchover(i, gci))) ||
+ (!m_switchover_buckets.get(i) && get_responsible_node(i) == nodeId))
+ {
+ release_gci(signal, i, gci);
+ }
+ }
+ return;
+ }
- SubStopConf * const conf = (SubStopConf*)signal->getDataPtrSend();
+ // Ack from User and not an ack from other SUMA, redistribute in nodegroup
+
+ Uint32 nodeId = refToNode(senderRef);
- conf->senderRef = reference();
- conf->senderData = subbPtr.p->m_senderData;
- conf->subscriptionId = subPtr.p->m_subscriptionId;
- conf->subscriptionKey = subPtr.p->m_subscriptionKey;
- conf->subscriberData = subbPtr.p->m_subscriberData;
- Uint32 senderRef = subbPtr.p->m_senderRef;
-
- c_prepDataSubscribers.release(subbPtr);
- sendSignal(senderRef, GSN_SUB_STOP_CONF, signal,
- SubStopConf::SignalLength, JBB);
-}
-
-void
-SumaParticipant::sendSubStopRef(Signal* signal, Uint32 errCode,
- bool temporary){
jam();
- SubStopRef * ref = (SubStopRef *)signal->getDataPtrSend();
- ref->senderRef = reference();
- ref->errorCode = errCode;
- if (temporary) {
- ref->setTemporary();
+ Ptr<Gcp_record> gcp;
+ for(c_gcp_list.first(gcp); !gcp.isNull(); c_gcp_list.next(gcp))
+ {
+ if(gcp.p->m_gci == gci)
+ {
+ gcp.p->m_subscribers.clear(nodeId);
+ if(!gcp.p->m_subscribers.isclear())
+ {
+ jam();
+ return;
+ }
+ break;
+ }
}
- sendSignal(signal->getSendersBlockRef(),
- GSN_SUB_STOP_REF,
- signal,
- SubStopRef::SignalLength,
- JBB);
- return;
+
+ if(gcp.isNull())
+ {
+ ndbout_c("ACK wo/ gcp record (gci: %d)", gci);
+ }
+ else
+ {
+ c_gcp_list.release(gcp);
+ }
+
+ CRASH_INSERTION(13011);
+ if(ERROR_INSERTED(13012))
+ {
+ CLEAR_ERROR_INSERT_VALUE;
+ ndbout_c("Don't redistribute SUB_GCP_COMPLETE_ACK");
+ return;
+ }
+
+ ack->rep.senderRef = reference();
+ NodeReceiverGroup rg(SUMA, c_nodes_in_nodegroup_mask);
+ sendSignal(rg, GSN_SUB_GCP_COMPLETE_ACK, signal,
+ SubGcpCompleteAck::SignalLength, JBB);
}
/**************************************************************
@@ -3399,10 +3404,11 @@ SumaParticipant::sendSubStopRef(Signal* signal, Uint32 errCode,
*/
void
-SumaParticipant::execSUB_REMOVE_REQ(Signal* signal) {
+Suma::execSUB_REMOVE_REQ(Signal* signal)
+{
jamEntry();
-
- Uint32 senderRef = signal->getSendersBlockRef();
+ DBUG_ENTER("Suma::execSUB_REMOVE_REQ");
+ ndbassert(signal->getNoOfSections() == 0);
CRASH_INSERTION(13021);
@@ -3411,93 +3417,68 @@ SumaParticipant::execSUB_REMOVE_REQ(Signal* signal) {
Subscription key;
key.m_subscriptionId = req.subscriptionId;
key.m_subscriptionKey = req.subscriptionKey;
-
- if(!c_subscriptions.find(subPtr, key)) {
+
+ DBUG_PRINT("enter",("key.m_subscriptionId: %u, key.m_subscriptionKey: %u",
+ key.m_subscriptionId, key.m_subscriptionKey));
+
+ if(!c_subscriptions.find(subPtr, key))
+ {
jam();
- sendSubRemoveRef(signal, req, (Uint32) GrepError::SUBSCRIPTION_ID_NOT_FOUND);
- return;
+ DBUG_PRINT("info",("Not found"));
+ sendSubRemoveRef(signal, req, 1407);
+ DBUG_VOID_RETURN;
}
-
- int count = 0;
+ if (subPtr.p->m_state == Subscription::LOCKED)
{
+ /**
+ * we are currently setting up triggers etc. for this event
+ */
jam();
- SubscriberPtr i_subbPtr;
- for(c_prepDataSubscribers.first(i_subbPtr);
- !i_subbPtr.isNull(); c_prepDataSubscribers.next(i_subbPtr)){
- jam();
- if( i_subbPtr.p->m_subPtrI == subPtr.i ) {
- jam();
- sendSubRemoveRef(signal, req, /* ErrorCode */ 0, true);
- return;
- // c_prepDataSubscribers.release(subbPtr);
- }
- }
- c_dataSubscribers.first(i_subbPtr);
- while(!i_subbPtr.isNull()){
- jam();
- SubscriberPtr subbPtr = i_subbPtr;
- c_dataSubscribers.next(i_subbPtr);
- if( subbPtr.p->m_subPtrI == subPtr.i ) {
- jam();
- sendSubRemoveRef(signal, req, /* ErrorCode */ 0, true);
- return;
- /* Unfinished/untested code. If remove should be possible
- * even if subscribers are left these have to be stopped
- * first. See m_markRemove, m_nSubscribers. We need also to
- * block remove for this subscription so that multiple
- * removes is not possible...
- */
- c_dataSubscribers.remove(subbPtr);
- c_removeDataSubscribers.add(subbPtr);
- count++;
- }
- }
- c_metaSubscribers.first(i_subbPtr);
- while(!i_subbPtr.isNull()){
- jam();
- SubscriberPtr subbPtr = i_subbPtr;
- c_metaSubscribers.next(i_subbPtr);
- if( subbPtr.p->m_subPtrI == subPtr.i ){
- jam();
- c_metaSubscribers.release(subbPtr);
- }
- }
+ sendSubRemoveRef(signal, req, 1413);
+ DBUG_VOID_RETURN;
}
+
+ DBUG_PRINT("info",("n_subscribers: %u", subPtr.p->n_subscribers));
- subPtr.p->m_senderRef = senderRef;
- subPtr.p->m_senderData = req.senderData;
-
- if (count > 0){
+ if (subPtr.p->n_subscribers == 0)
+ {
+ // no subscribers on the subscription
+ // remove it
jam();
- ndbrequire(false); // code not finalized
- subPtr.p->m_markRemove = true;
- subPtr.p->m_nSubscribers = count;
- sendSubStopReq(signal);
- } else {
- completeSubRemoveReq(signal, subPtr);
+ completeSubRemove(subPtr);
}
+ else
+ {
+ // subscribers left on the subscription
+ // mark it to be removed once all subscribers
+ // are removed
+ jam();
+ subPtr.p->m_state = Subscription::DROPPED;
+ }
+
+ SubRemoveConf * const conf = (SubRemoveConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = req.senderData;
+ conf->subscriptionId = req.subscriptionId;
+ conf->subscriptionKey = req.subscriptionKey;
+
+ sendSignal(req.senderRef, GSN_SUB_REMOVE_CONF, signal,
+ SubRemoveConf::SignalLength, JBB);
+
+ DBUG_VOID_RETURN;
}
void
-SumaParticipant::completeSubRemoveReq(Signal* signal, SubscriptionPtr subPtr) {
+Suma::completeSubRemove(SubscriptionPtr subPtr)
+{
+ DBUG_ENTER("Suma::completeSubRemove");
Uint32 subscriptionId = subPtr.p->m_subscriptionId;
Uint32 subscriptionKey = subPtr.p->m_subscriptionKey;
- Uint32 senderRef = subPtr.p->m_senderRef;
- Uint32 senderData = subPtr.p->m_senderData;
- {
- Ptr<SyncRecord> syncPtr;
- c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
-
- syncPtr.p->release();
- c_syncPool.release(syncPtr);
- }
-
- // if (subPtr.p->m_subscriptionType != SubCreateReq::TableEvent) {
- // jam();
- // senderRef = subPtr.p->m_subscriberRef;
- // }
c_subscriptions.release(subPtr);
+ DBUG_PRINT("info",("c_subscriptionPool size: %d free: %d",
+ c_subscriptionPool.getSize(),
+ c_subscriptionPool.getNoOfFree()));
/**
* I was the last subscription to be remove so clear c_tables
@@ -3512,48 +3493,60 @@ SumaParticipant::completeSubRemoveReq(Signal* signal, SubscriptionPtr subPtr) {
#if 0
ndbout_c("SUB_REMOVE_REQ:Clearing c_tables");
#endif
+ int count= 0;
KeyTable<Table>::Iterator it;
- for(c_tables.first(it); !it.isNull(); ){
+ for(c_tables.first(it); !it.isNull(); )
+ {
+ // ndbrequire(false);
+ DBUG_PRINT("error",("trailing table id: %d[i=%d] n_subscribers: %d m_state: %d",
+ it.curr.p->m_tableId,
+ it.curr.p->m_ptrI,
+ it.curr.p->n_subscribers,
+ it.curr.p->m_state));
+
+ LocalDLList<Subscriber> subbs(c_subscriberPool,it.curr.p->c_subscribers);
+ SubscriberPtr subbPtr;
+ for(subbs.first(subbPtr);!subbPtr.isNull();subbs.next(subbPtr))
+ {
+ DBUG_PRINT("error",("subscriber %d, m_subPtrI: %d", subbPtr.i, subbPtr.p->m_subPtrI));
+ }
+
it.curr.p->release(* this);
-
TablePtr tabPtr = it.curr;
-
c_tables.next(it);
- c_tables.release(tabPtr);
+ c_tables.remove(tabPtr);
+ c_tablePool.release(tabPtr);
+ DBUG_PRINT("info",("c_tablePool size: %d free: %d",
+ c_tablePool.getSize(),
+ c_tablePool.getNoOfFree()));
+ count++;
}
+ DBUG_ASSERT(count == 0);
}
-
- SubRemoveConf * const conf = (SubRemoveConf*)signal->getDataPtrSend();
- conf->senderRef = reference();
- conf->senderData = senderData;
- conf->subscriptionId = subscriptionId;
- conf->subscriptionKey = subscriptionKey;
-
- sendSignal(senderRef, GSN_SUB_REMOVE_CONF, signal,
- SubRemoveConf::SignalLength, JBB);
+ DBUG_VOID_RETURN;
}
void
-SumaParticipant::sendSubRemoveRef(Signal* signal, const SubRemoveReq& req,
- Uint32 errCode, bool temporary){
+Suma::sendSubRemoveRef(Signal* signal, const SubRemoveReq& req,
+ Uint32 errCode)
+{
jam();
+ DBUG_ENTER("Suma::sendSubRemoveRef");
SubRemoveRef * ref = (SubRemoveRef *)signal->getDataPtrSend();
ref->senderRef = reference();
+ ref->senderData = req.senderData;
ref->subscriptionId = req.subscriptionId;
ref->subscriptionKey = req.subscriptionKey;
- ref->senderData = req.senderData;
- ref->err = errCode;
- if (temporary)
- ref->setTemporary();
+ ref->errorCode = errCode;
releaseSections(signal);
sendSignal(signal->getSendersBlockRef(), GSN_SUB_REMOVE_REF,
signal, SubRemoveRef::SignalLength, JBB);
- return;
+ DBUG_VOID_RETURN;
}
void
-SumaParticipant::Table::release(SumaParticipant & suma){
+Suma::Table::release(Suma & suma){
jam();
LocalDataBuffer<15> attrBuf(suma.c_dataBufferPool, m_attributes);
@@ -3561,10 +3554,16 @@ SumaParticipant::Table::release(SumaParticipant & suma){
LocalDataBuffer<15> fragBuf(suma.c_dataBufferPool, m_fragments);
fragBuf.release();
+
+ m_state = UNDEFINED;
+#ifndef DBUG_OFF
+ if (n_subscribers != 0)
+ abort();
+#endif
}
void
-SumaParticipant::SyncRecord::release(){
+Suma::SyncRecord::release(){
jam();
m_tableList.release();
@@ -3583,59 +3582,124 @@ SumaParticipant::SyncRecord::release(){
*
*/
-Suma::Restart::Restart(Suma& s) : suma(s) {
- for (int i = 0; i < MAX_REPLICAS; i++) {
- c_okToStart[i] = false;
- c_waitingToStart[i] = false;
- }
+void
+Suma::execSUMA_START_ME_REQ(Signal* signal) {
+ jamEntry();
+ DBUG_ENTER("Suma::execSUMA_START_ME");
+ ndbassert(signal->getNoOfSections() == 0);
+ Restart.runSUMA_START_ME_REQ(signal, signal->getSendersBlockRef());
+ DBUG_VOID_RETURN;
+}
+
+void
+Suma::execSUB_CREATE_REF(Signal* signal) {
+ jamEntry();
+ DBUG_ENTER("Suma::execSUB_CREATE_REF");
+ ndbassert(signal->getNoOfSections() == 0);
+ ndbrequire(false);
+ DBUG_VOID_RETURN;
+}
+
+void
+Suma::execSUB_CREATE_CONF(Signal* signal)
+{
+ jamEntry();
+ DBUG_ENTER("Suma::execSUB_CREATE_CONF");
+ ndbassert(signal->getNoOfSections() == 0);
+ Restart.runSUB_CREATE_CONF(signal);
+ DBUG_VOID_RETURN;
+}
+
+void
+Suma::execSUB_START_CONF(Signal* signal)
+{
+ jamEntry();
+ DBUG_ENTER("Suma::execSUB_START_CONF");
+ ndbassert(signal->getNoOfSections() == 0);
+ Restart.runSUB_START_CONF(signal);
+ DBUG_VOID_RETURN;
}
void
-Suma::Restart::resetNode(Uint32 sumaRef)
+Suma::execSUB_START_REF(Signal* signal) {
+ jamEntry();
+ DBUG_ENTER("Suma::execSUB_START_REF");
+ ndbassert(signal->getNoOfSections() == 0);
+ ndbrequire(false);
+ DBUG_VOID_RETURN;
+}
+
+Suma::Restart::Restart(Suma& s) : suma(s)
+{
+ nodeId = 0;
+}
+
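+/**
+ * A starting SUMA asks this (running) node to recreate its subscriptions
+ * and subscribers.  Only one restarting node per node group is handled at
+ * a time; a concurrent request is refused with SumaStartMeRef::Busy.
+ */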
+void
+Suma::Restart::runSUMA_START_ME_REQ(Signal* signal, Uint32 sumaRef)
{
jam();
- int I = suma.RtoI(sumaRef);
- c_okToStart[I] = false;
- c_waitingToStart[I] = false;
+ DBUG_ENTER("Suma::Restart::runSUMA_START_ME");
+
+ if(nodeId != 0)
+ {
+ SumaStartMeRef* ref= (SumaStartMeRef*)signal->getDataPtrSend();
+ ref->errorCode = SumaStartMeRef::Busy;
+ suma.sendSignal(sumaRef, GSN_SUMA_START_ME_REF, signal,
+ SumaStartMeRef::SignalLength, JBB);
+ return;
+ }
+
+ nodeId = refToNode(sumaRef);
+ startNode(signal, sumaRef);
+
+ DBUG_VOID_RETURN;
}
void
Suma::Restart::startNode(Signal* signal, Uint32 sumaRef)
{
jam();
- resetNode(sumaRef);
-
+ DBUG_ENTER("Suma::Restart::startNode");
+
// right now we can only handle restarting one node
// at a time in a node group
-
+
createSubscription(signal, sumaRef);
+ DBUG_VOID_RETURN;
}
void
-Suma::Restart::createSubscription(Signal* signal, Uint32 sumaRef) {
+Suma::Restart::createSubscription(Signal* signal, Uint32 sumaRef)
+{
jam();
- suma.c_subscriptions.first(c_subPtr);
+ DBUG_ENTER("Suma::Restart::createSubscription");
+ suma.c_subscriptions.first(c_subIt);
nextSubscription(signal, sumaRef);
+ DBUG_VOID_RETURN;
}
void
-Suma::Restart::nextSubscription(Signal* signal, Uint32 sumaRef) {
+Suma::Restart::nextSubscription(Signal* signal, Uint32 sumaRef)
+{
jam();
- if (c_subPtr.isNull()) {
+ DBUG_ENTER("Suma::Restart::nextSubscription");
+
+ if (c_subIt.isNull())
+ {
jam();
completeSubscription(signal, sumaRef);
- return;
+ DBUG_VOID_RETURN;
}
SubscriptionPtr subPtr;
- subPtr.i = c_subPtr.curr.i;
+ subPtr.i = c_subIt.curr.i;
subPtr.p = suma.c_subscriptions.getPtr(subPtr.i);
- suma.c_subscriptions.next(c_subPtr);
+ suma.c_subscriptions.next(c_subIt);
SubCreateReq * req = (SubCreateReq *)signal->getDataPtrSend();
- req->subscriberRef = suma.reference();
- req->subscriberData = subPtr.i;
+ req->senderRef = suma.reference();
+ req->senderData = subPtr.i;
req->subscriptionId = subPtr.p->m_subscriptionId;
req->subscriptionKey = subPtr.p->m_subscriptionKey;
req->subscriptionType = subPtr.p->m_subscriptionType |
@@ -3643,414 +3707,804 @@ Suma::Restart::nextSubscription(Signal* signal, Uint32 sumaRef) {
switch (subPtr.p->m_subscriptionType) {
case SubCreateReq::TableEvent:
- case SubCreateReq::SelectiveTableSnapshot:
- case SubCreateReq::DatabaseSnapshot: {
jam();
-
- Ptr<SyncRecord> syncPtr;
- suma.c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
- syncPtr.p->m_tableList.first(syncPtr.p->m_tableList_it);
-
- ndbrequire(!syncPtr.p->m_tableList_it.isNull());
-
- req->tableId = *syncPtr.p->m_tableList_it.data;
-
-#if 0
- for (int i = 0; i < MAX_TABLES; i++)
- if (subPtr.p->m_tables[i]) {
- req->tableId = i;
- break;
- }
-#endif
-
+ req->tableId = subPtr.p->m_tableId;
suma.sendSignal(sumaRef, GSN_SUB_CREATE_REQ, signal,
- SubCreateReq::SignalLength+1 /*to get table Id*/, JBB);
- return;
- }
- case SubCreateReq::SingleTableScan :
- // TODO
+ SubCreateReq::SignalLength, JBB);
+ DBUG_VOID_RETURN;
+ case SubCreateReq::SingleTableScan:
jam();
- return;
+ nextSubscription(signal, sumaRef);
+ DBUG_VOID_RETURN;
+ case SubCreateReq::SelectiveTableSnapshot:
+ case SubCreateReq::DatabaseSnapshot:
+ ndbrequire(false);
}
ndbrequire(false);
}
-void
-Suma::execSUB_CREATE_CONF(Signal* signal) {
- jamEntry();
-#ifdef NODEFAIL_DEBUG
- ndbout_c("Suma::execSUB_CREATE_CONF");
-#endif
+void
+Suma::Restart::runSUB_CREATE_CONF(Signal* signal)
+{
+ jam();
+ DBUG_ENTER("Suma::Restart::runSUB_CREATE_CONF");
const Uint32 senderRef = signal->senderBlockRef();
+ Uint32 sumaRef = signal->getSendersBlockRef();
SubCreateConf * const conf = (SubCreateConf *)signal->getDataPtr();
- Subscription key;
- const Uint32 subscriberData = conf->subscriberData;
- key.m_subscriptionId = conf->subscriptionId;
- key.m_subscriptionKey = conf->subscriptionKey;
-
SubscriptionPtr subPtr;
- ndbrequire(c_subscriptions.find(subPtr, key));
+ suma.c_subscriptions.getPtr(subPtr,conf->senderData);
switch(subPtr.p->m_subscriptionType) {
case SubCreateReq::TableEvent:
- case SubCreateReq::SelectiveTableSnapshot:
- case SubCreateReq::DatabaseSnapshot:
+ if (1)
{
- Ptr<SyncRecord> syncPtr;
- c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI);
-
- syncPtr.p->m_tableList.next(syncPtr.p->m_tableList_it);
- if (syncPtr.p->m_tableList_it.isNull()) {
- jam();
- SubSyncReq *req = (SubSyncReq *)signal->getDataPtrSend();
-
- req->subscriptionId = key.m_subscriptionId;
- req->subscriptionKey = key.m_subscriptionKey;
- req->subscriberData = subscriberData;
- req->part = (Uint32) SubscriptionData::MetaData;
-
- sendSignal(senderRef, GSN_SUB_SYNC_REQ, signal,
- SubSyncReq::SignalLength, JBB);
- } else {
- jam();
- SubCreateReq * req = (SubCreateReq *)signal->getDataPtrSend();
+ jam();
+ nextSubscription(signal, sumaRef);
+ } else {
+ jam();
+ SubCreateReq * req = (SubCreateReq *)signal->getDataPtrSend();
- req->subscriberRef = reference();
- req->subscriberData = subPtr.i;
- req->subscriptionId = subPtr.p->m_subscriptionId;
- req->subscriptionKey = subPtr.p->m_subscriptionKey;
- req->subscriptionType = subPtr.p->m_subscriptionType |
- SubCreateReq::RestartFlag |
- SubCreateReq::AddTableFlag;
-
- req->tableId = *syncPtr.p->m_tableList_it.data;
-
- sendSignal(senderRef, GSN_SUB_CREATE_REQ, signal,
- SubCreateReq::SignalLength+1 /*to get table Id*/, JBB);
- }
+ req->senderRef = suma.reference();
+ req->senderData = subPtr.i;
+ req->subscriptionId = subPtr.p->m_subscriptionId;
+ req->subscriptionKey = subPtr.p->m_subscriptionKey;
+ req->subscriptionType = subPtr.p->m_subscriptionType |
+ SubCreateReq::RestartFlag |
+ SubCreateReq::AddTableFlag;
+
+ req->tableId = 0;
+
+ suma.sendSignal(senderRef, GSN_SUB_CREATE_REQ, signal,
+ SubCreateReq::SignalLength, JBB);
}
- return;
+ DBUG_VOID_RETURN;
case SubCreateReq::SingleTableScan:
+ case SubCreateReq::SelectiveTableSnapshot:
+ case SubCreateReq::DatabaseSnapshot:
ndbrequire(false);
}
ndbrequire(false);
}
void
-Suma::execSUB_CREATE_REF(Signal* signal) {
- jamEntry();
-#ifdef NODEFAIL_DEBUG
- ndbout_c("Suma::execSUB_CREATE_REF");
-#endif
- //ndbrequire(false);
+Suma::Restart::completeSubscription(Signal* signal, Uint32 sumaRef)
+{
+ jam();
+ DBUG_ENTER("Suma::Restart::completeSubscription");
+ startSubscriber(signal, sumaRef);
+ DBUG_VOID_RETURN;
}
void
-Suma::execSUB_SYNC_CONF(Signal* signal) {
- jamEntry();
-#ifdef NODEFAIL_DEBUG
- ndbout_c("Suma::execSUB_SYNC_CONF");
-#endif
- Uint32 sumaRef = signal->getSendersBlockRef();
+Suma::Restart::startSubscriber(Signal* signal, Uint32 sumaRef)
+{
+ jam();
+ DBUG_ENTER("Suma::Restart::startSubscriber");
+ suma.c_tables.first(c_tabIt);
+ if (c_tabIt.isNull())
+ {
+ completeSubscriber(signal, sumaRef);
+ DBUG_VOID_RETURN;
+ }
+ SubscriberPtr subbPtr;
+ {
+ LocalDLList<Subscriber>
+ subbs(suma.c_subscriberPool,c_tabIt.curr.p->c_subscribers);
+ subbs.first(subbPtr);
+ }
+ nextSubscriber(signal, sumaRef, subbPtr);
+ DBUG_VOID_RETURN;
+}
- SubSyncConf *conf = (SubSyncConf *)signal->getDataPtr();
- Subscription key;
+void
+Suma::Restart::nextSubscriber(Signal* signal, Uint32 sumaRef,
+ SubscriberPtr subbPtr)
+{
+ jam();
+ DBUG_ENTER("Suma::Restart::nextSubscriber");
+ while (subbPtr.isNull())
+ {
+ jam();
+ DBUG_PRINT("info",("prev tableId %u",c_tabIt.curr.p->m_tableId));
+ suma.c_tables.next(c_tabIt);
+ if (c_tabIt.isNull())
+ {
+ completeSubscriber(signal, sumaRef);
+ DBUG_VOID_RETURN;
+ }
+ DBUG_PRINT("info",("next tableId %u",c_tabIt.curr.p->m_tableId));
- key.m_subscriptionId = conf->subscriptionId;
- key.m_subscriptionKey = conf->subscriptionKey;
- // SubscriptionData::Part part = (SubscriptionData::Part)conf->part;
- // const Uint32 subscriberData = conf->subscriberData;
+ LocalDLList<Subscriber>
+ subbs(suma.c_subscriberPool,c_tabIt.curr.p->c_subscribers);
+ subbs.first(subbPtr);
+ }
- SubscriptionPtr subPtr;
- c_subscriptions.find(subPtr, key);
+ /*
+ * get subscription ptr for this subscriber
+ */
- switch(subPtr.p->m_subscriptionType) {
+ SubscriptionPtr subPtr;
+ suma.c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI);
+ switch (subPtr.p->m_subscriptionType) {
case SubCreateReq::TableEvent:
+ jam();
+ sendSubStartReq(subPtr, subbPtr, signal, sumaRef);
+ DBUG_VOID_RETURN;
case SubCreateReq::SelectiveTableSnapshot:
case SubCreateReq::DatabaseSnapshot:
- jam();
- Restart.nextSubscription(signal, sumaRef);
- return;
case SubCreateReq::SingleTableScan:
ndbrequire(false);
- return;
}
ndbrequire(false);
}
-void
-Suma::execSUB_SYNC_REF(Signal* signal) {
- jamEntry();
-#ifdef NODEFAIL_DEBUG
- ndbout_c("Suma::execSUB_SYNC_REF");
-#endif
- //ndbrequire(false);
-}
-
void
-Suma::execSUMA_START_ME(Signal* signal) {
- jamEntry();
-#ifdef NODEFAIL_DEBUG
- ndbout_c("Suma::execSUMA_START_ME");
-#endif
+Suma::Restart::sendSubStartReq(SubscriptionPtr subPtr, SubscriberPtr subbPtr,
+ Signal* signal, Uint32 sumaRef)
+{
+ jam();
+ DBUG_ENTER("Suma::Restart::sendSubStartReq");
+ SubStartReq * req = (SubStartReq *)signal->getDataPtrSend();
- Restart.runSUMA_START_ME(signal, signal->getSendersBlockRef());
-}
+ req->senderRef = suma.reference();
+ req->senderData = subbPtr.i;
+ req->subscriptionId = subPtr.p->m_subscriptionId;
+ req->subscriptionKey = subPtr.p->m_subscriptionKey;
+ req->part = SubscriptionData::TableData;
+ req->subscriberData = subbPtr.p->m_senderData;
+ req->subscriberRef = subbPtr.p->m_senderRef;
-void
-Suma::Restart::runSUMA_START_ME(Signal* signal, Uint32 sumaRef) {
- int I = suma.RtoI(sumaRef);
+ // restarting suma will not respond to this until startphase 5
+  // since data copying is not completed until then
+ DBUG_PRINT("info",("Restarting subscriber: %u on key: [%u,%u]",
+ subbPtr.i,
+ subPtr.p->m_subscriptionId,
+ subPtr.p->m_subscriptionKey,
+ subPtr.p->m_tableId));
- // restarting Suma is ready for SUB_START_REQ
- if (c_waitingToStart[I]) {
- // we've waited with startSubscriber since restarting suma was not ready
- c_waitingToStart[I] = false;
- startSubscriber(signal, sumaRef);
- } else {
- // do startSubscriber as soon as its time
- c_okToStart[I] = true;
- }
+ suma.sendSignal(sumaRef, GSN_SUB_START_REQ,
+ signal, SubStartReq::SignalLength2, JBB);
+ DBUG_VOID_RETURN;
}
void
-Suma::Restart::completeSubscription(Signal* signal, Uint32 sumaRef) {
+Suma::Restart::runSUB_START_CONF(Signal* signal)
+{
jam();
- int I = suma.RtoI(sumaRef);
+ DBUG_ENTER("Suma::Restart::runSUB_START_CONF");
- if (c_okToStart[I]) {// otherwise will start when START_ME comes
- c_okToStart[I] = false;
- startSubscriber(signal, sumaRef);
- } else {
- c_waitingToStart[I] = true;
+ SubStartConf * const conf = (SubStartConf*)signal->getDataPtr();
+
+ Subscription key;
+ SubscriptionPtr subPtr;
+ key.m_subscriptionId = conf->subscriptionId;
+ key.m_subscriptionKey = conf->subscriptionKey;
+ ndbrequire(suma.c_subscriptions.find(subPtr, key));
+
+ TablePtr tabPtr;
+ ndbrequire(suma.c_tables.find(tabPtr, subPtr.p->m_tableId));
+
+ SubscriberPtr subbPtr;
+ {
+ LocalDLList<Subscriber>
+ subbs(suma.c_subscriberPool,tabPtr.p->c_subscribers);
+ subbs.getPtr(subbPtr, conf->senderData);
+ DBUG_PRINT("info",("Restarted subscriber: %u on key: [%u,%u] table: %u",
+ subbPtr.i,key.m_subscriptionId,key.m_subscriptionKey,
+ subPtr.p->m_tableId));
+ subbs.next(subbPtr);
}
+
+ Uint32 sumaRef = signal->getSendersBlockRef();
+ nextSubscriber(signal, sumaRef, subbPtr);
+
+ DBUG_VOID_RETURN;
}
void
-Suma::Restart::startSubscriber(Signal* signal, Uint32 sumaRef) {
- jam();
- suma.c_dataSubscribers.first(c_subbPtr);
- nextSubscriber(signal, sumaRef);
+Suma::Restart::completeSubscriber(Signal* signal, Uint32 sumaRef)
+{
+ DBUG_ENTER("Suma::Restart::completeSubscriber");
+ completeRestartingNode(signal, sumaRef);
+ DBUG_VOID_RETURN;
}
void
-Suma::Restart::sendSubStartReq(SubscriptionPtr subPtr, SubscriberPtr subbPtr,
- Signal* signal, Uint32 sumaRef)
+Suma::Restart::completeRestartingNode(Signal* signal, Uint32 sumaRef)
{
jam();
- SubStartReq * req = (SubStartReq *)signal->getDataPtrSend();
-
- req->senderRef = suma.reference();
- req->senderData = subbPtr.p->m_senderData;
- req->subscriptionId = subPtr.p->m_subscriptionId;
- req->subscriptionKey = subPtr.p->m_subscriptionKey;
- req->part = SubscriptionData::TableData;
- req->subscriberData = subbPtr.p->m_subscriberData;
- req->subscriberRef = subbPtr.p->m_subscriberRef;
-
- // restarting suma will not respond to this until startphase 5
- // since it is not until then data copying has been completed
-#ifdef NODEFAIL_DEBUG
- ndbout_c("Suma::Restart::sendSubStartReq sending GSN_SUB_START_REQ id=%u key=%u",
- req->subscriptionId, req->subscriptionKey);
-#endif
- suma.sendSignal(sumaRef, GSN_SUB_START_REQ,
- signal, SubStartReq::SignalLength2, JBB);
+ DBUG_ENTER("Suma::Restart::completeRestartingNode");
+ //SumaStartMeConf *conf= (SumaStartMeConf*)signal->getDataPtrSend();
+ suma.sendSignal(sumaRef, GSN_SUMA_START_ME_CONF, signal,
+ SumaStartMeConf::SignalLength, JBB);
+ nodeId = 0;
+ DBUG_VOID_RETURN;
}
-void
-Suma::execSUB_START_CONF(Signal* signal) {
+// only run on all but restarting suma
+
+void
+Suma::execSUMA_HANDOVER_REQ(Signal* signal)
+{
jamEntry();
-#ifdef NODEFAIL_DEBUG
- ndbout_c("Suma::execSUB_START_CONF");
-#endif
- Uint32 sumaRef = signal->getSendersBlockRef();
- Restart.nextSubscriber(signal, sumaRef);
+ DBUG_ENTER("Suma::execSUMA_HANDOVER_REQ");
+ // Uint32 sumaRef = signal->getSendersBlockRef();
+ SumaHandoverReq const * req = (SumaHandoverReq *)signal->getDataPtr();
+
+ Uint32 gci = req->gci;
+ Uint32 nodeId = req->nodeId;
+ Uint32 new_gci = m_last_complete_gci + MAX_CONCURRENT_GCP + 1;
+
+ Uint32 start_gci = (gci > new_gci ? gci : new_gci);
+ // mark all active buckets really belonging to restarting SUMA
+
+ Bucket_mask tmp;
+ for( Uint32 i = 0; i < c_no_of_buckets; i++)
+ {
+ if(get_responsible_node(i) == nodeId)
+ {
+ if (m_active_buckets.get(i))
+ {
+        // I'm running this bucket but it should really be run by the restarting node
+ tmp.set(i);
+ m_active_buckets.clear(i);
+ m_switchover_buckets.set(i);
+ c_buckets[i].m_switchover_gci = start_gci;
+ c_buckets[i].m_state |= Bucket::BUCKET_HANDOVER;
+ c_buckets[i].m_switchover_node = nodeId;
+ ndbout_c("prepare to handover bucket: %d", i);
+ }
+ else if(m_switchover_buckets.get(i))
+ {
+ ndbout_c("dont handover bucket: %d %d", i, nodeId);
+ }
+ }
+ }
+
+ SumaHandoverConf* conf= (SumaHandoverConf*)signal->getDataPtrSend();
+ tmp.copyto(BUCKET_MASK_SIZE, conf->theBucketMask);
+ conf->gci = start_gci;
+ conf->nodeId = getOwnNodeId();
+ sendSignal(calcSumaBlockRef(nodeId), GSN_SUMA_HANDOVER_CONF, signal,
+ SumaHandoverConf::SignalLength, JBB);
+
+ DBUG_VOID_RETURN;
}
-void
-Suma::execSUB_START_REF(Signal* signal) {
+// only run on restarting suma
+void
+Suma::execSUMA_HANDOVER_REF(Signal* signal)
+{
+ ndbrequire(false);
+}
+
+void
+Suma::execSUMA_HANDOVER_CONF(Signal* signal) {
jamEntry();
-#ifdef NODEFAIL_DEBUG
- ndbout_c("Suma::execSUB_START_REF");
+ DBUG_ENTER("Suma::execSUMA_HANDOVER_CONF");
+
+ SumaHandoverConf const * conf = (SumaHandoverConf *)signal->getDataPtr();
+
+ Uint32 gci = conf->gci;
+ Uint32 nodeId = conf->nodeId;
+ Bucket_mask tmp;
+ tmp.assign(BUCKET_MASK_SIZE, conf->theBucketMask);
+#ifdef HANDOVER_DEBUG
+ ndbout_c("Suma::execSUMA_HANDOVER_CONF, gci = %u", gci);
#endif
- //ndbrequire(false);
-}
-void
-Suma::Restart::nextSubscriber(Signal* signal, Uint32 sumaRef) {
- jam();
- if (c_subbPtr.isNull()) {
- jam();
- completeSubscriber(signal, sumaRef);
- return;
+ for( Uint32 i = 0; i < c_no_of_buckets; i++)
+ {
+ if (tmp.get(i))
+ {
+ ndbrequire(get_responsible_node(i) == getOwnNodeId());
+      // We should run this bucket, but _nodeId_ is still running it until _gci_
+ c_buckets[i].m_switchover_gci = gci;
+ c_buckets[i].m_state |= Bucket::BUCKET_STARTING;
+ }
}
- SubscriberPtr subbPtr = c_subbPtr;
- suma.c_dataSubscribers.next(c_subbPtr);
+ char buf[255];
+ tmp.getText(buf);
+ infoEvent("Suma: handover from node %d gci: %d buckets: %s (%d)",
+ nodeId, gci, buf, c_no_of_buckets);
+ m_switchover_buckets.bitOR(tmp);
+ c_startup.m_handover_nodes.clear(nodeId);
+ DBUG_VOID_RETURN;
+}
- /*
- * get subscription ptr for this subscriber
- */
+static
+NdbOut&
+operator<<(NdbOut & out, const Suma::Page_pos & pos)
+{
+ out << "[ Page_pos:"
+ << " m_page_id: " << pos.m_page_id
+ << " m_page_pos: " << pos.m_page_pos
+ << " m_max_gci: " << pos.m_max_gci
+ << " ]";
+ return out;
+}
- SubscriptionPtr subPtr;
- suma.c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI);
- switch (subPtr.p->m_subscriptionType) {
- case SubCreateReq::TableEvent:
- case SubCreateReq::SelectiveTableSnapshot:
- case SubCreateReq::DatabaseSnapshot:
+Uint32*
+Suma::get_buffer_ptr(Signal* signal, Uint32 buck, Uint32 gci, Uint32 sz)
+{
+ sz += 1; // len
+ Bucket* bucket= c_buckets+buck;
+ Page_pos pos= bucket->m_buffer_head;
+
+ Buffer_page* page= (Buffer_page*)(m_tup->page+pos.m_page_id);
+ Uint32* ptr= page->m_data + pos.m_page_pos;
+
+ const bool same_gci = (gci == pos.m_last_gci) && (!ERROR_INSERTED(13022));
+
+ pos.m_page_pos += sz;
+ pos.m_last_gci = gci;
+ Uint32 max = pos.m_max_gci > gci ? pos.m_max_gci : gci;
+
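+  // Each buffered record starts with a length word (total words incl. header);
+  // the 0x8000 flag in the upper half marks "same GCI as the previous record",
+  // otherwise a second header word carries the GCI.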
+ if(likely(same_gci && pos.m_page_pos <= Buffer_page::DATA_WORDS))
+ {
+ pos.m_max_gci = max;
+ bucket->m_buffer_head = pos;
+ * ptr++ = (0x8000 << 16) | sz; // Same gci
+ return ptr;
+ }
+ else if(pos.m_page_pos + 1 <= Buffer_page::DATA_WORDS)
+ {
+loop:
+ pos.m_max_gci = max;
+ pos.m_page_pos += 1;
+ bucket->m_buffer_head = pos;
+ * ptr++ = (sz + 1);
+ * ptr++ = gci;
+ return ptr;
+ }
+ else
+ {
+ /**
+ * new page
+ * 1) save header on last page
+ * 2) seize new page
+ */
+ Uint32 next;
+ if(unlikely((next= seize_page()) == RNIL))
{
- jam();
- sendSubStartReq(subPtr, subbPtr, signal, sumaRef);
-#if 0
- SubStartReq * req = (SubStartReq *)signal->getDataPtrSend();
-
- req->senderRef = reference();
- req->senderData = subbPtr.p->m_senderData;
- req->subscriptionId = subPtr.p->m_subscriptionId;
- req->subscriptionKey = subPtr.p->m_subscriptionKey;
- req->part = SubscriptionData::TableData;
- req->subscriberData = subbPtr.p->m_subscriberData;
- req->subscriberRef = subbPtr.p->m_subscriberRef;
-
- // restarting suma will not respond to this until startphase 5
- // since it is not until then data copying has been completed
-#ifdef NODEFAIL_DEBUG
- ndbout_c("Suma::nextSubscriber sending GSN_SUB_START_REQ id=%u key=%u",
- req->subscriptionId, req->subscriptionKey);
-#endif
- suma.sendSignal(sumaRef, GSN_SUB_START_REQ,
- signal, SubStartReq::SignalLength2, JBB);
-#endif
+ /**
+ * Out of buffer
+ */
+ out_of_buffer(signal);
+ return 0;
}
- return;
- case SubCreateReq::SingleTableScan:
- ndbrequire(false);
- return;
+
+ if(likely(pos.m_page_id != RNIL))
+ {
+ page->m_max_gci = pos.m_max_gci;
+ page->m_words_used = pos.m_page_pos - sz;
+ page->m_next_page= next;
+ }
+ else
+ {
+ bucket->m_buffer_tail = next;
+ }
+
+ memset(&pos, 0, sizeof(pos));
+ pos.m_page_id = next;
+ pos.m_page_pos = sz;
+ pos.m_last_gci = gci;
+
+ page= (Buffer_page*)(m_tup->page+pos.m_page_id);
+ page->m_next_page= RNIL;
+ ptr= page->m_data;
+    goto loop; // write the header on the newly seized page
}
- ndbrequire(false);
}
-void
-Suma::Restart::completeSubscriber(Signal* signal, Uint32 sumaRef) {
- completeRestartingNode(signal, sumaRef);
+void
+Suma::out_of_buffer(Signal* signal)
+{
+ if(m_out_of_buffer_gci)
+ {
+ return;
+ }
+
+ m_out_of_buffer_gci = m_last_complete_gci - 1;
+ infoEvent("Out of event buffer: nodefailure will cause event failures");
+
+ signal->theData[0] = SumaContinueB::OUT_OF_BUFFER_RELEASE;
+ signal->theData[1] = 0;
+ sendSignal(SUMA_REF, GSN_CONTINUEB, signal, 2, JBB);
}
void
-Suma::Restart::completeRestartingNode(Signal* signal, Uint32 sumaRef) {
- jam();
- SumaHandoverReq * req = (SumaHandoverReq *)signal->getDataPtrSend();
+Suma::out_of_buffer_release(Signal* signal, Uint32 buck)
+{
+ Bucket* bucket= c_buckets+buck;
+ Uint32 tail= bucket->m_buffer_tail;
+
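+  // Free one page per CONTINUEB round; when this bucket's buffer is empty,
+  // reset its head and move on to the next bucket.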
+ if(tail != RNIL)
+ {
+ Buffer_page* page= (Buffer_page*)(m_tup->page+tail);
+ bucket->m_buffer_tail = page->m_next_page;
+ free_page(tail, page);
+ signal->theData[0] = SumaContinueB::OUT_OF_BUFFER_RELEASE;
+ signal->theData[1] = buck;
+ sendSignal(SUMA_REF, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ }
- req->gci = suma.getFirstGCI(signal);
+ /**
+ * Clear head
+ */
+ bucket->m_buffer_head.m_page_id = RNIL;
+ bucket->m_buffer_head.m_page_pos = Buffer_page::DATA_WORDS + 1;
+
+ buck++;
+ if(buck != c_no_of_buckets)
+ {
+ signal->theData[0] = SumaContinueB::OUT_OF_BUFFER_RELEASE;
+ signal->theData[1] = buck;
+ sendSignal(SUMA_REF, GSN_CONTINUEB, signal, 2, JBB);
+ return;
+ }
- suma.sendSignal(sumaRef, GSN_SUMA_HANDOVER_REQ, signal,
- SumaHandoverReq::SignalLength, JBB);
+ /**
+   * Finished with all releases,
+ * prepare for inclusion
+ */
+ m_out_of_buffer_gci = m_max_seen_gci > m_last_complete_gci
+ ? m_max_seen_gci + 1 : m_last_complete_gci + 1;
}
-// only run on restarting suma
+Uint32
+Suma::seize_page()
+{
+ if(unlikely(m_out_of_buffer_gci))
+ {
+ return RNIL;
+ }
+loop:
+ Ptr<Page_chunk> ptr;
+ Uint32 ref= m_first_free_page;
+ if(likely(ref != RNIL))
+ {
+ m_first_free_page = ((Buffer_page*)m_tup->page+ref)->m_next_page;
+ Uint32 chunk = ((Buffer_page*)m_tup->page+ref)->m_page_chunk_ptr_i;
+ c_page_chunk_pool.getPtr(ptr, chunk);
+ ndbassert(ptr.p->m_free);
+ ptr.p->m_free--;
+ return ref;
+ }
+
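+  // Free list is empty: allocate a chunk of 16 consecutive pages from TUP,
+  // link them onto the free list and retry via the loop label above.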
+ if(!c_page_chunk_pool.seize(ptr))
+ return RNIL;
+
+ Uint32 count;
+ m_tup->allocConsPages(16, count, ref);
+ ndbrequire(count > 0);
+
+ ndbout_c("alloc_chunk(%d %d) - ", ref, count);
+
+ m_first_free_page = ptr.p->m_page_id = ref;
+ ptr.p->m_size = count;
+ ptr.p->m_free = count;
+
+ Buffer_page* page;
+ for(Uint32 i = 0; i<count; i++)
+ {
+ page = (Buffer_page*)(m_tup->page+ref);
+ page->m_page_state= SUMA_SEQUENCE;
+ page->m_page_chunk_ptr_i = ptr.i;
+ page->m_next_page = ++ref;
+ }
+ page->m_next_page = RNIL;
+
+ goto loop;
+}
void
-Suma::execSUMA_HANDOVER_REQ(Signal* signal)
+Suma::free_page(Uint32 page_id, Buffer_page* page)
{
- jamEntry();
- // Uint32 sumaRef = signal->getSendersBlockRef();
- SumaHandoverReq const * req = (SumaHandoverReq *)signal->getDataPtr();
+ Ptr<Page_chunk> ptr;
+ ndbrequire(page->m_page_state == SUMA_SEQUENCE);
- Uint32 gci = req->gci;
- Uint32 new_gci = getFirstGCI(signal);
+ Uint32 chunk= page->m_page_chunk_ptr_i;
+
+ c_page_chunk_pool.getPtr(ptr, chunk);
+
+ ptr.p->m_free ++;
+ page->m_next_page = m_first_free_page;
+ ndbrequire(ptr.p->m_free <= ptr.p->m_size);
+
+ m_first_free_page = page_id;
+}
+
+void
+Suma::release_gci(Signal* signal, Uint32 buck, Uint32 gci)
+{
+ Bucket* bucket= c_buckets+buck;
+ Uint32 tail= bucket->m_buffer_tail;
+ Page_pos head= bucket->m_buffer_head;
+ Uint32 max_acked = bucket->m_max_acked_gci;
- if (new_gci > gci) {
- gci = new_gci;
+ const Uint32 mask = Bucket::BUCKET_TAKEOVER | Bucket::BUCKET_RESEND;
+ if(unlikely(bucket->m_state & mask))
+ {
+ jam();
+ ndbout_c("release_gci(%d, %d) -> node failure -> abort", buck, gci);
+ return;
}
+
+ bucket->m_max_acked_gci = (max_acked > gci ? max_acked : gci);
+ if(unlikely(tail == RNIL))
+ {
+ return;
+ }
+
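+  // Release pages from the buffer tail whose highest GCI has been acknowledged;
+  // the head page is only reset in place, never freed here.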
+ if(tail == head.m_page_id)
+ {
+ if(gci >= head.m_max_gci)
+ {
+ jam();
+ head.m_page_pos = 0;
+ head.m_max_gci = gci;
+ head.m_last_gci = 0;
+ bucket->m_buffer_head = head;
+ }
+ return;
+ }
+ else
+ {
+ jam();
+ Buffer_page* page= (Buffer_page*)(m_tup->page+tail);
+ Uint32 max_gci = page->m_max_gci;
+ Uint32 next_page = page->m_next_page;
- { // all recreated subscribers at restarting SUMA start at same GCI
- SubscriberPtr subbPtr;
- for(c_dataSubscribers.first(subbPtr);
- !subbPtr.isNull();
- c_dataSubscribers.next(subbPtr)){
- subbPtr.p->m_firstGCI = gci;
+ ndbassert(max_gci);
+
+ if(gci >= max_gci)
+ {
+ jam();
+ free_page(tail, page);
+
+ bucket->m_buffer_tail = next_page;
+ signal->theData[0] = SumaContinueB::RELEASE_GCI;
+ signal->theData[1] = buck;
+ signal->theData[2] = gci;
+ sendSignal(SUMA_REF, GSN_CONTINUEB, signal, 3, JBB);
+ return;
+ }
+ else
+ {
+ //ndbout_c("do nothing...");
}
}
+}
-#ifdef NODEFAIL_DEBUG
- ndbout_c("Suma::execSUMA_HANDOVER_REQ, gci = %u", gci);
-#endif
+static Uint32 g_cnt = 0;
- c_handoverToDo = false;
- c_restartLock = false;
+void
+Suma::start_resend(Signal* signal, Uint32 buck)
+{
+ printf("start_resend(%d, ", buck);
+
+ if(m_out_of_buffer_gci)
{
-#ifdef HANDOVER_DEBUG
- int c = 0;
-#endif
- for( int i = 0; i < NO_OF_BUCKETS; i++) {
- jam();
- if (getResponsibleSumaNodeId(i) == refToNode(reference())) {
-#ifdef HANDOVER_DEBUG
- c++;
-#endif
- jam();
- c_buckets[i].active = false;
- c_buckets[i].handoverGCI = gci;
- c_buckets[i].handover = true;
- c_buckets[i].handover_started = false;
- c_handoverToDo = true;
- }
- }
-#ifdef HANDOVER_DEBUG
- ndbout_c("prepared handover of bucket %u buckets", c);
-#endif
+ progError(__LINE__, ERR_SYSTEM_ERROR,
+ "Nodefailure while out of event buffer");
+ return;
}
+
+ /**
+ * Resend from m_max_acked_gci + 1 until max_gci + 1
+ */
+ Bucket* bucket= c_buckets + buck;
+ Page_pos pos= bucket->m_buffer_head;
- for (Uint32 i = 0; i < c_noNodesInGroup; i++) {
+ if(pos.m_page_id == RNIL)
+ {
jam();
- Uint32 ref = calcSumaBlockRef(c_nodesInGroup[i]);
- if (ref != reference()) {
- jam();
- sendSignal(ref, GSN_SUMA_HANDOVER_CONF, signal,
- SumaHandoverConf::SignalLength, JBB);
- }//if
+ m_active_buckets.set(buck);
+ m_gcp_complete_rep_count ++;
+ ndbout_c("empty bucket(RNIL) -> active");
+ return;
}
+
+ Uint32 min= bucket->m_max_acked_gci + 1;
+ Uint32 max = pos.m_max_gci;
+
+ ndbrequire(max <= m_max_seen_gci);
+
+ if(min > max)
+ {
+ ndbrequire(pos.m_page_pos <= 2);
+ ndbrequire(pos.m_page_id == bucket->m_buffer_tail);
+ m_active_buckets.set(buck);
+ m_gcp_complete_rep_count ++;
+ ndbout_c("empty bucket -> active");
+ return;
+ }
+
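+  // Take over the failed node's bucket: resend everything from min to max and
+  // switch the bucket over at max + 1.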
+ g_cnt = 0;
+ bucket->m_state |= (Bucket::BUCKET_TAKEOVER | Bucket::BUCKET_RESEND);
+ bucket->m_switchover_node = get_responsible_node(buck);
+ bucket->m_switchover_gci = max + 1;
+
+ m_switchover_buckets.set(buck);
+
+  signal->theData[0] = SumaContinueB::RESEND_BUCKET;
+  signal->theData[1] = buck;
+ signal->theData[2] = min;
+ signal->theData[3] = 0;
+ signal->theData[4] = 0;
+ sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB);
+
+ ndbout_c("min: %d - max: %d) page: %d", min, max, bucket->m_buffer_tail);
+ ndbrequire(max >= min);
}
-// only run on all but restarting suma
void
-Suma::execSUMA_HANDOVER_CONF(Signal* signal) {
- jamEntry();
- Uint32 sumaRef = signal->getSendersBlockRef();
- SumaHandoverConf const * conf = (SumaHandoverConf *)signal->getDataPtr();
+Suma::resend_bucket(Signal* signal, Uint32 buck, Uint32 min_gci,
+ Uint32 pos, Uint32 last_gci)
+{
+ Bucket* bucket= c_buckets+buck;
+ Uint32 tail= bucket->m_buffer_tail;
- Uint32 gci = conf->gci;
+ Buffer_page* page= (Buffer_page*)(m_tup->page+tail);
+ Uint32 max_gci = page->m_max_gci;
+ Uint32 next_page = page->m_next_page;
+ Uint32 *ptr = page->m_data + pos;
+ Uint32 *end = page->m_data + page->m_words_used;
+ bool delay = false;
-#ifdef HANDOVER_DEBUG
- ndbout_c("Suma::execSUMA_HANDOVER_CONF, gci = %u", gci);
+ ndbrequire(tail != RNIL);
+
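+  // If tail and head share a page, only read up to the head position; once
+  // caught up, retry later with a short delay.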
+ if(tail == bucket->m_buffer_head.m_page_id)
+ {
+ max_gci= bucket->m_buffer_head.m_max_gci;
+ end= page->m_data + bucket->m_buffer_head.m_page_pos;
+ next_page= RNIL;
+
+ if(ptr == end)
+ {
+ delay = true;
+ goto next;
+ }
+ }
+ else if(pos == 0 && min_gci > max_gci)
+ {
+ free_page(tail, page);
+ tail = bucket->m_buffer_tail = next_page;
+ ndbout_c("pos==0 && min_gci(%d) > max_gci(%d) resend switching page to %d", min_gci, max_gci, tail);
+ goto next;
+ }
+
+#if 0
+ for(Uint32 i = 0; i<page->m_words_used; i++)
+ {
+ printf("%.8x ", page->m_data[i]);
+ if(((i + 1) % 8) == 0)
+ printf("\n");
+ }
+ printf("\n");
#endif
- /* TODO, if we are restarting several SUMA's (>2 in a nodegroup)
- * we have to collect all these conf's before proceding
- */
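+  // Decode one buffered record: sz == 1 is a GCP-complete marker, anything
+  // larger is table data that is reformatted and resent to the subscribers.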
+ while(ptr < end)
+ {
+ Uint32 *src = ptr;
+ Uint32 tmp = * src++;
+ Uint32 sz = tmp & 0xFFFF;
- // restarting node is now prepared and ready
- c_preparingNodes.clear(refToNode(sumaRef)); /* !! important to do before
- * below since it affects
- * getResponsibleSumaNodeId()
- */
+ ptr += sz;
- c_handoverToDo = false;
- // mark all active buckets really belonging to restarting SUMA
- for( int i = 0; i < NO_OF_BUCKETS; i++) {
- if (c_buckets[i].active) {
- // I'm running this bucket
- if (getResponsibleSumaNodeId(i) == refToNode(sumaRef)) {
- // but it should really be the restarted node
- c_buckets[i].handoverGCI = gci;
- c_buckets[i].handover = true;
- c_buckets[i].handover_started = false;
- c_handoverToDo = true;
+ if(! (tmp & (0x8000 << 16)))
+ {
+ sz--;
+ last_gci = * src ++;
+ }
+ else
+ {
+ ndbrequire(ptr - sz > page->m_data);
+ }
+
+ if(last_gci < min_gci)
+ {
+ continue;
+ }
+
+ if(sz == 1)
+ {
+ SubGcpCompleteRep * rep = (SubGcpCompleteRep*)signal->getDataPtrSend();
+ rep->gci = last_gci;
+ rep->senderRef = reference();
+ rep->gcp_complete_rep_count = 1;
+
+ char buf[255];
+ c_subscriber_nodes.getText(buf);
+ ndbout_c("resending GCI: %d rows: %d -> %s", last_gci, g_cnt, buf);
+ g_cnt = 0;
+
+ NodeReceiverGroup rg(API_CLUSTERMGR, c_subscriber_nodes);
+ sendSignal(rg, GSN_SUB_GCP_COMPLETE_REP, signal,
+ SubGcpCompleteRep::SignalLength, JBB);
+ }
+ else
+ {
+ g_cnt++;
+ Uint32 table = * src++ ;
+ Uint32 event = * src >> 16;
+ Uint32 sz_1 = (* src ++) & 0xFFFF;
+
+ ndbassert(sz - 2 >= sz_1);
+
+ LinearSectionPtr ptr[3];
+ const Uint32 nptr= reformat(signal, ptr,
+ src, sz_1,
+ src + sz_1, sz - 2 - sz_1);
+
+ /**
+ * Signal to subscriber(s)
+ */
+ Ptr<Table> tabPtr;
+ ndbrequire(tabPtr.p = c_tablePool.getPtr(table));
+
+ SubTableData * data = (SubTableData*)signal->getDataPtrSend();//trg;
+ data->gci = last_gci;
+ data->tableId = tabPtr.p->m_tableId;
+ data->operation = event;
+ data->logType = 0;
+
+ {
+ LocalDLList<Subscriber> list(c_subscriberPool,tabPtr.p->c_subscribers);
+ SubscriberPtr subbPtr;
+ for(list.first(subbPtr); !subbPtr.isNull(); list.next(subbPtr))
+ {
+ DBUG_PRINT("info",("GSN_SUB_TABLE_DATA to node %d",
+ refToNode(subbPtr.p->m_senderRef)));
+ data->senderData = subbPtr.p->m_senderData;
+ sendSignal(subbPtr.p->m_senderRef, GSN_SUB_TABLE_DATA, signal,
+ SubTableData::SignalLength, JBB, ptr, nptr);
+ }
}
}
+
+ break;
}
+
+ if(ptr == end && (tail != bucket->m_buffer_head.m_page_id))
+ {
+ /**
+ * release...
+ */
+ free_page(tail, page);
+ tail = bucket->m_buffer_tail = next_page;
+ pos = 0;
+ last_gci = 0;
+ ndbout_c("ptr == end -> resend switching page to %d", tail);
+ }
+ else
+ {
+ pos = (ptr - page->m_data);
+ }
+
+next:
+ if(tail == RNIL)
+ {
+ bucket->m_state &= ~(Uint32)Bucket::BUCKET_RESEND;
+ ndbassert(! (bucket->m_state & Bucket::BUCKET_TAKEOVER));
+ ndbout_c("resend done...");
+ return;
+ }
+
+ signal->theData[0] = SumaContinueB::RESEND_BUCKET;
+ signal->theData[1] = buck;
+ signal->theData[2] = min_gci;
+ signal->theData[3] = pos;
+ signal->theData[4] = last_gci;
+ if(!delay)
+ sendSignal(SUMA_REF, GSN_CONTINUEB, signal, 5, JBB);
+ else
+ sendSignalWithDelay(SUMA_REF, GSN_CONTINUEB, signal, 10, 5);
}
template void append(DataBuffer<11>&,SegmentedSectionPtr,SectionSegmentPool&);
diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.hpp b/storage/ndb/src/kernel/blocks/suma/Suma.hpp
index 65869f44423..6fe12a9b730 100644
--- a/storage/ndb/src/kernel/blocks/suma/Suma.hpp
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.hpp
@@ -24,6 +24,7 @@
#include <SLList.hpp>
#include <DLList.hpp>
+#include <DLFifoList.hpp>
#include <KeyTable.hpp>
#include <DataBuffer.hpp>
#include <SignalCounter.hpp>
@@ -33,13 +34,12 @@
#include <signaldata/UtilSequence.hpp>
#include <signaldata/SumaImpl.hpp>
-class SumaParticipant : public SimulatedBlock {
-protected:
- SumaParticipant(const Configuration & conf);
- virtual ~SumaParticipant();
- BLOCK_DEFINES(SumaParticipant);
-
-protected:
+class Suma : public SimulatedBlock {
+ BLOCK_DEFINES(Suma);
+public:
+ Suma(const Configuration & conf);
+ virtual ~Suma();
+
/**
* Private interface
*/
@@ -58,14 +58,19 @@ protected:
/**
* Dict interface
*/
+#if 0
void execLIST_TABLES_REF(Signal* signal);
void execLIST_TABLES_CONF(Signal* signal);
+#endif
void execGET_TABINFOREF(Signal* signal);
void execGET_TABINFO_CONF(Signal* signal);
-#if 0
+
void execGET_TABLEID_CONF(Signal* signal);
void execGET_TABLEID_REF(Signal* signal);
-#endif
+
+ void execDROP_TAB_CONF(Signal* signal);
+ void execALTER_TAB_CONF(Signal* signal);
+ void execCREATE_TAB_CONF(Signal* signal);
/**
* Scan interface
*/
@@ -82,7 +87,6 @@ protected:
void execTRIG_ATTRINFO(Signal* signal);
void execFIRE_TRIG_ORD(Signal* signal);
void execSUB_GCP_COMPLETE_REP(Signal* signal);
- void runSUB_GCP_COMPLETE_ACC(Signal* signal);
/**
* DIH signals
@@ -106,6 +110,9 @@ protected:
void execCONTINUEB(Signal* signal);
public:
+
+ void suma_ndbrequire(bool v);
+
typedef DataBuffer<15> TableList;
union FragmentDescriptor {
@@ -127,47 +134,25 @@ public:
Uint32 m_dummy;
};
- struct Table {
- Table() { m_tableId = ~0; }
- void release(SumaParticipant&);
+ struct Subscriber {
+ Uint32 m_senderRef;
+ Uint32 m_senderData;
+ Uint32 m_subPtrI; //reference to subscription
+ Uint32 nextList;
- union { Uint32 m_tableId; Uint32 key; };
- Uint32 m_schemaVersion;
- Uint32 m_hasTriggerDefined[3]; // Insert/Update/Delete
- Uint32 m_triggerIds[3]; // Insert/Update/Delete
-
- /**
- * Default order in which to ask for attributes during scan
- * 1) Fixed, not nullable
- * 2) Rest
- */
- DataBuffer<15>::Head m_attributes; // Attribute id's
-
- /**
- * Fragments
- */
- DataBuffer<15>::Head m_fragments; // Fragment descriptors
-
- /**
- * Hash table stuff
- */
- Uint32 nextHash;
- union { Uint32 prevHash; Uint32 nextPool; };
- Uint32 hashValue() const {
- return m_tableId;
- }
- bool equal(const Table& rec) const {
- return m_tableId == rec.m_tableId;
- }
+ union { Uint32 nextPool; Uint32 prevList; };
};
- typedef Ptr<Table> TablePtr;
+ typedef Ptr<Subscriber> SubscriberPtr;
/**
* Subscriptions
*/
+ class Table;
+ typedef Ptr<Table> TablePtr;
+
struct SyncRecord {
- SyncRecord(SumaParticipant& s, DataBuffer<15>::DataBufferPool & p)
- : m_locked(false), m_tableList(p), suma(s)
+ SyncRecord(Suma& s, DataBuffer<15>::DataBufferPool & p)
+ : m_tableList(p), suma(s)
#ifdef ERROR_INSERT
, cerrorInsert(s.cerrorInsert)
#endif
@@ -175,52 +160,31 @@ public:
void release();
+ Uint32 m_senderRef;
+ Uint32 m_senderData;
+
Uint32 m_subscriptionPtrI;
- bool m_locked;
- bool m_doSendSyncData;
- bool m_error;
- TableList m_tableList; // Tables to sync (snapshoted at beginning)
+ Uint32 m_error;
+ Uint32 m_currentTable;
+ TableList m_tableList; // Tables to sync
TableList::DataBufferIterator m_tableList_it;
/**
- * Sync meta
- */
- void startMeta(Signal*);
- void nextMeta(Signal*);
- void completeMeta(Signal*);
-
- /**
- * Create triggers
- */
- Uint32 m_latestTriggerId;
- void startTrigger(Signal* signal);
- void nextTrigger(Signal* signal);
- void completeTrigger(Signal* signal);
- void createAttributeMask(AttributeMask&, Table*);
-
- /**
- * Drop triggers
- */
- void startDropTrigger(Signal* signal);
- void nextDropTrigger(Signal* signal);
- void completeDropTrigger(Signal* signal);
-
- /**
* Sync data
*/
- Uint32 m_currentTable; // Index in m_tableList
Uint32 m_currentFragment; // Index in tabPtr.p->m_fragments
DataBuffer<15>::Head m_attributeList; // Attribute if other than default
DataBuffer<15>::Head m_tabList; // tables if other than default
Uint32 m_currentTableId; // Current table
Uint32 m_currentNoOfAttributes; // No of attributes for current table
+
void startScan(Signal*);
void nextScan(Signal*);
bool getNextFragment(TablePtr * tab, FragmentDescriptor * fd);
- void completeScan(Signal*);
+ void completeScan(Signal*, int error= 0);
- SumaParticipant & suma;
+ Suma & suma;
#ifdef ERROR_INSERT
UintR &cerrorInsert;
#endif
@@ -229,34 +193,110 @@ public:
suma.progError(line, cause, extra);
}
- void runLIST_TABLES_CONF(Signal* signal);
- void runGET_TABINFO_CONF(Signal* signal);
- void runGET_TABINFOREF(Signal* signal);
+ union { Uint32 nextPool; Uint32 nextList; Uint32 prevList; Uint32 ptrI; };
+ };
+ friend struct SyncRecord;
+
+ int initTable(Signal *signal,Uint32 tableId, TablePtr &tabPtr,
+ Ptr<SyncRecord> syncPtr);
+ int initTable(Signal *signal,Uint32 tableId, TablePtr &tabPtr,
+ SubscriberPtr subbPtr);
+ int initTable(Signal *signal,Uint32 tableId, TablePtr &tabPtr);
+
+ void completeOneSubscriber(Signal* signal, TablePtr tabPtr, SubscriberPtr subbPtr);
+ void completeAllSubscribers(Signal* signal, TablePtr tabPtr);
+ void completeInitTable(Signal* signal, TablePtr tabPtr);
+
+ struct Table {
+ Table() { m_tableId = ~0; n_subscribers = 0; }
+ void release(Suma&);
+ void checkRelease(Suma &suma);
+
+ DLList<Subscriber>::Head c_subscribers;
+ DLList<SyncRecord>::Head c_syncRecords;
+
+ enum State {
+ UNDEFINED,
+ DEFINING,
+ DEFINED,
+ DROPPED,
+ ALTERED
+ };
+ State m_state;
+
+ Uint32 m_ptrI;
+ SubscriberPtr m_drop_subbPtr;
+
+ Uint32 n_subscribers;
+
+ bool parseTable(SegmentedSectionPtr ptr, Suma &suma);
+ /**
+ * Create triggers
+ */
+ int setupTrigger(Signal* signal, Suma &suma);
+ void completeTrigger(Signal* signal);
+ void createAttributeMask(AttributeMask&, Suma &suma);
- void runDI_FCOUNTCONF(Signal* signal);
- void runDIGETPRIMCONF(Signal* signal);
+ /**
+ * Drop triggers
+ */
+ void dropTrigger(Signal* signal,Suma&);
+ void runDropTrigger(Signal* signal, Uint32 triggerId,Suma&);
- void runCREATE_TRIG_CONF(Signal* signal);
- void runDROP_TRIG_CONF(Signal* signal);
- void runDROP_TRIG_REF(Signal* signal);
- void runDropTrig(Signal* signal, Uint32 triggerId, Uint32 tableId);
+ /**
+ * Sync meta
+ */
+#if 0
+ void runLIST_TABLES_CONF(Signal* signal);
+#endif
+
+ union { Uint32 m_tableId; Uint32 key; };
+ Uint32 m_schemaVersion;
+ Uint32 m_hasTriggerDefined[3]; // Insert/Update/Delete
+ Uint32 m_triggerIds[3]; // Insert/Update/Delete
- union { Uint32 nextPool; Uint32 nextList; Uint32 ptrI; };
+ Uint32 m_error;
+ /**
+ * Default order in which to ask for attributes during scan
+ * 1) Fixed, not nullable
+ * 2) Rest
+ */
+ DataBuffer<15>::Head m_attributes; // Attribute id's
+
+ /**
+ * Fragments
+ */
+ Uint32 m_fragCount;
+ DataBuffer<15>::Head m_fragments; // Fragment descriptors
+
+ /**
+ * Hash table stuff
+ */
+ Uint32 nextHash;
+ union { Uint32 prevHash; Uint32 nextPool; };
+ Uint32 hashValue() const {
+ return m_tableId;
+ }
+ bool equal(const Table& rec) const {
+ return m_tableId == rec.m_tableId;
+ }
};
- friend struct SyncRecord;
-
+
struct Subscription {
- Uint32 m_subscriberRef;
- Uint32 m_subscriberData;
Uint32 m_senderRef;
Uint32 m_senderData;
Uint32 m_subscriptionId;
Uint32 m_subscriptionKey;
Uint32 m_subscriptionType;
- Uint32 m_coordinatorRef;
- Uint32 m_syncPtrI; // Active sync operation
- Uint32 m_nSubscribers;
- bool m_markRemove;
+
+ enum State {
+ UNDEFINED,
+ LOCKED,
+ DEFINED,
+ DROPPED
+ };
+ State m_state;
+ Uint32 n_subscribers;
Uint32 nextHash;
union { Uint32 prevHash; Uint32 nextPool; };
@@ -271,52 +311,18 @@ public:
m_subscriptionKey == s.m_subscriptionKey;
}
/**
- * The following holds the table names of tables included
+ * The following holds the tables included
* in the subscription.
*/
- // TODO we've got to fix this, this is to inefficient. Tomas
- char m_tables[MAX_TABLES];
-#if 0
- char m_tableNames[MAX_TABLES][MAX_TAB_NAME_SIZE];
-#endif
- /**
- * "Iterator" used to iterate through m_tableNames
- */
- Uint32 m_maxTables;
- Uint32 m_currentTable;
+ Uint32 m_tableId;
+ Uint32 m_table_ptrI;
};
typedef Ptr<Subscription> SubscriptionPtr;
-
- struct Subscriber {
- Uint32 m_senderRef;
- Uint32 m_senderData;
- Uint32 m_subscriberRef;
- Uint32 m_subscriberData;
- Uint32 m_subPtrI; //reference to subscription
- Uint32 m_firstGCI; // first GCI to send
- Uint32 m_lastGCI; // last acnowledged GCI
- Uint32 nextList;
- union { Uint32 nextPool; Uint32 prevList; };
- };
- typedef Ptr<Subscriber> SubscriberPtr;
-
- struct Bucket {
- bool active;
- bool handover;
- bool handover_started;
- Uint32 handoverGCI;
- };
-#define NO_OF_BUCKETS 24
- struct Bucket c_buckets[NO_OF_BUCKETS];
- bool c_handoverToDo;
- Uint32 c_lastCompleteGCI;
-
+
/**
*
*/
DLList<Subscriber> c_metaSubscribers;
- DLList<Subscriber> c_dataSubscribers;
- DLList<Subscriber> c_prepDataSubscribers;
DLList<Subscriber> c_removeDataSubscribers;
/**
@@ -329,24 +335,11 @@ public:
* Pools
*/
ArrayPool<Subscriber> c_subscriberPool;
- ArrayPool<Table> c_tablePool_;
+ ArrayPool<Table> c_tablePool;
ArrayPool<Subscription> c_subscriptionPool;
ArrayPool<SyncRecord> c_syncPool;
DataBuffer<15>::DataBufferPool c_dataBufferPool;
- /**
- * for restarting Suma not to start sending data too early
- */
- bool c_restartLock;
-
- /**
- * for flagging that a GCI containg inconsistent data
- * typically due to node failiure
- */
-
- Uint32 c_lastInconsistentGCI;
- Uint32 c_nodeFailGCI;
-
NodeBitmask c_failedApiNodes;
/**
@@ -354,76 +347,33 @@ public:
*/
bool removeSubscribersOnNode(Signal *signal, Uint32 nodeId);
- bool parseTable(Signal* signal, class GetTabInfoConf* conf, Uint32 tableId,
- SyncRecord* syncPtr_p);
bool checkTableTriggers(SegmentedSectionPtr ptr);
void addTableId(Uint32 TableId,
SubscriptionPtr subPtr, SyncRecord *psyncRec);
- void sendSubIdRef(Signal* signal, Uint32 errorCode);
- void sendSubCreateConf(Signal* signal, Uint32 sender, SubscriptionPtr subPtr);
- void sendSubCreateRef(Signal* signal, const SubCreateReq& req, Uint32 errorCode);
- void sendSubStartRef(SubscriptionPtr subPtr, Signal* signal,
- Uint32 errorCode, bool temporary = false);
- void sendSubStartRef(Signal* signal,
- Uint32 errorCode, bool temporary = false);
- void sendSubStopRef(Signal* signal,
- Uint32 errorCode, bool temporary = false);
+ void sendSubIdRef(Signal* signal,Uint32 senderRef,Uint32 senderData,Uint32 errorCode);
+ void sendSubCreateRef(Signal* signal, Uint32 errorCode);
+ void sendSubStartRef(Signal*, SubscriberPtr, Uint32 errorCode, SubscriptionData::Part);
+ void sendSubStartRef(Signal* signal, Uint32 errorCode);
+ void sendSubStopRef(Signal* signal, Uint32 errorCode);
void sendSubSyncRef(Signal* signal, Uint32 errorCode);
void sendSubRemoveRef(Signal* signal, const SubRemoveReq& ref,
- Uint32 errorCode, bool temporary = false);
+ Uint32 errorCode);
void sendSubStartComplete(Signal*, SubscriberPtr, Uint32,
SubscriptionData::Part);
void sendSubStopComplete(Signal*, SubscriberPtr);
void sendSubStopReq(Signal* signal, bool unlock= false);
- void completeSubRemoveReq(Signal* signal, SubscriptionPtr subPtr);
+ void completeSubRemove(SubscriptionPtr subPtr);
Uint32 getFirstGCI(Signal* signal);
- Uint32 decideWhoToSend(Uint32 nBucket, Uint32 gci);
-
- virtual Uint32 getStoreBucket(Uint32 v) = 0;
- virtual Uint32 getResponsibleSumaNodeId(Uint32 D) = 0;
- virtual Uint32 RtoI(Uint32 sumaRef, bool dieOnNotFound = true) = 0;
-
- struct FailoverBuffer {
- // FailoverBuffer(DataBuffer<15>::DataBufferPool & p);
- FailoverBuffer();
-
- bool subTableData(Uint32 gci, Uint32 *src, int sz);
- bool subGcpCompleteRep(Uint32 gci);
- bool nodeFailRep();
-
- // typedef DataBuffer<15> GCIDataBuffer;
- // GCIDataBuffer m_GCIDataBuffer;
- // GCIDataBuffer::DataBufferIterator m_GCIDataBuffer_it;
-
- Uint32 *c_gcis;
- int c_sz;
-
- // Uint32 *c_buf;
- // int c_buf_sz;
-
- int c_first;
- int c_next;
- bool c_full;
- } c_failoverBuffer;
/**
* Table admin
*/
void convertNameToId( SubscriptionPtr subPtr, Signal * signal);
-
-};
-
-class Suma : public SumaParticipant {
- BLOCK_DEFINES(Suma);
-public:
- Suma(const Configuration & conf);
- virtual ~Suma();
-private:
/**
* Public interface
*/
@@ -449,11 +399,11 @@ private:
void execREAD_NODESCONF(Signal* signal);
void execNODE_FAILREP(Signal* signal);
void execINCL_NODEREQ(Signal* signal);
- void execCONTINUEB(Signal* signal);
void execSIGNAL_DROPPED_REP(Signal* signal);
+ void execAPI_START_REP(Signal* signal);
void execAPI_FAILREQ(Signal* signal) ;
- void execSUB_GCP_COMPLETE_ACC(Signal* signal);
+ void execSUB_GCP_COMPLETE_ACK(Signal* signal);
/**
* Controller interface
@@ -467,17 +417,14 @@ private:
void execSUB_START_REF(Signal* signal);
void execSUB_START_CONF(Signal* signal);
- void execSUB_STOP_REF(Signal* signal);
- void execSUB_STOP_CONF(Signal* signal);
-
- void execSUB_SYNC_REF(Signal* signal);
- void execSUB_SYNC_CONF(Signal* signal);
-
void execSUB_ABORT_SYNC_REF(Signal* signal);
void execSUB_ABORT_SYNC_CONF(Signal* signal);
- void execSUMA_START_ME(Signal* signal);
+ void execSUMA_START_ME_REQ(Signal* signal);
+ void execSUMA_START_ME_REF(Signal* signal);
+ void execSUMA_START_ME_CONF(Signal* signal);
void execSUMA_HANDOVER_REQ(Signal* signal);
+ void execSUMA_HANDOVER_REF(Signal* signal);
void execSUMA_HANDOVER_CONF(Signal* signal);
/**
@@ -491,9 +438,6 @@ private:
void execUTIL_SEQUENCE_REF(Signal* signal);
void execCREATE_SUBID_REQ(Signal* signal);
- Uint32 getStoreBucket(Uint32 v);
- Uint32 getResponsibleSumaNodeId(Uint32 D);
-
/**
* for Suma that is restarting another
*/
@@ -502,33 +446,29 @@ private:
Restart(Suma& s);
Suma & suma;
+ Uint32 nodeId;
- bool c_okToStart[MAX_REPLICAS];
- bool c_waitingToStart[MAX_REPLICAS];
-
- DLHashTable<SumaParticipant::Subscription>::Iterator c_subPtr; // TODO [MAX_REPLICAS]
- SubscriberPtr c_subbPtr; // TODO [MAX_REPLICAS]
+ DLHashTable<Subscription>::Iterator c_subIt;
+ KeyTable<Table>::Iterator c_tabIt;
void progError(int line, int cause, const char * extra) {
suma.progError(line, cause, extra);
}
void resetNode(Uint32 sumaRef);
- void runSUMA_START_ME(Signal*, Uint32 sumaRef);
+ void runSUMA_START_ME_REQ(Signal*, Uint32 sumaRef);
void startNode(Signal*, Uint32 sumaRef);
void createSubscription(Signal* signal, Uint32 sumaRef);
void nextSubscription(Signal* signal, Uint32 sumaRef);
+ void runSUB_CREATE_CONF(Signal* signal);
void completeSubscription(Signal* signal, Uint32 sumaRef);
- void startSync(Signal* signal, Uint32 sumaRef);
- void nextSync(Signal* signal, Uint32 sumaRef);
- void completeSync(Signal* signal, Uint32 sumaRef);
-
+ void startSubscriber(Signal* signal, Uint32 sumaRef);
+ void nextSubscriber(Signal* signal, Uint32 sumaRef, SubscriberPtr subbPtr);
void sendSubStartReq(SubscriptionPtr subPtr, SubscriberPtr subbPtr,
Signal* signal, Uint32 sumaRef);
- void startSubscriber(Signal* signal, Uint32 sumaRef);
- void nextSubscriber(Signal* signal, Uint32 sumaRef);
+ void runSUB_START_CONF(Signal* signal);
void completeSubscriber(Signal* signal, Uint32 sumaRef);
void completeRestartingNode(Signal* signal, Uint32 sumaRef);
@@ -536,65 +476,134 @@ private:
private:
friend class Restart;
- struct SubCoordinator {
- Uint32 m_subscriberRef;
- Uint32 m_subscriberData;
-
- Uint32 m_subscriptionId;
- Uint32 m_subscriptionKey;
-
- NdbNodeBitmask m_participants;
-
- Uint32 m_outstandingGsn;
- SignalCounter m_outstandingRequests;
-
- Uint32 nextList;
- union { Uint32 prevList; Uint32 nextPool; };
- };
- Ptr<SubCoordinator> SubCoordinatorPtr;
-
- struct Node {
- Uint32 nodeId;
- Uint32 alive;
- Uint32 nextList;
- union { Uint32 prevList; Uint32 nextPool; };
- };
- typedef Ptr<Node> NodePtr;
-
/**
* Variables
*/
NodeId c_masterNodeId;
- SLList<Node> c_nodes;
- NdbNodeBitmask c_aliveNodes;
- NdbNodeBitmask c_preparingNodes;
-
- Uint32 RtoI(Uint32 sumaRef, bool dieOnNotFound = true);
+ NdbNodeBitmask c_alive_nodes;
+
+ /**
+ * for restarting Suma not to start sending data too early
+ */
+ struct Startup
+ {
+ bool m_wait_handover;
+ Uint32 m_restart_server_node_id;
+ NdbNodeBitmask m_handover_nodes;
+ } c_startup;
+
+ NodeBitmask c_connected_nodes; // (NODE/API) START REP / (API/NODE) FAIL REQ
+  NodeBitmask c_subscriber_nodes; // API nodes with at least one subscriber
/**
* for all Suma's to keep track of other Suma's in Node group
*/
Uint32 c_nodeGroup;
Uint32 c_noNodesInGroup;
- Uint32 c_idInNodeGroup;
- NodeId c_nodesInGroup[MAX_REPLICAS];
+ Uint32 c_nodesInGroup[MAX_REPLICAS];
+ NdbNodeBitmask c_nodes_in_nodegroup_mask; // NodeId's of nodes in nodegroup
+
+ void send_start_me_req(Signal* signal);
+ void check_start_handover(Signal* signal);
+ void send_handover_req(Signal* signal);
+
+ Uint32 get_responsible_node(Uint32 B) const;
+ Uint32 get_responsible_node(Uint32 B, const NdbNodeBitmask& mask) const;
+ bool check_switchover(Uint32 bucket, Uint32 gci);
+
+public:
+ struct Page_pos
+ {
+ Uint32 m_page_id;
+ Uint32 m_page_pos;
+ Uint32 m_max_gci; // max gci on page
+ Uint32 m_last_gci; // last gci on page
+ };
+private:
+
+ struct Bucket
+ {
+ enum {
+ BUCKET_STARTING = 0x1 // On starting node
+ ,BUCKET_HANDOVER = 0x2 // On running node
+      ,BUCKET_TAKEOVER = 0x4 // On taking over node
+      ,BUCKET_RESEND   = 0x8 // On taking over node
+ };
+ Uint16 m_state;
+ Uint16 m_switchover_node;
+ Uint16 m_nodes[MAX_REPLICAS];
+ Uint32 m_switchover_gci;
+ Uint32 m_max_acked_gci;
+ Uint32 m_buffer_tail; // Page
+ Page_pos m_buffer_head;
+ };
+
+ struct Buffer_page
+ {
+ STATIC_CONST( DATA_WORDS = 8192 - 5);
+ Uint32 m_page_state; // Used by TUP buddy algorithm
+ Uint32 m_page_chunk_ptr_i;
+ Uint32 m_next_page;
+ Uint32 m_words_used; //
+ Uint32 m_max_gci; //
+ Uint32 m_data[DATA_WORDS];
+ };
+
+ STATIC_CONST( NO_OF_BUCKETS = 24 ); // 24 = 4*3*2*1!
+ Uint32 c_no_of_buckets;
+ struct Bucket c_buckets[NO_OF_BUCKETS];
+
+ STATIC_CONST( BUCKET_MASK_SIZE = (((NO_OF_BUCKETS+31)>> 5)) );
+ typedef Bitmask<BUCKET_MASK_SIZE> Bucket_mask;
+ Bucket_mask m_active_buckets;
+ Bucket_mask m_switchover_buckets;
+
+ class Dbtup* m_tup;
+ void init_buffers();
+ Uint32* get_buffer_ptr(Signal*, Uint32 buck, Uint32 gci, Uint32 sz);
+ Uint32 seize_page();
+ void free_page(Uint32 page_id, Buffer_page* page);
+ void out_of_buffer(Signal*);
+ void out_of_buffer_release(Signal* signal, Uint32 buck);
+
+ void start_resend(Signal*, Uint32 bucket);
+ void resend_bucket(Signal*, Uint32 bucket, Uint32 gci,
+ Uint32 page_pos, Uint32 last_gci);
+ void release_gci(Signal*, Uint32 bucket, Uint32 gci);
+
+ Uint32 m_max_seen_gci; // FIRE_TRIG_ORD
+ Uint32 m_max_sent_gci; // FIRE_TRIG_ORD -> send
+ Uint32 m_last_complete_gci; // SUB_GCP_COMPLETE_REP
+ Uint32 m_out_of_buffer_gci;
+ Uint32 m_gcp_complete_rep_count;
+
+ struct Gcp_record
+ {
+ Uint32 m_gci;
+ NodeBitmask m_subscribers;
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ Uint32 prevList;
+ };
+ ArrayPool<Gcp_record> c_gcp_pool;
+ DLFifoList<Gcp_record> c_gcp_list;
+
+ struct Page_chunk
+ {
+ Uint32 m_page_id;
+ Uint32 m_size;
+ Uint32 m_free;
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ Uint32 prevList;
+ };
- /**
- * don't seem to be used
- */
- ArrayPool<Node> c_nodePool;
- ArrayPool<SubCoordinator> c_subCoordinatorPool;
- DLList<SubCoordinator> c_runningSubscriptions;
+ Uint32 m_first_free_page;
+ ArrayPool<Page_chunk> c_page_chunk_pool;
};
-inline Uint32
-Suma::RtoI(Uint32 sumaRef, bool dieOnNotFound) {
- for (Uint32 i = 0; i < c_noNodesInGroup; i++) {
- if (sumaRef == calcSumaBlockRef(c_nodesInGroup[i]))
- return i;
- }
- ndbrequire(!dieOnNotFound);
- return RNIL;
-}
-
#endif
diff --git a/storage/ndb/src/kernel/blocks/suma/SumaInit.cpp b/storage/ndb/src/kernel/blocks/suma/SumaInit.cpp
index b5945db3811..fed629f8649 100644
--- a/storage/ndb/src/kernel/blocks/suma/SumaInit.cpp
+++ b/storage/ndb/src/kernel/blocks/suma/SumaInit.cpp
@@ -19,68 +19,106 @@
#include <Properties.hpp>
#include <Configuration.hpp>
-SumaParticipant::SumaParticipant(const Configuration & conf) :
+Suma::Suma(const Configuration & conf) :
SimulatedBlock(SUMA, conf),
c_metaSubscribers(c_subscriberPool),
- c_dataSubscribers(c_subscriberPool),
- c_prepDataSubscribers(c_subscriberPool),
c_removeDataSubscribers(c_subscriberPool),
- c_tables(c_tablePool_),
- c_subscriptions(c_subscriptionPool)
+ c_tables(c_tablePool),
+ c_subscriptions(c_subscriptionPool),
+ Restart(*this),
+ c_gcp_list(c_gcp_pool)
{
- BLOCK_CONSTRUCTOR(SumaParticipant);
+ c_masterNodeId = getOwnNodeId();
+
+ c_no_of_buckets = c_nodeGroup = c_noNodesInGroup = 0;
+ for (int i = 0; i < MAX_REPLICAS; i++) {
+ c_nodesInGroup[i] = 0;
+ }
+
+ // Add received signals
+ addRecSignal(GSN_STTOR, &Suma::execSTTOR);
+ addRecSignal(GSN_NDB_STTOR, &Suma::execNDB_STTOR);
+ addRecSignal(GSN_DUMP_STATE_ORD, &Suma::execDUMP_STATE_ORD);
+ addRecSignal(GSN_READ_NODESCONF, &Suma::execREAD_NODESCONF);
+ addRecSignal(GSN_API_START_REP, &Suma::execAPI_START_REP, true);
+ addRecSignal(GSN_API_FAILREQ, &Suma::execAPI_FAILREQ);
+ addRecSignal(GSN_NODE_FAILREP, &Suma::execNODE_FAILREP);
+ addRecSignal(GSN_INCL_NODEREQ, &Suma::execINCL_NODEREQ);
+ addRecSignal(GSN_CONTINUEB, &Suma::execCONTINUEB);
+ addRecSignal(GSN_SIGNAL_DROPPED_REP, &Suma::execSIGNAL_DROPPED_REP, true);
+ addRecSignal(GSN_UTIL_SEQUENCE_CONF, &Suma::execUTIL_SEQUENCE_CONF);
+ addRecSignal(GSN_UTIL_SEQUENCE_REF, &Suma::execUTIL_SEQUENCE_REF);
+ addRecSignal(GSN_CREATE_SUBID_REQ,
+ &Suma::execCREATE_SUBID_REQ);
+
+ addRecSignal(GSN_SUB_CREATE_CONF, &Suma::execSUB_CREATE_CONF);
+ addRecSignal(GSN_SUB_CREATE_REF, &Suma::execSUB_CREATE_REF);
+
+ addRecSignal(GSN_SUB_START_CONF, &Suma::execSUB_START_CONF);
+ addRecSignal(GSN_SUB_START_REF, &Suma::execSUB_START_REF);
+
+ addRecSignal(GSN_SUMA_START_ME_REQ, &Suma::execSUMA_START_ME_REQ);
+ addRecSignal(GSN_SUMA_START_ME_REF, &Suma::execSUMA_START_ME_REF);
+ addRecSignal(GSN_SUMA_START_ME_CONF, &Suma::execSUMA_START_ME_CONF);
+ addRecSignal(GSN_SUMA_HANDOVER_REQ, &Suma::execSUMA_HANDOVER_REQ);
+ addRecSignal(GSN_SUMA_HANDOVER_REF, &Suma::execSUMA_HANDOVER_REF);
+ addRecSignal(GSN_SUMA_HANDOVER_CONF, &Suma::execSUMA_HANDOVER_CONF);
+
+ addRecSignal(GSN_SUB_GCP_COMPLETE_ACK,
+ &Suma::execSUB_GCP_COMPLETE_ACK);
/**
* SUMA participant if
*/
- addRecSignal(GSN_SUB_CREATE_REQ, &SumaParticipant::execSUB_CREATE_REQ);
- addRecSignal(GSN_SUB_REMOVE_REQ, &SumaParticipant::execSUB_REMOVE_REQ);
- addRecSignal(GSN_SUB_START_REQ, &SumaParticipant::execSUB_START_REQ);
- addRecSignal(GSN_SUB_STOP_REQ, &SumaParticipant::execSUB_STOP_REQ);
- addRecSignal(GSN_SUB_SYNC_REQ, &SumaParticipant::execSUB_SYNC_REQ);
-
- addRecSignal(GSN_SUB_STOP_CONF, &SumaParticipant::execSUB_STOP_CONF);
- addRecSignal(GSN_SUB_STOP_REF, &SumaParticipant::execSUB_STOP_REF);
+ addRecSignal(GSN_SUB_CREATE_REQ, &Suma::execSUB_CREATE_REQ);
+ addRecSignal(GSN_SUB_REMOVE_REQ, &Suma::execSUB_REMOVE_REQ);
+ addRecSignal(GSN_SUB_START_REQ, &Suma::execSUB_START_REQ);
+ addRecSignal(GSN_SUB_STOP_REQ, &Suma::execSUB_STOP_REQ);
+ addRecSignal(GSN_SUB_STOP_REF, &Suma::execSUB_STOP_REF);
+ addRecSignal(GSN_SUB_STOP_CONF, &Suma::execSUB_STOP_CONF);
+ addRecSignal(GSN_SUB_SYNC_REQ, &Suma::execSUB_SYNC_REQ);
/**
* Dict interface
*/
- //addRecSignal(GSN_LIST_TABLES_REF, &SumaParticipant::execLIST_TABLES_REF);
- addRecSignal(GSN_LIST_TABLES_CONF, &SumaParticipant::execLIST_TABLES_CONF);
- //addRecSignal(GSN_GET_TABINFOREF, &SumaParticipant::execGET_TABINFO_REF);
- addRecSignal(GSN_GET_TABINFO_CONF, &SumaParticipant::execGET_TABINFO_CONF);
- addRecSignal(GSN_GET_TABINFOREF, &SumaParticipant::execGET_TABINFOREF);
+ addRecSignal(GSN_DROP_TAB_CONF, &Suma::execDROP_TAB_CONF);
+ addRecSignal(GSN_ALTER_TAB_CONF, &Suma::execALTER_TAB_CONF);
+ addRecSignal(GSN_CREATE_TAB_CONF, &Suma::execCREATE_TAB_CONF);
+
+#if 0
+ addRecSignal(GSN_LIST_TABLES_CONF, &Suma::execLIST_TABLES_CONF);
+#endif
+ addRecSignal(GSN_GET_TABINFO_CONF, &Suma::execGET_TABINFO_CONF);
+ addRecSignal(GSN_GET_TABINFOREF, &Suma::execGET_TABINFOREF);
#if 0
- addRecSignal(GSN_GET_TABLEID_CONF, &SumaParticipant::execGET_TABLEID_CONF);
- addRecSignal(GSN_GET_TABLEID_REF, &SumaParticipant::execGET_TABLEID_REF);
+ addRecSignal(GSN_GET_TABLEID_CONF, &Suma::execGET_TABLEID_CONF);
+ addRecSignal(GSN_GET_TABLEID_REF, &Suma::execGET_TABLEID_REF);
#endif
/**
* Dih interface
*/
- //addRecSignal(GSN_DI_FCOUNTREF, &SumaParticipant::execDI_FCOUNTREF);
- addRecSignal(GSN_DI_FCOUNTCONF, &SumaParticipant::execDI_FCOUNTCONF);
- //addRecSignal(GSN_DIGETPRIMREF, &SumaParticipant::execDIGETPRIMREF);
- addRecSignal(GSN_DIGETPRIMCONF, &SumaParticipant::execDIGETPRIMCONF);
+ addRecSignal(GSN_DI_FCOUNTCONF, &Suma::execDI_FCOUNTCONF);
+ addRecSignal(GSN_DIGETPRIMCONF, &Suma::execDIGETPRIMCONF);
/**
* Scan interface
*/
- addRecSignal(GSN_SCAN_HBREP, &SumaParticipant::execSCAN_HBREP);
- addRecSignal(GSN_TRANSID_AI, &SumaParticipant::execTRANSID_AI);
- addRecSignal(GSN_SCAN_FRAGREF, &SumaParticipant::execSCAN_FRAGREF);
- addRecSignal(GSN_SCAN_FRAGCONF, &SumaParticipant::execSCAN_FRAGCONF);
+ addRecSignal(GSN_SCAN_HBREP, &Suma::execSCAN_HBREP);
+ addRecSignal(GSN_TRANSID_AI, &Suma::execTRANSID_AI);
+ addRecSignal(GSN_SCAN_FRAGREF, &Suma::execSCAN_FRAGREF);
+ addRecSignal(GSN_SCAN_FRAGCONF, &Suma::execSCAN_FRAGCONF);
#if 0
addRecSignal(GSN_SUB_SYNC_CONTINUE_REF,
- &SumaParticipant::execSUB_SYNC_CONTINUE_REF);
+ &Suma::execSUB_SYNC_CONTINUE_REF);
#endif
addRecSignal(GSN_SUB_SYNC_CONTINUE_CONF,
- &SumaParticipant::execSUB_SYNC_CONTINUE_CONF);
+ &Suma::execSUB_SYNC_CONTINUE_CONF);
/**
* Trigger stuff
*/
- addRecSignal(GSN_TRIG_ATTRINFO, &SumaParticipant::execTRIG_ATTRINFO);
- addRecSignal(GSN_FIRE_TRIG_ORD, &SumaParticipant::execFIRE_TRIG_ORD);
+ addRecSignal(GSN_TRIG_ATTRINFO, &Suma::execTRIG_ATTRINFO);
+ addRecSignal(GSN_FIRE_TRIG_ORD, &Suma::execFIRE_TRIG_ORD);
addRecSignal(GSN_CREATE_TRIG_REF, &Suma::execCREATE_TRIG_REF);
addRecSignal(GSN_CREATE_TRIG_CONF, &Suma::execCREATE_TRIG_CONF);
@@ -88,27 +126,33 @@ SumaParticipant::SumaParticipant(const Configuration & conf) :
addRecSignal(GSN_DROP_TRIG_CONF, &Suma::execDROP_TRIG_CONF);
addRecSignal(GSN_SUB_GCP_COMPLETE_REP,
- &SumaParticipant::execSUB_GCP_COMPLETE_REP);
+ &Suma::execSUB_GCP_COMPLETE_REP);
/**
* @todo: fix pool sizes
*/
- Uint32 noTables;
+ Uint32 noTables, noAttrs;
const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
ndbrequire(p != 0);
ndb_mgm_get_int_parameter(p, CFG_DB_NO_TABLES,
&noTables);
+ ndb_mgm_get_int_parameter(p, CFG_DB_NO_ATTRIBUTES,
+ &noAttrs);
- c_tablePool_.setSize(noTables);
+ c_tablePool.setSize(noTables);
c_tables.setSize(noTables);
- c_subscriptions.setSize(20); //10
- c_subscriberPool.setSize(64);
+ c_subscriptions.setSize(noTables);
+ c_subscriberPool.setSize(2*noTables);
- c_subscriptionPool.setSize(64); //2
- c_syncPool.setSize(20); //2
- c_dataBufferPool.setSize(128);
+ c_subscriptionPool.setSize(noTables);
+ c_syncPool.setSize(2);
+ c_dataBufferPool.setSize(noAttrs);
+ c_gcp_pool.setSize(10);
+
+ m_first_free_page= RNIL;
+ c_page_chunk_pool.setSize(50);
{
SLList<SyncRecord> tmp(c_syncPool);
@@ -118,69 +162,24 @@ SumaParticipant::SumaParticipant(const Configuration & conf) :
tmp.release();
}
- for( int i = 0; i < NO_OF_BUCKETS; i++) {
- c_buckets[i].active = false;
- c_buckets[i].handover = false;
- c_buckets[i].handover_started = false;
- c_buckets[i].handoverGCI = 0;
- }
- c_handoverToDo = false;
- c_lastInconsistentGCI = RNIL;
- c_lastCompleteGCI = RNIL;
- c_nodeFailGCI = 0;
-
- c_failedApiNodes.clear();
-}
-
-SumaParticipant::~SumaParticipant()
-{
-}
-
-Suma::Suma(const Configuration & conf) :
- SumaParticipant(conf),
- Restart(*this),
- c_nodes(c_nodePool),
- c_runningSubscriptions(c_subCoordinatorPool)
-{
-
- c_nodePool.setSize(MAX_NDB_NODES);
- c_masterNodeId = getOwnNodeId();
-
- c_nodeGroup = c_noNodesInGroup = c_idInNodeGroup = 0;
- for (int i = 0; i < MAX_REPLICAS; i++) {
- c_nodesInGroup[i] = 0;
+ memset(c_buckets, 0, sizeof(c_buckets));
+ for(Uint32 i = 0; i<NO_OF_BUCKETS; i++)
+ {
+ Bucket* bucket= c_buckets+i;
+ bucket->m_buffer_tail = RNIL;
+ bucket->m_buffer_head.m_page_id = RNIL;
+ bucket->m_buffer_head.m_page_pos = Buffer_page::DATA_WORDS;
}
-
- c_subCoordinatorPool.setSize(10);
- // Add received signals
- addRecSignal(GSN_STTOR, &Suma::execSTTOR);
- addRecSignal(GSN_NDB_STTOR, &Suma::execNDB_STTOR);
- addRecSignal(GSN_DUMP_STATE_ORD, &Suma::execDUMP_STATE_ORD);
- addRecSignal(GSN_READ_NODESCONF, &Suma::execREAD_NODESCONF);
- addRecSignal(GSN_API_FAILREQ, &Suma::execAPI_FAILREQ);
- addRecSignal(GSN_NODE_FAILREP, &Suma::execNODE_FAILREP);
- addRecSignal(GSN_INCL_NODEREQ, &Suma::execINCL_NODEREQ);
- addRecSignal(GSN_CONTINUEB, &Suma::execCONTINUEB);
- addRecSignal(GSN_SIGNAL_DROPPED_REP, &Suma::execSIGNAL_DROPPED_REP, true);
- addRecSignal(GSN_UTIL_SEQUENCE_CONF, &Suma::execUTIL_SEQUENCE_CONF);
- addRecSignal(GSN_UTIL_SEQUENCE_REF, &Suma::execUTIL_SEQUENCE_REF);
- addRecSignal(GSN_CREATE_SUBID_REQ,
- &Suma::execCREATE_SUBID_REQ);
-
- addRecSignal(GSN_SUB_CREATE_CONF, &Suma::execSUB_CREATE_CONF);
- addRecSignal(GSN_SUB_CREATE_REF, &Suma::execSUB_CREATE_REF);
- addRecSignal(GSN_SUB_SYNC_CONF, &Suma::execSUB_SYNC_CONF);
- addRecSignal(GSN_SUB_SYNC_REF, &Suma::execSUB_SYNC_REF);
- addRecSignal(GSN_SUB_START_CONF, &Suma::execSUB_START_CONF);
- addRecSignal(GSN_SUB_START_REF, &Suma::execSUB_START_REF);
-
- addRecSignal(GSN_SUMA_START_ME, &Suma::execSUMA_START_ME);
- addRecSignal(GSN_SUMA_HANDOVER_REQ, &Suma::execSUMA_HANDOVER_REQ);
- addRecSignal(GSN_SUMA_HANDOVER_CONF, &Suma::execSUMA_HANDOVER_CONF);
-
- addRecSignal(GSN_SUB_GCP_COMPLETE_ACC,
- &Suma::execSUB_GCP_COMPLETE_ACC);
+ m_max_seen_gci = 0; // FIRE_TRIG_ORD
+ m_max_sent_gci = 0; // FIRE_TRIG_ORD -> send
+ m_last_complete_gci = 0; // SUB_GCP_COMPLETE_REP
+ m_gcp_complete_rep_count = 0;
+ m_out_of_buffer_gci = 0;
+
+ c_startup.m_wait_handover= false;
+ c_failedApiNodes.clear();
+ c_startup.m_restart_server_node_id = 0; // Server for my NR
}
Suma::~Suma()
@@ -188,5 +187,4 @@ Suma::~Suma()
}
BLOCK_FUNCTIONS(Suma)
-BLOCK_FUNCTIONS(SumaParticipant)
diff --git a/storage/ndb/src/kernel/blocks/trix/Trix.cpp b/storage/ndb/src/kernel/blocks/trix/Trix.cpp
index cd11cb4d575..9d75414a953 100644
--- a/storage/ndb/src/kernel/blocks/trix/Trix.cpp
+++ b/storage/ndb/src/kernel/blocks/trix/Trix.cpp
@@ -83,7 +83,6 @@ Trix::Trix(const Configuration & conf) :
addRecSignal(GSN_SUB_SYNC_CONF, &Trix::execSUB_SYNC_CONF);
addRecSignal(GSN_SUB_SYNC_REF, &Trix::execSUB_SYNC_REF);
addRecSignal(GSN_SUB_SYNC_CONTINUE_REQ, &Trix::execSUB_SYNC_CONTINUE_REQ);
- addRecSignal(GSN_SUB_META_DATA, &Trix::execSUB_META_DATA);
addRecSignal(GSN_SUB_TABLE_DATA, &Trix::execSUB_TABLE_DATA);
// Allocate pool sizes
@@ -424,6 +423,8 @@ Trix::execDUMP_STATE_ORD(Signal* signal)
void Trix:: execBUILDINDXREQ(Signal* signal)
{
jamEntry();
+ DBUG_ENTER("Trix:: execBUILDINDXREQ");
+
BuildIndxReq * buildIndxReq = (BuildIndxReq *)signal->getDataPtr();
// Seize a subscription record
@@ -438,7 +439,7 @@ void Trix:: execBUILDINDXREQ(Signal* signal)
releaseSections(signal);
sendSignal(buildIndxReq->getUserRef(),
GSN_BUILDINDXREF, signal, BuildIndxRef::SignalLength, JBB);
- return;
+ DBUG_VOID_RETURN;
}
subRec = subRecPtr.p;
subRec->errorCode = BuildIndxRef::NoError;
@@ -476,6 +477,7 @@ void Trix:: execBUILDINDXREQ(Signal* signal)
#endif
releaseSections(signal);
prepareInsertTransactions(signal, subRecPtr);
+ DBUG_VOID_RETURN;
}
void Trix:: execBUILDINDXCONF(Signal* signal)
@@ -563,25 +565,31 @@ void Trix::execUTIL_EXECUTE_REF(Signal* signal)
void Trix::execSUB_CREATE_CONF(Signal* signal)
{
jamEntry();
+ DBUG_ENTER("Trix::execSUB_CREATE_CONF");
SubCreateConf * subCreateConf = (SubCreateConf *)signal->getDataPtr();
SubscriptionRecPtr subRecPtr;
SubscriptionRecord* subRec;
- subRecPtr.i = subCreateConf->subscriberData;
+ subRecPtr.i = subCreateConf->senderData;
if ((subRec = c_theSubscriptions.getPtr(subRecPtr.i)) == NULL) {
printf("Trix::execSUB_CREATE_CONF: Failed to find subscription data %u\n", subRecPtr.i);
- return;
+ DBUG_VOID_RETURN;
}
- ndbrequire(subRec->subscriptionId == subCreateConf->subscriptionId);
- ndbrequire(subRec->subscriptionKey == subCreateConf->subscriptionKey);
subRec->subscriptionCreated = true;
subRecPtr.p = subRec;
- setupTableScan(signal, subRecPtr);
+
+ DBUG_PRINT("info",("i: %u subscriptionId: %u, subscriptionKey: %u",
+ subRecPtr.i, subRecPtr.p->subscriptionId,
+ subRecPtr.p->subscriptionKey));
+
+ startTableScan(signal, subRecPtr);
+ DBUG_VOID_RETURN;
}
void Trix::execSUB_CREATE_REF(Signal* signal)
{
jamEntry();
+ DBUG_ENTER("Trix::execSUB_CREATE_REF");
// THIS SIGNAL IS NEVER SENT FROM SUMA?
/*
SubCreateRef * subCreateRef = (SubCreateRef *)signal->getDataPtr();
@@ -596,47 +604,48 @@ void Trix::execSUB_CREATE_REF(Signal* signal)
subRecPtr.p = subRec;
buildFailed(signal, subRecPtr, BuildIndxRef::InternalError);
*/
+ DBUG_VOID_RETURN;
}
void Trix::execSUB_SYNC_CONF(Signal* signal)
{
jamEntry();
+ DBUG_ENTER("Trix::execSUB_SYNC_CONF");
SubSyncConf * subSyncConf = (SubSyncConf *)signal->getDataPtr();
SubscriptionRecPtr subRecPtr;
SubscriptionRecord* subRec;
- subRecPtr.i = subSyncConf->subscriberData;
+ subRecPtr.i = subSyncConf->senderData;
if ((subRec = c_theSubscriptions.getPtr(subRecPtr.i)) == NULL) {
- printf("Trix::execSUB_SYNC_CONF: Failed to find subscription data %u\n", subRecPtr.i);
- return;
+ printf("Trix::execSUB_SYNC_CONF: Failed to find subscription data %u\n",
+ subRecPtr.i);
+ DBUG_VOID_RETURN;
}
- ndbrequire(subRec->subscriptionId == subSyncConf->subscriptionId);
- ndbrequire(subRec->subscriptionKey == subSyncConf->subscriptionKey);
+
subRecPtr.p = subRec;
- if(subSyncConf->part == SubscriptionData::MetaData)
- startTableScan(signal, subRecPtr);
- else {
- subRec->expectedConf--;
- checkParallelism(signal, subRec);
- if (subRec->expectedConf == 0)
- buildComplete(signal, subRecPtr);
- }
+ subRec->expectedConf--;
+ checkParallelism(signal, subRec);
+ if (subRec->expectedConf == 0)
+ buildComplete(signal, subRecPtr);
+ DBUG_VOID_RETURN;
}
void Trix::execSUB_SYNC_REF(Signal* signal)
{
jamEntry();
+ DBUG_ENTER("Trix::execSUB_SYNC_REF");
SubSyncRef * subSyncRef = (SubSyncRef *)signal->getDataPtr();
SubscriptionRecPtr subRecPtr;
SubscriptionRecord* subRec;
- subRecPtr.i = subSyncRef->subscriberData;
+ subRecPtr.i = subSyncRef->senderData;
if ((subRec = c_theSubscriptions.getPtr(subRecPtr.i)) == NULL) {
printf("Trix::execSUB_SYNC_REF: Failed to find subscription data %u\n", subRecPtr.i);
- return;
+ DBUG_VOID_RETURN;
}
subRecPtr.p = subRec;
buildFailed(signal, subRecPtr, BuildIndxRef::InternalError);
+ DBUG_VOID_RETURN;
}
void Trix::execSUB_SYNC_CONTINUE_REQ(Signal* signal)
@@ -656,21 +665,17 @@ void Trix::execSUB_SYNC_CONTINUE_REQ(Signal* signal)
checkParallelism(signal, subRec);
}
-void Trix::execSUB_META_DATA(Signal* signal)
-{
- jamEntry();
-}
-
void Trix::execSUB_TABLE_DATA(Signal* signal)
{
jamEntry();
+ DBUG_ENTER("Trix::execSUB_TABLE_DATA");
SubTableData * subTableData = (SubTableData *)signal->getDataPtr();
SubscriptionRecPtr subRecPtr;
SubscriptionRecord* subRec;
- subRecPtr.i = subTableData->subscriberData;
+ subRecPtr.i = subTableData->senderData;
if ((subRec = c_theSubscriptions.getPtr(subRecPtr.i)) == NULL) {
printf("Trix::execSUB_TABLE_DATA: Failed to find subscription data %u\n", subRecPtr.i);
- return;
+ DBUG_VOID_RETURN;
}
subRecPtr.p = subRec;
SegmentedSectionPtr headerPtr, dataPtr;
@@ -681,18 +686,41 @@ void Trix::execSUB_TABLE_DATA(Signal* signal)
printf("Trix::execSUB_TABLE_DATA: Failed to get data section\n");
}
executeInsertTransaction(signal, subRecPtr, headerPtr, dataPtr);
+ DBUG_VOID_RETURN;
}
void Trix::setupSubscription(Signal* signal, SubscriptionRecPtr subRecPtr)
{
- Uint32 attributeList[MAX_ATTRIBUTES_IN_TABLE * 2];
- SubCreateReq * subCreateReq = (SubCreateReq *)signal->getDataPtrSend();
+ jam();
+ DBUG_ENTER("Trix::setupSubscription");
SubscriptionRecord* subRec = subRecPtr.p;
+ SubCreateReq * subCreateReq = (SubCreateReq *)signal->getDataPtrSend();
// Uint32 listLen = subRec->noOfIndexColumns + subRec->noOfKeyColumns;
+ subCreateReq->senderRef = reference();
+ subCreateReq->senderData = subRecPtr.i;
+ subCreateReq->subscriptionId = subRec->subscriptionId;
+ subCreateReq->subscriptionKey = subRec->subscriptionKey;
+ subCreateReq->tableId = subRec->sourceTableId;
+ subCreateReq->subscriptionType = SubCreateReq::SingleTableScan;
+
+ DBUG_PRINT("info",("i: %u subscriptionId: %u, subscriptionKey: %u",
+ subRecPtr.i, subCreateReq->subscriptionId,
+ subCreateReq->subscriptionKey));
+
+ sendSignal(SUMA_REF, GSN_SUB_CREATE_REQ,
+ signal, SubCreateReq::SignalLength, JBB);
+ DBUG_VOID_RETURN;
+}
+
+void Trix::startTableScan(Signal* signal, SubscriptionRecPtr subRecPtr)
+{
+ jam();
+
+ Uint32 attributeList[MAX_ATTRIBUTES_IN_TABLE * 2];
+ SubscriptionRecord* subRec = subRecPtr.p;
AttrOrderBuffer::DataBufferIterator iter;
Uint32 i = 0;
- jam();
bool moreAttributes = subRec->attributeOrder.first(iter);
while (moreAttributes) {
attributeList[i++] = *iter.data;
@@ -703,41 +731,21 @@ void Trix::setupSubscription(Signal* signal, SubscriptionRecPtr subRecPtr)
orderPtr[0].p = attributeList;
orderPtr[0].sz = subRec->attributeOrder.getSize();
-
- subCreateReq->subscriberRef = reference();
- subCreateReq->subscriberData = subRecPtr.i;
- subCreateReq->subscriptionId = subRec->subscriptionId;
- subCreateReq->subscriptionKey = subRec->subscriptionKey;
- subCreateReq->tableId = subRec->sourceTableId;
- subCreateReq->subscriptionType = SubCreateReq::SingleTableScan;
-
- sendSignal(SUMA_REF, GSN_SUB_CREATE_REQ,
- signal, SubCreateReq::SignalLength+1, JBB, orderPtr, 1);
-}
-
-void Trix::setupTableScan(Signal* signal, SubscriptionRecPtr subRecPtr)
-{
SubSyncReq * subSyncReq = (SubSyncReq *)signal->getDataPtrSend();
+ subSyncReq->senderRef = reference();
+ subSyncReq->senderData = subRecPtr.i;
+ subSyncReq->subscriptionId = subRec->subscriptionId;
+ subSyncReq->subscriptionKey = subRec->subscriptionKey;
+ subSyncReq->part = SubscriptionData::TableData;
- jam();
- subSyncReq->subscriptionId = subRecPtr.i;
- subSyncReq->subscriptionKey = subRecPtr.p->subscriptionKey;
- subSyncReq->part = SubscriptionData::MetaData;
- sendSignal(SUMA_REF, GSN_SUB_SYNC_REQ,
- signal, SubSyncReq::SignalLength, JBB);
-}
-
-void Trix::startTableScan(Signal* signal, SubscriptionRecPtr subRecPtr)
-{
- jam();
subRecPtr.p->expectedConf = 1;
- SubSyncReq * subSyncReq = (SubSyncReq *)signal->getDataPtrSend();
- subSyncReq->subscriptionId = subRecPtr.i;
- subSyncReq->subscriptionKey = subRecPtr.p->subscriptionKey;
- subSyncReq->part = SubscriptionData::TableData;
- sendSignal(SUMA_REF, GSN_SUB_SYNC_REQ,
- signal, SubSyncReq::SignalLength, JBB);
+ DBUG_PRINT("info",("i: %u subscriptionId: %u, subscriptionKey: %u",
+ subRecPtr.i, subSyncReq->subscriptionId,
+ subSyncReq->subscriptionKey));
+
+ sendSignal(SUMA_REF, GSN_SUB_SYNC_REQ,
+ signal, SubSyncReq::SignalLength, JBB, orderPtr, 1);
}
void Trix::prepareInsertTransactions(Signal* signal,
diff --git a/storage/ndb/src/kernel/blocks/trix/Trix.hpp b/storage/ndb/src/kernel/blocks/trix/Trix.hpp
index 8dc01375fa1..d09ff43df70 100644
--- a/storage/ndb/src/kernel/blocks/trix/Trix.hpp
+++ b/storage/ndb/src/kernel/blocks/trix/Trix.hpp
@@ -170,12 +170,10 @@ private:
void execSUB_SYNC_CONF(Signal* signal);
void execSUB_SYNC_REF(Signal* signal);
void execSUB_SYNC_CONTINUE_REQ(Signal* signal);
- void execSUB_META_DATA(Signal* signal);
void execSUB_TABLE_DATA(Signal* signal);
// Utility functions
void setupSubscription(Signal* signal, SubscriptionRecPtr subRecPtr);
- void setupTableScan(Signal* signal, SubscriptionRecPtr subRecPtr);
void startTableScan(Signal* signal, SubscriptionRecPtr subRecPtr);
void prepareInsertTransactions(Signal* signal, SubscriptionRecPtr subRecPtr);
void executeInsertTransaction(Signal* signal, SubscriptionRecPtr subRecPtr,
diff --git a/storage/ndb/src/kernel/vm/Configuration.cpp b/storage/ndb/src/kernel/vm/Configuration.cpp
index 650d914035f..0936f77a4b6 100644
--- a/storage/ndb/src/kernel/vm/Configuration.cpp
+++ b/storage/ndb/src/kernel/vm/Configuration.cpp
@@ -568,13 +568,9 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
case NODE_TYPE_API:
noOfAPINodes++; // No of API processes
break;
- case NODE_TYPE_REP:
- break;
case NODE_TYPE_MGM:
noOfMGMNodes++; // No of MGM processes
break;
- case NODE_TYPE_EXT_REP:
- break;
default:
BaseString::snprintf(buf, sizeof(buf), "Unknown node type: %d", nodeType);
ERROR_SET(fatal, ERR_INVALID_CONFIG, msg, buf);
diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp
index 57a4032e40b..3ca741edd14 100644
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp
@@ -148,6 +148,8 @@ SimulatedBlock::installSimulatedBlockFunctions(){
a[GSN_FSREMOVEREF] = &SimulatedBlock::execFSREMOVEREF;
a[GSN_FSSYNCREF] = &SimulatedBlock::execFSSYNCREF;
a[GSN_FSAPPENDREF] = &SimulatedBlock::execFSAPPENDREF;
+ a[GSN_NODE_START_REP] = &SimulatedBlock::execNODE_START_REP;
+ a[GSN_API_START_REP] = &SimulatedBlock::execAPI_START_REP;
}
void
@@ -913,6 +915,16 @@ SimulatedBlock::execCONTINUE_FRAGMENTED(Signal * signal){
sendSignal(reference(), GSN_CONTINUE_FRAGMENTED, signal, 1, JBB);
}
+void
+SimulatedBlock::execNODE_START_REP(Signal* signal)
+{
+}
+
+void
+SimulatedBlock::execAPI_START_REP(Signal* signal)
+{
+}
+
#ifdef VM_TRACE_TIME
void
SimulatedBlock::clearTimes() {
diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp
index bba92ca7c31..92002fb1fb7 100644
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp
@@ -420,7 +420,9 @@ private:
void execSIGNAL_DROPPED_REP(Signal* signal);
void execCONTINUE_FRAGMENTED(Signal* signal);
-
+ void execAPI_START_REP(Signal* signal);
+ void execNODE_START_REP(Signal* signal);
+
Uint32 c_fragmentIdCounter;
ArrayPool<FragmentInfo> c_fragmentInfoPool;
DLHashTable<FragmentInfo> c_fragmentInfoHash;
diff --git a/storage/ndb/src/mgmapi/mgmapi.cpp b/storage/ndb/src/mgmapi/mgmapi.cpp
index 8263e8cbc93..0f34eafdefa 100644
--- a/storage/ndb/src/mgmapi/mgmapi.cpp
+++ b/storage/ndb/src/mgmapi/mgmapi.cpp
@@ -184,7 +184,7 @@ ndb_mgm_set_connectstring(NdbMgmHandle handle, const char * mgmsrv)
handle->cfg.~LocalConfig();
new (&(handle->cfg)) LocalConfig;
handle->cfg.init(0, 0); /* reset the LocalConfig */
- SET_ERROR(handle, NDB_MGM_ILLEGAL_CONNECT_STRING, "");
+ SET_ERROR(handle, NDB_MGM_ILLEGAL_CONNECT_STRING, mgmsrv ? mgmsrv : "");
DBUG_RETURN(-1);
}
handle->cfg_i= -1;
diff --git a/storage/ndb/src/mgmapi/ndb_logevent.cpp b/storage/ndb/src/mgmapi/ndb_logevent.cpp
index 27e7c1f36f5..a5ffb302c22 100644
--- a/storage/ndb/src/mgmapi/ndb_logevent.cpp
+++ b/storage/ndb/src/mgmapi/ndb_logevent.cpp
@@ -269,6 +269,14 @@ struct Ndb_logevent_body_row ndb_logevent_body[]= {
/* TODO */
// ROW( InfoEvent),
+ ROW( EventBufferStatus, "usage", 1, usage),
+ ROW( EventBufferStatus, "alloc", 2, alloc),
+ ROW( EventBufferStatus, "max", 3, max),
+ ROW( EventBufferStatus, "apply_gci_l", 4, apply_gci_l),
+ ROW( EventBufferStatus, "apply_gci_h", 5, apply_gci_h),
+ ROW( EventBufferStatus, "latest_gci_l", 6, latest_gci_l),
+ ROW( EventBufferStatus, "latest_gci_h", 7, latest_gci_h),
+
// Backup
ROW_FN( BackupStarted, "starting_node", 1, starting_node, ref_to_node),
ROW( BackupStarted, "backup_id", 2, backup_id),
diff --git a/storage/ndb/src/mgmclient/CommandInterpreter.cpp b/storage/ndb/src/mgmclient/CommandInterpreter.cpp
index b4bbb3531ad..0a9b4e4a956 100644
--- a/storage/ndb/src/mgmclient/CommandInterpreter.cpp
+++ b/storage/ndb/src/mgmclient/CommandInterpreter.cpp
@@ -16,14 +16,7 @@
#include <ndb_global.h>
#include <my_sys.h>
-
-//#define HAVE_GLOBAL_REPLICATION
-
#include <Vector.hpp>
-#ifdef HAVE_GLOBAL_REPLICATION
-#include "../rep/repapi/repapi.h"
-#endif
-
#include <mgmapi.h>
class MgmtSrvr;
@@ -158,11 +151,6 @@ private:
int m_verbose;
int try_reconnect;
int m_error;
-#ifdef HAVE_GLOBAL_REPLICATION
- NdbRepHandle m_repserver;
- const char *rep_host;
- bool rep_connected;
-#endif
struct NdbThread* m_event_thread;
};
@@ -220,10 +208,6 @@ extern "C" {
#include <NdbMem.h>
#include <EventLogger.hpp>
#include <signaldata/SetLogLevelOrd.hpp>
-#include <signaldata/GrepImpl.hpp>
-#ifdef HAVE_GLOBAL_REPLICATION
-
-#endif // HAVE_GLOBAL_REPLICATION
#include "MgmtErrorReporter.hpp"
#include <Parser.hpp>
#include <SocketServer.hpp>
@@ -251,9 +235,6 @@ static const char* helpText =
"---------------------------------------------------------------------------\n"
"HELP Print help text\n"
"HELP SHOW Help for SHOW command\n"
-#ifdef HAVE_GLOBAL_REPLICATION
-"HELP REPLICATION Help for global replication\n"
-#endif // HAVE_GLOBAL_REPLICATION
#ifdef VM_TRACE // DEBUG ONLY
"HELP DEBUG Help for debug compiled version\n"
#endif
@@ -277,9 +258,6 @@ static const char* helpText =
"EXIT SINGLE USER MODE Exit single user mode\n"
"<id> STATUS Print status\n"
"<id> CLUSTERLOG {<category>=<level>}+ Set log level for cluster log\n"
-#ifdef HAVE_GLOBAL_REPLICATION
-"REP CONNECT <host:port> Connect to REP server on host:port\n"
-#endif
"PURGE STALE SESSIONS Reset reserved nodeid's in the mgmt server\n"
"CONNECT [<connectstring>] Connect to management server (reconnect if already connected)\n"
"QUIT Quit management client\n"
@@ -297,39 +275,6 @@ static const char* helpTextShow =
#endif
;
-#ifdef HAVE_GLOBAL_REPLICATION
-static const char* helpTextRep =
-"---------------------------------------------------------------------------\n"
-" NDB Cluster -- Management Client -- Help for Global Replication\n"
-"---------------------------------------------------------------------------\n"
-"Commands should be executed on the standby NDB Cluster\n"
-"These features are in an experimental release state.\n"
-"\n"
-"Simple Commands:\n"
-"REP START Start Global Replication\n"
-"REP START REQUESTOR Start Global Replication Requestor\n"
-"REP STATUS Show Global Replication status\n"
-"REP STOP Stop Global Replication\n"
-"REP STOP REQUESTOR Stop Global Replication Requestor\n"
-"\n"
-"Advanced Commands:\n"
-"REP START <protocol> Starts protocol\n"
-"REP STOP <protocol> Stops protocol\n"
-"<protocol> = TRANSFER | APPLY | DELETE\n"
-"\n"
-#ifdef VM_TRACE // DEBUG ONLY
-"Debugging commands:\n"
-"REP DELETE Removes epochs stored in primary and standy systems\n"
-"REP DROP <tableid> Drop a table in SS identified by table id\n"
-"REP SLOWSTOP Stop Replication (Tries to synchonize with primary)\n"
-"REP FASTSTOP Stop Replication (Stops in consistent state)\n"
-"<component> = SUBSCRIPTION\n"
-" METALOG | METASCAN | DATALOG | DATASCAN\n"
-" REQUESTOR | TRANSFER | APPLY | DELETE\n"
-#endif
-;
-#endif // HAVE_GLOBAL_REPLICATION
-
#ifdef VM_TRACE // DEBUG ONLY
static const char* helpTextDebug =
"---------------------------------------------------------------------------\n"
@@ -401,11 +346,6 @@ CommandInterpreter::CommandInterpreter(const char *_host,int verbose)
m_connected= false;
m_event_thread= 0;
try_reconnect = 0;
-#ifdef HAVE_GLOBAL_REPLICATION
- rep_host = NULL;
- m_repserver = NULL;
- rep_connected = false;
-#endif
}
/*
@@ -696,13 +636,6 @@ CommandInterpreter::execute_impl(const char *_line)
executePurge(allAfterFirstToken);
DBUG_RETURN(true);
}
-#ifdef HAVE_GLOBAL_REPLICATION
- else if(strcasecmp(firstToken, "REPLICATION") == 0 ||
- strcasecmp(firstToken, "REP") == 0) {
- executeRep(allAfterFirstToken);
- DBUG_RETURN(true);
- }
-#endif // HAVE_GLOBAL_REPLICATION
else if(strcasecmp(firstToken, "ENTER") == 0 &&
allAfterFirstToken != NULL &&
strncasecmp(allAfterFirstToken, "SINGLE USER MODE ",
@@ -965,11 +898,6 @@ CommandInterpreter::executeHelp(char* parameters)
ndbout << endl;
} else if (strcasecmp(parameters, "SHOW") == 0) {
ndbout << helpTextShow;
-#ifdef HAVE_GLOBAL_REPLICATION
- } else if (strcasecmp(parameters, "REPLICATION") == 0 ||
- strcasecmp(parameters, "REP") == 0) {
- ndbout << helpTextRep;
-#endif // HAVE_GLOBAL_REPLICATION
#ifdef VM_TRACE // DEBUG ONLY
} else if (strcasecmp(parameters, "DEBUG") == 0) {
ndbout << helpTextDebug;
@@ -1219,8 +1147,6 @@ CommandInterpreter::executeShow(char* parameters)
case NDB_MGM_NODE_TYPE_UNKNOWN:
ndbout << "Error: Unknown Node Type" << endl;
return;
- case NDB_MGM_NODE_TYPE_REP:
- abort();
}
}
@@ -2099,225 +2025,4 @@ CommandInterpreter::executeAbortBackup(char* parameters)
return;
}
-#ifdef HAVE_GLOBAL_REPLICATION
-/*****************************************************************************
- * Global Replication
- *
- * For information about the different commands, see
- * GrepReq::Request in file signaldata/grepImpl.cpp.
- *
- * Below are commands as of 2003-07-05 (may change!):
- * START = 0, ///< Start Global Replication (all phases)
- * START_METALOG = 1, ///< Start Global Replication (all phases)
- * START_METASCAN = 2, ///< Start Global Replication (all phases)
- * START_DATALOG = 3, ///< Start Global Replication (all phases)
- * START_DATASCAN = 4, ///< Start Global Replication (all phases)
- * START_REQUESTOR = 5, ///< Start Global Replication (all phases)
- * ABORT = 6, ///< Immediate stop (removes subscription)
- * SLOW_STOP = 7, ///< Stop after finishing applying current GCI epoch
- * FAST_STOP = 8, ///< Stop after finishing applying all PS GCI epochs
- * START_TRANSFER = 9, ///< Start SS-PS transfer
- * STOP_TRANSFER = 10, ///< Stop SS-PS transfer
- * START_APPLY = 11, ///< Start applying GCI epochs in SS
- * STOP_APPLY = 12, ///< Stop applying GCI epochs in SS
- * STATUS = 13, ///< Status
- * START_SUBSCR = 14,
- * REMOVE_BUFFERS = 15,
- * DROP_TABLE = 16
-
- *****************************************************************************/
-
-void
-CommandInterpreter::executeRep(char* parameters)
-{
- if (emptyString(parameters)) {
- ndbout << helpTextRep;
- return;
- }
-
- char * line = my_strdup(parameters,MYF(MY_WME));
- My_auto_ptr<char> ap1((char*)line);
- char * firstToken = strtok(line, " ");
-
- struct ndb_rep_reply reply;
- unsigned int repId;
-
-
- if (!strcasecmp(firstToken, "CONNECT")) {
- char * host = strtok(NULL, "\0");
- for (unsigned int i = 0; i < strlen(host); ++i) {
- host[i] = tolower(host[i]);
- }
-
- if(host == NULL)
- {
- ndbout_c("host:port must be specified.");
- return;
- }
-
- if(rep_connected) {
- if(m_repserver != NULL) {
- ndb_rep_disconnect(m_repserver);
- rep_connected = false;
- }
- }
-
- if(m_repserver == NULL)
- m_repserver = ndb_rep_create_handle();
- if(ndb_rep_connect(m_repserver, host) < 0)
- ndbout_c("Failed to connect to %s", host);
- else
- rep_connected=true;
- return;
-
- if(!rep_connected) {
- ndbout_c("Not connected to REP server");
- }
- }
-
- /********
- * START
- ********/
- if (!strcasecmp(firstToken, "START")) {
-
- unsigned int req;
- char *startType = strtok(NULL, "\0");
-
- if (startType == NULL) {
- req = GrepReq::START;
- } else if (!strcasecmp(startType, "SUBSCRIPTION")) {
- req = GrepReq::START_SUBSCR;
- } else if (!strcasecmp(startType, "METALOG")) {
- req = GrepReq::START_METALOG;
- } else if (!strcasecmp(startType, "METASCAN")) {
- req = GrepReq::START_METASCAN;
- } else if (!strcasecmp(startType, "DATALOG")) {
- req = GrepReq::START_DATALOG;
- } else if (!strcasecmp(startType, "DATASCAN")) {
- req = GrepReq::START_DATASCAN;
- } else if (!strcasecmp(startType, "REQUESTOR")) {
- req = GrepReq::START_REQUESTOR;
- } else if (!strcasecmp(startType, "TRANSFER")) {
- req = GrepReq::START_TRANSFER;
- } else if (!strcasecmp(startType, "APPLY")) {
- req = GrepReq::START_APPLY;
- } else if (!strcasecmp(startType, "DELETE")) {
- req = GrepReq::START_DELETE;
- } else {
- ndbout_c("Illegal argument to command 'REPLICATION START'");
- return;
- }
-
- int result = ndb_rep_command(m_repserver, req, &repId, &reply);
-
- if (result != 0) {
- ndbout << "Start of Global Replication failed" << endl;
- } else {
- ndbout << "Start of Global Replication ordered" << endl;
- }
- return;
- }
-
- /********
- * STOP
- ********/
- if (!strcasecmp(firstToken, "STOP")) {
- unsigned int req;
- char *startType = strtok(NULL, " ");
- unsigned int epoch = 0;
-
- if (startType == NULL) {
- /**
- * Stop immediately
- */
- req = GrepReq::STOP;
- } else if (!strcasecmp(startType, "EPOCH")) {
- char *strEpoch = strtok(NULL, "\0");
- if(strEpoch == NULL) {
- ndbout_c("Epoch expected!");
- return;
- }
- req = GrepReq::STOP;
- epoch=atoi(strEpoch);
- } else if (!strcasecmp(startType, "SUBSCRIPTION")) {
- req = GrepReq::STOP_SUBSCR;
- } else if (!strcasecmp(startType, "METALOG")) {
- req = GrepReq::STOP_METALOG;
- } else if (!strcasecmp(startType, "METASCAN")) {
- req = GrepReq::STOP_METASCAN;
- } else if (!strcasecmp(startType, "DATALOG")) {
- req = GrepReq::STOP_DATALOG;
- } else if (!strcasecmp(startType, "DATASCAN")) {
- req = GrepReq::STOP_DATASCAN;
- } else if (!strcasecmp(startType, "REQUESTOR")) {
- req = GrepReq::STOP_REQUESTOR;
- } else if (!strcasecmp(startType, "TRANSFER")) {
- req = GrepReq::STOP_TRANSFER;
- } else if (!strcasecmp(startType, "APPLY")) {
- req = GrepReq::STOP_APPLY;
- } else if (!strcasecmp(startType, "DELETE")) {
- req = GrepReq::STOP_DELETE;
- } else {
- ndbout_c("Illegal argument to command 'REPLICATION STOP'");
- return;
- }
- int result = ndb_rep_command(m_repserver, req, &repId, &reply, epoch);
-
- if (result != 0) {
- ndbout << "Stop command failed" << endl;
- } else {
- ndbout << "Stop ordered" << endl;
- }
- return;
- }
-
- /*********
- * STATUS
- *********/
- if (!strcasecmp(firstToken, "STATUS")) {
- struct rep_state repstate;
- int result =
- ndb_rep_get_status(m_repserver, &repId, &reply, &repstate);
-
- if (result != 0) {
- ndbout << "Status request of Global Replication failed" << endl;
- } else {
- ndbout << "Status request of Global Replication ordered" << endl;
- ndbout << "See printout at one of the DB nodes" << endl;
- ndbout << "(Better status report is under development.)" << endl;
- ndbout << " SubscriptionId " << repstate.subid
- << " SubscriptionKey " << repstate.subkey << endl;
- }
- return;
- }
-
- /*********
- * QUERY (see repapi.h for querable counters)
- *********/
- if (!strcasecmp(firstToken, "QUERY")) {
- char *query = strtok(NULL, "\0");
- int queryCounter=-1;
- if(query != NULL) {
- queryCounter = atoi(query);
- }
- struct rep_state repstate;
- unsigned repId = 0;
- int result = ndb_rep_query(m_repserver, (QueryCounter)queryCounter,
- &repId, &reply, &repstate);
-
- if (result != 0) {
- ndbout << "Query repserver failed" << endl;
- } else {
- ndbout << "Query repserver sucessful" << endl;
- ndbout_c("repstate : QueryCounter %d, f=%d l=%d"
- " nodegroups %d" ,
- repstate.queryCounter,
- repstate.first[0], repstate.last[0],
- repstate.no_of_nodegroups );
- }
- return;
- }
-}
-#endif // HAVE_GLOBAL_REPLICATION
-
template class Vector<char const*>;
diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
index d946b4af4a7..82e1d24d045 100644
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
@@ -37,7 +37,6 @@
#include <signaldata/EventReport.hpp>
#include <signaldata/DumpStateOrd.hpp>
#include <signaldata/BackupSignalData.hpp>
-#include <signaldata/GrepImpl.hpp>
#include <signaldata/ManagementServer.hpp>
#include <signaldata/NFCompleteRep.hpp>
#include <signaldata/NodeFailRep.hpp>
@@ -509,10 +508,6 @@ MgmtSrvr::MgmtSrvr(SocketServer *socket_server,
case NODE_TYPE_MGM:
nodeTypes[id] = NDB_MGM_NODE_TYPE_MGM;
break;
- case NODE_TYPE_REP:
- nodeTypes[id] = NDB_MGM_NODE_TYPE_REP;
- break;
- case NODE_TYPE_EXT_REP:
default:
break;
}
@@ -1929,7 +1924,7 @@ MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal)
break;
case GSN_EVENT_REP:
- eventReport(refToNode(signal->theSendersBlockRef), signal->getDataPtr());
+ eventReport(signal->getDataPtr());
break;
case GSN_STOP_REF:{
@@ -2035,12 +2030,14 @@ MgmtSrvr::handleStatus(NodeId nodeId, bool alive, bool nfComplete)
{
DBUG_ENTER("MgmtSrvr::handleStatus");
Uint32 theData[25];
+ EventReport *rep = (EventReport *)theData;
+
theData[1] = nodeId;
if (alive) {
m_started_nodes.push_back(nodeId);
- theData[0] = NDB_LE_Connected;
+ rep->setEventType(NDB_LE_Connected);
} else {
- theData[0] = NDB_LE_Disconnected;
+ rep->setEventType(NDB_LE_Disconnected);
if(nfComplete)
{
handleStopReply(nodeId, 0);
@@ -2054,8 +2051,9 @@ MgmtSrvr::handleStatus(NodeId nodeId, bool alive, bool nfComplete)
NdbCondition_Signal(theMgmtWaitForResponseCondPtr);
}
}
-
- eventReport(_ownNodeId, theData);
+
+ rep->setNodeId(_ownNodeId);
+ eventReport(theData);
DBUG_VOID_RETURN;
}
@@ -2387,11 +2385,13 @@ MgmtSrvr::getNextNodeId(NodeId * nodeId, enum ndb_mgm_node_type type) const
#include "Services.hpp"
void
-MgmtSrvr::eventReport(NodeId nodeId, const Uint32 * theData)
+MgmtSrvr::eventReport(const Uint32 * theData)
{
const EventReport * const eventReport = (EventReport *)&theData[0];
Ndb_logevent_type type = eventReport->getEventType();
+ Uint32 nodeId= eventReport->getNodeId();
+
// Log event
g_eventLogger.log(type, theData, nodeId,
&m_event_listner[0].m_logLevel);
@@ -2571,18 +2571,6 @@ MgmtSrvr::abortBackup(Uint32 backupId)
}
-/*****************************************************************************
- * Global Replication
- *****************************************************************************/
-
-int
-MgmtSrvr::repCommand(Uint32* repReqId, Uint32 request, bool waitCompleted)
-{
- require(false);
- return 0;
-}
-
-
NodeId
MgmtSrvr::getPrimaryNode() const {
#if 0
diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp
index de1af1286ff..e43a0ff4149 100644
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp
@@ -370,11 +370,6 @@ public:
int abortBackup(Uint32 backupId);
int performBackup(Uint32* backupId);
- /**
- * Global Replication
- */
- int repCommand(Uint32* repReqId, Uint32 request, bool waitCompleted = false);
-
//**************************************************************************
// Description: Set event report level for a DB process
// Parameters:
@@ -695,7 +690,7 @@ private:
/**
* An event from <i>nodeId</i> has arrived
*/
- void eventReport(NodeId nodeId, const Uint32 * theData);
+ void eventReport(const Uint32 * theData);
//**************************************************************************
diff --git a/storage/ndb/src/mgmsrv/Services.cpp b/storage/ndb/src/mgmsrv/Services.cpp
index ed32ab9c963..3992796e304 100644
--- a/storage/ndb/src/mgmsrv/Services.cpp
+++ b/storage/ndb/src/mgmsrv/Services.cpp
@@ -182,12 +182,6 @@ ParserRow<MgmApiSession> commands[] = {
MGM_CMD("abort backup", &MgmApiSession::abortBackup, ""),
MGM_ARG("id", Int, Mandatory, "Backup id"),
- /**
- * Global Replication
- */
- MGM_CMD("rep", &MgmApiSession::repCommand, ""),
- MGM_ARG("request", Int, Mandatory, "Command"),
-
MGM_CMD("stop", &MgmApiSession::stop, ""),
MGM_ARG("node", String, Mandatory, "Node"),
MGM_ARG("abort", Int, Mandatory, "Node"),
@@ -711,30 +705,6 @@ MgmApiSession::abortBackup(Parser<MgmApiSession>::Context &,
m_output->println("");
}
-/*****************************************************************************
- * Global Replication
- *****************************************************************************/
-
-void
-MgmApiSession::repCommand(Parser<MgmApiSession>::Context &,
- Properties const &args) {
-
- Uint32 request = 0;
- args.get("request", &request);
-
- Uint32 repReqId;
- int result = m_mgmsrv.repCommand(&repReqId, request, true);
-
- m_output->println("global replication reply");
- if(result != 0)
- m_output->println("result: %s", get_error_text(result));
- else{
- m_output->println("result: Ok");
- m_output->println("id: %d", repReqId);
- }
- m_output->println("");
-}
-
/*****************************************************************************/
void
diff --git a/storage/ndb/src/mgmsrv/Services.hpp b/storage/ndb/src/mgmsrv/Services.hpp
index ff9008b05a8..7d5d8cb9c8c 100644
--- a/storage/ndb/src/mgmsrv/Services.hpp
+++ b/storage/ndb/src/mgmsrv/Services.hpp
@@ -101,8 +101,6 @@ public:
void transporter_connect(Parser_t::Context &ctx, Properties const &args);
void get_mgmd_nodeid(Parser_t::Context &ctx, Properties const &args);
-
- void repCommand(Parser_t::Context &ctx, const class Properties &args);
};
class MgmApiService : public SocketServer::Service {
diff --git a/storage/ndb/src/ndbapi/ClusterMgr.cpp b/storage/ndb/src/ndbapi/ClusterMgr.cpp
index 42b7c5b115d..2ff403d53b3 100644
--- a/storage/ndb/src/ndbapi/ClusterMgr.cpp
+++ b/storage/ndb/src/ndbapi/ClusterMgr.cpp
@@ -109,18 +109,6 @@ ClusterMgr::init(ndb_mgm_configuration_iterator & iter){
case NODE_TYPE_MGM:
theNodes[tmp].m_info.m_type = NodeInfo::MGM;
break;
- case NODE_TYPE_REP:
- theNodes[tmp].m_info.m_type = NodeInfo::REP;
- break;
- case NODE_TYPE_EXT_REP:
- theNodes[tmp].m_info.m_type = NodeInfo::REP;
- {
- Uint32 hbFreq = 10000;
- //ndb_mgm_get_int_parameter(iter, CFG_, &hbFreq);
- theNodes[tmp].hbFrequency = hbFreq;
- assert(100 <= hbFreq && hbFreq < 60 * 60 * 1000);
- }
- break;
default:
type = type;
#if 0
@@ -218,13 +206,6 @@ ClusterMgr::threadMain( ){
theNode.hbCounter = 0;
}
- /**
- * If the node is of type REP,
- * then the receiver of the signal should be API_CLUSTERMGR
- */
- if (theNode.m_info.m_type == NodeInfo::REP) {
- signal.theReceiversBlockNumber = API_CLUSTERMGR;
- }
#if 0
ndbout_c("ClusterMgr: Sending API_REGREQ to node %d", (int)nodeId);
#endif
@@ -347,9 +328,7 @@ ClusterMgr::execAPI_REGCONF(const Uint32 * theData){
}//if
node.m_info.m_heartbeat_cnt = 0;
node.hbCounter = 0;
- if (node.m_info.m_type != NodeInfo::REP) {
- node.hbFrequency = (apiRegConf->apiHeartbeatFrequency * 10) - 50;
- }
+ node.hbFrequency = (apiRegConf->apiHeartbeatFrequency * 10) - 50;
}
void
@@ -422,10 +401,7 @@ ClusterMgr::reportConnected(NodeId nodeId){
* if first API_REGCONF has not arrived
*/
theNode.m_state.m_connected_nodes.set(nodeId);
-
- if (theNode.m_info.m_type != NodeInfo::REP) {
- theNode.hbFrequency = 0;
- }
+ theNode.hbFrequency = 0;
theNode.m_info.m_version = 0;
theNode.compatible = true;
theNode.nfCompleteRep = true;
diff --git a/storage/ndb/src/ndbapi/ClusterMgr.hpp b/storage/ndb/src/ndbapi/ClusterMgr.hpp
index da8f16d6789..eacdac9512e 100644
--- a/storage/ndb/src/ndbapi/ClusterMgr.hpp
+++ b/storage/ndb/src/ndbapi/ClusterMgr.hpp
@@ -77,6 +77,7 @@ public:
const Node & getNodeInfo(NodeId) const;
Uint32 getNoOfConnectedNodes() const;
+ bool isClusterAlive() const;
void hb_received(NodeId);
private:
@@ -129,6 +130,11 @@ ClusterMgr::getNoOfConnectedNodes() const {
}
inline
+bool
+ClusterMgr::isClusterAlive() const {
+ return noOfAliveNodes != 0;
+}
+inline
void
ClusterMgr::hb_received(NodeId nodeId) {
theNodes[nodeId].m_info.m_heartbeat_cnt= 0;
diff --git a/storage/ndb/src/ndbapi/Ndb.cpp b/storage/ndb/src/ndbapi/Ndb.cpp
index c5d32f59c3c..02b66af9a6e 100644
--- a/storage/ndb/src/ndbapi/Ndb.cpp
+++ b/storage/ndb/src/ndbapi/Ndb.cpp
@@ -29,6 +29,7 @@ Name: Ndb.cpp
#include <NdbOperation.hpp>
#include <NdbTransaction.hpp>
#include <NdbEventOperation.hpp>
+#include <NdbEventOperationImpl.hpp>
#include <NdbRecAttr.hpp>
#include <md5_hash.hpp>
#include <NdbSleep.h>
@@ -216,10 +217,9 @@ Remark: Disconnect all connections to the database.
void
Ndb::doDisconnect()
{
+ DBUG_ENTER("Ndb::doDisconnect");
NdbTransaction* tNdbCon;
CHECK_STATUS_MACRO_VOID;
- /* DBUG_ENTER must be after CHECK_STATUS_MACRO_VOID because of 'return' */
- DBUG_ENTER("Ndb::doDisconnect");
Uint32 tNoOfDbNodes = theImpl->theNoOfDBnodes;
Uint8 *theDBnodes= theImpl->theDBnodes;
@@ -574,6 +574,7 @@ Ndb::NdbTamper(TamperType aAction, int aNode)
#ifdef CUSTOMER_RELEASE
return -1;
#else
+ DBUG_ENTER("Ndb::NdbTamper");
CHECK_STATUS_MACRO;
checkFailedNode();
@@ -595,13 +596,13 @@ Ndb::NdbTamper(TamperType aAction, int aNode)
break;
default:
theError.code = 4102;
- return -1;
+ DBUG_RETURN(-1);
}
tNdbConn = getNdbCon(); // Get free connection object
if (tNdbConn == NULL) {
theError.code = 4000;
- return -1;
+ DBUG_RETURN(-1);
}
tSignal.setSignal(GSN_DIHNDBTAMPER);
tSignal.setData (tAction, 1);
@@ -620,12 +621,12 @@ Ndb::NdbTamper(TamperType aAction, int aNode)
if (tNode == 0) {
theError.code = 4002;
releaseNdbCon(tNdbConn);
- return -1;
+ DBUG_RETURN(-1);
}//if
ret_code = tp->sendSignal(&tSignal,aNode);
tp->unlock_mutex();
releaseNdbCon(tNdbConn);
- return ret_code;
+ DBUG_RETURN(ret_code);
} else {
do {
tp->lock_mutex();
@@ -636,7 +637,7 @@ Ndb::NdbTamper(TamperType aAction, int aNode)
if (tNode == 0) {
theError.code = 4009;
releaseNdbCon(tNdbConn);
- return -1;
+ DBUG_RETURN(-1);
}//if
ret_code = sendRecSignal(tNode, WAIT_NDB_TAMPER, &tSignal, 0);
if (ret_code == 0) {
@@ -644,15 +645,15 @@ Ndb::NdbTamper(TamperType aAction, int aNode)
theRestartGCI = 0;
}//if
releaseNdbCon(tNdbConn);
- return theRestartGCI;
+ DBUG_RETURN(theRestartGCI);
} else if ((ret_code == -5) || (ret_code == -2)) {
TRACE_DEBUG("Continue DIHNDBTAMPER when node failed/stopping");
} else {
- return -1;
+ DBUG_RETURN(-1);
}//if
} while (1);
}
- return 0;
+ DBUG_RETURN(0);
#endif
}
#if 0
@@ -1229,49 +1230,82 @@ Ndb::getSchemaFromInternalName(const char * internalName)
return ret;
}
-NdbEventOperation* Ndb::createEventOperation(const char* eventName,
- const int bufferLength)
+// ToDo set event buffer size
+NdbEventOperation* Ndb::createEventOperation(const char* eventName)
{
- NdbEventOperation* tOp;
-
- tOp = new NdbEventOperation(this, eventName, bufferLength);
-
- if (tOp == 0)
+ DBUG_ENTER("Ndb::createEventOperation");
+ NdbEventOperation* tOp= theEventBuffer->createEventOperation(eventName,
+ theError);
+ if (tOp)
{
- theError.code= 4000;
- return NULL;
+ // keep track of all event operations
+ NdbEventOperationImpl *op=
+ NdbEventBuffer::getEventOperationImpl(tOp);
+ op->m_next= theImpl->m_ev_op;
+ op->m_prev= 0;
+ theImpl->m_ev_op= op;
+ if (op->m_next)
+ op->m_next->m_prev= op;
}
- if (tOp->getState() != NdbEventOperation::EO_CREATED) {
- theError.code= tOp->getNdbError().code;
- delete tOp;
- tOp = NULL;
- }
+ DBUG_RETURN(tOp);
+}
+
+int Ndb::dropEventOperation(NdbEventOperation* tOp)
+{
+ DBUG_ENTER("Ndb::dropEventOperation");
+ // remove it from list
+ NdbEventOperationImpl *op=
+ NdbEventBuffer::getEventOperationImpl(tOp);
+ if (op->m_next)
+ op->m_next->m_prev= op->m_prev;
+ if (op->m_prev)
+ op->m_prev->m_next= op->m_next;
+ else
+ theImpl->m_ev_op= op->m_next;
- //now we have to look up this event in dict
+ assert(theImpl->m_ev_op == 0 || theImpl->m_ev_op->m_prev == 0);
- return tOp;
+ theEventBuffer->dropEventOperation(tOp);
+ DBUG_RETURN(0);
}
-int Ndb::dropEventOperation(NdbEventOperation* op) {
- delete op;
+NdbEventOperation *Ndb::getEventOperation(NdbEventOperation* tOp)
+{
+ NdbEventOperationImpl *op;
+ if (tOp)
+ op= NdbEventBuffer::getEventOperationImpl(tOp)->m_next;
+ else
+ op= theImpl->m_ev_op;
+ if (op)
+ return op->m_facade;
return 0;
}
-NdbGlobalEventBufferHandle* Ndb::getGlobalEventBufferHandle()
+int
+Ndb::pollEvents(int aMillisecondNumber, Uint64 *latestGCI)
{
- return theGlobalEventBufferHandle;
+ return theEventBuffer->pollEvents(aMillisecondNumber, latestGCI);
}
-//void Ndb::monitorEvent(NdbEventOperation *op, NdbEventCallback cb, void* rs)
-//{
-//}
+NdbEventOperation *Ndb::nextEvent()
+{
+ return theEventBuffer->nextEvent();
+}
-int
-Ndb::pollEvents(int aMillisecondNumber)
+Uint64 Ndb::getLatestGCI()
+{
+ return theEventBuffer->getLatestGCI();
+}
+
+void Ndb::setReportThreshEventGCISlip(unsigned thresh)
+{
+ theEventBuffer->m_gci_slip_thresh= thresh;
+}
+
+void Ndb::setReportThreshEventFreeMem(unsigned thresh)
{
- return NdbEventOperation::wait(theGlobalEventBufferHandle,
- aMillisecondNumber);
+ theEventBuffer->m_free_thresh= thresh;
}
#ifdef VM_TRACE
diff --git a/storage/ndb/src/ndbapi/NdbDictionary.cpp b/storage/ndb/src/ndbapi/NdbDictionary.cpp
index 2a6cf07b2ca..520a5711100 100644
--- a/storage/ndb/src/ndbapi/NdbDictionary.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp
@@ -284,6 +284,11 @@ NdbDictionary::Table::getName() const {
return m_impl.getName();
}
+const char *
+NdbDictionary::Table::getMysqlName() const {
+ return m_impl.getMysqlName();
+}
+
int
NdbDictionary::Table::getTableId() const {
return m_impl.m_tableId;
@@ -840,6 +845,12 @@ NdbDictionary::Dictionary::invalidateIndex(const char * indexName,
DBUG_VOID_RETURN;
}
+int
+NdbDictionary::Dictionary::forceGCPWait()
+{
+ return m_impl.forceGCPWait();
+}
+
void
NdbDictionary::Dictionary::removeCachedIndex(const char * indexName,
const char * tableName){
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index f7b23200223..3236916b035 100644
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -29,6 +29,7 @@
#include <signaldata/AlterTable.hpp>
#include <signaldata/DropIndx.hpp>
#include <signaldata/ListTables.hpp>
+#include <signaldata/WaitGCP.hpp>
#include <SimpleProperties.hpp>
#include <Bitmask.hpp>
#include <AttributeList.hpp>
@@ -42,6 +43,8 @@
#define DEBUG_PRINT 0
#define INCOMPATIBLE_VERSION -2
+extern Uint64 g_latest_trans_gci;
+
//#define EVENT_DEBUG
/**
@@ -393,6 +396,7 @@ NdbTableImpl::assign(const NdbTableImpl& org)
{
m_tableId = org.m_tableId;
m_internalName.assign(org.m_internalName);
+ updateMysqlName();
m_externalName.assign(org.m_externalName);
m_newExternalName.assign(org.m_newExternalName);
m_frm.assign(org.m_frm.get_data(), org.m_frm.length());
@@ -439,6 +443,17 @@ NdbTableImpl::getName() const
return m_newExternalName.c_str();
}
+void
+NdbTableImpl::updateMysqlName()
+{
+ Vector<BaseString> v;
+ if (m_internalName.split(v,"/") == 3)
+ {
+ m_mysqlName.assfmt("%s/%s",v[0].c_str(),v[2].c_str());
+ return;
+ }
+ m_mysqlName.assign("");
+}
void
NdbTableImpl::buildColumnHash(){
@@ -629,8 +644,6 @@ void NdbEventImpl::init()
mi_type= 0;
m_dur= NdbDictionary::Event::ED_UNDEFINED;
m_tableImpl= NULL;
- m_bufferId= RNIL;
- eventOp= NULL;
}
NdbEventImpl::~NdbEventImpl()
@@ -641,12 +654,12 @@ NdbEventImpl::~NdbEventImpl()
void NdbEventImpl::setName(const char * name)
{
- m_externalName.assign(name);
+ m_name.assign(name);
}
const char *NdbEventImpl::getName() const
{
- return m_externalName.c_str();
+ return m_name.c_str();
}
void
@@ -671,12 +684,7 @@ NdbEventImpl::getTableName() const
void
NdbEventImpl::addTableEvent(const NdbDictionary::Event::TableEvent t = NdbDictionary::Event::TE_ALL)
{
- switch (t) {
- case NdbDictionary::Event::TE_INSERT : mi_type |= 1; break;
- case NdbDictionary::Event::TE_DELETE : mi_type |= 2; break;
- case NdbDictionary::Event::TE_UPDATE : mi_type |= 4; break;
- default: mi_type = 4 | 2 | 1; // all types
- }
+ mi_type |= (unsigned)t;
}
void
@@ -939,12 +947,6 @@ NdbDictInterface::execSignal(void* dictImpl,
case GSN_SUB_START_REF:
tmp->execSUB_START_REF(signal, ptr);
break;
- case GSN_SUB_TABLE_DATA:
- tmp->execSUB_TABLE_DATA(signal, ptr);
- break;
- case GSN_SUB_GCP_COMPLETE_REP:
- tmp->execSUB_GCP_COMPLETE_REP(signal, ptr);
- break;
case GSN_SUB_STOP_CONF:
tmp->execSUB_STOP_CONF(signal, ptr);
break;
@@ -960,6 +962,12 @@ NdbDictInterface::execSignal(void* dictImpl,
case GSN_LIST_TABLES_CONF:
tmp->execLIST_TABLES_CONF(signal, ptr);
break;
+ case GSN_WAIT_GCP_CONF:
+ tmp->execWAIT_GCP_CONF(signal, ptr);
+ break;
+ case GSN_WAIT_GCP_REF:
+ tmp->execWAIT_GCP_REF(signal, ptr);
+ break;
default:
abort();
}
@@ -1062,7 +1070,13 @@ NdbDictInterface::dictSignal(NdbApiSignal* signal,
DBUG_RETURN(-1);
}
- if ( (temporaryMask & m_error.code) != 0 ) {
+ if ( temporaryMask == -1)
+ {
+ const NdbError &error= getNdbError();
+ if (error.status == NdbError::TemporaryError)
+ continue;
+ }
+ else if ( (temporaryMask & m_error.code) != 0 ) {
continue;
}
if (errcodes) {
@@ -1320,6 +1334,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
impl->m_version = tableDesc.TableVersion;
impl->m_status = NdbDictionary::Object::Retrieved;
impl->m_internalName.assign(internalName);
+ impl->updateMysqlName();
impl->m_externalName.assign(externalName);
impl->m_frm.assign(tableDesc.FrmData, tableDesc.FrmLen);
@@ -1629,6 +1644,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
const BaseString internalName(
ndb.internalize_table_name(impl.m_externalName.c_str()));
impl.m_internalName.assign(internalName);
+ impl.updateMysqlName();
UtilBufferWriter w(m_buffer);
DictTabInfo::Table tmpTab;
tmpTab.init();
@@ -2444,26 +2460,26 @@ NdbDictInterface::execDROP_INDX_REF(NdbApiSignal * signal,
int
NdbDictionaryImpl::createEvent(NdbEventImpl & evnt)
{
+ DBUG_ENTER("NdbDictionaryImpl::createEvent");
int i;
- NdbTableImpl* tab = getTable(evnt.getTableName());
-
- if(tab == 0){
-#ifdef EVENT_DEBUG
- ndbout_c("NdbDictionaryImpl::createEvent: table not found: %s",
- evnt.getTableName());
-#endif
- return -1;
+ NdbTableImpl* tab= evnt.m_tableImpl;
+ if (tab == 0)
+ {
+ tab= getTable(evnt.getTableName());
+ if(tab == 0){
+ DBUG_PRINT("info",("NdbDictionaryImpl::createEvent: table not found: %s",
+ evnt.getTableName()));
+ DBUG_RETURN(-1);
+ }
}
+ DBUG_PRINT("info",("Table: id: %d version: %d", tab->m_tableId, tab->m_version));
+
evnt.m_tableId = tab->m_tableId;
+ evnt.m_tableVersion = tab->m_version;
evnt.m_tableImpl = tab;
-#ifdef EVENT_DEBUG
- ndbout_c("Event on tableId=%d", evnt.m_tableId);
-#endif
-
NdbTableImpl &table = *evnt.m_tableImpl;
-
int attributeList_sz = evnt.m_attrIds.size();
for (i = 0; i < attributeList_sz; i++) {
@@ -2474,17 +2490,19 @@ NdbDictionaryImpl::createEvent(NdbEventImpl & evnt)
ndbout_c("Attr id %u in table %s not found", evnt.m_attrIds[i],
evnt.getTableName());
m_error.code= 4713;
- return -1;
+ DBUG_RETURN(-1);
}
}
evnt.m_attrIds.clear();
attributeList_sz = evnt.m_columns.size();
-#ifdef EVENT_DEBUG
- ndbout_c("creating event %s", evnt.m_externalName.c_str());
- ndbout_c("no of columns %d", evnt.m_columns.size());
-#endif
+
+ DBUG_PRINT("info",("Event on tableId=%d, tableVersion=%d, event name %s, no of columns %d",
+ evnt.m_tableId, evnt.m_tableVersion,
+ evnt.m_name.c_str(),
+ evnt.m_columns.size()));
+
int pk_count = 0;
evnt.m_attrListBitmask.clear();
@@ -2493,7 +2511,7 @@ NdbDictionaryImpl::createEvent(NdbEventImpl & evnt)
table.getColumn(evnt.m_columns[i]->m_name.c_str());
if(col == 0){
m_error.code= 4247;
- return -1;
+ DBUG_RETURN(-1);
}
// Copy column definition
*evnt.m_columns[i] = *col;
@@ -2519,7 +2537,7 @@ NdbDictionaryImpl::createEvent(NdbEventImpl & evnt)
for(i = 1; i<attributeList_sz; i++) {
if (evnt.m_columns[i-1]->m_attrId == evnt.m_columns[i]->m_attrId) {
m_error.code= 4258;
- return -1;
+ DBUG_RETURN(-1);
}
}
@@ -2530,7 +2548,7 @@ NdbDictionaryImpl::createEvent(NdbEventImpl & evnt)
#endif
// NdbDictInterface m_receiver;
- return m_receiver.createEvent(m_ndb, evnt, 0 /* getFlag unset */);
+ DBUG_RETURN(m_receiver.createEvent(m_ndb, evnt, 0 /* getFlag unset */));
}
int
@@ -2538,6 +2556,9 @@ NdbDictInterface::createEvent(class Ndb & ndb,
NdbEventImpl & evnt,
int getFlag)
{
+ DBUG_ENTER("NdbDictInterface::createEvent");
+ DBUG_PRINT("enter",("getFlag=%d",getFlag));
+
NdbApiSignal tSignal(m_reference);
tSignal.theReceiversBlockNumber = DBDICT;
tSignal.theVerId_signalNumber = GSN_CREATE_EVNT_REQ;
@@ -2555,22 +2576,25 @@ NdbDictInterface::createEvent(class Ndb & ndb,
// getting event from Dictionary
req->setRequestType(CreateEvntReq::RT_USER_GET);
} else {
+ DBUG_PRINT("info",("tableId: %u tableVersion: %u",
+ evnt.m_tableId, evnt.m_tableVersion));
// creating event in Dictionary
req->setRequestType(CreateEvntReq::RT_USER_CREATE);
req->setTableId(evnt.m_tableId);
+ req->setTableVersion(evnt.m_tableVersion);
req->setAttrListBitmask(evnt.m_attrListBitmask);
req->setEventType(evnt.mi_type);
}
UtilBufferWriter w(m_buffer);
- const size_t len = strlen(evnt.m_externalName.c_str()) + 1;
+ const size_t len = strlen(evnt.m_name.c_str()) + 1;
if(len > MAX_TAB_NAME_SIZE) {
m_error.code= 4241;
- return -1;
+ DBUG_RETURN(-1);
}
- w.add(SimpleProperties::StringValue, evnt.m_externalName.c_str());
+ w.add(SimpleProperties::StringValue, evnt.m_name.c_str());
if (getFlag == 0)
{
@@ -2587,7 +2611,7 @@ NdbDictInterface::createEvent(class Ndb & ndb,
int ret = createEvent(&tSignal, ptr, 1);
if (ret) {
- return ret;
+ DBUG_RETURN(ret);
}
char *dataPtr = (char *)m_buffer.get_data();
@@ -2600,89 +2624,86 @@ NdbDictInterface::createEvent(class Ndb & ndb,
if (getFlag) {
evnt.m_tableId = evntConf->getTableId();
+ evnt.m_tableVersion = evntConf->getTableVersion();
evnt.m_attrListBitmask = evntConf->getAttrListBitmask();
evnt.mi_type = evntConf->getEventType();
evnt.setTable(dataPtr);
} else {
if (evnt.m_tableId != evntConf->getTableId() ||
+ evnt.m_tableVersion != evntConf->getTableVersion() ||
//evnt.m_attrListBitmask != evntConf->getAttrListBitmask() ||
evnt.mi_type != evntConf->getEventType()) {
ndbout_c("ERROR*************");
- return 1;
+ DBUG_RETURN(1);
}
}
evnt.m_eventId = evntConf->getEventId();
evnt.m_eventKey = evntConf->getEventKey();
- return ret;
+ DBUG_RETURN(0);
}
int
NdbDictInterface::createEvent(NdbApiSignal* signal,
LinearSectionPtr ptr[3], int noLSP)
{
- const int noErrCodes = 1;
- int errCodes[noErrCodes] = {CreateEvntRef::Busy};
return dictSignal(signal,ptr,noLSP,
1 /*use masternode id*/,
100,
WAIT_CREATE_INDX_REQ /*WAIT_CREATE_EVNT_REQ*/,
-1,
- errCodes,noErrCodes, CreateEvntRef::Temporary);
+ NULL,0, -1);
}
int
-NdbDictionaryImpl::executeSubscribeEvent(NdbEventImpl & ev)
+NdbDictionaryImpl::executeSubscribeEvent(NdbEventOperationImpl & ev_op)
{
// NdbDictInterface m_receiver;
- return m_receiver.executeSubscribeEvent(m_ndb, ev);
+ return m_receiver.executeSubscribeEvent(m_ndb, ev_op);
}
int
NdbDictInterface::executeSubscribeEvent(class Ndb & ndb,
- NdbEventImpl & evnt)
+ NdbEventOperationImpl & ev_op)
{
DBUG_ENTER("NdbDictInterface::executeSubscribeEvent");
NdbApiSignal tSignal(m_reference);
- // tSignal.theReceiversBlockNumber = SUMA;
tSignal.theReceiversBlockNumber = DBDICT;
tSignal.theVerId_signalNumber = GSN_SUB_START_REQ;
tSignal.theLength = SubStartReq::SignalLength2;
- SubStartReq * sumaStart = CAST_PTR(SubStartReq, tSignal.getDataPtrSend());
+ SubStartReq * req = CAST_PTR(SubStartReq, tSignal.getDataPtrSend());
- sumaStart->subscriptionId = evnt.m_eventId;
- sumaStart->subscriptionKey = evnt.m_eventKey;
- sumaStart->part = SubscriptionData::TableData;
- sumaStart->subscriberData = evnt.m_bufferId & 0xFF;
- sumaStart->subscriberRef = m_reference;
+ req->subscriptionId = ev_op.m_eventImpl->m_eventId;
+ req->subscriptionKey = ev_op.m_eventImpl->m_eventKey;
+ req->part = SubscriptionData::TableData;
+ req->subscriberData = ev_op.m_oid;
+ req->subscriberRef = m_reference;
- DBUG_RETURN(executeSubscribeEvent(&tSignal, NULL));
-}
+ DBUG_PRINT("info",("GSN_SUB_START_REQ subscriptionId=%d,subscriptionKey=%d,"
+ "subscriberData=%d",req->subscriptionId,
+ req->subscriptionKey,req->subscriberData));
-int
-NdbDictInterface::executeSubscribeEvent(NdbApiSignal* signal,
- LinearSectionPtr ptr[3])
-{
- return dictSignal(signal,NULL,0,
- 1 /*use masternode id*/,
- 100,
- WAIT_CREATE_INDX_REQ /*WAIT_CREATE_EVNT_REQ*/,
- -1,
- NULL,0);
+ int errCodes[] = { SubStartRef::Busy };
+ DBUG_RETURN(dictSignal(&tSignal,NULL,0,
+ 1 /*use masternode id*/,
+ 100,
+ WAIT_CREATE_INDX_REQ /*WAIT_CREATE_EVNT_REQ*/,
+ -1,
+ errCodes, sizeof(errCodes)/sizeof(errCodes[0])));
}
int
-NdbDictionaryImpl::stopSubscribeEvent(NdbEventImpl & ev)
+NdbDictionaryImpl::stopSubscribeEvent(NdbEventOperationImpl & ev_op)
{
// NdbDictInterface m_receiver;
- return m_receiver.stopSubscribeEvent(m_ndb, ev);
+ return m_receiver.stopSubscribeEvent(m_ndb, ev_op);
}
int
NdbDictInterface::stopSubscribeEvent(class Ndb & ndb,
- NdbEventImpl & evnt)
+ NdbEventOperationImpl & ev_op)
{
DBUG_ENTER("NdbDictInterface::stopSubscribeEvent");
@@ -2692,36 +2713,36 @@ NdbDictInterface::stopSubscribeEvent(class Ndb & ndb,
tSignal.theVerId_signalNumber = GSN_SUB_STOP_REQ;
tSignal.theLength = SubStopReq::SignalLength;
- SubStopReq * sumaStop = CAST_PTR(SubStopReq, tSignal.getDataPtrSend());
+ SubStopReq * req = CAST_PTR(SubStopReq, tSignal.getDataPtrSend());
- sumaStop->subscriptionId = evnt.m_eventId;
- sumaStop->subscriptionKey = evnt.m_eventKey;
- sumaStop->subscriberData = evnt.m_bufferId & 0xFF;
- sumaStop->part = (Uint32) SubscriptionData::TableData;
- sumaStop->subscriberRef = m_reference;
+ req->subscriptionId = ev_op.m_eventImpl->m_eventId;
+ req->subscriptionKey = ev_op.m_eventImpl->m_eventKey;
+ req->subscriberData = ev_op.m_oid;
+ req->part = (Uint32) SubscriptionData::TableData;
+ req->subscriberRef = m_reference;
- DBUG_RETURN(stopSubscribeEvent(&tSignal, NULL));
-}
+ DBUG_PRINT("info",("GSN_SUB_STOP_REQ subscriptionId=%d,subscriptionKey=%d,"
+ "subscriberData=%d",req->subscriptionId,
+ req->subscriptionKey,req->subscriberData));
-int
-NdbDictInterface::stopSubscribeEvent(NdbApiSignal* signal,
- LinearSectionPtr ptr[3])
-{
- return dictSignal(signal,NULL,0,
- 1 /*use masternode id*/,
- 100,
- WAIT_CREATE_INDX_REQ /*WAIT_SUB_STOP__REQ*/,
- -1,
- NULL,0);
+ int errCodes[] = { SubStopRef::Busy };
+ DBUG_RETURN(dictSignal(&tSignal,NULL,0,
+ 1 /*use masternode id*/,
+ 100,
+ WAIT_CREATE_INDX_REQ /*WAIT_SUB_STOP__REQ*/,
+ -1,
+ errCodes, sizeof(errCodes)/sizeof(errCodes[0])));
}
NdbEventImpl *
NdbDictionaryImpl::getEvent(const char * eventName)
{
- NdbEventImpl *ev = new NdbEventImpl();
+ DBUG_ENTER("NdbDictionaryImpl::getEvent");
+ DBUG_PRINT("enter",("eventName= %s", eventName));
+ NdbEventImpl *ev = new NdbEventImpl();
if (ev == NULL) {
- return NULL;
+ DBUG_RETURN(NULL);
}
ev->setName(eventName);
@@ -2730,48 +2751,83 @@ NdbDictionaryImpl::getEvent(const char * eventName)
if (ret) {
delete ev;
- return NULL;
+ DBUG_RETURN(NULL);
}
// We only have the table name with internal name
+ DBUG_PRINT("info",("table %s", ev->getTableName()));
+ Ndb_local_table_info *info;
+ int retry= 0;
+ while (1)
+ {
+ info= get_local_table_info(ev->getTableName(), true);
+ if (info == 0)
+ {
+ DBUG_PRINT("error",("unable to find table %s", ev->getTableName()));
+ delete ev;
+ DBUG_RETURN(NULL);
+ }
+
+ if (ev->m_tableId == info->m_table_impl->m_tableId &&
+ ev->m_tableVersion == info->m_table_impl->m_version)
+ break;
+ if (retry)
+ {
+ m_error.code= 241;
+ DBUG_PRINT("error",("%s: table version mismatch, event: [%u,%u] table: [%u,%u]",
+ ev->getTableName(), ev->m_tableId, ev->m_tableVersion,
+ info->m_table_impl->m_tableId, info->m_table_impl->m_version));
+ delete ev;
+ DBUG_RETURN(NULL);
+ }
+ invalidateObject(*info->m_table_impl);
+ retry++;
+ }
+
+ ev->m_tableImpl= info->m_table_impl;
ev->setTable(m_ndb.externalizeTableName(ev->getTableName()));
- ev->m_tableImpl = getTable(ev->getTableName());
// get the columns from the attrListBitmask
NdbTableImpl &table = *ev->m_tableImpl;
AttributeMask & mask = ev->m_attrListBitmask;
- int attributeList_sz = mask.count();
- int id = -1;
+ unsigned attributeList_sz = mask.count();
-#ifdef EVENT_DEBUG
- ndbout_c("NdbDictionaryImpl::getEvent attributeList_sz = %d",
- attributeList_sz);
+ DBUG_PRINT("info",("Table: id: %d version: %d", table.m_tableId, table.m_version));
+
+#ifndef DBUG_OFF
char buf[128] = {0};
mask.getText(buf);
- ndbout_c("mask = %s", buf);
+ DBUG_PRINT("info",("attributeList_sz= %d, mask= %s", attributeList_sz, buf));
#endif
- for(int i = 0; i < attributeList_sz; i++) {
- id++; while (!mask.get(id)) id++;
+
+ if ( attributeList_sz > table.getNoOfColumns() )
+ {
+ DBUG_PRINT("error",("Invalid version, too many columns"));
+ delete ev;
+ DBUG_RETURN(NULL);
+ }
- const NdbColumnImpl* col = table.getColumn(id);
- if(col == 0) {
-#ifdef EVENT_DEBUG
- ndbout_c("NdbDictionaryImpl::getEvent could not find column id %d", id);
-#endif
- m_error.code= 4247;
+ assert( (int)attributeList_sz <= table.getNoOfColumns() );
+ for(unsigned id= 0; ev->m_columns.size() < attributeList_sz; id++) {
+ if ( id >= table.getNoOfColumns())
+ {
+ DBUG_PRINT("error",("Invalid version, column %d out of range", id));
delete ev;
- return NULL;
+ DBUG_RETURN(NULL);
}
+ if (!mask.get(id))
+ continue;
+
+ const NdbColumnImpl* col = table.getColumn(id);
+ DBUG_PRINT("info",("column %d %s", id, col->getName()));
NdbColumnImpl* new_col = new NdbColumnImpl;
// Copy column definition
*new_col = *col;
-
ev->m_columns.push_back(new_col);
}
-
- return ev;
+ DBUG_RETURN(ev);
}
void
@@ -2795,7 +2851,8 @@ NdbDictInterface::execCREATE_EVNT_CONF(NdbApiSignal * signal,
Uint32 subscriptionId = createEvntConf->getEventId();
Uint32 subscriptionKey = createEvntConf->getEventKey();
- DBUG_PRINT("info",("subscriptionId=%d,subscriptionKey=%d",
+ DBUG_PRINT("info",("nodeid=%d,subscriptionId=%d,subscriptionKey=%d",
+ refToNode(signal->theSendersBlockRef),
subscriptionId,subscriptionKey));
m_waiter.signal(NO_WAIT);
DBUG_VOID_RETURN;
@@ -2900,74 +2957,6 @@ NdbDictInterface::execSUB_START_REF(NdbApiSignal * signal,
m_waiter.signal(NO_WAIT);
DBUG_VOID_RETURN;
}
-void
-NdbDictInterface::execSUB_GCP_COMPLETE_REP(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
- const SubGcpCompleteRep * const rep=
- CAST_CONSTPTR(SubGcpCompleteRep, signal->getDataPtr());
-
- const Uint32 gci = rep->gci;
- // const Uint32 senderRef = rep->senderRef;
- const Uint32 subscriberData = rep->subscriberData;
-
- const Uint32 bufferId = subscriberData;
-
- const Uint32 ref = signal->theSendersBlockRef;
-
- NdbApiSignal tSignal(m_reference);
- SubGcpCompleteAcc * acc=
- CAST_PTR(SubGcpCompleteAcc, tSignal.getDataPtrSend());
-
- acc->rep = *rep;
-
- tSignal.theReceiversBlockNumber = refToBlock(ref);
- tSignal.theVerId_signalNumber = GSN_SUB_GCP_COMPLETE_ACC;
- tSignal.theLength = SubGcpCompleteAcc::SignalLength;
-
- Uint32 aNodeId = refToNode(ref);
-
- // m_transporter->lock_mutex();
- int r;
- r = m_transporter->sendSignal(&tSignal, aNodeId);
- // m_transporter->unlock_mutex();
-
- NdbGlobalEventBufferHandle::latestGCI(bufferId, gci);
-}
-
-void
-NdbDictInterface::execSUB_TABLE_DATA(NdbApiSignal * signal,
- LinearSectionPtr ptr[3])
-{
-#ifdef EVENT_DEBUG
- const char * FNAME = "NdbDictInterface::execSUB_TABLE_DATA";
-#endif
- //TODO
- const SubTableData * const sdata = CAST_CONSTPTR(SubTableData, signal->getDataPtr());
-
- // const Uint32 gci = sdata->gci;
- // const Uint32 operation = sdata->operation;
- // const Uint32 tableId = sdata->tableId;
- // const Uint32 noOfAttrs = sdata->noOfAttributes;
- // const Uint32 dataLen = sdata->dataSize;
- const Uint32 subscriberData = sdata->subscriberData;
- // const Uint32 logType = sdata->logType;
-
- for (int i=signal->m_noOfSections;i < 3; i++) {
- ptr[i].p = NULL;
- ptr[i].sz = 0;
- }
-#ifdef EVENT_DEBUG
- ndbout_c("%s: senderData %d, gci %d, operation %d, tableId %d, noOfAttrs %d, dataLen %d",
- FNAME, subscriberData, gci, operation, tableId, noOfAttrs, dataLen);
- ndbout_c("ptr[0] %u %u ptr[1] %u %u ptr[2] %u %u\n",
- ptr[0].p,ptr[0].sz,ptr[1].p,ptr[1].sz,ptr[2].p,ptr[2].sz);
-#endif
- const Uint32 bufferId = subscriberData;
-
- NdbGlobalEventBufferHandle::insertDataL(bufferId,
- sdata, ptr);
-}
/*****************************************************************
* Drop event
@@ -2999,7 +2988,7 @@ NdbDictInterface::dropEvent(const NdbEventImpl &evnt)
UtilBufferWriter w(m_buffer);
- w.add(SimpleProperties::StringValue, evnt.m_externalName.c_str());
+ w.add(SimpleProperties::StringValue, evnt.m_name.c_str());
LinearSectionPtr ptr[1];
ptr[0].p = (Uint32*)m_buffer.get_data();
@@ -3013,14 +3002,12 @@ NdbDictInterface::dropEvent(NdbApiSignal* signal,
LinearSectionPtr ptr[3], int noLSP)
{
//TODO
- const int noErrCodes = 1;
- int errCodes[noErrCodes] = {DropEvntRef::Busy};
return dictSignal(signal,ptr,noLSP,
1 /*use masternode id*/,
100,
WAIT_CREATE_INDX_REQ /*WAIT_CREATE_EVNT_REQ*/,
-1,
- errCodes,noErrCodes, DropEvntRef::Temporary);
+ NULL,0, -1);
}
void
NdbDictInterface::execDROP_EVNT_CONF(NdbApiSignal * signal,
@@ -3213,6 +3200,65 @@ NdbDictInterface::execLIST_TABLES_CONF(NdbApiSignal* signal,
}
}
+int
+NdbDictionaryImpl::forceGCPWait()
+{
+ return m_receiver.forceGCPWait();
+}
+
+int
+NdbDictInterface::forceGCPWait()
+{
+ NdbApiSignal tSignal(m_reference);
+ WaitGCPReq* const req = CAST_PTR(WaitGCPReq, tSignal.getDataPtrSend());
+ req->senderRef = m_reference;
+ req->senderData = 0;
+ req->requestType = WaitGCPReq::CompleteForceStart;
+ tSignal.theReceiversBlockNumber = DBDIH;
+ tSignal.theVerId_signalNumber = GSN_WAIT_GCP_REQ;
+ tSignal.theLength = WaitGCPReq::SignalLength;
+
+ const Uint32 RETRIES = 100;
+ for (Uint32 i = 0; i < RETRIES; i++)
+ {
+ m_transporter->lock_mutex();
+ Uint16 aNodeId = m_transporter->get_an_alive_node();
+ if (aNodeId == 0) {
+ m_error.code= 4009;
+ m_transporter->unlock_mutex();
+ return -1;
+ }
+ if (m_transporter->sendSignal(&tSignal, aNodeId) != 0) {
+ m_transporter->unlock_mutex();
+ continue;
+ }
+ m_error.code= 0;
+ m_waiter.m_node = aNodeId;
+ m_waiter.m_state = WAIT_LIST_TABLES_CONF;
+ m_waiter.wait(WAITFOR_RESPONSE_TIMEOUT);
+ m_transporter->unlock_mutex();
+ return 0;
+ }
+ return -1;
+}
+
+void
+NdbDictInterface::execWAIT_GCP_CONF(NdbApiSignal* signal,
+ LinearSectionPtr ptr[3])
+{
+ const WaitGCPConf * const conf=
+ CAST_CONSTPTR(WaitGCPConf, signal->getDataPtr());
+ g_latest_trans_gci= conf->gcp;
+ m_waiter.signal(NO_WAIT);
+}
+
+void
+NdbDictInterface::execWAIT_GCP_REF(NdbApiSignal* signal,
+ LinearSectionPtr ptr[3])
+{
+ m_waiter.signal(NO_WAIT);
+}
+
template class Vector<int>;
template class Vector<Uint16>;
template class Vector<Uint32>;
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
index b4614ec3512..575de7fb425 100644
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
@@ -102,12 +102,15 @@ public:
void init();
void setName(const char * name);
const char * getName() const;
+ const char * getMysqlName() const;
+ void updateMysqlName();
Uint32 m_changeMask;
Uint32 m_tableId;
Uint32 m_primaryTableId;
BaseString m_internalName;
BaseString m_externalName;
+ BaseString m_mysqlName;
BaseString m_newExternalName; // Used for alter table
UtilBuffer m_frm;
UtilBuffer m_ng;
@@ -230,9 +233,9 @@ public:
Uint32 m_eventId;
Uint32 m_eventKey;
Uint32 m_tableId;
+ Uint32 m_tableVersion;
AttributeMask m_attrListBitmask;
- //BaseString m_internalName;
- BaseString m_externalName;
+ BaseString m_name;
Uint32 mi_type;
NdbDictionary::Event::EventDurability m_dur;
@@ -242,10 +245,6 @@ public:
Vector<NdbColumnImpl *> m_columns;
Vector<unsigned> m_attrIds;
- int m_bufferId;
-
- NdbEventOperation *eventOp;
-
static NdbEventImpl & getImpl(NdbDictionary::Event & t);
static NdbEventImpl & getImpl(const NdbDictionary::Event & t);
NdbDictionary::Event * m_facade;
@@ -301,11 +300,8 @@ public:
int dropEvent(const NdbEventImpl &);
int dropEvent(NdbApiSignal* signal, LinearSectionPtr ptr[3], int noLSP);
- int executeSubscribeEvent(class Ndb & ndb, NdbEventImpl &);
- int executeSubscribeEvent(NdbApiSignal* signal, LinearSectionPtr ptr[3]);
-
- int stopSubscribeEvent(class Ndb & ndb, NdbEventImpl &);
- int stopSubscribeEvent(NdbApiSignal* signal, LinearSectionPtr ptr[3]);
+ int executeSubscribeEvent(class Ndb & ndb, NdbEventOperationImpl &);
+ int stopSubscribeEvent(class Ndb & ndb, NdbEventOperationImpl &);
int listObjects(NdbDictionary::Dictionary::List& list, Uint32 requestData, bool fullyQualifiedNames);
int listObjects(NdbApiSignal* signal);
@@ -316,6 +312,8 @@ public:
LinearSectionPtr ptr[3],
Uint32 noOfSections, bool fullyQualifiedNames);
+ int forceGCPWait();
+
static int parseTableInfo(NdbTableImpl ** dst,
const Uint32 * data, Uint32 len,
bool fullyQualifiedNames);
@@ -323,7 +321,7 @@ public:
static int create_index_obj_from_table(NdbIndexImpl ** dst,
NdbTableImpl* index_table,
const NdbTableImpl* primary_table);
-
+ const NdbError &getNdbError() const;
NdbError & m_error;
private:
Uint32 m_reference;
@@ -356,8 +354,6 @@ private:
void execCREATE_EVNT_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
void execSUB_START_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
void execSUB_START_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
- void execSUB_TABLE_DATA(NdbApiSignal *, LinearSectionPtr ptr[3]);
- void execSUB_GCP_COMPLETE_REP(NdbApiSignal *, LinearSectionPtr ptr[3]);
void execSUB_STOP_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
void execSUB_STOP_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
void execDROP_EVNT_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
@@ -367,6 +363,9 @@ private:
void execDROP_TABLE_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
void execLIST_TABLES_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+ void execWAIT_GCP_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+ void execWAIT_GCP_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
+
Uint32 m_fragmentId;
UtilBuffer m_buffer;
};
@@ -400,8 +399,10 @@ public:
int createEvent(NdbEventImpl &);
int dropEvent(const char * eventName);
- int executeSubscribeEvent(NdbEventImpl &);
- int stopSubscribeEvent(NdbEventImpl &);
+ int executeSubscribeEvent(NdbEventOperationImpl &);
+ int stopSubscribeEvent(NdbEventOperationImpl &);
+
+ int forceGCPWait();
int listObjects(List& list, NdbDictionary::Object::Type type);
int listIndexes(List& list, Uint32 indexId);
@@ -513,6 +514,13 @@ NdbTableImpl::getColumn(unsigned attrId){
}
inline
+const char *
+NdbTableImpl::getMysqlName() const
+{
+ return m_mysqlName.c_str();
+}
+
+inline
Uint32
Hash( const char* str ){
Uint32 h = 0;
@@ -646,10 +654,8 @@ NdbDictionaryImpl::getTable(const char * table_name, void **data)
get_local_table_info(internal_tabname, true);
if (info == 0)
return 0;
-
if (data)
*data= info->m_local_data;
-
return info->m_table_impl;
}
diff --git a/storage/ndb/src/ndbapi/NdbEventOperation.cpp b/storage/ndb/src/ndbapi/NdbEventOperation.cpp
index e99cad918c5..14d6fe69b35 100644
--- a/storage/ndb/src/ndbapi/NdbEventOperation.cpp
+++ b/storage/ndb/src/ndbapi/NdbEventOperation.cpp
@@ -21,12 +21,8 @@
#include "NdbEventOperationImpl.hpp"
#include "NdbDictionaryImpl.hpp"
-NdbEventOperation::NdbEventOperation(Ndb *theNdb,
- const char* eventName,
- int bufferLength)
- : m_impl(* new NdbEventOperationImpl(*this,theNdb,
- eventName,
- bufferLength))
+NdbEventOperation::NdbEventOperation(Ndb *theNdb,const char* eventName)
+ : m_impl(* new NdbEventOperationImpl(*this,theNdb,eventName))
{
}
@@ -61,31 +57,43 @@ NdbEventOperation::execute()
}
int
-NdbEventOperation::next(int *pOverrun)
+NdbEventOperation::isOverrun() const
{
- return m_impl.next(pOverrun);
+ return 0; // ToDo
}
bool
-NdbEventOperation::isConsistent()
+NdbEventOperation::isConsistent() const
{
- return m_impl.isConsistent();
+ return true;
}
-Uint32
-NdbEventOperation::getGCI()
+void
+NdbEventOperation::clearError()
+{
+ m_impl.m_has_error= 0;
+}
+
+int
+NdbEventOperation::hasError() const
+{
+ return m_impl.m_has_error;
+}
+
+Uint64
+NdbEventOperation::getGCI() const
{
return m_impl.getGCI();
}
-Uint32
-NdbEventOperation::getLatestGCI()
+Uint64
+NdbEventOperation::getLatestGCI() const
{
return m_impl.getLatestGCI();
}
NdbDictionary::Event::TableEvent
-NdbEventOperation::getEventType()
+NdbEventOperation::getEventType() const
{
return m_impl.getEventType();
}
@@ -97,15 +105,62 @@ NdbEventOperation::print()
}
/*
- * Private members
+ * Internal for the mysql server
*/
+const NdbDictionary::Table *NdbEventOperation::getTable() const
+{
+ return m_impl.m_eventImpl->m_tableImpl->m_facade;
+}
+const NdbDictionary::Event *NdbEventOperation::getEvent() const
+{
+ return m_impl.m_eventImpl->m_facade;
+}
+const NdbRecAttr* NdbEventOperation::getFirstPkAttr() const
+{
+ return m_impl.theFirstPkAttrs[0];
+}
+const NdbRecAttr* NdbEventOperation::getFirstPkPreAttr() const
+{
+ return m_impl.theFirstPkAttrs[1];
+}
+const NdbRecAttr* NdbEventOperation::getFirstDataAttr() const
+{
+ return m_impl.theFirstDataAttrs[0];
+}
+const NdbRecAttr* NdbEventOperation::getFirstDataPreAttr() const
+{
+ return m_impl.theFirstDataAttrs[1];
+}
+bool NdbEventOperation::validateTable(NdbDictionary::Table &table) const
+{
+ DBUG_ENTER("NdbEventOperation::validateTable");
+ bool res = true;
+ if (table.getObjectVersion() != m_impl.m_eventImpl->m_tableVersion)
+ {
+ DBUG_PRINT("info",("invalid version"));
+ res= false;
+ }
+ DBUG_RETURN(res);
+}
-int
-NdbEventOperation::wait(void *p, int aMillisecondNumber)
+void NdbEventOperation::setCustomData(void * data)
+{
+ m_impl.m_custom_data= data;
+}
+void * NdbEventOperation::getCustomData() const
+{
+ return m_impl.m_custom_data;
+}
+
+int NdbEventOperation::getReqNodeId() const
{
- return NdbEventOperationImpl::wait(p, aMillisecondNumber);
+ return m_impl.m_data_item->sdata->req_nodeid;
}
+/*
+ * Private members
+ */
+
NdbEventOperation::NdbEventOperation(NdbEventOperationImpl& impl)
: m_impl(impl) {}
diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
index 208525bfc15..429e3b558d3 100644
--- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
@@ -41,6 +41,13 @@
#include <NdbEventOperation.hpp>
#include "NdbEventOperationImpl.hpp"
+#include <EventLogger.hpp>
+extern EventLogger g_eventLogger;
+
+static Gci_container g_empty_gci_container;
+static const Uint32 ACTIVE_GCI_DIRECTORY_SIZE = 4;
+static const Uint32 ACTIVE_GCI_MASK = ACTIVE_GCI_DIRECTORY_SIZE - 1;
+
/*
* Class NdbEventOperationImpl
*
@@ -49,14 +56,20 @@
//#define EVENT_DEBUG
+// todo handle several ndb objects
+// todo free allocated data when closing NdbEventBuffer
NdbEventOperationImpl::NdbEventOperationImpl(NdbEventOperation &N,
Ndb *theNdb,
- const char* eventName,
- const int bufferLength)
- : NdbEventOperation(*this), m_ndb(theNdb),
- m_state(EO_ERROR), m_bufferL(bufferLength)
+ const char* eventName)
+ : NdbEventOperation(*this), m_facade(&N), m_magic_number(0),
+ m_ndb(theNdb), m_state(EO_ERROR), mi_type(0), m_oid(~(Uint32)0),
+#ifdef VM_TRACE
+ m_data_done_count(0), m_data_count(0),
+#endif
+ m_next(0), m_prev(0)
{
+ DBUG_ENTER("NdbEventOperationImpl::NdbEventOperationImpl");
m_eventId = 0;
theFirstPkAttrs[0] = NULL;
theCurrentPkAttrs[0] = NULL;
@@ -66,10 +79,11 @@ NdbEventOperationImpl::NdbEventOperationImpl(NdbEventOperation &N,
theCurrentDataAttrs[0] = NULL;
theFirstDataAttrs[1] = NULL;
theCurrentDataAttrs[1] = NULL;
- sdata = NULL;
- ptr[0].p = NULL;
- ptr[1].p = NULL;
- ptr[2].p = NULL;
+ m_data_item= NULL;
+ m_eventImpl = NULL;
+
+ m_custom_data= 0;
+ m_has_error= 1;
// we should lookup id in Dictionary, TODO
// also make sure we only have one listener on each event
@@ -77,49 +91,44 @@ NdbEventOperationImpl::NdbEventOperationImpl(NdbEventOperation &N,
if (!m_ndb) abort();
NdbDictionary::Dictionary *myDict = m_ndb->getDictionary();
- if (!myDict) { m_error.code= m_ndb->getNdbError().code; return; }
+ if (!myDict) { m_error.code= m_ndb->getNdbError().code; DBUG_VOID_RETURN; }
const NdbDictionary::Event *myEvnt = myDict->getEvent(eventName);
- if (!myEvnt) { m_error.code= myDict->getNdbError().code; return; }
+ if (!myEvnt) { m_error.code= myDict->getNdbError().code; DBUG_VOID_RETURN; }
m_eventImpl = &myEvnt->m_impl;
m_eventId = m_eventImpl->m_eventId;
- m_bufferHandle = m_ndb->getGlobalEventBufferHandle();
- if (m_bufferHandle->m_bufferL > 0)
- m_bufferL =m_bufferHandle->m_bufferL;
- else
- m_bufferHandle->m_bufferL = m_bufferL;
+ m_oid= m_ndb->theImpl->theNdbObjectIdMap.map(this);
+
+ m_state= EO_CREATED;
- m_state = EO_CREATED;
+ m_has_error= 0;
+
+ DBUG_PRINT("exit",("this: 0x%x oid: %u", this, m_oid));
+ DBUG_VOID_RETURN;
}
NdbEventOperationImpl::~NdbEventOperationImpl()
{
- int i;
- if (sdata) NdbMem_Free((char*)sdata);
- for (i=0 ; i<2; i++) {
- NdbRecAttr *p = theFirstPkAttrs[i];
- while (p) {
- NdbRecAttr *p_next = p->next();
- m_ndb->releaseRecAttr(p);
- p = p_next;
- }
- }
- for (i=0 ; i<2; i++) {
- NdbRecAttr *p = theFirstDataAttrs[i];
- while (p) {
- NdbRecAttr *p_next = p->next();
- m_ndb->releaseRecAttr(p);
- p = p_next;
- }
- }
- if (m_state == EO_EXECUTING) {
- stop();
- // m_bufferHandle->dropSubscribeEvent(m_bufferId);
- ; // We should send stop signal here
+ DBUG_ENTER("NdbEventOperationImpl::~NdbEventOperationImpl");
+ m_magic_number= 0;
+
+ stop();
+ // m_bufferHandle->dropSubscribeEvent(m_bufferId);
+ ; // ToDo? We should send stop signal here
+
+ m_ndb->theImpl->theNdbObjectIdMap.unmap(m_oid, this);
+ DBUG_PRINT("exit",("this: 0x%x/0x%x oid: %u", this, m_facade, m_oid));
+
+ if (m_eventImpl)
+ {
+ delete m_eventImpl->m_facade;
+ m_eventImpl= 0;
}
+
+ DBUG_VOID_RETURN;
}
NdbEventOperation::State
@@ -133,7 +142,8 @@ NdbEventOperationImpl::getValue(const char *colName, char *aValue, int n)
{
DBUG_ENTER("NdbEventOperationImpl::getValue");
if (m_state != EO_CREATED) {
- ndbout_c("NdbEventOperationImpl::getValue may only be called between instantiation and execute()");
+ ndbout_c("NdbEventOperationImpl::getValue may only be called between "
+ "instantiation and execute()");
DBUG_RETURN(NULL);
}
@@ -244,42 +254,22 @@ NdbEventOperationImpl::execute()
}
- NdbDictionaryImpl & myDictImpl = NdbDictionaryImpl::getImpl(*myDict);
-
-
- int hasSubscriber;
- int r= m_bufferHandle->prepareAddSubscribeEvent(this,
- hasSubscriber /*return value*/);
- m_error.code= 4709;
-
- if (r < 0)
- {
- DBUG_RETURN(-1);
- }
-
- m_eventImpl->m_bufferId = m_bufferId = (Uint32)r;
-
- r = -1;
- if (m_bufferId >= 0) {
- // now we check if there's already a subscriber
-
- if (hasSubscriber == 0) { // only excute if there's no other subscribers
- r = myDictImpl.executeSubscribeEvent(*m_eventImpl);
- } else {
- r = 0;
- }
- if (r) {
- //Error
- m_bufferHandle->unprepareAddSubscribeEvent(m_bufferId);
- m_state = EO_ERROR;
- } else {
- m_bufferHandle->addSubscribeEvent(m_bufferId, this);
- m_state = EO_EXECUTING;
- }
- } else {
- //Error
- m_state = EO_ERROR;
+ m_ndb->theEventBuffer->add_drop_lock();
+ m_magic_number= NDB_EVENT_OP_MAGIC_NUMBER;
+ m_state= EO_EXECUTING;
+ mi_type= m_eventImpl->mi_type;
+ m_ndb->theEventBuffer->add_op();
+ int r= NdbDictionaryImpl::getImpl(*myDict).executeSubscribeEvent(*this);
+ if (r == 0) {
+ m_ndb->theEventBuffer->add_drop_unlock();
+ DBUG_RETURN(0);
}
+ //Error
+ m_state= EO_ERROR;
+ mi_type= 0;
+ m_magic_number= 0;
+ m_ndb->theEventBuffer->remove_op();
+ m_ndb->theEventBuffer->add_drop_unlock();
DBUG_RETURN(r);
}
@@ -287,249 +277,204 @@ int
NdbEventOperationImpl::stop()
{
DBUG_ENTER("NdbEventOperationImpl::stop");
+ int i;
+
+ for (i=0 ; i<2; i++) {
+ NdbRecAttr *p = theFirstPkAttrs[i];
+ while (p) {
+ NdbRecAttr *p_next = p->next();
+ m_ndb->releaseRecAttr(p);
+ p = p_next;
+ }
+ theFirstPkAttrs[i]= 0;
+ }
+ for (i=0 ; i<2; i++) {
+ NdbRecAttr *p = theFirstDataAttrs[i];
+ while (p) {
+ NdbRecAttr *p_next = p->next();
+ m_ndb->releaseRecAttr(p);
+ p = p_next;
+ }
+ theFirstDataAttrs[i]= 0;
+ }
+
if (m_state != EO_EXECUTING)
{
DBUG_RETURN(-1);
}
- // ndbout_c("NdbEventOperation::stopping()");
-
NdbDictionary::Dictionary *myDict = m_ndb->getDictionary();
if (!myDict) {
m_error.code= m_ndb->getNdbError().code;
DBUG_RETURN(-1);
}
- NdbDictionaryImpl & myDictImpl = NdbDictionaryImpl::getImpl(*myDict);
-
- int hasSubscriber;
- int ret =
- m_bufferHandle->prepareDropSubscribeEvent(m_bufferId,
- hasSubscriber /* return value */);
-
- if (ret < 0) {
- m_error.code= 4712;
- DBUG_RETURN(-1);
- }
- // m_eventImpl->m_bufferId = m_bufferId;
-
- int r = -1;
-
- if (hasSubscriber == 0) { // only excute if there's no other subscribers
- r = myDictImpl.stopSubscribeEvent(*m_eventImpl);
-#ifdef EVENT_DEBUG
- ndbout_c("NdbEventOperation::stopping() done");
-#endif
- } else
- r = 0;
-
- if (r) {
- //Error
- m_bufferHandle->unprepareDropSubscribeEvent(m_bufferId);
- m_error.code= myDictImpl.m_error.code;
- m_state = EO_ERROR;
- } else {
-#ifdef EVENT_DEBUG
- ndbout_c("NdbEventOperation::dropping()");
-#endif
- m_bufferHandle->dropSubscribeEvent(m_bufferId);
- m_state = EO_CREATED;
+ m_ndb->theEventBuffer->add_drop_lock();
+ int r= NdbDictionaryImpl::getImpl(*myDict).stopSubscribeEvent(*this);
+ m_ndb->theEventBuffer->remove_op();
+ m_state= EO_DROPPED;
+ mi_type= 0;
+ if (r == 0) {
+ m_ndb->theEventBuffer->add_drop_unlock();
+ DBUG_RETURN(0);
}
-
+ //Error
+ m_error.code= NdbDictionaryImpl::getImpl(*myDict).m_error.code;
+ m_state= EO_ERROR;
+ m_ndb->theEventBuffer->add_drop_unlock();
DBUG_RETURN(r);
}
-bool
-NdbEventOperationImpl::isConsistent()
-{
- return sdata->isGCIConsistent();
-}
-
-Uint32
+Uint64
NdbEventOperationImpl::getGCI()
{
- return sdata->gci;
+ return m_data_item->sdata->gci;
}
-Uint32
+Uint64
NdbEventOperationImpl::getLatestGCI()
{
- return NdbGlobalEventBufferHandle::getLatestGCI();
+ return m_ndb->theEventBuffer->getLatestGCI();
}
int
-NdbEventOperationImpl::next(int *pOverrun)
+NdbEventOperationImpl::receive_event()
{
- DBUG_ENTER("NdbEventOperationImpl::next");
- int nr = 10000; // a high value
- int tmpOverrun = 0;
- int *ptmpOverrun;
- if (pOverrun) {
- ptmpOverrun = &tmpOverrun;
- } else
- ptmpOverrun = NULL;
-
- while (nr > 0) {
- int r=NdbGlobalEventBufferHandle::getDataL(m_bufferId, sdata,
- ptr, pOverrun);
- if (pOverrun) {
- tmpOverrun += *pOverrun;
- *pOverrun = tmpOverrun;
- }
+ DBUG_ENTER("NdbEventOperationImpl::receive_event");
- if (r <= 0)
- {
- DBUG_RETURN(r); // no data
- }
+ Uint32 operation= (Uint32)m_data_item->sdata->operation;
+ DBUG_PRINT("info",("sdata->operation %u",operation));
- if (r < nr) r = nr; else nr--; // we don't want to be stuck here forever
-
-#ifdef EVENT_DEBUG
- ndbout_c("!!!!!!!sdata->operation %u", (Uint32)sdata->operation);
-#endif
+ if (unlikely(operation >= NdbDictionary::Event::_TE_FIRST_NON_DATA_EVENT))
+ {
+ DBUG_RETURN(1);
+ }
- // now move the data into the RecAttrs
- if ((theFirstPkAttrs[0] == NULL) &&
- (theFirstPkAttrs[1] == NULL) &&
- (theFirstDataAttrs[0] == NULL) &&
- (theFirstDataAttrs[1] == NULL))
- {
- DBUG_RETURN(r);
- }
- // no copying since no RecAttr's
-
-
- Uint32 *aAttrPtr = ptr[0].p;
- Uint32 *aAttrEndPtr = aAttrPtr + ptr[0].sz;
- Uint32 *aDataPtr = ptr[1].p;
-
-#ifdef EVENT_DEBUG
- int i;
- printf("after values sz=%u\n", ptr[1].sz);
- for(i=0; i < (int)ptr[1].sz; i++)
- printf ("H'%.8X ",ptr[1].p[i]);
- printf("\n");
- printf("before values sz=%u\n", ptr[2].sz);
- for(i=0; i < (int)ptr[2].sz; i++)
- printf ("H'%.8X ",ptr[2].p[i]);
- printf("\n");
-#endif
+ // now move the data into the RecAttrs
+
+ int is_update= operation == NdbDictionary::Event::_TE_UPDATE;
- // copy data into the RecAttr's
- // we assume that the respective attribute lists are sorted
+ Uint32 *aAttrPtr = m_data_item->ptr[0].p;
+ Uint32 *aAttrEndPtr = aAttrPtr + m_data_item->ptr[0].sz;
+ Uint32 *aDataPtr = m_data_item->ptr[1].p;
- // first the pk's
+ DBUG_DUMP("after",(char*)m_data_item->ptr[1].p, m_data_item->ptr[1].sz*4);
+ DBUG_DUMP("before",(char*)m_data_item->ptr[2].p, m_data_item->ptr[2].sz*4);
+
+ // copy data into the RecAttr's
+ // we assume that the respective attribute lists are sorted
+
+ // first the pk's
+ {
+ NdbRecAttr *tAttr= theFirstPkAttrs[0];
+ NdbRecAttr *tAttr1= theFirstPkAttrs[1];
+ while(tAttr)
{
- NdbRecAttr *tAttr= theFirstPkAttrs[0];
- while(tAttr)
+ assert(aAttrPtr < aAttrEndPtr);
+ unsigned tDataSz= AttributeHeader(*aAttrPtr).getDataSize();
+ assert(tAttr->attrId() ==
+ AttributeHeader(*aAttrPtr).getAttributeId());
+ receive_data(tAttr, aDataPtr, tDataSz);
+ if (is_update)
{
- assert(aAttrPtr < aAttrEndPtr);
- unsigned tDataSz= AttributeHeader(*aAttrPtr).getDataSize();
- assert(tAttr->attrId() ==
- AttributeHeader(*aAttrPtr).getAttributeId());
- assert(tAttr->receive_data(aDataPtr, tDataSz));
- // next
- aAttrPtr++;
- aDataPtr+= tDataSz;
- tAttr= tAttr->next();
+ receive_data(tAttr1, aDataPtr, tDataSz);
+ tAttr1= tAttr1->next();
}
+ // next
+ aAttrPtr++;
+ aDataPtr+= tDataSz;
+ tAttr= tAttr->next();
}
-
- NdbRecAttr *tWorkingRecAttr = theFirstDataAttrs[0];
-
- Uint32 tRecAttrId;
- Uint32 tAttrId;
- Uint32 tDataSz;
- int hasSomeData=0;
- while ((aAttrPtr < aAttrEndPtr) && (tWorkingRecAttr != NULL)) {
- tRecAttrId = tWorkingRecAttr->attrId();
- tAttrId = AttributeHeader(*aAttrPtr).getAttributeId();
- tDataSz = AttributeHeader(*aAttrPtr).getDataSize();
-
- while (tAttrId > tRecAttrId) {
- //printf("[%u] %u %u [%u]\n", tAttrId, tDataSz, *aDataPtr, tRecAttrId);
- tWorkingRecAttr->setUNDEFINED();
- tWorkingRecAttr = tWorkingRecAttr->next();
- if (tWorkingRecAttr == NULL)
- break;
- tRecAttrId = tWorkingRecAttr->attrId();
- }
+ }
+
+ NdbRecAttr *tWorkingRecAttr = theFirstDataAttrs[0];
+
+ Uint32 tRecAttrId;
+ Uint32 tAttrId;
+ Uint32 tDataSz;
+ int hasSomeData=0;
+ while ((aAttrPtr < aAttrEndPtr) && (tWorkingRecAttr != NULL)) {
+ tRecAttrId = tWorkingRecAttr->attrId();
+ tAttrId = AttributeHeader(*aAttrPtr).getAttributeId();
+ tDataSz = AttributeHeader(*aAttrPtr).getDataSize();
+
+ while (tAttrId > tRecAttrId) {
+ DBUG_PRINT("info",("undef [%u] %u 0x%x [%u] 0x%x",
+ tAttrId, tDataSz, *aDataPtr, tRecAttrId, aDataPtr));
+ tWorkingRecAttr->setUNDEFINED();
+ tWorkingRecAttr = tWorkingRecAttr->next();
if (tWorkingRecAttr == NULL)
break;
+ tRecAttrId = tWorkingRecAttr->attrId();
+ }
+ if (tWorkingRecAttr == NULL)
+ break;
+
+ if (tAttrId == tRecAttrId) {
+ hasSomeData++;
- //printf("[%u] %u %u [%u]\n", tAttrId, tDataSz, *aDataPtr, tRecAttrId);
+ DBUG_PRINT("info",("set [%u] %u 0x%x [%u] 0x%x",
+ tAttrId, tDataSz, *aDataPtr, tRecAttrId, aDataPtr));
- if (tAttrId == tRecAttrId) {
- hasSomeData++;
-
- //printf("set!\n");
-
- assert(tWorkingRecAttr->receive_data(aDataPtr, tDataSz));
- tWorkingRecAttr = tWorkingRecAttr->next();
- }
- aAttrPtr++;
- aDataPtr += tDataSz;
+ receive_data(tWorkingRecAttr, aDataPtr, tDataSz);
+ tWorkingRecAttr = tWorkingRecAttr->next();
}
+ aAttrPtr++;
+ aDataPtr += tDataSz;
+ }
- while (tWorkingRecAttr != NULL) {
- tRecAttrId = tWorkingRecAttr->attrId();
- //printf("set undefined [%u] %u %u [%u]\n", tAttrId, tDataSz, *aDataPtr, tRecAttrId);
+ while (tWorkingRecAttr != NULL) {
+ tRecAttrId = tWorkingRecAttr->attrId();
+ //printf("set undefined [%u] %u %u [%u]\n",
+ // tAttrId, tDataSz, *aDataPtr, tRecAttrId);
+ tWorkingRecAttr->setUNDEFINED();
+ tWorkingRecAttr = tWorkingRecAttr->next();
+ }
+
+ tWorkingRecAttr = theFirstDataAttrs[1];
+ aDataPtr = m_data_item->ptr[2].p;
+ Uint32 *aDataEndPtr = aDataPtr + m_data_item->ptr[2].sz;
+ while ((aDataPtr < aDataEndPtr) && (tWorkingRecAttr != NULL)) {
+ tRecAttrId = tWorkingRecAttr->attrId();
+ tAttrId = AttributeHeader(*aDataPtr).getAttributeId();
+ tDataSz = AttributeHeader(*aDataPtr).getDataSize();
+ aDataPtr++;
+ while (tAttrId > tRecAttrId) {
tWorkingRecAttr->setUNDEFINED();
tWorkingRecAttr = tWorkingRecAttr->next();
- }
-
- tWorkingRecAttr = theFirstDataAttrs[1];
- aDataPtr = ptr[2].p;
- Uint32 *aDataEndPtr = aDataPtr + ptr[2].sz;
- while ((aDataPtr < aDataEndPtr) && (tWorkingRecAttr != NULL)) {
- tRecAttrId = tWorkingRecAttr->attrId();
- tAttrId = AttributeHeader(*aDataPtr).getAttributeId();
- tDataSz = AttributeHeader(*aDataPtr).getDataSize();
- aDataPtr++;
- while (tAttrId > tRecAttrId) {
- tWorkingRecAttr->setUNDEFINED();
- tWorkingRecAttr = tWorkingRecAttr->next();
- if (tWorkingRecAttr == NULL)
- break;
- tRecAttrId = tWorkingRecAttr->attrId();
- }
if (tWorkingRecAttr == NULL)
break;
- if (tAttrId == tRecAttrId) {
- assert(!m_eventImpl->m_tableImpl->getColumn(tRecAttrId)->getPrimaryKey());
- hasSomeData++;
-
- assert(tWorkingRecAttr->receive_data(aDataPtr, tDataSz));
- tWorkingRecAttr = tWorkingRecAttr->next();
- }
- aDataPtr += tDataSz;
+ tRecAttrId = tWorkingRecAttr->attrId();
}
- while (tWorkingRecAttr != NULL) {
- tWorkingRecAttr->setUNDEFINED();
+ if (tWorkingRecAttr == NULL)
+ break;
+ if (tAttrId == tRecAttrId) {
+ assert(!m_eventImpl->m_tableImpl->getColumn(tRecAttrId)->getPrimaryKey());
+ hasSomeData++;
+
+ receive_data(tWorkingRecAttr, aDataPtr, tDataSz);
tWorkingRecAttr = tWorkingRecAttr->next();
}
-
- if (hasSomeData)
- {
- DBUG_RETURN(r);
- }
+ aDataPtr += tDataSz;
+ }
+ while (tWorkingRecAttr != NULL) {
+ tWorkingRecAttr->setUNDEFINED();
+ tWorkingRecAttr = tWorkingRecAttr->next();
+ }
+
+ if (hasSomeData || !is_update)
+ {
+ DBUG_RETURN(1);
}
+
DBUG_RETURN(0);
}
NdbDictionary::Event::TableEvent
NdbEventOperationImpl::getEventType()
{
- switch (sdata->operation) {
- case TriggerEvent::TE_INSERT:
- return NdbDictionary::Event::TE_INSERT;
- case TriggerEvent::TE_DELETE:
- return NdbDictionary::Event::TE_DELETE;
- case TriggerEvent::TE_UPDATE:
- return NdbDictionary::Event::TE_UPDATE;
- default:
- return NdbDictionary::Event::TE_ALL;
- }
+ return (NdbDictionary::Event::TableEvent)
+ (1 << (unsigned)m_data_item->sdata->operation);
}
@@ -563,9 +508,9 @@ NdbEventOperationImpl::print()
void
NdbEventOperationImpl::printAll()
{
- Uint32 *aAttrPtr = ptr[0].p;
- Uint32 *aAttrEndPtr = aAttrPtr + ptr[0].sz;
- Uint32 *aDataPtr = ptr[1].p;
+ Uint32 *aAttrPtr = m_data_item->ptr[0].p;
+ Uint32 *aAttrEndPtr = aAttrPtr + m_data_item->ptr[0].sz;
+ Uint32 *aDataPtr = m_data_item->ptr[1].p;
//tRecAttr->setup(tAttrInfo, aValue)) {
@@ -580,787 +525,855 @@ NdbEventOperationImpl::printAll()
}
}
-
-int NdbEventOperationImpl::wait(void *p, int aMillisecondNumber)
-{
- return ((NdbGlobalEventBufferHandle*)p)->wait(aMillisecondNumber);
-}
-
/*
- * Global variable ndbGlobalEventBuffer
- * Class NdbGlobalEventBufferHandle
- * Class NdbGlobalEventBuffer
- *
+ * Class NdbEventBuffer
+ * Each Ndb object has one NdbEventBuffer object.
*/
-#define ADD_DROP_LOCK_GUARDR(TYPE, FN) \
-{ \
- ndbGlobalEventBuffer->add_drop_lock(); \
- ndbGlobalEventBuffer->lock(); \
- TYPE r = ndbGlobalEventBuffer->FN; \
- ndbGlobalEventBuffer->unlock(); \
- if (r < 0) { \
- ndbGlobalEventBuffer->add_drop_unlock(); \
- } \
- return r;\
-}
-#define GUARDR(TYPE, FN) \
-{ \
- ndbGlobalEventBuffer->lock(); \
- TYPE r = ndbGlobalEventBuffer->FN; \
- ndbGlobalEventBuffer->unlock(); \
- return r;\
-}
-#define GUARD(FN) \
-{ \
- ndbGlobalEventBuffer->lock(); \
- ndbGlobalEventBuffer->FN; \
- ndbGlobalEventBuffer->unlock(); \
-}
-#define ADD_DROP_UNLOCK_GUARD(FN) \
-{ \
- GUARD(FN); \
- ndbGlobalEventBuffer->add_drop_unlock(); \
-}
-#define GUARDBLOCK(BLOCK) \
-{ \
- ndbGlobalEventBuffer->lock(); \
- BLOCK \
- ndbGlobalEventBuffer->unlock(); \
-}
+// ToDo ref count this so it gets destroyed
+NdbMutex *NdbEventBuffer::p_add_drop_mutex= 0;
-/*
- * Global variable ndbGlobalEventBuffer
- *
- */
-
-extern NdbMutex * ndb_global_event_buffer_mutex;
-static NdbGlobalEventBuffer *ndbGlobalEventBuffer=NULL;
-
-/*
- * Class NdbGlobalEventBufferHandle
- * Each Ndb object has a Handle. This Handle is used to access the
- * global NdbGlobalEventBuffer instance ndbGlobalEventBuffer
- */
-
-NdbGlobalEventBufferHandle *
-NdbGlobalEventBuffer_init(int n)
-{
- return new NdbGlobalEventBufferHandle(n);
- // return NdbGlobalEventBufferHandle::init(n);
-}
-
-void
-NdbGlobalEventBuffer_drop(NdbGlobalEventBufferHandle *h)
+NdbEventBuffer::NdbEventBuffer(Ndb *ndb) :
+ m_system_nodes(ndb->theImpl->theNoOfDBnodes),
+ m_ndb(ndb),
+ m_latestGCI(0),
+ m_total_alloc(0),
+ m_free_thresh(10),
+ m_gci_slip_thresh(3),
+ m_dropped_ev_op(0),
+ m_active_op_count(0)
{
- delete h;
-}
+#ifdef VM_TRACE
+ m_latest_command= "NdbEventBuffer::NdbEventBuffer";
+#endif
-NdbGlobalEventBufferHandle::NdbGlobalEventBufferHandle
-(int MAX_NUMBER_ACTIVE_EVENTS) : m_bufferL(0), m_nids(0)
-{
if ((p_cond = NdbCondition_Create()) == NULL) {
- ndbout_c("NdbGlobalEventBufferHandle: NdbCondition_Create() failed");
+ ndbout_c("NdbEventHandle: NdbCondition_Create() failed");
exit(-1);
}
-
- NdbMutex_Lock(ndb_global_event_buffer_mutex);
- if (ndbGlobalEventBuffer == NULL) {
- if (ndbGlobalEventBuffer == NULL) {
- ndbGlobalEventBuffer = new NdbGlobalEventBuffer();
- if (!ndbGlobalEventBuffer) {
- NdbMutex_Unlock(ndb_global_event_buffer_mutex);
- ndbout_c("NdbGlobalEventBufferHandle:: failed to allocate ndbGlobalEventBuffer");
- exit(-1);
- }
+ m_mutex= ndb->theImpl->theWaiter.m_mutex;
+ lock();
+ if (p_add_drop_mutex == 0)
+ {
+ if ((p_add_drop_mutex = NdbMutex_Create()) == NULL) {
+ ndbout_c("NdbEventBuffer: NdbMutex_Create() failed");
+ exit(-1);
}
}
- NdbMutex_Unlock(ndb_global_event_buffer_mutex);
+ unlock();
- GUARD(real_init(this,MAX_NUMBER_ACTIVE_EVENTS));
+ // ToDo set event buffer size
+ // pre allocate event data array
+ m_sz= 0;
+#ifdef VM_TRACE
+ m_free_data_count= 0;
+#endif
+ m_free_data= 0;
+ m_free_data_sz= 0;
+
+ // initialize lists
+ bzero(&g_empty_gci_container, sizeof(Gci_container));
+ init_gci_containers();
}
-NdbGlobalEventBufferHandle::~NdbGlobalEventBufferHandle()
+NdbEventBuffer::~NdbEventBuffer()
{
- NdbCondition_Destroy(p_cond);
+ // todo lock? what if receive thread writes here?
+ for (unsigned j= 0; j < m_allocated_data.size(); j++)
+ {
+ unsigned sz= m_allocated_data[j]->sz;
+ EventBufData *data= m_allocated_data[j]->data;
+ EventBufData *end_data= data+sz;
+ for (; data < end_data; data++)
+ {
+ if (data->sdata)
+ NdbMem_Free(data->sdata);
+ }
+ NdbMem_Free((char*)m_allocated_data[j]);
+ }
- ndbGlobalEventBuffer->lock();
- ndbGlobalEventBuffer->real_remove(this);
- ndbGlobalEventBuffer->unlock();
+ NdbCondition_Destroy(p_cond);
- NdbMutex_Lock(ndb_global_event_buffer_mutex);
- if (ndbGlobalEventBuffer->m_handlers.size() == 0) {
- delete ndbGlobalEventBuffer;
- ndbGlobalEventBuffer = NULL;
+ lock();
+ if (p_add_drop_mutex)
+ {
+ NdbMutex_Destroy(p_add_drop_mutex);
+ p_add_drop_mutex = 0;
}
- NdbMutex_Unlock(ndb_global_event_buffer_mutex);
+ unlock();
}
void
-NdbGlobalEventBufferHandle::addBufferId(int bufferId)
+NdbEventBuffer::add_op()
{
- DBUG_ENTER("NdbGlobalEventBufferHandle::addBufferId");
- DBUG_PRINT("enter",("bufferId=%d",bufferId));
- if (m_nids >= NDB_MAX_ACTIVE_EVENTS) {
- ndbout_c("NdbGlobalEventBufferHandle::addBufferId error in paramerer setting");
- exit(-1);
+ if(m_active_op_count == 0)
+ {
+ init_gci_containers();
}
- m_bufferIds[m_nids] = bufferId;
- m_nids++;
- DBUG_VOID_RETURN;
+ m_active_op_count++;
}
void
-NdbGlobalEventBufferHandle::dropBufferId(int bufferId)
-{
- DBUG_ENTER("NdbGlobalEventBufferHandle::dropBufferId");
- DBUG_PRINT("enter",("bufferId=%d",bufferId));
- for (int i = 0; i < m_nids; i++)
- if (m_bufferIds[i] == bufferId) {
- m_nids--;
- for (; i < m_nids; i++)
- m_bufferIds[i] = m_bufferIds[i+1];
- DBUG_VOID_RETURN;
- }
- ndbout_c("NdbGlobalEventBufferHandle::dropBufferId %d does not exist",
- bufferId);
- exit(-1);
-}
-/*
-NdbGlobalEventBufferHandle *
-NdbGlobalEventBufferHandle::init (int MAX_NUMBER_ACTIVE_EVENTS)
-{
- return new NdbGlobalEventBufferHandle();
-}
-void
-NdbGlobalEventBufferHandle::drop(NdbGlobalEventBufferHandle *handle)
-{
- delete handle;
-}
-*/
-int
-NdbGlobalEventBufferHandle::prepareAddSubscribeEvent
-(NdbEventOperationImpl *eventOp, int& hasSubscriber)
-{
- ADD_DROP_LOCK_GUARDR(int,real_prepareAddSubscribeEvent(this, eventOp,
- hasSubscriber));
-}
-void
-NdbGlobalEventBufferHandle::addSubscribeEvent
-(int bufferId, NdbEventOperationImpl *ndbEventOperationImpl)
+NdbEventBuffer::remove_op()
{
- ADD_DROP_UNLOCK_GUARD(real_addSubscribeEvent(bufferId, ndbEventOperationImpl));
+ m_active_op_count--;
}
+
void
-NdbGlobalEventBufferHandle::unprepareAddSubscribeEvent(int bufferId)
+NdbEventBuffer::init_gci_containers()
{
- ADD_DROP_UNLOCK_GUARD(real_unprepareAddSubscribeEvent(bufferId));
+ bzero(&m_complete_data, sizeof(m_complete_data));
+ m_latest_complete_GCI = m_latestGCI = 0;
+ m_active_gci.clear();
+ m_active_gci.fill(2 * ACTIVE_GCI_DIRECTORY_SIZE - 1, g_empty_gci_container);
}
-int
-NdbGlobalEventBufferHandle::prepareDropSubscribeEvent(int bufferId,
- int& hasSubscriber)
+int NdbEventBuffer::expand(unsigned sz)
{
- ADD_DROP_LOCK_GUARDR(int,real_prepareDropSubscribeEvent(bufferId, hasSubscriber));
-}
+ unsigned alloc_size=
+ sizeof(EventBufData_chunk) +(sz-1)*sizeof(EventBufData);
+ EventBufData_chunk *chunk_data=
+ (EventBufData_chunk *)NdbMem_Allocate(alloc_size);
-void
-NdbGlobalEventBufferHandle::unprepareDropSubscribeEvent(int bufferId)
-{
- ADD_DROP_UNLOCK_GUARD(real_unprepareDropSubscribeEvent(bufferId));
-}
+ m_total_alloc+= alloc_size;
-void
-NdbGlobalEventBufferHandle::dropSubscribeEvent(int bufferId)
-{
- ADD_DROP_UNLOCK_GUARD(real_dropSubscribeEvent(bufferId));
-}
+ chunk_data->sz= sz;
+ m_allocated_data.push_back(chunk_data);
-int
-NdbGlobalEventBufferHandle::insertDataL(int bufferId,
- const SubTableData * const sdata,
- LinearSectionPtr ptr[3])
-{
- GUARDR(int,real_insertDataL(bufferId,sdata,ptr));
-}
-
-void
-NdbGlobalEventBufferHandle::latestGCI(int bufferId, Uint32 gci)
-{
- GUARD(real_latestGCI(bufferId,gci));
-}
-
-Uint32
-NdbGlobalEventBufferHandle::getLatestGCI()
-{
- GUARDR(Uint32, real_getLatestGCI());
-}
-
-inline void
-NdbGlobalEventBufferHandle::group_lock()
-{
- ndbGlobalEventBuffer->group_lock();
-}
+ EventBufData *data= chunk_data->data;
+ EventBufData *end_data= data+sz;
+ EventBufData *last_data= m_free_data;
-inline void
-NdbGlobalEventBufferHandle::group_unlock()
-{
- ndbGlobalEventBuffer->group_unlock();
+ bzero((void*)data, sz*sizeof(EventBufData));
+ for (; data < end_data; data++)
+ {
+ data->m_next= last_data;
+ last_data= data;
+ }
+ m_free_data= last_data;
+
+ m_sz+= sz;
+#ifdef VM_TRACE
+ m_free_data_count+= sz;
+#endif
+ return 0;
}
int
-NdbGlobalEventBufferHandle::wait(int aMillisecondNumber)
+NdbEventBuffer::pollEvents(int aMillisecondNumber, Uint64 *latestGCI)
{
- GUARDR(int, real_wait(this, aMillisecondNumber));
+ int ret= 1;
+#ifdef VM_TRACE
+ const char *m_latest_command_save= m_latest_command;
+ m_latest_command= "NdbEventBuffer::pollEvents";
+#endif
+
+ NdbMutex_Lock(m_mutex);
+ NdbEventOperationImpl *ev_op= move_data();
+ if (unlikely(ev_op == 0))
+ {
+ NdbCondition_WaitTimeout(p_cond, m_mutex, aMillisecondNumber);
+ ev_op= move_data();
+ if (unlikely(ev_op == 0))
+ ret= 0;
+ }
+ if (latestGCI)
+ *latestGCI= m_latestGCI;
+#ifdef VM_TRACE
+ if (ev_op)
+ {
+ // m_mutex is locked
+ // update event ops data counters
+ ev_op->m_data_count-= ev_op->m_data_done_count;
+ ev_op->m_data_done_count= 0;
+ }
+ m_latest_command= m_latest_command_save;
+#endif
+ NdbMutex_Unlock(m_mutex); // we have moved the data
+ return ret;
}
-int NdbGlobalEventBufferHandle::getDataL(const int bufferId,
- SubTableData * &sdata,
- LinearSectionPtr ptr[3],
- int *pOverrun)
+NdbEventOperation *
+NdbEventBuffer::nextEvent()
{
- GUARDR(int,real_getDataL(bufferId,sdata,ptr,pOverrun));
-}
+ DBUG_ENTER("NdbEventBuffer::nextEvent");
+#ifdef VM_TRACE
+ const char *m_latest_command_save= m_latest_command;
+#endif
-/*
- * Class NdbGlobalEventBuffer
- *
- *
- */
+ if (m_used_data.m_count > 1024)
+ {
+#ifdef VM_TRACE
+ m_latest_command= "NdbEventBuffer::nextEvent (lock)";
+#endif
+ NdbMutex_Lock(m_mutex);
+ // return m_used_data to m_free_data
+ free_list(m_used_data);
+
+ NdbMutex_Unlock(m_mutex);
+ }
+#ifdef VM_TRACE
+ m_latest_command= "NdbEventBuffer::nextEvent";
+#endif
+
+ EventBufData *data;
+ while ((data= m_available_data.m_head))
+ {
+ NdbEventOperationImpl *op= data->m_event_op;
+
+ // set NdbEventOperation data
+ op->m_data_item= data;
+
+ // remove item from m_available_data
+ m_available_data.remove_first();
+ // add it to used list
+ m_used_data.append(data);
+
+#ifdef VM_TRACE
+ op->m_data_done_count++;
+#endif
+
+ int r= op->receive_event();
+ if (r > 0)
+ {
+ if (op->m_state == NdbEventOperation::EO_EXECUTING)
+ {
+#ifdef VM_TRACE
+ m_latest_command= m_latest_command_save;
+#endif
+ DBUG_RETURN(op->m_facade);
+ }
+ // the next event belonged to an event op that is no
+ // longer valid, skip to next
+ continue;
+ }
+#ifdef VM_TRACE
+ m_latest_command= m_latest_command_save;
+#endif
+ }
+ m_error.code= 0;
+#ifdef VM_TRACE
+ m_latest_command= m_latest_command_save;
+#endif
+ DBUG_RETURN(0);
+}
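
The pollEvents()/nextEvent() pair above replaces the old per-operation next()/wait() interface: pollEvents() waits for a complete epoch to be handed over, and nextEvent() then walks the buffered events in global order. A minimal consumer loop over the public Ndb facade might look as follows; the event name, column name, and the exact facade signatures are assumptions based on this patch, not verified against the final headers.

#include <NdbApi.hpp>

// Hypothetical consumer loop for the new event buffer API; "MY_EVENT" and
// column "ID" are placeholders, and the event must already exist in the
// dictionary.  Error handling is omitted.
void consume_events(Ndb* ndb)
{
  NdbEventOperation* op = ndb->createEventOperation("MY_EVENT");
  NdbRecAttr* id_after = op->getValue("ID");     // post-image of column ID
  op->execute();                                 // start the subscription

  for (int rounds = 0; rounds < 60; rounds++)
  {
    if (ndb->pollEvents(1000) <= 0)              // wait up to 1s for an epoch
      continue;
    NdbEventOperation* ev;
    while ((ev = ndb->nextEvent()) != 0)         // drain events in GCI order
    {
      switch (ev->getEventType())
      {
      case NdbDictionary::Event::TE_INSERT:
      case NdbDictionary::Event::TE_UPDATE:
      case NdbDictionary::Event::TE_DELETE:
        // id_after now refers to the row image of the current event of 'op'
        break;
      default:
        break;                                   // non-data events
      }
    }
  }
  ndb->dropEventOperation(op);
}
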
void
-NdbGlobalEventBuffer::lock()
+NdbEventBuffer::lock()
{
- if (!m_group_lock_flag)
- NdbMutex_Lock(ndb_global_event_buffer_mutex);
+ NdbMutex_Lock(m_mutex);
}
void
-NdbGlobalEventBuffer::unlock()
+NdbEventBuffer::unlock()
{
- if (!m_group_lock_flag)
- NdbMutex_Unlock(ndb_global_event_buffer_mutex);
+ NdbMutex_Unlock(m_mutex);
}
void
-NdbGlobalEventBuffer::add_drop_lock()
+NdbEventBuffer::add_drop_lock()
{
NdbMutex_Lock(p_add_drop_mutex);
}
void
-NdbGlobalEventBuffer::add_drop_unlock()
+NdbEventBuffer::add_drop_unlock()
{
NdbMutex_Unlock(p_add_drop_mutex);
}
-inline void
-NdbGlobalEventBuffer::group_lock()
-{
- lock();
- m_group_lock_flag = 1;
-}
-inline void
-NdbGlobalEventBuffer::group_unlock()
+static
+NdbOut&
+operator<<(NdbOut& out, const Gci_container& gci)
{
- m_group_lock_flag = 0;
- unlock();
+ out << "[ GCI: " << gci.m_gci
+ << " state: " << hex << gci.m_state
+ << " head: " << hex << gci.m_data.m_head
+ << " tail: " << hex << gci.m_data.m_tail
+#ifdef VM_TRACE
+ << " cnt: " << dec << gci.m_data.m_count
+#endif
+ << " gcp: " << dec << gci.m_gcp_complete_rep_count
+ << "]";
+ return out;
}
-void
-NdbGlobalEventBuffer::lockB(int bufferId)
-{
- NdbMutex_Lock(m_buf[ID(bufferId)].p_buf_mutex);
-}
-void
-NdbGlobalEventBuffer::unlockB(int bufferId)
+static
+Gci_container*
+find_bucket_chained(Vector<Gci_container> * active, Uint64 gci)
{
- NdbMutex_Lock(m_buf[ID(bufferId)].p_buf_mutex);
-}
-
-// Private methods
+ Uint32 pos = (gci & ACTIVE_GCI_MASK);
+ Gci_container *bucket= active->getBase() + pos;
-NdbGlobalEventBuffer::NdbGlobalEventBuffer() :
- m_handlers(),
- m_group_lock_flag(0),
- m_latestGCI(0),
- m_no(0) // must start at ZERO!
-{
- if ((p_add_drop_mutex = NdbMutex_Create()) == NULL) {
- ndbout_c("NdbGlobalEventBuffer: NdbMutex_Create() failed");
- exit(-1);
+ if(gci > bucket->m_gci)
+ {
+ Gci_container* move;
+ Uint32 move_pos = pos + ACTIVE_GCI_DIRECTORY_SIZE;
+ do
+ {
+ active->fill(move_pos, g_empty_gci_container);
+ bucket = active->getBase() + pos; // Needs to be recomputed after fill
+ move = active->getBase() + move_pos;
+ if(move->m_gcp_complete_rep_count == 0)
+ {
+ memcpy(move, bucket, sizeof(Gci_container));
+ bzero(bucket, sizeof(Gci_container));
+ bucket->m_gci = gci;
+ bucket->m_gcp_complete_rep_count = ~(Uint32)0;
+ return bucket;
+ }
+ move_pos += ACTIVE_GCI_DIRECTORY_SIZE;
+ } while(true);
+ }
+ else /** gci < bucket->m_gci */
+ {
+ Uint32 size = active->size() - ACTIVE_GCI_DIRECTORY_SIZE;
+ do
+ {
+ pos += ACTIVE_GCI_DIRECTORY_SIZE;
+ bucket += ACTIVE_GCI_DIRECTORY_SIZE;
+
+ if(bucket->m_gci == gci)
+ return bucket;
+
+ } while(pos < size);
+
+ return 0;
}
}
-NdbGlobalEventBuffer::~NdbGlobalEventBuffer()
+inline
+Gci_container*
+find_bucket(Vector<Gci_container> * active, Uint64 gci)
{
- NdbMutex_Destroy(p_add_drop_mutex);
- // NdbMem_Deallocate(m_eventBufferIdToEventId);
+ Uint32 pos = (gci & ACTIVE_GCI_MASK);
+ Gci_container *bucket= active->getBase() + pos;
+ if(likely(gci == bucket->m_gci))
+ return bucket;
+
+ return find_bucket_chained(active,gci);
}
+
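
find_bucket() above is a direct-mapped lookup: the low bits of the GCI select one of ACTIVE_GCI_DIRECTORY_SIZE home slots, and find_bucket_chained() handles collisions by growing the vector and probing the same offset in steps of the directory size. A small standalone illustration of the index arithmetic, with the constants copied from the top of this file:

#include <cstdio>

static const unsigned ACTIVE_GCI_DIRECTORY_SIZE = 4;
static const unsigned ACTIVE_GCI_MASK = ACTIVE_GCI_DIRECTORY_SIZE - 1;

int main()
{
  for (unsigned long long gci = 17; gci < 21; gci++)
  {
    unsigned pos = (unsigned)(gci & ACTIVE_GCI_MASK);     // home slot
    // if the home slot holds an older epoch, probing continues at
    // pos + k * ACTIVE_GCI_DIRECTORY_SIZE for k = 1, 2, ...
    printf("gci %llu -> slot %u, overflow slots %u, %u, ...\n",
           gci, pos,
           pos + ACTIVE_GCI_DIRECTORY_SIZE,
           pos + 2 * ACTIVE_GCI_DIRECTORY_SIZE);
  }
  return 0;
}
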
void
-NdbGlobalEventBuffer::real_init (NdbGlobalEventBufferHandle *h,
- int MAX_NUMBER_ACTIVE_EVENTS)
+NdbEventBuffer::execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep)
{
- DBUG_ENTER("NdbGlobalEventBuffer::real_init");
- DBUG_PRINT("enter",("m_handles.size()=%u %u", m_handlers.size(), h));
- if (m_handlers.size() == 0)
- { // First init
- DBUG_PRINT("info",("first to come"));
- m_max = MAX_NUMBER_ACTIVE_EVENTS;
- m_buf = new BufItem[m_max];
- for (int i=0; i<m_max; i++) {
- m_buf[i].gId= 0;
+ DBUG_ENTER("NdbEventBuffer::execSUB_GCP_COMPLETE_REP");
+
+ const Uint64 gci= rep->gci;
+ const Uint32 cnt= rep->gcp_complete_rep_count;
+
+ Gci_container *bucket = find_bucket(&m_active_gci, gci);
+
+ if (unlikely(m_active_op_count == 0))
+ {
+ DBUG_VOID_RETURN;
+ }
+
+ if (unlikely(bucket == 0))
+ {
+ /**
+ * Already completed GCI...
+ * Possible in case of resend during NF handling
+ */
+ ndbout << "bucket == 0, gci:" << gci
+ << " complete: " << m_complete_data << endl;
+ for(Uint32 i = 0; i<m_active_gci.size(); i++)
+ {
+ ndbout << i << " - " << m_active_gci[i] << endl;
}
+ DBUG_VOID_RETURN;
}
- assert(m_max == MAX_NUMBER_ACTIVE_EVENTS);
- // TODO make sure we don't hit roof
- m_handlers.push_back(h);
- DBUG_VOID_RETURN;
-}
-void
-NdbGlobalEventBuffer::real_remove(NdbGlobalEventBufferHandle *h)
-{
- DBUG_ENTER("NdbGlobalEventBuffer::real_remove");
- DBUG_PRINT("enter",("m_handles.size()=%u %u", m_handlers.size(), h));
- for (Uint32 i=0 ; i < m_handlers.size(); i++)
+
+ Uint32 old_cnt = bucket->m_gcp_complete_rep_count;
+ if(unlikely(old_cnt == ~(Uint32)0))
{
- DBUG_PRINT("info",("m_handlers[%u] %u", i, m_handlers[i]));
- if (m_handlers[i] == h)
+ old_cnt = m_system_nodes;
+ }
+
+ assert(old_cnt >= cnt);
+ bucket->m_gcp_complete_rep_count = old_cnt - cnt;
+
+ if(old_cnt == cnt)
+ {
+ if(likely(gci == m_latestGCI + 1 || m_latestGCI == 0))
{
- m_handlers.erase(i);
- if (m_handlers.size() == 0)
+ m_latestGCI = m_complete_data.m_gci = gci; // before reportStatus
+ if(!bucket->m_data.is_empty())
{
- DBUG_PRINT("info",("last to go"));
- delete[] m_buf;
- m_buf = NULL;
+#ifdef VM_TRACE
+ assert(bucket->m_data.m_count);
+#endif
+ m_complete_data.m_data.append(bucket->m_data);
+ reportStatus();
}
- DBUG_VOID_RETURN;
+ bzero(bucket, sizeof(Gci_container));
+ bucket->m_gci = gci + ACTIVE_GCI_DIRECTORY_SIZE;
+ bucket->m_gcp_complete_rep_count = m_system_nodes;
+ if(unlikely(m_latest_complete_GCI > gci))
+ {
+ complete_outof_order_gcis();
+ }
+
+ // signal that something has happened
+
+ NdbCondition_Signal(p_cond);
+ }
+ else
+ {
+ /** out of order something */
+ ndbout_c("out of order bucket: %d gci: %lld m_latestGCI: %lld",
+ bucket-m_active_gci.getBase(), gci, m_latestGCI);
+ bucket->m_state = Gci_container::GC_COMPLETE;
+ bucket->m_gcp_complete_rep_count = 1; // Prevent from being reused
+ m_latest_complete_GCI = gci;
}
}
- ndbout_c("NdbGlobalEventBuffer::real_remove() non-existing handle");
- DBUG_PRINT("error",("non-existing handle"));
- abort();
+
DBUG_VOID_RETURN;
}
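
execSUB_GCP_COMPLETE_REP() above counts one report per data node into the epoch's bucket; only when the count reaches zero and the epoch is the next in sequence is its data appended to m_complete_data and m_latestGCI advanced, while out-of-sequence completions are parked as GC_COMPLETE. A reduced sketch of just that counting rule, with the NDB types stripped out:

// The real bucket also carries its buffered event data and a state word,
// and its counter is seeded with the number of data nodes (m_system_nodes).
struct Bucket { unsigned long long gci; unsigned reps_left; };

// true when epoch 'b.gci' is fully reported and handed over in order
bool gcp_complete(Bucket& b, unsigned reports_in_signal,
                  unsigned long long& latest_handed_over)
{
  b.reps_left -= reports_in_signal;         // one SUB_GCP_COMPLETE_REP per node
  if (b.reps_left != 0)
    return false;                           // still waiting for other nodes
  if (latest_handed_over == 0 || b.gci == latest_handed_over + 1)
  {
    latest_handed_over = b.gci;             // in sequence: hand over now
    return true;
  }
  return false;                             // out of order: parked (GC_COMPLETE)
}
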
-int
-NdbGlobalEventBuffer::real_prepareAddSubscribeEvent
-(NdbGlobalEventBufferHandle *aHandle, NdbEventOperationImpl *eventOp,
- int& hasSubscriber)
+void
+NdbEventBuffer::complete_outof_order_gcis()
{
- DBUG_ENTER("NdbGlobalEventBuffer::real_prepareAddSubscribeEvent");
- int i;
- int bufferId= -1;
- Uint32 eventId= eventOp->m_eventId;
-
- DBUG_PRINT("enter",("eventId: %u", eventId));
- // add_drop_lock(); // only one thread can do add or drop at a time
-
- // Find place where eventId already set
- for (i=0; i<m_no; i++) {
- if (m_buf[i].gId == eventId) {
- bufferId= i;
- break;
- }
+ Uint64 start_gci = m_latestGCI + 1;
+ Uint64 stop_gci = m_latest_complete_GCI;
+
+ const Uint32 size = m_active_gci.size();
+ Gci_container* array= m_active_gci.getBase();
+
+ ndbout_c("complete_outof_order_gcis");
+ for(Uint32 i = 0; i<size; i++)
+ {
+ ndbout << i << " - " << array[i] << endl;
}
- if (bufferId < 0) {
- // find space for new bufferId
- for (i=0; i<m_no; i++) {
- if (m_buf[i].gId == 0) {
- bufferId= i; // we found an empty spot
- goto found_bufferId;
+
+ for(; start_gci <= stop_gci; start_gci++)
+ {
+ /**
+ * Find gci
+ */
+ Uint32 i;
+ Gci_container* bucket= 0;
+ for(i = 0; i<size; i++)
+ {
+ Gci_container* tmp = array + i;
+ if(tmp->m_gci == start_gci && tmp->m_state == Gci_container::GC_COMPLETE)
+ {
+ bucket= tmp;
+ break;
}
}
- if (bufferId < 0 &&
- m_no < m_max) {
- // room for more so get that
- bufferId= m_no;
- m_buf[m_no].gId= 0;
- m_no++;
- } else {
- // add_drop_unlock();
- DBUG_PRINT("error",("Can't accept more subscribers:"
- " bufferId=%d, m_no=%d, m_max=%d",
- bufferId, m_no, m_max));
- DBUG_RETURN(-1);
- }
- }
-found_bufferId:
-
- BufItem &b= m_buf[ID(bufferId)];
-
- if (b.gId == 0) { // first subscriber needs some initialization
-
- bufferId= NO_ID(0, bufferId);
-
- b.gId= eventId;
- b.eventType= (Uint32)eventOp->m_eventImpl->mi_type;
-
- if ((b.p_buf_mutex= NdbMutex_Create()) == NULL) {
- ndbout_c("NdbGlobalEventBuffer: NdbMutex_Create() failed");
- abort();
+ if(bucket == 0)
+ {
+ break;
}
- b.subs= 0;
- b.f= 0;
- b.sz= 0;
- b.max_sz= aHandle->m_bufferL;
- b.data=
- (BufItem::Data *)NdbMem_Allocate(b.max_sz*sizeof(BufItem::Data));
- for (int i = 0; i < b.max_sz; i++) {
- b.data[i].sdata= NULL;
- b.data[i].ptr[0].p= NULL;
- b.data[i].ptr[1].p= NULL;
- b.data[i].ptr[2].p= NULL;
- }
- } else {
- DBUG_PRINT("info",
- ("TRYING handle one subscriber per event b.subs=%u",b.subs));
- int ni = -1;
- for(int i=0; i < b.subs;i++) {
- if (b.ps[i].theHandle == NULL) {
- ni = i;
- break;
- }
+ printf("complete_outof_order_gcis - completing %lld", start_gci);
+ if(!bucket->m_data.is_empty())
+ {
+#ifdef VM_TRACE
+ assert(bucket->m_data.m_count);
+#endif
+ m_complete_data.m_data.append(bucket->m_data);
+#ifdef VM_TRACE
+ ndbout_c(" moved %lld rows -> %lld", bucket->m_data.m_count,
+ m_complete_data.m_data.m_count);
+#else
+ ndbout_c("");
+#endif
}
- if (ni < 0) {
- if (b.subs < MAX_SUBSCRIBERS_PER_EVENT) {
- ni = b.subs;
- } else {
- DBUG_PRINT("error",
- ("Can't accept more subscribers: b.subs=%d",b.subs));
- // add_drop_unlock();
- DBUG_RETURN(-1);
- }
+ bzero(bucket, sizeof(Gci_container));
+ if(i < ACTIVE_GCI_DIRECTORY_SIZE)
+ {
+ bucket->m_gci = start_gci + ACTIVE_GCI_DIRECTORY_SIZE;
+ bucket->m_gcp_complete_rep_count = m_system_nodes;
}
- bufferId = NO_ID(ni, bufferId);
- }
-
- // initialize BufItem::Ps
- {
- int n = NO(bufferId);
- NdbGlobalEventBuffer::BufItem::Ps &e = b.ps[n];
- e.theHandle = aHandle;
- e.b=0;
- e.bufferempty = 1;
- e.overrun=0; // set to -1 to handle first insert
+
+ m_latestGCI = m_complete_data.m_gci = start_gci;
}
-
- if (b.subs > 0)
- hasSubscriber = 1;
- else
- hasSubscriber = 0;
-
- DBUG_PRINT("info",("handed out bufferId=%d for eventId=%d hasSubscriber=%d",
- bufferId, eventId, hasSubscriber));
-
- /* we now have a lock on the prepare so that no one can mess with this
- * unlock comes in unprepareAddSubscribeEvent or addSubscribeEvent
- */
- DBUG_RETURN(bufferId);
+
+ ndbout_c("complete_outof_order_gcis: m_latestGCI: %lld", m_latestGCI);
}
void
-NdbGlobalEventBuffer::real_unprepareAddSubscribeEvent(int bufferId)
+NdbEventBuffer::completeClusterFailed()
{
- DBUG_ENTER("NdbGlobalEventBuffer::real_unprepareAddSubscribeEvent");
- BufItem &b = m_buf[ID(bufferId)];
- int n = NO(bufferId);
+ DBUG_ENTER("NdbEventBuffer::completeClusterFailed");
- DBUG_PRINT("enter", ("bufferId=%d,ID(bufferId)=%d,NO(bufferId)=%d",
- bufferId, ID(bufferId), NO(bufferId)));
+ SubTableData data;
+ LinearSectionPtr ptr[3];
+ bzero(&data, sizeof(data));
+ bzero(ptr, sizeof(ptr));
- b.ps[n].theHandle = NULL;
-
- // remove subscribers from the end,
- // we have to keep gaps since the position
- // has been handed out in bufferId
- for (int i = b.subs-1; i >= 0; i--)
- if (b.ps[i].theHandle == NULL)
- b.subs--;
- else
- break;
+ data.tableId = ~0;
+ data.operation = NdbDictionary::Event::_TE_CLUSTER_FAILURE;
+ data.logType = SubTableData::LOG;
- if (b.subs == 0) {
- DBUG_PRINT("info",("no more subscribers left on eventId %d", b.gId));
- b.gId= 0; // We don't have any subscribers, reuse BufItem
- if (b.data) {
- NdbMem_Free((void *)b.data);
- b.data = NULL;
- }
- if (b.p_buf_mutex) {
- NdbMutex_Destroy(b.p_buf_mutex);
- b.p_buf_mutex = NULL;
+ /**
+ * Find min not completed GCI
+ */
+ Uint32 sz= m_active_gci.size();
+ Uint64 gci= ~0;
+ Gci_container* bucket = 0;
+ Gci_container* array = m_active_gci.getBase();
+ for(Uint32 i = 0; i<sz; i++)
+ {
+ if(array[i].m_gcp_complete_rep_count && array[i].m_gci < gci)
+ {
+ bucket= array + i;
+ gci = bucket->m_gci;
}
}
- // add_drop_unlock();
- DBUG_VOID_RETURN;
-}
-void
-NdbGlobalEventBuffer::real_addSubscribeEvent(int bufferId,
- void *ndbEventOperation)
-{
- DBUG_ENTER("NdbGlobalEventBuffer::real_addSubscribeEvent");
- BufItem &b = m_buf[ID(bufferId)];
- int n = NO(bufferId);
+ if(bucket == 0)
+ {
+ /**
+ * Did not find any uncompleted GCIs,
+ * let's fake one...
+ */
+ gci = m_latestGCI + 1;
+ bucket = array + ( gci & ACTIVE_GCI_MASK );
+ bucket->m_gcp_complete_rep_count = 1;
+ }
+
+ const Uint32 cnt= bucket->m_gcp_complete_rep_count = 1;
- b.subs++;
- b.ps[n].theHandle->addBufferId(bufferId);
+ /**
+ * Release all GCI's
+ */
+ for(Uint32 i = 0; i<sz; i++)
+ {
+ Gci_container* tmp = array + i;
+ if(!tmp->m_data.is_empty())
+ {
+ free_list(tmp->m_data);
+#if 0
+ m_free_data_count++;
+ EventBufData* loop= tmp->m_head;
+ while(loop != tmp->m_tail)
+ {
+ m_free_data_count++;
+ loop = loop->m_next;
+ }
+#endif
+ }
+ bzero(tmp, sizeof(Gci_container));
+ }
+
+ bucket->m_gci = gci;
+ bucket->m_gcp_complete_rep_count = cnt;
+
+ data.gci = gci;
+
+ /**
+ * Insert this event for each operation
+ */
+ NdbEventOperation* op= 0;
+ while((op = m_ndb->getEventOperation(op)))
+ {
+ NdbEventOperationImpl* impl= &op->m_impl;
+ data.senderData = impl->m_oid;
+ insertDataL(impl, &data, ptr);
+ }
+
+ /**
+ * And finally complete this GCI
+ */
+ SubGcpCompleteRep rep;
+ rep.gci= gci;
+ rep.gcp_complete_rep_count= cnt;
+ execSUB_GCP_COMPLETE_REP(&rep);
- // add_drop_unlock();
- DBUG_PRINT("info",("added bufferId %d", bufferId));
DBUG_VOID_RETURN;
}
-void
-NdbGlobalEventBuffer::real_unprepareDropSubscribeEvent(int bufferId)
+Uint64
+NdbEventBuffer::getLatestGCI()
{
- // add_drop_unlock(); // only one thread can do add or drop at a time
+ return m_latestGCI;
}
-int
-NdbGlobalEventBuffer::real_prepareDropSubscribeEvent(int bufferId,
- int& hasSubscriber)
+int
+NdbEventBuffer::insertDataL(NdbEventOperationImpl *op,
+ const SubTableData * const sdata,
+ LinearSectionPtr ptr[3])
{
- DBUG_ENTER("NdbGlobalEventBuffer::real_prepareDropSubscribeEvent");
- // add_drop_lock(); // only one thread can do add or drop at a time
-
- BufItem &b = m_buf[ID(bufferId)];
+ DBUG_ENTER("NdbEventBuffer::insertDataL");
- int n = 0;
- for(int i=0; i < b.subs;i++) {
- if (b.ps[i].theHandle != NULL)
- n++;
- }
+ Uint64 gci= sdata->gci;
+ EventBufData *data= m_free_data;
- if (n > 1)
- hasSubscriber = 1;
- else if (n == 1)
- hasSubscriber = 0;
- else
+ if ( likely((Uint32)op->mi_type & 1 << (Uint32)sdata->operation) )
{
- DBUG_RETURN(-1);
- }
-
- DBUG_RETURN(0);
-}
-
-void
-NdbGlobalEventBuffer::real_dropSubscribeEvent(int bufferId)
-{
- DBUG_ENTER("NdbGlobalEventBuffer::real_dropSubscribeEvent");
- // add_drop_lock(); // only one thread can do add-drop at a time
+ Gci_container* bucket= find_bucket(&m_active_gci, gci);
+
+ DBUG_PRINT("info", ("data insertion in eventId %d", op->m_eventId));
- BufItem &b = m_buf[ID(bufferId)];
- int n = NO(bufferId);
+ if (unlikely(bucket == 0))
+ {
+ /**
+ * Already completed GCI...
+ * Possible in case of resend during NF handling
+ */
+ DBUG_RETURN(0);
+ }
- b.ps[n].overrun=0;
- b.ps[n].bufferempty=1;
- b.ps[n].b=0;
- b.ps[n].theHandle->dropBufferId(bufferId);
+ if (unlikely(data == 0))
+ {
+#ifdef VM_TRACE
+ assert(m_free_data_count == 0);
+ assert(m_free_data_sz == 0);
+#endif
+ expand(4000);
+ reportStatus();
- real_unprepareAddSubscribeEvent(bufferId); // does add_drop_unlock();
+ data= m_free_data;
+ if (unlikely(data == 0))
+ {
+#ifdef VM_TRACE
+ printf("m_latest_command: %s\n", m_latest_command);
+ printf("no free data, m_latestGCI %lld\n",
+ m_latestGCI);
+ printf("m_free_data_count %d\n", m_free_data_count);
+ printf("m_available_data_count %d first gci %d last gci %d\n",
+ m_available_data.m_count,
+ m_available_data.m_head ? m_available_data.m_head->sdata->gci : 0,
+ m_available_data.m_tail ? m_available_data.m_tail->sdata->gci : 0);
+ printf("m_used_data_count %d\n", m_used_data.m_count);
+#endif
+ op->m_has_error= 2;
+ DBUG_RETURN(-1); // TODO handle this, overrun, or, skip?
+ }
+ }
-#ifdef EVENT_DEBUG
- ndbout_c("dropSubscribeEvent:: dropped bufferId %d", bufferId);
+ // remove data from free list
+ m_free_data= data->m_next;
+#ifdef VM_TRACE
+ m_free_data_count--;
+ assert(m_free_data_sz >= data->sz);
#endif
- DBUG_VOID_RETURN;
-}
+ m_free_data_sz-= data->sz;
-void
-NdbGlobalEventBuffer::real_latestGCI(int bufferId, Uint32 gci)
-{
- if (gci > m_latestGCI)
- m_latestGCI = gci;
- else if ((m_latestGCI-gci) > 0xffff) // If NDB stays up :-)
- m_latestGCI = gci;
-}
+ if (unlikely(copy_data_alloc(sdata, ptr, data)))
+ {
+ op->m_has_error= 3;
+ DBUG_RETURN(-1);
+ }
-Uint32
-NdbGlobalEventBuffer::real_getLatestGCI()
-{
- return m_latestGCI;
-}
+ // add it to received data
+ bucket->m_data.append(data);
-int
-NdbGlobalEventBuffer::real_insertDataL(int bufferId,
- const SubTableData * const sdata,
- LinearSectionPtr ptr[3])
-{
- DBUG_ENTER("NdbGlobalEventBuffer::real_insertDataL");
- BufItem &b = m_buf[ID(bufferId)];
-#ifdef EVENT_DEBUG
- int n = NO(bufferId);
+ data->m_event_op= op;
+#ifdef VM_TRACE
+ op->m_data_count++;
#endif
+ DBUG_RETURN(0);
+ }
- if ( b.eventType & (1 << (Uint32)sdata->operation) )
+#ifdef VM_TRACE
+ if ((Uint32)op->m_eventImpl->mi_type & 1 << (Uint32)sdata->operation)
{
- if (b.subs) {
-#ifdef EVENT_DEBUG
- ndbout_c("data insertion in buffer %d with eventId %d", bufferId, b.gId);
-#endif
- // move front forward
- if (copy_data_alloc(sdata, ptr,
- b.data[b.f].sdata, b.data[b.f].ptr))
- {
- DBUG_RETURN(-1);
- }
- for (int i=0; i < b.subs; i++) {
- NdbGlobalEventBuffer::BufItem::Ps &e = b.ps[i];
- if (e.theHandle) { // active subscriber
- if (b.f == e.b) { // next-to-read == written
- if (e.bufferempty == 0) {
- e.overrun++; // another item has been overwritten
- e.b++; // move next-to-read next since old item was overwritten
- if (e.b == b.max_sz) e.b= 0; // start from beginning
- }
- }
- e.bufferempty = 0;
- // signal subscriber that there's more to get
- NdbCondition_Signal(e.theHandle->p_cond);
- }
- }
- b.f++; // move next-to-write
- if (b.f == b.max_sz) b.f = 0; // start from beginning
-#ifdef EVENT_DEBUG
- ndbout_c("Front= %d Back = %d overun = %d", b.f,
- b.ps[n].b, b.ps[n].overrun);
+ DBUG_PRINT("info",("Data arrived before ready eventId", op->m_eventId));
+ DBUG_RETURN(0);
+ }
+ else {
+ DBUG_PRINT("info",("skipped"));
+ DBUG_RETURN(0);
+ }
+#else
+ return 0;
#endif
- } else {
-#ifdef EVENT_DEBUG
- ndbout_c("Data arrived before ready eventId", b.gId);
+}
+
+int
+NdbEventBuffer::copy_data_alloc(const SubTableData * const f_sdata,
+ LinearSectionPtr f_ptr[3],
+ EventBufData *ev_buf)
+{
+ DBUG_ENTER("NdbEventBuffer::copy_data_alloc");
+ const unsigned min_alloc_size= 128;
+ const unsigned sz4= (sizeof(SubTableData)+3)>>2;
+ Uint32 f_ptr_sz_0= f_ptr[0].sz;
+ Uint32 f_ptr_sz_1= f_ptr[1].sz;
+ Uint32 f_ptr_sz_2= f_ptr[2].sz;
+ LinearSectionPtr *t_ptr= ev_buf->ptr;
+ SubTableData *sdata= ev_buf->sdata;
+ const unsigned alloc_size= (sz4 +
+ f_ptr_sz_0 +
+ f_ptr_sz_1 +
+ f_ptr_sz_2) * sizeof(Uint32);
+ Uint32 *ptr;
+ if (alloc_size > min_alloc_size)
+ {
+ if (sdata)
+ {
+ NdbMem_Free((char*)sdata);
+#ifdef VM_TRACE
+ assert(m_total_alloc >= ev_buf->sz);
#endif
+ m_total_alloc-= ev_buf->sz;
}
+ ptr= (Uint32*)NdbMem_Allocate(alloc_size);
+ ev_buf->sdata= (SubTableData *)ptr;
+ ev_buf->sz= alloc_size;
+ m_total_alloc+= alloc_size;
}
- else
+ else /* alloc_size <= min_alloc_size */
{
-#ifdef EVENT_DEBUG
- ndbout_c("skipped");
-#endif
+ if (sdata)
+ ptr= (Uint32*)sdata;
+ else
+ {
+ ptr= (Uint32*)NdbMem_Allocate(min_alloc_size);
+ ev_buf->sdata= (SubTableData *)ptr;
+ ev_buf->sz= min_alloc_size;
+ m_total_alloc+= min_alloc_size;
+ }
}
- DBUG_RETURN(0);
-}
+ memcpy(ptr,f_sdata,sizeof(SubTableData));
+ ptr+= sz4;
-int NdbGlobalEventBuffer::hasData(int bufferId) {
- DBUG_ENTER("NdbGlobalEventBuffer::hasData");
- BufItem &b = m_buf[ID(bufferId)];
- int n = NO(bufferId);
- NdbGlobalEventBuffer::BufItem::Ps &e = b.ps[n];
+ t_ptr->p= ptr;
+ t_ptr->sz= f_ptr_sz_0;
- if(e.bufferempty)
- {
- DBUG_RETURN(0);
- }
+ memcpy(ptr, f_ptr[0].p, sizeof(Uint32)*f_ptr_sz_0);
+ ptr+= f_ptr_sz_0;
+ t_ptr++;
- if (b.f <= e.b)
+ t_ptr->p= ptr;
+ t_ptr->sz= f_ptr_sz_1;
+
+ memcpy(ptr, f_ptr[1].p, sizeof(Uint32)*f_ptr_sz_1);
+ ptr+= f_ptr_sz_1;
+ t_ptr++;
+
+ if (f_ptr_sz_2)
{
- DBUG_RETURN(b.max_sz-e.b + b.f);
+ t_ptr->p= ptr;
+ t_ptr->sz= f_ptr_sz_2;
+ memcpy(ptr, f_ptr[2].p, sizeof(Uint32)*f_ptr_sz_2);
}
else
{
- DBUG_RETURN(b.f-e.b);
+ t_ptr->p= 0;
+ t_ptr->sz= 0;
}
+
+ DBUG_RETURN(0);
}
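
copy_data_alloc() packs the SubTableData header and the three linear sections into a single NdbMem_Allocate() block per event, reusing a previous allocation when the total fits within the 128-byte minimum, and points ev_buf->ptr[0..2] into that block. The layout and the size computation, sketched with a hypothetical helper:

// Word layout of the single allocation behind each EventBufData:
//
//   [ SubTableData header | section 0 | section 1 | section 2 ]
//   ^ sdata                ^ ptr[0].p  ^ ptr[1].p  ^ ptr[2].p (0 if empty)
//
// Hypothetical helper mirroring the size computation above:
inline unsigned event_copy_bytes(unsigned header_bytes,
                                 unsigned sz0, unsigned sz1, unsigned sz2)
{
  const unsigned header_words = (header_bytes + 3) >> 2;       // "sz4"
  return (header_words + sz0 + sz1 + sz2) * sizeof(unsigned);  // Uint32 words
}
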
-int NdbGlobalEventBuffer::real_getDataL(const int bufferId,
- SubTableData * &sdata,
- LinearSectionPtr ptr[3],
- int *pOverrun)
+NdbEventOperationImpl *
+NdbEventBuffer::move_data()
{
- DBUG_ENTER("NdbGlobalEventBuffer::real_getDataL");
- BufItem &b = m_buf[ID(bufferId)];
- int n = NO(bufferId);
- NdbGlobalEventBuffer::BufItem::Ps &e = b.ps[n];
-
- if (pOverrun) {
- *pOverrun = e.overrun;
- e.overrun = 0; // if pOverrun is returned to user reset e.overrun
+ // handle received data
+ if (!m_complete_data.m_data.is_empty())
+ {
+ // move this list to last in m_available_data
+ m_available_data.append(m_complete_data.m_data);
+
+ bzero(&m_complete_data, sizeof(m_complete_data));
}
- if (e.bufferempty)
+ // handle used data
+ if (!m_used_data.is_empty())
{
- DBUG_RETURN(0); // nothing to get
+ // return m_used_data to m_free_data
+ free_list(m_used_data);
}
+ if (!m_available_data.is_empty())
+ {
+ DBUG_ENTER("NdbEventBuffer::move_data");
+#ifdef VM_TRACE
+ DBUG_PRINT("exit",("m_available_data_count %u", m_available_data.m_count));
+#endif
+ DBUG_RETURN(m_available_data.m_head->m_event_op);
+ }
+ return 0;
+}
- DBUG_PRINT("info",("ID(bufferId) %d NO(bufferId) %d e.b %d",
- ID(bufferId), NO(bufferId), e.b));
+void
+NdbEventBuffer::free_list(EventBufData_list &list)
+{
+ // return list to m_free_data
+ list.m_tail->m_next= m_free_data;
+ m_free_data= list.m_head;
+#ifdef VM_TRACE
+ m_free_data_count+= list.m_count;
+#endif
+ m_free_data_sz+= list.m_sz;
+
+ // list returned to m_free_data
+ new (&list) EventBufData_list;
+}
- if (copy_data_alloc(b.data[e.b].sdata, b.data[e.b].ptr,
- sdata, ptr))
+NdbEventOperation*
+NdbEventBuffer::createEventOperation(const char* eventName,
+ NdbError &theError)
+{
+ DBUG_ENTER("NdbEventBuffer::createEventOperation");
+ NdbEventOperation* tOp= new NdbEventOperation(m_ndb, eventName);
+ if (tOp == 0)
{
- DBUG_RETURN(-1);
+ theError.code= 4000;
+ DBUG_RETURN(NULL);
}
+ if (tOp->getState() != NdbEventOperation::EO_CREATED) {
+ theError.code= tOp->getNdbError().code;
+ delete tOp;
+ DBUG_RETURN(NULL);
+ }
+ DBUG_RETURN(tOp);
+}
- e.b++; if (e.b == b.max_sz) e.b= 0; // move next-to-read forward
+void
+NdbEventBuffer::dropEventOperation(NdbEventOperation* tOp)
+{
+ NdbEventOperationImpl* op= getEventOperationImpl(tOp);
- if (b.f == e.b) // back has cought up with front
- e.bufferempty = 1;
+ op->stop();
-#ifdef EVENT_DEBUG
- ndbout_c("getting data from buffer %d with eventId %d", bufferId, b.gId);
-#endif
+ op->m_next= m_dropped_ev_op;
+ op->m_prev= 0;
+ if (m_dropped_ev_op)
+ m_dropped_ev_op->m_prev= op;
+ m_dropped_ev_op= op;
- DBUG_RETURN(hasData(bufferId)+1);
-}
-int
-NdbGlobalEventBuffer::copy_data_alloc(const SubTableData * const f_sdata,
- LinearSectionPtr f_ptr[3],
- SubTableData * &t_sdata,
- LinearSectionPtr t_ptr[3])
-{
- DBUG_ENTER("NdbGlobalEventBuffer::copy_data_alloc");
- unsigned sz4= (sizeof(SubTableData)+3)>>2;
- Uint32 *ptr= (Uint32*)NdbMem_Allocate((sz4 +
- f_ptr[0].sz +
- f_ptr[1].sz +
- f_ptr[2].sz) * sizeof(Uint32));
- if (t_sdata)
- NdbMem_Free((char*)t_sdata);
- t_sdata= (SubTableData *)ptr;
- memcpy(t_sdata,f_sdata,sizeof(SubTableData));
- ptr+= sz4;
+ // ToDo, take care of these to be deleted at the
+ // appropriate time, after we are sure that there
+ // are _no_ more events coming
- for (int i = 0; i < 3; i++) {
- LinearSectionPtr & f_p = f_ptr[i];
- LinearSectionPtr & t_p = t_ptr[i];
- if (f_p.sz > 0) {
- t_p.p= (Uint32 *)ptr;
- memcpy(t_p.p, f_p.p, sizeof(Uint32)*f_p.sz);
- ptr+= f_p.sz;
- t_p.sz= f_p.sz;
- } else {
- t_p.p= NULL;
- t_p.sz= 0;
- }
- }
- DBUG_RETURN(0);
+ // delete tOp;
}
-int
-NdbGlobalEventBuffer::real_wait(NdbGlobalEventBufferHandle *h,
- int aMillisecondNumber)
+
+void
+NdbEventBuffer::reportStatus()
{
- DBUG_ENTER("NdbGlobalEventBuffer::real_wait");
- // check if there are anything in any of the buffers
- int i;
- int n = 0;
- for (i = 0; i < h->m_nids; i++)
- n += hasData(h->m_bufferIds[i]);
- if (n)
- {
- DBUG_RETURN(n);
- }
+ EventBufData *apply_buf= m_available_data.m_head;
+ Uint64 apply_gci, latest_gci= m_latestGCI;
+ if (apply_buf == 0)
+ apply_buf= m_complete_data.m_data.m_head;
+ if (apply_buf)
+ apply_gci= apply_buf->sdata->gci;
+ else
+ apply_gci= latest_gci;
- int r = NdbCondition_WaitTimeout(h->p_cond, ndb_global_event_buffer_mutex,
- aMillisecondNumber);
- if (r > 0)
+ if (100*m_free_data_sz < m_free_thresh*m_total_alloc ||
+ latest_gci-apply_gci >= m_gci_slip_thresh)
{
- DBUG_RETURN(-1);
+ Uint32 data[8];
+ data[0]= NDB_LE_EventBufferStatus;
+ data[1]= m_total_alloc-m_free_data_sz;
+ data[2]= m_total_alloc;
+ data[3]= 0;
+ data[4]= apply_gci & ~(Uint32)0;
+ data[5]= apply_gci >> 32;
+ data[6]= latest_gci & ~(Uint32)0;
+ data[7]= latest_gci >> 32;
+ m_ndb->theImpl->send_event_report(data,8);
+#ifdef VM_TRACE
+ assert(m_total_alloc >= m_free_data_sz);
+#endif
}
-
- n = 0;
- for (i = 0; i < h->m_nids; i++)
- n += hasData(h->m_bufferIds[i]);
- DBUG_RETURN(n);
}
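
reportStatus() sends an NDB_LE_EventBufferStatus log event when less than m_free_thresh percent of the allocated buffer is free, or when the application has fallen at least m_gci_slip_thresh epochs behind the cluster; the two 64-bit GCIs are split into 32-bit halves in the 8-word report. The trigger condition on its own, as a small sketch with the default thresholds taken from the constructor above:

bool should_report_status(unsigned long long free_sz,
                          unsigned long long total_alloc,
                          unsigned long long apply_gci,
                          unsigned long long latest_gci,
                          unsigned free_thresh = 10,       // percent
                          unsigned gci_slip_thresh = 3)    // epochs
{
  bool low_on_free_buffer  = 100 * free_sz < free_thresh * total_alloc;
  bool application_lagging = latest_gci - apply_gci >= gci_slip_thresh;
  return low_on_free_buffer || application_lagging;
}
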
-template class Vector<NdbGlobalEventBufferHandle*>;
+template class Vector<Gci_container>;
+template class Vector<NdbEventBuffer::EventBufData_chunk*>;
diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp
index 96958979c76..0436cea66ce 100644
--- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp
@@ -20,14 +20,122 @@
#include <NdbEventOperation.hpp>
#include <signaldata/SumaImpl.hpp>
#include <transporter/TransporterDefinitions.hpp>
+#include <NdbRecAttr.hpp>
+
+#define NDB_EVENT_OP_MAGIC_NUMBER 0xA9F301B4
+
+class NdbEventOperationImpl;
+struct EventBufData
+{
+ union {
+ SubTableData *sdata;
+ char *memory;
+ };
+ LinearSectionPtr ptr[3];
+ unsigned sz;
+ NdbEventOperationImpl *m_event_op;
+ EventBufData *m_next; // Next wrt to global order
+};
+
+class EventBufData_list
+{
+public:
+ EventBufData_list();
+ ~EventBufData_list();
+
+ void remove_first();
+ void append(EventBufData *data);
+ void append(const EventBufData_list &list);
+
+ int is_empty();
+
+ EventBufData *m_head, *m_tail;
+ unsigned m_count;
+ unsigned m_sz;
+};
+
+inline
+EventBufData_list::EventBufData_list()
+ : m_head(0), m_tail(0),
+ m_count(0),
+ m_sz(0)
+{
+}
+
+inline
+EventBufData_list::~EventBufData_list()
+{
+}
+
+
+inline
+int EventBufData_list::is_empty()
+{
+ return m_head == 0;
+}
+
+inline
+void EventBufData_list::remove_first()
+{
+ m_count--;
+ m_sz-= m_head->sz;
+ m_head= m_head->m_next;
+ if (m_head == 0)
+ m_tail= 0;
+}
+
+inline
+void EventBufData_list::append(EventBufData *data)
+{
+ data->m_next= 0;
+ if (m_tail)
+ m_tail->m_next= data;
+ else
+ {
+#ifdef VM_TRACE
+ assert(m_count == 0);
+ assert(m_sz == 0);
+#endif
+ m_head= data;
+ }
+ m_tail= data;
+
+ m_count++;
+ m_sz+= data->sz;
+}
+
+inline
+void EventBufData_list::append(const EventBufData_list &list)
+{
+ if (m_tail)
+ m_tail->m_next= list.m_head;
+ else
+ m_head= list.m_head;
+ m_tail= list.m_tail;
+ m_count+= list.m_count;
+ m_sz+= list.m_sz;
+}
+
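
Note that append(const EventBufData_list&) splices the other list in O(1) but does not clear it; the callers in NdbEventBuffer (move_data(), free_list()) reset the source themselves. A minimal sketch of that splice-then-reset pattern, using placement new as free_list() does (<new> assumed available):

#include <new>

// 'from' is not cleared by append(), so the caller resets it afterwards.
void hand_over(EventBufData_list& to, EventBufData_list& from)
{
  if (!from.is_empty())
  {
    to.append(from);                 // O(1) splice of the whole list
    new (&from) EventBufData_list;   // reset source, as free_list() does
  }
}
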
+struct Gci_container
+{
+ enum State
+ {
+ GC_COMPLETE = 0x1 // GCI is complete, but waiting for out of order
+ };
+
+ Uint32 m_state;
+ Uint32 m_gcp_complete_rep_count; // Remaining SUB_GCP_COMPLETE_REP until done
+ Uint64 m_gci; // GCI
+ EventBufData_list m_data;
+};
-class NdbGlobalEventBufferHandle;
class NdbEventOperationImpl : public NdbEventOperation {
public:
NdbEventOperationImpl(NdbEventOperation &N,
Ndb *theNdb,
- const char* eventName,
- const int bufferLength);
+ const char* eventName);
+ NdbEventOperationImpl(NdbEventOperationImpl&); //unimplemented
+ NdbEventOperationImpl& operator=(const NdbEventOperationImpl&); //unimplemented
~NdbEventOperationImpl();
NdbEventOperation::State getState();
@@ -36,169 +144,177 @@ public:
int stop();
NdbRecAttr *getValue(const char *colName, char *aValue, int n);
NdbRecAttr *getValue(const NdbColumnImpl *, char *aValue, int n);
- static int wait(void *p, int aMillisecondNumber);
- int next(int *pOverrun);
- bool isConsistent();
- Uint32 getGCI();
- Uint32 getLatestGCI();
+ int receive_event();
+ Uint64 getGCI();
+ Uint64 getLatestGCI();
NdbDictionary::Event::TableEvent getEventType();
- /*
- getOperation();
- getGCI();
- getLogType();
- */
-
void print();
void printAll();
+ NdbEventOperation *m_facade;
+ Uint32 m_magic_number;
+
const NdbError & getNdbError() const;
NdbError m_error;
Ndb *m_ndb;
NdbEventImpl *m_eventImpl;
- NdbGlobalEventBufferHandle *m_bufferHandle;
NdbRecAttr *theFirstPkAttrs[2];
NdbRecAttr *theCurrentPkAttrs[2];
NdbRecAttr *theFirstDataAttrs[2];
NdbRecAttr *theCurrentDataAttrs[2];
- NdbEventOperation::State m_state;
+ NdbEventOperation::State m_state; /* note connection to mi_type */
+ Uint32 mi_type; /* should be == 0 if m_state != EO_EXECUTING
+ * else same as in EventImpl
+ */
Uint32 m_eventId;
- int m_bufferId;
- int m_bufferL;
- SubTableData *sdata;
- LinearSectionPtr ptr[3];
+ Uint32 m_oid;
+
+ EventBufData *m_data_item;
+
+ void *m_custom_data;
+ int m_has_error;
+
+#ifdef VM_TRACE
+ Uint32 m_data_done_count;
+ Uint32 m_data_count;
+#endif
+
+ // managed by the ndb object
+ NdbEventOperationImpl *m_next;
+ NdbEventOperationImpl *m_prev;
+private:
+ void receive_data(NdbRecAttr *r, const Uint32 *data, Uint32 sz);
};
-class NdbGlobalEventBuffer;
-class NdbGlobalEventBufferHandle {
+
+class NdbEventBuffer {
public:
- NdbGlobalEventBufferHandle (int MAX_NUMBER_ACTIVE_EVENTS);
- ~NdbGlobalEventBufferHandle ();
- //static NdbGlobalEventBufferHandle *init(int MAX_NUMBER_ACTIVE_EVENTS);
-
- // returns bufferId 0-N if ok otherwise -1
- int prepareAddSubscribeEvent(NdbEventOperationImpl *, int& hasSubscriber);
- void unprepareAddSubscribeEvent(int bufferId);
- void addSubscribeEvent(int bufferId,
- NdbEventOperationImpl *ndbEventOperationImpl);
-
- void unprepareDropSubscribeEvent(int bufferId);
- int prepareDropSubscribeEvent(int bufferId, int& hasSubscriber);
- void dropSubscribeEvent(int bufferId);
-
- static int getDataL(const int bufferId,
- SubTableData * &sdata,
- LinearSectionPtr ptr[3],
- int *pOverrun);
- static int insertDataL(int bufferId,
- const SubTableData * const sdata,
- LinearSectionPtr ptr[3]);
- static void latestGCI(int bufferId, Uint32 gci);
- static Uint32 getLatestGCI();
- static Uint32 getEventId(int bufferId);
-
- void group_lock();
- void group_unlock();
- int wait(int aMillisecondNumber);
- int m_bufferL;
-private:
- friend class NdbGlobalEventBuffer;
- void addBufferId(int bufferId);
- void dropBufferId(int bufferId);
+ NdbEventBuffer(Ndb*);
+ ~NdbEventBuffer();
- struct NdbCondition *p_cond;
- int m_nids;
- int m_bufferIds[NDB_MAX_ACTIVE_EVENTS];
-};
+ const Uint32 &m_system_nodes;
+ Vector<Gci_container> m_active_gci;
+ NdbEventOperation *createEventOperation(const char* eventName,
+ NdbError &);
+ void dropEventOperation(NdbEventOperation *);
+ static NdbEventOperationImpl* getEventOperationImpl(NdbEventOperation* tOp);
-class NdbGlobalEventBuffer {
-private:
- friend class NdbGlobalEventBufferHandle;
- void lockB(int bufferId);
- void unlockB(int bufferId);
- void group_lock();
- void group_unlock();
- void lock();
- void unlock();
void add_drop_lock();
void add_drop_unlock();
+ void lock();
+ void unlock();
+
+ void add_op();
+ void remove_op();
+ void init_gci_containers();
+ Uint32 m_active_op_count;
+
+ // accessed from the "receive thread"
+ int insertDataL(NdbEventOperationImpl *op,
+ const SubTableData * const sdata,
+ LinearSectionPtr ptr[3]);
+ void execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep);
+ void complete_outof_order_gcis();
+
+ void reportClusterFailed(NdbEventOperationImpl *op);
+ void completeClusterFailed();
+
+ // used by user thread
+ Uint64 getLatestGCI();
+ Uint32 getEventId(int bufferId);
- NdbGlobalEventBuffer();
- ~NdbGlobalEventBuffer();
-
- void real_remove(NdbGlobalEventBufferHandle *h);
- void real_init(NdbGlobalEventBufferHandle *h,
- int MAX_NUMBER_ACTIVE_EVENTS);
-
- int real_prepareAddSubscribeEvent(NdbGlobalEventBufferHandle *h,
- NdbEventOperationImpl *,
- int& hasSubscriber);
- void real_unprepareAddSubscribeEvent(int bufferId);
- void real_addSubscribeEvent(int bufferId, void *ndbEventOperation);
-
- void real_unprepareDropSubscribeEvent(int bufferId);
- int real_prepareDropSubscribeEvent(int bufferId,
- int& hasSubscriber);
- void real_dropSubscribeEvent(int bufferId);
-
- int real_getDataL(const int bufferId,
- SubTableData * &sdata,
- LinearSectionPtr ptr[3],
- int *pOverrun);
- int real_insertDataL(int bufferId,
- const SubTableData * const sdata,
- LinearSectionPtr ptr[3]);
- void real_latestGCI(int bufferId, Uint32 gci);
- Uint32 real_getLatestGCI();
+ int pollEvents(int aMillisecondNumber, Uint64 *latestGCI= 0);
+ NdbEventOperation *nextEvent();
+
+ NdbEventOperationImpl *move_data();
+
+ // used by both user thread and receive thread
int copy_data_alloc(const SubTableData * const f_sdata,
LinearSectionPtr f_ptr[3],
- SubTableData * &t_sdata,
- LinearSectionPtr t_ptr[3]);
+ EventBufData *ev_buf);
- int real_wait(NdbGlobalEventBufferHandle *, int aMillisecondNumber);
- int hasData(int bufferId);
- int ID (int bufferId) {return bufferId & 0xFF;};
- int NO (int bufferId) {return bufferId >> 16;};
- int NO_ID (int n, int bufferId) {return (n << 16) | ID(bufferId);};
+ void free_list(EventBufData_list &list);
- Vector<NdbGlobalEventBufferHandle*> m_handlers;
+ void reportStatus();
// Global Mutex used for some things
- NdbMutex *p_add_drop_mutex;
-
- int m_group_lock_flag;
- Uint32 m_latestGCI;
-
- int m_no;
- int m_max;
-#define MAX_SUBSCRIBERS_PER_EVENT 16
- struct BufItem {
- // local mutex for each event/buffer
- NdbMutex *p_buf_mutex;
- Uint32 gId;
- Uint32 eventType;
- struct Data {
- SubTableData *sdata;
- LinearSectionPtr ptr[3];
- } * data;
-
- struct Ps {
- NdbGlobalEventBufferHandle *theHandle;
- int b;
- int overrun;
- int bufferempty;
- //void *ndbEventOperation;
- } ps[MAX_SUBSCRIBERS_PER_EVENT]; // only supports 1 subscriber so far
-
- int subs;
- int f;
- int sz;
- int max_sz;
+ static NdbMutex *p_add_drop_mutex;
+
+#ifdef VM_TRACE
+ const char *m_latest_command;
+#endif
+
+ Ndb *m_ndb;
+ Uint64 m_latestGCI; // latest "handover" GCI
+  Uint64 m_latest_complete_GCI; // latest complete GCI (in case of out-of-order GCIs)
+
+ NdbMutex *m_mutex;
+ struct NdbCondition *p_cond;
+
+ // receive thread
+ Gci_container m_complete_data;
+ EventBufData *m_free_data;
+#ifdef VM_TRACE
+ unsigned m_free_data_count;
+#endif
+ unsigned m_free_data_sz;
+
+ // user thread
+ EventBufData_list m_available_data;
+ EventBufData_list m_used_data;
+
+ unsigned m_total_alloc; // total allocated memory
+
+  // thresholds to report status
+ unsigned m_free_thresh;
+ unsigned m_gci_slip_thresh;
+
+ NdbError m_error;
+private:
+ int expand(unsigned sz);
+
+ // all allocated data
+ struct EventBufData_chunk
+ {
+ unsigned sz;
+ EventBufData data[1];
};
- BufItem *m_buf;
+ Vector<EventBufData_chunk *> m_allocated_data;
+ unsigned m_sz;
+
+ // dropped event operations that have not yet
+ // been deleted
+ NdbEventOperationImpl *m_dropped_ev_op;
};
+
+inline
+NdbEventOperationImpl*
+NdbEventBuffer::getEventOperationImpl(NdbEventOperation* tOp)
+{
+ return &tOp->m_impl;
+}
+
+inline void
+NdbEventOperationImpl::receive_data(NdbRecAttr *r,
+ const Uint32 *data,
+ Uint32 sz)
+{
+ r->receive_data(data,sz);
+#if 0
+ if (sz)
+ {
+ assert((r->attrSize() * r->arraySize() + 3) >> 2 == sz);
+ r->theNULLind= 0;
+ memcpy(r->aRef(), data, 4 * sz);
+ return;
+ }
+ r->theNULLind= 1;
+#endif
+}
+
#endif
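
The reworked header above replaces the global event buffer with a per-Ndb NdbEventBuffer: the receive thread fills Gci_container objects and hands complete epochs over, while the user thread drains them through pollEvents()/nextEvent(). A minimal consumer sketch against the public wrappers, given an already connected Ndb_cluster_connection named cluster_connection and an event "MY_EVENT" with columns "pk" and "val" (all names are illustrative; the wrapper signatures are assumed to mirror the buffer methods shown above):

    Ndb ndb(&cluster_connection, "TEST_DB");          // illustrative database name
    ndb.init();
    NdbEventOperation *op = ndb.createEventOperation("MY_EVENT");
    NdbRecAttr *pk  = op->getValue("pk");
    NdbRecAttr *val = op->getValue("val");
    op->execute();                                    // start the subscription
    for (;;) {
      if (ndb.pollEvents(1000) > 0) {                 // wait up to 1s for a handed-over GCI
        while (NdbEventOperation *ev = ndb.nextEvent()) {
          Uint64 gci = ev->getGCI();                  // now 64-bit, see getGCI() above
          NdbDictionary::Event::TableEvent t = ev->getEventType();
          // pk/val now hold the received values for this event; use gci/t here
        }
      }
      // break out on an application-specific condition
    }
    ndb.dropEventOperation(op);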
diff --git a/storage/ndb/src/ndbapi/NdbImpl.hpp b/storage/ndb/src/ndbapi/NdbImpl.hpp
index d73b8afe10c..5e630b54d68 100644
--- a/storage/ndb/src/ndbapi/NdbImpl.hpp
+++ b/storage/ndb/src/ndbapi/NdbImpl.hpp
@@ -40,6 +40,10 @@ public:
NdbImpl(Ndb_cluster_connection *, Ndb&);
~NdbImpl();
+ int send_event_report(Uint32 *data, Uint32 length);
+
+ Ndb &m_ndb;
+
Ndb_cluster_connection_impl &m_ndb_cluster_connection;
NdbDictionaryImpl m_dictionary;
@@ -58,6 +62,8 @@ public:
NdbWaiter theWaiter;
+ NdbEventOperationImpl *m_ev_op;
+
int m_optimized_node_selection;
@@ -81,13 +87,13 @@ public:
#endif
#define CHECK_STATUS_MACRO \
- {if (checkInitState() == -1) { theError.code = 4100; return -1;}}
+ {if (checkInitState() == -1) { theError.code = 4100; DBUG_RETURN(-1);}}
#define CHECK_STATUS_MACRO_VOID \
- {if (checkInitState() == -1) { theError.code = 4100; return;}}
+ {if (checkInitState() == -1) { theError.code = 4100; DBUG_VOID_RETURN;}}
#define CHECK_STATUS_MACRO_ZERO \
- {if (checkInitState() == -1) { theError.code = 4100; return 0;}}
+ {if (checkInitState() == -1) { theError.code = 4100; DBUG_RETURN(0);}}
#define CHECK_STATUS_MACRO_NULL \
- {if (checkInitState() == -1) { theError.code = 4100; return NULL;}}
+ {if (checkInitState() == -1) { theError.code = 4100; DBUG_RETURN(NULL);}}
inline
void *
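
The CHECK_STATUS_* macros now unwind through DBUG_RETURN instead of a bare return, which only keeps the dbug call stack balanced if the enclosing function opened it with DBUG_ENTER. A hypothetical Ndb member, for illustration only:

    int
    Ndb::someCall()                          // not a real API function
    {
      DBUG_ENTER("Ndb::someCall");
      CHECK_STATUS_MACRO;                    // expands to DBUG_RETURN(-1) when checkInitState() fails
      /* ... actual work ... */
      DBUG_RETURN(0);
    }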
diff --git a/storage/ndb/src/ndbapi/NdbWaiter.hpp b/storage/ndb/src/ndbapi/NdbWaiter.hpp
index 4ccfb40b5ba..043beb82372 100644
--- a/storage/ndb/src/ndbapi/NdbWaiter.hpp
+++ b/storage/ndb/src/ndbapi/NdbWaiter.hpp
@@ -64,7 +64,7 @@ public:
Uint32 m_node;
Uint32 m_state;
- void * m_mutex;
+ NdbMutex * m_mutex;
bool m_poll_owner;
Uint32 m_cond_wait_index;
struct NdbCondition * m_condition;
@@ -75,7 +75,7 @@ void
NdbWaiter::wait(int waitTime)
{
assert(!m_poll_owner);
- NdbCondition_WaitTimeout(m_condition, (NdbMutex*)m_mutex, waitTime);
+ NdbCondition_WaitTimeout(m_condition, m_mutex, waitTime);
}
inline
diff --git a/storage/ndb/src/ndbapi/Ndberr.cpp b/storage/ndb/src/ndbapi/Ndberr.cpp
index b05818de6f1..260137cfd54 100644
--- a/storage/ndb/src/ndbapi/Ndberr.cpp
+++ b/storage/ndb/src/ndbapi/Ndberr.cpp
@@ -80,3 +80,10 @@ NdbEventOperationImpl::getNdbError() const {
update(m_error);
return m_error;
}
+
+const
+NdbError &
+NdbDictInterface::getNdbError() const {
+ update(m_error);
+ return m_error;
+}
diff --git a/storage/ndb/src/ndbapi/Ndbif.cpp b/storage/ndb/src/ndbapi/Ndbif.cpp
index ca7b5aee84a..48608345acd 100644
--- a/storage/ndb/src/ndbapi/Ndbif.cpp
+++ b/storage/ndb/src/ndbapi/Ndbif.cpp
@@ -26,6 +26,7 @@
#include <NdbRecAttr.hpp>
#include <NdbReceiver.hpp>
#include "API.hpp"
+#include "NdbEventOperationImpl.hpp"
#include <signaldata/TcCommit.hpp>
#include <signaldata/TcKeyFailConf.hpp>
@@ -37,11 +38,15 @@
#include <signaldata/TransIdAI.hpp>
#include <signaldata/ScanFrag.hpp>
#include <signaldata/ScanTab.hpp>
+#include <signaldata/SumaImpl.hpp>
#include <ndb_limits.h>
#include <NdbOut.hpp>
#include <NdbTick.h>
+#include <EventLogger.hpp>
+extern EventLogger g_eventLogger;
+Uint64 g_latest_trans_gci= 0;
/******************************************************************************
* int init( int aNrOfCon, int aNrOfOp );
@@ -197,7 +202,8 @@ void Ndb::connected(Uint32 ref)
n++;
}
}
- theImpl->theNoOfDBnodes= n;
+ theImpl->theNoOfDBnodes = n;
+
theFirstTransId = ((Uint64)tBlockNo << 52)+
((Uint64)tmpTheNode << 40);
theFirstTransId += theFacade->m_max_trans_id;
@@ -244,6 +250,7 @@ Ndb::report_node_failure(Uint32 node_id)
*
* This method is only called by ClusterMgr (via lots of methods)
*/
+
theImpl->the_release_ind[node_id] = 1;
// must come after
theImpl->the_release_ind[0] = 1;
@@ -255,6 +262,14 @@ Ndb::report_node_failure(Uint32 node_id)
void
Ndb::report_node_failure_completed(Uint32 node_id)
{
+ if (theEventBuffer &&
+ !TransporterFacade::instance()->theClusterMgr->isClusterAlive())
+ {
+ // cluster is unavailable,
+ // eventOperations in the ndb object should be notified
+ theEventBuffer->completeClusterFailed();
+ }
+
abortTransactionsAfterNodeFailure(node_id);
}//Ndb::report_node_failure_completed()
@@ -356,6 +371,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
tCon = void2con(tFirstDataPtr);
if ((tCon->checkMagicNumber() == 0) &&
(tCon->theSendStatus == NdbTransaction::sendTC_OP)) {
+ g_latest_trans_gci= keyConf->gci;
tReturnCode = tCon->receiveTCKEYCONF(keyConf, tLen);
if (tReturnCode != -1) {
completedTransaction(tCon);
@@ -506,6 +522,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
tCon = void2con(tFirstDataPtr);
if ((tCon->checkMagicNumber() == 0) &&
(tCon->theSendStatus == NdbTransaction::sendTC_COMMIT)) {
+ g_latest_trans_gci= commitConf->gci;
tReturnCode = tCon->receiveTC_COMMITCONF(commitConf);
if (tReturnCode != -1) {
completedTransaction(tCon);
@@ -676,6 +693,8 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
case GSN_DROP_EVNT_CONF:
case GSN_DROP_EVNT_REF:
case GSN_LIST_TABLES_CONF:
+ case GSN_WAIT_GCP_CONF:
+ case GSN_WAIT_GCP_REF:
NdbDictInterface::execSignal(&theDictionary->m_receiver,
aSignal, ptr);
return;
@@ -684,16 +703,43 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
case GSN_SUB_REMOVE_CONF:
case GSN_SUB_REMOVE_REF:
return; // ignore these signals
- case GSN_SUB_GCP_COMPLETE_REP:
case GSN_SUB_START_CONF:
case GSN_SUB_START_REF:
- case GSN_SUB_TABLE_DATA:
case GSN_SUB_STOP_CONF:
case GSN_SUB_STOP_REF:
NdbDictInterface::execSignal(&theDictionary->m_receiver,
aSignal, ptr);
return;
+ case GSN_SUB_GCP_COMPLETE_REP:
+ {
+ const SubGcpCompleteRep * const rep=
+ CAST_CONSTPTR(SubGcpCompleteRep, aSignal->getDataPtr());
+ theEventBuffer->execSUB_GCP_COMPLETE_REP(rep);
+ return;
+ }
+ case GSN_SUB_TABLE_DATA:
+ {
+ const SubTableData * const sdata=
+ CAST_CONSTPTR(SubTableData, aSignal->getDataPtr());
+ const Uint32 oid = sdata->senderData;
+ for (int i= aSignal->m_noOfSections;i < 3; i++) {
+ ptr[i].p = NULL;
+ ptr[i].sz = 0;
+ }
+ DBUG_PRINT("info",("oid=senderData: %d, gci: %d, operation: %d, "
+ "tableId: %d",
+ sdata->senderData, sdata->gci, sdata->operation,
+ sdata->tableId));
+
+ NdbEventOperationImpl *op= (NdbEventOperationImpl*)int2void(oid);
+ if (op->m_magic_number == NDB_EVENT_OP_MAGIC_NUMBER)
+ theEventBuffer->insertDataL(op,sdata, ptr);
+ else
+ g_eventLogger.error("dropped GSN_SUB_TABLE_DATA due to wrong magic "
+ "number");
+ return;
+ }
case GSN_DIHNDBTAMPER:
{
tFirstDataPtr = int2void(tFirstData);
@@ -798,6 +844,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
tCon = void2con(tFirstDataPtr);
if ((tCon->checkMagicNumber() == 0) &&
(tCon->theSendStatus == NdbTransaction::sendTC_OP)) {
+ g_latest_trans_gci= indxConf->gci;
tReturnCode = tCon->receiveTCINDXCONF(indxConf, tLen);
if (tReturnCode != -1) {
completedTransaction(tCon);
@@ -1376,3 +1423,27 @@ NdbTransaction::sendTC_COMMIT_ACK(NdbApiSignal * aSignal,
tp->sendSignal(aSignal, refToNode(aTCRef));
}
+
+int
+NdbImpl::send_event_report(Uint32 *data, Uint32 length)
+{
+ NdbApiSignal aSignal(m_ndb.theMyRef);
+ TransporterFacade *tp = TransporterFacade::instance();
+ aSignal.theTrace = TestOrd::TraceAPI;
+ aSignal.theReceiversBlockNumber = CMVMI;
+ aSignal.theVerId_signalNumber = GSN_EVENT_REP;
+ aSignal.theLength = length;
+ memcpy((char *)aSignal.getDataPtrSend(), (char *)data, length*4);
+
+ Uint32 tNode;
+ Ndb_cluster_connection_node_iter node_iter;
+ m_ndb_cluster_connection.init_get_next_node(node_iter);
+ while ((tNode= m_ndb_cluster_connection.get_next_node(node_iter)))
+ {
+ if(tp->get_node_alive(tNode)){
+ tp->sendSignal(&aSignal, tNode);
+ return 0;
+ }
+ }
+ return 1;
+}
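
send_event_report() wraps the payload in a GSN_EVENT_REP signal addressed to CMVMI and delivers it to the first alive data node, returning 0 on success and 1 when no node is reachable. An illustrative caller only; the payload words below are placeholders, not a documented event format:

    Uint32 data[2];
    data[0] = 0;                                     // event type word
    data[1] = 4711;                                  // event-specific payload
    if (theImpl->send_event_report(data, 2) != 0)
      ndbout_c("no data node alive, event report not sent");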
diff --git a/storage/ndb/src/ndbapi/Ndbinit.cpp b/storage/ndb/src/ndbapi/Ndbinit.cpp
index 6efcc55c32e..a742f571a81 100644
--- a/storage/ndb/src/ndbapi/Ndbinit.cpp
+++ b/storage/ndb/src/ndbapi/Ndbinit.cpp
@@ -29,10 +29,10 @@
#include <NdbOut.hpp>
#include <NdbSleep.h>
#include "ObjectMap.hpp"
+#include "NdbEventOperationImpl.hpp"
-class NdbGlobalEventBufferHandle;
-NdbGlobalEventBufferHandle *NdbGlobalEventBuffer_init(int);
-void NdbGlobalEventBuffer_drop(NdbGlobalEventBufferHandle *);
+#include <EventLogger.hpp>
+extern EventLogger g_eventLogger;
Ndb::Ndb( Ndb_cluster_connection *ndb_cluster_connection,
const char* aDataBase , const char* aSchema)
@@ -123,13 +123,12 @@ void Ndb::setup(Ndb_cluster_connection *ndb_cluster_connection,
theInitState = NotInitialised;
{
- NdbGlobalEventBufferHandle *h=
- NdbGlobalEventBuffer_init(NDB_MAX_ACTIVE_EVENTS);
- if (h == NULL) {
- ndbout_c("Failed NdbGlobalEventBuffer_init(%d)",NDB_MAX_ACTIVE_EVENTS);
+ // theImpl->theWaiter.m_mutex must be set before this
+ theEventBuffer= new NdbEventBuffer(this);
+ if (theEventBuffer == NULL) {
+ ndbout_c("Failed NdbEventBuffer()");
exit(-1);
}
- theGlobalEventBufferHandle = h;
}
DBUG_VOID_RETURN;
@@ -144,10 +143,18 @@ void Ndb::setup(Ndb_cluster_connection *ndb_cluster_connection,
Ndb::~Ndb()
{
DBUG_ENTER("Ndb::~Ndb()");
- DBUG_PRINT("enter",("Ndb::~Ndb this=0x%x",this));
+ DBUG_PRINT("enter",("this=0x%x",this));
+
+ assert(theImpl->m_ev_op == 0); // user should return NdbEventOperation's
+ for (NdbEventOperationImpl *op= theImpl->m_ev_op; op; op=op->m_next)
+ {
+ if (op->m_state == NdbEventOperation::EO_EXECUTING && op->stop())
+ g_eventLogger.error("stopping NdbEventOperation failed in Ndb destructor");
+ op->m_magic_number= 0;
+ }
doDisconnect();
- NdbGlobalEventBuffer_drop(theGlobalEventBufferHandle);
+ delete theEventBuffer;
if (TransporterFacade::instance() != NULL && theNdbBlockNumber > 0){
TransporterFacade::instance()->close(theNdbBlockNumber, theFirstTransId);
@@ -231,11 +238,13 @@ NdbWaiter::~NdbWaiter(){
NdbImpl::NdbImpl(Ndb_cluster_connection *ndb_cluster_connection,
Ndb& ndb)
- : m_ndb_cluster_connection(ndb_cluster_connection->m_impl),
+ : m_ndb(ndb),
+ m_ndb_cluster_connection(ndb_cluster_connection->m_impl),
m_dictionary(ndb),
theCurrentConnectIndex(0),
theNdbObjectIdMap(1024,1024),
- theNoOfDBnodes(0)
+ theNoOfDBnodes(0),
+ m_ev_op(0)
{
int i;
for (i = 0; i < MAX_NDB_NODES; i++) {
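
The destructor above now expects all event operations to have been returned: it asserts that theImpl->m_ev_op is empty and, if anything leaked, stops operations still in EO_EXECUTING and clears their magic numbers before deleting the per-Ndb event buffer. The teardown order this assumes, as an illustrative sketch (the event name is made up):

    NdbEventOperation *op = ndb->createEventOperation("MY_EVENT");
    /* ... getValue()/execute()/event processing ... */
    ndb->dropEventOperation(op);   // unlinks it from theImpl->m_ev_op
    delete ndb;                    // assert(theImpl->m_ev_op == 0) now holds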
diff --git a/storage/ndb/src/ndbapi/ObjectMap.hpp b/storage/ndb/src/ndbapi/ObjectMap.hpp
index 21407279f0b..dc2a5046f77 100644
--- a/storage/ndb/src/ndbapi/ObjectMap.hpp
+++ b/storage/ndb/src/ndbapi/ObjectMap.hpp
@@ -82,9 +82,7 @@ NdbObjectIdMap::map(void * object){
// unlock();
-#ifdef DEBUG_OBJECTMAP
- ndbout_c("NdbObjectIdMap::map(0x%x) %u", object, ff<<2);
-#endif
+ DBUG_PRINT("info",("NdbObjectIdMap::map(0x%x) %u", object, ff<<2));
return ff<<2;
}
@@ -103,14 +101,13 @@ NdbObjectIdMap::unmap(Uint32 id, void *object){
m_firstFree = i;
} else {
ndbout_c("Error: NdbObjectIdMap::::unmap(%u, 0x%x) obj=0x%x", id, object, obj);
+ DBUG_PRINT("error",("NdbObjectIdMap::unmap(%u, 0x%x) obj=0x%x", id, object, obj));
return 0;
}
// unlock();
-#ifdef DEBUG_OBJECTMAP
- ndbout_c("NdbObjectIdMap::unmap(%u) obj=0x%x", id, obj);
-#endif
+ DBUG_PRINT("info",("NdbObjectIdMap::unmap(%u) obj=0x%x", id, obj));
return obj;
}
@@ -119,9 +116,7 @@ NdbObjectIdMap::unmap(Uint32 id, void *object){
inline void *
NdbObjectIdMap::getObject(Uint32 id){
-#ifdef DEBUG_OBJECTMAP
- ndbout_c("NdbObjectIdMap::getObject(%u) obj=0x%x", id, m_map[id>>2].m_obj);
-#endif
+ // DBUG_PRINT("info",("NdbObjectIdMap::getObject(%u) obj=0x%x", id, m_map[id>>2].m_obj));
id >>= 2;
if(id < m_size){
return m_map[id].m_obj;
diff --git a/storage/ndb/src/ndbapi/TransporterFacade.cpp b/storage/ndb/src/ndbapi/TransporterFacade.cpp
index 638eae15c24..1f7096e67bd 100644
--- a/storage/ndb/src/ndbapi/TransporterFacade.cpp
+++ b/storage/ndb/src/ndbapi/TransporterFacade.cpp
@@ -36,6 +36,7 @@
#include <SignalLoggerManager.hpp>
#include <kernel/ndb_limits.h>
#include <signaldata/AlterTable.hpp>
+#include <signaldata/SumaImpl.hpp>
//#define REPORT_TRANSPORTER
//#define API_TRACE;
@@ -320,6 +321,32 @@ execute(void * callbackObj, SignalHeader * const header,
rep->tableVersion,
rep->changeType == AlterTableRep::CT_ALTERED);
theFacade->m_globalDictCache.unlock();
+ break;
+ }
+ case GSN_SUB_GCP_COMPLETE_REP:
+ {
+ /**
+ * Report
+ */
+ NdbApiSignal tSignal(* header);
+ tSignal.setDataPtr(theData);
+ theFacade->for_each(&tSignal, ptr);
+
+ /**
+ * Reply
+ */
+ {
+ Uint32* send= tSignal.getDataPtrSend();
+ memcpy(send, theData, tSignal.getLength() << 2);
+ ((SubGcpCompleteAck*)send)->rep.senderRef =
+ numberToRef(API_CLUSTERMGR, theFacade->theOwnId);
+ Uint32 ref= header->theSendersBlockRef;
+ Uint32 aNodeId= refToNode(ref);
+ tSignal.theReceiversBlockNumber= refToBlock(ref);
+ tSignal.theVerId_signalNumber= GSN_SUB_GCP_COMPLETE_ACK;
+ theFacade->sendSignal(&tSignal, aNodeId);
+ }
+ break;
}
default:
break;
@@ -719,6 +746,22 @@ TransporterFacade::init(Uint32 nodeId, const ndb_mgm_configuration* props)
DBUG_RETURN(true);
}
+void
+TransporterFacade::for_each(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
+{
+  DBUG_ENTER("TransporterFacade::for_each");
+ Uint32 sz = m_threads.m_statusNext.size();
+ TransporterFacade::ThreadData::Object_Execute oe;
+ for (Uint32 i = 0; i < sz ; i ++)
+ {
+ oe = m_threads.m_objectExecute[i];
+ if (m_threads.getInUse(i))
+ {
+ (* oe.m_executeFunction) (oe.m_object, aSignal, ptr);
+ }
+ }
+ DBUG_VOID_RETURN;
+}
void
TransporterFacade::connected()
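
for_each() fans a received signal out to every registered block object, which is how each Ndb's event buffer gets its own copy of SUB_GCP_COMPLETE_REP before the facade sends a single SUB_GCP_COMPLETE_ACK back to the sender. Reduced to a standalone sketch with hypothetical types (the real slot layout lives in ThreadData above):

    #include <vector>

    struct Receiver {
      void (*execute)(void *obj, const void *signal);   // per-object execute function
      void *obj;
      bool  in_use;
    };

    static void broadcast(const std::vector<Receiver> &rs, const void *signal)
    {
      for (size_t i = 0; i < rs.size(); i++)
        if (rs[i].in_use)                               // skip unused slots, like getInUse()
          rs[i].execute(rs[i].obj, signal);
    }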
diff --git a/storage/ndb/src/ndbapi/TransporterFacade.hpp b/storage/ndb/src/ndbapi/TransporterFacade.hpp
index 34e1a944276..2c863614de8 100644
--- a/storage/ndb/src/ndbapi/TransporterFacade.hpp
+++ b/storage/ndb/src/ndbapi/TransporterFacade.hpp
@@ -102,6 +102,11 @@ public:
void ReportNodeAlive(NodeId nodeId);
void ReportNodeDead(NodeId nodeId);
void ReportNodeFailureComplete(NodeId nodeId);
+
+ /**
+ * Send signal to each registered object
+ */
+ void for_each(NdbApiSignal* aSignal, LinearSectionPtr ptr[3]);
void lock_mutex();
void unlock_mutex();
@@ -245,7 +250,7 @@ private:
inline Object_Execute get(Uint16 blockNo) const {
blockNo -= MIN_API_BLOCK_NO;
- if(blockNo < m_objectExecute.size()){
+ if(likely (blockNo < m_objectExecute.size())){
return m_objectExecute[blockNo];
}
Object_Execute oe = { 0, 0 };
@@ -381,11 +386,6 @@ TransporterFacade::getIsNodeSendable(NodeId n) const {
else
return node.compatible && (startLevel == NodeState::SL_STARTED ||
startLevel == NodeState::SL_STOPPING_1);
- } else if (node.m_info.m_type == NodeInfo::REP) {
- /**
- * @todo Check that REP node actually has received API_REG_REQ
- */
- return node.compatible;
} else {
ndbout_c("TransporterFacade::getIsNodeSendable: Illegal node type: "
"%d of node: %d",
diff --git a/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp b/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp
index 7625da609b0..68e2c1e71d8 100644
--- a/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp
+++ b/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp
@@ -38,7 +38,6 @@ EventLogger g_eventLogger;
static int g_run_connect_thread= 0;
#include <NdbMutex.h>
-NdbMutex *ndb_global_event_buffer_mutex= NULL;
#ifdef VM_TRACE
NdbMutex *ndb_print_state_mutex= NULL;
#endif
@@ -188,6 +187,28 @@ Ndb_cluster_connection::node_id()
}
+int Ndb_cluster_connection::get_no_ready()
+{
+ TransporterFacade *tp = TransporterFacade::instance();
+ if (tp == 0 || tp->ownId() == 0)
+ return -1;
+
+ unsigned int foundAliveNode = 0;
+ tp->lock_mutex();
+ for(unsigned i= 0; i < no_db_nodes(); i++)
+ {
+ //************************************************
+ // If any node is answering, ndb is answering
+ //************************************************
+ if (tp->get_node_alive(m_impl.m_all_nodes[i].id) != 0) {
+ foundAliveNode++;
+ }
+ }
+ tp->unlock_mutex();
+
+ return foundAliveNode;
+}
+
int
Ndb_cluster_connection::wait_until_ready(int timeout,
int timeout_after_first_alive)
@@ -206,18 +227,7 @@ Ndb_cluster_connection::wait_until_ready(int timeout,
int milliCounter = 0;
int noChecksSinceFirstAliveFound = 0;
do {
- unsigned int foundAliveNode = 0;
- tp->lock_mutex();
- for(unsigned i= 0; i < no_db_nodes(); i++)
- {
- //************************************************
- // If any node is answering, ndb is answering
- //************************************************
- if (tp->get_node_alive(m_impl.m_all_nodes[i].id) != 0) {
- foundAliveNode++;
- }
- }
- tp->unlock_mutex();
+ unsigned int foundAliveNode = get_no_ready();
if (foundAliveNode == no_db_nodes())
{
@@ -264,9 +274,6 @@ Ndb_cluster_connection_impl::Ndb_cluster_connection_impl(const char *
m_connect_thread= 0;
m_connect_callback= 0;
- if (ndb_global_event_buffer_mutex == NULL)
- ndb_global_event_buffer_mutex= NdbMutex_Create();
-
#ifdef VM_TRACE
if (ndb_print_state_mutex == NULL)
ndb_print_state_mutex= NdbMutex_Create();
@@ -275,7 +282,7 @@ Ndb_cluster_connection_impl::Ndb_cluster_connection_impl(const char *
new ConfigRetriever(connect_string, NDB_VERSION, NODE_TYPE_API);
if (m_config_retriever->hasError())
{
- printf("Could not connect initialize handle to management server: %s",
+ printf("Could not initialize handle to management server: %s\n",
m_config_retriever->getErrorString());
delete m_config_retriever;
m_config_retriever= 0;
@@ -312,11 +319,6 @@ Ndb_cluster_connection_impl::~Ndb_cluster_connection_impl()
delete m_config_retriever;
m_config_retriever= NULL;
}
- if (ndb_global_event_buffer_mutex != NULL)
- {
- NdbMutex_Destroy(ndb_global_event_buffer_mutex);
- ndb_global_event_buffer_mutex= NULL;
- }
#ifdef VM_TRACE
if (ndb_print_state_mutex != NULL)
{
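
get_no_ready() is the loop that wait_until_ready() used to inline: it returns -1 while the facade has no node id yet, otherwise the number of data nodes currently answering. Illustrative use only (connect_string and the printf output are made up):

    Ndb_cluster_connection con(connect_string);
    if (con.connect() == 0)
    {
      int ready = con.get_no_ready();
      if (ready < 0)
        printf("not connected to a management server yet\n");
      else
        printf("%d of %u data nodes alive\n", ready, con.no_db_nodes());
    }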
diff --git a/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp b/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp
index 05652f3316a..df30189d713 100644
--- a/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp
+++ b/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp
@@ -54,6 +54,7 @@ private:
friend class NdbImpl;
friend void* run_ndb_cluster_connection_connect_thread(void*);
friend class Ndb_cluster_connection;
+ friend class NdbEventBuffer;
struct Node
{
diff --git a/storage/ndb/src/ndbapi/ndberror.c b/storage/ndb/src/ndbapi/ndberror.c
index 8a00e82398f..09d12e9a5b9 100644
--- a/storage/ndb/src/ndbapi/ndberror.c
+++ b/storage/ndb/src/ndbapi/ndberror.c
@@ -16,11 +16,13 @@
#include <ndb_global.h>
+#include <my_base.h>
#include <ndberror.h>
#include <m_string.h>
typedef struct ErrorBundle {
int code;
+ int mysql_code;
ndberror_classification classification;
const char * message;
} ErrorBundle;
@@ -57,6 +59,9 @@ typedef struct ErrorBundle {
#define OE ndberror_cl_schema_object_already_exists
+/* default mysql error code for unmapped codes */
+#define DMEC -1
+
static const char* empty_string = "";
/*
@@ -71,6 +76,7 @@ static const char* empty_string = "";
* 900 - TUX
* 1200 - LQH
* 1300 - BACKUP
+ * 1400 - SUMA
* 4000 - API
* 4100 - ""
* 4200 - ""
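
Every ErrorBundle entry now carries a MySQL handler error code (from my_base.h) alongside the NDB code, with DMEC (-1) marking codes that have no specific mapping. A hypothetical lookup sketch, not the server's actual mapping routine:

    static int map_ndb_to_mysql(int ndb_code)
    {
      unsigned i;
      for (i = 0; i < sizeof(ErrorCodes)/sizeof(ErrorCodes[0]); i++)
        if (ErrorCodes[i].code == ndb_code)
          return ErrorCodes[i].mysql_code;   /* may be DMEC */
      return DMEC;                           /* unknown code: no mapping either */
    }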
@@ -88,442 +94,453 @@ ErrorBundle ErrorCodes[] = {
/**
* No error
*/
- { 0, NE, "No error" },
+ { 0, 0, NE, "No error" },
/**
* NoDataFound
*/
- { 626, ND, "Tuple did not exist" },
+ { 626, HA_ERR_KEY_NOT_FOUND, ND, "Tuple did not exist" },
/**
* ConstraintViolation
*/
- { 630, CV, "Tuple already existed when attempting to insert" },
- { 840, CV, "Trying to set a NOT NULL attribute to NULL" },
- { 893, CV, "Constraint violation e.g. duplicate value in unique index" },
+ { 630, HA_ERR_FOUND_DUPP_KEY, CV, "Tuple already existed when attempting to insert" },
+ { 839, DMEC, CV, "Illegal null attribute" },
+ { 840, DMEC, CV, "Trying to set a NOT NULL attribute to NULL" },
+ { 893, HA_ERR_FOUND_DUPP_KEY, CV, "Constraint violation e.g. duplicate value in unique index" },
/**
* Node recovery errors
*/
- { 286, NR, "Node failure caused abort of transaction" },
- { 250, NR, "Node where lock was held crashed, restart scan transaction" },
- { 499, NR, "Scan take over error, restart scan transaction" },
- { 1204, NR, "Temporary failure, distribution changed" },
- { 4002, NR, "Send to NDB failed" },
- { 4010, NR, "Node failure caused abort of transaction" },
- { 4025, NR, "Node failure caused abort of transaction" },
- { 4027, NR, "Node failure caused abort of transaction" },
- { 4028, NR, "Node failure caused abort of transaction" },
- { 4029, NR, "Node failure caused abort of transaction" },
- { 4031, NR, "Node failure caused abort of transaction" },
- { 4033, NR, "Send to NDB failed" },
- { 4115, NR,
+ { 286, DMEC, NR, "Node failure caused abort of transaction" },
+ { 250, DMEC, NR, "Node where lock was held crashed, restart scan transaction" },
+ { 499, DMEC, NR, "Scan take over error, restart scan transaction" },
+ { 1204, DMEC, NR, "Temporary failure, distribution changed" },
+ { 4002, DMEC, NR, "Send to NDB failed" },
+ { 4010, DMEC, NR, "Node failure caused abort of transaction" },
+ { 4025, DMEC, NR, "Node failure caused abort of transaction" },
+ { 4027, DMEC, NR, "Node failure caused abort of transaction" },
+ { 4028, DMEC, NR, "Node failure caused abort of transaction" },
+ { 4029, DMEC, NR, "Node failure caused abort of transaction" },
+ { 4031, DMEC, NR, "Node failure caused abort of transaction" },
+ { 4033, DMEC, NR, "Send to NDB failed" },
+ { 4115, DMEC, NR,
"Transaction was committed but all read information was not "
"received due to node crash" },
- { 4119, NR, "Simple/dirty read failed due to node failure" },
+ { 4119, DMEC, NR, "Simple/dirty read failed due to node failure" },
/**
* Node shutdown
*/
- { 280, NS, "Transaction aborted due to node shutdown" },
+ { 280, DMEC, NS, "Transaction aborted due to node shutdown" },
/* This scan trans had an active fragment scan in a LQH which have crashed */
- { 270, NS, "Transaction aborted due to node shutdown" },
- { 1223, NS, "Read operation aborted due to node shutdown" },
- { 4023, NS, "Transaction aborted due to node shutdown" },
- { 4030, NS, "Transaction aborted due to node shutdown" },
- { 4034, NS, "Transaction aborted due to node shutdown" },
+ { 270, DMEC, NS, "Transaction aborted due to node shutdown" },
+ { 1223, DMEC, NS, "Read operation aborted due to node shutdown" },
+ { 4023, DMEC, NS, "Transaction aborted due to node shutdown" },
+ { 4030, DMEC, NS, "Transaction aborted due to node shutdown" },
+ { 4034, DMEC, NS, "Transaction aborted due to node shutdown" },
/**
* Unknown result
*/
- { 4008, UR, "Receive from NDB failed" },
- { 4009, UR, "Cluster Failure" },
- { 4012, UR,
+ { 4008, DMEC, UR, "Receive from NDB failed" },
+ { 4009, DMEC, UR, "Cluster Failure" },
+ { 4012, DMEC, UR,
"Request ndbd time-out, maybe due to high load or communication problems"},
- { 4024, UR,
+ { 4024, DMEC, UR,
"Time-out, most likely caused by simple read or cluster failure" },
/**
* TemporaryResourceError
*/
- { 217, TR, "217" },
- { 218, TR, "218" },
- { 219, TR, "219" },
- { 233, TR,
+ { 217, DMEC, TR, "217" },
+ { 218, DMEC, TR, "218" },
+ { 219, DMEC, TR, "219" },
+ { 233, DMEC, TR,
"Out of operation records in transaction coordinator (increase MaxNoOfConcurrentOperations)" },
- { 275, TR, "275" },
- { 279, TR, "Out of transaction markers in transaction coordinator" },
- { 414, TR, "414" },
- { 418, TR, "Out of transaction buffers in LQH" },
- { 419, TR, "419" },
- { 245, TR, "Too many active scans" },
- { 488, TR, "Too many active scans" },
- { 490, TR, "Too many active scans" },
- { 805, TR, "Out of attrinfo records in tuple manager" },
- { 830, TR, "Out of add fragment operation records" },
- { 873, TR, "Out of attrinfo records for scan in tuple manager" },
- { 1217, TR, "Out of operation records in local data manager (increase MaxNoOfLocalOperations)" },
- { 1220, TR, "REDO log files overloaded, consult online manual (decrease TimeBetweenLocalCheckpoints, and|or increase NoOfFragmentLogFiles)" },
- { 1222, TR, "Out of transaction markers in LQH" },
- { 1224, TR, "Out of Send Buffer space in LQH" },
- { 4021, TR, "Out of Send Buffer space in NDB API" },
- { 4022, TR, "Out of Send Buffer space in NDB API" },
- { 4032, TR, "Out of Send Buffer space in NDB API" },
- { 288, TR, "Out of index operations in transaction coordinator (increase MaxNoOfConcurrentIndexOperations)" },
+ { 275, DMEC, TR, "275" },
+ { 279, DMEC, TR, "Out of transaction markers in transaction coordinator" },
+ { 414, DMEC, TR, "414" },
+ { 418, DMEC, TR, "Out of transaction buffers in LQH" },
+ { 419, DMEC, TR, "419" },
+ { 245, DMEC, TR, "Too many active scans" },
+ { 488, DMEC, TR, "Too many active scans" },
+ { 490, DMEC, TR, "Too many active scans" },
+ { 805, DMEC, TR, "Out of attrinfo records in tuple manager" },
+ { 830, DMEC, TR, "Out of add fragment operation records" },
+ { 873, DMEC, TR, "Out of attrinfo records for scan in tuple manager" },
+ { 1217, DMEC, TR, "Out of operation records in local data manager (increase MaxNoOfLocalOperations)" },
+ { 1220, DMEC, TR, "REDO log files overloaded, consult online manual (decrease TimeBetweenLocalCheckpoints, and|or increase NoOfFragmentLogFiles)" },
+ { 1222, DMEC, TR, "Out of transaction markers in LQH" },
+ { 4021, DMEC, TR, "Out of Send Buffer space in NDB API" },
+ { 4022, DMEC, TR, "Out of Send Buffer space in NDB API" },
+ { 4032, DMEC, TR, "Out of Send Buffer space in NDB API" },
+ { 288, DMEC, TR, "Out of index operations in transaction coordinator (increase MaxNoOfConcurrentIndexOperations)" },
/**
* InsufficientSpace
*/
- { 623, IS, "623" },
- { 624, IS, "624" },
- { 625, IS, "Out of memory in Ndb Kernel, hash index part (increase IndexMemory)" },
- { 640, IS, "Too many hash indexes (should not happen)" },
- { 826, IS, "Too many tables and attributes (increase MaxNoOfAttributes or MaxNoOfTables)" },
- { 827, IS, "Out of memory in Ndb Kernel, table data (increase DataMemory)" },
- { 902, IS, "Out of memory in Ndb Kernel, ordered index data (increase DataMemory)" },
- { 903, IS, "Too many ordered indexes (increase MaxNoOfOrderedIndexes)" },
- { 904, IS, "Out of fragment records (increase MaxNoOfOrderedIndexes)" },
- { 905, IS, "Out of attribute records (increase MaxNoOfAttributes)" },
+ { 623, HA_ERR_RECORD_FILE_FULL, IS, "623" },
+ { 624, HA_ERR_RECORD_FILE_FULL, IS, "624" },
+ { 625, HA_ERR_INDEX_FILE_FULL, IS, "Out of memory in Ndb Kernel, hash index part (increase IndexMemory)" },
+ { 640, DMEC, IS, "Too many hash indexes (should not happen)" },
+ { 826, HA_ERR_RECORD_FILE_FULL, IS, "Too many tables and attributes (increase MaxNoOfAttributes or MaxNoOfTables)" },
+ { 827, HA_ERR_RECORD_FILE_FULL, IS, "Out of memory in Ndb Kernel, table data (increase DataMemory)" },
+ { 902, HA_ERR_RECORD_FILE_FULL, IS, "Out of memory in Ndb Kernel, ordered index data (increase DataMemory)" },
+ { 903, HA_ERR_INDEX_FILE_FULL, IS, "Too many ordered indexes (increase MaxNoOfOrderedIndexes)" },
+ { 904, HA_ERR_INDEX_FILE_FULL, IS, "Out of fragment records (increase MaxNoOfOrderedIndexes)" },
+ { 905, DMEC, IS, "Out of attribute records (increase MaxNoOfAttributes)" },
/**
* TimeoutExpired
*/
- { 266, TO, "Time-out in NDB, probably caused by deadlock" },
- { 274, TO, "Time-out in NDB, probably caused by deadlock" }, /* Scan trans timeout */
- { 296, TO, "Time-out in NDB, probably caused by deadlock" }, /* Scan trans timeout */
- { 297, TO, "Time-out in NDB, probably caused by deadlock" }, /* Scan trans timeout, temporary!! */
- { 237, TO, "Transaction had timed out when trying to commit it" },
+ { 266, HA_ERR_LOCK_WAIT_TIMEOUT, TO, "Time-out in NDB, probably caused by deadlock" },
+ { 274, HA_ERR_LOCK_WAIT_TIMEOUT, TO, "Time-out in NDB, probably caused by deadlock" }, /* Scan trans timeout */
+ { 296, HA_ERR_LOCK_WAIT_TIMEOUT, TO, "Time-out in NDB, probably caused by deadlock" }, /* Scan trans timeout */
+ { 297, HA_ERR_LOCK_WAIT_TIMEOUT, TO, "Time-out in NDB, probably caused by deadlock" }, /* Scan trans timeout, temporary!! */
+ { 237, HA_ERR_LOCK_WAIT_TIMEOUT, TO, "Transaction had timed out when trying to commit it" },
/**
* OverloadError
*/
- { 410, OL, "REDO log files overloaded, consult online manual (decrease TimeBetweenLocalCheckpoints, and|or increase NoOfFragmentLogFiles)" },
- { 677, OL, "Index UNDO buffers overloaded (increase UndoIndexBuffer)" },
- { 891, OL, "Data UNDO buffers overloaded (increase UndoDataBuffer)" },
- { 1221, OL, "REDO buffers overloaded, consult online manual (increase RedoBuffer)" },
- { 4006, OL, "Connect failure - out of connection objects (increase MaxNoOfConcurrentTransactions)" },
+ { 410, DMEC, OL, "REDO log files overloaded, consult online manual (decrease TimeBetweenLocalCheckpoints, and|or increase NoOfFragmentLogFiles)" },
+ { 677, DMEC, OL, "Index UNDO buffers overloaded (increase UndoIndexBuffer)" },
+ { 891, DMEC, OL, "Data UNDO buffers overloaded (increase UndoDataBuffer)" },
+ { 1221, DMEC, OL, "REDO buffers overloaded, consult online manual (increase RedoBuffer)" },
+ { 4006, DMEC, OL, "Connect failure - out of connection objects (increase MaxNoOfConcurrentTransactions)" },
/**
* Internal errors
*/
- { 892, IE, "Inconsistent hash index. The index needs to be dropped and recreated" },
- { 896, IE, "Tuple corrupted - wrong checksum or column data in invalid format" },
- { 901, IE, "Inconsistent ordered index. The index needs to be dropped and recreated" },
- { 202, IE, "202" },
- { 203, IE, "203" },
- { 207, IE, "207" },
- { 208, IE, "208" },
- { 209, IE, "Communication problem, signal error" },
- { 220, IE, "220" },
- { 230, IE, "230" },
- { 232, IE, "232" },
- { 238, IE, "238" },
- { 271, IE, "Simple Read transaction without any attributes to read" },
- { 272, IE, "Update operation without any attributes to update" },
- { 276, IE, "276" },
- { 277, IE, "277" },
- { 278, IE, "278" },
- { 287, IE, "Index corrupted" },
- { 631, IE, "631" },
- { 632, IE, "632" },
- { 702, IE, "Request to non-master" },
- { 706, IE, "Inconsistency during table creation" },
- { 809, IE, "809" },
- { 812, IE, "812" },
- { 829, IE, "829" },
- { 833, IE, "833" },
- { 839, IE, "Illegal null attribute" },
- { 871, IE, "871" },
- { 882, IE, "882" },
- { 883, IE, "883" },
- { 887, IE, "887" },
- { 888, IE, "888" },
- { 890, IE, "890" },
- { 4000, IE, "MEMORY ALLOCATION ERROR" },
- { 4001, IE, "Signal Definition Error" },
- { 4005, IE, "Internal Error in NdbApi" },
- { 4011, IE, "Internal Error in NdbApi" },
- { 4107, IE, "Simple Transaction and Not Start" },
- { 4108, IE, "Faulty operation type" },
- { 4109, IE, "Faulty primary key attribute length" },
- { 4110, IE, "Faulty length in ATTRINFO signal" },
- { 4111, IE, "Status Error in NdbConnection" },
- { 4113, IE, "Too many operations received" },
- { 4320, IE, "Cannot use the same object twice to create table" },
- { 4321, IE, "Trying to start two schema transactions" },
- { 4344, IE, "Only DBDICT and TRIX can send requests to TRIX" },
- { 4345, IE, "TRIX block is not available yet, probably due to node failure" },
- { 4346, IE, "Internal error at index create/build" },
- { 4347, IE, "Bad state at alter index" },
- { 4348, IE, "Inconsistency detected at alter index" },
- { 4349, IE, "Inconsistency detected at index usage" },
- { 4350, IE, "Transaction already aborted" },
+ { 892, DMEC, IE, "Inconsistent hash index. The index needs to be dropped and recreated" },
+ { 896, DMEC, IE, "Tuple corrupted - wrong checksum or column data in invalid format" },
+ { 901, DMEC, IE, "Inconsistent ordered index. The index needs to be dropped and recreated" },
+ { 202, DMEC, IE, "202" },
+ { 203, DMEC, IE, "203" },
+ { 207, DMEC, IE, "207" },
+ { 208, DMEC, IE, "208" },
+ { 209, DMEC, IE, "Communication problem, signal error" },
+ { 220, DMEC, IE, "220" },
+ { 230, DMEC, IE, "230" },
+ { 232, DMEC, IE, "232" },
+ { 238, DMEC, IE, "238" },
+ { 271, DMEC, IE, "Simple Read transaction without any attributes to read" },
+ { 272, DMEC, IE, "Update operation without any attributes to update" },
+ { 276, DMEC, IE, "276" },
+ { 277, DMEC, IE, "277" },
+ { 278, DMEC, IE, "278" },
+ { 287, DMEC, IE, "Index corrupted" },
+ { 631, DMEC, IE, "631" },
+ { 632, DMEC, IE, "632" },
+ { 702, DMEC, IE, "Request to non-master" },
+ { 706, DMEC, IE, "Inconsistency during table creation" },
+ { 809, DMEC, IE, "809" },
+ { 812, DMEC, IE, "812" },
+ { 829, DMEC, IE, "829" },
+ { 833, DMEC, IE, "833" },
+ { 871, DMEC, IE, "871" },
+ { 882, DMEC, IE, "882" },
+ { 883, DMEC, IE, "883" },
+ { 887, DMEC, IE, "887" },
+ { 888, DMEC, IE, "888" },
+ { 890, DMEC, IE, "890" },
+ { 4000, DMEC, IE, "MEMORY ALLOCATION ERROR" },
+ { 4001, DMEC, IE, "Signal Definition Error" },
+ { 4005, DMEC, IE, "Internal Error in NdbApi" },
+ { 4011, DMEC, IE, "Internal Error in NdbApi" },
+ { 4107, DMEC, IE, "Simple Transaction and Not Start" },
+ { 4108, DMEC, IE, "Faulty operation type" },
+ { 4109, DMEC, IE, "Faulty primary key attribute length" },
+ { 4110, DMEC, IE, "Faulty length in ATTRINFO signal" },
+ { 4111, DMEC, IE, "Status Error in NdbConnection" },
+ { 4113, DMEC, IE, "Too many operations received" },
+ { 4320, DMEC, IE, "Cannot use the same object twice to create table" },
+ { 4321, DMEC, IE, "Trying to start two schema transactions" },
+ { 4344, DMEC, IE, "Only DBDICT and TRIX can send requests to TRIX" },
+ { 4345, DMEC, IE, "TRIX block is not available yet, probably due to node failure" },
+ { 4346, DMEC, IE, "Internal error at index create/build" },
+ { 4347, DMEC, IE, "Bad state at alter index" },
+ { 4348, DMEC, IE, "Inconsistency detected at alter index" },
+ { 4349, DMEC, IE, "Inconsistency detected at index usage" },
+ { 4350, DMEC, IE, "Transaction already aborted" },
/**
* Application error
*/
- { 823, AE, "Too much attrinfo from application in tuple manager" },
- { 831, AE, "Too many nullable/bitfields in table definition" },
- { 876, AE, "876" },
- { 877, AE, "877" },
- { 878, AE, "878" },
- { 879, AE, "879" },
- { 880, AE, "Tried to read too much - too many getValue calls" },
- { 884, AE, "Stack overflow in interpreter" },
- { 885, AE, "Stack underflow in interpreter" },
- { 886, AE, "More than 65535 instructions executed in interpreter" },
- { 897, AE, "Update attempt of primary key via ndbcluster internal api (if this occurs via the MySQL server it is a bug, please report)" },
- { 4256, AE, "Must call Ndb::init() before this function" },
- { 4257, AE, "Tried to read too much - too many getValue calls" },
+ { 823, DMEC, AE, "Too much attrinfo from application in tuple manager" },
+ { 831, DMEC, AE, "Too many nullable/bitfields in table definition" },
+ { 876, DMEC, AE, "876" },
+ { 877, DMEC, AE, "877" },
+ { 878, DMEC, AE, "878" },
+ { 879, DMEC, AE, "879" },
+ { 880, DMEC, AE, "Tried to read too much - too many getValue calls" },
+ { 884, DMEC, AE, "Stack overflow in interpreter" },
+ { 885, DMEC, AE, "Stack underflow in interpreter" },
+ { 886, DMEC, AE, "More than 65535 instructions executed in interpreter" },
+ { 897, DMEC, AE, "Update attempt of primary key via ndbcluster internal api (if this occurs via the MySQL server it is a bug, please report)" },
+ { 4256, DMEC, AE, "Must call Ndb::init() before this function" },
+ { 4257, DMEC, AE, "Tried to read too much - too many getValue calls" },
/**
* Scan application errors
*/
- { 242, AE, "Zero concurrency in scan"},
- { 244, AE, "Too high concurrency in scan"},
- { 269, AE, "No condition and attributes to read in scan"},
- { 4600, AE, "Transaction is already started"},
- { 4601, AE, "Transaction is not started"},
- { 4602, AE, "You must call getNdbOperation before executeScan" },
- { 4603, AE, "There can only be ONE operation in a scan transaction" },
- { 4604, AE, "takeOverScanOp, opType must be UpdateRequest or DeleteRequest" },
- { 4605, AE, "You may only call openScanRead or openScanExclusive once for each operation"},
- { 4607, AE, "There may only be one operation in a scan transaction"},
- { 4608, AE, "You can not takeOverScan unless you have used openScanExclusive"},
- { 4609, AE, "You must call nextScanResult before trying to takeOverScan"},
- { 4232, AE, "Parallelism can only be between 1 and 240" },
- { 290, AE, "Scan not started or has been closed by kernel due to timeout" },
+ { 242, DMEC, AE, "Zero concurrency in scan"},
+ { 244, DMEC, AE, "Too high concurrency in scan"},
+ { 269, DMEC, AE, "No condition and attributes to read in scan"},
+ { 4600, DMEC, AE, "Transaction is already started"},
+ { 4601, DMEC, AE, "Transaction is not started"},
+ { 4602, DMEC, AE, "You must call getNdbOperation before executeScan" },
+ { 4603, DMEC, AE, "There can only be ONE operation in a scan transaction" },
+ { 4604, DMEC, AE, "takeOverScanOp, opType must be UpdateRequest or DeleteRequest" },
+ { 4605, DMEC, AE, "You may only call openScanRead or openScanExclusive once for each operation"},
+ { 4607, DMEC, AE, "There may only be one operation in a scan transaction"},
+ { 4608, DMEC, AE, "You can not takeOverScan unless you have used openScanExclusive"},
+ { 4609, DMEC, AE, "You must call nextScanResult before trying to takeOverScan"},
+ { 4232, DMEC, AE, "Parallelism can only be between 1 and 240" },
+ { 290, DMEC, AE, "Scan not started or has been closed by kernel due to timeout" },
/**
* Event schema errors
*/
- { 4713, SE, "Column defined in event does not exist in table"},
+ { 4713, DMEC, SE, "Column defined in event does not exist in table"},
/**
* Event application errors
*/
- { 4707, AE, "Too many event have been defined"},
- { 4708, AE, "Event name is too long"},
- { 4709, AE, "Can't accept more subscribers"},
- { 746, OE, "Event name already exists"},
- { 4710, AE, "Event not found"},
- { 4711, AE, "Creation of event failed"},
- { 4712, AE, "Stopped event operation does not exist. Already stopped?"},
+ { 4707, DMEC, AE, "Too many event have been defined"},
+ { 4708, DMEC, AE, "Event name is too long"},
+ { 4709, DMEC, AE, "Can't accept more subscribers"},
+ { 746, DMEC, OE, "Event name already exists"},
+ { 747, DMEC, IS, "Out of event records"},
+ { 748, DMEC, TR, "Busy during read of event table"},
+ { 4710, DMEC, AE, "Event not found"},
+ { 4711, DMEC, AE, "Creation of event failed"},
+ { 4712, DMEC, AE, "Stopped event operation does not exist. Already stopped?"},
/**
* Event internal errors
*/
- { 4731, IE, "Event not found"},
+ { 4731, DMEC, IE, "Event not found"},
/**
* SchemaError
*/
- { 701, SE, "System busy with other schema operation" },
- { 703, SE, "Invalid table format" },
- { 704, SE, "Attribute name too long" },
- { 705, SE, "Table name too long" },
- { 707, SE, "No more table metadata records (increase MaxNoOfTables)" },
- { 708, SE, "No more attribute metadata records (increase MaxNoOfAttributes)" },
- { 709, SE, "No such table existed" },
- { 721, OE, "Table or index with given name already exists" },
- { 723, SE, "No such table existed" },
- { 736, SE, "Unsupported array size" },
- { 737, SE, "Attribute array size too big" },
- { 738, SE, "Record too big" },
- { 739, SE, "Unsupported primary key length" },
- { 740, SE, "Nullable primary key not supported" },
- { 741, SE, "Unsupported alter table" },
- { 743, SE, "Unsupported character set in table or index" },
- { 744, SE, "Character string is invalid for given character set" },
- { 745, SE, "Distribution key not supported for char attribute (use binary attribute)" },
- { 747, AE, "Given NODEGROUP doesn't exist in this cluster" },
- { 748, IE, "Given fragmentType doesn't exist" },
- { 749, IE, "Primary Table in wrong state" },
- { 761, SE, "Unable to drop table as backup is in progress" },
- { 762, SE, "Unable to alter table as backup is in progress" },
- { 241, SE, "Invalid schema object version" },
- { 283, SE, "Table is being dropped" },
- { 284, SE, "Table not defined in transaction coordinator" },
- { 285, SE, "Unknown table error in transaction coordinator" },
- { 881, SE, "Unable to create table, out of data pages (increase DataMemory) " },
- { 906, SE, "Unsupported attribute type in index" },
- { 907, SE, "Unsupported character set in table or index" },
- { 908, IS, "Invalid ordered index tree node size" },
- { 1225, SE, "Table not defined in local query handler" },
- { 1226, SE, "Table is being dropped" },
- { 1228, SE, "Cannot use drop table for drop index" },
- { 1229, SE, "Too long frm data supplied" },
- { 1231, SE, "Invalid table or index to scan" },
- { 1232, SE, "Invalid table or index to scan" },
+ { 701, DMEC, SE, "System busy with other schema operation" },
+ { 703, DMEC, SE, "Invalid table format" },
+ { 704, DMEC, SE, "Attribute name too long" },
+ { 705, DMEC, SE, "Table name too long" },
+ { 707, DMEC, SE, "No more table metadata records (increase MaxNoOfTables)" },
+ { 708, DMEC, SE, "No more attribute metadata records (increase MaxNoOfAttributes)" },
+ { 709, HA_ERR_NO_SUCH_TABLE, SE, "No such table existed" },
+ { 721, HA_ERR_TABLE_EXIST, OE, "Table or index with given name already exists" },
+ { 723, DMEC, SE, "No such table existed" },
+ { 736, DMEC, SE, "Unsupported array size" },
+ { 737, HA_WRONG_CREATE_OPTION, SE, "Attribute array size too big" },
+ { 738, HA_WRONG_CREATE_OPTION, SE, "Record too big" },
+ { 739, HA_WRONG_CREATE_OPTION, SE, "Unsupported primary key length" },
+ { 740, HA_WRONG_CREATE_OPTION, SE, "Nullable primary key not supported" },
+ { 741, DMEC, SE, "Unsupported alter table" },
+ { 743, HA_WRONG_CREATE_OPTION, SE, "Unsupported character set in table or index" },
+ { 744, DMEC, SE, "Character string is invalid for given character set" },
+ { 745, HA_WRONG_CREATE_OPTION, SE, "Distribution key not supported for char attribute (use binary attribute)" },
+ { 747, HA_WRONG_CREATE_OPTION, AE, "Given NODEGROUP doesn't exist in this cluster" },
+ { 748, HA_WRONG_CREATE_OPTION, IE, "Given fragmentType doesn't exist" },
+ { 749, HA_WRONG_CREATE_OPTION, IE, "Primary Table in wrong state" },
+ { 761, DMEC, SE, "Unable to drop table as backup is in progress" },
+ { 762, DMEC, SE, "Unable to alter table as backup is in progress" },
+ { 241, HA_ERR_TABLE_DEF_CHANGED, SE, "Invalid schema object version" },
+ { 283, HA_ERR_NO_SUCH_TABLE, SE, "Table is being dropped" },
+ { 284, HA_ERR_TABLE_DEF_CHANGED, SE, "Table not defined in transaction coordinator" },
+ { 285, DMEC, SE, "Unknown table error in transaction coordinator" },
+ { 881, DMEC, SE, "Unable to create table, out of data pages (increase DataMemory) " },
+ { 906, DMEC, SE, "Unsupported attribute type in index" },
+ { 907, DMEC, SE, "Unsupported character set in table or index" },
+ { 908, DMEC, IS, "Invalid ordered index tree node size" },
+ { 1225, DMEC, SE, "Table not defined in local query handler" },
+ { 1226, DMEC, SE, "Table is being dropped" },
+ { 1228, DMEC, SE, "Cannot use drop table for drop index" },
+ { 1229, DMEC, SE, "Too long frm data supplied" },
+ { 1231, DMEC, SE, "Invalid table or index to scan" },
+ { 1232, DMEC, SE, "Invalid table or index to scan" },
/**
* FunctionNotImplemented
*/
- { 4003, NI, "Function not implemented yet" },
+ { 4003, DMEC, NI, "Function not implemented yet" },
/**
* Backup error codes
*/
- { 1300, IE, "Undefined error" },
- { 1301, IE, "Backup issued to not master (reissue command to master)" },
- { 1302, IE, "Out of backup record" },
- { 1303, IS, "Out of resources" },
- { 1304, IE, "Sequence failure" },
- { 1305, IE, "Backup definition not implemented" },
- { 1306, AE, "Backup not supported in diskless mode (change Diskless)" },
-
- { 1321, IE, "Backup aborted by application" },
- { 1322, IE, "Backup already completed" },
- { 1323, IE, "1323" },
- { 1324, IE, "Backup log buffer full" },
- { 1325, IE, "File or scan error" },
- { 1326, IE, "Backup abortet due to node failure" },
- { 1327, IE, "1327" },
+ { 1300, DMEC, IE, "Undefined error" },
+ { 1301, DMEC, IE, "Backup issued to not master (reissue command to master)" },
+ { 1302, DMEC, IE, "Out of backup record" },
+ { 1303, DMEC, IS, "Out of resources" },
+ { 1304, DMEC, IE, "Sequence failure" },
+ { 1305, DMEC, IE, "Backup definition not implemented" },
+ { 1306, DMEC, AE, "Backup not supported in diskless mode (change Diskless)" },
+
+ { 1321, DMEC, IE, "Backup aborted by application" },
+ { 1322, DMEC, IE, "Backup already completed" },
+ { 1323, DMEC, IE, "1323" },
+ { 1324, DMEC, IE, "Backup log buffer full" },
+ { 1325, DMEC, IE, "File or scan error" },
+ { 1326, DMEC, IE, "Backup abortet due to node failure" },
+ { 1327, DMEC, IE, "1327" },
- { 1340, IE, "Backup undefined error" },
- { 1342, AE, "Backup failed to allocate buffers (check configuration)" },
- { 1343, AE, "Backup failed to setup fs buffers (check configuration)" },
- { 1344, AE, "Backup failed to allocate tables (check configuration)" },
- { 1345, AE, "Backup failed to insert file header (check configuration)" },
- { 1346, AE, "Backup failed to insert table list (check configuration)" },
- { 1347, AE, "Backup failed to allocate table memory (check configuration)" },
- { 1348, AE, "Backup failed to allocate file record (check configuration)" },
- { 1349, AE, "Backup failed to allocate attribute record (check configuration)" },
- { 1329, AE, "Backup during software upgrade not supported" },
+ { 1340, DMEC, IE, "Backup undefined error" },
+ { 1342, DMEC, AE, "Backup failed to allocate buffers (check configuration)" },
+ { 1343, DMEC, AE, "Backup failed to setup fs buffers (check configuration)" },
+ { 1344, DMEC, AE, "Backup failed to allocate tables (check configuration)" },
+ { 1345, DMEC, AE, "Backup failed to insert file header (check configuration)" },
+ { 1346, DMEC, AE, "Backup failed to insert table list (check configuration)" },
+ { 1347, DMEC, AE, "Backup failed to allocate table memory (check configuration)" },
+ { 1348, DMEC, AE, "Backup failed to allocate file record (check configuration)" },
+ { 1349, DMEC, AE, "Backup failed to allocate attribute record (check configuration)" },
+ { 1329, DMEC, AE, "Backup during software upgrade not supported" },
/**
* Still uncategorized
*/
- { 720, AE, "Attribute name reused in table definition" },
- { 4004, AE, "Attribute name not found in the Table" },
+ { 720, DMEC, AE, "Attribute name reused in table definition" },
+ { 1405, DMEC, NR, "Subscriber manager busy with node recovery" },
+ { 1407, DMEC, SE, "Subscription not found in subscriber manager" },
+ { 1411, DMEC, TR, "Subscriber manager busy with adding/removing a subscriber" },
+ { 1412, DMEC, IS, "Can't accept more subscribers, out of space in pool" },
+ { 1413, DMEC, TR, "Subscriber manager busy with adding the subscription" },
+ { 1414, DMEC, TR, "Subscriber manager has subscribers on this subscription" },
+ { 1415, DMEC, SE, "Subscription not unique in subscriber manager" },
+ { 1416, DMEC, IS, "Can't accept more subscriptions, out of space in pool" },
+  { 1417,  DMEC, SE, "Table in subscription not defined, probably dropped" },
+
+ { 4004, DMEC, AE, "Attribute name not found in the Table" },
- { 4100, AE, "Status Error in NDB" },
- { 4101, AE, "No connections to NDB available and connect failed" },
- { 4102, AE, "Type in NdbTamper not correct" },
- { 4103, AE, "No schema connections to NDB available and connect failed" },
- { 4104, AE, "Ndb Init in wrong state, destroy Ndb object and create a new" },
- { 4105, AE, "Too many Ndb objects" },
- { 4106, AE, "All Not NULL attribute have not been defined" },
- { 4114, AE, "Transaction is already completed" },
- { 4116, AE, "Operation was not defined correctly, probably missing a key" },
- { 4117, AE, "Could not start transporter, configuration error"},
- { 4118, AE, "Parameter error in API call" },
- { 4300, AE, "Tuple Key Type not correct" },
- { 4301, AE, "Fragment Type not correct" },
- { 4302, AE, "Minimum Load Factor not correct" },
- { 4303, AE, "Maximum Load Factor not correct" },
- { 4304, AE, "Maximum Load Factor smaller than Minimum" },
- { 4305, AE, "K value must currently be set to 6" },
- { 4306, AE, "Memory Type not correct" },
- { 4307, AE, "Invalid table name" },
- { 4308, AE, "Attribute Size not correct" },
- { 4309, AE, "Fixed array too large, maximum 64000 bytes" },
- { 4310, AE, "Attribute Type not correct" },
- { 4311, AE, "Storage Mode not correct" },
- { 4312, AE, "Null Attribute Type not correct" },
- { 4313, AE, "Index only storage for non-key attribute" },
- { 4314, AE, "Storage Type of attribute not correct" },
- { 4315, AE, "No more key attributes allowed after defining variable length key attribute" },
- { 4316, AE, "Key attributes are not allowed to be NULL attributes" },
- { 4317, AE, "Too many primary keys defined in table" },
- { 4318, AE, "Invalid attribute name" },
- { 4319, AE, "createAttribute called at erroneus place" },
- { 4322, AE, "Attempt to define distribution key when not prepared to" },
- { 4323, AE, "Distribution Key set on table but not defined on first attribute" },
- { 4324, AE, "Attempt to define distribution group when not prepared to" },
- { 4325, AE, "Distribution Group set on table but not defined on first attribute" },
- { 4326, AE, "Distribution Group with erroneus number of bits" },
- { 4327, AE, "Distribution Group with 1 byte attribute is not allowed" },
- { 4328, AE, "Disk memory attributes not yet supported" },
- { 4329, AE, "Variable stored attributes not yet supported" },
-
- { 4400, AE, "Status Error in NdbSchemaCon" },
- { 4401, AE, "Only one schema operation per schema transaction" },
- { 4402, AE, "No schema operation defined before calling execute" },
-
- { 4501, AE, "Insert in hash table failed when getting table information from Ndb" },
- { 4502, AE, "GetValue not allowed in Update operation" },
- { 4503, AE, "GetValue not allowed in Insert operation" },
- { 4504, AE, "SetValue not allowed in Read operation" },
- { 4505, AE, "NULL value not allowed in primary key search" },
- { 4506, AE, "Missing getValue/setValue when calling execute" },
- { 4507, AE, "Missing operation request when calling execute" },
-
- { 4200, AE, "Status Error when defining an operation" },
- { 4201, AE, "Variable Arrays not yet supported" },
- { 4202, AE, "Set value on tuple key attribute is not allowed" },
- { 4203, AE, "Trying to set a NOT NULL attribute to NULL" },
- { 4204, AE, "Set value and Read/Delete Tuple is incompatible" },
- { 4205, AE, "No Key attribute used to define tuple" },
- { 4206, AE, "Not allowed to equal key attribute twice" },
- { 4207, AE, "Key size is limited to 4092 bytes" },
- { 4208, AE, "Trying to read a non-stored attribute" },
- { 4209, AE, "Length parameter in equal/setValue is incorrect" },
- { 4210, AE, "Ndb sent more info than the length he specified" },
- { 4211, AE, "Inconsistency in list of NdbRecAttr-objects" },
- { 4212, AE, "Ndb reports NULL value on Not NULL attribute" },
- { 4213, AE, "Not all data of an attribute has been received" },
- { 4214, AE, "Not all attributes have been received" },
- { 4215, AE, "More data received than reported in TCKEYCONF message" },
- { 4216, AE, "More than 8052 bytes in setValue cannot be handled" },
- { 4217, AE, "It is not allowed to increment any other than unsigned ints" },
- { 4218, AE, "Currently not allowed to increment NULL-able attributes" },
- { 4219, AE, "Maximum size of interpretative attributes are 64 bits" },
- { 4220, AE, "Maximum size of interpretative attributes are 64 bits" },
- { 4221, AE, "Trying to jump to a non-defined label" },
- { 4222, AE, "Label was not found, internal error" },
- { 4223, AE, "Not allowed to create jumps to yourself" },
- { 4224, AE, "Not allowed to jump to a label in a different subroutine" },
- { 4225, AE, "All primary keys defined, call setValue/getValue"},
- { 4226, AE, "Bad number when defining a label" },
- { 4227, AE, "Bad number when defining a subroutine" },
- { 4228, AE, "Illegal interpreter function in scan definition" },
- { 4229, AE, "Illegal register in interpreter function definition" },
- { 4230, AE, "Illegal state when calling getValue, probably not a read" },
- { 4231, AE, "Illegal state when calling interpreter routine" },
- { 4233, AE, "Calling execute (synchronous) when already prepared asynchronous transaction exists" },
- { 4234, AE, "Illegal to call setValue in this state" },
- { 4235, AE, "No callback from execute" },
- { 4236, AE, "Trigger name too long" },
- { 4237, AE, "Too many triggers" },
- { 4238, AE, "Trigger not found" },
- { 4239, AE, "Trigger with given name already exists"},
- { 4240, AE, "Unsupported trigger type"},
- { 4241, AE, "Index name too long" },
- { 4242, AE, "Too many indexes" },
- { 4243, AE, "Index not found" },
- { 4244, OE, "Index or table with given name already exists" },
- { 4247, AE, "Illegal index/trigger create/drop/alter request" },
- { 4248, AE, "Trigger/index name invalid" },
- { 4249, AE, "Invalid table" },
- { 4250, AE, "Invalid index type or index logging option" },
- { 4251, AE, "Cannot create unique index, duplicate keys found" },
- { 4252, AE, "Failed to allocate space for index" },
- { 4253, AE, "Failed to create index table" },
- { 4254, AE, "Table not an index table" },
- { 4255, AE, "Hash index attributes must be specified in same order as table attributes" },
- { 4258, AE, "Cannot create unique index, duplicate attributes found in definition" },
- { 4259, AE, "Invalid set of range scan bounds" },
- { 4260, UD, "NdbScanFilter: Operator is not defined in NdbScanFilter::Group"},
- { 4261, UD, "NdbScanFilter: Column is NULL"},
- { 4262, UD, "NdbScanFilter: Condition is out of bounds"},
- { 4263, IE, "Invalid blob attributes or invalid blob parts table" },
- { 4264, AE, "Invalid usage of blob attribute" },
- { 4265, AE, "Method is not valid in current blob state" },
- { 4266, AE, "Invalid blob seek position" },
- { 4267, IE, "Corrupted blob value" },
- { 4268, IE, "Error in blob head update forced rollback of transaction" },
- { 4269, IE, "No connection to ndb management server" },
- { 4270, IE, "Unknown blob error" },
- { 4335, AE, "Only one autoincrement column allowed per table. Having a table without primary key uses an autoincremented hidden key, i.e. a table without a primary key can not have an autoincremented column" },
- { 4271, AE, "Invalid index object, not retrieved via getIndex()" }
+ { 4100, DMEC, AE, "Status Error in NDB" },
+ { 4101, DMEC, AE, "No connections to NDB available and connect failed" },
+ { 4102, DMEC, AE, "Type in NdbTamper not correct" },
+ { 4103, DMEC, AE, "No schema connections to NDB available and connect failed" },
+ { 4104, DMEC, AE, "Ndb Init in wrong state, destroy Ndb object and create a new" },
+ { 4105, DMEC, AE, "Too many Ndb objects" },
+ { 4106, DMEC, AE, "All Not NULL attribute have not been defined" },
+ { 4114, DMEC, AE, "Transaction is already completed" },
+ { 4116, DMEC, AE, "Operation was not defined correctly, probably missing a key" },
+ { 4117, DMEC, AE, "Could not start transporter, configuration error"},
+ { 4118, DMEC, AE, "Parameter error in API call" },
+ { 4300, DMEC, AE, "Tuple Key Type not correct" },
+ { 4301, DMEC, AE, "Fragment Type not correct" },
+ { 4302, DMEC, AE, "Minimum Load Factor not correct" },
+ { 4303, DMEC, AE, "Maximum Load Factor not correct" },
+ { 4304, DMEC, AE, "Maximum Load Factor smaller than Minimum" },
+ { 4305, DMEC, AE, "K value must currently be set to 6" },
+ { 4306, DMEC, AE, "Memory Type not correct" },
+ { 4307, DMEC, AE, "Invalid table name" },
+ { 4308, DMEC, AE, "Attribute Size not correct" },
+ { 4309, DMEC, AE, "Fixed array too large, maximum 64000 bytes" },
+ { 4310, DMEC, AE, "Attribute Type not correct" },
+ { 4311, DMEC, AE, "Storage Mode not correct" },
+ { 4312, DMEC, AE, "Null Attribute Type not correct" },
+ { 4313, DMEC, AE, "Index only storage for non-key attribute" },
+ { 4314, DMEC, AE, "Storage Type of attribute not correct" },
+ { 4315, DMEC, AE, "No more key attributes allowed after defining variable length key attribute" },
+ { 4316, DMEC, AE, "Key attributes are not allowed to be NULL attributes" },
+ { 4317, DMEC, AE, "Too many primary keys defined in table" },
+ { 4318, DMEC, AE, "Invalid attribute name" },
+ { 4319, DMEC, AE, "createAttribute called at erroneous place" },
+ { 4322, DMEC, AE, "Attempt to define distribution key when not prepared to" },
+ { 4323, DMEC, AE, "Distribution Key set on table but not defined on first attribute" },
+ { 4324, DMEC, AE, "Attempt to define distribution group when not prepared to" },
+ { 4325, DMEC, AE, "Distribution Group set on table but not defined on first attribute" },
+ { 4326, DMEC, AE, "Distribution Group with erroneous number of bits" },
+ { 4327, DMEC, AE, "Distribution Group with 1 byte attribute is not allowed" },
+ { 4328, DMEC, AE, "Disk memory attributes not yet supported" },
+ { 4329, DMEC, AE, "Variable stored attributes not yet supported" },
+
+ { 4400, DMEC, AE, "Status Error in NdbSchemaCon" },
+ { 4401, DMEC, AE, "Only one schema operation per schema transaction" },
+ { 4402, DMEC, AE, "No schema operation defined before calling execute" },
+
+ { 4501, DMEC, AE, "Insert in hash table failed when getting table information from Ndb" },
+ { 4502, DMEC, AE, "GetValue not allowed in Update operation" },
+ { 4503, DMEC, AE, "GetValue not allowed in Insert operation" },
+ { 4504, DMEC, AE, "SetValue not allowed in Read operation" },
+ { 4505, DMEC, AE, "NULL value not allowed in primary key search" },
+ { 4506, DMEC, AE, "Missing getValue/setValue when calling execute" },
+ { 4507, DMEC, AE, "Missing operation request when calling execute" },
+
+ { 4200, DMEC, AE, "Status Error when defining an operation" },
+ { 4201, DMEC, AE, "Variable Arrays not yet supported" },
+ { 4202, DMEC, AE, "Set value on tuple key attribute is not allowed" },
+ { 4203, DMEC, AE, "Trying to set a NOT NULL attribute to NULL" },
+ { 4204, DMEC, AE, "Set value and Read/Delete Tuple is incompatible" },
+ { 4205, DMEC, AE, "No Key attribute used to define tuple" },
+ { 4206, DMEC, AE, "Not allowed to equal key attribute twice" },
+ { 4207, DMEC, AE, "Key size is limited to 4092 bytes" },
+ { 4208, DMEC, AE, "Trying to read a non-stored attribute" },
+ { 4209, DMEC, AE, "Length parameter in equal/setValue is incorrect" },
+ { 4210, DMEC, AE, "Ndb sent more info than the length it specified" },
+ { 4211, DMEC, AE, "Inconsistency in list of NdbRecAttr-objects" },
+ { 4212, DMEC, AE, "Ndb reports NULL value on Not NULL attribute" },
+ { 4213, DMEC, AE, "Not all data of an attribute has been received" },
+ { 4214, DMEC, AE, "Not all attributes have been received" },
+ { 4215, DMEC, AE, "More data received than reported in TCKEYCONF message" },
+ { 4216, DMEC, AE, "More than 8052 bytes in setValue cannot be handled" },
+ { 4217, DMEC, AE, "It is not allowed to increment any other than unsigned ints" },
+ { 4218, DMEC, AE, "Currently not allowed to increment NULL-able attributes" },
+ { 4219, DMEC, AE, "Maximum size of interpretative attributes is 64 bits" },
+ { 4220, DMEC, AE, "Maximum size of interpretative attributes is 64 bits" },
+ { 4221, DMEC, AE, "Trying to jump to a non-defined label" },
+ { 4222, DMEC, AE, "Label was not found, internal error" },
+ { 4223, DMEC, AE, "Not allowed to create jumps to yourself" },
+ { 4224, DMEC, AE, "Not allowed to jump to a label in a different subroutine" },
+ { 4225, DMEC, AE, "All primary keys defined, call setValue/getValue"},
+ { 4226, DMEC, AE, "Bad number when defining a label" },
+ { 4227, DMEC, AE, "Bad number when defining a subroutine" },
+ { 4228, DMEC, AE, "Illegal interpreter function in scan definition" },
+ { 4229, DMEC, AE, "Illegal register in interpreter function definition" },
+ { 4230, DMEC, AE, "Illegal state when calling getValue, probably not a read" },
+ { 4231, DMEC, AE, "Illegal state when calling interpreter routine" },
+ { 4233, DMEC, AE, "Calling execute (synchronous) when already prepared asynchronous transaction exists" },
+ { 4234, DMEC, AE, "Illegal to call setValue in this state" },
+ { 4235, DMEC, AE, "No callback from execute" },
+ { 4236, DMEC, AE, "Trigger name too long" },
+ { 4237, DMEC, AE, "Too many triggers" },
+ { 4238, DMEC, AE, "Trigger not found" },
+ { 4239, DMEC, AE, "Trigger with given name already exists"},
+ { 4240, DMEC, AE, "Unsupported trigger type"},
+ { 4241, DMEC, AE, "Index name too long" },
+ { 4242, DMEC, AE, "Too many indexes" },
+ { 4243, DMEC, AE, "Index not found" },
+ { 4244, HA_ERR_TABLE_EXIST, OE, "Index or table with given name already exists" },
+ { 4247, DMEC, AE, "Illegal index/trigger create/drop/alter request" },
+ { 4248, DMEC, AE, "Trigger/index name invalid" },
+ { 4249, DMEC, AE, "Invalid table" },
+ { 4250, DMEC, AE, "Invalid index type or index logging option" },
+ { 4251, DMEC, AE, "Cannot create unique index, duplicate keys found" },
+ { 4252, DMEC, AE, "Failed to allocate space for index" },
+ { 4253, DMEC, AE, "Failed to create index table" },
+ { 4254, DMEC, AE, "Table not an index table" },
+ { 4255, DMEC, AE, "Hash index attributes must be specified in same order as table attributes" },
+ { 4258, DMEC, AE, "Cannot create unique index, duplicate attributes found in definition" },
+ { 4259, DMEC, AE, "Invalid set of range scan bounds" },
+ { 4260, DMEC, UD, "NdbScanFilter: Operator is not defined in NdbScanFilter::Group"},
+ { 4261, DMEC, UD, "NdbScanFilter: Column is NULL"},
+ { 4262, DMEC, UD, "NdbScanFilter: Condition is out of bounds"},
+ { 4263, DMEC, IE, "Invalid blob attributes or invalid blob parts table" },
+ { 4264, DMEC, AE, "Invalid usage of blob attribute" },
+ { 4265, DMEC, AE, "Method is not valid in current blob state" },
+ { 4266, DMEC, AE, "Invalid blob seek position" },
+ { 4267, DMEC, IE, "Corrupted blob value" },
+ { 4268, DMEC, IE, "Error in blob head update forced rollback of transaction" },
+ { 4269, DMEC, IE, "No connection to ndb management server" },
+ { 4270, DMEC, IE, "Unknown blob error" },
+ { 4335, DMEC, AE, "Only one autoincrement column allowed per table. Having a table without primary key uses an autoincremented hidden key, i.e. a table without a primary key can not have an autoincremented column" },
+ { 4271, DMEC, AE, "Invalid index object, not retrieved via getIndex()" }
};
static
@@ -614,6 +631,7 @@ ndberror_update(ndberror_struct * error){
if(ErrorCodes[i].code == error->code){
error->classification = ErrorCodes[i].classification;
error->message = ErrorCodes[i].message;
+ error->mysql_code = ErrorCodes[i].mysql_code;
found = 1;
break;
}
@@ -622,6 +640,7 @@ ndberror_update(ndberror_struct * error){
if(!found){
error->classification = UE;
error->message = "Unknown error code";
+ error->mysql_code = DMEC;
}
found = 0;
@@ -639,6 +658,7 @@ ndberror_update(ndberror_struct * error){
error->details = 0;
}
+#if CHECK_ERRORCODES
int
checkErrorCodes(){
int i, j;
@@ -655,7 +675,6 @@ checkErrorCodes(){
/*static const int a = checkErrorCodes();*/
-#if CHECK_ERRORCODES
int main(void){
checkErrorCodes();
return 0;
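
The hunks above extend each ErrorCodes entry with a per-error mysql_code (DMEC acting as the default) and make ndberror_update() copy it into ndberror_struct, falling back to DMEC for unknown codes. A minimal stand-alone sketch of that mapping, assuming DMEC is a -1 sentinel and using an illustrative value for HA_ERR_TABLE_EXIST (the real definitions live in ndberror.c and the handler headers):

#include <cstdio>

static const int DMEC = -1;                 /* assumed "default MySQL error code" sentinel */
static const int HA_ERR_TABLE_EXIST = 156;  /* illustrative handler error number */

struct ErrorEntry { int code; int mysql_code; const char *message; };

static const ErrorEntry entries[] = {
  { 4243, DMEC,               "Index not found" },
  { 4244, HA_ERR_TABLE_EXIST, "Index or table with given name already exists" },
};

/* Mirrors the lookup in ndberror_update(): unknown NDB codes fall back to DMEC. */
static int mysql_code_for(int ndb_code)
{
  for (unsigned i = 0; i < sizeof(entries)/sizeof(entries[0]); i++)
    if (entries[i].code == ndb_code)
      return entries[i].mysql_code;
  return DMEC;
}

int main()
{
  std::printf("4244 -> %d, 9999 -> %d\n", mysql_code_for(4244), mysql_code_for(9999));
  return 0;
}
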
diff --git a/storage/ndb/test/include/HugoOperations.hpp b/storage/ndb/test/include/HugoOperations.hpp
index 82fd5529fa2..c6ecb4c574e 100644
--- a/storage/ndb/test/include/HugoOperations.hpp
+++ b/storage/ndb/test/include/HugoOperations.hpp
@@ -29,7 +29,7 @@ public:
~HugoOperations();
int startTransaction(Ndb*);
- int setTransaction(NdbTransaction*);
+ int setTransaction(NdbTransaction*,bool not_null_ok= false);
int closeTransaction(Ndb*);
NdbTransaction* getTransaction();
void refresh();
diff --git a/storage/ndb/test/include/HugoTransactions.hpp b/storage/ndb/test/include/HugoTransactions.hpp
index 5795bbc94c9..dd6f6c0dd3b 100644
--- a/storage/ndb/test/include/HugoTransactions.hpp
+++ b/storage/ndb/test/include/HugoTransactions.hpp
@@ -28,9 +28,6 @@ public:
HugoTransactions(const NdbDictionary::Table&,
const NdbDictionary::Index* idx = 0);
~HugoTransactions();
- int createEvent(Ndb*);
- int eventOperation(Ndb*, void* stats,
- int records);
int loadTable(Ndb*,
int records,
int batch = 512,
@@ -107,10 +104,14 @@ public:
const char * idxName,
int records,
int batchsize = 1);
+
+ void setRetryMax(int retryMax = 100) { m_retryMax = retryMax; }
+ Uint32 m_latest_gci;
protected:
NDBT_ResultRow row;
int m_defaultScanUpdateMethod;
+ int m_retryMax;
};
diff --git a/storage/ndb/test/include/NDBT_Table.hpp b/storage/ndb/test/include/NDBT_Table.hpp
index d2f99b85187..500ac7c0e39 100644
--- a/storage/ndb/test/include/NDBT_Table.hpp
+++ b/storage/ndb/test/include/NDBT_Table.hpp
@@ -28,7 +28,8 @@ public:
NdbDictionary::Column::Type _type,
int _length = 1,
bool _pk = false,
- bool _nullable = false):
+ bool _nullable = false,
+ CHARSET_INFO *cs= 0):
NdbDictionary::Column(_name)
{
assert(_name != 0);
@@ -37,6 +38,10 @@ public:
setLength(_length);
setNullable(_nullable);
setPrimaryKey(_pk);
+ if (cs)
+ {
+ setCharset(cs);
+ }
}
};
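
The NDBT_Table.hpp change above threads an optional CHARSET_INFO* through the test column constructor and applies it with setCharset(). A short sketch of the same pattern applied directly to an NdbDictionary::Column, assuming the standard NDB API headers are available; the helper name is made up for illustration:

#include <NdbApi.hpp>

/* Configure a fixed-length character column, attaching a character set
 * only when the caller supplies one (mirrors the new constructor argument). */
static void configure_char_column(NdbDictionary::Column &col,
                                  int length,
                                  CHARSET_INFO *cs /* may be 0 */)
{
  col.setType(NdbDictionary::Column::Char);
  col.setLength(length);
  col.setNullable(false);
  if (cs)
    col.setCharset(cs);   /* same call the test helper now makes */
}
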
diff --git a/storage/ndb/test/include/NDBT_Test.hpp b/storage/ndb/test/include/NDBT_Test.hpp
index 027ac356e0c..62579cbfd6b 100644
--- a/storage/ndb/test/include/NDBT_Test.hpp
+++ b/storage/ndb/test/include/NDBT_Test.hpp
@@ -17,6 +17,7 @@
#ifndef NDBT_TEST_HPP
#define NDBT_TEST_HPP
+#include <ndb_global.h>
#include "NDBT_ReturnCodes.h"
#include <Properties.hpp>
@@ -40,6 +41,8 @@ public:
NDBT_Context(Ndb_cluster_connection&);
~NDBT_Context();
const NdbDictionary::Table* getTab();
+ int getNumTables() const;
+ const char * getTableName(int) const;
NDBT_TestSuite* getSuite();
NDBT_TestCase* getCase();
@@ -218,6 +221,9 @@ public:
virtual int getNoOfRunningSteps() const = 0;
virtual int getNoOfCompletedSteps() const = 0;
+ bool m_all_tables;
+ bool m_has_run;
+
protected:
virtual int runInit(NDBT_Context* ctx) = 0;
virtual int runSteps(NDBT_Context* ctx) = 0;
@@ -352,6 +358,8 @@ public:
int addTest(NDBT_TestCase* pTest);
+
+ Vector<BaseString> m_tables_in_test;
private:
int executeOne(Ndb_cluster_connection&,
const char* _tabname, const char* testname = NULL);
@@ -374,6 +382,7 @@ private:
int timer;
NdbTimer testSuiteTimer;
bool createTable;
+ bool createAllTables;
};
@@ -431,6 +440,10 @@ C##suitname():NDBT_TestSuite(#suitname){ \
#define NOT_TABLE(tableName) \
pt->addTable(tableName, false);
+// Test case will only be run once, not once per table as normal
+#define ALL_TABLES() \
+ pt->m_all_tables= true;
+
#define NDBT_TESTSUITE_END(suitname) \
} } ; C##suitname suitname
diff --git a/storage/ndb/test/ndbapi/Makefile.am b/storage/ndb/test/ndbapi/Makefile.am
index bf993d30111..a11744ec464 100644
--- a/storage/ndb/test/ndbapi/Makefile.am
+++ b/storage/ndb/test/ndbapi/Makefile.am
@@ -34,7 +34,6 @@ test_event ndbapi_slow_select testReadPerf testLcp \
testPartitioning \
testBitfield \
DbCreate DbAsyncGenerator \
-test_event_multi_table \
testSRBank
#flexTimedAsynch
@@ -78,7 +77,6 @@ testPartitioning_SOURCES = testPartitioning.cpp
testBitfield_SOURCES = testBitfield.cpp
DbCreate_SOURCES = bench/mainPopulate.cpp bench/dbPopulate.cpp bench/userInterface.cpp bench/dbPopulate.h bench/userInterface.h bench/testData.h bench/testDefinitions.h bench/ndb_schema.hpp bench/ndb_error.hpp
DbAsyncGenerator_SOURCES = bench/mainAsyncGenerator.cpp bench/asyncGenerator.cpp bench/ndb_async2.cpp bench/dbGenerator.h bench/macros.h bench/userInterface.h bench/testData.h bench/testDefinitions.h bench/ndb_schema.hpp bench/ndb_error.hpp
-test_event_multi_table_SOURCES = test_event_multi_table.cpp
testSRBank_SOURCES = testSRBank.cpp
INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/include/kernel
diff --git a/storage/ndb/test/ndbapi/bank/Bank.cpp b/storage/ndb/test/ndbapi/bank/Bank.cpp
index 37224fdd055..fc23ebb59ce 100644
--- a/storage/ndb/test/ndbapi/bank/Bank.cpp
+++ b/storage/ndb/test/ndbapi/bank/Bank.cpp
@@ -19,8 +19,8 @@
#include <NdbSleep.h>
#include <UtilTransactions.hpp>
-Bank::Bank(Ndb_cluster_connection& con, bool _init):
- m_ndb(&con, "BANK"),
+Bank::Bank(Ndb_cluster_connection& con, bool _init, char * dbase):
+ m_ndb(&con, dbase),
m_maxAccount(-1),
m_initialized(false)
{
diff --git a/storage/ndb/test/ndbapi/bank/Bank.hpp b/storage/ndb/test/ndbapi/bank/Bank.hpp
index b80f02dae97..a581684a41b 100644
--- a/storage/ndb/test/ndbapi/bank/Bank.hpp
+++ b/storage/ndb/test/ndbapi/bank/Bank.hpp
@@ -27,7 +27,7 @@
class Bank {
public:
- Bank(Ndb_cluster_connection&, bool init = true);
+ Bank(Ndb_cluster_connection&, bool init = true, char *dbase="BANK");
int createAndLoadBank(bool overWrite, int num_accounts=10);
int dropBank();
diff --git a/storage/ndb/test/ndbapi/bank/bankCreator.cpp b/storage/ndb/test/ndbapi/bank/bankCreator.cpp
index 257255babc8..02a2e85732e 100644
--- a/storage/ndb/test/ndbapi/bank/bankCreator.cpp
+++ b/storage/ndb/test/ndbapi/bank/bankCreator.cpp
@@ -29,8 +29,10 @@
int main(int argc, const char** argv){
ndb_init();
int _help = 0;
+ char * _database = "BANK";
struct getargs args[] = {
+ { "database", 'd', arg_string, &_database, "Database name", ""},
{ "usage", '?', arg_flag, &_help, "Print help", "" }
};
int num_args = sizeof(args) / sizeof(args[0]);
@@ -49,7 +51,7 @@ int main(int argc, const char** argv){
return NDBT_ProgramExit(NDBT_FAILED);
}
- Bank bank(con);
+ Bank bank(con, true, _database);
int overWriteExisting = true;
if (bank.createAndLoadBank(overWriteExisting) != NDBT_OK)
return NDBT_ProgramExit(NDBT_FAILED);
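
Each bank utility in the hunks above and below gains a -d/--database switch parsed with getargs. A condensed, hedged sketch of the resulting main(), with the database string passed in the constructor's third slot (after the bool init flag) so it is not silently swallowed by the bool parameter; the include names and connect() arguments are assumptions modelled on the surrounding utilities:

#include <NDBT.hpp>
#include <NdbApi.hpp>
#include <getarg.h>
#include "Bank.hpp"

int main(int argc, const char** argv)
{
  ndb_init();
  int _help = 0;
  char *_database = (char*)"BANK";
  struct getargs args[] = {
    { "database", 'd', arg_string, &_database, "Database name", "" },
    { "usage",    '?', arg_flag,   &_help,     "Print help",    "" }
  };
  int num_args = sizeof(args) / sizeof(args[0]);
  int optind = 0;
  if (getarg(args, num_args, argc, argv, &optind) || _help)
    return NDBT_ProgramExit(NDBT_WRONGARGS);

  Ndb_cluster_connection con;
  if (con.connect(12, 5, 1) != 0)
    return NDBT_ProgramExit(NDBT_FAILED);

  /* init = true; the chosen database name goes in the third argument */
  Bank bank(con, true, _database);
  return NDBT_ProgramExit(bank.createAndLoadBank(true) == NDBT_OK
                          ? NDBT_OK : NDBT_FAILED);
}
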
diff --git a/storage/ndb/test/ndbapi/bank/bankMakeGL.cpp b/storage/ndb/test/ndbapi/bank/bankMakeGL.cpp
index cf373481e3e..8fedd561614 100644
--- a/storage/ndb/test/ndbapi/bank/bankMakeGL.cpp
+++ b/storage/ndb/test/ndbapi/bank/bankMakeGL.cpp
@@ -29,9 +29,11 @@
int main(int argc, const char** argv){
ndb_init();
int _help = 0;
+ char * _database = "BANK";
struct getargs args[] = {
- { "usage", '?', arg_flag, &_help, "Print help", "" }
+ { "usage", '?', arg_flag, &_help, "Print help", "" },
+ { "database", 'd', arg_string, &_database, "Database name", ""}
};
int num_args = sizeof(args) / sizeof(args[0]);
int optind = 0;
@@ -49,7 +51,7 @@ int main(int argc, const char** argv){
return NDBT_ProgramExit(NDBT_FAILED);
}
- Bank bank(con);
+ Bank bank(con, true, _database);
if (bank.performMakeGLs() != 0)
return NDBT_ProgramExit(NDBT_FAILED);
diff --git a/storage/ndb/test/ndbapi/bank/bankSumAccounts.cpp b/storage/ndb/test/ndbapi/bank/bankSumAccounts.cpp
index 034f70f8f95..610ff05eaa9 100644
--- a/storage/ndb/test/ndbapi/bank/bankSumAccounts.cpp
+++ b/storage/ndb/test/ndbapi/bank/bankSumAccounts.cpp
@@ -29,9 +29,11 @@
int main(int argc, const char** argv){
ndb_init();
int _help = 0;
+ char * _database="BANK";
struct getargs args[] = {
- { "usage", '?', arg_flag, &_help, "Print help", "" }
+ { "usage", '?', arg_flag, &_help, "Print help", "" },
+ { "database", 'd', arg_string, &_database, "Database name", ""}
};
int num_args = sizeof(args) / sizeof(args[0]);
int optind = 0;
@@ -49,7 +51,7 @@ int main(int argc, const char** argv){
return NDBT_ProgramExit(NDBT_FAILED);
}
- Bank bank(con);
+ Bank bank(con, true, _database);
if (bank.performSumAccounts() != 0)
return NDBT_ProgramExit(NDBT_FAILED);
diff --git a/storage/ndb/test/ndbapi/bank/bankTimer.cpp b/storage/ndb/test/ndbapi/bank/bankTimer.cpp
index 298f85e1e43..3f4b8565a30 100644
--- a/storage/ndb/test/ndbapi/bank/bankTimer.cpp
+++ b/storage/ndb/test/ndbapi/bank/bankTimer.cpp
@@ -31,9 +31,11 @@ int main(int argc, const char** argv){
ndb_init();
int _help = 0;
int _wait = 30;
+ char * _database="BANK";
struct getargs args[] = {
{ "wait", 'w', arg_integer, &_wait, "Max time to wait between days", "secs" },
+ { "database", 'd', arg_string, &_database, "Database name", ""},
{ "usage", '?', arg_flag, &_help, "Print help", "" }
};
int num_args = sizeof(args) / sizeof(args[0]);
@@ -52,7 +54,7 @@ int main(int argc, const char** argv){
return NDBT_ProgramExit(NDBT_FAILED);
}
- Bank bank(con);
+ Bank bank(con, true, _database);
if (bank.performIncreaseTime(_wait) != 0)
return NDBT_ProgramExit(NDBT_FAILED);
diff --git a/storage/ndb/test/ndbapi/bank/bankTransactionMaker.cpp b/storage/ndb/test/ndbapi/bank/bankTransactionMaker.cpp
index f8e646b6553..cd4455804a8 100644
--- a/storage/ndb/test/ndbapi/bank/bankTransactionMaker.cpp
+++ b/storage/ndb/test/ndbapi/bank/bankTransactionMaker.cpp
@@ -31,9 +31,11 @@ int main(int argc, const char** argv){
ndb_init();
int _help = 0;
int _wait = 20;
+ char * _database = "BANK";
struct getargs args[] = {
{ "wait", 'w', arg_integer, &_wait, "Time to wait between transactions", "ms" },
+ { "database", 'd', arg_string, &_database, "Database name", ""},
{ "usage", '?', arg_flag, &_help, "Print help", "" }
};
int num_args = sizeof(args) / sizeof(args[0]);
@@ -52,7 +54,7 @@ int main(int argc, const char** argv){
return NDBT_ProgramExit(NDBT_FAILED);
}
- Bank bank(con);
+ Bank bank(con, true, _database);
if (bank.performTransactions(_wait) != 0)
return NDBT_ProgramExit(NDBT_FAILED);
diff --git a/storage/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp b/storage/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp
index 0c268121d8a..12ac072ce58 100644
--- a/storage/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp
+++ b/storage/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp
@@ -30,9 +30,11 @@
int main(int argc, const char** argv){
ndb_init();
int _help = 0;
+ char * _database="BANK";
struct getargs args[] = {
- { "usage", '?', arg_flag, &_help, "Print help", "" }
+ { "usage", '?', arg_flag, &_help, "Print help", "" },
+ { "database", 'd', arg_string, &_database, "Database name", ""}
};
int num_args = sizeof(args) / sizeof(args[0]);
int optind = 0;
@@ -50,7 +52,7 @@ int main(int argc, const char** argv){
return NDBT_ProgramExit(NDBT_FAILED);
}
- Bank bank(con);
+ Bank bank(con, true, _database);
if (bank.performValidateAllGLs() != 0)
return NDBT_ProgramExit(NDBT_FAILED);
diff --git a/storage/ndb/test/ndbapi/bank/testBank.cpp b/storage/ndb/test/ndbapi/bank/testBank.cpp
index 6be66d528b1..035cfe0a685 100644
--- a/storage/ndb/test/ndbapi/bank/testBank.cpp
+++ b/storage/ndb/test/ndbapi/bank/testBank.cpp
@@ -32,7 +32,8 @@
#include "Bank.hpp"
int runCreateBank(NDBT_Context* ctx, NDBT_Step* step){
- Bank bank(ctx->m_cluster_connection);
+ char * _database = "BANK";
+ Bank bank(ctx->m_cluster_connection, true, _database);
int overWriteExisting = true;
if (bank.createAndLoadBank(overWriteExisting) != NDBT_OK)
return NDBT_FAILED;
@@ -40,7 +41,8 @@ int runCreateBank(NDBT_Context* ctx, NDBT_Step* step){
}
int runBankTimer(NDBT_Context* ctx, NDBT_Step* step){
- Bank bank(ctx->m_cluster_connection);
+ char * _database = "BANK";
+ Bank bank(ctx->m_cluster_connection, true, _database);
int wait = 30; // Max seconds between each "day"
int yield = 1; // Loops before bank returns
@@ -51,7 +53,8 @@ int runBankTimer(NDBT_Context* ctx, NDBT_Step* step){
}
int runBankTransactions(NDBT_Context* ctx, NDBT_Step* step){
- Bank bank(ctx->m_cluster_connection);
+ char * _database = "BANK";
+ Bank bank(ctx->m_cluster_connection, true, _database);
int wait = 10; // Max ms between each transaction
int yield = 100; // Loops before bank returns
@@ -62,7 +65,8 @@ int runBankTransactions(NDBT_Context* ctx, NDBT_Step* step){
}
int runBankGL(NDBT_Context* ctx, NDBT_Step* step){
- Bank bank(ctx->m_cluster_connection);
+ char * _database = "BANK";
+ Bank bank(ctx->m_cluster_connection, true, _database);
int yield = 20; // Loops before bank returns
int result = NDBT_OK;
@@ -76,7 +80,8 @@ int runBankGL(NDBT_Context* ctx, NDBT_Step* step){
}
int runBankSum(NDBT_Context* ctx, NDBT_Step* step){
- Bank bank(ctx->m_cluster_connection);
+ char * _database = "BANK";
+ Bank bank(ctx->m_cluster_connection, true, _database);
int wait = 2000; // Max ms between each sum of accounts
int yield = 1; // Loops before bank returns
int result = NDBT_OK;
@@ -91,7 +96,8 @@ int runBankSum(NDBT_Context* ctx, NDBT_Step* step){
}
int runDropBank(NDBT_Context* ctx, NDBT_Step* step){
- Bank bank(ctx->m_cluster_connection);
+ char * _database = "BANK";
+ Bank bank(ctx->m_cluster_connection, true, _database);
if (bank.dropBank() != NDBT_OK)
return NDBT_FAILED;
return NDBT_OK;
diff --git a/storage/ndb/test/ndbapi/bench/asyncGenerator.cpp b/storage/ndb/test/ndbapi/bench/asyncGenerator.cpp
index d91e38dff1a..7fc39d2cf9e 100644
--- a/storage/ndb/test/ndbapi/bench/asyncGenerator.cpp
+++ b/storage/ndb/test/ndbapi/bench/asyncGenerator.cpp
@@ -325,11 +325,10 @@ doTransaction_T4(Ndb * pNDB, ThreadData * td, int async)
td->transactionData.do_rollback =
getNextRandom(&td->generator.rollbackSequenceT4);
-#if 0
- memset(td->transactionData.session_details,
- myRandom48(26)+'A', SESSION_DETAILS_LENGTH);
-#endif
- td->transactionData.session_details[SESSION_DETAILS_LENGTH] = 0;
+ memset(td->transactionData.session_details+2,
+ myRandom48(26)+'A', SESSION_DETAILS_LENGTH-3);
+ td->transactionData.session_details[SESSION_DETAILS_LENGTH-1] = 0;
+ int2store(td->transactionData.session_details,SESSION_DETAILS_LENGTH-2);
/*-----------------*/
/* Run transaction */
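
The asyncGenerator change above stops NUL-filling session_details and instead builds a length-prefixed value: int2store() puts a two-byte little-endian length at the front and the payload follows, which is the layout a long-varchar column expects. A stand-alone sketch of that encoding, with int2store's assumed behaviour spelled out and SESSION_DETAILS_LENGTH as a placeholder (the real constant lives in the bench headers):

#include <cstring>

enum { SESSION_DETAILS_LENGTH = 2000 };   /* placeholder value for illustration */

/* Assumed behaviour of int2store(): store a 16-bit value little-endian. */
static inline void int2store_sketch(unsigned char *p, unsigned v)
{
  p[0] = (unsigned char)(v & 0xff);
  p[1] = (unsigned char)((v >> 8) & 0xff);
}

/* Fill the buffer the way doTransaction_T4 now does: 2-byte length prefix,
 * payload bytes after it, and a terminating NUL in the last position. */
static void fill_session_details(unsigned char *buf, unsigned char letter)
{
  std::memset(buf + 2, letter, SESSION_DETAILS_LENGTH - 3);
  buf[SESSION_DETAILS_LENGTH - 1] = 0;
  int2store_sketch(buf, SESSION_DETAILS_LENGTH - 2);
}
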
diff --git a/storage/ndb/test/ndbapi/bench/ndb_schema.hpp b/storage/ndb/test/ndbapi/bench/ndb_schema.hpp
index af08bc2eecd..4be1b5e95b1 100644
--- a/storage/ndb/test/ndbapi/bench/ndb_schema.hpp
+++ b/storage/ndb/test/ndbapi/bench/ndb_schema.hpp
@@ -36,7 +36,7 @@
#define SERVER_INSERTS "NO_OF_INSERT"
#define SERVER_DELETES "NO_OF_DELETE"
-#define GROUP_TABLE "GROUP"
+#define GROUP_TABLE "GROUP_T"
#define GROUP_ID "GROUP_ID"
#define GROUP_NAME "GROUP_NAME"
#define GROUP_ALLOW_READ "ALLOW_READ"
diff --git a/storage/ndb/test/ndbapi/test_event.cpp b/storage/ndb/test/ndbapi/test_event.cpp
index 2df50f21e43..52e0db9fe9c 100644
--- a/storage/ndb/test/ndbapi/test_event.cpp
+++ b/storage/ndb/test/ndbapi/test_event.cpp
@@ -19,16 +19,301 @@
#include <HugoTransactions.hpp>
#include <UtilTransactions.hpp>
#include <TestNdbEventOperation.hpp>
+#include <NdbAutoPtr.hpp>
+#include <NdbRestarter.hpp>
+#include <NdbRestarts.hpp>
#define GETNDB(ps) ((NDBT_NdbApiStep*)ps)->getNdb()
-int runCreateEvent(NDBT_Context* ctx, NDBT_Step* step)
+static int createEvent(Ndb *pNdb, const NdbDictionary::Table &tab)
{
- HugoTransactions hugoTrans(*ctx->getTab());
+ char eventName[1024];
+ sprintf(eventName,"%s_EVENT",tab.getName());
+
+ NdbDictionary::Dictionary *myDict = pNdb->getDictionary();
+
+ if (!myDict) {
+ g_err << "Dictionary not found "
+ << pNdb->getNdbError().code << " "
+ << pNdb->getNdbError().message << endl;
+ return NDBT_FAILED;
+ }
+
+ NdbDictionary::Event myEvent(eventName);
+ myEvent.setTable(tab.getName());
+ myEvent.addTableEvent(NdbDictionary::Event::TE_ALL);
+ for(int a = 0; a < tab.getNoOfColumns(); a++){
+ myEvent.addEventColumn(a);
+ }
+
+ int res = myDict->createEvent(myEvent); // Add event to database
- if (hugoTrans.createEvent(GETNDB(step)) != 0){
+ if (res == 0)
+ myEvent.print();
+ else if (myDict->getNdbError().classification ==
+ NdbError::SchemaObjectExists)
+ {
+ g_info << "Event creation failed event exists\n";
+ res = myDict->dropEvent(eventName);
+ if (res) {
+ g_err << "Failed to drop event: "
+ << myDict->getNdbError().code << " : "
+ << myDict->getNdbError().message << endl;
+ return NDBT_FAILED;
+ }
+ // try again
+ res = myDict->createEvent(myEvent); // Add event to database
+ if (res) {
+ g_err << "Failed to create event (1): "
+ << myDict->getNdbError().code << " : "
+ << myDict->getNdbError().message << endl;
+ return NDBT_FAILED;
+ }
+ }
+ else
+ {
+ g_err << "Failed to create event (2): "
+ << myDict->getNdbError().code << " : "
+ << myDict->getNdbError().message << endl;
return NDBT_FAILED;
}
+
+ return NDBT_OK;
+}
+
+static int dropEvent(Ndb *pNdb, const NdbDictionary::Table &tab)
+{
+ char eventName[1024];
+ sprintf(eventName,"%s_EVENT",tab.getName());
+ NdbDictionary::Dictionary *myDict = pNdb->getDictionary();
+ if (!myDict) {
+ g_err << "Dictionary not found "
+ << pNdb->getNdbError().code << " "
+ << pNdb->getNdbError().message << endl;
+ return NDBT_FAILED;
+ }
+ if (myDict->dropEvent(eventName)) {
+ g_err << "Failed to drop event: "
+ << myDict->getNdbError().code << " : "
+ << myDict->getNdbError().message << endl;
+ return NDBT_FAILED;
+ }
+ return NDBT_OK;
+}
+
+static int runCreateEvent(NDBT_Context* ctx, NDBT_Step* step)
+{
+ if (createEvent(GETNDB(step),* ctx->getTab()) != 0){
+ return NDBT_FAILED;
+ }
+ return NDBT_OK;
+}
+
+struct receivedEvent {
+ Uint32 pk;
+ Uint32 count;
+ Uint32 event;
+};
+
+static int
+eventOperation(Ndb* pNdb, const NdbDictionary::Table &tab, void* pstats, int records)
+{
+ int i;
+ const char function[] = "eventOperation: ";
+ struct receivedEvent* recInsertEvent;
+ NdbAutoObjArrayPtr<struct receivedEvent>
+ p00( recInsertEvent = new struct receivedEvent[3*records] );
+ struct receivedEvent* recUpdateEvent = &recInsertEvent[records];
+ struct receivedEvent* recDeleteEvent = &recInsertEvent[2*records];
+
+ EventOperationStats &stats = *(EventOperationStats*)pstats;
+
+ stats.n_inserts = 0;
+ stats.n_deletes = 0;
+ stats.n_updates = 0;
+ stats.n_consecutive = 0;
+ stats.n_duplicates = 0;
+ stats.n_inconsistent_gcis = 0;
+
+ for (i = 0; i < records; i++) {
+ recInsertEvent[i].pk = 0xFFFFFFFF;
+ recInsertEvent[i].count = 0;
+ recInsertEvent[i].event = 0xFFFFFFFF;
+
+ recUpdateEvent[i].pk = 0xFFFFFFFF;
+ recUpdateEvent[i].count = 0;
+ recUpdateEvent[i].event = 0xFFFFFFFF;
+
+ recDeleteEvent[i].pk = 0xFFFFFFFF;
+ recDeleteEvent[i].count = 0;
+ recDeleteEvent[i].event = 0xFFFFFFFF;
+ }
+
+ NdbDictionary::Dictionary *myDict = pNdb->getDictionary();
+
+ if (!myDict) {
+ g_err << function << "Event Creation failedDictionary not found\n";
+ return NDBT_FAILED;
+ }
+
+ int r = 0;
+ NdbEventOperation *pOp;
+
+ char eventName[1024];
+ sprintf(eventName,"%s_EVENT",tab.getName());
+ Uint32 noEventColumnName = tab.getNoOfColumns();
+
+ g_info << function << "create EventOperation\n";
+ pOp = pNdb->createEventOperation(eventName);
+ if ( pOp == NULL ) {
+ g_err << function << "Event operation creation failed\n";
+ return NDBT_FAILED;
+ }
+
+ g_info << function << "get values\n";
+ NdbRecAttr* recAttr[1024];
+ NdbRecAttr* recAttrPre[1024];
+
+ const NdbDictionary::Table *_table = myDict->getTable(tab.getName());
+
+ for (int a = 0; a < noEventColumnName; a++) {
+ recAttr[a] = pOp->getValue(_table->getColumn(a)->getName());
+ recAttrPre[a] = pOp->getPreValue(_table->getColumn(a)->getName());
+ }
+
+ // set up the callbacks
+ g_info << function << "execute\n";
+ if (pOp->execute()) { // This starts changes to "start flowing"
+ g_err << function << "operation execution failed: \n";
+ g_err << pOp->getNdbError().code << " "
+ << pOp->getNdbError().message << endl;
+ return NDBT_FAILED;
+ }
+
+ g_info << function << "ok\n";
+
+ int count = 0;
+ Uint32 last_inconsistent_gci = 0xEFFFFFF0;
+
+ while (r < records){
+ //printf("now waiting for event...\n");
+ int res = pNdb->pollEvents(1000); // wait for event or 1000 ms
+
+ if (res > 0) {
+ //printf("got data! %d\n", r);
+ NdbEventOperation *tmp;
+ while ((tmp= pNdb->nextEvent()))
+ {
+ assert(tmp == pOp);
+ r++;
+ count++;
+
+ Uint32 gci = pOp->getGCI();
+ Uint32 pk = recAttr[0]->u_32_value();
+
+ if (!pOp->isConsistent()) {
+ if (last_inconsistent_gci != gci) {
+ last_inconsistent_gci = gci;
+ stats.n_inconsistent_gcis++;
+ }
+ g_warning << "A node failure has occured and events might be missing\n";
+ }
+ g_info << function << "GCI " << gci << ": " << count;
+ struct receivedEvent* recEvent;
+ switch (pOp->getEventType()) {
+ case NdbDictionary::Event::TE_INSERT:
+ stats.n_inserts++;
+ g_info << " INSERT: ";
+ recEvent = recInsertEvent;
+ break;
+ case NdbDictionary::Event::TE_DELETE:
+ stats.n_deletes++;
+ g_info << " DELETE: ";
+ recEvent = recDeleteEvent;
+ break;
+ case NdbDictionary::Event::TE_UPDATE:
+ stats.n_updates++;
+ g_info << " UPDATE: ";
+ recEvent = recUpdateEvent;
+ break;
+ default:
+ case NdbDictionary::Event::TE_ALL:
+ abort();
+ }
+
+ if ((int)pk < records) {
+ recEvent[pk].pk = pk;
+ recEvent[pk].count++;
+ }
+
+ for (i = 1; i < noEventColumnName; i++) {
+ if (recAttr[i]->isNULL() >= 0) { // we have a value
+ g_info << " post[" << i << "]=";
+ if (recAttr[i]->isNULL() == 0) // we have a non-null value
+ g_info << recAttr[i]->u_32_value();
+ else // we have a null value
+ g_info << "NULL";
+ }
+ if (recAttrPre[i]->isNULL() >= 0) { // we have a value
+ g_info << " pre[" << i << "]=";
+ if (recAttrPre[i]->isNULL() == 0) // we have a non-null value
+ g_info << recAttrPre[i]->u_32_value();
+ else // we have a null value
+ g_info << "NULL";
+ }
+ }
+ g_info << endl;
+ }
+ } else
+ ;//printf("timed out\n");
+ }
+
+ g_info << "dropping event operation" << endl;
+
+ int res = pNdb->dropEventOperation(pOp);
+ if (res != 0) {
+ g_err << "operation execution failed\n";
+ return NDBT_FAILED;
+ }
+
+ g_info << " ok" << endl;
+
+ if (stats.n_inserts > 0) {
+ stats.n_consecutive++;
+ }
+ if (stats.n_deletes > 0) {
+ stats.n_consecutive++;
+ }
+ if (stats.n_updates > 0) {
+ stats.n_consecutive++;
+ }
+ for (i = 0; i < (Uint32)records/3; i++) {
+ if (recInsertEvent[i].pk != i) {
+ stats.n_consecutive ++;
+ ndbout << "missing insert pk " << i << endl;
+ } else if (recInsertEvent[i].count > 1) {
+ ndbout << "duplicates insert pk " << i
+ << " count " << recInsertEvent[i].count << endl;
+ stats.n_duplicates += recInsertEvent[i].count-1;
+ }
+ if (recUpdateEvent[i].pk != i) {
+ stats.n_consecutive ++;
+ ndbout << "missing update pk " << i << endl;
+ } else if (recUpdateEvent[i].count > 1) {
+ ndbout << "duplicates update pk " << i
+ << " count " << recUpdateEvent[i].count << endl;
+ stats.n_duplicates += recUpdateEvent[i].count-1;
+ }
+ if (recDeleteEvent[i].pk != i) {
+ stats.n_consecutive ++;
+ ndbout << "missing delete pk " << i << endl;
+ } else if (recDeleteEvent[i].count > 1) {
+ ndbout << "duplicates delete pk " << i
+ << " count " << recDeleteEvent[i].count << endl;
+ stats.n_duplicates += recDeleteEvent[i].count-1;
+ }
+ }
+
return NDBT_OK;
}
@@ -58,26 +343,26 @@ int runCreateShadowTable(NDBT_Context* ctx, NDBT_Step* step)
int runCreateDropEventOperation(NDBT_Context* ctx, NDBT_Step* step)
{
int loops = ctx->getNumLoops();
- int records = ctx->getNumRecords();
+ //int records = ctx->getNumRecords();
HugoTransactions hugoTrans(*ctx->getTab());
EventOperationStats stats;
- Ndb *pNdb=GETNDB(step);
+ //Ndb *pNdb=GETNDB(step);
const NdbDictionary::Table& tab= *ctx->getTab();
- NdbEventOperation *pOp;
+ //NdbEventOperation *pOp;
char eventName[1024];
sprintf(eventName,"%s_EVENT",tab.getName());
- int noEventColumnName = tab.getNoOfColumns();
+ //int noEventColumnName = tab.getNoOfColumns();
for (int i= 0; i < loops; i++)
{
#if 1
- if (hugoTrans.eventOperation(GETNDB(step), (void*)&stats, 0) != 0){
+ if (eventOperation(GETNDB(step), tab, (void*)&stats, 0) != 0){
return NDBT_FAILED;
}
#else
g_info << "create EventOperation\n";
- pOp = pNdb->createEventOperation(eventName, 100);
+ pOp = pNdb->createEventOperation(eventName);
if ( pOp == NULL ) {
g_err << "Event operation creation failed\n";
return NDBT_FAILED;
@@ -100,7 +385,7 @@ int theThreadIdCounter = 0;
int runEventOperation(NDBT_Context* ctx, NDBT_Step* step)
{
int tId = theThreadIdCounter++;
- int loops = ctx->getNumLoops();
+ //int loops = ctx->getNumLoops();
int records = ctx->getNumRecords();
HugoTransactions hugoTrans(*ctx->getTab());
@@ -110,7 +395,7 @@ int runEventOperation(NDBT_Context* ctx, NDBT_Step* step)
// sleep(tId);
- if (hugoTrans.eventOperation(GETNDB(step), (void*)&stats, 3*records) != 0){
+ if (eventOperation(GETNDB(step), *ctx->getTab(), (void*)&stats, 3*records) != 0){
return NDBT_FAILED;
}
@@ -143,9 +428,11 @@ int runEventLoad(NDBT_Context* ctx, NDBT_Step* step)
int records = ctx->getNumRecords();
HugoTransactions hugoTrans(*ctx->getTab());
+ sleep(1);
+#if 0
sleep(5);
sleep(theThreadIdCounter);
-
+#endif
if (hugoTrans.loadTable(GETNDB(step), records, 1, true, loops) != 0){
return NDBT_FAILED;
}
@@ -163,45 +450,53 @@ int runEventMixedLoad(NDBT_Context* ctx, NDBT_Step* step)
int loops = ctx->getNumLoops();
int records = ctx->getNumRecords();
HugoTransactions hugoTrans(*ctx->getTab());
+
+ while(loops -- && !ctx->isTestStopped())
+ {
+ hugoTrans.clearTable(GETNDB(step), 0);
- sleep(5);
-
- if (hugoTrans.loadTable(GETNDB(step), 3*records, 1, true, 1) != 0){
- return NDBT_FAILED;
- }
- if (hugoTrans.pkDelRecords(GETNDB(step), 3*records, 1, true, 1) != 0){
- return NDBT_FAILED;
- }
- if (hugoTrans.loadTable(GETNDB(step), records, 1, true, 1) != 0){
- return NDBT_FAILED;
- }
- if (hugoTrans.pkUpdateRecords(GETNDB(step), records, 1, 1) != 0){
- return NDBT_FAILED;
- }
- if (hugoTrans.pkUpdateRecords(GETNDB(step), records, 1, 1) != 0){
- return NDBT_FAILED;
- }
- if (hugoTrans.pkUpdateRecords(GETNDB(step), records, 1, 1) != 0){
- return NDBT_FAILED;
+ if (hugoTrans.loadTable(GETNDB(step), 3*records, 1, true, 1) != 0){
+ g_err << "FAIL " << __LINE__ << endl;
+ return NDBT_FAILED;
+ }
+ if (hugoTrans.pkDelRecords(GETNDB(step), 3*records, 1, true, 1) != 0){
+ g_err << "FAIL " << __LINE__ << endl;
+ return NDBT_FAILED;
+ }
+ if (hugoTrans.loadTable(GETNDB(step), records, 1, true, 1) != 0){
+ g_err << "FAIL " << __LINE__ << endl;
+ return NDBT_FAILED;
+ }
+ if (hugoTrans.pkUpdateRecords(GETNDB(step), records, 1, 1) != 0){
+ g_err << "FAIL " << __LINE__ << endl;
+ return NDBT_FAILED;
+ }
+ if (hugoTrans.pkUpdateRecords(GETNDB(step), records, 1, 1) != 0){
+ g_err << "FAIL " << __LINE__ << endl;
+ return NDBT_FAILED;
+ }
+ if (hugoTrans.pkUpdateRecords(GETNDB(step), records, 1, 1) != 0){
+ g_err << "FAIL " << __LINE__ << endl;
+ return NDBT_FAILED;
+ }
+
+ ctx->setProperty("LastGCI", hugoTrans.m_latest_gci);
+ if(ctx->getPropertyWait("LastGCI", ~(Uint32)0))
+ {
+ return NDBT_FAILED;
+ }
}
-
+
return NDBT_OK;
}
int runDropEvent(NDBT_Context* ctx, NDBT_Step* step)
{
- HugoTransactions hugoTrans(*ctx->getTab());
-
- theThreadIdCounter = 0;
- // if (hugoTrans.createEvent(GETNDB(step)) != 0){
- // return NDBT_FAILED;
- // }
return NDBT_OK;
}
int runVerify(NDBT_Context* ctx, NDBT_Step* step)
{
- int records = ctx->getNumRecords();
const NdbDictionary::Table * table= ctx->getTab();
char buf[1024];
@@ -220,22 +515,23 @@ int runEventApplier(NDBT_Context* ctx, NDBT_Step* step)
{
DBUG_ENTER("runEventApplier");
- int records = ctx->getNumRecords();
+ int result = NDBT_OK;
int loops = ctx->getNumLoops();
const NdbDictionary::Table * table= ctx->getTab();
- char buf[1024];
+ HugoTransactions hugoTrans(* table);
- sprintf(buf, "%s_SHADOW", table->getName());
+ char shadow[1024], buf[1024];
+ sprintf(shadow, "%s_SHADOW", table->getName());
const NdbDictionary::Table * table_shadow;
- if ((table_shadow = GETNDB(step)->getDictionary()->getTable(buf)) == 0)
+ if ((table_shadow = GETNDB(step)->getDictionary()->getTable(shadow)) == 0)
{
- g_err << "Unable to get table " << buf << endl;
+ g_err << "Unable to get table " << shadow << endl;
DBUG_RETURN(NDBT_FAILED);
}
-
+
sprintf(buf, "%s_EVENT", table->getName());
- NdbEventOperation *pOp;
- pOp = GETNDB(step)->createEventOperation(buf, 10*records);
+ NdbEventOperation *pOp, *pCreate = 0;
+ pCreate = pOp = GETNDB(step)->createEventOperation(buf);
if ( pOp == NULL ) {
g_err << "Event operation creation failed on %s" << buf << endl;
DBUG_RETURN(NDBT_FAILED);
@@ -254,57 +550,472 @@ int runEventApplier(NDBT_Context* ctx, NDBT_Step* step)
g_err << "execute operation execution failed: \n";
g_err << pOp->getNdbError().code << " "
<< pOp->getNdbError().message << endl;
- DBUG_RETURN(NDBT_FAILED);
+ result = NDBT_FAILED;
+ goto end;
+ }
+
+ while(loops-- && !ctx->isTestStopped())
+ {
+ int r;
+ int count= 0;
+ Uint32 stop_gci= ~0;
+ Uint64 curr_gci = 0;
+ Ndb* ndb= GETNDB(step);
+
+ while(!ctx->isTestStopped() && curr_gci <= stop_gci)
+ {
+ ndb->pollEvents(100, &curr_gci);
+ while ((pOp= ndb->nextEvent()) != 0)
+ {
+ assert(pOp == pCreate);
+ int noRetries= 0;
+ do
+ {
+ NdbTransaction *trans= GETNDB(step)->startTransaction();
+ if (trans == 0)
+ {
+ g_err << "startTransaction failed "
+ << GETNDB(step)->getNdbError().code << " "
+ << GETNDB(step)->getNdbError().message << endl;
+ result = NDBT_FAILED;
+ goto end;
+
+ }
+
+ NdbOperation *op= trans->getNdbOperation(table_shadow);
+ if (op == 0)
+ {
+ g_err << "getNdbOperation failed "
+ << trans->getNdbError().code << " "
+ << trans->getNdbError().message << endl;
+ result = NDBT_FAILED;
+ goto end;
+
+ }
+
+ switch (pOp->getEventType()) {
+ case NdbDictionary::Event::TE_INSERT:
+ if (op->writeTuple())
+ {
+ g_err << "insertTuple "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
+ result = NDBT_FAILED;
+ goto end;
+
+ }
+ break;
+ case NdbDictionary::Event::TE_DELETE:
+ if (op->deleteTuple())
+ {
+ g_err << "deleteTuple "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
+ result = NDBT_FAILED;
+ goto end;
+
+ }
+ break;
+ case NdbDictionary::Event::TE_UPDATE:
+ if (op->writeTuple())
+ {
+ g_err << "updateTuple "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
+ result = NDBT_FAILED;
+ goto end;
+
+ }
+ break;
+ default:
+ abort();
+ }
+
+ for (i= 0; i < n_columns; i++)
+ {
+ if (recAttr[i]->isNULL())
+ {
+ if (table->getColumn(i)->getPrimaryKey())
+ {
+ g_err << "internal error: primary key isNull()="
+ << recAttr[i]->isNULL() << endl;
+ result = NDBT_FAILED;
+ goto end;
+
+ }
+ switch (pOp->getEventType()) {
+ case NdbDictionary::Event::TE_INSERT:
+ if (recAttr[i]->isNULL() < 0)
+ {
+ g_err << "internal error: missing value for insert\n";
+ result = NDBT_FAILED;
+ goto end;
+
+ }
+ break;
+ case NdbDictionary::Event::TE_DELETE:
+ break;
+ case NdbDictionary::Event::TE_UPDATE:
+ break;
+ default:
+ abort();
+ }
+ }
+ if (table->getColumn(i)->getPrimaryKey() &&
+ op->equal(i,recAttr[i]->aRef()))
+ {
+ g_err << "equal " << i << " "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
+ result = NDBT_FAILED;
+ goto end;
+
+ }
+ }
+
+ switch (pOp->getEventType()) {
+ case NdbDictionary::Event::TE_INSERT:
+ for (i= 0; i < n_columns; i++)
+ {
+ if (!table->getColumn(i)->getPrimaryKey() &&
+ op->setValue(i,recAttr[i]->isNULL() ? 0:recAttr[i]->aRef()))
+ {
+ g_err << "setValue(insert) " << i << " "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
+ result = NDBT_FAILED;
+ goto end;
+
+ }
+ }
+ break;
+ case NdbDictionary::Event::TE_DELETE:
+ break;
+ case NdbDictionary::Event::TE_UPDATE:
+ for (i= 0; i < n_columns; i++)
+ {
+ if (!table->getColumn(i)->getPrimaryKey() &&
+ recAttr[i]->isNULL() >= 0 &&
+ op->setValue(i,recAttr[i]->isNULL() ? 0:recAttr[i]->aRef()))
+ {
+ g_err << "setValue(update) " << i << " "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
+ result = NDBT_FAILED;
+ goto end;
+
+ }
+ }
+ break;
+ default:
+ case NdbDictionary::Event::TE_ALL:
+ abort();
+ }
+ if (trans->execute(Commit) == 0)
+ {
+ trans->close();
+ count++;
+ // everything ok
+ break;
+ }
+
+ if (trans->getNdbError().status == NdbError::PermanentError)
+ {
+ g_err << "Ignoring execute " << r << " failed "
+ << trans->getNdbError().code << " "
+ << trans->getNdbError().message << endl;
+
+ trans->close();
+ count++;
+ break;
+ }
+ else if (noRetries++ == 10)
+ {
+ g_err << "execute " << r << " failed "
+ << trans->getNdbError().code << " "
+ << trans->getNdbError().message << endl;
+ trans->close();
+ result = NDBT_FAILED;
+ goto end;
+
+ }
+ trans->close();
+ NdbSleep_MilliSleep(100); // sleep before retrying
+ } while(1);
+ }
+ stop_gci = ctx->getProperty("LastGCI", ~(Uint32)0);
+ }
+
+ ndbout_c("Applied gci: %d, %d events", stop_gci, count);
+ if (hugoTrans.compare(GETNDB(step), shadow, 0))
+ {
+ g_err << "compare failed" << endl;
+ result = NDBT_FAILED;
+ goto end;
+ }
+ ctx->setProperty("LastGCI", ~(Uint32)0);
+ ctx->broadcast();
+ }
+
+end:
+ if(pCreate)
+ {
+ if (GETNDB(step)->dropEventOperation(pCreate)) {
+ g_err << "dropEventOperation execution failed "
+ << GETNDB(step)->getNdbError().code << " "
+ << GETNDB(step)->getNdbError().message << endl;
+ result = NDBT_FAILED;
+ }
+ }
+ ctx->stopTest();
+ DBUG_RETURN(result);
+}
+
+int runRestarter(NDBT_Context* ctx, NDBT_Step* step){
+ int result = NDBT_OK;
+ int loops = ctx->getNumLoops();
+ NdbRestarter restarter;
+ int i = 0;
+ int lastId = 0;
+
+ if (restarter.getNumDbNodes() < 2){
+ ctx->stopTest();
+ return NDBT_OK;
+ }
+
+ if(restarter.waitClusterStarted(60) != 0){
+ g_err << "Cluster failed to start" << endl;
+ return NDBT_FAILED;
+ }
+
+ while(result != NDBT_FAILED && !ctx->isTestStopped()){
+
+ int id = lastId % restarter.getNumDbNodes();
+ int nodeId = restarter.getDbNodeId(id);
+ ndbout << "Restart node " << nodeId << endl;
+ if(restarter.restartOneDbNode(nodeId, false, false, true) != 0){
+ g_err << "Failed to restartNextDbNode" << endl;
+ result = NDBT_FAILED;
+ break;
+ }
+
+ if(restarter.waitClusterStarted(60) != 0){
+ g_err << "Cluster failed to start" << endl;
+ result = NDBT_FAILED;
+ break;
+ }
+
+ lastId++;
+ i++;
+ }
+
+ return result;
+}
+
+Vector<const NdbDictionary::Table*> pTabs;
+Vector<const NdbDictionary::Table*> pShadowTabs;
+
+static int getAllTables(NDBT_Context* ctx, NDBT_Step* step)
+{
+ DBUG_ENTER("getAllTables");
+ Ndb * ndb= GETNDB(step);
+ NdbDictionary::Dictionary * dict = ndb->getDictionary();
+ pTabs.clear();
+
+ for (int i= 0; i < ctx->getNumTables(); i++)
+ {
+ const NdbDictionary::Table *pTab= dict->getTable(ctx->getTableName(i));
+ if (pTab == 0)
+ {
+ ndbout << "Failed to get table" << endl;
+ ndbout << dict->getNdbError() << endl;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ pTabs.push_back(pTab);
+ ndbout << " " << ctx->getTableName(i);
+ }
+ pTabs.push_back(NULL);
+ ndbout << endl;
+
+ DBUG_RETURN(NDBT_OK);
+}
+
+static int createAllEvents(NDBT_Context* ctx, NDBT_Step* step)
+{
+ DBUG_ENTER("createAllEvents");
+ Ndb * ndb= GETNDB(step);
+ for (int i= 0; pTabs[i]; i++)
+ {
+ if (createEvent(ndb,*pTabs[i]))
+ {
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ }
+ DBUG_RETURN(NDBT_OK);
+}
+
+static int dropAllEvents(NDBT_Context* ctx, NDBT_Step* step)
+{
+ DBUG_ENTER("createAllEvents");
+ Ndb * ndb= GETNDB(step);
+ int i;
+
+ for (i= 0; pTabs[i]; i++)
+ {
+ if (dropEvent(ndb,*pTabs[i]))
+ {
+ DBUG_RETURN(NDBT_FAILED);
+ }
}
+ DBUG_RETURN(NDBT_OK);
+}
+static int createAllShadows(NDBT_Context* ctx, NDBT_Step* step)
+{
+ DBUG_ENTER("createAllShadows");
+ Ndb * ndb= GETNDB(step);
+ NdbDictionary::Dictionary * dict = ndb->getDictionary();
+ // create a "shadow" table for each table
+ for (int i= 0; pTabs[i]; i++)
+ {
+ char buf[1024];
+ sprintf(buf, "%s_SHADOW", pTabs[i]->getName());
+
+ dict->dropTable(buf);
+ if (dict->getTable(buf))
+ {
+ DBUG_RETURN(NDBT_FAILED);
+ }
+
+ NdbDictionary::Table table_shadow(*pTabs[i]);
+ table_shadow.setName(buf);
+ if (dict->createTable(table_shadow))
+ {
+ g_err << "createTable(" << buf << ") "
+ << dict->getNdbError().code << " "
+ << dict->getNdbError().message << endl;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ pShadowTabs.push_back(dict->getTable(buf));
+ if (!pShadowTabs[i])
+ {
+ g_err << "getTable(" << buf << ") "
+ << dict->getNdbError().code << " "
+ << dict->getNdbError().message << endl;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ }
+ DBUG_RETURN(NDBT_OK);
+}
+
+static int dropAllShadows(NDBT_Context* ctx, NDBT_Step* step)
+{
+ DBUG_ENTER("dropAllShadows");
+ Ndb * ndb= GETNDB(step);
+ NdbDictionary::Dictionary * dict = ndb->getDictionary();
+
+ for (int i= 0; pTabs[i]; i++)
+ {
+ char buf[1024];
+ sprintf(buf, "%s_SHADOW", pTabs[i]->getName());
+ if (dict->dropTable(buf))
+ {
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ }
+ DBUG_RETURN(NDBT_OK);
+}
+
+static int start_transaction(Ndb *ndb, Vector<HugoOperations*> &ops)
+{
+ if (ops[0]->startTransaction(ndb) != NDBT_OK)
+ return -1;
+ NdbTransaction * t= ops[0]->getTransaction();
+ for (int i= ops.size()-1; i > 0; i--)
+ {
+ ops[i]->setTransaction(t,true);
+ }
+ return 0;
+}
+
+static int close_transaction(Ndb *ndb, Vector<HugoOperations*> &ops)
+{
+ if (ops[0]->closeTransaction(ndb) != NDBT_OK)
+ return -1;
+ for (int i= ops.size()-1; i > 0; i--)
+ {
+ ops[i]->setTransaction(NULL,true);
+ }
+ return 0;
+}
+
+static int execute_commit(Ndb *ndb, Vector<HugoOperations*> &ops)
+{
+ if (ops[0]->execute_Commit(ndb) != NDBT_OK)
+ return -1;
+ return 0;
+}
+
+static int copy_events(Ndb *ndb)
+{
+ DBUG_ENTER("copy_events");
int r= 0;
- int res;
- while (r < 10*records){
- //printf("now waiting for event...\n");
- res= GETNDB(step)->pollEvents(1000); // wait for event or 1000 ms
+ NdbDictionary::Dictionary * dict = ndb->getDictionary();
+ int n_inserts= 0;
+ int n_updates= 0;
+ int n_deletes= 0;
+ while (1)
+ {
+ int res= ndb->pollEvents(1000); // wait for event or 1000 ms
+ DBUG_PRINT("info", ("pollEvents res=%d", res));
if (res <= 0)
{
- ndbout_c("********************");
- continue;
+ break;
}
-
- //printf("got data! %d\n", r);
- int overrun= 0;
- while (pOp->next(&overrun) > 0)
+ NdbEventOperation *pOp;
+ while ((pOp= ndb->nextEvent()))
{
- if (overrun)
+ char buf[1024];
+ sprintf(buf, "%s_SHADOW", pOp->getTable()->getName());
+ const NdbDictionary::Table *table= dict->getTable(buf);
+
+ if (table == 0)
+ {
+ g_err << "unable to find table " << buf << endl;
+ DBUG_RETURN(-1);
+ }
+
+ if (pOp->isOverrun())
{
g_err << "buffer overrun\n";
- DBUG_RETURN(NDBT_FAILED);
+ DBUG_RETURN(-1);
}
r++;
-
- Uint32 gci= pOp->getGCI();
-
+
if (!pOp->isConsistent()) {
g_err << "A node failure has occured and events might be missing\n";
- DBUG_RETURN(NDBT_FAILED);
+ DBUG_RETURN(-1);
}
-
+
int noRetries= 0;
do
{
- NdbTransaction *trans= GETNDB(step)->startTransaction();
+ NdbTransaction *trans= ndb->startTransaction();
if (trans == 0)
{
g_err << "startTransaction failed "
- << GETNDB(step)->getNdbError().code << " "
- << GETNDB(step)->getNdbError().message << endl;
- DBUG_RETURN(NDBT_FAILED);
+ << ndb->getNdbError().code << " "
+ << ndb->getNdbError().message << endl;
+ DBUG_RETURN(-1);
}
- NdbOperation *op= trans->getNdbOperation(table_shadow);
+ NdbOperation *op= trans->getNdbOperation(table);
if (op == 0)
{
g_err << "getNdbOperation failed "
<< trans->getNdbError().code << " "
<< trans->getNdbError().message << endl;
- DBUG_RETURN(NDBT_FAILED);
+ DBUG_RETURN(-1);
}
switch (pOp->getEventType()) {
@@ -314,7 +1025,11 @@ int runEventApplier(NDBT_Context* ctx, NDBT_Step* step)
g_err << "insertTuple "
<< op->getNdbError().code << " "
<< op->getNdbError().message << endl;
- DBUG_RETURN(NDBT_FAILED);
+ DBUG_RETURN(-1);
+ }
+ if (noRetries == 0)
+ {
+ n_inserts++;
}
break;
case NdbDictionary::Event::TE_DELETE:
@@ -323,7 +1038,11 @@ int runEventApplier(NDBT_Context* ctx, NDBT_Step* step)
g_err << "deleteTuple "
<< op->getNdbError().code << " "
<< op->getNdbError().message << endl;
- DBUG_RETURN(NDBT_FAILED);
+ DBUG_RETURN(-1);
+ }
+ if (noRetries == 0)
+ {
+ n_deletes++;
}
break;
case NdbDictionary::Event::TE_UPDATE:
@@ -332,79 +1051,78 @@ int runEventApplier(NDBT_Context* ctx, NDBT_Step* step)
g_err << "updateTuple "
<< op->getNdbError().code << " "
<< op->getNdbError().message << endl;
- DBUG_RETURN(NDBT_FAILED);
+ DBUG_RETURN(-1);
+ }
+ if (noRetries == 0)
+ {
+ n_updates++;
}
break;
default:
abort();
}
-
- for (i= 0; i < n_columns; i++)
+
{
- if (recAttr[i]->isNULL())
+ for (const NdbRecAttr *pk= pOp->getFirstPkAttr();
+ pk;
+ pk= pk->next())
{
- if (table->getColumn(i)->getPrimaryKey())
+ if (pk->isNULL())
{
g_err << "internal error: primary key isNull()="
- << recAttr[i]->isNULL() << endl;
+ << pk->isNULL() << endl;
DBUG_RETURN(-1);
}
- switch (pOp->getEventType()) {
- case NdbDictionary::Event::TE_INSERT:
- if (recAttr[i]->isNULL() < 0)
- {
- g_err << "internal error: missing value for insert\n";
- DBUG_RETURN(NDBT_FAILED);
- }
- break;
- case NdbDictionary::Event::TE_DELETE:
- break;
- case NdbDictionary::Event::TE_UPDATE:
- break;
- default:
- abort();
+ if (op->equal(pk->getColumn()->getColumnNo(),pk->aRef()))
+ {
+ g_err << "equal " << pk->getColumn()->getColumnNo() << " "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
+ DBUG_RETURN(-1);
}
}
- if (table->getColumn(i)->getPrimaryKey() &&
- op->equal(i,recAttr[i]->aRef()))
- {
- g_err << "equal " << i << " "
- << op->getNdbError().code << " "
- << op->getNdbError().message << endl;
- DBUG_RETURN(NDBT_FAILED);
- }
}
switch (pOp->getEventType()) {
case NdbDictionary::Event::TE_INSERT:
- for (i= 0; i < n_columns; i++)
+ {
+ for (const NdbRecAttr *data= pOp->getFirstDataAttr();
+ data;
+ data= data->next())
{
- if (!table->getColumn(i)->getPrimaryKey() &&
- op->setValue(i,recAttr[i]->isNULL() ? 0:recAttr[i]->aRef()))
+ if (data->isNULL() < 0 ||
+ op->setValue(data->getColumn()->getColumnNo(),
+ data->isNULL() ? 0:data->aRef()))
{
- g_err << "setValue(insert) " << i << " "
- << op->getNdbError().code << " "
- << op->getNdbError().message << endl;
- DBUG_RETURN(NDBT_FAILED);
+ g_err << "setValue(insert) " << data->getColumn()->getColumnNo()
+ << " " << op->getNdbError().code
+ << " " << op->getNdbError().message << endl;
+ DBUG_RETURN(-1);
}
}
break;
+ }
case NdbDictionary::Event::TE_DELETE:
break;
case NdbDictionary::Event::TE_UPDATE:
- for (i= 0; i < n_columns; i++)
+ {
+ for (const NdbRecAttr *data= pOp->getFirstDataAttr();
+ data;
+ data= data->next())
{
- if (!table->getColumn(i)->getPrimaryKey() &&
- recAttr[i]->isNULL() >= 0 &&
- op->setValue(i,recAttr[i]->isNULL() ? 0:recAttr[i]->aRef()))
+ if (data->isNULL() >= 0 &&
+ op->setValue(data->getColumn()->getColumnNo(),
+ data->isNULL() ? 0:data->aRef()))
{
- g_err << "setValue(update) " << i << " "
- << op->getNdbError().code << " "
- << op->getNdbError().message << endl;
+ g_err << "setValue(update) " << data->getColumn()->getColumnNo()
+ << " " << op->getNdbError().code
+ << " " << op->getNdbError().message << endl;
DBUG_RETURN(-1);
}
}
break;
+ }
+ default:
case NdbDictionary::Event::TE_ALL:
abort();
}
@@ -421,33 +1139,275 @@ int runEventApplier(NDBT_Context* ctx, NDBT_Step* step)
<< trans->getNdbError().code << " "
<< trans->getNdbError().message << endl;
trans->close();
- DBUG_RETURN(NDBT_FAILED);
+ DBUG_RETURN(-1);
}
trans->close();
NdbSleep_MilliSleep(100); // sleep before retrying
} while(1);
+ } // for
+ } // while(1)
+ g_info << "n_updates: " << n_updates << " "
+ << "n_inserts: " << n_inserts << " "
+ << "n_deletes: " << n_deletes << endl;
+ DBUG_RETURN(r);
+}
+
+static int verify_copy(Ndb *ndb,
+ Vector<const NdbDictionary::Table *> &tabs1,
+ Vector<const NdbDictionary::Table *> &tabs2)
+{
+ for (unsigned i= 0; i < tabs1.size(); i++)
+ if (tabs1[i])
+ {
+ HugoTransactions hugoTrans(*tabs1[i]);
+ if (hugoTrans.compare(ndb, tabs2[i]->getName(), 0))
+ return -1;
+ }
+ return 0;
+}
+
+static int createEventOperations(Ndb * ndb)
+{
+ DBUG_ENTER("createEventOperations");
+ int i;
+
+ // create all event operations
+ for (i= 0; pTabs[i]; i++)
+ {
+ char buf[1024];
+ sprintf(buf, "%s_EVENT", pTabs[i]->getName());
+ NdbEventOperation *pOp= ndb->createEventOperation(buf);
+ if ( pOp == NULL )
+ {
+ DBUG_RETURN(NDBT_FAILED);
+ }
+
+ int n_columns= pTabs[i]->getNoOfColumns();
+ for (int j = 0; j < n_columns; j++)
+ {
+ pOp->getValue(pTabs[i]->getColumn(j)->getName());
+ pOp->getPreValue(pTabs[i]->getColumn(j)->getName());
+ }
+
+ if ( pOp->execute() )
+ {
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ }
+
+ DBUG_RETURN(NDBT_OK);
+}
+
+static int dropEventOperations(Ndb * ndb)
+{
+ DBUG_ENTER("dropEventOperations");
+
+ NdbEventOperation *pOp;
+ while ( (pOp= ndb->getEventOperation()) )
+ {
+ if (ndb->dropEventOperation(pOp))
+ {
+ DBUG_RETURN(NDBT_FAILED);
}
}
- if (GETNDB(step)->dropEventOperation(pOp)) {
- g_err << "dropEventOperation execution failed "
- << GETNDB(step)->getNdbError().code << " "
- << GETNDB(step)->getNdbError().message << endl;
+ DBUG_RETURN(NDBT_OK);
+}
+
+static int runMulti(NDBT_Context* ctx, NDBT_Step* step)
+{
+ DBUG_ENTER("runMulti");
+
+ Ndb * ndb= GETNDB(step);
+
+ int no_error= 1;
+ int i;
+
+ if (createEventOperations(ndb))
+ {
DBUG_RETURN(NDBT_FAILED);
}
-
+
+ // create a hugo operation per table
+ Vector<HugoOperations *> hugo_ops;
+ for (i= 0; no_error && pTabs[i]; i++)
+ {
+ hugo_ops.push_back(new HugoOperations(*pTabs[i]));
+ }
+
+ int n_records= 3;
+ // insert n_records records per table
+ do {
+ if (start_transaction(ndb, hugo_ops))
+ {
+ no_error= 0;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ for (i= 0; no_error && pTabs[i]; i++)
+ {
+ hugo_ops[i]->pkInsertRecord(ndb, 0, n_records);
+ }
+ if (execute_commit(ndb, hugo_ops))
+ {
+ no_error= 0;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ if(close_transaction(ndb, hugo_ops))
+ {
+ no_error= 0;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ } while(0);
+
+ // copy events and verify
+ do {
+ if (copy_events(ndb) < 0)
+ {
+ no_error= 0;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ if (verify_copy(ndb, pTabs, pShadowTabs))
+ {
+ no_error= 0;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ } while (0);
+
+ // update n_records-1 records in first table
+ do {
+ if (start_transaction(ndb, hugo_ops))
+ {
+ no_error= 0;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+
+ hugo_ops[0]->pkUpdateRecord(ndb, n_records-1);
+
+ if (execute_commit(ndb, hugo_ops))
+ {
+ no_error= 0;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ if(close_transaction(ndb, hugo_ops))
+ {
+ no_error= 0;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ } while(0);
+
+ // copy events and verify
+ do {
+ if (copy_events(ndb) < 0)
+ {
+ no_error= 0;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ if (verify_copy(ndb, pTabs, pShadowTabs))
+ {
+ no_error= 0;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ } while (0);
+
+ if (dropEventOperations(ndb))
+ {
+ DBUG_RETURN(NDBT_FAILED);
+ }
+
+ if (no_error)
+ DBUG_RETURN(NDBT_OK);
+ DBUG_RETURN(NDBT_FAILED);
+}
+
+static int runMulti_NR(NDBT_Context* ctx, NDBT_Step* step)
+{
+ DBUG_ENTER("runMulti");
+
+ int records = ctx->getNumRecords();
+ int loops = ctx->getNumLoops();
+ Ndb * ndb= GETNDB(step);
+
+ int i;
+
+ if (createEventOperations(ndb))
+ {
+ DBUG_RETURN(NDBT_FAILED);
+ }
+
+ for (i= 0; pTabs[i]; i++)
+ {
+ HugoTransactions hugo(*pTabs[i]);
+ if (hugo.loadTable(ndb, records, 1, true, 1))
+ {
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ // copy events and verify
+ if (copy_events(ndb) < 0)
+ {
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ }
+
+ if (verify_copy(ndb, pTabs, pShadowTabs))
+ {
+ DBUG_RETURN(NDBT_FAILED);
+ }
+
+ {
+ NdbRestarts restarts;
+ for (int j= 0; j < loops; j++)
+ {
+ // restart a node
+ int timeout = 240;
+ if (restarts.executeRestart("RestartRandomNodeAbort", timeout))
+ {
+ DBUG_RETURN(NDBT_FAILED);
+ }
+
+ sleep(5);
+ // update all tables
+ for (i= 0; pTabs[i]; i++)
+ {
+ HugoTransactions hugo(*pTabs[i]);
+ if (hugo.pkUpdateRecords(ndb, records, 1, 1))
+ {
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ if (copy_events(ndb) < 0)
+ {
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ }
+
+ // copy events and verify
+ if (verify_copy(ndb, pTabs, pShadowTabs))
+ {
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ }
+ }
+
+ if (dropEventOperations(ndb))
+ {
+ DBUG_RETURN(NDBT_FAILED);
+ }
+
DBUG_RETURN(NDBT_OK);
}
-// INITIALIZER(runInsert);
-// STEP(runPkRead);
-// VERIFIER(runVerifyInsert);
-// FINALIZER(runClearTable);
NDBT_TESTSUITE(test_event);
TESTCASE("BasicEventOperation",
"Verify that we can listen to Events"
- "NOTE! No errors are allowed!" ){
+ "NOTE! No errors are allowed!" )
+{
+#if 0
+ TABLE("T1");
+ TABLE("T3");
+ TABLE("T5");
+ TABLE("T6");
+ TABLE("T8");
+#endif
INITIALIZER(runCreateEvent);
STEP(runEventOperation);
STEP(runEventLoad);
@@ -480,10 +1440,51 @@ TESTCASE("EventOperationApplier",
FINALIZER(runDropEvent);
FINALIZER(runVerify);
}
+TESTCASE("EventOperationApplier_NR",
+ "Verify that if we apply the data we get from event "
+ "operation is the same as the original table"
+ "NOTE! No errors are allowed!" ){
+ INITIALIZER(runCreateEvent);
+ INITIALIZER(runCreateShadowTable);
+ STEP(runEventApplier);
+ STEP(runEventMixedLoad);
+ STEP(runRestarter);
+ FINALIZER(runDropEvent);
+ FINALIZER(runVerify);
+}
+TESTCASE("Multi",
+ "Verify that we can work with all tables in parallell"
+ "NOTE! HugoOperations::startTransaction, pTrans != NULL errors, "
+ "are allowed!" ){
+ ALL_TABLES();
+ INITIALIZER(getAllTables);
+ INITIALIZER(createAllEvents);
+ INITIALIZER(createAllShadows);
+ STEP(runMulti);
+ FINALIZER(dropAllShadows);
+ FINALIZER(dropAllEvents);
+}
+TESTCASE("Multi_NR",
+ "Verify that we can work with all tables in parallell"
+ "NOTE! HugoOperations::startTransaction, pTrans != NULL errors, "
+ "are allowed!" ){
+ ALL_TABLES();
+ INITIALIZER(getAllTables);
+ INITIALIZER(createAllEvents);
+ INITIALIZER(createAllShadows);
+ STEP(runMulti_NR);
+ FINALIZER(dropAllShadows);
+ FINALIZER(dropAllEvents);
+}
NDBT_TESTSUITE_END(test_event);
int main(int argc, const char** argv){
ndb_init();
+ test_event.setCreateAllTables(true);
return test_event.execute(argc, argv);
}
+template class Vector<HugoOperations *>;
+template class Vector<NdbEventOperation *>;
+template class Vector<NdbRecAttr*>;
+template class Vector<Vector<NdbRecAttr*> >;
diff --git a/storage/ndb/test/ndbapi/test_event_multi_table.cpp b/storage/ndb/test/ndbapi/test_event_multi_table.cpp
index f16504029fa..a4c2833a789 100644
--- a/storage/ndb/test/ndbapi/test_event_multi_table.cpp
+++ b/storage/ndb/test/ndbapi/test_event_multi_table.cpp
@@ -21,6 +21,8 @@
#include <HugoTransactions.hpp>
#include <UtilTransactions.hpp>
#include <TestNdbEventOperation.hpp>
+#include <NdbRestarter.hpp>
+#include <NdbRestarts.hpp>
static void usage()
{
@@ -57,189 +59,182 @@ static int execute_commit(Ndb *ndb, Vector<HugoOperations*> &ops)
return 0;
}
-static int copy_events(Ndb *ndb,
- Vector<NdbEventOperation *> &ops,
- Vector<const NdbDictionary::Table *> &tabs,
- Vector<Vector<NdbRecAttr *> > &values)
+static int copy_events(Ndb *ndb)
{
DBUG_ENTER("copy_events");
int r= 0;
+ NdbDictionary::Dictionary * dict = ndb->getDictionary();
while (1)
{
int res= ndb->pollEvents(1000); // wait for event or 1000 ms
- DBUG_PRINT("info", ("pollEvents res=%d", r));
+ DBUG_PRINT("info", ("pollEvents res=%d", res));
if (res <= 0)
{
break;
}
- for (unsigned i_ops= 0; i_ops < ops.size(); i_ops++)
+ int error= 0;
+ NdbEventOperation *pOp;
+ while ((pOp= ndb->nextEvent(&error)))
{
- NdbEventOperation *pOp= ops[i_ops];
- const NdbDictionary::Table *table= tabs[i_ops];
- Vector<NdbRecAttr *> &recAttr= values[i_ops];
-
- int overrun= 0;
- unsigned i;
- unsigned n_columns= table->getNoOfColumns();
- while (pOp->next(&overrun) > 0)
+ char buf[1024];
+ sprintf(buf, "%s_SHADOW", pOp->getTable()->getName());
+ const NdbDictionary::Table *table= dict->getTable(buf);
+
+ if (table == 0)
+ {
+ g_err << "unable to find table " << buf << endl;
+ DBUG_RETURN(-1);
+ }
+
+ if (pOp->isOverrun())
{
- if (overrun)
+ g_err << "buffer overrun\n";
+ DBUG_RETURN(-1);
+ }
+ r++;
+
+ Uint32 gci= pOp->getGCI();
+
+ if (!pOp->isConsistent()) {
+ g_err << "A node failure has occured and events might be missing\n";
+ DBUG_RETURN(-1);
+ }
+
+ int noRetries= 0;
+ do
+ {
+ NdbTransaction *trans= ndb->startTransaction();
+ if (trans == 0)
{
- g_err << "buffer overrun\n";
+ g_err << "startTransaction failed "
+ << ndb->getNdbError().code << " "
+ << ndb->getNdbError().message << endl;
DBUG_RETURN(-1);
}
- r++;
- Uint32 gci= pOp->getGCI();
-
- if (!pOp->isConsistent()) {
- g_err << "A node failure has occured and events might be missing\n";
+ NdbOperation *op= trans->getNdbOperation(table);
+ if (op == 0)
+ {
+ g_err << "getNdbOperation failed "
+ << trans->getNdbError().code << " "
+ << trans->getNdbError().message << endl;
DBUG_RETURN(-1);
}
- int noRetries= 0;
- do
- {
- NdbTransaction *trans= ndb->startTransaction();
- if (trans == 0)
+ switch (pOp->getEventType()) {
+ case NdbDictionary::Event::TE_INSERT:
+ if (op->insertTuple())
{
- g_err << "startTransaction failed "
- << ndb->getNdbError().code << " "
- << ndb->getNdbError().message << endl;
+ g_err << "insertTuple "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
DBUG_RETURN(-1);
}
-
- NdbOperation *op= trans->getNdbOperation(table);
- if (op == 0)
+ break;
+ case NdbDictionary::Event::TE_DELETE:
+ if (op->deleteTuple())
{
- g_err << "getNdbOperation failed "
- << trans->getNdbError().code << " "
- << trans->getNdbError().message << endl;
+ g_err << "deleteTuple "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
DBUG_RETURN(-1);
}
-
- switch (pOp->getEventType()) {
- case NdbDictionary::Event::TE_INSERT:
- if (op->insertTuple())
+ break;
+ case NdbDictionary::Event::TE_UPDATE:
+ if (op->updateTuple())
+ {
+ g_err << "updateTuple "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
+ DBUG_RETURN(-1);
+ }
+ break;
+ default:
+ abort();
+ }
+
+ {
+ for (const NdbRecAttr *pk= pOp->getFirstPkAttr(); pk; pk= pk->next())
+ {
+ if (pk->isNULL())
{
- g_err << "insertTuple "
- << op->getNdbError().code << " "
- << op->getNdbError().message << endl;
- DBUG_RETURN(-1);
+ g_err << "internal error: primary key isNull()="
+ << pk->isNULL() << endl;
+ DBUG_RETURN(NDBT_FAILED);
}
- break;
- case NdbDictionary::Event::TE_DELETE:
- if (op->deleteTuple())
+ if (op->equal(pk->getColumn()->getColumnNo(),pk->aRef()))
{
- g_err << "deleteTuple "
+ g_err << "equal " << pk->getColumn()->getColumnNo() << " "
<< op->getNdbError().code << " "
<< op->getNdbError().message << endl;
- DBUG_RETURN(-1);
+ DBUG_RETURN(NDBT_FAILED);
}
- break;
- case NdbDictionary::Event::TE_UPDATE:
- if (op->updateTuple())
+ }
+ }
+
+ switch (pOp->getEventType()) {
+ case NdbDictionary::Event::TE_INSERT:
+ {
+ for (const NdbRecAttr *data= pOp->getFirstDataAttr(); data; data= data->next())
+ {
+ if (data->isNULL() < 0 ||
+ op->setValue(data->getColumn()->getColumnNo(),
+ data->isNULL() ? 0:data->aRef()))
{
- g_err << "updateTuple "
+ g_err << "setValue(insert) " << data->getColumn()->getColumnNo() << " "
<< op->getNdbError().code << " "
<< op->getNdbError().message << endl;
DBUG_RETURN(-1);
}
- break;
- default:
- abort();
}
-
- for (i= 0; i < n_columns; i++)
+ break;
+ }
+ case NdbDictionary::Event::TE_DELETE:
+ break;
+ case NdbDictionary::Event::TE_UPDATE:
+ {
+ for (const NdbRecAttr *data= pOp->getFirstDataAttr(); data; data= data->next())
{
- if (recAttr[i]->isNULL())
+ if (data->isNULL() >= 0 &&
+ op->setValue(data->getColumn()->getColumnNo(),
+ data->isNULL() ? 0:data->aRef()))
{
- if (table->getColumn(i)->getPrimaryKey())
- {
- g_err << "internal error: primary key isNull()="
- << recAttr[i]->isNULL() << endl;
- DBUG_RETURN(NDBT_FAILED);
- }
- switch (pOp->getEventType()) {
- case NdbDictionary::Event::TE_INSERT:
- if (recAttr[i]->isNULL() < 0)
- {
- g_err << "internal error: missing value for insert\n";
- DBUG_RETURN(NDBT_FAILED);
- }
- break;
- case NdbDictionary::Event::TE_DELETE:
- break;
- case NdbDictionary::Event::TE_UPDATE:
- break;
- default:
- abort();
- }
- }
- if (table->getColumn(i)->getPrimaryKey() &&
- op->equal(i,recAttr[i]->aRef()))
- {
- g_err << "equal " << i << " "
+ g_err << "setValue(update) " << data->getColumn()->getColumnNo() << " "
<< op->getNdbError().code << " "
<< op->getNdbError().message << endl;
DBUG_RETURN(NDBT_FAILED);
}
}
-
- switch (pOp->getEventType()) {
- case NdbDictionary::Event::TE_INSERT:
- for (i= 0; i < n_columns; i++)
- {
- if (!table->getColumn(i)->getPrimaryKey() &&
- op->setValue(i,recAttr[i]->isNULL() ? 0:recAttr[i]->aRef()))
- {
- g_err << "setValue(insert) " << i << " "
- << op->getNdbError().code << " "
- << op->getNdbError().message << endl;
- DBUG_RETURN(-1);
- }
- }
- break;
- case NdbDictionary::Event::TE_DELETE:
- break;
- case NdbDictionary::Event::TE_UPDATE:
- for (i= 0; i < n_columns; i++)
- {
- if (!table->getColumn(i)->getPrimaryKey() &&
- recAttr[i]->isNULL() >= 0 &&
- op->setValue(i,recAttr[i]->isNULL() ? 0:recAttr[i]->aRef()))
- {
- g_err << "setValue(update) " << i << " "
- << op->getNdbError().code << " "
- << op->getNdbError().message << endl;
- DBUG_RETURN(NDBT_FAILED);
- }
- }
- break;
- case NdbDictionary::Event::TE_ALL:
- abort();
- }
- if (trans->execute(Commit) == 0)
- {
- trans->close();
- // everything ok
- break;
- }
- if (noRetries++ == 10 ||
- trans->getNdbError().status != NdbError::TemporaryError)
- {
- g_err << "execute " << r << " failed "
- << trans->getNdbError().code << " "
- << trans->getNdbError().message << endl;
- trans->close();
- DBUG_RETURN(-1);
- }
+ break;
+ }
+ case NdbDictionary::Event::TE_ALL:
+ abort();
+ }
+ if (trans->execute(Commit) == 0)
+ {
trans->close();
- NdbSleep_MilliSleep(100); // sleep before retying
- } while(1);
- }
+ // everything ok
+ break;
+ }
+ if (noRetries++ == 10 ||
+ trans->getNdbError().status != NdbError::TemporaryError)
+ {
+ g_err << "execute " << r << " failed "
+ << trans->getNdbError().code << " "
+ << trans->getNdbError().message << endl;
+ trans->close();
+ DBUG_RETURN(-1);
+ }
+ trans->close();
+ NdbSleep_MilliSleep(100); // sleep before retrying
+ } while(1);
+ } // for
+ if (error)
+ {
+ g_err << "nextEvent()\n";
+ DBUG_RETURN(-1);
}
- }
+ } // while(1)
DBUG_RETURN(r);
}
@@ -302,19 +297,39 @@ main(int argc, char** argv)
// create all tables
Vector<const NdbDictionary::Table*> pTabs;
- for (i= 0; no_error && argc; argc--, i++)
+ if (argc == 0)
{
- dict->dropTable(argv[i]);
- NDBT_Tables::createTable(&ndb, argv[i]);
- const NdbDictionary::Table *pTab= dict->getTable(argv[i]);
- if (pTab == 0)
+ NDBT_Tables::dropAllTables(&ndb);
+ NDBT_Tables::createAllTables(&ndb);
+ for (i= 0; no_error && i < NDBT_Tables::getNumTables(); i++)
{
- ndbout << "Failed to create table" << endl;
- ndbout << dict->getNdbError() << endl;
- no_error= 0;
- break;
+ const NdbDictionary::Table *pTab= dict->getTable(NDBT_Tables::getTable(i)->getName());
+ if (pTab == 0)
+ {
+ ndbout << "Failed to create table" << endl;
+ ndbout << dict->getNdbError() << endl;
+ no_error= 0;
+ break;
+ }
+ pTabs.push_back(pTab);
+ }
+ }
+ else
+ {
+ for (i= 0; no_error && argc; argc--, i++)
+ {
+ dict->dropTable(argv[i]);
+ NDBT_Tables::createTable(&ndb, argv[i]);
+ const NdbDictionary::Table *pTab= dict->getTable(argv[i]);
+ if (pTab == 0)
+ {
+ ndbout << "Failed to create table" << endl;
+ ndbout << dict->getNdbError() << endl;
+ no_error= 0;
+ break;
+ }
+ pTabs.push_back(pTab);
}
- pTabs.push_back(pTab);
}
pTabs.push_back(NULL);
@@ -344,19 +359,13 @@ main(int argc, char** argv)
}
// get storage for each event operation
- Vector<Vector<NdbRecAttr*> > values;
- Vector<Vector<NdbRecAttr*> > pre_values;
for (i= 0; no_error && pTabs[i]; i++)
{
int n_columns= pTabs[i]->getNoOfColumns();
- Vector<NdbRecAttr*> tmp_a;
- Vector<NdbRecAttr*> tmp_b;
for (int j = 0; j < n_columns; j++) {
- tmp_a.push_back(pOps[i]->getValue(pTabs[i]->getColumn(j)->getName()));
- tmp_b.push_back(pOps[i]->getPreValue(pTabs[i]->getColumn(j)->getName()));
+ pOps[i]->getValue(pTabs[i]->getColumn(j)->getName());
+ pOps[i]->getPreValue(pTabs[i]->getColumn(j)->getName());
}
- values.push_back(tmp_a);
- pre_values.push_back(tmp_b);
}
// start receiving events
@@ -401,9 +410,8 @@ main(int argc, char** argv)
hugo_ops.push_back(new HugoOperations(*pTabs[i]));
}
- sleep(5);
-
- // insert 3 records per table
+ int n_records= 3;
+ // insert n_records records per table
do {
if (start_transaction(&ndb, hugo_ops))
{
@@ -412,7 +420,7 @@ main(int argc, char** argv)
}
for (i= 0; no_error && pTabs[i]; i++)
{
- hugo_ops[i]->pkInsertRecord(&ndb, 0, 3);
+ hugo_ops[i]->pkInsertRecord(&ndb, 0, n_records);
}
if (execute_commit(&ndb, hugo_ops))
{
@@ -428,7 +436,7 @@ main(int argc, char** argv)
// copy events and verify
do {
- if (copy_events(&ndb, pOps, pShadowTabs, values) < 0)
+ if (copy_events(&ndb) < 0)
{
no_error= 0;
break;
@@ -440,7 +448,7 @@ main(int argc, char** argv)
}
} while (0);
- // update 2 records in first table
+ // update n_records-1 records in first table
do {
if (start_transaction(&ndb, hugo_ops))
{
@@ -448,7 +456,7 @@ main(int argc, char** argv)
break;
}
- hugo_ops[0]->pkUpdateRecord(&ndb, 2);
+ hugo_ops[0]->pkUpdateRecord(&ndb, n_records-1);
if (execute_commit(&ndb, hugo_ops))
{
@@ -464,7 +472,7 @@ main(int argc, char** argv)
// copy events and verify
do {
- if (copy_events(&ndb, pOps, pShadowTabs, values) < 0)
+ if (copy_events(&ndb) < 0)
{
no_error= 0;
break;
@@ -476,6 +484,70 @@ main(int argc, char** argv)
}
} while (0);
+
+ {
+ NdbRestarts restarts;
+ for (int j= 0; j < 10; j++)
+ {
+ // restart a node
+ if (no_error)
+ {
+ int timeout = 240;
+ if (restarts.executeRestart("RestartRandomNodeAbort", timeout))
+ {
+ no_error= 0;
+ break;
+ }
+ }
+
+ // update all n_records records on all tables
+ if (start_transaction(&ndb, hugo_ops))
+ {
+ no_error= 0;
+ break;
+ }
+
+ for (int r= 0; r < n_records; r++)
+ {
+ for (i= 0; pTabs[i]; i++)
+ {
+ hugo_ops[i]->pkUpdateRecord(&ndb, r);
+ }
+ }
+ if (execute_commit(&ndb, hugo_ops))
+ {
+ no_error= 0;
+ break;
+ }
+ if(close_transaction(&ndb, hugo_ops))
+ {
+ no_error= 0;
+ break;
+ }
+
+ // copy events and verify
+ if (copy_events(&ndb) < 0)
+ {
+ no_error= 0;
+ break;
+ }
+ if (verify_copy(&ndb, pTabs, pShadowTabs))
+ {
+ no_error= 0;
+ break;
+ }
+ }
+ }
+
+ // drop the event operations
+ for (i= 0; i < (int)pOps.size(); i++)
+ {
+ if (ndb.dropEventOperation(pOps[i]))
+ {
+ no_error= 0;
+ }
+ }
+
if (no_error)
DBUG_RETURN(NDBT_ProgramExit(NDBT_OK));
DBUG_RETURN(NDBT_ProgramExit(NDBT_FAILED));
diff --git a/storage/ndb/test/run-test/Makefile.am b/storage/ndb/test/run-test/Makefile.am
index 903e3ab824e..34d164b09dd 100644
--- a/storage/ndb/test/run-test/Makefile.am
+++ b/storage/ndb/test/run-test/Makefile.am
@@ -11,7 +11,10 @@ test_DATA=daily-basic-tests.txt daily-devel-tests.txt 16node-tests.txt \
conf-daily-basic-shark.txt \
conf-daily-devel-ndbmaster.txt \
conf-daily-sql-ndbmaster.txt \
- conf-daily-basic-dl145a.txt
+ conf-daily-basic-dl145a.txt \
+ conf-daily-basic-ndb08.txt \
+ conf-daily-devel-ndb08.txt \
+ conf-daily-sql-ndb08.txt
test_SCRIPTS=atrt-analyze-result.sh atrt-gather-result.sh atrt-setup.sh \
atrt-clear-result.sh make-config.sh make-index.sh make-html-reports.sh
diff --git a/storage/ndb/test/run-test/conf-daily-basic-ndb08.txt b/storage/ndb/test/run-test/conf-daily-basic-ndb08.txt
new file mode 100644
index 00000000000..bcd809593f3
--- /dev/null
+++ b/storage/ndb/test/run-test/conf-daily-basic-ndb08.txt
@@ -0,0 +1,19 @@
+baseport: 14000
+basedir: /space/autotest
+mgm: CHOOSE_host1
+ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
+api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
+-- cluster config
+[DB DEFAULT]
+NoOfReplicas: 2
+IndexMemory: 100M
+DataMemory: 300M
+BackupMemory: 64M
+MaxNoOfConcurrentScans: 100
+DataDir: .
+FileSystemPath: /space/autotest/run
+
+[MGM DEFAULT]
+PortNumber: 14000
+ArbitrationRank: 1
+DataDir: .
diff --git a/storage/ndb/test/run-test/conf-daily-devel-ndb08.txt b/storage/ndb/test/run-test/conf-daily-devel-ndb08.txt
new file mode 100644
index 00000000000..8b340e6a39d
--- /dev/null
+++ b/storage/ndb/test/run-test/conf-daily-devel-ndb08.txt
@@ -0,0 +1,19 @@
+baseport: 16000
+basedir: /space/autotest
+mgm: CHOOSE_host1
+ndb: CHOOSE_host2 CHOOSE_host3 CHOOSE_host2 CHOOSE_host3
+api: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1
+-- cluster config
+[DB DEFAULT]
+NoOfReplicas: 2
+IndexMemory: 100M
+DataMemory: 300M
+BackupMemory: 64M
+MaxNoOfConcurrentScans: 100
+DataDir: .
+FileSystemPath: /space/autotest/run
+
+[MGM DEFAULT]
+PortNumber: 16000
+ArbitrationRank: 1
+DataDir: .
diff --git a/storage/ndb/test/run-test/conf-daily-sql-ndb08.txt b/storage/ndb/test/run-test/conf-daily-sql-ndb08.txt
new file mode 100644
index 00000000000..0d6a99f8d48
--- /dev/null
+++ b/storage/ndb/test/run-test/conf-daily-sql-ndb08.txt
@@ -0,0 +1,20 @@
+baseport: 16000
+basedir: /space/autotest
+mgm: CHOOSE_host1
+ndb: CHOOSE_host2 CHOOSE_host3
+mysqld: CHOOSE_host1 CHOOSE_host4
+mysql: CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host1 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4 CHOOSE_host4
+-- cluster config
+[DB DEFAULT]
+NoOfReplicas: 2
+IndexMemory: 100M
+DataMemory: 300M
+BackupMemory: 64M
+MaxNoOfConcurrentScans: 100
+DataDir: .
+FileSystemPath: /space/autotest/run
+
+[MGM DEFAULT]
+PortNumber: 16000
+ArbitrationRank: 1
+DataDir: .
diff --git a/storage/ndb/test/run-test/daily-devel-tests.txt b/storage/ndb/test/run-test/daily-devel-tests.txt
index 20f54e031e5..67cf25a6f4d 100644
--- a/storage/ndb/test/run-test/daily-devel-tests.txt
+++ b/storage/ndb/test/run-test/daily-devel-tests.txt
@@ -208,3 +208,17 @@ max-time: 2500
cmd: test_event
args: -n EventOperationApplier
+#
+max-time: 2500
+cmd: test_event
+args: -n EventOperationApplier_NR
+
+#
+max-time: 2500
+cmd: test_event
+args: -n Multi
+
+max-time: 600
+cmd: testBasic
+args: -n PkRead T1
+
diff --git a/storage/ndb/test/sql/BANK.sql b/storage/ndb/test/sql/BANK.sql
new file mode 100644
index 00000000000..055761621e3
--- /dev/null
+++ b/storage/ndb/test/sql/BANK.sql
@@ -0,0 +1,44 @@
+CREATE DATABASE IF NOT EXISTS BANK default charset=latin1 default collate=latin1_bin;
+USE BANK;
+DROP TABLE IF EXISTS GL;
+CREATE TABLE GL ( TIME BIGINT UNSIGNED NOT NULL,
+ ACCOUNT_TYPE INT UNSIGNED NOT NULL,
+ BALANCE INT UNSIGNED NOT NULL,
+ DEPOSIT_COUNT INT UNSIGNED NOT NULL,
+ DEPOSIT_SUM INT UNSIGNED NOT NULL,
+ WITHDRAWAL_COUNT INT UNSIGNED NOT NULL,
+ WITHDRAWAL_SUM INT UNSIGNED NOT NULL,
+ PURGED INT UNSIGNED NOT NULL,
+ PRIMARY KEY USING HASH (TIME,ACCOUNT_TYPE))
+ ENGINE = NDB;
+
+DROP TABLE IF EXISTS ACCOUNT;
+CREATE TABLE ACCOUNT ( ACCOUNT_ID INT UNSIGNED NOT NULL,
+ OWNER INT UNSIGNED NOT NULL,
+ BALANCE INT UNSIGNED NOT NULL,
+ ACCOUNT_TYPE INT UNSIGNED NOT NULL,
+ PRIMARY KEY USING HASH (ACCOUNT_ID))
+ ENGINE = NDB;
+
+DROP TABLE IF EXISTS TRANSACTION;
+CREATE TABLE TRANSACTION ( TRANSACTION_ID BIGINT UNSIGNED NOT NULL,
+ ACCOUNT INT UNSIGNED NOT NULL,
+ ACCOUNT_TYPE INT UNSIGNED NOT NULL,
+ OTHER_ACCOUNT INT UNSIGNED NOT NULL,
+ TRANSACTION_TYPE INT UNSIGNED NOT NULL,
+ TIME BIGINT UNSIGNED NOT NULL,
+ AMOUNT INT UNSIGNED NOT NULL,
+ PRIMARY KEY USING HASH (TRANSACTION_ID,ACCOUNT))
+ ENGINE = NDB;
+
+DROP TABLE IF EXISTS SYSTEM_VALUES;
+CREATE TABLE SYSTEM_VALUES ( SYSTEM_VALUES_ID INT UNSIGNED NOT NULL,
+ VALUE BIGINT UNSIGNED NOT NULL,
+ PRIMARY KEY USING HASH (SYSTEM_VALUES_ID))
+ ENGINE = NDB;
+
+DROP TABLE IF EXISTS ACCOUNT_TYPE;
+CREATE TABLE ACCOUNT_TYPE ( ACCOUNT_TYPE_ID INT UNSIGNED NOT NULL,
+ DESCRIPTION CHAR(64) NOT NULL,
+ PRIMARY KEY USING HASH (ACCOUNT_TYPE_ID))
+ ENGINE = NDB;
diff --git a/storage/ndb/test/sql/T1.sql b/storage/ndb/test/sql/T1.sql
new file mode 100644
index 00000000000..754211c4eac
--- /dev/null
+++ b/storage/ndb/test/sql/T1.sql
@@ -0,0 +1,9 @@
+create database if not exists TEST_DB;
+use TEST_DB;
+drop table if exists T1;
+create table T1 (KOL1 int unsigned not null,
+ KOL2 int unsigned not null,
+ KOL3 int unsigned not null,
+ KOL4 int unsigned not null,
+ KOL5 int unsigned not null,
+ primary key using hash(KOL1)) engine=ndb;
diff --git a/storage/ndb/test/src/HugoOperations.cpp b/storage/ndb/test/src/HugoOperations.cpp
index f2e54971766..6cd40179205 100644
--- a/storage/ndb/test/src/HugoOperations.cpp
+++ b/storage/ndb/test/src/HugoOperations.cpp
@@ -31,9 +31,9 @@ int HugoOperations::startTransaction(Ndb* pNdb){
return NDBT_OK;
}
-int HugoOperations::setTransaction(NdbTransaction* new_trans){
+int HugoOperations::setTransaction(NdbTransaction* new_trans, bool not_null_ok){
- if (pTrans != NULL){
+ if (pTrans != NULL && !not_null_ok){
ndbout << "HugoOperations::startTransaction, pTrans != NULL" << endl;
return NDBT_FAILED;
}
diff --git a/storage/ndb/test/src/HugoTransactions.cpp b/storage/ndb/test/src/HugoTransactions.cpp
index 3260b921985..b8614369f2d 100644
--- a/storage/ndb/test/src/HugoTransactions.cpp
+++ b/storage/ndb/test/src/HugoTransactions.cpp
@@ -24,6 +24,7 @@ HugoTransactions::HugoTransactions(const NdbDictionary::Table& _tab,
row(_tab){
m_defaultScanUpdateMethod = 3;
+ setRetryMax();
}
HugoTransactions::~HugoTransactions(){
@@ -40,13 +41,12 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
{
int retryAttempt = 0;
- const int retryMax = 100;
int check, a;
NdbScanOperation *pOp;
while (true){
- if (retryAttempt >= retryMax){
+ if (retryAttempt >= m_retryMax){
g_err << "ERROR: has retried this operation " << retryAttempt
<< " times, failing!" << endl;
return NDBT_FAILED;
@@ -192,13 +192,12 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
{
int retryAttempt = 0;
- const int retryMax = 100;
int check, a;
NdbIndexScanOperation *pOp;
while (true){
- if (retryAttempt >= retryMax){
+ if (retryAttempt >= m_retryMax){
g_err << "ERROR: has retried this operation " << retryAttempt
<< " times, failing!" << endl;
return NDBT_FAILED;
@@ -378,14 +377,13 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb,
int abortPercent,
int parallelism){
int retryAttempt = 0;
- const int retryMax = 100;
int check, a;
NdbScanOperation *pOp;
while (true){
restart:
- if (retryAttempt++ >= retryMax){
+ if (retryAttempt++ >= m_retryMax){
g_info << "ERROR: has retried this operation " << retryAttempt
<< " times, failing!" << endl;
return NDBT_FAILED;
@@ -478,6 +476,8 @@ restart:
if(check != -1){
check = pTrans->execute(Commit);
+ if(check != -1)
+ m_latest_gci = pTrans->getGCI();
pTrans->restart();
}
@@ -585,6 +585,8 @@ HugoTransactions::loadTable(Ndb* pNdb,
// closeTrans = true;
closeTrans = false;
check = pTrans->execute( Commit );
+ if(check != -1)
+ m_latest_gci = pTrans->getGCI();
pTrans->restart();
} else {
closeTrans = false;
@@ -757,6 +759,7 @@ HugoTransactions::fillTable(Ndb* pNdb,
}
}
else{
+ m_latest_gci = pTrans->getGCI();
closeTransaction(pNdb);
}
@@ -768,285 +771,6 @@ HugoTransactions::fillTable(Ndb* pNdb,
}
int
-HugoTransactions::createEvent(Ndb* pNdb){
-
- char eventName[1024];
- sprintf(eventName,"%s_EVENT",tab.getName());
-
- NdbDictionary::Dictionary *myDict = pNdb->getDictionary();
-
- if (!myDict) {
- g_err << "Dictionary not found "
- << pNdb->getNdbError().code << " "
- << pNdb->getNdbError().message << endl;
- return NDBT_FAILED;
- }
-
- NdbDictionary::Event myEvent(eventName);
- myEvent.setTable(tab.getName());
- myEvent.addTableEvent(NdbDictionary::Event::TE_ALL);
- // myEvent.addTableEvent(NdbDictionary::Event::TE_INSERT);
- // myEvent.addTableEvent(NdbDictionary::Event::TE_UPDATE);
- // myEvent.addTableEvent(NdbDictionary::Event::TE_DELETE);
-
- // const NdbDictionary::Table *_table = myDict->getTable(tab.getName());
- for(int a = 0; a < tab.getNoOfColumns(); a++){
- // myEvent.addEventColumn(_table->getColumn(a)->getName());
- myEvent.addEventColumn(a);
- }
-
- int res = myDict->createEvent(myEvent); // Add event to database
-
- if (res == 0)
- myEvent.print();
- else if (myDict->getNdbError().classification ==
- NdbError::SchemaObjectExists)
- {
- g_info << "Event creation failed event exists\n";
- res = myDict->dropEvent(eventName);
- if (res) {
- g_err << "Failed to drop event: "
- << myDict->getNdbError().code << " : "
- << myDict->getNdbError().message << endl;
- return NDBT_FAILED;
- }
- // try again
- res = myDict->createEvent(myEvent); // Add event to database
- if (res) {
- g_err << "Failed to create event (1): "
- << myDict->getNdbError().code << " : "
- << myDict->getNdbError().message << endl;
- return NDBT_FAILED;
- }
- }
- else
- {
- g_err << "Failed to create event (2): "
- << myDict->getNdbError().code << " : "
- << myDict->getNdbError().message << endl;
- return NDBT_FAILED;
- }
-
- return NDBT_OK;
-}
-
-#include <NdbEventOperation.hpp>
-#include "TestNdbEventOperation.hpp"
-#include <NdbAutoPtr.hpp>
-
-struct receivedEvent {
- Uint32 pk;
- Uint32 count;
- Uint32 event;
-};
-
-int XXXXX = 0;
-
-int
-HugoTransactions::eventOperation(Ndb* pNdb, void* pstats,
- int records) {
- int myXXXXX = XXXXX++;
- Uint32 i;
- const char function[] = "HugoTransactions::eventOperation: ";
- struct receivedEvent* recInsertEvent;
- NdbAutoObjArrayPtr<struct receivedEvent>
- p00( recInsertEvent = new struct receivedEvent[3*records] );
- struct receivedEvent* recUpdateEvent = &recInsertEvent[records];
- struct receivedEvent* recDeleteEvent = &recInsertEvent[2*records];
-
- EventOperationStats &stats = *(EventOperationStats*)pstats;
-
- stats.n_inserts = 0;
- stats.n_deletes = 0;
- stats.n_updates = 0;
- stats.n_consecutive = 0;
- stats.n_duplicates = 0;
- stats.n_inconsistent_gcis = 0;
-
- for (i = 0; i < records; i++) {
- recInsertEvent[i].pk = 0xFFFFFFFF;
- recInsertEvent[i].count = 0;
- recInsertEvent[i].event = 0xFFFFFFFF;
-
- recUpdateEvent[i].pk = 0xFFFFFFFF;
- recUpdateEvent[i].count = 0;
- recUpdateEvent[i].event = 0xFFFFFFFF;
-
- recDeleteEvent[i].pk = 0xFFFFFFFF;
- recDeleteEvent[i].count = 0;
- recDeleteEvent[i].event = 0xFFFFFFFF;
- }
-
- NdbDictionary::Dictionary *myDict = pNdb->getDictionary();
-
- if (!myDict) {
- g_err << function << "Event Creation failedDictionary not found\n";
- return NDBT_FAILED;
- }
-
- int r = 0;
- NdbEventOperation *pOp;
-
- char eventName[1024];
- sprintf(eventName,"%s_EVENT",tab.getName());
- int noEventColumnName = tab.getNoOfColumns();
-
- g_info << function << "create EventOperation\n";
- pOp = pNdb->createEventOperation(eventName, 100);
- if ( pOp == NULL ) {
- g_err << function << "Event operation creation failed\n";
- return NDBT_FAILED;
- }
-
- g_info << function << "get values\n";
- NdbRecAttr* recAttr[1024];
- NdbRecAttr* recAttrPre[1024];
-
- const NdbDictionary::Table *_table = myDict->getTable(tab.getName());
-
- for (int a = 0; a < noEventColumnName; a++) {
- recAttr[a] = pOp->getValue(_table->getColumn(a)->getName());
- recAttrPre[a] = pOp->getPreValue(_table->getColumn(a)->getName());
- }
-
- // set up the callbacks
- g_info << function << "execute\n";
- if (pOp->execute()) { // This starts changes to "start flowing"
- g_err << function << "operation execution failed: \n";
- g_err << pOp->getNdbError().code << " "
- << pOp->getNdbError().message << endl;
- return NDBT_FAILED;
- }
-
- g_info << function << "ok\n";
-
- int count = 0;
- Uint32 last_inconsitant_gci = 0xEFFFFFF0;
-
- while (r < records){
- //printf("now waiting for event...\n");
- int res = pNdb->pollEvents(1000); // wait for event or 1000 ms
-
- if (res > 0) {
- //printf("got data! %d\n", r);
- int overrun;
- while (pOp->next(&overrun) > 0) {
- r++;
- r += overrun;
- count++;
-
- Uint32 gci = pOp->getGCI();
- Uint32 pk = recAttr[0]->u_32_value();
-
- if (!pOp->isConsistent()) {
- if (last_inconsitant_gci != gci) {
- last_inconsitant_gci = gci;
- stats.n_inconsistent_gcis++;
- }
- g_warning << "A node failure has occured and events might be missing\n";
- }
- g_info << function << "GCI " << gci << ": " << count;
- struct receivedEvent* recEvent;
- switch (pOp->getEventType()) {
- case NdbDictionary::Event::TE_INSERT:
- stats.n_inserts++;
- g_info << " INSERT: ";
- recEvent = recInsertEvent;
- break;
- case NdbDictionary::Event::TE_DELETE:
- stats.n_deletes++;
- g_info << " DELETE: ";
- recEvent = recDeleteEvent;
- break;
- case NdbDictionary::Event::TE_UPDATE:
- stats.n_updates++;
- g_info << " UPDATE: ";
- recEvent = recUpdateEvent;
- break;
- case NdbDictionary::Event::TE_ALL:
- abort();
- }
-
- if ((int)pk < records) {
- recEvent[pk].pk = pk;
- recEvent[pk].count++;
- }
-
- g_info << "overrun " << overrun << " pk " << pk;
- for (i = 1; i < noEventColumnName; i++) {
- if (recAttr[i]->isNULL() >= 0) { // we have a value
- g_info << " post[" << i << "]=";
- if (recAttr[i]->isNULL() == 0) // we have a non-null value
- g_info << recAttr[i]->u_32_value();
- else // we have a null value
- g_info << "NULL";
- }
- if (recAttrPre[i]->isNULL() >= 0) { // we have a value
- g_info << " pre[" << i << "]=";
- if (recAttrPre[i]->isNULL() == 0) // we have a non-null value
- g_info << recAttrPre[i]->u_32_value();
- else // we have a null value
- g_info << "NULL";
- }
- }
- g_info << endl;
- }
- } else
- ;//printf("timed out\n");
- }
-
- // sleep ((XXXXX-myXXXXX)*2);
-
- g_info << myXXXXX << "dropping event operation" << endl;
-
- int res = pNdb->dropEventOperation(pOp);
- if (res != 0) {
- g_err << "operation execution failed\n";
- return NDBT_FAILED;
- }
-
- g_info << myXXXXX << " ok" << endl;
-
- if (stats.n_inserts > 0) {
- stats.n_consecutive++;
- }
- if (stats.n_deletes > 0) {
- stats.n_consecutive++;
- }
- if (stats.n_updates > 0) {
- stats.n_consecutive++;
- }
- for (i = 0; i < (Uint32)records/3; i++) {
- if (recInsertEvent[i].pk != i) {
- stats.n_consecutive ++;
- ndbout << "missing insert pk " << i << endl;
- } else if (recInsertEvent[i].count > 1) {
- ndbout << "duplicates insert pk " << i
- << " count " << recInsertEvent[i].count << endl;
- stats.n_duplicates += recInsertEvent[i].count-1;
- }
- if (recUpdateEvent[i].pk != i) {
- stats.n_consecutive ++;
- ndbout << "missing update pk " << i << endl;
- } else if (recUpdateEvent[i].count > 1) {
- ndbout << "duplicates update pk " << i
- << " count " << recUpdateEvent[i].count << endl;
- stats.n_duplicates += recUpdateEvent[i].count-1;
- }
- if (recDeleteEvent[i].pk != i) {
- stats.n_consecutive ++;
- ndbout << "missing delete pk " << i << endl;
- } else if (recDeleteEvent[i].count > 1) {
- ndbout << "duplicates delete pk " << i
- << " count " << recDeleteEvent[i].count << endl;
- stats.n_duplicates += recDeleteEvent[i].count-1;
- }
- }
-
- return NDBT_OK;
-}
-
-int
HugoTransactions::pkReadRecords(Ndb* pNdb,
int records,
int batch,
@@ -1054,7 +778,6 @@ HugoTransactions::pkReadRecords(Ndb* pNdb,
int reads = 0;
int r = 0;
int retryAttempt = 0;
- const int retryMax = 100;
int check, a;
if (batch == 0) {
@@ -1066,7 +789,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb,
if(r + batch > records)
batch = records - r;
- if (retryAttempt >= retryMax){
+ if (retryAttempt >= m_retryMax){
g_info << "ERROR: has retried this operation " << retryAttempt
<< " times, failing!" << endl;
return NDBT_FAILED;
@@ -1116,6 +839,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb,
return NDBT_FAILED;
}
} else {
+
if(pIndexScanOp)
{
int rows_found = 0;
@@ -1173,7 +897,6 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
int updated = 0;
int r = 0;
int retryAttempt = 0;
- const int retryMax = 100;
int check, a, b;
NdbOperation *pOp;
@@ -1184,7 +907,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
if(r + batch > records)
batch = records - r;
- if (retryAttempt >= retryMax){
+ if (retryAttempt >= m_retryMax){
g_info << "ERROR: has retried this operation " << retryAttempt
<< " times, failing!" << endl;
return NDBT_FAILED;
@@ -1302,6 +1025,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
}
else{
updated += batch;
+ m_latest_gci = pTrans->getGCI();
}
closeTransaction(pNdb);
@@ -1321,12 +1045,11 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb,
int updated = 0;
int r = 0;
int retryAttempt = 0;
- const int retryMax = 100;
int check, a;
while (r < records){
- if (retryAttempt >= retryMax){
+ if (retryAttempt >= m_retryMax){
g_info << "ERROR: has retried this operation " << retryAttempt
<< " times, failing!" << endl;
return NDBT_FAILED;
@@ -1476,8 +1199,9 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb,
}
else{
updated++;
+ m_latest_gci = pTrans->getGCI();
}
-
+
closeTransaction(pNdb);
@@ -1499,7 +1223,6 @@ HugoTransactions::pkDelRecords(Ndb* pNdb,
int deleted = 0;
int r = 0;
int retryAttempt = 0;
- const int retryMax = 100;
int check, a;
NdbOperation *pOp;
@@ -1508,7 +1231,7 @@ HugoTransactions::pkDelRecords(Ndb* pNdb,
if(r + batch > records)
batch = records - r;
- if (retryAttempt >= retryMax){
+ if (retryAttempt >= m_retryMax){
g_info << "ERROR: has retried this operation " << retryAttempt
<< " times, failing!" << endl;
return NDBT_FAILED;
@@ -1576,6 +1299,7 @@ HugoTransactions::pkDelRecords(Ndb* pNdb,
}
else {
deleted += batch;
+ m_latest_gci = pTrans->getGCI();
}
closeTransaction(pNdb);
@@ -1598,7 +1322,6 @@ HugoTransactions::lockRecords(Ndb* pNdb,
// and lock som other records
int r = 0;
int retryAttempt = 0;
- const int retryMax = 100;
int check, a, b;
NdbOperation *pOp;
NdbOperation::LockMode lm = NdbOperation::LM_Exclusive;
@@ -1619,7 +1342,7 @@ HugoTransactions::lockRecords(Ndb* pNdb,
g_info << "|- Locking " << lockBatch << " records..." << endl;
- if (retryAttempt >= retryMax){
+ if (retryAttempt >= m_retryMax){
g_info << "ERROR: has retried this operation " << retryAttempt
<< " times, failing!" << endl;
return NDBT_FAILED;
@@ -1718,7 +1441,6 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
int reads = 0;
int r = 0;
int retryAttempt = 0;
- const int retryMax = 100;
int check, a;
NdbOperation *pOp;
NdbIndexScanOperation *sOp;
@@ -1741,7 +1463,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
allocRows(batch);
while (r < records){
- if (retryAttempt >= retryMax){
+ if (retryAttempt >= m_retryMax){
g_info << "ERROR: has retried this operation " << retryAttempt
<< " times, failing!" << endl;
return NDBT_FAILED;
@@ -1865,7 +1587,6 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
int updated = 0;
int r = 0;
int retryAttempt = 0;
- const int retryMax = 100;
int check, a, b;
NdbOperation *pOp;
NdbScanOperation * sOp;
@@ -1881,7 +1602,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
allocRows(batch);
while (r < records){
- if (retryAttempt >= retryMax){
+ if (retryAttempt >= m_retryMax){
g_info << "ERROR: has retried this operation " << retryAttempt
<< " times, failing!" << endl;
return NDBT_FAILED;
@@ -2037,6 +1758,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
return NDBT_FAILED;
} else {
updated += batch;
+ m_latest_gci = pTrans->getGCI();
}
closeTransaction(pNdb);
diff --git a/storage/ndb/test/src/NDBT_ResultRow.cpp b/storage/ndb/test/src/NDBT_ResultRow.cpp
index ab8d7b07ea1..09e6b8a383c 100644
--- a/storage/ndb/test/src/NDBT_ResultRow.cpp
+++ b/storage/ndb/test/src/NDBT_ResultRow.cpp
@@ -115,7 +115,8 @@ BaseString NDBT_ResultRow::c_str() const {
}
NdbOut &
-operator << (NdbOut& ndbout, const NDBT_ResultRow & res) {
+operator << (NdbOut& ndbout, const NDBT_ResultRow & res)
+{
if (res.cols != 0)
{
ndbout << *(res.data[0]);
diff --git a/storage/ndb/test/src/NDBT_Tables.cpp b/storage/ndb/test/src/NDBT_Tables.cpp
index 5a5fecd85c1..ce4c49b3c75 100644
--- a/storage/ndb/test/src/NDBT_Tables.cpp
+++ b/storage/ndb/test/src/NDBT_Tables.cpp
@@ -703,7 +703,7 @@ static
const
NDBT_Attribute ACCOUNT_TYPES_Attribs[] = {
NDBT_Attribute("ACCOUNT_TYPE_ID", NdbDictionary::Column::Unsigned, 1, true),
- NDBT_Attribute("DESCRIPTION", NdbDictionary::Column::Char, 64)
+ NDBT_Attribute("DESCRIPTION", NdbDictionary::Column::Char, 64, false, false, &my_charset_latin1_bin)
};
static
diff --git a/storage/ndb/test/src/NDBT_Test.cpp b/storage/ndb/test/src/NDBT_Test.cpp
index 8fecf56531f..4cd9df8e538 100644
--- a/storage/ndb/test/src/NDBT_Test.cpp
+++ b/storage/ndb/test/src/NDBT_Test.cpp
@@ -15,6 +15,8 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <ndb_global.h>
+//#define NDB_OPTS_SKIP_USAGE
+#include <ndb_opts.h>
#include <my_pthread.h>
#include "NDBT.hpp"
@@ -22,7 +24,6 @@
#include <PortDefs.h>
-#include <getarg.h>
#include <time.h>
// No verbose outxput
@@ -71,6 +72,18 @@ NDBT_TestCase* NDBT_Context::getCase(){
return testcase;
}
+const char* NDBT_Context::getTableName(int n) const
+{
+ assert(suite != NULL);
+ return suite->m_tables_in_test[n].c_str();
+}
+
+int NDBT_Context::getNumTables() const
+{
+ assert(suite != NULL);
+ return suite->m_tables_in_test.size();
+}
+
int NDBT_Context::getNumRecords() const{
return records;
}
@@ -348,6 +361,9 @@ NDBT_TestCase::NDBT_TestCase(NDBT_TestSuite* psuite,
name= _name.c_str();
comment= _comment.c_str();
assert(suite != NULL);
+
+ m_all_tables = false;
+ m_has_run = false;
}
NDBT_TestCaseImpl1::NDBT_TestCaseImpl1(NDBT_TestSuite* psuite,
@@ -735,6 +751,7 @@ NDBT_TestSuite::NDBT_TestSuite(const char* pname):name(pname){
records = 0;
loops = 0;
createTable = true;
+ createAllTables = false;
}
@@ -749,6 +766,10 @@ void NDBT_TestSuite::setCreateTable(bool _flag){
createTable = _flag;
}
+void NDBT_TestSuite::setCreateAllTables(bool _flag){
+ createAllTables = _flag;
+}
+
bool NDBT_TestSuite::timerIsOn(){
return (timer != 0);
}
@@ -823,13 +844,17 @@ void NDBT_TestSuite::execute(Ndb_cluster_connection& con,
const char* _testname){
int result;
-
for (unsigned t = 0; t < tests.size(); t++){
if (_testname != NULL &&
strcasecmp(tests[t]->getName(), _testname) != 0)
continue;
+ if (tests[t]->m_all_tables && tests[t]->m_has_run)
+ {
+ continue;
+ }
+
if (tests[t]->isVerify(pTab) == false) {
continue;
}
@@ -877,10 +902,12 @@ void NDBT_TestSuite::execute(Ndb_cluster_connection& con,
numTestsOk++;
numTestsExecuted++;
- if (result == NDBT_OK && createTable == true){
+ if (result == NDBT_OK && createTable == true && createAllTables == false){
pDict->dropTable(pTab->getName());
}
+ tests[t]->m_has_run = true;
+
delete ctx;
}
}
@@ -946,6 +973,67 @@ int NDBT_TestSuite::reportAllTables(const char* _testname){
return result;
}
+enum test_options {
+ OPT_INTERACTIVE = NDB_STD_OPTIONS_LAST,
+ OPT_PRINT,
+ OPT_PRINT_HTML,
+ OPT_PRINT_CASES
+
+};
+NDB_STD_OPTS_VARS;
+
+static int opt_print = false;
+static int opt_print_html = false;
+static int opt_print_cases = false;
+static int opt_records;
+static int opt_loops;
+static int opt_timer;
+static char * opt_remote_mgm = NULL;
+static char * opt_testname = NULL;
+static int opt_verbose;
+
+static struct my_option my_long_options[] =
+{
+ NDB_STD_OPTS(""),
+ { "print", OPT_PRINT, "Print execution tree",
+ (gptr*) &opt_print, (gptr*) &opt_print, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+ { "print_html", OPT_PRINT_HTML, "Print execution tree in html table format",
+ (gptr*) &opt_print_html, (gptr*) &opt_print_html, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+ { "print_cases", OPT_PRINT_CASES, "Print list of test cases",
+ (gptr*) &opt_print_cases, (gptr*) &opt_print_cases, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+ { "records", 'r', "Number of records",
+ (gptr*) &opt_records, (gptr*) &opt_records, 0,
+ GET_INT, REQUIRED_ARG, 1000, 0, 0, 0, 0, 0 },
+ { "loops", 'l', "Number of loops",
+ (gptr*) &opt_loops, (gptr*) &opt_loops, 0,
+ GET_INT, REQUIRED_ARG, 5, 0, 0, 0, 0, 0 },
+ { "testname", 'n', "Name of test to run",
+ (gptr*) &opt_testname, (gptr*) &opt_testname, 0,
+ GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
+ { "remote_mgm", 'm',
+ "host:port to mgmsrv of remote cluster",
+ (gptr*) &opt_remote_mgm, (gptr*) &opt_remote_mgm, 0,
+ GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
+ { "timer", 't', "Print execution time",
+ (gptr*) &opt_timer, (gptr*) &opt_timer, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+ { "verbose", 'v', "Print verbose status",
+ (gptr*) &opt_verbose, (gptr*) &opt_verbose, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
+};
+
+static void usage()
+{
+ ndb_std_print_version();
+ printf("Usage: %s [OPTIONS] [tabname1 tabname2 ... tabnameN]\n", my_progname);
+ my_print_help(my_long_options);
+ my_print_variables(my_long_options);
+}
+
int NDBT_TestSuite::execute(int argc, const char** argv){
int res = NDBT_FAILED;
/* Arguments:
@@ -967,78 +1055,50 @@ int NDBT_TestSuite::execute(int argc, const char** argv){
a complete test suite without any greater knowledge of what
should be tested ie. keep arguments at a minimum
*/
- int _records = 1000;
- int _loops = 5;
- int _timer = 0;
- char * _remote_mgm =NULL;
- char* _testname = NULL;
- const char* _tabname = NULL;
- int _print = false;
- int _print_html = false;
-
- int _print_cases = false;
- int _verbose = false;
-#ifndef DBUG_OFF
- const char *debug_option= 0;
-#endif
- struct getargs args[] = {
- { "print", '\0', arg_flag, &_print, "Print execution tree", "" },
- { "print_html", '\0', arg_flag, &_print_html, "Print execution tree in html table format", "" },
- { "print_cases", '\0', arg_flag, &_print_cases, "Print list of test cases", "" },
- { "records", 'r', arg_integer, &_records, "Number of records", "records" },
- { "loops", 'l', arg_integer, &_loops, "Number of loops", "loops" },
- { "testname", 'n', arg_string, &_testname, "Name of test to run", "testname" },
- { "remote_mgm", 'm', arg_string, &_remote_mgm,
- "host:port to mgmsrv of remote cluster", "host:port" },
- { "timer", 't', arg_flag, &_timer, "Print execution time", "time" },
-#ifndef DBUG_OFF
- { "debug", 0, arg_string, &debug_option,
- "Specify debug options e.g. d:t:i:o,out.trace", "options" },
-#endif
- { "verbose", 'v', arg_flag, &_verbose, "Print verbose status", "verbose" }
- };
- int num_args = sizeof(args) / sizeof(args[0]);
- int optind = 0;
-
- if(getarg(args, num_args, argc, argv, &optind)) {
- arg_printusage(args, num_args, argv[0], "tabname1 tabname2 ... tabnameN\n");
- return NDBT_WRONGARGS;
- }
+ char **_argv= (char **)argv;
+
+ if (!my_progname)
+ my_progname= _argv[0];
+
+ const char *load_default_groups[]= { "mysql_cluster",0 };
+ load_defaults("my",load_default_groups,&argc,&_argv);
+ int ho_error;
#ifndef DBUG_OFF
- if (debug_option)
- DBUG_PUSH(debug_option);
+ opt_debug= "d:t:i:F:L";
#endif
+ if ((ho_error=handle_options(&argc, &_argv, my_long_options,
+ ndb_std_get_one_option)))
+ {
+ usage();
+ return NDBT_ProgramExit(NDBT_WRONGARGS);
+ }
- // Check if table name is supplied
- if (argv[optind] != NULL)
- _tabname = argv[optind];
-
- if (_print == true){
+ if (opt_print == true){
printExecutionTree();
return 0;
}
- if (_print_html == true){
+ if (opt_print_html == true){
printExecutionTreeHTML();
return 0;
}
- if (_print_cases == true){
+ if (opt_print_cases == true){
printCases();
- return NDBT_ProgramExit(NDBT_FAILED);
+ return 0;
}
- if (_verbose)
+ if (opt_verbose)
setOutputLevel(2); // Show g_info
else
- setOutputLevel(0); // Show only g_err ?
+ setOutputLevel(0); // Show only g_err ?
- remote_mgm = _remote_mgm;
- records = _records;
- loops = _loops;
- timer = _timer;
+ remote_mgm = opt_remote_mgm;
+ records = opt_records;
+ loops = opt_loops;
+ timer = opt_timer;
Ndb_cluster_connection con;
if(con.connect(12, 5, 1))
@@ -1046,18 +1106,72 @@ int NDBT_TestSuite::execute(int argc, const char** argv){
return NDBT_ProgramExit(NDBT_FAILED);
}
- if(optind == argc){
+ {
+ Ndb ndb(&con, "TEST_DB");
+ ndb.init(1024);
+ if (ndb.waitUntilReady(500)){
+ g_err << "Ndb was not ready" << endl;
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ NdbDictionary::Dictionary* pDict = ndb.getDictionary();
+
+ int num_tables= argc;
+ if (argc == 0)
+ num_tables = NDBT_Tables::getNumTables();
+
+ for(int i = 0; i<num_tables; i++)
+ {
+ if (argc == 0)
+ m_tables_in_test.push_back(NDBT_Tables::getTable(i)->getName());
+ else
+ m_tables_in_test.push_back(_argv[i]);
+ if (createAllTables == true)
+ {
+ const char *tab_name= m_tables_in_test[i].c_str();
+ const NdbDictionary::Table* pTab = pDict->getTable(tab_name);
+ if (pTab && pDict->dropTable(tab_name) != 0)
+ {
+ g_err << "ERROR0: Failed to drop table " << tab_name
+ << pDict->getNdbError() << endl;
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ if(NDBT_Tables::createTable(&ndb, tab_name) != 0)
+ {
+ g_err << "ERROR1: Failed to create table " << tab_name
+ << pDict->getNdbError() << endl;
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ }
+ }
+ }
+
+ if(argc == 0){
// No table specified
- res = executeAll(con, _testname);
+ res = executeAll(con, opt_testname);
} else {
testSuiteTimer.doStart();
- for(int i = optind; i<argc; i++){
- executeOne(con, argv[i], _testname);
+ for(int i = 0; i<argc; i++){
+ executeOne(con, _argv[i], opt_testname);
}
testSuiteTimer.doStop();
- res = report(_testname);
+ res = report(opt_testname);
}
-
+
+ if (res == NDBT_OK && createAllTables == true)
+ {
+ Ndb ndb(&con, "TEST_DB");
+ ndb.init(1024);
+ if (ndb.waitUntilReady(500)){
+ g_err << "Ndb was not ready" << endl;
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ NdbDictionary::Dictionary* pDict = ndb.getDictionary();
+ for(unsigned i = 0; i<m_tables_in_test.size(); i++)
+ {
+ pDict->dropTable(m_tables_in_test[i].c_str());
+ }
+ }
+
return NDBT_ProgramExit(res);
}
diff --git a/storage/ndb/test/src/UtilTransactions.cpp b/storage/ndb/test/src/UtilTransactions.cpp
index 31c323045ed..eabd22f3a17 100644
--- a/storage/ndb/test/src/UtilTransactions.cpp
+++ b/storage/ndb/test/src/UtilTransactions.cpp
@@ -1318,7 +1318,7 @@ UtilTransactions::compare(Ndb* pNdb, const char* tab_name2, int flags){
NdbError err;
- int return_code= -1, row_count= 0;
+ int return_code= 0, row_count= 0;
int retryAttempt = 0, retryMax = 10;
HugoCalculator calc(tab);
@@ -1336,9 +1336,9 @@ UtilTransactions::compare(Ndb* pNdb, const char* tab_name2, int flags){
UtilTransactions count(tab2);
while (true){
-
+loop:
if (retryAttempt++ >= retryMax){
- g_info << "ERROR: has retried this operation " << retryAttempt
+ g_err << "ERROR: compare has retried this operation " << retryAttempt
<< " times, failing!" << endl;
return -1;
}
@@ -1409,8 +1409,7 @@ UtilTransactions::compare(Ndb* pNdb, const char* tab_name2, int flags){
g_err << "COMPARE FAILED" << endl;
g_err << row << endl;
g_err << cmp.get_row(0) << endl;
- return_code= 1;
- goto close;
+ return_code++;
}
retryAttempt= 0;
cmp.closeTransaction(pNdb);
@@ -1434,12 +1433,14 @@ UtilTransactions::compare(Ndb* pNdb, const char* tab_name2, int flags){
return -1;
}
- g_info << row_count2 << " rows in tab_name2" << endl;
- return (row_count == row_count2 ? 0 : 1);
+ g_info << row_count2 << " rows in tab_name2 - failed " << return_code
+ << endl;
+ return (row_count == row_count2 ? return_code : 1);
}
error:
if(err.status == NdbError::TemporaryError)
{
+ g_err << err << endl;
NdbSleep_MilliSleep(50);
if(pTrans != 0)
{
@@ -1448,8 +1449,12 @@ error:
}
if(cmp.getTransaction())
cmp.closeTransaction(pNdb);
- continue;
+
+ goto loop;
}
+ g_err << "ERROR" << endl;
+ g_err << err << endl;
+
break;
}
diff --git a/storage/ndb/test/tools/Makefile.am b/storage/ndb/test/tools/Makefile.am
index a7ab7669052..30ba5b71d75 100644
--- a/storage/ndb/test/tools/Makefile.am
+++ b/storage/ndb/test/tools/Makefile.am
@@ -1,5 +1,5 @@
-ndbtest_PROGRAMS = hugoLoad hugoFill hugoLockRecords hugoPkDelete hugoPkRead hugoPkReadRecord hugoPkUpdate hugoScanRead hugoScanUpdate restart verify_index copy_tab create_index ndb_cpcc
+ndbtest_PROGRAMS = hugoLoad hugoFill hugoLockRecords hugoPkDelete hugoPkRead hugoPkReadRecord hugoPkUpdate hugoScanRead hugoScanUpdate restart verify_index copy_tab create_index ndb_cpcc listen_event
# transproxy
@@ -18,6 +18,7 @@ verify_index_SOURCES = verify_index.cpp
copy_tab_SOURCES = copy_tab.cpp
create_index_SOURCES = create_index.cpp
ndb_cpcc_SOURCES = cpcc.cpp
+listen_event_SOURCES = listen.cpp
include $(top_srcdir)/storage/ndb/config/common.mk.am
include $(top_srcdir)/storage/ndb/config/type_ndbapitest.mk.am
diff --git a/storage/ndb/test/tools/listen.cpp b/storage/ndb/test/tools/listen.cpp
new file mode 100644
index 00000000000..05edb012fdb
--- /dev/null
+++ b/storage/ndb/test/tools/listen.cpp
@@ -0,0 +1,169 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+#include <NdbOut.hpp>
+#include <NdbApi.hpp>
+#include <NdbSleep.h>
+#include <NDBT.hpp>
+#include <HugoTransactions.hpp>
+#include <getarg.h>
+
+
+int
+main(int argc, const char** argv){
+ ndb_init();
+
+
+ int _help = 0;
+ const char* db = 0;
+
+ struct getargs args[] = {
+ { "database", 'd', arg_string, &db, "Database", "" },
+ { "usage", '?', arg_flag, &_help, "Print help", "" }
+ };
+ int num_args = sizeof(args) / sizeof(args[0]);
+ int optind = 0, i;
+ char desc[] =
+ "<tabname>+ \nThis program listen to events on specified tables\n";
+
+ if(getarg(args, num_args, argc, argv, &optind) ||
+ argv[optind] == NULL || _help) {
+ arg_printusage(args, num_args, argv[0], desc);
+ return NDBT_ProgramExit(NDBT_WRONGARGS);
+ }
+
+ // Connect to Ndb
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ Ndb MyNdb( &con, db ? db : "TEST_DB" );
+
+ if(MyNdb.init() != 0){
+ ERR(MyNdb.getNdbError());
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+
+ // Connect to Ndb and wait for it to become ready
+ while(MyNdb.waitUntilReady() != 0)
+ ndbout << "Waiting for ndb to become ready..." << endl;
+
+ int result = 0;
+ Uint64 last_gci= 0, cnt= 0;
+
+ NdbDictionary::Dictionary *myDict = MyNdb.getDictionary();
+ Vector<NdbDictionary::Event*> events;
+ Vector<NdbEventOperation*> event_ops;
+ for(i= optind; i<argc; i++)
+ {
+ const NdbDictionary::Table* table= myDict->getTable(argv[i]);
+ if(!table)
+ {
+ ndbout_c("Could not find table: %s, skipping", argv[i]);
+ continue;
+ }
+
+ BaseString name;
+ name.appfmt("EV-%s", argv[i]);
+ NdbDictionary::Event *myEvent= new NdbDictionary::Event(name.c_str());
+ myEvent->setTable(table->getName());
+ myEvent->addTableEvent(NdbDictionary::Event::TE_ALL);
+ for(int a = 0; a < table->getNoOfColumns(); a++){
+ myEvent->addEventColumn(a);
+ }
+
+ if (myDict->createEvent(* myEvent))
+ {
+ if(myDict->getNdbError().classification == NdbError::SchemaObjectExists)
+ {
+ g_info << "Event creation failed event exists\n";
+ if (myDict->dropEvent(name.c_str()))
+ {
+ g_err << "Failed to drop event: " << myDict->getNdbError() << endl;
+ result = 1;
+ goto end;
+ }
+ // try again
+ if (myDict->createEvent(* myEvent))
+ {
+ g_err << "Failed to create event: " << myDict->getNdbError() << endl;
+ result = 1;
+ goto end;
+ }
+ }
+ else
+ {
+ g_err << "Failed to create event: " << myDict->getNdbError() << endl;
+ result = 1;
+ goto end;
+ }
+ }
+
+ events.push_back(myEvent);
+
+ NdbEventOperation* pOp = MyNdb.createEventOperation(name.c_str());
+ if ( pOp == NULL ) {
+ g_err << "Event operation creation failed" << endl;
+ result = 1;
+ goto end;
+ }
+
+ for (int a = 0; a < table->getNoOfColumns(); a++)
+ {
+ pOp->getValue(table->getColumn(a)->getName());
+ pOp->getPreValue(table->getColumn(a)->getName());
+ }
+ event_ops.push_back(pOp);
+ }
+
+ for(i= 0; i<(int)event_ops.size(); i++)
+ {
+ if (event_ops[i]->execute())
+ {
+ g_err << "operation execution failed: " << event_ops[i]->getNdbError()
+ << endl;
+ result = 1;
+ goto end;
+ }
+ }
+
+ while(true)
+ {
+ while(MyNdb.pollEvents(100) == 0);
+
+ NdbEventOperation* pOp;
+ while((pOp= MyNdb.nextEvent()) != 0)
+ {
+ if(pOp->getGCI() != last_gci)
+ {
+ if(cnt) ndbout_c("GCI: %lld events: %lld", last_gci, cnt);
+ cnt= 1;
+ last_gci= pOp->getGCI();
+ }
+ else
+ {
+ cnt++;
+ }
+ }
+ }
+end:
+ return NDBT_ProgramExit(result ? NDBT_FAILED : NDBT_OK);
+}
+
+template class Vector<NdbDictionary::Event*>;
+template class Vector<NdbEventOperation*>;