Diffstat (limited to 'ndb/src')
-rw-r--r--  ndb/src/common/debugger/DebuggerNames.cpp | 19
-rw-r--r--  ndb/src/common/debugger/EventLogger.cpp | 1437
-rw-r--r--  ndb/src/common/debugger/Makefile.am | 2
-rw-r--r--  ndb/src/common/debugger/signaldata/ContinueB.cpp | 4
-rw-r--r--  ndb/src/common/debugger/signaldata/CopyGCI.cpp | 12
-rw-r--r--  ndb/src/common/debugger/signaldata/CreateTrig.cpp | 28
-rw-r--r--  ndb/src/common/debugger/signaldata/DihContinueB.cpp | 5
-rw-r--r--  ndb/src/common/debugger/signaldata/Makefile.am | 3
-rw-r--r--  ndb/src/common/debugger/signaldata/MasterLCP.cpp | 8
-rw-r--r--  ndb/src/common/debugger/signaldata/NdbfsContinueB.cpp | 5
-rw-r--r--  ndb/src/common/debugger/signaldata/ScanFrag.cpp | 42
-rw-r--r--  ndb/src/common/debugger/signaldata/ScanTab.cpp | 34
-rw-r--r--  ndb/src/common/debugger/signaldata/SignalDataPrint.cpp | 10
-rw-r--r--  ndb/src/common/debugger/signaldata/SignalNames.cpp | 2
-rw-r--r--  ndb/src/common/debugger/signaldata/TcKeyConf.cpp | 42
-rw-r--r--  ndb/src/common/logger/FileLogHandler.cpp | 4
-rw-r--r--  ndb/src/common/logger/LogHandler.cpp | 4
-rw-r--r--  ndb/src/common/logger/Logger.cpp | 6
-rw-r--r--  ndb/src/common/logger/SysLogHandler.cpp | 2
-rw-r--r--  ndb/src/common/logger/listtest/LogHandlerListUnitTest.cpp | 4
-rw-r--r--  ndb/src/common/logger/loggertest/LoggerUnitTest.cpp | 2
-rw-r--r--  ndb/src/common/mgmcommon/ConfigRetriever.cpp | 128
-rw-r--r--  ndb/src/common/mgmcommon/IPCConfig.cpp | 123
-rw-r--r--  ndb/src/common/mgmcommon/LocalConfig.cpp | 93
-rw-r--r--  ndb/src/common/mgmcommon/Makefile.am | 7
-rw-r--r--  ndb/src/common/mgmcommon/NdbConfig.c | 88
-rw-r--r--  ndb/src/common/portlib/NdbCondition.c | 3
-rw-r--r--  ndb/src/common/portlib/NdbDaemon.c | 2
-rw-r--r--  ndb/src/common/portlib/NdbMem.c | 6
-rw-r--r--  ndb/src/common/portlib/NdbMutex.c | 3
-rw-r--r--  ndb/src/common/portlib/NdbSleep.c | 7
-rw-r--r--  ndb/src/common/portlib/NdbTCP.cpp | 47
-rw-r--r--  ndb/src/common/portlib/NdbThread.c | 9
-rw-r--r--  ndb/src/common/portlib/memtest.c | 8
-rw-r--r--  ndb/src/common/transporter/Makefile.am | 2
-rw-r--r--  ndb/src/common/transporter/OSE_Receiver.cpp | 2
-rw-r--r--  ndb/src/common/transporter/OSE_Transporter.cpp | 4
-rw-r--r--  ndb/src/common/transporter/Packer.cpp | 21
-rw-r--r--  ndb/src/common/transporter/SCI_Transporter.cpp | 733
-rw-r--r--  ndb/src/common/transporter/SCI_Transporter.hpp | 29
-rw-r--r--  ndb/src/common/transporter/SHM_Buffer.hpp | 20
-rw-r--r--  ndb/src/common/transporter/SHM_Transporter.cpp | 52
-rw-r--r--  ndb/src/common/transporter/SHM_Transporter.hpp | 2
-rw-r--r--  ndb/src/common/transporter/TCP_Transporter.cpp | 93
-rw-r--r--  ndb/src/common/transporter/TCP_Transporter.hpp | 3
-rw-r--r--  ndb/src/common/transporter/Transporter.cpp | 2
-rw-r--r--  ndb/src/common/transporter/TransporterRegistry.cpp | 147
-rw-r--r--  ndb/src/common/transporter/perftest/perfTransporterTest.cpp | 4
-rw-r--r--  ndb/src/common/transporter/priotest/prioTransporterTest.cpp | 4
-rw-r--r--  ndb/src/common/util/BaseString.cpp | 25
-rw-r--r--  ndb/src/common/util/ConfigValues.cpp | 7
-rw-r--r--  ndb/src/common/util/File.cpp | 4
-rw-r--r--  ndb/src/common/util/Makefile.am | 2
-rw-r--r--  ndb/src/common/util/NdbErrHnd.cpp | 38
-rw-r--r--  ndb/src/common/util/NdbOut.cpp | 6
-rw-r--r--  ndb/src/common/util/NdbSqlUtil.cpp | 425
-rw-r--r--  ndb/src/common/util/OutputStream.cpp | 4
-rw-r--r--  ndb/src/common/util/Properties.cpp | 27
-rw-r--r--  ndb/src/common/util/SimpleProperties.cpp | 2
-rw-r--r--  ndb/src/common/util/SocketClient.cpp | 2
-rw-r--r--  ndb/src/common/util/SocketServer.cpp | 32
-rw-r--r--  ndb/src/common/util/basestring_vsnprintf.c | 37
-rw-r--r--  ndb/src/common/util/getarg.c | 6
-rw-r--r--  ndb/src/common/util/ndb_init.c (renamed from ndb/src/common/debugger/LogLevel.cpp) | 31
-rw-r--r--  ndb/src/common/util/new.cpp | 9
-rw-r--r--  ndb/src/common/util/random.c | 6
-rw-r--r--  ndb/src/common/util/socket_io.cpp | 8
-rw-r--r--  ndb/src/common/util/version.c | 25
-rw-r--r--  ndb/src/cw/cpcd/APIService.cpp | 16
-rw-r--r--  ndb/src/cw/cpcd/CPCD.cpp | 10
-rw-r--r--  ndb/src/cw/cpcd/CPCD.hpp | 2
-rw-r--r--  ndb/src/cw/cpcd/Makefile.am | 6
-rw-r--r--  ndb/src/cw/cpcd/Process.cpp | 13
-rw-r--r--  ndb/src/cw/cpcd/main.cpp | 8
-rw-r--r--  ndb/src/kernel/Makefile.am | 5
-rw-r--r--  ndb/src/kernel/blocks/ERROR_codes.txt | 11
-rw-r--r--  ndb/src/kernel/blocks/backup/Backup.cpp | 88
-rw-r--r--  ndb/src/kernel/blocks/backup/Backup.hpp | 8
-rw-r--r--  ndb/src/kernel/blocks/backup/BackupInit.cpp | 13
-rw-r--r--  ndb/src/kernel/blocks/backup/read.cpp | 1
-rw-r--r--  ndb/src/kernel/blocks/backup/restore/Makefile.am | 6
-rw-r--r--  ndb/src/kernel/blocks/backup/restore/Restore.cpp | 33
-rw-r--r--  ndb/src/kernel/blocks/backup/restore/Restore.hpp | 51
-rw-r--r--  ndb/src/kernel/blocks/backup/restore/consumer.hpp | 2
-rw-r--r--  ndb/src/kernel/blocks/backup/restore/consumer_restore.cpp | 188
-rw-r--r--  ndb/src/kernel/blocks/backup/restore/consumer_restore.hpp | 17
-rw-r--r--  ndb/src/kernel/blocks/backup/restore/main.cpp | 140
-rw-r--r--  ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 77
-rw-r--r--  ndb/src/kernel/blocks/dbacc/Dbacc.hpp | 15
-rw-r--r--  ndb/src/kernel/blocks/dbacc/DbaccInit.cpp | 19
-rw-r--r--  ndb/src/kernel/blocks/dbacc/DbaccMain.cpp | 134
-rw-r--r--  ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 126
-rw-r--r--  ndb/src/kernel/blocks/dbdict/Dbdict.hpp | 26
-rw-r--r--  ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 68
-rw-r--r--  ndb/src/kernel/blocks/dbdih/Sysfile.hpp | 12
-rw-r--r--  ndb/src/kernel/blocks/dblqh/Dblqh.hpp | 84
-rw-r--r--  ndb/src/kernel/blocks/dblqh/DblqhInit.cpp | 21
-rw-r--r--  ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 594
-rw-r--r--  ndb/src/kernel/blocks/dbtc/Dbtc.hpp | 101
-rw-r--r--  ndb/src/kernel/blocks/dbtc/DbtcInit.cpp | 12
-rw-r--r--  ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 762
-rw-r--r--  ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp | 61
-rw-r--r--  ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 82
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp | 6
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp | 28
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp | 15
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp | 10
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp | 111
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupGen.cpp | 40
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp | 108
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp | 3
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp | 88
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp | 132
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp | 29
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp | 51
-rw-r--r--  ndb/src/kernel/blocks/dbtux/Dbtux.hpp | 280
-rw-r--r--  ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp | 230
-rw-r--r--  ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp | 74
-rw-r--r--  ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp | 84
-rw-r--r--  ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp | 18
-rw-r--r--  ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp | 62
-rw-r--r--  ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp | 430
-rw-r--r--  ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp | 215
-rw-r--r--  ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp | 82
-rw-r--r--  ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp | 404
-rw-r--r--  ndb/src/kernel/blocks/dbtux/Times.txt | 66
-rw-r--r--  ndb/src/kernel/blocks/dbutil/DbUtil.cpp | 8
-rw-r--r--  ndb/src/kernel/blocks/dbutil/DbUtil.hpp | 2
-rw-r--r--  ndb/src/kernel/blocks/grep/Grep.cpp | 8
-rw-r--r--  ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp | 6
-rw-r--r--  ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp | 20
-rw-r--r--  ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp | 4
-rw-r--r--  ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp | 12
-rw-r--r--  ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp | 2
-rw-r--r--  ndb/src/kernel/blocks/ndbfs/Filename.cpp | 74
-rw-r--r--  ndb/src/kernel/blocks/ndbfs/Filename.hpp | 9
-rw-r--r--  ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp | 6
-rw-r--r--  ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp | 1
-rw-r--r--  ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 19
-rw-r--r--  ndb/src/kernel/blocks/suma/Suma.cpp | 25
-rw-r--r--  ndb/src/kernel/blocks/suma/Suma.hpp | 5
-rw-r--r--  ndb/src/kernel/blocks/suma/SumaInit.cpp | 13
-rw-r--r--  ndb/src/kernel/blocks/trix/Trix.cpp | 4
-rw-r--r--  ndb/src/kernel/blocks/trix/Trix.hpp | 13
-rw-r--r--  ndb/src/kernel/error/ErrorHandlingMacros.hpp | 2
-rw-r--r--  ndb/src/kernel/error/ErrorReporter.cpp | 54
-rw-r--r--  ndb/src/kernel/main.cpp | 49
-rw-r--r--  ndb/src/kernel/vm/ClusterConfiguration.cpp | 8
-rw-r--r--  ndb/src/kernel/vm/Configuration.cpp | 288
-rw-r--r--  ndb/src/kernel/vm/Configuration.hpp | 17
-rw-r--r--  ndb/src/kernel/vm/Emulator.cpp | 15
-rw-r--r--  ndb/src/kernel/vm/Emulator.hpp | 1
-rw-r--r--  ndb/src/kernel/vm/FastScheduler.hpp | 2
-rw-r--r--  ndb/src/kernel/vm/LongSignal.hpp | 2
-rw-r--r--  ndb/src/kernel/vm/MetaData.hpp | 3
-rw-r--r--  ndb/src/kernel/vm/SignalCounter.hpp | 2
-rw-r--r--  ndb/src/kernel/vm/SimulatedBlock.cpp | 24
-rw-r--r--  ndb/src/kernel/vm/SimulatedBlock.hpp | 4
-rw-r--r--  ndb/src/kernel/vm/TransporterCallback.cpp | 7
-rw-r--r--  ndb/src/kernel/vm/WatchDog.cpp | 5
-rw-r--r--  ndb/src/kernel/vm/pc.hpp | 10
-rw-r--r--  ndb/src/mgmapi/mgmapi.cpp | 186
-rw-r--r--  ndb/src/mgmclient/CommandInterpreter.cpp | 619
-rw-r--r--  ndb/src/mgmclient/CommandInterpreter.hpp | 1
-rw-r--r--  ndb/src/mgmclient/CpcClient.cpp | 4
-rw-r--r--  ndb/src/mgmclient/Makefile.am | 5
-rw-r--r--  ndb/src/mgmclient/main.cpp | 3
-rw-r--r--  ndb/src/mgmsrv/CommandInterpreter.cpp | 27
-rw-r--r--  ndb/src/mgmsrv/Config.cpp (renamed from ndb/src/common/mgmcommon/Config.cpp) | 4
-rw-r--r--  ndb/src/mgmsrv/Config.hpp (renamed from ndb/src/common/mgmcommon/Config.hpp) | 2
-rw-r--r--  ndb/src/mgmsrv/ConfigInfo.cpp (renamed from ndb/src/common/mgmcommon/ConfigInfo.cpp) | 1614
-rw-r--r--  ndb/src/mgmsrv/ConfigInfo.hpp (renamed from ndb/src/common/mgmcommon/ConfigInfo.hpp) | 10
-rw-r--r--  ndb/src/mgmsrv/InitConfigFileParser.cpp (renamed from ndb/src/common/mgmcommon/InitConfigFileParser.cpp) | 66
-rw-r--r--  ndb/src/mgmsrv/InitConfigFileParser.hpp (renamed from ndb/src/common/mgmcommon/InitConfigFileParser.hpp) | 5
-rw-r--r--  ndb/src/mgmsrv/Makefile.am | 19
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvr.cpp | 1108
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvr.hpp | 137
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvrConfig.cpp | 3
-rw-r--r--  ndb/src/mgmsrv/NodeLogLevel.cpp | 70
-rw-r--r--  ndb/src/mgmsrv/NodeLogLevel.hpp | 54
-rw-r--r--  ndb/src/mgmsrv/NodeLogLevelList.cpp | 182
-rw-r--r--  ndb/src/mgmsrv/NodeLogLevelList.hpp | 93
-rw-r--r--  ndb/src/mgmsrv/Services.cpp | 256
-rw-r--r--  ndb/src/mgmsrv/Services.hpp | 27
-rw-r--r--  ndb/src/mgmsrv/SignalQueue.cpp | 3
-rw-r--r--  ndb/src/mgmsrv/main.cpp | 132
-rw-r--r--  ndb/src/mgmsrv/mkconfig/mkconfig.cpp | 1
-rw-r--r--  ndb/src/ndbapi/ClusterMgr.cpp | 46
-rw-r--r--  ndb/src/ndbapi/DictCache.cpp | 37
-rw-r--r--  ndb/src/ndbapi/DictCache.hpp | 17
-rw-r--r--  ndb/src/ndbapi/Makefile.am | 1
-rw-r--r--  ndb/src/ndbapi/Ndb.cpp | 182
-rw-r--r--  ndb/src/ndbapi/NdbApiSignal.cpp | 6
-rw-r--r--  ndb/src/ndbapi/NdbApiSignal.hpp | 13
-rw-r--r--  ndb/src/ndbapi/NdbBlob.cpp | 6
-rw-r--r--  ndb/src/ndbapi/NdbConnection.cpp | 284
-rw-r--r--  ndb/src/ndbapi/NdbConnectionScan.cpp | 16
-rw-r--r--  ndb/src/ndbapi/NdbDictionary.cpp | 104
-rw-r--r--  ndb/src/ndbapi/NdbDictionaryImpl.cpp | 461
-rw-r--r--  ndb/src/ndbapi/NdbDictionaryImpl.hpp | 97
-rw-r--r--  ndb/src/ndbapi/NdbEventOperationImpl.cpp | 23
-rw-r--r--  ndb/src/ndbapi/NdbIndexOperation.cpp | 115
-rw-r--r--  ndb/src/ndbapi/NdbLinHash.hpp | 15
-rw-r--r--  ndb/src/ndbapi/NdbOperation.cpp | 18
-rw-r--r--  ndb/src/ndbapi/NdbOperationDefine.cpp | 59
-rw-r--r--  ndb/src/ndbapi/NdbOperationExec.cpp | 59
-rw-r--r--  ndb/src/ndbapi/NdbOperationInt.cpp | 19
-rw-r--r--  ndb/src/ndbapi/NdbOperationSearch.cpp | 70
-rw-r--r--  ndb/src/ndbapi/NdbRecAttr.cpp | 2
-rw-r--r--  ndb/src/ndbapi/NdbReceiver.cpp | 65
-rw-r--r--  ndb/src/ndbapi/NdbScanFilter.cpp | 2
-rw-r--r--  ndb/src/ndbapi/NdbScanOperation.cpp | 513
-rw-r--r--  ndb/src/ndbapi/NdbScanReceiver.cpp | 187
-rw-r--r--  ndb/src/ndbapi/NdbScanReceiver.hpp | 210
-rw-r--r--  ndb/src/ndbapi/Ndbif.cpp | 111
-rw-r--r--  ndb/src/ndbapi/Ndbinit.cpp | 187
-rw-r--r--  ndb/src/ndbapi/Ndblist.cpp | 34
-rw-r--r--  ndb/src/ndbapi/ObjectMap.hpp | 7
-rw-r--r--  ndb/src/ndbapi/TransporterFacade.cpp | 224
-rw-r--r--  ndb/src/ndbapi/TransporterFacade.hpp | 57
-rw-r--r--  ndb/src/ndbapi/ndb_cluster_connection.cpp | 208
-rw-r--r--  ndb/src/ndbapi/ndberror.c | 26
222 files changed, 10408 insertions, 7819 deletions
diff --git a/ndb/src/common/debugger/DebuggerNames.cpp b/ndb/src/common/debugger/DebuggerNames.cpp
index 2142138e435..8571b8ece86 100644
--- a/ndb/src/common/debugger/DebuggerNames.cpp
+++ b/ndb/src/common/debugger/DebuggerNames.cpp
@@ -15,6 +15,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <ndb_global.h>
+#include <BaseString.hpp>
#include "DebuggerNames.hpp"
@@ -29,7 +30,7 @@ static const char * localBlockNames[NO_OF_BLOCKS];
static
int
initSignalNames(const char * dst[], const GsnName src[], unsigned short len){
- int i;
+ unsigned i;
for(i = 0; i<=MAX_GSN; i++)
dst[i] = 0;
@@ -53,14 +54,13 @@ initSignalNames(const char * dst[], const GsnName src[], unsigned short len){
static
int
initSignalPrinters(SignalDataPrintFunction dst[],
- const NameFunctionPair src[],
- unsigned short len){
- int i;
+ const NameFunctionPair src[]){
+ unsigned i;
for(i = 0; i<=MAX_GSN; i++)
dst[i] = 0;
- for(i = 0; i<len; i++){
- unsigned short gsn = src[i].gsn;
+ unsigned short gsn;
+ for(i = 0; (gsn = src[i].gsn) > 0; i++){
SignalDataPrintFunction fun = src[i].function;
if(dst[gsn] != 0 && fun != 0){
@@ -81,7 +81,7 @@ int
initBlockNames(const char * dst[],
const BlockName src[],
unsigned len){
- int i;
+ unsigned i;
for(i = 0; i<NO_OF_BLOCKS; i++)
dst[i] = 0;
@@ -107,8 +107,7 @@ xxx_DUMMY_SIGNAL_NAMES_xxx = initSignalNames(localSignalNames,
NO_OF_SIGNAL_NAMES);
static const int
xxx_DUMMY_PRINT_FUNCTIONS_xxx = initSignalPrinters(localPrintFunctions,
- SignalDataPrintFunctions,
- NO_OF_PRINT_FUNCTIONS);
+ SignalDataPrintFunctions);
static const int
xxx_DUMMY_BLOCK_NAMES_xxx = initBlockNames(localBlockNames,
@@ -133,7 +132,7 @@ getBlockName(unsigned short blockNo, const char * ret){
return localBlockNames[blockNo-MIN_BLOCK_NO];
if (ret == 0) {
static char buf[20];
- snprintf(buf, sizeof(buf), "BLOCK#%d", (int)blockNo);
+ BaseString::snprintf(buf, sizeof(buf), "BLOCK#%d", (int)blockNo);
return buf;
}
return ret;
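
The initSignalPrinters() change above drops the explicit length parameter and instead walks the NameFunctionPair table until it reaches an entry whose gsn is 0. A minimal standalone sketch of that sentinel-terminated table pattern follows; the names here are illustrative stand-ins, not the real NDB types:

#include <cstdio>

// Illustrative stand-ins: the real table maps GSN numbers to
// signal-data print functions (NameFunctionPair / SignalDataPrintFunction).
typedef void (*PrintFn)(int gsn);
struct Pair { unsigned short gsn; PrintFn fn; };

static void printA(int gsn) { std::printf("A handles gsn %d\n", gsn); }
static void printB(int gsn) { std::printf("B handles gsn %d\n", gsn); }

// Table terminated by a { 0, 0 } sentinel instead of a separate length.
static const Pair table[] = { { 12, printA }, { 13, printB }, { 0, 0 } };

static void initPrinters(PrintFn dst[], unsigned maxGsn) {
  for (unsigned i = 0; i <= maxGsn; i++)   // clear the destination array
    dst[i] = 0;
  unsigned short gsn;
  for (unsigned i = 0; (gsn = table[i].gsn) > 0; i++)  // stop at the sentinel
    dst[gsn] = table[i].fn;
}

int main() {
  PrintFn printers[32] = { 0 };
  initPrinters(printers, 31);
  if (printers[13]) printers[13](13);   // dispatch by GSN if registered
  return 0;
}

The same file also swaps the bare ::snprintf call for BaseString::snprintf(buf, sizeof(buf), ...), the bounded-formatting pattern repeated throughout the EventLogger.cpp diff below.
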
diff --git a/ndb/src/common/debugger/EventLogger.cpp b/ndb/src/common/debugger/EventLogger.cpp
index 50c3b778731..8a09be9a0a7 100644
--- a/ndb/src/common/debugger/EventLogger.cpp
+++ b/ndb/src/common/debugger/EventLogger.cpp
@@ -28,6 +28,10 @@
//
// PUBLIC
//
+EventLoggerBase::~EventLoggerBase()
+{
+
+}
/**
* This matrix defines which event should be printed when
@@ -35,128 +39,95 @@
* threshold - is in range [0-15]
* severity - DEBUG to ALERT (Type of log message)
*/
-const EventLogger::EventRepLogLevelMatrix EventLogger::matrix[] = {
+const EventLoggerBase::EventRepLogLevelMatrix EventLoggerBase::matrix[] = {
// CONNECTION
- { EventReport::Connected, LogLevel::llConnection, 8, LL_INFO },
- { EventReport::Disconnected, LogLevel::llConnection, 8, LL_ALERT },
- { EventReport::CommunicationClosed, LogLevel::llConnection, 8, LL_INFO },
- { EventReport::CommunicationOpened, LogLevel::llConnection, 8, LL_INFO },
- { EventReport::ConnectedApiVersion, LogLevel::llConnection, 8, LL_INFO },
+ { EventReport::Connected, LogLevel::llConnection, 8, Logger::LL_INFO },
+ { EventReport::Disconnected, LogLevel::llConnection, 8, Logger::LL_ALERT },
+ { EventReport::CommunicationClosed, LogLevel::llConnection, 8, Logger::LL_INFO },
+ { EventReport::CommunicationOpened, LogLevel::llConnection, 8, Logger::LL_INFO },
+ { EventReport::ConnectedApiVersion, LogLevel::llConnection, 8, Logger::LL_INFO },
// CHECKPOINT
- { EventReport::GlobalCheckpointStarted, LogLevel::llCheckpoint, 9, LL_INFO },
- { EventReport::GlobalCheckpointCompleted,LogLevel::llCheckpoint,10, LL_INFO },
- { EventReport::LocalCheckpointStarted, LogLevel::llCheckpoint, 7, LL_INFO },
- { EventReport::LocalCheckpointCompleted,LogLevel::llCheckpoint, 8, LL_INFO },
- { EventReport::LCPStoppedInCalcKeepGci, LogLevel::llCheckpoint, 0, LL_ALERT },
- { EventReport::LCPFragmentCompleted, LogLevel::llCheckpoint, 11, LL_INFO },
- { EventReport::UndoLogBlocked, LogLevel::llCheckpoint, 7, LL_INFO },
+ { EventReport::GlobalCheckpointStarted, LogLevel::llCheckpoint, 9, Logger::LL_INFO },
+ { EventReport::GlobalCheckpointCompleted,LogLevel::llCheckpoint,10, Logger::LL_INFO },
+ { EventReport::LocalCheckpointStarted, LogLevel::llCheckpoint, 7, Logger::LL_INFO },
+ { EventReport::LocalCheckpointCompleted,LogLevel::llCheckpoint, 8, Logger::LL_INFO },
+ { EventReport::LCPStoppedInCalcKeepGci, LogLevel::llCheckpoint, 0, Logger::LL_ALERT },
+ { EventReport::LCPFragmentCompleted, LogLevel::llCheckpoint, 11, Logger::LL_INFO },
+ { EventReport::UndoLogBlocked, LogLevel::llCheckpoint, 7, Logger::LL_INFO },
// STARTUP
- { EventReport::NDBStartStarted, LogLevel::llStartUp, 1, LL_INFO },
- { EventReport::NDBStartCompleted, LogLevel::llStartUp, 1, LL_INFO },
- { EventReport::STTORRYRecieved, LogLevel::llStartUp,15, LL_INFO },
- { EventReport::StartPhaseCompleted, LogLevel::llStartUp, 4, LL_INFO },
- { EventReport::CM_REGCONF, LogLevel::llStartUp, 3, LL_INFO },
- { EventReport::CM_REGREF, LogLevel::llStartUp, 8, LL_INFO },
- { EventReport::FIND_NEIGHBOURS, LogLevel::llStartUp, 8, LL_INFO },
- { EventReport::NDBStopStarted, LogLevel::llStartUp, 1, LL_INFO },
- { EventReport::NDBStopAborted, LogLevel::llStartUp, 1, LL_INFO },
- { EventReport::StartREDOLog, LogLevel::llStartUp, 10, LL_INFO },
- { EventReport::StartLog, LogLevel::llStartUp, 10, LL_INFO },
- { EventReport::UNDORecordsExecuted, LogLevel::llStartUp, 15, LL_INFO },
+ { EventReport::NDBStartStarted, LogLevel::llStartUp, 1, Logger::LL_INFO },
+ { EventReport::NDBStartCompleted, LogLevel::llStartUp, 1, Logger::LL_INFO },
+ { EventReport::STTORRYRecieved, LogLevel::llStartUp,15, Logger::LL_INFO },
+ { EventReport::StartPhaseCompleted, LogLevel::llStartUp, 4, Logger::LL_INFO },
+ { EventReport::CM_REGCONF, LogLevel::llStartUp, 3, Logger::LL_INFO },
+ { EventReport::CM_REGREF, LogLevel::llStartUp, 8, Logger::LL_INFO },
+ { EventReport::FIND_NEIGHBOURS, LogLevel::llStartUp, 8, Logger::LL_INFO },
+ { EventReport::NDBStopStarted, LogLevel::llStartUp, 1, Logger::LL_INFO },
+ { EventReport::NDBStopAborted, LogLevel::llStartUp, 1, Logger::LL_INFO },
+ { EventReport::StartREDOLog, LogLevel::llStartUp, 10, Logger::LL_INFO },
+ { EventReport::StartLog, LogLevel::llStartUp, 10, Logger::LL_INFO },
+ { EventReport::UNDORecordsExecuted, LogLevel::llStartUp, 15, Logger::LL_INFO },
// NODERESTART
- { EventReport::NR_CopyDict, LogLevel::llNodeRestart, 8, LL_INFO },
- { EventReport::NR_CopyDistr, LogLevel::llNodeRestart, 8, LL_INFO },
- { EventReport::NR_CopyFragsStarted, LogLevel::llNodeRestart, 8, LL_INFO },
- { EventReport::NR_CopyFragDone, LogLevel::llNodeRestart, 10, LL_INFO },
- { EventReport::NR_CopyFragsCompleted, LogLevel::llNodeRestart, 8, LL_INFO },
+ { EventReport::NR_CopyDict, LogLevel::llNodeRestart, 8, Logger::LL_INFO },
+ { EventReport::NR_CopyDistr, LogLevel::llNodeRestart, 8, Logger::LL_INFO },
+ { EventReport::NR_CopyFragsStarted, LogLevel::llNodeRestart, 8, Logger::LL_INFO },
+ { EventReport::NR_CopyFragDone, LogLevel::llNodeRestart, 10, Logger::LL_INFO },
+ { EventReport::NR_CopyFragsCompleted, LogLevel::llNodeRestart, 8, Logger::LL_INFO },
- { EventReport::NodeFailCompleted, LogLevel::llNodeRestart, 8, LL_ALERT},
- { EventReport::NODE_FAILREP, LogLevel::llNodeRestart, 8, LL_ALERT},
- { EventReport::ArbitState, LogLevel::llNodeRestart, 6, LL_INFO },
- { EventReport::ArbitResult, LogLevel::llNodeRestart, 2, LL_ALERT},
- { EventReport::GCP_TakeoverStarted, LogLevel::llNodeRestart, 7, LL_INFO },
- { EventReport::GCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, LL_INFO },
- { EventReport::LCP_TakeoverStarted, LogLevel::llNodeRestart, 7, LL_INFO },
- { EventReport::LCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, LL_INFO },
+ { EventReport::NodeFailCompleted, LogLevel::llNodeRestart, 8, Logger::LL_ALERT},
+ { EventReport::NODE_FAILREP, LogLevel::llNodeRestart, 8, Logger::LL_ALERT},
+ { EventReport::ArbitState, LogLevel::llNodeRestart, 6, Logger::LL_INFO },
+ { EventReport::ArbitResult, LogLevel::llNodeRestart, 2, Logger::LL_ALERT},
+ { EventReport::GCP_TakeoverStarted, LogLevel::llNodeRestart, 7, Logger::LL_INFO },
+ { EventReport::GCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, Logger::LL_INFO },
+ { EventReport::LCP_TakeoverStarted, LogLevel::llNodeRestart, 7, Logger::LL_INFO },
+ { EventReport::LCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, Logger::LL_INFO },
// STATISTIC
- { EventReport::TransReportCounters, LogLevel::llStatistic, 8, LL_INFO },
- { EventReport::OperationReportCounters, LogLevel::llStatistic, 8, LL_INFO },
- { EventReport::TableCreated, LogLevel::llStatistic, 7, LL_INFO },
- { EventReport::JobStatistic, LogLevel::llStatistic, 9, LL_INFO },
- { EventReport::SendBytesStatistic, LogLevel::llStatistic, 9, LL_INFO },
- { EventReport::ReceiveBytesStatistic, LogLevel::llStatistic, 9, LL_INFO },
- { EventReport::MemoryUsage, LogLevel::llStatistic, 5, LL_INFO },
+ { EventReport::TransReportCounters, LogLevel::llStatistic, 8, Logger::LL_INFO },
+ { EventReport::OperationReportCounters, LogLevel::llStatistic, 8, Logger::LL_INFO },
+ { EventReport::TableCreated, LogLevel::llStatistic, 7, Logger::LL_INFO },
+ { EventReport::JobStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO },
+ { EventReport::SendBytesStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO },
+ { EventReport::ReceiveBytesStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO },
+ { EventReport::MemoryUsage, LogLevel::llStatistic, 5, Logger::LL_INFO },
// ERROR
- { EventReport::TransporterError, LogLevel::llError, 2, LL_ERROR },
- { EventReport::TransporterWarning, LogLevel::llError, 8, LL_WARNING },
- { EventReport::MissedHeartbeat, LogLevel::llError, 8, LL_WARNING },
- { EventReport::DeadDueToHeartbeat, LogLevel::llError, 8, LL_ALERT },
- { EventReport::WarningEvent, LogLevel::llError, 2, LL_WARNING },
+ { EventReport::TransporterError, LogLevel::llError, 2, Logger::LL_ERROR },
+ { EventReport::TransporterWarning, LogLevel::llError, 8, Logger::LL_WARNING },
+ { EventReport::MissedHeartbeat, LogLevel::llError, 8, Logger::LL_WARNING },
+ { EventReport::DeadDueToHeartbeat, LogLevel::llError, 8, Logger::LL_ALERT },
+ { EventReport::WarningEvent, LogLevel::llError, 2, Logger::LL_WARNING },
// INFO
- { EventReport::SentHeartbeat, LogLevel::llInfo, 12, LL_INFO },
- { EventReport::CreateLogBytes, LogLevel::llInfo, 11, LL_INFO },
- { EventReport::InfoEvent, LogLevel::llInfo, 2, LL_INFO },
+ { EventReport::SentHeartbeat, LogLevel::llInfo, 12, Logger::LL_INFO },
+ { EventReport::CreateLogBytes, LogLevel::llInfo, 11, Logger::LL_INFO },
+ { EventReport::InfoEvent, LogLevel::llInfo, 2, Logger::LL_INFO },
//Global replication
- { EventReport::GrepSubscriptionInfo, LogLevel::llGrep, 7, LL_INFO},
- { EventReport::GrepSubscriptionAlert, LogLevel::llGrep, 7, LL_ALERT}
-};
-
-const Uint32 EventLogger::matrixSize = sizeof(EventLogger::matrix)/
- sizeof(EventRepLogLevelMatrix);
-
-/**
- * Default log levels for management nodes.
- *
- * threshold - is in range [0-15]
- */
-const EventLogger::EventLogMatrix EventLogger::defEventLogMatrix[] = {
- { LogLevel::llStartUp, 7 },
- { LogLevel::llShutdown, 7 },
- { LogLevel::llStatistic, 7 },
- { LogLevel::llCheckpoint, 7 },
- { LogLevel::llNodeRestart, 7 },
- { LogLevel::llConnection, 7 },
- { LogLevel::llError, 15 },
- { LogLevel::llInfo, 7 },
- { LogLevel::llGrep, 7 }
-};
+ { EventReport::GrepSubscriptionInfo, LogLevel::llGrep, 7, Logger::LL_INFO},
+ { EventReport::GrepSubscriptionAlert, LogLevel::llGrep, 7, Logger::LL_ALERT},
-const Uint32
-EventLogger::defEventLogMatrixSize = sizeof(EventLogger::defEventLogMatrix)/
- sizeof(EventLogMatrix);
-/**
- * Specifies allowed event categories/log levels that can be set from
- * the Management API/interactive shell.
- */
-const EventLogger::EventCategoryName EventLogger::eventCategoryNames[] = {
- { LogLevel::llStartUp, "STARTUP" },
- { LogLevel::llStatistic, "STATISTICS" },
- { LogLevel::llCheckpoint, "CHECKPOINT" },
- { LogLevel::llNodeRestart, "NODERESTART" },
- { LogLevel::llConnection, "CONNECTION" },
- { LogLevel::llInfo, "INFO" },
- { LogLevel::llGrep, "GREP" }
+ // Backup
+ { EventReport::BackupStarted, LogLevel::llBackup, 7, Logger::LL_INFO },
+ { EventReport::BackupCompleted, LogLevel::llBackup, 7, Logger::LL_INFO },
+ { EventReport::BackupFailedToStart, LogLevel::llBackup, 7, Logger::LL_ALERT},
+ { EventReport::BackupAborted, LogLevel::llBackup, 7, Logger::LL_ALERT }
};
-const Uint32
-EventLogger::noOfEventCategoryNames = sizeof(EventLogger::eventCategoryNames)/
- sizeof(EventLogger::EventCategoryName);
-
-char EventLogger::m_text[MAX_TEXT_LENGTH];
+const Uint32 EventLoggerBase::matrixSize = sizeof(EventLoggerBase::matrix)/
+ sizeof(EventRepLogLevelMatrix);
const char*
-EventLogger::getText(int type,
+EventLogger::getText(char * m_text, size_t m_text_len,
+ int type,
const Uint32* theData, NodeId nodeId)
{
// TODO: Change the switch implementation...
char theNodeId[32];
if (nodeId != 0){
- ::snprintf(theNodeId, 32, "Node %u: ", nodeId);
+ BaseString::snprintf(theNodeId, 32, "Node %u: ", nodeId);
} else {
theNodeId[0] = 0;
}
@@ -164,13 +135,13 @@ EventLogger::getText(int type,
EventReport::EventType eventType = (EventReport::EventType)type;
switch (eventType){
case EventReport::Connected:
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sNode %u Connected",
theNodeId,
theData[1]);
break;
case EventReport::ConnectedApiVersion:
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sNode %u: API version %d.%d.%d",
theNodeId,
theData[1],
@@ -179,7 +150,7 @@ EventLogger::getText(int type,
getBuild(theData[2]));
break;
case EventReport::Disconnected:
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sNode %u Disconnected",
theNodeId,
theData[1]);
@@ -188,7 +159,7 @@ EventLogger::getText(int type,
//-----------------------------------------------------------------------
// REPORT communication to node closed.
//-----------------------------------------------------------------------
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sCommunication to Node %u closed",
theNodeId,
theData[1]);
@@ -197,7 +168,7 @@ EventLogger::getText(int type,
//-----------------------------------------------------------------------
// REPORT communication to node opened.
//-----------------------------------------------------------------------
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sCommunication to Node %u opened",
theNodeId,
theData[1]);
@@ -206,7 +177,7 @@ EventLogger::getText(int type,
//-----------------------------------------------------------------------
// Start of NDB has been initiated.
//-----------------------------------------------------------------------
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sStart initiated (version %d.%d.%d)",
theNodeId ,
getMajor(theData[1]),
@@ -214,13 +185,13 @@ EventLogger::getText(int type,
getBuild(theData[1]));
break;
case EventReport::NDBStopStarted:
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%s%s shutdown initiated",
theNodeId,
(theData[1] == 1 ? "Cluster" : "Node"));
break;
case EventReport::NDBStopAborted:
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sNode shutdown aborted",
theNodeId);
break;
@@ -228,7 +199,7 @@ EventLogger::getText(int type,
//-----------------------------------------------------------------------
// Start of NDB has been completed.
//-----------------------------------------------------------------------
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sStarted (version %d.%d.%d)",
theNodeId ,
getMajor(theData[1]),
@@ -240,7 +211,7 @@ EventLogger::getText(int type,
//-----------------------------------------------------------------------
// STTORRY recevied after restart finished.
//-----------------------------------------------------------------------
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sSTTORRY received after restart finished",
theNodeId);
break;
@@ -266,7 +237,7 @@ EventLogger::getText(int type,
type = "";
break;
default:{
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sStart phase %u completed (unknown = %d)",
theNodeId,
theData[1],
@@ -274,7 +245,7 @@ EventLogger::getText(int type,
return m_text;
}
}
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sStart phase %u completed %s",
theNodeId,
theData[1],
@@ -283,7 +254,7 @@ EventLogger::getText(int type,
break;
}
case EventReport::CM_REGCONF:
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sCM_REGCONF president = %u, own Node = %u, our dynamic id = %u"
,
theNodeId,
@@ -315,7 +286,7 @@ EventLogger::getText(int type,
break;
}//switch
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sCM_REGREF from Node %u to our Node %u. Cause = %s",
theNodeId,
theData[2],
@@ -327,8 +298,8 @@ EventLogger::getText(int type,
//-----------------------------------------------------------------------
// REPORT Node Restart copied a fragment.
//-----------------------------------------------------------------------
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sWe are Node %u with dynamic ID %u, our left neighbour "
"is Node %u, our right is Node %u",
theNodeId,
@@ -344,13 +315,13 @@ EventLogger::getText(int type,
if (theData[1] == 0)
{
if (theData[3] != 0) {
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sNode %u completed failure of Node %u",
theNodeId,
theData[3],
theData[2]);
} else {
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sAll nodes completed failure of Node %u",
theNodeId,
theData[2]);
@@ -367,7 +338,7 @@ EventLogger::getText(int type,
line = "DBLQH";
}
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sNode failure of %u %s completed",
theNodeId,
theData[2],
@@ -375,8 +346,8 @@ EventLogger::getText(int type,
}
break;
case EventReport::NODE_FAILREP:
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sNode %u has failed. The Node state at failure "
"was %u",
theNodeId,
@@ -395,41 +366,41 @@ EventLogger::getText(int type,
const unsigned state = sd->code >> 16;
switch (code) {
case ArbitCode::ThreadStart:
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sPresident restarts arbitration thread [state=%u]",
theNodeId, state);
break;
case ArbitCode::PrepPart2:
sd->ticket.getText(ticketText, sizeof(ticketText));
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sPrepare arbitrator node %u [ticket=%s]",
theNodeId, sd->node, ticketText);
break;
case ArbitCode::PrepAtrun:
sd->ticket.getText(ticketText, sizeof(ticketText));
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sReceive arbitrator node %u [ticket=%s]",
theNodeId, sd->node, ticketText);
break;
case ArbitCode::ApiStart:
sd->ticket.getText(ticketText, sizeof(ticketText));
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sStarted arbitrator node %u [ticket=%s]",
theNodeId, sd->node, ticketText);
break;
case ArbitCode::ApiFail:
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sLost arbitrator node %u - process failure [state=%u]",
theNodeId, sd->node, state);
break;
case ArbitCode::ApiExit:
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sLost arbitrator node %u - process exit [state=%u]",
theNodeId, sd->node, state);
break;
default:
ArbitCode::getErrText(code, errText, sizeof(errText));
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sLost arbitrator node %u - %s [state=%u]",
theNodeId, sd->node, errText, state);
break;
@@ -446,48 +417,48 @@ EventLogger::getText(int type,
const unsigned state = sd->code >> 16;
switch (code) {
case ArbitCode::LoseNodes:
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sArbitration check lost - less than 1/2 nodes left",
theNodeId);
break;
case ArbitCode::WinGroups:
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sArbitration check won - node group majority",
theNodeId);
break;
case ArbitCode::LoseGroups:
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sArbitration check lost - missing node group",
theNodeId);
break;
case ArbitCode::Partitioning:
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sNetwork partitioning - arbitration required",
theNodeId);
break;
case ArbitCode::WinChoose:
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sArbitration won - positive reply from node %u",
theNodeId, sd->node);
break;
case ArbitCode::LoseChoose:
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sArbitration lost - negative reply from node %u",
theNodeId, sd->node);
break;
case ArbitCode::LoseNorun:
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sNetwork partitioning - no arbitrator available",
theNodeId);
break;
case ArbitCode::LoseNocfg:
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sNetwork partitioning - no arbitrator configured",
theNodeId);
break;
default:
ArbitCode::getErrText(code, errText, sizeof(errText));
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sArbitration failure - %s [state=%u]",
theNodeId, errText, state);
break;
@@ -499,8 +470,8 @@ EventLogger::getText(int type,
// This event reports that a global checkpoint has been started and this
// node is the master of this global checkpoint.
//-----------------------------------------------------------------------
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sGlobal checkpoint %u started",
theNodeId,
theData[1]);
@@ -510,7 +481,7 @@ EventLogger::getText(int type,
// This event reports that a global checkpoint has been completed on this
// node and the node is the master of this global checkpoint.
//-----------------------------------------------------------------------
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sGlobal checkpoint %u completed",
theNodeId,
theData[1]);
@@ -520,8 +491,8 @@ EventLogger::getText(int type,
// This event reports that a local checkpoint has been started and this
// node is the master of this local checkpoint.
//-----------------------------------------------------------------------
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sLocal checkpoint %u started. "
"Keep GCI = %u oldest restorable GCI = %u",
theNodeId,
@@ -534,8 +505,8 @@ EventLogger::getText(int type,
// This event reports that a local checkpoint has been completed on this
// node and the node is the master of this local checkpoint.
//-----------------------------------------------------------------------
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sLocal checkpoint %u completed",
theNodeId,
theData[1]);
@@ -544,14 +515,14 @@ EventLogger::getText(int type,
//-----------------------------------------------------------------------
// This event reports that a table has been created.
//-----------------------------------------------------------------------
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sTable with ID = %u created",
theNodeId,
theData[1]);
break;
case EventReport::LCPStoppedInCalcKeepGci:
if (theData[1] == 0)
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sLocal Checkpoint stopped in CALCULATED_KEEP_GCI",
theNodeId);
break;
@@ -559,8 +530,8 @@ EventLogger::getText(int type,
//-----------------------------------------------------------------------
// REPORT Node Restart completed copy of dictionary information.
//-----------------------------------------------------------------------
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sNode restart completed copy of dictionary information",
theNodeId);
break;
@@ -568,8 +539,8 @@ EventLogger::getText(int type,
//-----------------------------------------------------------------------
// REPORT Node Restart completed copy of distribution information.
//-----------------------------------------------------------------------
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sNode restart completed copy of distribution information",
theNodeId);
break;
@@ -577,8 +548,8 @@ EventLogger::getText(int type,
//-----------------------------------------------------------------------
// REPORT Node Restart is starting to copy the fragments.
//-----------------------------------------------------------------------
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sNode restart starting to copy the fragments "
"to Node %u",
theNodeId,
@@ -588,8 +559,8 @@ EventLogger::getText(int type,
//-----------------------------------------------------------------------
// REPORT Node Restart copied a fragment.
//-----------------------------------------------------------------------
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sTable ID = %u, fragment ID = %u have been copied "
"to Node %u",
theNodeId,
@@ -598,16 +569,16 @@ EventLogger::getText(int type,
theData[1]);
break;
case EventReport::NR_CopyFragsCompleted:
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sNode restart completed copying the fragments "
"to Node %u",
theNodeId,
theData[1]);
break;
case EventReport::LCPFragmentCompleted:
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sTable ID = %u, fragment ID = %u has completed LCP "
"on Node %u",
theNodeId,
@@ -619,12 +590,13 @@ EventLogger::getText(int type,
// -------------------------------------------------------------------
// Report information about transaction activity once per 10 seconds.
// -------------------------------------------------------------------
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sTrans. Count = %u, Commit Count = %u, "
"Read Count = %u, Simple Read Count = %u,\n"
"Write Count = %u, AttrInfo Count = %u, "
- "Concurrent Operations = %u, Abort Count = %u",
+ "Concurrent Operations = %u, Abort Count = %u\n"
+ " Scans: %u Range scans: %u",
theNodeId,
theData[1],
theData[2],
@@ -633,10 +605,12 @@ EventLogger::getText(int type,
theData[5],
theData[6],
theData[7],
- theData[8]);
+ theData[8],
+ theData[9],
+ theData[10]);
break;
case EventReport::OperationReportCounters:
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%sOperations=%u",
theNodeId,
theData[1]);
@@ -645,8 +619,8 @@ EventLogger::getText(int type,
//-----------------------------------------------------------------------
// REPORT Undo Logging blocked due to buffer near to overflow.
//-----------------------------------------------------------------------
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sACC Blocked %u and TUP Blocked %u times last second",
theNodeId,
theData[1],
@@ -654,8 +628,8 @@ EventLogger::getText(int type,
break;
case EventReport::TransporterError:
case EventReport::TransporterWarning:
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sTransporter to node %d reported error 0x%x",
theNodeId,
theData[1],
@@ -665,8 +639,8 @@ EventLogger::getText(int type,
//-----------------------------------------------------------------------
// REPORT Undo Logging blocked due to buffer near to overflow.
//-----------------------------------------------------------------------
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sNode %d missed heartbeat %d",
theNodeId,
theData[1],
@@ -676,45 +650,45 @@ EventLogger::getText(int type,
//-----------------------------------------------------------------------
// REPORT Undo Logging blocked due to buffer near to overflow.
//-----------------------------------------------------------------------
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sNode %d declared dead due to missed heartbeat",
theNodeId,
theData[1]);
break;
case EventReport::JobStatistic:
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sMean loop Counter in doJob last 8192 times = %u",
theNodeId,
theData[1]);
break;
case EventReport::SendBytesStatistic:
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sMean send size to Node = %d last 4096 sends = %u bytes",
theNodeId,
theData[1],
theData[2]);
break;
case EventReport::ReceiveBytesStatistic:
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sMean receive size to Node = %d last 4096 sends = %u bytes",
theNodeId,
theData[1],
theData[2]);
break;
case EventReport::SentHeartbeat:
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sNode Sent Heartbeat to node = %d",
theNodeId,
theData[1]);
break;
case EventReport::CreateLogBytes:
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sLog part %u, log file %u, MB %u",
theNodeId,
theData[1],
@@ -722,8 +696,8 @@ EventLogger::getText(int type,
theData[3]);
break;
case EventReport::StartLog:
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sLog part %u, start MB %u, stop MB %u, last GCI, log exec %u",
theNodeId,
theData[1],
@@ -732,8 +706,8 @@ EventLogger::getText(int type,
theData[4]);
break;
case EventReport::StartREDOLog:
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sNode: %d StartLog: [GCI Keep: %d LastCompleted: %d NewestRestorable: %d]",
theNodeId,
theData[1],
@@ -749,8 +723,8 @@ EventLogger::getText(int type,
line = "DBACC";
}
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%s UNDO %s %d [%d %d %d %d %d %d %d %d %d]",
theNodeId,
line,
@@ -767,37 +741,37 @@ EventLogger::getText(int type,
}
break;
case EventReport::InfoEvent:
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%s%s",
theNodeId,
(char *)&theData[1]);
break;
case EventReport::WarningEvent:
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%s%s",
theNodeId,
(char *)&theData[1]);
break;
case EventReport::GCP_TakeoverStarted:
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sGCP Take over started", theNodeId);
break;
case EventReport::GCP_TakeoverCompleted:
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sGCP Take over completed", theNodeId);
break;
case EventReport::LCP_TakeoverStarted:
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sLCP Take over started", theNodeId);
break;
case EventReport::LCP_TakeoverCompleted:
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sLCP Take over completed (state = %d)",
theNodeId, theData[1]);
break;
@@ -809,7 +783,7 @@ EventLogger::getText(int type,
const int block = theData[5];
const int percent = (used*100)/total;
- ::snprintf(m_text, sizeof(m_text),
+ BaseString::snprintf(m_text, m_text_len,
"%s%s usage %s %d%s(%d %dK pages of total %d)",
theNodeId,
(block==DBACC ? "Index" : (block == DBTUP ?"Data":"<unknown>")),
@@ -819,478 +793,508 @@ EventLogger::getText(int type,
);
break;
}
-
-
case EventReport::GrepSubscriptionInfo :
- {
- GrepEvent::Subscription event = (GrepEvent::Subscription)theData[1];
- switch(event) {
- case GrepEvent::GrepSS_CreateSubIdConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Created subscription id"
- " (subId=%d,SubKey=%d)"
- " Return code: %d.",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepPS_CreateSubIdConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: Created subscription id"
- " (subId=%d,SubKey=%d)"
- " Return code: %d.",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepSS_SubCreateConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- const int nodegrp = theData[5];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Created subscription using"
- " (subId=%d,SubKey=%d)"
- " in primary system. Primary system has %d nodegroup(s)."
- " Return code: %d",
- subId,
- subKey,
- nodegrp,
- err);
- break;
- }
- case GrepEvent::GrepPS_SubCreateConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: All participants have created "
- "subscriptions"
- " using (subId=%d,SubKey=%d)."
- " Return code: %d",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepSS_SubStartMetaConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Logging started on meta data changes."
- " using (subId=%d,SubKey=%d)"
- " Return code: %d",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepPS_SubStartMetaConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: All participants have started "
- "logging meta data"
- " changes on the subscription subId=%d,SubKey=%d) "
- "(N.I yet)."
- " Return code: %d",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepSS_SubStartDataConf: {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Logging started on table data changes "
- " using (subId=%d,SubKey=%d)"
- " Return code: %d",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepPS_SubStartDataConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: All participants have started logging "
- "table data changes on the subscription "
- "subId=%d,SubKey=%d)."
- " Return code: %d",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepPS_SubSyncMetaConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: All participants have started "
- " synchronization on meta data (META SCAN) using "
- "(subId=%d,SubKey=%d)."
- " Return code: %d",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepSS_SubSyncMetaConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Synchronization started (META SCAN) on "
- " meta data using (subId=%d,SubKey=%d)"
- " Return code: %d",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepPS_SubSyncDataConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: All participants have started "
- "synchronization "
- " on table data (DATA SCAN) using (subId=%d,SubKey=%d)."
- " Return code: %d",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepSS_SubSyncDataConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- const int gci = theData[5];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Synchronization started (DATA SCAN) on "
- "table data using (subId=%d,SubKey=%d). GCI = %d"
- " Return code: %d",
- subId,
- subKey,
- gci,
- err);
- break;
- }
- case GrepEvent::GrepPS_SubRemoveConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: All participants have removed "
- "subscription (subId=%d,SubKey=%d). I have cleaned "
- "up resources I've used."
- " Return code: %d",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepSS_SubRemoveConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Removed subscription "
- "(subId=%d,SubKey=%d)"
- " Return code: %d",
- subId,
- subKey,
- err);
- break;
- }
+ {
+ GrepEvent::Subscription event = (GrepEvent::Subscription)theData[1];
+ switch(event) {
+ case GrepEvent::GrepSS_CreateSubIdConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Created subscription id"
+ " (subId=%d,SubKey=%d)"
+ " Return code: %d.",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepPS_CreateSubIdConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: Created subscription id"
+ " (subId=%d,SubKey=%d)"
+ " Return code: %d.",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepSS_SubCreateConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ const int nodegrp = theData[5];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Created subscription using"
+ " (subId=%d,SubKey=%d)"
+ " in primary system. Primary system has %d nodegroup(s)."
+ " Return code: %d",
+ subId,
+ subKey,
+ nodegrp,
+ err);
+ break;
+ }
+ case GrepEvent::GrepPS_SubCreateConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: All participants have created "
+ "subscriptions"
+ " using (subId=%d,SubKey=%d)."
+ " Return code: %d",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepSS_SubStartMetaConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Logging started on meta data changes."
+ " using (subId=%d,SubKey=%d)"
+ " Return code: %d",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepPS_SubStartMetaConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: All participants have started "
+ "logging meta data"
+ " changes on the subscription subId=%d,SubKey=%d) "
+ "(N.I yet)."
+ " Return code: %d",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepSS_SubStartDataConf: {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Logging started on table data changes "
+ " using (subId=%d,SubKey=%d)"
+ " Return code: %d",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepPS_SubStartDataConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: All participants have started logging "
+ "table data changes on the subscription "
+ "subId=%d,SubKey=%d)."
+ " Return code: %d",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepPS_SubSyncMetaConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: All participants have started "
+ " synchronization on meta data (META SCAN) using "
+ "(subId=%d,SubKey=%d)."
+ " Return code: %d",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepSS_SubSyncMetaConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Synchronization started (META SCAN) on "
+ " meta data using (subId=%d,SubKey=%d)"
+ " Return code: %d",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepPS_SubSyncDataConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: All participants have started "
+ "synchronization "
+ " on table data (DATA SCAN) using (subId=%d,SubKey=%d)."
+ " Return code: %d",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepSS_SubSyncDataConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ const int gci = theData[5];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Synchronization started (DATA SCAN) on "
+ "table data using (subId=%d,SubKey=%d). GCI = %d"
+ " Return code: %d",
+ subId,
+ subKey,
+ gci,
+ err);
+ break;
+ }
+ case GrepEvent::GrepPS_SubRemoveConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: All participants have removed "
+ "subscription (subId=%d,SubKey=%d). I have cleaned "
+ "up resources I've used."
+ " Return code: %d",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepSS_SubRemoveConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Removed subscription "
+ "(subId=%d,SubKey=%d)"
+ " Return code: %d",
+ subId,
+ subKey,
+ err);
+ break;
+ }
default:
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sUnknown GrepSubscriptonInfo event: %d",
theNodeId,
theData[1]);
- }
- break;
}
-
+ break;
+ }
+
case EventReport::GrepSubscriptionAlert :
+ {
+ GrepEvent::Subscription event = (GrepEvent::Subscription)theData[1];
+ switch(event)
+ {
+ case GrepEvent::GrepSS_CreateSubIdRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::SSCoord:Error code: %d Error message: %s"
+ " (subId=%d,SubKey=%d)",
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err),
+ subId,
+ subKey);
+ break;
+ }
+ case GrepEvent::GrepSS_SubCreateRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: FAILED to Created subscription using"
+ " (subId=%d,SubKey=%d)in primary system."
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::GrepSS_SubStartMetaRef:
{
- GrepEvent::Subscription event = (GrepEvent::Subscription)theData[1];
- switch(event)
- {
- case GrepEvent::GrepSS_CreateSubIdRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord:Error code: %d Error message: %s"
- " (subId=%d,SubKey=%d)",
- err,
- GrepError::getErrorDesc((GrepError::Code)err),
- subId,
- subKey);
- break;
- }
- case GrepEvent::GrepSS_SubCreateRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: FAILED to Created subscription using"
- " (subId=%d,SubKey=%d)in primary system."
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::GrepSS_SubStartMetaRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Logging failed to start on meta "
- "data changes."
- " using (subId=%d,SubKey=%d)"
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::GrepSS_SubStartDataRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Logging FAILED to start on table data "
- " changes using (subId=%d,SubKey=%d)"
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::GrepSS_SubSyncMetaRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Synchronization FAILED (META SCAN) on "
- " meta data using (subId=%d,SubKey=%d)"
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::GrepSS_SubSyncDataRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- const int gci = theData[5];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Synchronization FAILED (DATA SCAN) on "
- "table data using (subId=%d,SubKey=%d). GCI = %d"
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- gci,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::GrepSS_SubRemoveRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Failed to remove subscription "
- "(subId=%d,SubKey=%d). "
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- err,
- GrepError::getErrorDesc((GrepError::Code)err)
- );
- break;
- }
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+			 "Grep::SSCoord: Logging failed to start on meta "
+			 "data changes"
+			 " using (subId=%d,SubKey=%d)"
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::GrepSS_SubStartDataRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Logging FAILED to start on table data "
+ " changes using (subId=%d,SubKey=%d)"
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::GrepSS_SubSyncMetaRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Synchronization FAILED (META SCAN) on "
+ " meta data using (subId=%d,SubKey=%d)"
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::GrepSS_SubSyncDataRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ const int gci = theData[5];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Synchronization FAILED (DATA SCAN) on "
+ "table data using (subId=%d,SubKey=%d). GCI = %d"
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ gci,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::GrepSS_SubRemoveRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Failed to remove subscription "
+ "(subId=%d,SubKey=%d). "
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err)
+ );
+ break;
+ }
- case GrepEvent::GrepPS_CreateSubIdRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: Error code: %d Error Message: %s"
- " (subId=%d,SubKey=%d)",
- err,
- GrepError::getErrorDesc((GrepError::Code)err),
- subId,
- subKey);
- break;
- }
- case GrepEvent::GrepPS_SubCreateRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: FAILED to Created subscription using"
- " (subId=%d,SubKey=%d)in primary system."
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::GrepPS_SubStartMetaRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: Logging failed to start on meta "
- "data changes."
- " using (subId=%d,SubKey=%d)"
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::GrepPS_SubStartDataRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: Logging FAILED to start on table data "
- " changes using (subId=%d,SubKey=%d)"
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::GrepPS_SubSyncMetaRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: Synchronization FAILED (META SCAN) on "
- " meta data using (subId=%d,SubKey=%d)"
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::GrepPS_SubSyncDataRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- const int gci = theData[5];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: Synchronization FAILED (DATA SCAN) on "
- "table data using (subId=%d,SubKey=%d). GCI = %d. "
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- gci,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::GrepPS_SubRemoveRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: Failed to remove subscription "
- "(subId=%d,SubKey=%d)."
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::Rep_Disconnect:
- {
- const int err = theData[4];
- const int nodeId = theData[5];
- ::snprintf(m_text, sizeof(m_text),
- "Rep: Node %d."
- " Error code: %d Error Message: %s",
- nodeId,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
+ case GrepEvent::GrepPS_CreateSubIdRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: Error code: %d Error Message: %s"
+ " (subId=%d,SubKey=%d)",
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err),
+ subId,
+ subKey);
+ break;
+ }
+ case GrepEvent::GrepPS_SubCreateRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+			 "Grep::PSCoord: FAILED to create subscription using"
+			 " (subId=%d,SubKey=%d) in primary system."
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::GrepPS_SubStartMetaRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+			 "Grep::PSCoord: Logging failed to start on meta "
+			 "data changes"
+			 " using (subId=%d,SubKey=%d)"
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::GrepPS_SubStartDataRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: Logging FAILED to start on table data "
+ " changes using (subId=%d,SubKey=%d)"
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::GrepPS_SubSyncMetaRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: Synchronization FAILED (META SCAN) on "
+ " meta data using (subId=%d,SubKey=%d)"
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::GrepPS_SubSyncDataRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ const int gci = theData[5];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: Synchronization FAILED (DATA SCAN) on "
+ "table data using (subId=%d,SubKey=%d). GCI = %d. "
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ gci,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::GrepPS_SubRemoveRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ BaseString::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: Failed to remove subscription "
+ "(subId=%d,SubKey=%d)."
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::Rep_Disconnect:
+ {
+ const int err = theData[4];
+ const int nodeId = theData[5];
+ BaseString::snprintf(m_text, m_text_len,
+ "Rep: Node %d."
+ " Error code: %d Error Message: %s",
+ nodeId,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
- default:
- ::snprintf(m_text,
- sizeof(m_text),
- "%sUnknown GrepSubscriptionAlert event: %d",
- theNodeId,
- theData[1]);
- break;
- }
- break;
+ default:
+ BaseString::snprintf(m_text,
+ m_text_len,
+ "%sUnknown GrepSubscriptionAlert event: %d",
+ theNodeId,
+ theData[1]);
+ break;
}
-
+ break;
+ }
+
+ case EventReport::BackupStarted:
+ BaseString::snprintf(m_text,
+ m_text_len,
+ "%sBackup %d started from node %d",
+ theNodeId, theData[2], refToNode(theData[1]));
+ break;
+ case EventReport::BackupFailedToStart:
+ BaseString::snprintf(m_text,
+ m_text_len,
+ "%sBackup request from %d failed to start. Error: %d",
+ theNodeId, refToNode(theData[1]), theData[2]);
+ break;
+ case EventReport::BackupCompleted:
+ BaseString::snprintf(m_text,
+ m_text_len,
+ "%sBackup %d started from node %d completed\n"
+ " StartGCP: %d StopGCP: %d\n"
+ " #Records: %d #LogRecords: %d\n"
+ " Data: %d bytes Log: %d bytes",
+ theNodeId, theData[2], refToNode(theData[1]),
+ theData[3], theData[4], theData[6], theData[8],
+ theData[5], theData[7]);
+ break;
+ case EventReport::BackupAborted:
+ BaseString::snprintf(m_text,
+ m_text_len,
+ "%sBackup %d started from %d has been aborted. Error: %d",
+ theNodeId,
+ theData[2],
+ refToNode(theData[1]),
+ theData[3]);
+ break;
default:
- ::snprintf(m_text,
- sizeof(m_text),
+ BaseString::snprintf(m_text,
+ m_text_len,
"%sUnknown event: %d",
theNodeId,
theData[0]);
@@ -1299,54 +1303,10 @@ EventLogger::getText(int type,
return m_text;
}
-bool
-EventLogger::matchEventCategory(const char * str,
- LogLevel::EventCategory * cat,
- bool exactMatch){
- unsigned i;
- if(cat == 0 || str == 0)
- return false;
-
- char * tmp = strdup(str);
- for(i = 0; i<strlen(tmp); i++)
- tmp[i] = toupper(tmp[i]);
-
- for(i = 0; i<noOfEventCategoryNames; i++){
- if(strcmp(tmp, eventCategoryNames[i].name) == 0){
- * cat = eventCategoryNames[i].category;
- free(tmp);
- return true;
- }
- }
- free(tmp);
- return false;
-}
-
-const char *
-EventLogger::getEventCategoryName(LogLevel::EventCategory cat){
-
- for(unsigned i = 0; i<noOfEventCategoryNames; i++){
- if(cat == eventCategoryNames[i].category){
- return eventCategoryNames[i].name;
- }
- }
- return 0;
-}
-
-
-EventLogger::EventLogger() : Logger(), m_logLevel(), m_filterLevel(15)
+EventLogger::EventLogger() : m_filterLevel(15)
{
setCategory("EventLogger");
- m_logLevel.setLogLevel(LogLevel::llStartUp, m_filterLevel);
- m_logLevel.setLogLevel(LogLevel::llShutdown, m_filterLevel);
- // m_logLevel.setLogLevel(LogLevel::llStatistic, m_filterLevel);
- // m_logLevel.setLogLevel(LogLevel::llCheckpoint, m_filterLevel);
- m_logLevel.setLogLevel(LogLevel::llNodeRestart, m_filterLevel);
- m_logLevel.setLogLevel(LogLevel::llConnection, m_filterLevel);
- m_logLevel.setLogLevel(LogLevel::llError, m_filterLevel);
- m_logLevel.setLogLevel(LogLevel::llInfo, m_filterLevel);
- enable(Logger::LL_INFO, Logger::LL_ALERT); // Log INFO to ALERT
-
+ enable(Logger::LL_INFO, Logger::LL_ALERT);
}
EventLogger::~EventLogger()
@@ -1367,66 +1327,77 @@ EventLogger::close()
removeAllHandlers();
}
-void
-EventLogger::log(NodeId nodeId, int eventType, const Uint32* theData)
+static NdbOut&
+operator<<(NdbOut& out, const LogLevel & ll)
{
- log(eventType, theData, nodeId);
+ out << "[LogLevel: ";
+ for(size_t i = 0; i<LogLevel::LOGLEVEL_CATEGORIES; i++)
+ out << ll.getLogLevel((LogLevel::EventCategory)i) << " ";
+ out << "]";
+ return out;
}
void
-EventLogger::log(int eventType, const Uint32* theData, NodeId nodeId)
+EventLogger::log(int eventType, const Uint32* theData, NodeId nodeId,
+ const LogLevel* ll)
{
Uint32 threshold = 0;
- Logger::LoggerLevel severity = LL_WARNING;
+ Logger::LoggerLevel severity = Logger::LL_WARNING;
+ LogLevel::EventCategory cat= LogLevel::llInvalid;
- for(unsigned i = 0; i<EventLogger::matrixSize; i++){
- if(EventLogger::matrix[i].eventType == eventType){
- const LogLevel::EventCategory cat = EventLogger::matrix[i].eventCategory;
- threshold = m_logLevel.getLogLevel(cat);
- severity = EventLogger::matrix[i].severity;
+ for(unsigned i = 0; i<EventLoggerBase::matrixSize; i++){
+ if(EventLoggerBase::matrix[i].eventType == eventType){
+ cat = EventLoggerBase::matrix[i].eventCategory;
+ threshold = EventLoggerBase::matrix[i].threshold;
+ severity = EventLoggerBase::matrix[i].severity;
break;
}
}
+
+ if (cat == LogLevel::llInvalid)
+ return;
- if (threshold <= m_filterLevel){
+ Uint32 set = ll?ll->getLogLevel(cat) : m_logLevel.getLogLevel(cat);
+ if (threshold <= set){
switch (severity){
- case LL_ALERT:
- alert(EventLogger::getText(eventType, theData, nodeId));
+ case Logger::LL_ALERT:
+ alert(EventLogger::getText(m_text, sizeof(m_text),
+ eventType, theData, nodeId));
break;
- case LL_CRITICAL:
- critical(EventLogger::getText(eventType, theData, nodeId));
+ case Logger::LL_CRITICAL:
+ critical(EventLogger::getText(m_text, sizeof(m_text),
+ eventType, theData, nodeId));
break;
- case LL_WARNING:
- warning(EventLogger::getText(eventType, theData, nodeId));
+ case Logger::LL_WARNING:
+ warning(EventLogger::getText(m_text, sizeof(m_text),
+ eventType, theData, nodeId));
break;
- case LL_ERROR:
- error(EventLogger::getText(eventType, theData, nodeId));
+ case Logger::LL_ERROR:
+ error(EventLogger::getText(m_text, sizeof(m_text),
+ eventType, theData, nodeId));
break;
- case LL_INFO:
- info(EventLogger::getText(eventType, theData, nodeId));
+ case Logger::LL_INFO:
+ info(EventLogger::getText(m_text, sizeof(m_text),
+ eventType, theData, nodeId));
break;
- case LL_DEBUG:
- debug(EventLogger::getText(eventType, theData, nodeId));
+ case Logger::LL_DEBUG:
+ debug(EventLogger::getText(m_text, sizeof(m_text),
+ eventType, theData, nodeId));
break;
default:
- info(EventLogger::getText(eventType, theData, nodeId));
+ info(EventLogger::getText(m_text, sizeof(m_text),
+ eventType, theData, nodeId));
break;
}
} // if (..
}
-LogLevel&
-EventLogger::getLoglevel()
-{
- return m_logLevel;
-}
-
int
EventLogger::getFilterLevel() const
{
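
The EventLogger rework above moves each event's category, threshold and severity into a shared EventLoggerBase::matrix, makes getText() write into a caller-supplied buffer, and lets log() take an optional LogLevel so a per-connection level can override the logger's own settings. A minimal caller sketch, assuming the types from this patch (the header name and the optional last argument are assumptions):

    #include <EventLogger.hpp>   // header name assumed

    void report_event(EventLogger & logger,
                      int eventType,              // EventReport::EventType value
                      const Uint32 * theData,     // signal data words
                      NodeId nodeId,
                      const LogLevel * nodeLevel) // may be 0: fall back to the logger's levels
    {
      // Filtering now happens inside log(): the event's category and threshold
      // come from EventLoggerBase::matrix, and the configured level is read
      // from nodeLevel when supplied, otherwise from m_logLevel.
      logger.log(eventType, theData, nodeId, nodeLevel);
    }
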
diff --git a/ndb/src/common/debugger/Makefile.am b/ndb/src/common/debugger/Makefile.am
index 0278d0d2ba0..d0fb30717cd 100644
--- a/ndb/src/common/debugger/Makefile.am
+++ b/ndb/src/common/debugger/Makefile.am
@@ -2,7 +2,7 @@ SUBDIRS = signaldata
noinst_LTLIBRARIES = libtrace.la
-libtrace_la_SOURCES = SignalLoggerManager.cpp DebuggerNames.cpp BlockNames.cpp LogLevel.cpp EventLogger.cpp GrepError.cpp
+libtrace_la_SOURCES = SignalLoggerManager.cpp DebuggerNames.cpp BlockNames.cpp EventLogger.cpp GrepError.cpp
include $(top_srcdir)/ndb/config/common.mk.am
include $(top_srcdir)/ndb/config/type_kernel.mk.am
diff --git a/ndb/src/common/debugger/signaldata/ContinueB.cpp b/ndb/src/common/debugger/signaldata/ContinueB.cpp
index 1be6da86cb1..c295041bc01 100644
--- a/ndb/src/common/debugger/signaldata/ContinueB.cpp
+++ b/ndb/src/common/debugger/signaldata/ContinueB.cpp
@@ -24,9 +24,9 @@ bool
printCONTINUEB(FILE * output, const Uint32 * theData, Uint32 len,
Uint16 receiverBlockNo){
if(receiverBlockNo == DBDIH){
- return printCONTINUEB_DBDIH(output, theData, len);
+ return printCONTINUEB_DBDIH(output, theData, len, 0);
} else if(receiverBlockNo == NDBFS) {
- return printCONTINUEB_NDBFS(output, theData, len);
+ return printCONTINUEB_NDBFS(output, theData, len, 0);
}
return false;
diff --git a/ndb/src/common/debugger/signaldata/CopyGCI.cpp b/ndb/src/common/debugger/signaldata/CopyGCI.cpp
index 96186e82525..173b3f6708f 100644
--- a/ndb/src/common/debugger/signaldata/CopyGCI.cpp
+++ b/ndb/src/common/debugger/signaldata/CopyGCI.cpp
@@ -21,22 +21,22 @@ void
print(char * buf, size_t buf_len, CopyGCIReq::CopyReason r){
switch(r){
case CopyGCIReq::IDLE:
- snprintf(buf, buf_len, "IDLE");
+ BaseString::snprintf(buf, buf_len, "IDLE");
break;
case CopyGCIReq::LOCAL_CHECKPOINT:
- snprintf(buf, buf_len, "LOCAL_CHECKPOINT");
+ BaseString::snprintf(buf, buf_len, "LOCAL_CHECKPOINT");
break;
case CopyGCIReq::RESTART:
- snprintf(buf, buf_len, "RESTART");
+ BaseString::snprintf(buf, buf_len, "RESTART");
break;
case CopyGCIReq::GLOBAL_CHECKPOINT:
- snprintf(buf, buf_len, "GLOBAL_CHECKPOINT");
+ BaseString::snprintf(buf, buf_len, "GLOBAL_CHECKPOINT");
break;
case CopyGCIReq::INITIAL_START_COMPLETED:
- snprintf(buf, buf_len, "INITIAL_START_COMPLETED");
+ BaseString::snprintf(buf, buf_len, "INITIAL_START_COMPLETED");
break;
default:
- snprintf(buf, buf_len, "<Unknown>");
+ BaseString::snprintf(buf, buf_len, "<Unknown>");
}
}
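
This and the following printers switch their snprintf/vsnprintf call sites wholesale to BaseString::snprintf, so every component formats through one portable wrapper (basestring_vsnprintf.c in the diffstat) instead of whatever the platform libc provides. As a rough illustration only of what such a wrapper guarantees, with a hypothetical name and not the actual ndb implementation:

    #include <stdarg.h>
    #include <stdio.h>

    // Hypothetical stand-in for the BaseString wrapper: one entry point that
    // always NUL-terminates, regardless of the platform vsnprintf behaviour.
    int my_portable_snprintf(char *buf, size_t size, const char *fmt, ...)
    {
      va_list ap;
      va_start(ap, fmt);
      int ret = vsnprintf(buf, size, fmt, ap);
      va_end(ap);
      if (size > 0)
        buf[size - 1] = '\0';
      return ret;
    }
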
diff --git a/ndb/src/common/debugger/signaldata/CreateTrig.cpp b/ndb/src/common/debugger/signaldata/CreateTrig.cpp
index ddd45080cba..db5344cfbe7 100644
--- a/ndb/src/common/debugger/signaldata/CreateTrig.cpp
+++ b/ndb/src/common/debugger/signaldata/CreateTrig.cpp
@@ -28,51 +28,51 @@ bool printCREATE_TRIG_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uin
//sig->getTriggerName((char *) &triggerName);
switch (sig->getTriggerType()) {
case(TriggerType::SECONDARY_INDEX):
- snprintf(triggerType, sizeof(triggerType), "SECONDARY_INDEX");
+ BaseString::snprintf(triggerType, sizeof(triggerType), "SECONDARY_INDEX");
break;
case(TriggerType::SUBSCRIPTION):
- snprintf(triggerType, sizeof(triggerType), "SUBSCRIPTION");
+ BaseString::snprintf(triggerType, sizeof(triggerType), "SUBSCRIPTION");
break;
case(TriggerType::ORDERED_INDEX):
- snprintf(triggerType, sizeof(triggerType), "ORDERED_INDEX");
+ BaseString::snprintf(triggerType, sizeof(triggerType), "ORDERED_INDEX");
break;
default:
- snprintf(triggerType, sizeof(triggerType), "UNKNOWN [%d]", (int)sig->getTriggerType());
+ BaseString::snprintf(triggerType, sizeof(triggerType), "UNKNOWN [%d]", (int)sig->getTriggerType());
break;
}
switch (sig->getTriggerActionTime()) {
case (TriggerActionTime::TA_BEFORE):
- snprintf(triggerActionTime, sizeof(triggerActionTime), "BEFORE");
+ BaseString::snprintf(triggerActionTime, sizeof(triggerActionTime), "BEFORE");
break;
case(TriggerActionTime::TA_AFTER):
- snprintf(triggerActionTime, sizeof(triggerActionTime), "AFTER");
+ BaseString::snprintf(triggerActionTime, sizeof(triggerActionTime), "AFTER");
break;
case (TriggerActionTime::TA_DEFERRED):
- snprintf(triggerActionTime, sizeof(triggerActionTime), "DEFERRED");
+ BaseString::snprintf(triggerActionTime, sizeof(triggerActionTime), "DEFERRED");
break;
case (TriggerActionTime::TA_DETACHED):
- snprintf(triggerActionTime, sizeof(triggerActionTime), "DETACHED");
+ BaseString::snprintf(triggerActionTime, sizeof(triggerActionTime), "DETACHED");
break;
default:
- snprintf(triggerActionTime, sizeof(triggerActionTime),
+ BaseString::snprintf(triggerActionTime, sizeof(triggerActionTime),
"UNKNOWN [%d]", (int)sig->getTriggerActionTime());
break;
}
switch (sig->getTriggerEvent()) {
case (TriggerEvent::TE_INSERT):
- snprintf(triggerEvent, sizeof(triggerEvent), "INSERT");
+ BaseString::snprintf(triggerEvent, sizeof(triggerEvent), "INSERT");
break;
case(TriggerEvent::TE_DELETE):
- snprintf(triggerEvent, sizeof(triggerEvent), "DELETE");
+ BaseString::snprintf(triggerEvent, sizeof(triggerEvent), "DELETE");
break;
case(TriggerEvent::TE_UPDATE):
- snprintf(triggerEvent, sizeof(triggerEvent), "UPDATE");
+ BaseString::snprintf(triggerEvent, sizeof(triggerEvent), "UPDATE");
break;
case(TriggerEvent::TE_CUSTOM):
- snprintf(triggerEvent, sizeof(triggerEvent), "CUSTOM");
+ BaseString::snprintf(triggerEvent, sizeof(triggerEvent), "CUSTOM");
break;
default:
- snprintf(triggerEvent, sizeof(triggerEvent), "UNKNOWN [%d]", (int)sig->getTriggerEvent());
+ BaseString::snprintf(triggerEvent, sizeof(triggerEvent), "UNKNOWN [%d]", (int)sig->getTriggerEvent());
break;
}
diff --git a/ndb/src/common/debugger/signaldata/DihContinueB.cpp b/ndb/src/common/debugger/signaldata/DihContinueB.cpp
index 94453e76d72..9fece17315c 100644
--- a/ndb/src/common/debugger/signaldata/DihContinueB.cpp
+++ b/ndb/src/common/debugger/signaldata/DihContinueB.cpp
@@ -18,7 +18,10 @@
#include <signaldata/DihContinueB.hpp>
bool
-printCONTINUEB_DBDIH(FILE * output, const Uint32 * theData, Uint32 len){
+printCONTINUEB_DBDIH(FILE * output, const Uint32 * theData,
+ Uint32 len, Uint16 not_used){
+
+ (void)not_used;
switch (theData[0]) {
case DihContinueB::ZPACK_TABLE_INTO_PAGES:
diff --git a/ndb/src/common/debugger/signaldata/Makefile.am b/ndb/src/common/debugger/signaldata/Makefile.am
index 0a5806e1e00..c855c5f8a18 100644
--- a/ndb/src/common/debugger/signaldata/Makefile.am
+++ b/ndb/src/common/debugger/signaldata/Makefile.am
@@ -23,7 +23,8 @@ libsignaldataprint_la_SOURCES = \
FailRep.cpp DisconnectRep.cpp SignalDroppedRep.cpp \
SumaImpl.cpp NdbSttor.cpp CreateFragmentation.cpp \
UtilLock.cpp TuxMaint.cpp AccLock.cpp \
- LqhTrans.cpp ReadNodesConf.cpp CntrStart.cpp
+ LqhTrans.cpp ReadNodesConf.cpp CntrStart.cpp \
+ ScanFrag.cpp
include $(top_srcdir)/ndb/config/common.mk.am
include $(top_srcdir)/ndb/config/type_ndbapi.mk.am
diff --git a/ndb/src/common/debugger/signaldata/MasterLCP.cpp b/ndb/src/common/debugger/signaldata/MasterLCP.cpp
index aa30404524f..078b92f6f2e 100644
--- a/ndb/src/common/debugger/signaldata/MasterLCP.cpp
+++ b/ndb/src/common/debugger/signaldata/MasterLCP.cpp
@@ -23,16 +23,16 @@ void
print(char *buf, size_t buf_len, MasterLCPConf::State s){
switch(s){
case MasterLCPConf::LCP_STATUS_IDLE:
- snprintf(buf, buf_len, "LCP_STATUS_IDLE");
+ BaseString::snprintf(buf, buf_len, "LCP_STATUS_IDLE");
break;
case MasterLCPConf::LCP_STATUS_ACTIVE:
- snprintf(buf, buf_len, "LCP_STATUS_ACTIVE");
+ BaseString::snprintf(buf, buf_len, "LCP_STATUS_ACTIVE");
break;
case MasterLCPConf::LCP_TAB_COMPLETED:
- snprintf(buf, buf_len, "LCP_TAB_COMPLETED");
+ BaseString::snprintf(buf, buf_len, "LCP_TAB_COMPLETED");
break;
case MasterLCPConf::LCP_TAB_SAVED:
- snprintf(buf, buf_len, "LCP_TAB_SAVED");
+ BaseString::snprintf(buf, buf_len, "LCP_TAB_SAVED");
break;
}
}
diff --git a/ndb/src/common/debugger/signaldata/NdbfsContinueB.cpp b/ndb/src/common/debugger/signaldata/NdbfsContinueB.cpp
index b3c7a61136e..9f55efae017 100644
--- a/ndb/src/common/debugger/signaldata/NdbfsContinueB.cpp
+++ b/ndb/src/common/debugger/signaldata/NdbfsContinueB.cpp
@@ -18,7 +18,10 @@
#include <signaldata/NdbfsContinueB.hpp>
bool
-printCONTINUEB_NDBFS(FILE * output, const Uint32 * theData, Uint32 len){
+printCONTINUEB_NDBFS(FILE * output, const Uint32 * theData,
+ Uint32 len, Uint16 not_used){
+
+ (void)not_used;
switch (theData[0]) {
case NdbfsContinueB::ZSCAN_MEMORYCHANNEL_10MS_DELAY:
diff --git a/ndb/src/common/debugger/signaldata/ScanFrag.cpp b/ndb/src/common/debugger/signaldata/ScanFrag.cpp
new file mode 100644
index 00000000000..4d19a325637
--- /dev/null
+++ b/ndb/src/common/debugger/signaldata/ScanFrag.cpp
@@ -0,0 +1,42 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+
+
+#include <BlockNumbers.h>
+#include <signaldata/ScanTab.hpp>
+#include <signaldata/ScanFrag.hpp>
+
+bool
+printSCAN_FRAGREQ(FILE * output, const Uint32 * theData,
+ Uint32 len, Uint16 receiverBlockNo) {
+ const ScanFragReq * const sig = (ScanFragReq *)theData;
+ fprintf(output, " senderData: %x\n", sig->senderData);
+ fprintf(output, " resultRef: %x\n", sig->resultRef);
+ fprintf(output, " savePointId: %x\n", sig->savePointId);
+ fprintf(output, " requestInfo: %x\n", sig->requestInfo);
+ fprintf(output, " tableId: %x\n", sig->tableId);
+ fprintf(output, " fragmentNo: %x\n", sig->fragmentNoKeyLen & 0xFFFF);
+ fprintf(output, " keyLen: %x\n", sig->fragmentNoKeyLen >> 16);
+ fprintf(output, " schemaVersion: %x\n", sig->schemaVersion);
+ fprintf(output, " transId1: %x\n", sig->transId1);
+ fprintf(output, " transId2: %x\n", sig->transId2);
+ fprintf(output, " clientOpPtr: %x\n", sig->clientOpPtr);
+ fprintf(output, " batch_size_rows: %x\n", sig->batch_size_rows);
+ fprintf(output, " batch_size_bytes: %x\n", sig->batch_size_bytes);
+ return true;
+}
+
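
printSCAN_FRAGREQ above assumes fragmentNoKeyLen packs two fields into one word: the fragment number in the low 16 bits and the key length in the high 16 bits. A small sketch of that convention (helper names are hypothetical, the bit layout is taken from the printer):

    // Fragment number in bits 0..15, key length in bits 16..31.
    static inline Uint32 pack_frag_keylen(Uint32 fragNo, Uint32 keyLen)
    {
      return (keyLen << 16) | (fragNo & 0xFFFF);
    }

    static inline void unpack_frag_keylen(Uint32 packed,
                                          Uint32 & fragNo, Uint32 & keyLen)
    {
      fragNo = packed & 0xFFFF;
      keyLen = packed >> 16;
    }
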
diff --git a/ndb/src/common/debugger/signaldata/ScanTab.cpp b/ndb/src/common/debugger/signaldata/ScanTab.cpp
index 4b057171963..72a4d9f94b9 100644
--- a/ndb/src/common/debugger/signaldata/ScanTab.cpp
+++ b/ndb/src/common/debugger/signaldata/ScanTab.cpp
@@ -27,38 +27,26 @@ printSCANTABREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiv
const UintR requestInfo = sig->requestInfo;
- fprintf(output, " apiConnectPtr: H\'%.8x\n",
+ fprintf(output, " apiConnectPtr: H\'%.8x",
sig->apiConnectPtr);
fprintf(output, " requestInfo: H\'%.8x:\n", requestInfo);
- fprintf(output, " Parallellism: %u, Batch: %u LockMode: %u, Holdlock: %u, RangeScan: %u\n",
+  fprintf(output, "  Parallelism: %u, Batch: %u LockMode: %u, Holdlock: %u, RangeScan: %u, Keyinfo: %u\n", 
sig->getParallelism(requestInfo),
sig->getScanBatch(requestInfo),
sig->getLockMode(requestInfo),
sig->getHoldLockFlag(requestInfo),
- sig->getRangeScanFlag(requestInfo));
+ sig->getRangeScanFlag(requestInfo),
+ sig->getKeyinfoFlag(requestInfo));
- fprintf(output, " attrLen: %d, tableId: %d, tableSchemaVer: %d\n",
- sig->attrLen, sig->tableId, sig->tableSchemaVersion);
+ Uint32 keyLen = (sig->attrLenKeyLen >> 16);
+ Uint32 attrLen = (sig->attrLenKeyLen & 0xFFFF);
+ fprintf(output, " attrLen: %d, keyLen: %d tableId: %d, tableSchemaVer: %d\n",
+ attrLen, keyLen, sig->tableId, sig->tableSchemaVersion);
fprintf(output, " transId(1, 2): (H\'%.8x, H\'%.8x) storedProcId: H\'%.8x\n",
sig->transId1, sig->transId2, sig->storedProcId);
-
- fprintf(output, " OperationPtr(s):\n ");
- Uint32 restLen = (len - 9);
- const Uint32 * rest = &sig->apiOperationPtr[0];
- while(restLen >= 7){
- fprintf(output,
- " H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x\n",
- rest[0], rest[1], rest[2], rest[3],
- rest[4], rest[5], rest[6]);
- restLen -= 7;
- rest += 7;
- }
- if(restLen > 0){
- for(Uint32 i = 0; i<restLen; i++)
- fprintf(output, " H\'%.8x", rest[i]);
- fprintf(output, "\n");
- }
+ fprintf(output, " batch_byte_size: %d, first_batch_size: %d\n",
+ sig->batch_byte_size, sig->first_batch_size);
return false;
}
@@ -82,7 +70,7 @@ printSCANTABCONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 recei
fprintf(output, " Operation(s) [api tc rows len]:\n");
ScanTabConf::OpData * op = (ScanTabConf::OpData*)
(theData + ScanTabConf::SignalLength);
- for(int i = 0; i<op_count; i++){
+ for(size_t i = 0; i<op_count; i++){
if(op->info != ScanTabConf::EndOfData)
fprintf(output, " [0x%x 0x%x %d %d]",
op->apiPtrI, op->tcPtrI,
diff --git a/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp b/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp
index 65351663789..3314f0bd097 100644
--- a/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp
+++ b/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp
@@ -53,6 +53,7 @@
#include <signaldata/UtilPrepare.hpp>
#include <signaldata/UtilExecute.hpp>
#include <signaldata/ScanTab.hpp>
+#include <signaldata/ScanFrag.hpp>
#include <signaldata/LqhFrag.hpp>
#include <signaldata/LqhTransConf.hpp>
#include <signaldata/DropTab.hpp>
@@ -75,12 +76,11 @@
#include <signaldata/TuxMaint.hpp>
#include <signaldata/AccLock.hpp>
-bool printCONTINUEB(FILE *, const Uint32 *, Uint32, Uint16);
-
/**
* This is the register
*/
-const NameFunctionPair
+
+const NameFunctionPair
SignalDataPrintFunctions[] = {
{ GSN_TCKEYREQ, printTCKEYREQ },
{ GSN_TCKEYCONF, printTCKEYCONF },
@@ -250,10 +250,10 @@ SignalDataPrintFunctions[] = {
,{ GSN_TUX_MAINT_REQ, printTUX_MAINT_REQ }
,{ GSN_ACC_LOCKREQ, printACC_LOCKREQ }
,{ GSN_LQH_TRANSCONF, printLQH_TRANSCONF }
+ ,{ GSN_SCAN_FRAGREQ, printSCAN_FRAGREQ }
+ ,{ 0, 0 }
};
-const unsigned short NO_OF_PRINT_FUNCTIONS = sizeof(SignalDataPrintFunctions)/sizeof(NameFunctionPair);
-
template class Bitmask<1>;
template class Bitmask<2>;
template class Bitmask<4>;
diff --git a/ndb/src/common/debugger/signaldata/SignalNames.cpp b/ndb/src/common/debugger/signaldata/SignalNames.cpp
index 9d4d5bdf6f5..9228e305677 100644
--- a/ndb/src/common/debugger/signaldata/SignalNames.cpp
+++ b/ndb/src/common/debugger/signaldata/SignalNames.cpp
@@ -446,6 +446,8 @@ const GsnName SignalNames [] = {
,{ GSN_STOP_REQ, "STOP_REQ" }
,{ GSN_STOP_REF, "STOP_REF" }
+ ,{ GSN_API_VERSION_REQ, "API_VERSION_REQ" }
+ ,{ GSN_API_VERSION_CONF, "API_VERSION_CONF" }
,{ GSN_ABORT_ALL_REQ, "ABORT_ALL_REQ" }
,{ GSN_ABORT_ALL_REF, "ABORT_ALL_REF" }
diff --git a/ndb/src/common/debugger/signaldata/TcKeyConf.cpp b/ndb/src/common/debugger/signaldata/TcKeyConf.cpp
index 727e097a464..652c2b8a557 100644
--- a/ndb/src/common/debugger/signaldata/TcKeyConf.cpp
+++ b/ndb/src/common/debugger/signaldata/TcKeyConf.cpp
@@ -22,38 +22,48 @@ printTCKEYCONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receive
if (receiverBlockNo == API_PACKED) {
- fprintf(output, "Signal data: ");
- Uint32 i = 0;
- while (i < len)
- fprintf(output, "H\'%.8x ", theData[i++]);
- fprintf(output,"\n");
+ return false;
+ Uint32 Theader = * theData++;
+ Uint32 TpacketLen = (Theader & 0x1F) + 3;
+ Uint32 TrecBlockNo = Theader >> 16;
+
+ do {
+ fprintf(output, "Block: %d %d %d\n", TrecBlockNo, len, TpacketLen);
+ printTCKEYCONF(output, theData, TpacketLen, TrecBlockNo);
+ assert(len >= (1 + TpacketLen));
+ len -= (1 + TpacketLen);
+ theData += TpacketLen;
+ } while(len);
+ return true;
}
else {
const TcKeyConf * const sig = (TcKeyConf *) theData;
- fprintf(output, "Signal data: ");
Uint32 i = 0;
Uint32 confInfo = sig->confInfo;
Uint32 noOfOp = TcKeyConf::getNoOfOperations(confInfo);
if (noOfOp > 10) noOfOp = 10;
- while (i < len)
- fprintf(output, "H\'%.8x ", theData[i++]);
- fprintf(output,"\n");
- fprintf(output, "apiConnectPtr: H'%.8x, gci: %u, transId:(H'%.8x, H'%.8x)\n",
+ fprintf(output, " apiConnectPtr: H'%.8x, gci: %u, transId:(H'%.8x, H'%.8x)\n",
sig->apiConnectPtr, sig->gci, sig->transId1, sig->transId2);
- fprintf(output, "noOfOperations: %u, commitFlag: %s, markerFlag: %s\n",
+ fprintf(output, " noOfOperations: %u, commitFlag: %s, markerFlag: %s\n",
noOfOp,
(TcKeyConf::getCommitFlag(confInfo) == 0)?"false":"true",
(TcKeyConf::getMarkerFlag(confInfo) == 0)?"false":"true");
fprintf(output, "Operations:\n");
for(i = 0; i < noOfOp; i++) {
- fprintf(output,
- "apiOperationPtr: H'%.8x, attrInfoLen: %u\n",
- sig->operations[i].apiOperationPtr,
- sig->operations[i].attrInfoLen);
+ if(sig->operations[i].attrInfoLen > TcKeyConf::SimpleReadBit)
+ fprintf(output,
+ " apiOperationPtr: H'%.8x, simplereadnode: %u\n",
+ sig->operations[i].apiOperationPtr,
+ sig->operations[i].attrInfoLen & (~TcKeyConf::SimpleReadBit));
+ else
+ fprintf(output,
+ " apiOperationPtr: H'%.8x, attrInfoLen: %u\n",
+ sig->operations[i].apiOperationPtr,
+ sig->operations[i].attrInfoLen);
}
}
-
+
return true;
}
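
The API_PACKED branch above now walks a packed signal: each entry starts with a header word carrying the receiving block number in the top 16 bits and a payload length in the low 5 bits, to which three fixed words are added (note that, as committed, the early return false short-circuits the loop). A sketch of decoding that header word, with a hypothetical helper type:

    struct PackedHeaderView {       // not part of the patch, illustration only
      Uint32 packetLen;             // (header & 0x1F) + 3
      Uint32 receiverBlockNo;       // header >> 16
    };

    static inline PackedHeaderView read_packed_header(Uint32 header)
    {
      PackedHeaderView v;
      v.packetLen = (header & 0x1F) + 3;
      v.receiverBlockNo = header >> 16;
      return v;
    }
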
diff --git a/ndb/src/common/logger/FileLogHandler.cpp b/ndb/src/common/logger/FileLogHandler.cpp
index 632db71db15..29172ff93ad 100644
--- a/ndb/src/common/logger/FileLogHandler.cpp
+++ b/ndb/src/common/logger/FileLogHandler.cpp
@@ -153,11 +153,11 @@ FileLogHandler::createNewFile()
if (fileNo >= m_maxNoFiles)
{
fileNo = 1;
- ::snprintf(newName, sizeof(newName),
+ BaseString::snprintf(newName, sizeof(newName),
"%s.%d", m_pLogFile->getName(), fileNo);
break;
}
- ::snprintf(newName, sizeof(newName),
+ BaseString::snprintf(newName, sizeof(newName),
"%s.%d", m_pLogFile->getName(), fileNo++);
} while (File_class::exists(newName));
diff --git a/ndb/src/common/logger/LogHandler.cpp b/ndb/src/common/logger/LogHandler.cpp
index 83d479c82fd..4fab957fc50 100644
--- a/ndb/src/common/logger/LogHandler.cpp
+++ b/ndb/src/common/logger/LogHandler.cpp
@@ -45,7 +45,7 @@ LogHandler::getDefaultHeader(char* pStr, const char* pCategory,
Logger::LoggerLevel level) const
{
char time[MAX_DATE_TIME_HEADER_LENGTH];
- ::snprintf(pStr, MAX_HEADER_LENGTH, "%s [%s] %s -- ",
+ BaseString::snprintf(pStr, MAX_HEADER_LENGTH, "%s [%s] %s -- ",
getTimeAsString((char*)time),
pCategory,
Logger::LoggerLevelNames[level]);
@@ -84,7 +84,7 @@ LogHandler::getTimeAsString(char* pStr) const
tm_now = ::localtime(&now); //uses the "current" timezone
#endif
- ::snprintf(pStr, MAX_DATE_TIME_HEADER_LENGTH,
+ BaseString::snprintf(pStr, MAX_DATE_TIME_HEADER_LENGTH,
m_pDateTimeFormat,
tm_now->tm_year + 1900,
tm_now->tm_mon + 1, //month is [0,11]. +1 -> [1,12]
diff --git a/ndb/src/common/logger/Logger.cpp b/ndb/src/common/logger/Logger.cpp
index c2fdecb642b..00a2fae67bc 100644
--- a/ndb/src/common/logger/Logger.cpp
+++ b/ndb/src/common/logger/Logger.cpp
@@ -232,7 +232,7 @@ Logger::enable(LoggerLevel logLevel)
{
if (logLevel == LL_ALL)
{
- for (int i = 1; i < MAX_LOG_LEVELS; i++)
+ for (unsigned i = 1; i < MAX_LOG_LEVELS; i++)
{
m_logLevels[i] = true;
}
@@ -264,7 +264,7 @@ Logger::disable(LoggerLevel logLevel)
{
if (logLevel == LL_ALL)
{
- for (int i = 0; i < MAX_LOG_LEVELS; i++)
+ for (unsigned i = 0; i < MAX_LOG_LEVELS; i++)
{
m_logLevels[i] = false;
}
@@ -340,7 +340,7 @@ Logger::log(LoggerLevel logLevel, const char* pMsg, va_list ap) const
while ( (pHandler = m_pHandlerList->next()) != NULL)
{
char buf[1024];
- vsnprintf(buf, sizeof(buf), pMsg, ap);
+ BaseString::vsnprintf(buf, sizeof(buf), pMsg, ap);
pHandler->append(m_pCategory, logLevel, buf);
}
}
diff --git a/ndb/src/common/logger/SysLogHandler.cpp b/ndb/src/common/logger/SysLogHandler.cpp
index a300c487eb9..5b1b8d85ca7 100644
--- a/ndb/src/common/logger/SysLogHandler.cpp
+++ b/ndb/src/common/logger/SysLogHandler.cpp
@@ -113,7 +113,7 @@ SysLogHandler::setParam(const BaseString &param, const BaseString &value) {
}
static const struct syslog_facility {
- char *name;
+ const char *name;
int value;
} facilitynames[] = {
{ "auth", LOG_AUTH },
diff --git a/ndb/src/common/logger/listtest/LogHandlerListUnitTest.cpp b/ndb/src/common/logger/listtest/LogHandlerListUnitTest.cpp
index 44ee11717b4..7de9ee46479 100644
--- a/ndb/src/common/logger/listtest/LogHandlerListUnitTest.cpp
+++ b/ndb/src/common/logger/listtest/LogHandlerListUnitTest.cpp
@@ -48,7 +48,7 @@ int main(int argc, char* argv[])
{
ndbout << "-- " << " Test " << i + 1
<< " [" << testCases[i].name << "] --" << endl;
- snprintf(str, 256, "%s %s %s %d", "Logging ",
+ BaseString::snprintf(str, 256, "%s %s %s %d", "Logging ",
testCases[i].name, " message ", i);
if (testCases[i].test(str))
{
@@ -128,7 +128,7 @@ LogHandlerListUnitTest::testTraverseNext(const char* msg)
{
char* str = new char[3];
pHandlers[i] = new ConsoleLogHandler();
- ::snprintf(str, 3, "%d", i);
+ BaseString::snprintf(str, 3, "%d", i);
pHandlers[i]->setDateTimeFormat(str);
list.add(pHandlers[i]);
}
diff --git a/ndb/src/common/logger/loggertest/LoggerUnitTest.cpp b/ndb/src/common/logger/loggertest/LoggerUnitTest.cpp
index 017dcb79c1f..990d2e0eada 100644
--- a/ndb/src/common/logger/loggertest/LoggerUnitTest.cpp
+++ b/ndb/src/common/logger/loggertest/LoggerUnitTest.cpp
@@ -86,7 +86,7 @@ NDB_COMMAND(loggertest, "loggertest", "loggertest -console | -file",
{
ndbout << "-- " << " Test " << i + 1
<< " [" << testCases[i].name << "] --" << endl;
- ::snprintf(str, 256, "%s %s %s %d", "Logging ",
+ BaseString::snprintf(str, 256, "%s %s %s %d", "Logging ",
testCases[i].name, " message ", i);
if (testCases[i].test(str))
{
diff --git a/ndb/src/common/mgmcommon/ConfigRetriever.cpp b/ndb/src/common/mgmcommon/ConfigRetriever.cpp
index 2e809907058..d8417ac146a 100644
--- a/ndb/src/common/mgmcommon/ConfigRetriever.cpp
+++ b/ndb/src/common/mgmcommon/ConfigRetriever.cpp
@@ -18,6 +18,7 @@
#include <ndb_version.h>
#include <ConfigRetriever.hpp>
+#include <SocketServer.hpp>
#include "LocalConfig.hpp"
#include <NdbSleep.h>
@@ -44,11 +45,14 @@
//****************************************************************************
//****************************************************************************
-ConfigRetriever::ConfigRetriever(Uint32 version, Uint32 node_type) {
-
+ConfigRetriever::ConfigRetriever(LocalConfig &local_config,
+ Uint32 version, Uint32 node_type)
+ : _localConfig(local_config)
+{
m_handle= 0;
m_version = version;
m_node_type = node_type;
+ _ownNodeId = _localConfig._ownNodeId;
}
ConfigRetriever::~ConfigRetriever(){
@@ -63,22 +67,11 @@ ConfigRetriever::~ConfigRetriever(){
//****************************************************************************
//****************************************************************************
-int
-ConfigRetriever::init() {
- if (!_localConfig.init(m_connectString.c_str(),
- _localConfigFileName.c_str())){
-
- setError(CR_ERROR, "error in retrieving contact info for mgmtsrvr");
- _localConfig.printError();
- _localConfig.printUsage();
- return -1;
- }
-
- return _ownNodeId = _localConfig._ownNodeId;
-}
-
int
-ConfigRetriever::do_connect(){
+ConfigRetriever::do_connect(int exit_on_connect_failure){
+
+ m_mgmd_port= 0;
+ m_mgmd_host= 0;
if(!m_handle)
m_handle= ndb_mgm_create_handle();
@@ -94,12 +87,20 @@ ConfigRetriever::do_connect(){
while(retry < retry_max){
Uint32 type = CR_ERROR;
BaseString tmp;
- for (int i = 0; i<_localConfig.ids.size(); i++){
+ for (unsigned int i = 0; i<_localConfig.ids.size(); i++){
MgmtSrvrId * m = &_localConfig.ids[i];
+ DBUG_PRINT("info",("trying %s:%d",
+ m->name.c_str(),
+ m->port));
switch(m->type){
case MgmId_TCP:
tmp.assfmt("%s:%d", m->name.c_str(), m->port);
if (ndb_mgm_connect(m_handle, tmp.c_str()) == 0) {
+ m_mgmd_port= m->port;
+ m_mgmd_host= m->name.c_str();
+ DBUG_PRINT("info",("connected to ndb_mgmd at %s:%d",
+ m_mgmd_host,
+ m_mgmd_port));
return 0;
}
setError(CR_RETRY, ndb_mgm_get_latest_error_desc(m_handle));
@@ -107,8 +108,10 @@ ConfigRetriever::do_connect(){
break;
}
}
-
if(latestErrorType == CR_RETRY){
+ DBUG_PRINT("info",("CR_RETRY"));
+ if (exit_on_connect_failure)
+ return 1;
REPORT_WARNING("Failed to retrieve cluster configuration");
ndbout << "(Cause of failure: " << getErrorString() << ")" << endl;
ndbout << "Attempt " << retry << " of " << retry_max << ". "
@@ -123,6 +126,8 @@ ConfigRetriever::do_connect(){
ndb_mgm_destroy_handle(&m_handle);
m_handle= 0;
+ m_mgmd_port= 0;
+ m_mgmd_host= 0;
return -1;
}
@@ -138,7 +143,7 @@ ConfigRetriever::getConfig() {
if(m_handle != 0){
p = getConfig(m_handle);
} else {
- for (int i = 0; i<_localConfig.ids.size(); i++){
+ for (unsigned int i = 0; i<_localConfig.ids.size(); i++){
MgmtSrvrId * m = &_localConfig.ids[i];
switch(m->type){
case MgmId_File:
@@ -154,7 +159,7 @@ ConfigRetriever::getConfig() {
if(p == 0)
return 0;
- if(!verifyConfig(p)){
+ if(!verifyConfig(p, _ownNodeId)){
free(p);
p= 0;
}
@@ -181,7 +186,7 @@ ConfigRetriever::getConfig(const char * filename){
const int res = stat(filename, &sbuf);
if(res != 0){
char buf[255];
- snprintf(buf, sizeof(buf), "Could not find file: \"%s\"", filename);
+ BaseString::snprintf(buf, sizeof(buf), "Could not find file: \"%s\"", filename);
setError(CR_ERROR, buf);
return 0;
}
@@ -206,7 +211,7 @@ ConfigRetriever::getConfig(const char * filename){
ConfigValuesFactory cvf;
if(!cvf.unpack(buf2, bytes)){
char buf[255];
- snprintf(buf, sizeof(buf), "Error while unpacking");
+ BaseString::snprintf(buf, sizeof(buf), "Error while unpacking");
setError(CR_ERROR, buf);
delete []buf2;
return 0;
@@ -228,91 +233,58 @@ ConfigRetriever::getErrorString(){
return errorString.c_str();
}
-void
-ConfigRetriever::setLocalConfigFileName(const char * localConfigFileName) {
- _localConfigFileName.assign(localConfigFileName ? localConfigFileName : "");
-}
-
-void
-ConfigRetriever::setConnectString(const char * connectString) {
- m_connectString.assign(connectString ? connectString : "");
-}
-
bool
-ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf){
+ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf, Uint32 nodeid){
char buf[255];
ndb_mgm_configuration_iterator * it;
it = ndb_mgm_create_configuration_iterator((struct ndb_mgm_configuration *)conf, CFG_SECTION_NODE);
if(it == 0){
- snprintf(buf, 255, "Unable to create config iterator");
+ BaseString::snprintf(buf, 255, "Unable to create config iterator");
setError(CR_ERROR, buf);
return false;
}
NdbAutoPtr<ndb_mgm_configuration_iterator> ptr(it);
- if(ndb_mgm_find(it, CFG_NODE_ID, _ownNodeId) != 0){
- snprintf(buf, 255, "Unable to find node with id: %d", _ownNodeId);
+ if(ndb_mgm_find(it, CFG_NODE_ID, nodeid) != 0){
+ BaseString::snprintf(buf, 255, "Unable to find node with id: %d", nodeid);
setError(CR_ERROR, buf);
return false;
}
const char * hostname;
if(ndb_mgm_get_string_parameter(it, CFG_NODE_HOST, &hostname)){
- snprintf(buf, 255, "Unable to get hostname(%d) from config",CFG_NODE_HOST);
+ BaseString::snprintf(buf, 255, "Unable to get hostname(%d) from config",CFG_NODE_HOST);
setError(CR_ERROR, buf);
return false;
}
- char localhost[MAXHOSTNAMELEN];
- if(NdbHost_GetHostName(localhost) != 0){
- snprintf(buf, 255, "Unable to get own hostname");
- setError(CR_ERROR, buf);
- return false;
+ const char * datadir;
+ if(!ndb_mgm_get_string_parameter(it, CFG_NODE_DATADIR, &datadir)){
+ NdbConfig_SetPath(datadir);
}
- do {
- if(strlen(hostname) == 0)
- break;
-
- if(strcasecmp(hostname, localhost) == 0)
- break;
-
- if(strcasecmp(hostname, "localhost") == 0)
- break;
-
- struct in_addr local, config;
- bool b1 = false, b2 = false, b3 = false;
- b1 = Ndb_getInAddr(&local, localhost) == 0;
- b2 = Ndb_getInAddr(&config, hostname) == 0;
- b3 = memcmp(&local, &config, sizeof(local)) == 0;
-
- if(b1 && b2 && b3)
- break;
-
- b1 = Ndb_getInAddr(&local, "localhost") == 0;
- b3 = memcmp(&local, &config, sizeof(local)) == 0;
- if(b1 && b2 && b3)
- break;
-
- snprintf(buf, 255, "Local hostname(%s) and config hostname(%s) dont match",
- localhost, hostname);
+ if (hostname && hostname[0] != 0 &&
+ !SocketServer::tryBind(0,hostname)) {
+    BaseString::snprintf(buf, 255, "Config hostname(%s) doesn't match a local interface,"
+ " tried to bind, error = %d - %s",
+ hostname, errno, strerror(errno));
setError(CR_ERROR, buf);
return false;
- } while(false);
+ }
unsigned int _type;
if(ndb_mgm_get_int_parameter(it, CFG_TYPE_OF_SECTION, &_type)){
- snprintf(buf, 255, "Unable to get type of node(%d) from config",
+ BaseString::snprintf(buf, 255, "Unable to get type of node(%d) from config",
CFG_TYPE_OF_SECTION);
setError(CR_ERROR, buf);
return false;
}
if(_type != m_node_type){
- snprintf(buf, 255, "Supplied node type(%d) and config node type(%d) "
+ BaseString::snprintf(buf, 255, "Supplied node type(%d) and config node type(%d) "
" don't match", m_node_type, _type);
setError(CR_ERROR, buf);
return false;
@@ -332,27 +304,27 @@ ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf){
if(iter.get(CFG_CONNECTION_NODE_1, &nodeId1)) continue;
if(iter.get(CFG_CONNECTION_NODE_2, &nodeId2)) continue;
- if(nodeId1 != _ownNodeId && nodeId2 != _ownNodeId) continue;
- remoteNodeId = (_ownNodeId == nodeId1 ? nodeId2 : nodeId1);
+ if(nodeId1 != nodeid && nodeId2 != nodeid) continue;
+ remoteNodeId = (nodeid == nodeId1 ? nodeId2 : nodeId1);
const char * name;
struct in_addr addr;
BaseString tmp;
- if(!iter.get(CFG_TCP_HOSTNAME_1, &name) && strlen(name)){
+ if(!iter.get(CFG_CONNECTION_HOSTNAME_1, &name) && strlen(name)){
if(Ndb_getInAddr(&addr, name) != 0){
tmp.assfmt("Unable to lookup/illegal hostname %s, "
"connection from node %d to node %d",
- name, _ownNodeId, remoteNodeId);
+ name, nodeid, remoteNodeId);
setError(CR_ERROR, tmp.c_str());
return false;
}
}
- if(!iter.get(CFG_TCP_HOSTNAME_2, &name) && strlen(name)){
+ if(!iter.get(CFG_CONNECTION_HOSTNAME_2, &name) && strlen(name)){
if(Ndb_getInAddr(&addr, name) != 0){
tmp.assfmt("Unable to lookup/illegal hostname %s, "
"connection from node %d to node %d",
- name, _ownNodeId, remoteNodeId);
+ name, nodeid, remoteNodeId);
setError(CR_ERROR, tmp.c_str());
return false;
}
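
verifyConfig() above replaces the old hostname string comparison (gethostname/localhost/in_addr matching) with a single probe: try to bind a socket on the configured hostname and accept it if the bind succeeds. A minimal sketch of that check in isolation, assuming only the SocketServer::tryBind(port, host) call used in the patch (return type treated as boolean):

    #include <SocketServer.hpp>

    static bool hostname_is_local(const char * hostname)
    {
      if (hostname == 0 || hostname[0] == 0)
        return true;                      // empty hostname: accept any interface
      // Port 0 lets the OS pick an ephemeral port; success means the address
      // names one of this host's interfaces.
      return SocketServer::tryBind(0, hostname);
    }
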
diff --git a/ndb/src/common/mgmcommon/IPCConfig.cpp b/ndb/src/common/mgmcommon/IPCConfig.cpp
index a76c541f3f6..780504d2c62 100644
--- a/ndb/src/common/mgmcommon/IPCConfig.cpp
+++ b/ndb/src/common/mgmcommon/IPCConfig.cpp
@@ -133,7 +133,6 @@ IPCConfig::configureTransporters(TransporterRegistry * theTransporterRegistry){
Uint32 compression;
Uint32 checksum;
if(!tmp->get("SendSignalId", &sendSignalId)) continue;
- if(!tmp->get("Compression", &compression)) continue;
if(!tmp->get("Checksum", &checksum)) continue;
const char * type;
@@ -143,8 +142,6 @@ IPCConfig::configureTransporters(TransporterRegistry * theTransporterRegistry){
SHM_TransporterConfiguration conf;
conf.localNodeId = the_ownId;
conf.remoteNodeId = (nodeId1 != the_ownId ? nodeId1 : nodeId2);
- conf.byteOrder = 0;
- conf.compression = compression;
conf.checksum = checksum;
conf.signalId = sendSignalId;
@@ -164,8 +161,6 @@ IPCConfig::configureTransporters(TransporterRegistry * theTransporterRegistry){
SCI_TransporterConfiguration conf;
conf.localNodeId = the_ownId;
conf.remoteNodeId = (nodeId1 != the_ownId ? nodeId1 : nodeId2);
- conf.byteOrder = 0;
- conf.compression = compression;
conf.checksum = checksum;
conf.signalId = sendSignalId;
@@ -174,18 +169,16 @@ IPCConfig::configureTransporters(TransporterRegistry * theTransporterRegistry){
if(the_ownId == nodeId1){
if(!tmp->get("Node1_NoOfAdapters", &conf.nLocalAdapters)) continue;
- if(!tmp->get("Node2_NoOfAdapters", &conf.nRemoteAdapters)) continue;
if(!tmp->get("Node2_Adapter", 0, &conf.remoteSciNodeId0)) continue;
- if(conf.nRemoteAdapters > 1){
+ if(conf.nLocalAdapters > 1){
if(!tmp->get("Node2_Adapter", 1, &conf.remoteSciNodeId1)) continue;
}
} else {
if(!tmp->get("Node2_NoOfAdapters", &conf.nLocalAdapters)) continue;
- if(!tmp->get("Node1_NoOfAdapters", &conf.nRemoteAdapters)) continue;
if(!tmp->get("Node1_Adapter", 0, &conf.remoteSciNodeId0)) continue;
- if(conf.nRemoteAdapters > 1){
+ if(conf.nLocalAdapters > 1){
if(!tmp->get("Node1_Adapter", 1, &conf.remoteSciNodeId1)) continue;
}
}
@@ -243,8 +236,6 @@ IPCConfig::configureTransporters(TransporterRegistry * theTransporterRegistry){
conf.localHostName = ownHostName;
conf.remoteNodeId = remoteNodeId;
conf.localNodeId = ownNodeId;
- conf.byteOrder = 0;
- conf.compression = compression;
conf.checksum = checksum;
conf.signalId = sendSignalId;
@@ -270,8 +261,6 @@ IPCConfig::configureTransporters(TransporterRegistry * theTransporterRegistry){
conf.localHostName = ownHostName;
conf.remoteNodeId = remoteNodeId;
conf.localNodeId = ownNodeId;
- conf.byteOrder = 0;
- conf.compression = compression;
conf.checksum = checksum;
conf.signalId = sendSignalId;
@@ -344,19 +333,29 @@ Uint32
IPCConfig::configureTransporters(Uint32 nodeId,
const class ndb_mgm_configuration & config,
class TransporterRegistry & tr){
+ DBUG_ENTER("IPCConfig::configureTransporters");
- Uint32 noOfTransportersCreated= 0, server_port= 0;
+ Uint32 noOfTransportersCreated= 0;
ndb_mgm_configuration_iterator iter(config, CFG_SECTION_CONNECTION);
for(iter.first(); iter.valid(); iter.next()){
Uint32 nodeId1, nodeId2, remoteNodeId;
+ const char * remoteHostName= 0, * localHostName= 0;
if(iter.get(CFG_CONNECTION_NODE_1, &nodeId1)) continue;
if(iter.get(CFG_CONNECTION_NODE_2, &nodeId2)) continue;
if(nodeId1 != nodeId && nodeId2 != nodeId) continue;
remoteNodeId = (nodeId == nodeId1 ? nodeId2 : nodeId1);
+ {
+ const char * host1= 0, * host2= 0;
+ iter.get(CFG_CONNECTION_HOSTNAME_1, &host1);
+ iter.get(CFG_CONNECTION_HOSTNAME_2, &host2);
+ localHostName = (nodeId == nodeId1 ? host1 : host2);
+ remoteHostName = (nodeId == nodeId1 ? host2 : host1);
+ }
+
Uint32 sendSignalId = 1;
Uint32 checksum = 1;
if(iter.get(CFG_CONNECTION_SEND_SIGNAL_ID, &sendSignalId)) continue;
@@ -365,71 +364,77 @@ IPCConfig::configureTransporters(Uint32 nodeId,
Uint32 type = ~0;
if(iter.get(CFG_TYPE_OF_SECTION, &type)) continue;
- Uint32 tmp_server_port= 0;
- if(iter.get(CFG_CONNECTION_SERVER_PORT, &tmp_server_port)) break;
+ Uint32 server_port= 0;
+ if(iter.get(CFG_CONNECTION_SERVER_PORT, &server_port)) break;
if (nodeId <= nodeId1 && nodeId <= nodeId2) {
- if (server_port && server_port != tmp_server_port) {
- ndbout << "internal error in config setup of server ports line= " << __LINE__ << endl;
- exit(-1);
- }
- server_port= tmp_server_port;
+ tr.add_transporter_interface(localHostName, server_port);
}
-
+ DBUG_PRINT("info", ("Transporter between this node %d and node %d using port %d, signalId %d, checksum %d",
+ nodeId, remoteNodeId, server_port, sendSignalId, checksum));
switch(type){
case CONNECTION_TYPE_SHM:{
SHM_TransporterConfiguration conf;
conf.localNodeId = nodeId;
conf.remoteNodeId = remoteNodeId;
- conf.byteOrder = 0;
- conf.compression = 0;
conf.checksum = checksum;
conf.signalId = sendSignalId;
if(iter.get(CFG_SHM_KEY, &conf.shmKey)) break;
if(iter.get(CFG_SHM_BUFFER_MEM, &conf.shmSize)) break;
- conf.port= tmp_server_port;
+ conf.port= server_port;
if(!tr.createTransporter(&conf)){
+ DBUG_PRINT("error", ("Failed to create SHM Transporter from %d to %d",
+ conf.localNodeId, conf.remoteNodeId));
ndbout << "Failed to create SHM Transporter from: "
<< conf.localNodeId << " to: " << conf.remoteNodeId << endl;
} else {
noOfTransportersCreated++;
}
+ DBUG_PRINT("info", ("Created SHM Transporter using shmkey %d, buf size = %d",
+ conf.shmKey, conf.shmSize));
break;
}
case CONNECTION_TYPE_SCI:{
SCI_TransporterConfiguration conf;
conf.localNodeId = nodeId;
conf.remoteNodeId = remoteNodeId;
- conf.byteOrder = 0;
- conf.compression = 0;
conf.checksum = checksum;
conf.signalId = sendSignalId;
+ conf.port= server_port;
+ conf.localHostName = localHostName;
+ conf.remoteHostName = remoteHostName;
+
if(iter.get(CFG_SCI_SEND_LIMIT, &conf.sendLimit)) break;
if(iter.get(CFG_SCI_BUFFER_MEM, &conf.bufferSize)) break;
-
- if(nodeId == nodeId1){
- if(iter.get(CFG_SCI_NODE1_ADAPTERS, &conf.nLocalAdapters)) break;
- if(iter.get(CFG_SCI_NODE2_ADAPTERS, &conf.nRemoteAdapters)) break;
- if(iter.get(CFG_SCI_NODE2_ADAPTER0, &conf.remoteSciNodeId0)) break;
- if(conf.nRemoteAdapters > 1){
- if(iter.get(CFG_SCI_NODE2_ADAPTER1, &conf.remoteSciNodeId1)) break;
- }
+ if (nodeId == nodeId1) {
+ if(iter.get(CFG_SCI_HOST2_ID_0, &conf.remoteSciNodeId0)) break;
+ if(iter.get(CFG_SCI_HOST2_ID_1, &conf.remoteSciNodeId1)) break;
} else {
- if(iter.get(CFG_SCI_NODE2_ADAPTERS, &conf.nLocalAdapters)) break;
- if(iter.get(CFG_SCI_NODE1_ADAPTERS, &conf.nRemoteAdapters)) break;
- if(iter.get(CFG_SCI_NODE1_ADAPTER0, &conf.remoteSciNodeId0)) break;
- if(conf.nRemoteAdapters > 1){
- if(iter.get(CFG_SCI_NODE1_ADAPTER1, &conf.remoteSciNodeId1)) break;
- }
+ if(iter.get(CFG_SCI_HOST1_ID_0, &conf.remoteSciNodeId0)) break;
+ if(iter.get(CFG_SCI_HOST1_ID_1, &conf.remoteSciNodeId1)) break;
}
-
- if(!tr.createTransporter(&conf)){
+ if (conf.remoteSciNodeId1 == 0) {
+ conf.nLocalAdapters = 1;
+ } else {
+ conf.nLocalAdapters = 2;
+ }
+ if(!tr.createTransporter(&conf)){
+ DBUG_PRINT("error", ("Failed to create SCI Transporter from %d to %d",
+ conf.localNodeId, conf.remoteNodeId));
ndbout << "Failed to create SCI Transporter from: "
<< conf.localNodeId << " to: " << conf.remoteNodeId << endl;
} else {
+ DBUG_PRINT("info", ("Created SCI Transporter: Adapters = %d, remote SCI node id %d",
+ conf.nLocalAdapters, conf.remoteSciNodeId0));
+ DBUG_PRINT("info", ("Host 1 = %s, Host 2 = %s, sendLimit = %d, buf size = %d",
+ conf.localHostName, conf.remoteHostName, conf.sendLimit, conf.bufferSize));
+ if (conf.nLocalAdapters > 1) {
+ DBUG_PRINT("info", ("Fault-tolerant with 2 Remote Adapters, second remote SCI node id = %d",
+ conf.remoteSciNodeId1));
+ }
noOfTransportersCreated++;
continue;
}
@@ -437,14 +442,10 @@ IPCConfig::configureTransporters(Uint32 nodeId,
case CONNECTION_TYPE_TCP:{
TCP_TransporterConfiguration conf;
- const char * host1, * host2;
- if(iter.get(CFG_TCP_HOSTNAME_1, &host1)) break;
- if(iter.get(CFG_TCP_HOSTNAME_2, &host2)) break;
-
if(iter.get(CFG_TCP_SEND_BUFFER_SIZE, &conf.sendBufferSize)) break;
if(iter.get(CFG_TCP_RECEIVE_BUFFER_SIZE, &conf.maxReceiveSize)) break;
- conf.port= tmp_server_port;
+ conf.port= server_port;
const char * proxy;
if (!iter.get(CFG_TCP_PROXY, &proxy)) {
if (strlen(proxy) > 0 && nodeId2 == nodeId) {
@@ -455,10 +456,8 @@ IPCConfig::configureTransporters(Uint32 nodeId,
conf.localNodeId = nodeId;
conf.remoteNodeId = remoteNodeId;
- conf.localHostName = (nodeId == nodeId1 ? host1 : host2);
- conf.remoteHostName = (nodeId == nodeId1 ? host2 : host1);
- conf.byteOrder = 0;
- conf.compression = 0;
+ conf.localHostName = localHostName;
+ conf.remoteHostName = remoteHostName;
conf.checksum = checksum;
conf.signalId = sendSignalId;
@@ -468,23 +467,20 @@ IPCConfig::configureTransporters(Uint32 nodeId,
} else {
noOfTransportersCreated++;
}
+ DBUG_PRINT("info", ("Created TCP Transporter: sendBufferSize = %d, maxReceiveSize = %d",
+ conf.sendBufferSize, conf.maxReceiveSize));
+ break;
case CONNECTION_TYPE_OSE:{
OSE_TransporterConfiguration conf;
-
- const char * host1, * host2;
- if(iter.get(CFG_OSE_HOSTNAME_1, &host1)) break;
- if(iter.get(CFG_OSE_HOSTNAME_2, &host2)) break;
-
+
if(iter.get(CFG_OSE_PRIO_A_SIZE, &conf.prioASignalSize)) break;
if(iter.get(CFG_OSE_PRIO_B_SIZE, &conf.prioBSignalSize)) break;
if(iter.get(CFG_OSE_RECEIVE_ARRAY_SIZE, &conf.receiveBufferSize)) break;
conf.localNodeId = nodeId;
conf.remoteNodeId = remoteNodeId;
- conf.localHostName = (nodeId == nodeId1 ? host1 : host2);
- conf.remoteHostName = (nodeId == nodeId1 ? host2 : host1);
- conf.byteOrder = 0;
- conf.compression = 0;
+ conf.localHostName = localHostName;
+ conf.remoteHostName = remoteHostName;
conf.checksum = checksum;
conf.signalId = sendSignalId;
@@ -502,9 +498,6 @@ IPCConfig::configureTransporters(Uint32 nodeId,
}
}
}
-
- tr.m_service_port= server_port;
-
- return noOfTransportersCreated;
+ DBUG_RETURN(noOfTransportersCreated);
}
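
configureTransporters() now reads the server port per connection and registers the listening interface with tr.add_transporter_interface(localHostName, server_port) only on the node with the lowest node id of the pair; the peer acts as the connecting client. A sketch of that selection rule (helper name hypothetical):

    static bool i_am_server_side(Uint32 ownNodeId, Uint32 nodeId1, Uint32 nodeId2)
    {
      // The lower-numbered end of the connection opens the listening socket.
      return ownNodeId <= nodeId1 && ownNodeId <= nodeId2;
    }

    // Inside the connection loop, roughly:
    //   if (i_am_server_side(nodeId, nodeId1, nodeId2))
    //     tr.add_transporter_interface(localHostName, server_port);
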
diff --git a/ndb/src/common/mgmcommon/LocalConfig.cpp b/ndb/src/common/mgmcommon/LocalConfig.cpp
index 0440ce84dba..3cd4341c6b7 100644
--- a/ndb/src/common/mgmcommon/LocalConfig.cpp
+++ b/ndb/src/common/mgmcommon/LocalConfig.cpp
@@ -18,6 +18,7 @@
#include <NdbEnv.h>
#include <NdbConfig.h>
#include <NdbAutoPtr.hpp>
+#include <NdbMem.h>
LocalConfig::LocalConfig(){
error_line = 0; error_msg[0] = 0;
@@ -39,7 +40,7 @@ LocalConfig::init(const char *connectString,
//1. Check connectString
if(connectString != 0 && connectString[0] != 0){
- if(readConnectString(connectString)){
+ if(readConnectString(connectString, "connect string")){
return true;
}
return false;
@@ -58,7 +59,7 @@ LocalConfig::init(const char *connectString,
char buf[255];
if(NdbEnv_GetEnv("NDB_CONNECTSTRING", buf, sizeof(buf)) &&
strlen(buf) != 0){
- if(readConnectString(buf)){
+ if(readConnectString(buf, "NDB_CONNECTSTRING")){
return true;
}
return false;
@@ -89,8 +90,8 @@ LocalConfig::init(const char *connectString,
//7. Check
{
char buf[256];
- snprintf(buf, sizeof(buf), "host=localhost:%u", NDB_BASE_PORT);
- if(readConnectString(buf))
+ BaseString::snprintf(buf, sizeof(buf), "host=localhost:%s", NDB_BASE_PORT);
+ if(readConnectString(buf, "default connect string"))
return true;
}
@@ -108,8 +109,10 @@ void LocalConfig::setError(int lineNumber, const char * _msg) {
}
void LocalConfig::printError() const {
- ndbout << "Local configuration error"<< endl
- << "Line: "<< error_line << ", " << error_msg << endl << endl;
+ ndbout << "Configuration error" << endl;
+ if (error_line)
+ ndbout << "Line: "<< error_line << ", ";
+ ndbout << error_msg << endl << endl;
}
void LocalConfig::printUsage() const {
@@ -139,7 +142,7 @@ const char *nodeIdTokens[] = {
const char *hostNameTokens[] = {
"host://%[^:]:%i",
"host=%[^:]:%i",
- "%[^:]:%i",
+ "%[^:^=^ ]:%i",
"%s %i",
0
};
@@ -191,7 +194,7 @@ LocalConfig::parseFileName(const char * buf){
}
bool
-LocalConfig::parseString(const char * connectString, char *line){
+LocalConfig::parseString(const char * connectString, BaseString &err){
char * for_strtok;
char * copy = strdup(connectString);
NdbAutoPtr<char> tmp_aptr(copy);
@@ -199,9 +202,8 @@ LocalConfig::parseString(const char * connectString, char *line){
bool b_nodeId = false;
bool found_other = false;
- for (char *tok = strtok_r(copy,";",&for_strtok); tok != 0;
- tok = strtok_r(NULL, ";", &for_strtok)) {
-
+ for (char *tok = strtok_r(copy,";,",&for_strtok); tok != 0;
+ tok = strtok_r(NULL, ";,", &for_strtok)) {
if (tok[0] == '#') continue;
if (!b_nodeId) // only one nodeid definition allowed
@@ -212,15 +214,12 @@ LocalConfig::parseString(const char * connectString, char *line){
if (found_other = parseFileName(tok))
continue;
- if (line)
- snprintf(line, 150, "Unexpected entry: \"%s\"", tok);
+ err.assfmt("Unexpected entry: \"%s\"", tok);
return false;
}
if (!found_other) {
- if (line)
- snprintf(line, 150, "Missing host/file name extry in \"%s\"",
- connectString);
+ err.appfmt("Missing host/file name extry in \"%s\"", connectString);
return false;
}
@@ -229,54 +228,60 @@ LocalConfig::parseString(const char * connectString, char *line){
bool LocalConfig::readFile(const char * filename, bool &fopenError)
{
- char line[150], line2[150];
-
+ char line[1024];
+
fopenError = false;
-
+
FILE * file = fopen(filename, "r");
if(file == 0){
- snprintf(line, 150, "Unable to open local config file: %s", filename);
+ BaseString::snprintf(line, sizeof(line),
+ "Unable to open local config file: %s", filename);
setError(0, line);
fopenError = true;
return false;
}
- int sz = 1024;
- char* theString = (char*)malloc(sz);
- theString[0] = 0;
-
- fgets(theString, sz, file);
- while (fgets(line+1, 100, file)) {
- line[0] = ';';
- while (strlen(theString) + strlen(line) >= sz) {
- sz = sz*2;
- char *newString = (char*)malloc(sz);
- strcpy(newString, theString);
- free(theString);
- theString = newString;
+ BaseString theString;
+
+ while(fgets(line, sizeof(line), file)){
+ BaseString tmp(line);
+ tmp.trim(" \t\n\r");
+ if(tmp.length() > 0 && tmp.c_str()[0] != '#'){
+ theString.append(tmp);
+ break;
}
- strcat(theString, line);
}
-
- bool return_value = parseString(theString, line);
+ while (fgets(line, sizeof(line), file)) {
+ BaseString tmp(line);
+ tmp.trim(" \t\n\r");
+ if(tmp.length() > 0 && tmp.c_str()[0] != '#'){
+ theString.append(";");
+ theString.append(tmp);
+ }
+ }
+
+ BaseString err;
+ bool return_value = parseString(theString.c_str(), err);
if (!return_value) {
- snprintf(line2, 150, "Reading %s: %s", filename, line);
- setError(0,line2);
+ BaseString tmp;
+ tmp.assfmt("Reading %s: %s", filename, err.c_str());
+ setError(0, tmp.c_str());
}
- free(theString);
fclose(file);
return return_value;
}
bool
-LocalConfig::readConnectString(const char * connectString){
- char line[150], line2[150];
- bool return_value = parseString(connectString, line);
+LocalConfig::readConnectString(const char * connectString,
+ const char * info){
+ BaseString err;
+ bool return_value = parseString(connectString, err);
if (!return_value) {
- snprintf(line2, 150, "Reading NDB_CONNECTSTRING \"%s\": %s", connectString, line);
- setError(0,line2);
+ BaseString err2;
+ err2.assfmt("Reading %d \"%s\": %s", info, connectString, err.c_str());
+ setError(0,err2.c_str());
}
return return_value;
}
diff --git a/ndb/src/common/mgmcommon/Makefile.am b/ndb/src/common/mgmcommon/Makefile.am
index 8a34fa16ed1..ed6a526eb47 100644
--- a/ndb/src/common/mgmcommon/Makefile.am
+++ b/ndb/src/common/mgmcommon/Makefile.am
@@ -2,13 +2,12 @@ noinst_LTLIBRARIES = libmgmsrvcommon.la
libmgmsrvcommon_la_SOURCES = \
LocalConfig.cpp \
- Config.cpp \
- ConfigInfo.cpp \
ConfigRetriever.cpp \
- InitConfigFileParser.cpp \
IPCConfig.cpp NdbConfig.c
-INCLUDES_LOC = -I$(top_srcdir)/ndb/src/mgmapi
+INCLUDES_LOC = -I$(top_srcdir)/ndb/src/mgmapi -I$(top_srcdir)/ndb/src/mgmsrv
+
+DEFS_LOC = -DNDB_BASE_PORT="\"@ndb_port_base@\""
include $(top_srcdir)/ndb/config/common.mk.am
include $(top_srcdir)/ndb/config/type_ndbapi.mk.am
diff --git a/ndb/src/common/mgmcommon/NdbConfig.c b/ndb/src/common/mgmcommon/NdbConfig.c
index 6b609b22fa4..e92f8fa8392 100644
--- a/ndb/src/common/mgmcommon/NdbConfig.c
+++ b/ndb/src/common/mgmcommon/NdbConfig.c
@@ -17,28 +17,46 @@
#include <ndb_global.h>
#include <NdbConfig.h>
#include <NdbEnv.h>
+#include <NdbMem.h>
-static char*
-NdbConfig_AllocHomePath(int _len)
+static const char *datadir_path= 0;
+
+const char *
+NdbConfig_get_path(int *_len)
{
const char *path= NdbEnv_GetEnv("NDB_HOME", 0, 0);
- int len= _len;
int path_len= 0;
- char *buf;
-
if (path)
path_len= strlen(path);
+ if (path_len == 0 && datadir_path) {
+ path= datadir_path;
+ path_len= strlen(path);
+ }
+ if (path_len == 0) {
+ path= ".";
+ path_len= strlen(path);
+ }
+ if (_len)
+ *_len= path_len;
+ return path;
+}
- len+= path_len;
- buf= malloc(len);
- if (path_len > 0)
- snprintf(buf, len, "%s%s", path, DIR_SEPARATOR);
- else
- buf[0]= 0;
-
+static char*
+NdbConfig_AllocHomePath(int _len)
+{
+ int path_len;
+ const char *path= NdbConfig_get_path(&path_len);
+ int len= _len+path_len;
+ char *buf= NdbMem_Allocate(len);
+ snprintf(buf, len, "%s%s", path, DIR_SEPARATOR);
return buf;
}
+void
+NdbConfig_SetPath(const char* path){
+ datadir_path= path;
+}
+
char*
NdbConfig_NdbCfgName(int with_ndb_home){
char *buf;
@@ -48,63 +66,79 @@ NdbConfig_NdbCfgName(int with_ndb_home){
buf= NdbConfig_AllocHomePath(128);
len= strlen(buf);
} else
- buf= malloc(128);
+ buf= NdbMem_Allocate(128);
snprintf(buf+len, 128, "Ndb.cfg");
return buf;
}
+static
+char *get_prefix_buf(int len, int node_id)
+{
+ char tmp_buf[sizeof("ndb_pid#########")+1];
+ char *buf;
+ if (node_id > 0)
+ snprintf(tmp_buf, sizeof(tmp_buf), "ndb_%u", node_id);
+ else
+ snprintf(tmp_buf, sizeof(tmp_buf), "ndb_pid%u", getpid());
+ tmp_buf[sizeof(tmp_buf)-1]= 0;
+
+ buf= NdbConfig_AllocHomePath(len+strlen(tmp_buf));
+ strcat(buf, tmp_buf);
+ return buf;
+}
+
char*
NdbConfig_ErrorFileName(int node_id){
- char *buf= NdbConfig_AllocHomePath(128);
+ char *buf= get_prefix_buf(128, node_id);
int len= strlen(buf);
- snprintf(buf+len, 128, "ndb_%u_error.log", node_id);
+ snprintf(buf+len, 128, "_error.log");
return buf;
}
char*
NdbConfig_ClusterLogFileName(int node_id){
- char *buf= NdbConfig_AllocHomePath(128);
+ char *buf= get_prefix_buf(128, node_id);
int len= strlen(buf);
- snprintf(buf+len, 128, "ndb_%u_cluster.log", node_id);
+ snprintf(buf+len, 128, "_cluster.log");
return buf;
}
char*
NdbConfig_SignalLogFileName(int node_id){
- char *buf= NdbConfig_AllocHomePath(128);
+ char *buf= get_prefix_buf(128, node_id);
int len= strlen(buf);
- snprintf(buf+len, 128, "ndb_%u_signal.log", node_id);
+ snprintf(buf+len, 128, "_signal.log");
return buf;
}
char*
NdbConfig_TraceFileName(int node_id, int file_no){
- char *buf= NdbConfig_AllocHomePath(128);
+ char *buf= get_prefix_buf(128, node_id);
int len= strlen(buf);
- snprintf(buf+len, 128, "ndb_%u_trace.log.%u", node_id, file_no);
+ snprintf(buf+len, 128, "_trace.log.%u", file_no);
return buf;
}
char*
NdbConfig_NextTraceFileName(int node_id){
- char *buf= NdbConfig_AllocHomePath(128);
+ char *buf= get_prefix_buf(128, node_id);
int len= strlen(buf);
- snprintf(buf+len, 128, "ndb_%u_trace.log.next", node_id);
+ snprintf(buf+len, 128, "_trace.log.next");
return buf;
}
char*
NdbConfig_PidFileName(int node_id){
- char *buf= NdbConfig_AllocHomePath(128);
+ char *buf= get_prefix_buf(128, node_id);
int len= strlen(buf);
- snprintf(buf+len, 128, "ndb_%u.pid", node_id);
+ snprintf(buf+len, 128, ".pid");
return buf;
}
char*
NdbConfig_StdoutFileName(int node_id){
- char *buf= NdbConfig_AllocHomePath(128);
+ char *buf= get_prefix_buf(128, node_id);
int len= strlen(buf);
- snprintf(buf+len, 128, "ndb_%u_out.log", node_id);
+ snprintf(buf+len, 128, "_out.log");
return buf;
}
diff --git a/ndb/src/common/portlib/NdbCondition.c b/ndb/src/common/portlib/NdbCondition.c
index 1d229bdcdef..df312c7cc24 100644
--- a/ndb/src/common/portlib/NdbCondition.c
+++ b/ndb/src/common/portlib/NdbCondition.c
@@ -20,6 +20,7 @@
#include <NdbCondition.h>
#include <NdbThread.h>
#include <NdbMutex.h>
+#include <NdbMem.h>
struct NdbCondition
{
@@ -34,7 +35,7 @@ NdbCondition_Create(void)
struct NdbCondition* tmpCond;
int result;
- tmpCond = (struct NdbCondition*)malloc(sizeof(struct NdbCondition));
+ tmpCond = (struct NdbCondition*)NdbMem_Allocate(sizeof(struct NdbCondition));
if (tmpCond == NULL)
return NULL;
diff --git a/ndb/src/common/portlib/NdbDaemon.c b/ndb/src/common/portlib/NdbDaemon.c
index d8d33595156..c73b5927ff4 100644
--- a/ndb/src/common/portlib/NdbDaemon.c
+++ b/ndb/src/common/portlib/NdbDaemon.c
@@ -28,6 +28,8 @@ NdbDaemon_Make(const char* lockfile, const char* logfile, unsigned flags)
int lockfd = -1, logfd = -1, n;
char buf[64];
+ (void)flags; /* remove warning for unused parameter */
+
/* Check that we have write access to lock file */
assert(lockfile != NULL);
lockfd = open(lockfile, O_CREAT|O_RDWR, 0644);
diff --git a/ndb/src/common/portlib/NdbMem.c b/ndb/src/common/portlib/NdbMem.c
index 0b06e5b23f1..f964f4d9937 100644
--- a/ndb/src/common/portlib/NdbMem.c
+++ b/ndb/src/common/portlib/NdbMem.c
@@ -31,14 +31,18 @@ void NdbMem_Destroy()
return;
}
+
void* NdbMem_Allocate(size_t size)
{
+ void* mem_allocated;
assert(size > 0);
- return (void*)malloc(size);
+ mem_allocated= (void*)malloc(size);
+ return mem_allocated;
}
void* NdbMem_AllocateAlign(size_t size, size_t alignment)
{
+ (void)alignment; /* remove warning for unused parameter */
/*
return (void*)memalign(alignment, size);
TEMP fix
diff --git a/ndb/src/common/portlib/NdbMutex.c b/ndb/src/common/portlib/NdbMutex.c
index 50f314d2683..d3d39ea8cf7 100644
--- a/ndb/src/common/portlib/NdbMutex.c
+++ b/ndb/src/common/portlib/NdbMutex.c
@@ -19,13 +19,14 @@
#include <NdbThread.h>
#include <NdbMutex.h>
+#include <NdbMem.h>
NdbMutex* NdbMutex_Create(void)
{
NdbMutex* pNdbMutex;
int result;
- pNdbMutex = (NdbMutex*)malloc(sizeof(NdbMutex));
+ pNdbMutex = (NdbMutex*)NdbMem_Allocate(sizeof(NdbMutex));
if (pNdbMutex == NULL)
return NULL;
diff --git a/ndb/src/common/portlib/NdbSleep.c b/ndb/src/common/portlib/NdbSleep.c
index 8702a25d1b1..44bafe98a37 100644
--- a/ndb/src/common/portlib/NdbSleep.c
+++ b/ndb/src/common/portlib/NdbSleep.c
@@ -16,16 +16,21 @@
#include <ndb_global.h>
-#include "NdbSleep.h"
+#include <my_sys.h>
+#include <NdbSleep.h>
int
NdbSleep_MilliSleep(int milliseconds){
+ my_sleep(milliseconds*1000);
+ return 0;
+#if 0
int result = 0;
struct timespec sleeptime;
sleeptime.tv_sec = milliseconds / 1000;
sleeptime.tv_nsec = (milliseconds - (sleeptime.tv_sec * 1000)) * 1000000;
result = nanosleep(&sleeptime, NULL);
return result;
+#endif
}
int
diff --git a/ndb/src/common/portlib/NdbTCP.cpp b/ndb/src/common/portlib/NdbTCP.cpp
index 4bf4936aa30..35b0c8c21e4 100644
--- a/ndb/src/common/portlib/NdbTCP.cpp
+++ b/ndb/src/common/portlib/NdbTCP.cpp
@@ -15,34 +15,43 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#include <NdbMutex.h>
+#include <ndb_global.h>
+#include <my_net.h>
#include <NdbTCP.h>
-#if defined NDB_WIN32 || defined SCO
-static NdbMutex & LOCK_gethostbyname = * NdbMutex_Create();
-#else
-static NdbMutex LOCK_gethostbyname = NDB_MUTEX_INITIALIZER;
-#endif
-
extern "C"
int
Ndb_getInAddr(struct in_addr * dst, const char *address) {
- struct hostent * hostPtr;
- NdbMutex_Lock(&LOCK_gethostbyname);
- hostPtr = gethostbyname(address);
- if (hostPtr != NULL) {
- dst->s_addr = ((struct in_addr *) *hostPtr->h_addr_list)->s_addr;
- NdbMutex_Unlock(&LOCK_gethostbyname);
- return 0;
+ DBUG_ENTER("Ndb_getInAddr");
+ {
+ int tmp_errno;
+ struct hostent tmp_hostent, *hp;
+ char buff[GETHOSTBYNAME_BUFF_SIZE];
+ hp = my_gethostbyname_r(address,&tmp_hostent,buff,sizeof(buff),
+ &tmp_errno);
+ if (hp)
+ {
+ memcpy(dst, hp->h_addr, min(sizeof(*dst), (size_t) hp->h_length));
+ my_gethostbyname_r_free();
+ DBUG_RETURN(0);
+ }
+ my_gethostbyname_r_free();
}
- NdbMutex_Unlock(&LOCK_gethostbyname);
-
/* Try it as aaa.bbb.ccc.ddd. */
dst->s_addr = inet_addr(address);
- if (dst->s_addr != -1) {
- return 0;
+ if (dst->s_addr !=
+#ifdef INADDR_NONE
+ INADDR_NONE
+#else
+ -1
+#endif
+ )
+ {
+ DBUG_RETURN(0);
}
- return -1;
+ DBUG_PRINT("error",("inet_addr(%s) - %d - %s",
+ address, errno, strerror(errno)));
+ DBUG_RETURN(-1);
}
#if 0
diff --git a/ndb/src/common/portlib/NdbThread.c b/ndb/src/common/portlib/NdbThread.c
index 8683a37edcb..69e39994a9c 100644
--- a/ndb/src/common/portlib/NdbThread.c
+++ b/ndb/src/common/portlib/NdbThread.c
@@ -18,6 +18,7 @@
#include <ndb_global.h>
#include <NdbThread.h>
#include <pthread.h>
+#include <NdbMem.h>
#define MAX_THREAD_NAME 16
@@ -41,15 +42,16 @@ struct NdbThread* NdbThread_Create(NDB_THREAD_FUNC *p_thread_func,
int result;
pthread_attr_t thread_attr;
+ (void)thread_prio; /* remove warning for unused parameter */
+
if (p_thread_func == NULL)
return 0;
- tmpThread = (struct NdbThread*)malloc(sizeof(struct NdbThread));
+ tmpThread = (struct NdbThread*)NdbMem_Allocate(sizeof(struct NdbThread));
if (tmpThread == NULL)
return NULL;
- snprintf(tmpThread->thread_name, sizeof(tmpThread->thread_name),
- "%s", p_thread_name);
+ strnmov(tmpThread->thread_name,p_thread_name,sizeof(tmpThread->thread_name));
pthread_attr_init(&thread_attr);
pthread_attr_setstacksize(&thread_attr, thread_stack_size);
@@ -108,6 +110,7 @@ int NdbThread_SetConcurrencyLevel(int level)
#ifdef USE_PTHREAD_EXTRAS
return pthread_setconcurrency(level);
#else
+ (void)level; /* remove warning for unused parameter */
return 0;
#endif
}
diff --git a/ndb/src/common/portlib/memtest.c b/ndb/src/common/portlib/memtest.c
index 059a4ec025e..673f23fa803 100644
--- a/ndb/src/common/portlib/memtest.c
+++ b/ndb/src/common/portlib/memtest.c
@@ -90,7 +90,7 @@ void malloctest(int loopcount, int memsize, int touch) {
long long start=0;
int total=0;
int i=0, j=0;
- int size=memsize*1024*1024; //bytes;
+ int size=memsize*1024*1024; /*bytes*/;
float mean;
char * ptr =0;
@@ -126,7 +126,7 @@ void mmaptest(int loopcount, int memsize, int touch) {
int total=0;
int i=0, j=0;
char * ptr;
- int size=memsize*1024*1024; //bytes;
+ int size=memsize*1024*1024; /*bytes*/;
float mean;
printf("Staring mmaptest ");
@@ -165,7 +165,7 @@ void unmaptest(loopcount, memsize)
int total=0;
int i=0, j=0;
char * ptr;
- int size=memsize*1024*1024; //bytes;
+ int size=memsize*1024*1024; /*bytes*/;
float mean;
printf("Staring munmap test (loopcount = 1 no matter what you prev. set)\n");
@@ -215,7 +215,7 @@ void freetest(int loopcount, int memsize) {
long long start=0;
int total=0;
int i=0, j=0;
- int size=memsize*1024*1024; //bytes;
+ int size=memsize*1024*1024; /*bytes*/;
float mean;
char * ptr =0;
diff --git a/ndb/src/common/transporter/Makefile.am b/ndb/src/common/transporter/Makefile.am
index 218b261606d..9d91a210d46 100644
--- a/ndb/src/common/transporter/Makefile.am
+++ b/ndb/src/common/transporter/Makefile.am
@@ -13,7 +13,7 @@ EXTRA_libtransporter_la_SOURCES = SHM_Transporter.cpp SHM_Transporter.unix.cpp S
libtransporter_la_LIBADD = @ndb_transporter_opt_objs@
libtransporter_la_DEPENDENCIES = @ndb_transporter_opt_objs@
-INCLUDES_LOC = -I$(top_srcdir)/ndb/include/kernel -I$(top_srcdir)/ndb/include/transporter
+INCLUDES_LOC = -I$(top_srcdir)/ndb/include/kernel -I$(top_srcdir)/ndb/include/transporter @NDB_SCI_INCLUDES@
include $(top_srcdir)/ndb/config/common.mk.am
include $(top_srcdir)/ndb/config/type_util.mk.am
diff --git a/ndb/src/common/transporter/OSE_Receiver.cpp b/ndb/src/common/transporter/OSE_Receiver.cpp
index b7d47b2f88c..63a33fc8f24 100644
--- a/ndb/src/common/transporter/OSE_Receiver.cpp
+++ b/ndb/src/common/transporter/OSE_Receiver.cpp
@@ -41,7 +41,7 @@ OSE_Receiver::OSE_Receiver(TransporterRegistry * tr,
phantomCreated = false;
localNodeId = _localNodeId;
- snprintf(localHostName, sizeof(localHostName),
+ BaseString::snprintf(localHostName, sizeof(localHostName),
"ndb_node%d", localNodeId);
DEBUG("localNodeId = " << localNodeId << " -> localHostName = "
diff --git a/ndb/src/common/transporter/OSE_Transporter.cpp b/ndb/src/common/transporter/OSE_Transporter.cpp
index c9b0f777319..a52862a80e5 100644
--- a/ndb/src/common/transporter/OSE_Transporter.cpp
+++ b/ndb/src/common/transporter/OSE_Transporter.cpp
@@ -51,10 +51,10 @@ OSE_Transporter::OSE_Transporter(int _prioASignalSize,
prioBSignalSize = _prioBSignalSize;
if (strcmp(lHostName, rHostName) == 0){
- snprintf(remoteNodeName, sizeof(remoteNodeName),
+ BaseString::snprintf(remoteNodeName, sizeof(remoteNodeName),
"ndb_node%d", remoteNodeId);
} else {
- snprintf(remoteNodeName, sizeof(remoteNodeName),
+ BaseString::snprintf(remoteNodeName, sizeof(remoteNodeName),
"%s/ndb_node%d", rHostName, remoteNodeId);
}
diff --git a/ndb/src/common/transporter/Packer.cpp b/ndb/src/common/transporter/Packer.cpp
index 645517a4b1a..9eba335330d 100644
--- a/ndb/src/common/transporter/Packer.cpp
+++ b/ndb/src/common/transporter/Packer.cpp
@@ -21,6 +21,7 @@
#include <TransporterCallback.hpp>
#include <RefConvert.hpp>
+#define MAX_RECEIVED_SIGNALS 1024
Uint32
TransporterRegistry::unpack(Uint32 * readPtr,
Uint32 sizeOfData,
@@ -30,12 +31,15 @@ TransporterRegistry::unpack(Uint32 * readPtr,
LinearSectionPtr ptr[3];
Uint32 usedData = 0;
-
+ Uint32 loop_count = 0;
+
if(state == NoHalt || state == HaltOutput){
- while(sizeOfData >= 4 + sizeof(Protocol6)){
+ while ((sizeOfData >= 4 + sizeof(Protocol6)) &&
+ (loop_count < MAX_RECEIVED_SIGNALS)) {
Uint32 word1 = readPtr[0];
Uint32 word2 = readPtr[1];
Uint32 word3 = readPtr[2];
+ loop_count++;
#if 0
if(Protocol6::getByteOrder(word1) != MY_OWN_BYTE_ORDER){
@@ -112,10 +116,12 @@ TransporterRegistry::unpack(Uint32 * readPtr,
} else {
/** state = HaltIO || state == HaltInput */
- while(sizeOfData >= 4 + sizeof(Protocol6)){
+ while ((sizeOfData >= 4 + sizeof(Protocol6)) &&
+ (loop_count < MAX_RECEIVED_SIGNALS)) {
Uint32 word1 = readPtr[0];
Uint32 word2 = readPtr[1];
Uint32 word3 = readPtr[2];
+ loop_count++;
#if 0
if(Protocol6::getByteOrder(word1) != MY_OWN_BYTE_ORDER){
@@ -208,12 +214,13 @@ TransporterRegistry::unpack(Uint32 * readPtr,
IOState state) {
static SignalHeader signalHeader;
static LinearSectionPtr ptr[3];
+ Uint32 loop_count = 0;
if(state == NoHalt || state == HaltOutput){
- while(readPtr < eodPtr){
+ while ((readPtr < eodPtr) && (loop_count < MAX_RECEIVED_SIGNALS)) {
Uint32 word1 = readPtr[0];
Uint32 word2 = readPtr[1];
Uint32 word3 = readPtr[2];
-
+ loop_count++;
#if 0
if(Protocol6::getByteOrder(word1) != MY_OWN_BYTE_ORDER){
//Do funky stuff
@@ -280,11 +287,11 @@ TransporterRegistry::unpack(Uint32 * readPtr,
} else {
/** state = HaltIO || state == HaltInput */
- while(readPtr < eodPtr){
+ while ((readPtr < eodPtr) && (loop_count < MAX_RECEIVED_SIGNALS)) {
Uint32 word1 = readPtr[0];
Uint32 word2 = readPtr[1];
Uint32 word3 = readPtr[2];
-
+ loop_count++;
#if 0
if(Protocol6::getByteOrder(word1) != MY_OWN_BYTE_ORDER){
//Do funky stuff
diff --git a/ndb/src/common/transporter/SCI_Transporter.cpp b/ndb/src/common/transporter/SCI_Transporter.cpp
index c52c8a9d8c0..73fbb064599 100644
--- a/ndb/src/common/transporter/SCI_Transporter.cpp
+++ b/ndb/src/common/transporter/SCI_Transporter.cpp
@@ -24,23 +24,30 @@
#include "TransporterInternalDefinitions.hpp"
#include <TransporterCallback.hpp>
-
+
+#include <InputStream.hpp>
+#include <OutputStream.hpp>
+
#define FLAGS 0
-
-SCI_Transporter::SCI_Transporter(Uint32 packetSize,
+#define DEBUG_TRANSPORTER
+SCI_Transporter::SCI_Transporter(TransporterRegistry &t_reg,
+ const char *lHostName,
+ const char *rHostName,
+ int r_port,
+ Uint32 packetSize,
Uint32 bufferSize,
Uint32 nAdapters,
Uint16 remoteSciNodeId0,
Uint16 remoteSciNodeId1,
NodeId _localNodeId,
NodeId _remoteNodeId,
- int byte_order,
- bool compr,
bool chksm,
bool signalId,
Uint32 reportFreq) :
- Transporter(_localNodeId, _remoteNodeId, byte_order, compr, chksm, signalId)
-{
+ Transporter(t_reg, lHostName, rHostName, r_port, _localNodeId,
+ _remoteNodeId, 0, false, chksm, signalId)
+{
+ DBUG_ENTER("SCI_Transporter::SCI_Transporter");
m_PacketSize = (packetSize + 3)/4 ;
m_BufferSize = bufferSize;
m_sendBuffer.m_buffer = NULL;
@@ -56,10 +63,6 @@ SCI_Transporter::SCI_Transporter(Uint32 packetSize,
m_initLocal=false;
- m_remoteNodes= new Uint16[m_numberOfRemoteNodes];
- if(m_remoteNodes == NULL) {
- //DO WHAT??
- }
m_swapCounter=0;
m_failCounter=0;
m_remoteNodes[0]=remoteSciNodeId0;
@@ -94,20 +97,19 @@ SCI_Transporter::SCI_Transporter(Uint32 packetSize,
i4096=0;
i4097=0;
#endif
-
+ DBUG_VOID_RETURN;
}
void SCI_Transporter::disconnectImpl()
{
+ DBUG_ENTER("SCI_Transporter::disconnectImpl");
sci_error_t err;
if(m_mapped){
setDisconnect();
-#ifdef DEBUG_TRANSPORTER
- ndbout << "DisconnectImpl " << getConnectionStatus() << endl;
- ndbout << "remote node " << remoteNodeId << endl;
-#endif
+ DBUG_PRINT("info", ("connect status = %d, remote node = %d",
+ (int)getConnectionStatus(), remoteNodeId));
disconnectRemote();
disconnectLocal();
}
@@ -124,65 +126,56 @@ void SCI_Transporter::disconnectImpl()
SCIClose(sciAdapters[i].scidesc, FLAGS, &err);
if(err != SCI_ERR_OK) {
- reportError(callbackObj, localNodeId, TE_SCI_UNABLE_TO_CLOSE_CHANNEL);
-#ifdef DEBUG_TRANSPORTER
- fprintf(stderr,
- "\nCannot close channel to the driver. Error code 0x%x",
- err);
-#endif
- }
+ report_error(TE_SCI_UNABLE_TO_CLOSE_CHANNEL);
+ DBUG_PRINT("error", ("Cannot close channel to the driver. Error code 0x%x",
+ err));
+ }
}
}
m_sciinit=false;
#ifdef DEBUG_TRANSPORTER
- ndbout << "total: " << i1024+ i10242048 + i2048+i2049 << endl;
+ ndbout << "total: " << i1024+ i10242048 + i2048+i2049 << endl;
ndbout << "<1024: " << i1024 << endl;
ndbout << "1024-2047: " << i10242048 << endl;
ndbout << "==2048: " << i2048 << endl;
ndbout << "2049-4096: " << i20484096 << endl;
ndbout << "==4096: " << i4096 << endl;
ndbout << ">4096: " << i4097 << endl;
-
#endif
-
+ DBUG_VOID_RETURN;
}
bool SCI_Transporter::initTransporter() {
- if(m_BufferSize < (2*MAX_MESSAGE_SIZE)){
- m_BufferSize = 2 * MAX_MESSAGE_SIZE;
+ DBUG_ENTER("SCI_Transporter::initTransporter");
+ if(m_BufferSize < (2*MAX_MESSAGE_SIZE + 4096)){
+ m_BufferSize = 2 * MAX_MESSAGE_SIZE + 4096;
}
- // Allocate buffers for sending
- Uint32 sz = 0;
- if(m_BufferSize < (m_PacketSize * 4)){
- sz = m_BufferSize + MAX_MESSAGE_SIZE;
- } else {
- /**
- * 3 packages
- */
- sz = (m_PacketSize * 4) * 3 + MAX_MESSAGE_SIZE;
- }
+ // Allocate buffers for sending: the send buffer size plus extra space,
+ // so that a large message need not be sent in two pieces. The send
+ // buffer size is measured in words.
+ Uint32 sz = 4 * m_PacketSize + MAX_MESSAGE_SIZE;
- m_sendBuffer.m_bufferSize = 4 * ((sz + 3) / 4);
- m_sendBuffer.m_buffer = new Uint32[m_sendBuffer.m_bufferSize / 4];
+ m_sendBuffer.m_sendBufferSize = 4 * ((sz + 3) / 4);
+ m_sendBuffer.m_buffer = new Uint32[m_sendBuffer.m_sendBufferSize / 4];
m_sendBuffer.m_dataSize = 0;
-
+
+ DBUG_PRINT("info", ("Created SCI Send Buffer with buffer size %d and packet size %d",
+ m_sendBuffer.m_sendBufferSize, m_PacketSize * 4));
if(!getLinkStatus(m_ActiveAdapterId) ||
- !getLinkStatus(m_StandbyAdapterId)) {
-#ifdef DEBUG_TRANSPORTER
- ndbout << "The link is not fully operational. " << endl;
- ndbout << "Check the cables and the switches" << endl;
-#endif
+ (m_adapters > 1 &&
+ !getLinkStatus(m_StandbyAdapterId))) {
+ DBUG_PRINT("error", ("The link is not fully operational. Check the cables and the switches"));
//reportDisconnect(remoteNodeId, 0);
//doDisconnect();
//NDB should terminate
- reportError(callbackObj, localNodeId, TE_SCI_LINK_ERROR);
- return false;
+ report_error(TE_SCI_LINK_ERROR);
+ DBUG_RETURN(false);
}
- return true;
+ DBUG_RETURN(true);
} // initTransporter()
@@ -218,10 +211,8 @@ bool SCI_Transporter::getLinkStatus(Uint32 adapterNo)
SCIQuery(SCI_Q_ADAPTER,(void*)(&queryAdapter),(Uint32)NULL,&error);
if(error != SCI_ERR_OK) {
-#ifdef DEBUG_TRANSPORTER
- ndbout << "error querying adapter " << endl;
-#endif
- return false;
+ DBUG_PRINT("error", ("error %d querying adapter", error));
+ return false;
}
if(linkstatus<=0)
return false;
@@ -231,6 +222,7 @@ bool SCI_Transporter::getLinkStatus(Uint32 adapterNo)
sci_error_t SCI_Transporter::initLocalSegment() {
+ DBUG_ENTER("SCI_Transporter::initLocalSegment");
Uint32 segmentSize = m_BufferSize;
Uint32 offset = 0;
sci_error_t err;
@@ -238,16 +230,12 @@ sci_error_t SCI_Transporter::initLocalSegment() {
for(Uint32 i=0; i<m_adapters ; i++) {
SCIOpen(&(sciAdapters[i].scidesc), FLAGS, &err);
sciAdapters[i].localSciNodeId=getLocalNodeId(i);
-#ifdef DEBUG_TRANSPORTER
- ndbout_c("SCInode iD %d adapter %d\n",
- sciAdapters[i].localSciNodeId, i);
-#endif
+ DBUG_PRINT("info", ("SCInode iD %d adapter %d\n",
+ sciAdapters[i].localSciNodeId, i));
if(err != SCI_ERR_OK) {
-#ifdef DEBUG_TRANSPORTER
- ndbout_c("\nCannot open an SCI virtual device. Error code 0x%x",
- err);
-#endif
- return err;
+ DBUG_PRINT("error", ("Cannot open an SCI virtual device. Error code 0x%x",
+ err));
+ DBUG_RETURN(err);
}
}
}
@@ -264,12 +252,11 @@ sci_error_t SCI_Transporter::initLocalSegment() {
&err);
if(err != SCI_ERR_OK) {
- return err;
+ DBUG_PRINT("error", ("Error creating segment, err = 0x%x", err));
+ DBUG_RETURN(err);
} else {
-#ifdef DEBUG_TRANSPORTER
- ndbout << "created segment id : "
- << hostSegmentId(localNodeId, remoteNodeId) << endl;
-#endif
+ DBUG_PRINT("info", ("created segment id : %d",
+ hostSegmentId(localNodeId, remoteNodeId)));
}
/** Prepare the segment*/
@@ -280,11 +267,9 @@ sci_error_t SCI_Transporter::initLocalSegment() {
&err);
if(err != SCI_ERR_OK) {
-#ifdef DEBUG_TRANSPORTER
- ndbout_c("Local Segment is not accessible by an SCI adapter.");
- ndbout_c("Error code 0x%x\n", err);
-#endif
- return err;
+ DBUG_PRINT("error", ("Local Segment is not accessible by an SCI adapter. Error code 0x%x\n",
+ err));
+ DBUG_RETURN(err);
}
}
@@ -301,14 +286,10 @@ sci_error_t SCI_Transporter::initLocalSegment() {
if(err != SCI_ERR_OK) {
-
-#ifdef DEBUG_TRANSPORTER
- fprintf(stderr, "\nCannot map area of size %d. Error code 0x%x",
- segmentSize,err);
- ndbout << "initLocalSegment does a disConnect" << endl;
-#endif
+ DBUG_PRINT("error", ("Cannot map area of size %d. Error code 0x%x",
+ segmentSize,err));
doDisconnect();
- return err;
+ DBUG_RETURN(err);
}
@@ -320,18 +301,16 @@ sci_error_t SCI_Transporter::initLocalSegment() {
&err);
if(err != SCI_ERR_OK) {
-#ifdef DEBUG_TRANSPORTER
- ndbout_c("\nLocal Segment is not available for remote connections.");
- ndbout_c("Error code 0x%x\n", err);
-#endif
- return err;
+ DBUG_PRINT("error", ("Local Segment is not available for remote connections. Error code 0x%x\n",
+ err));
+ DBUG_RETURN(err);
}
}
setupLocalSegment();
- return err;
+ DBUG_RETURN(err);
} // initLocalSegment()
@@ -345,7 +324,7 @@ bool SCI_Transporter::doSend() {
Uint32 retry=0;
const char * const sendPtr = (char*)m_sendBuffer.m_buffer;
- const Uint32 sizeToSend = m_sendBuffer.m_dataSize;
+ const Uint32 sizeToSend = 4 * m_sendBuffer.m_dataSize; //Convert to number of bytes
if (sizeToSend > 0){
#ifdef DEBUG_TRANSPORTER
@@ -363,15 +342,19 @@ bool SCI_Transporter::doSend() {
i4097++;
#endif
if(startSequence(m_ActiveAdapterId)!=SCI_ERR_OK) {
-#ifdef DEBUG_TRANSPORTER
- ndbout << "Start sequence failed" << endl;
-#endif
- reportError(callbackObj, remoteNodeId, TE_SCI_UNABLE_TO_START_SEQUENCE);
+ DBUG_PRINT("error", ("Start sequence failed"));
+ report_error(TE_SCI_UNABLE_TO_START_SEQUENCE);
return false;
}
- tryagain:
+ tryagain:
+ retry++;
+ if (retry > 3) {
+ DBUG_PRINT("error", ("SCI Transfer failed"));
+ report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
+ return false;
+ }
Uint32 * insertPtr = (Uint32 *)
(m_TargetSegm[m_ActiveAdapterId].writer)->getWritePtr(sizeToSend);
@@ -390,44 +373,37 @@ bool SCI_Transporter::doSend() {
&err);
+ if (err != SCI_ERR_OK) {
if(err == SCI_ERR_OUT_OF_RANGE) {
-#ifdef DEBUG_TRANSPORTER
- ndbout << "Data transfer : out of range error \n" << endl;
-#endif
+ DBUG_PRINT("error", ("Data transfer : out of range error"));
goto tryagain;
}
if(err == SCI_ERR_SIZE_ALIGNMENT) {
-#ifdef DEBUG_TRANSPORTER
- ndbout << "Data transfer : aligne\n" << endl;
-#endif
+ DBUG_PRINT("error", ("Data transfer : alignment error"));
+ DBUG_PRINT("info", ("sendPtr 0x%x, sizeToSend = %d", sendPtr, sizeToSend));
goto tryagain;
}
if(err == SCI_ERR_OFFSET_ALIGNMENT) {
-#ifdef DEBUG_TRANSPORTER
- ndbout << "Data transfer : offset alignment\n" << endl;
-#endif
+ DBUG_PRINT("error", ("Data transfer : offset alignment"));
goto tryagain;
- }
+ }
if(err == SCI_ERR_TRANSFER_FAILED) {
//(m_TargetSegm[m_StandbyAdapterId].writer)->heavyLock();
if(getLinkStatus(m_ActiveAdapterId)) {
- retry++;
- if(retry>3) {
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
- return false;
- }
goto tryagain;
}
+ if (m_adapters == 1) {
+ DBUG_PRINT("error", ("SCI Transfer failed"));
+ report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
+ return false;
+ }
m_failCounter++;
Uint32 temp=m_ActiveAdapterId;
switch(m_swapCounter) {
case 0:
/**swap from active (0) to standby (1)*/
if(getLinkStatus(m_StandbyAdapterId)) {
-#ifdef DEBUG_TRANSPORTER
- ndbout << "Swapping from 0 to 1 " << endl;
-#endif
+ DBUG_PRINT("error", ("Swapping from adapter 0 to 1"));
failoverShmWriter();
SCIStoreBarrier(m_TargetSegm[m_StandbyAdapterId].sequence,0);
m_ActiveAdapterId=m_StandbyAdapterId;
@@ -436,26 +412,21 @@ bool SCI_Transporter::doSend() {
FLAGS,
&err);
if(err!=SCI_ERR_OK) {
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNABLE_TO_REMOVE_SEQUENCE);
+ report_error(TE_SCI_UNABLE_TO_REMOVE_SEQUENCE);
+ DBUG_PRINT("error", ("Unable to remove sequence"));
return false;
}
if(startSequence(m_ActiveAdapterId)!=SCI_ERR_OK) {
-#ifdef DEBUG_TRANSPORTER
- ndbout << "Start sequence failed" << endl;
-#endif
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNABLE_TO_START_SEQUENCE);
+ DBUG_PRINT("error", ("Start sequence failed"));
+ report_error(TE_SCI_UNABLE_TO_START_SEQUENCE);
return false;
}
m_swapCounter++;
-#ifdef DEBUG_TRANSPORTER
- ndbout << "failover complete.." << endl;
-#endif
+ DBUG_PRINT("info", ("failover complete"));
goto tryagain;
} else {
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
+ report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
+ DBUG_PRINT("error", ("SCI Transfer failed"));
return false;
}
return false;
@@ -468,20 +439,15 @@ bool SCI_Transporter::doSend() {
failoverShmWriter();
m_ActiveAdapterId=m_StandbyAdapterId;
m_StandbyAdapterId=temp;
-#ifdef DEBUG_TRANSPORTER
- ndbout << "Swapping from 1 to 0 " << endl;
-#endif
+ DBUG_PRINT("info", ("Swapping from 1 to 0"));
if(createSequence(m_ActiveAdapterId)!=SCI_ERR_OK) {
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNABLE_TO_CREATE_SEQUENCE);
+ DBUG_PRINT("error", ("Unable to create sequence"));
+ report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE);
return false;
}
if(startSequence(m_ActiveAdapterId)!=SCI_ERR_OK) {
-#ifdef DEBUG_TRANSPORTER
- ndbout << "startSequence failed... disconnecting" << endl;
-#endif
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNABLE_TO_START_SEQUENCE);
+ DBUG_PRINT("error", ("startSequence failed... disconnecting"));
+ report_error(TE_SCI_UNABLE_TO_START_SEQUENCE);
return false;
}
@@ -489,37 +455,36 @@ bool SCI_Transporter::doSend() {
, FLAGS,
&err);
if(err!=SCI_ERR_OK) {
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNABLE_TO_REMOVE_SEQUENCE);
+ DBUG_PRINT("error", ("Unable to remove sequence"));
+ report_error(TE_SCI_UNABLE_TO_REMOVE_SEQUENCE);
return false;
}
if(createSequence(m_StandbyAdapterId)!=SCI_ERR_OK) {
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNABLE_TO_CREATE_SEQUENCE);
+ DBUG_PRINT("error", ("Unable to create sequence on standby"));
+ report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE);
return false;
}
m_swapCounter=0;
-#ifdef DEBUG_TRANSPORTER
- ndbout << "failover complete.." << endl;
-#endif
+ DBUG_PRINT("info", ("failover complete.."));
goto tryagain;
} else {
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
+ DBUG_PRINT("error", ("Unrecoverable data transfer error"));
+ report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
return false;
}
break;
default:
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
+ DBUG_PRINT("error", ("Unrecoverable data transfer error"));
+ report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
return false;
break;
}
+ }
} else {
SHM_Writer * writer = (m_TargetSegm[m_ActiveAdapterId].writer);
writer->updateWritePtr(sizeToSend);
@@ -535,13 +500,10 @@ bool SCI_Transporter::doSend() {
/**
* If we end up here, the SCI segment is full.
*/
-#ifdef DEBUG_TRANSPORTER
- ndbout << "the segment is full for some reason" << endl;
-#endif
+ DBUG_PRINT("error", ("the segment is full for some reason"));
return false;
} //if
}
-
return true;
} // doSend()
@@ -557,11 +519,8 @@ void SCI_Transporter::failoverShmWriter() {
void SCI_Transporter::setupLocalSegment()
{
-
+ DBUG_ENTER("SCI_Transporter::setupLocalSegment");
Uint32 sharedSize = 0;
- sharedSize += 16; //SHM_Reader::getSharedSize();
- sharedSize += 16; //SHM_Writer::getSharedSize();
- sharedSize += 32; //SHM_Writer::getSharedSize();
sharedSize =4096; //start of the buffer is page aligend
Uint32 sizeOfBuffer = m_BufferSize;
@@ -570,27 +529,15 @@ void SCI_Transporter::setupLocalSegment()
Uint32 * localReadIndex =
(Uint32*)m_SourceSegm[m_ActiveAdapterId].mappedMemory;
- Uint32 * localWriteIndex =
- (Uint32*)(localReadIndex+ 1);
-
- Uint32 * localEndOfDataIndex = (Uint32*)
- (localReadIndex + 2);
-
+ Uint32 * localWriteIndex = (Uint32*)(localReadIndex+ 1);
m_localStatusFlag = (Uint32*)(localReadIndex + 3);
- Uint32 * sharedLockIndex = (Uint32*)
- (localReadIndex + 4);
-
- Uint32 * sharedHeavyLock = (Uint32*)
- (localReadIndex + 5);
-
char * localStartOfBuf = (char*)
((char*)m_SourceSegm[m_ActiveAdapterId].mappedMemory+sharedSize);
-
- * localReadIndex = * localWriteIndex = 0;
- * localEndOfDataIndex = sizeOfBuffer -1;
-
+ * localReadIndex = 0;
+ * localWriteIndex = 0;
+
const Uint32 slack = MAX_MESSAGE_SIZE;
reader = new SHM_Reader(localStartOfBuf,
@@ -599,178 +546,240 @@ void SCI_Transporter::setupLocalSegment()
localReadIndex,
localWriteIndex);
- * localReadIndex = 0;
- * localWriteIndex = 0;
-
reader->clear();
+ DBUG_VOID_RETURN;
} //setupLocalSegment
void SCI_Transporter::setupRemoteSegment()
{
+ DBUG_ENTER("SCI_Transporter::setupRemoteSegment");
Uint32 sharedSize = 0;
- sharedSize += 16; //SHM_Reader::getSharedSize();
- sharedSize += 16; //SHM_Writer::getSharedSize();
- sharedSize += 32;
- sharedSize =4096; //start of the buffer is page aligend
+ sharedSize =4096; //start of the buffer is page aligned
Uint32 sizeOfBuffer = m_BufferSize;
+ const Uint32 slack = MAX_MESSAGE_SIZE;
sizeOfBuffer -= sharedSize;
- Uint32 * segPtr = (Uint32*) m_TargetSegm[m_StandbyAdapterId].mappedMemory ;
-
- Uint32 * remoteReadIndex2 = (Uint32*)segPtr;
- Uint32 * remoteWriteIndex2 = (Uint32*) (segPtr + 1);
- Uint32 * remoteEndOfDataIndex2 = (Uint32*) (segPtr + 2);
- Uint32 * sharedLockIndex2 = (Uint32*) (segPtr + 3);
- m_remoteStatusFlag2 = (Uint32*)(segPtr + 4);
- Uint32 * sharedHeavyLock2 = (Uint32*) (segPtr + 5);
-
-
- char * remoteStartOfBuf2 = ( char*)((char *)segPtr+sharedSize);
-
- segPtr = (Uint32*) m_TargetSegm[m_ActiveAdapterId].mappedMemory ;
+
+ Uint32 *segPtr = (Uint32*) m_TargetSegm[m_ActiveAdapterId].mappedMemory ;
Uint32 * remoteReadIndex = (Uint32*)segPtr;
- Uint32 * remoteWriteIndex = (Uint32*) (segPtr + 1);
- Uint32 * remoteEndOfDataIndex = (Uint32*) (segPtr + 2);
- Uint32 * sharedLockIndex = (Uint32*) (segPtr + 3);
- m_remoteStatusFlag = (Uint32*)(segPtr + 4);
- Uint32 * sharedHeavyLock = (Uint32*) (segPtr + 5);
+ Uint32 * remoteWriteIndex = (Uint32*)(segPtr + 1);
+ m_remoteStatusFlag = (Uint32*)(segPtr + 3);
char * remoteStartOfBuf = ( char*)((char*)segPtr+(sharedSize));
- * remoteReadIndex = * remoteWriteIndex = 0;
- * remoteReadIndex2 = * remoteWriteIndex2 = 0;
- * remoteEndOfDataIndex = sizeOfBuffer - 1;
- * remoteEndOfDataIndex2 = sizeOfBuffer - 1;
-
- /**
- * setup two writers. writer2 is used to mirror the changes of
- * writer on the standby
- * segment, so that in the case of a failover, we can switch
- * to the stdby seg. quickly.*
- */
- const Uint32 slack = MAX_MESSAGE_SIZE;
-
writer = new SHM_Writer(remoteStartOfBuf,
sizeOfBuffer,
slack,
remoteReadIndex,
remoteWriteIndex);
- writer2 = new SHM_Writer(remoteStartOfBuf2,
- sizeOfBuffer,
- slack,
- remoteReadIndex2,
- remoteWriteIndex2);
-
- * remoteReadIndex = 0;
- * remoteWriteIndex = 0;
-
writer->clear();
- writer2->clear();
m_TargetSegm[0].writer=writer;
- m_TargetSegm[1].writer=writer2;
m_sendBuffer.m_forceSendLimit = writer->getBufferSize();
if(createSequence(m_ActiveAdapterId)!=SCI_ERR_OK) {
- reportThreadError(remoteNodeId, TE_SCI_UNABLE_TO_CREATE_SEQUENCE);
+ report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE);
+ DBUG_PRINT("error", ("Unable to create sequence on active"));
doDisconnect();
}
- if(createSequence(m_StandbyAdapterId)!=SCI_ERR_OK) {
- reportThreadError(remoteNodeId, TE_SCI_UNABLE_TO_CREATE_SEQUENCE);
- doDisconnect();
- }
-
-
+ if (m_adapters > 1) {
+ segPtr = (Uint32*) m_TargetSegm[m_StandbyAdapterId].mappedMemory ;
+
+ Uint32 * remoteReadIndex2 = (Uint32*)segPtr;
+ Uint32 * remoteWriteIndex2 = (Uint32*) (segPtr + 1);
+ m_remoteStatusFlag2 = (Uint32*)(segPtr + 3);
+
+ char * remoteStartOfBuf2 = ( char*)((char *)segPtr+sharedSize);
+
+ /**
+ * setup a writer. writer2 is used to mirror the changes of
+ * writer on the standby
+ * segment, so that in the case of a failover, we can switch
+ * to the stdby seg. quickly.*
+ */
+ writer2 = new SHM_Writer(remoteStartOfBuf2,
+ sizeOfBuffer,
+ slack,
+ remoteReadIndex2,
+ remoteWriteIndex2);
+
+ * remoteReadIndex = 0;
+ * remoteWriteIndex = 0;
+ writer2->clear();
+ m_TargetSegm[1].writer=writer2;
+ if(createSequence(m_StandbyAdapterId)!=SCI_ERR_OK) {
+ report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE);
+ DBUG_PRINT("error", ("Unable to create sequence on standby"));
+ doDisconnect();
+ }
+ }
+ DBUG_VOID_RETURN;
} //setupRemoteSegment
-
-
-bool SCI_Transporter::connectImpl(Uint32 timeout) {
-
- sci_error_t err;
- Uint32 offset = 0;
-
+
+bool
+SCI_Transporter::init_local()
+{
+ DBUG_ENTER("SCI_Transporter::init_local");
if(!m_initLocal) {
if(initLocalSegment()!=SCI_ERR_OK){
- NdbSleep_MilliSleep(timeout);
+ NdbSleep_MilliSleep(10);
//NDB SHOULD TERMINATE AND COMPUTER REBOOTED!
- reportThreadError(localNodeId, TE_SCI_CANNOT_INIT_LOCALSEGMENT);
- return false;
+ report_error(TE_SCI_CANNOT_INIT_LOCALSEGMENT);
+ DBUG_RETURN(false);
}
- m_initLocal=true;
+ m_initLocal=true;
}
-
- if(!m_mapped ) {
-
- for(Uint32 i=0; i < m_adapters ; i++) {
- m_TargetSegm[i].rhm[i].remoteHandle=0;
- SCIConnectSegment(sciAdapters[i].scidesc,
- &(m_TargetSegm[i].rhm[i].remoteHandle),
- m_remoteNodes[i],
- remoteSegmentId(localNodeId, remoteNodeId),
- i,
- 0,
- 0,
- 0,
- 0,
- &err);
-
- if(err != SCI_ERR_OK) {
- NdbSleep_MilliSleep(timeout);
- return false;
- }
-
- }
-
-
+ DBUG_RETURN(true);
+}
+
+bool
+SCI_Transporter::init_remote()
+{
+ DBUG_ENTER("SCI_Transporter::init_remote");
+ sci_error_t err;
+ Uint32 offset = 0;
+ if(!m_mapped ) {
+ DBUG_PRINT("info", ("Map remote segments"));
+ for(Uint32 i=0; i < m_adapters ; i++) {
+ m_TargetSegm[i].rhm[i].remoteHandle=0;
+ SCIConnectSegment(sciAdapters[i].scidesc,
+ &(m_TargetSegm[i].rhm[i].remoteHandle),
+ m_remoteNodes[i],
+ remoteSegmentId(localNodeId, remoteNodeId),
+ i,
+ 0,
+ 0,
+ 0,
+ 0,
+ &err);
+
+ if(err != SCI_ERR_OK) {
+ NdbSleep_MilliSleep(10);
+ DBUG_PRINT("error", ("Error connecting segment, err 0x%x", err));
+ DBUG_RETURN(false);
+ }
+
+ }
// Map the remote memory segment into program space
- for(Uint32 i=0; i < m_adapters ; i++) {
- m_TargetSegm[i].mappedMemory =
- SCIMapRemoteSegment((m_TargetSegm[i].rhm[i].remoteHandle),
- &(m_TargetSegm[i].rhm[i].map),
- offset,
- m_BufferSize,
- NULL,
- FLAGS,
- &err);
-
-
- if(err!= SCI_ERR_OK) {
-#ifdef DEBUG_TRANSPORTER
- ndbout_c("\nCannot map a segment to the remote node %d.");
- ndbout_c("Error code 0x%x",m_RemoteSciNodeId, err);
-#endif
- //NDB SHOULD TERMINATE AND COMPUTER REBOOTED!
- reportThreadError(remoteNodeId, TE_SCI_CANNOT_MAP_REMOTESEGMENT);
- return false;
- }
-
-
- }
- m_mapped=true;
- setupRemoteSegment();
- setConnected();
-#ifdef DEBUG_TRANSPORTER
- ndbout << "connected and mapped to segment : " << endl;
- ndbout << "remoteNode: " << m_remoteNodes[0] << endl;
- ndbout << "remoteNode: " << m_remotenodes[1] << endl;
- ndbout << "remoteSegId: "
- << remoteSegmentId(localNodeId, remoteNodeId)
- << endl;
-#endif
- return true;
- }
- else {
- return getConnectionStatus();
- }
-} // connectImpl()
-
+ for(Uint32 i=0; i < m_adapters ; i++) {
+ m_TargetSegm[i].mappedMemory =
+ SCIMapRemoteSegment((m_TargetSegm[i].rhm[i].remoteHandle),
+ &(m_TargetSegm[i].rhm[i].map),
+ offset,
+ m_BufferSize,
+ NULL,
+ FLAGS,
+ &err);
+
+ if(err!= SCI_ERR_OK) {
+ DBUG_PRINT("error", ("Cannot map a segment to the remote node %d. Error code 0x%x",m_RemoteSciNodeId, err));
+ //NDB SHOULD TERMINATE AND COMPUTER REBOOTED!
+ report_error(TE_SCI_CANNOT_MAP_REMOTESEGMENT);
+ DBUG_RETURN(false);
+ }
+ }
+ m_mapped=true;
+ setupRemoteSegment();
+ setConnected();
+ DBUG_PRINT("info", ("connected and mapped to segment, remoteNode: %d",
+ remoteNodeId));
+ DBUG_PRINT("info", ("remoteSegId: %d",
+ remoteSegmentId(localNodeId, remoteNodeId)));
+ DBUG_RETURN(true);
+ } else {
+ DBUG_RETURN(getConnectionStatus());
+ }
+}
+
+bool
+SCI_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd)
+{
+ SocketInputStream s_input(sockfd);
+ SocketOutputStream s_output(sockfd);
+ char buf[256];
+ DBUG_ENTER("SCI_Transporter::connect_client_impl");
+ // Wait for server to create and attach
+ if (s_input.gets(buf, 256) == 0) {
+ DBUG_PRINT("error", ("No initial response from server in SCI"));
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_RETURN(false);
+ }
+
+ if (!init_local()) {
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_RETURN(false);
+ }
+
+ // Send ok to server
+ s_output.println("sci client 1 ok");
+
+ if (!init_remote()) {
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_RETURN(false);
+ }
+ // Wait for ok from server
+ if (s_input.gets(buf, 256) == 0) {
+ DBUG_PRINT("error", ("No second response from server in SCI"));
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_RETURN(false);
+ }
+ // Send ok to server
+ s_output.println("sci client 2 ok");
+
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_PRINT("info", ("Successfully connected client to node %d",
+ remoteNodeId));
+ DBUG_RETURN(true);
+}
+
+bool
+SCI_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd)
+{
+ SocketOutputStream s_output(sockfd);
+ SocketInputStream s_input(sockfd);
+ char buf[256];
+ DBUG_ENTER("SCI_Transporter::connect_server_impl");
+
+ if (!init_local()) {
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_RETURN(false);
+ }
+ // Send ok to client
+ s_output.println("sci server 1 ok");
+
+ // Wait for ok from client
+ if (s_input.gets(buf, 256) == 0) {
+ DBUG_PRINT("error", ("No response from client in SCI"));
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_RETURN(false);
+ }
+
+ if (!init_remote()) {
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_RETURN(false);
+ }
+ // Send ok to client
+ s_output.println("sci server 2 ok");
+ // Wait for ok from client
+ if (s_input.gets(buf, 256) == 0) {
+ DBUG_PRINT("error", ("No second response from client in SCI"));
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_RETURN(false);
+ }
+
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_PRINT("info", ("Successfully connected server to node %d",
+ remoteNodeId));
+ DBUG_RETURN(true);
+}
+
sci_error_t SCI_Transporter::createSequence(Uint32 adapterid) {
sci_error_t err;
SCICreateMapSequence((m_TargetSegm[adapterid].rhm[adapterid].map),
@@ -795,13 +804,14 @@ sci_error_t SCI_Transporter::startSequence(Uint32 adapterid) {
// If there still is an error then data cannot be safely send
- return err;
+ return err;
} // startSequence()
bool SCI_Transporter::disconnectLocal()
-{
+{
+ DBUG_ENTER("SCI_Transporter::disconnectLocal");
sci_error_t err;
m_ActiveAdapterId=0;
@@ -809,31 +819,28 @@ bool SCI_Transporter::disconnectLocal()
*/
SCIUnmapSegment(m_SourceSegm[0].lhm[0].map,0,&err);
- if(err!=SCI_ERR_OK) {
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNABLE_TO_UNMAP_SEGMENT);
- return false;
- }
+ if(err!=SCI_ERR_OK) {
+ report_error(TE_SCI_UNABLE_TO_UNMAP_SEGMENT);
+ DBUG_PRINT("error", ("Unable to unmap segment"));
+ DBUG_RETURN(false);
+ }
SCIRemoveSegment((m_SourceSegm[m_ActiveAdapterId].localHandle),
FLAGS,
&err);
if(err!=SCI_ERR_OK) {
- reportError(callbackObj, remoteNodeId, TE_SCI_UNABLE_TO_REMOVE_SEGMENT);
- return false;
+ report_error(TE_SCI_UNABLE_TO_REMOVE_SEGMENT);
+ DBUG_PRINT("error", ("Unable to remove segment"));
+ DBUG_RETURN(false);
}
-
- if(err == SCI_ERR_OK) {
-#ifdef DEBUG_TRANSPORTER
- printf("Local memory segment is unmapped and removed\n" );
-#endif
- }
- return true;
+ DBUG_PRINT("info", ("Local memory segment is unmapped and removed"));
+ DBUG_RETURN(true);
} // disconnectLocal()
bool SCI_Transporter::disconnectRemote() {
+ DBUG_ENTER("SCI_Transporter::disconnectRemote");
sci_error_t err;
for(Uint32 i=0; i<m_adapters; i++) {
/**
@@ -841,35 +848,32 @@ bool SCI_Transporter::disconnectRemote() {
*/
SCIUnmapSegment(m_TargetSegm[i].rhm[i].map,0,&err);
if(err!=SCI_ERR_OK) {
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT);
- return false;
- }
+ report_error(TE_SCI_UNABLE_TO_UNMAP_SEGMENT);
+ DBUG_PRINT("error", ("Unable to unmap segment"));
+ DBUG_RETURN(false);
+ }
SCIDisconnectSegment(m_TargetSegm[i].rhm[i].remoteHandle,
FLAGS,
&err);
if(err!=SCI_ERR_OK) {
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT);
- return false;
+ report_error(TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT);
+ DBUG_PRINT("error", ("Unable to disconnect segment"));
+ DBUG_RETURN(false);
}
-#ifdef DEBUG_TRANSPORTER
- ndbout_c("Remote memory segment is unmapped and disconnected\n" );
-#endif
+ DBUG_PRINT("info", ("Remote memory segment is unmapped and disconnected"));
}
- return true;
+ DBUG_RETURN(true);
} // disconnectRemote()
SCI_Transporter::~SCI_Transporter() {
+ DBUG_ENTER("SCI_Transporter::~SCI_Transporter");
// Close channel to the driver
-#ifdef DEBUG_TRANSPORTER
- ndbout << "~SCITransporter does a disConnect" << endl;
-#endif
doDisconnect();
if(m_sendBuffer.m_buffer != NULL)
delete[] m_sendBuffer.m_buffer;
+ DBUG_VOID_RETURN;
} // ~SCI_Transporter()
@@ -878,7 +882,7 @@ SCI_Transporter::~SCI_Transporter() {
void SCI_Transporter::closeSCI() {
// Termination of SCI
sci_error_t err;
- printf("\nClosing SCI Transporter...\n");
+ DBUG_ENTER("SCI_Transporter::closeSCI");
// Disconnect and remove remote segment
disconnectRemote();
@@ -890,26 +894,41 @@ void SCI_Transporter::closeSCI() {
// Closes an SCI virtual device
SCIClose(activeSCIDescriptor, FLAGS, &err);
- if(err != SCI_ERR_OK)
- fprintf(stderr,
- "\nCannot close SCI channel to the driver. Error code 0x%x",
- err);
+ if(err != SCI_ERR_OK) {
+ DBUG_PRINT("error", ("Cannot close SCI channel to the driver. Error code 0x%x",
+ err));
+ }
SCITerminate();
+ DBUG_VOID_RETURN;
} // closeSCI()
Uint32 *
-SCI_Transporter::getWritePtr(Uint32 lenBytes, Uint32 prio){
+SCI_Transporter::getWritePtr(Uint32 lenBytes, Uint32 prio)
+{
- if(m_sendBuffer.full()){
- /**-------------------------------------------------
- * Buffer was completely full. We have severe problems.
- * -------------------------------------------------
- */
- if(!doSend()){
+ Uint32 sci_buffer_remaining = m_sendBuffer.m_forceSendLimit;
+ Uint32 send_buf_size = m_sendBuffer.m_sendBufferSize;
+ Uint32 curr_data_size = m_sendBuffer.m_dataSize << 2;
+ Uint32 new_curr_data_size = curr_data_size + lenBytes;
+ if ((curr_data_size >= send_buf_size) ||
+ (curr_data_size >= sci_buffer_remaining)) {
+ /**
+ * The new message will not fit in the send buffer. We need to
+ * send the send buffer before filling it up with the new
+ * signal data. If the current data size would spill over the buffer
+ * edge, we also send to ensure correct operation.
+ */
+ if (!doSend()) {
+ /**
+ * We were not successful in sending; report 0 to mean the buffer is full
+ * and let upper levels handle retries and other recovery matters.
+ */
return 0;
}
}
-
+ /**
+ * New signal fits, simply fill it up with more data.
+ */
Uint32 sz = m_sendBuffer.m_dataSize;
return &m_sendBuffer.m_buffer[sz];
}
@@ -918,10 +937,11 @@ void
SCI_Transporter::updateWritePtr(Uint32 lenBytes, Uint32 prio){
Uint32 sz = m_sendBuffer.m_dataSize;
- sz += (lenBytes / 4);
+ Uint32 packet_size = m_PacketSize;
+ sz += ((lenBytes + 3) >> 2);
m_sendBuffer.m_dataSize = sz;
- if(sz > m_PacketSize) {
+ if(sz > packet_size) {
/**-------------------------------------------------
* Buffer is full and we are ready to send. We will
* not wait since the signal is already in the buffer.
@@ -944,7 +964,8 @@ bool
SCI_Transporter::getConnectionStatus() {
if(*m_localStatusFlag == SCICONNECTED &&
(*m_remoteStatusFlag == SCICONNECTED ||
- *m_remoteStatusFlag2 == SCICONNECTED))
+ ((m_adapters > 1) &&
+ *m_remoteStatusFlag2 == SCICONNECTED)))
return true;
else
return false;
@@ -954,7 +975,9 @@ SCI_Transporter::getConnectionStatus() {
void
SCI_Transporter::setConnected() {
*m_remoteStatusFlag = SCICONNECTED;
- *m_remoteStatusFlag2 = SCICONNECTED;
+ if (m_adapters > 1) {
+ *m_remoteStatusFlag2 = SCICONNECTED;
+ }
*m_localStatusFlag = SCICONNECTED;
}
@@ -963,8 +986,10 @@ void
SCI_Transporter::setDisconnect() {
if(getLinkStatus(m_ActiveAdapterId))
*m_remoteStatusFlag = SCIDISCONNECT;
- if(getLinkStatus(m_StandbyAdapterId))
- *m_remoteStatusFlag2 = SCIDISCONNECT;
+ if (m_adapters > 1) {
+ if(getLinkStatus(m_StandbyAdapterId))
+ *m_remoteStatusFlag2 = SCIDISCONNECT;
+ }
}
@@ -981,20 +1006,20 @@ static bool init = false;
bool
SCI_Transporter::initSCI() {
+ DBUG_ENTER("SCI_Transporter::initSCI");
if(!init){
sci_error_t error;
// Initialize SISCI library
SCIInitialize(0, &error);
if(error != SCI_ERR_OK) {
-#ifdef DEBUG_TRANSPORTER
- ndbout_c("\nCannot initialize SISCI library.");
- ndbout_c("\nInconsistency between SISCI library and SISCI driver.Error code 0x%x", error);
-#endif
- return false;
+ DBUG_PRINT("error", ("Cannot initialize SISCI library."));
+ DBUG_PRINT("error", ("Inconsistency between SISCI library and SISCI driver. Error code 0x%x",
+ error));
+ DBUG_RETURN(false);
}
init = true;
}
- return true;
+ DBUG_RETURN(true);
}
diff --git a/ndb/src/common/transporter/SCI_Transporter.hpp b/ndb/src/common/transporter/SCI_Transporter.hpp
index 03496c2ce21..e2f2dfcaf99 100644
--- a/ndb/src/common/transporter/SCI_Transporter.hpp
+++ b/ndb/src/common/transporter/SCI_Transporter.hpp
@@ -26,7 +26,7 @@
#include <ndb_types.h>
- /**
+/**
* The SCI Transporter
*
* The design goal of the SCI transporter is to deliver high performance
@@ -135,15 +135,17 @@ public:
bool getConnectionStatus();
private:
- SCI_Transporter(Uint32 packetSize,
+ SCI_Transporter(TransporterRegistry &t_reg,
+ const char *local_host,
+ const char *remote_host,
+ int port,
+ Uint32 packetSize,
Uint32 bufferSize,
Uint32 nAdapters,
Uint16 remoteSciNodeId0,
Uint16 remoteSciNodeId1,
NodeId localNodeID,
NodeId remoteNodeID,
- int byteorder,
- bool compression,
bool checksum,
bool signalId,
Uint32 reportFreq = 4096);
@@ -160,7 +162,8 @@ private:
/**
* For statistics on transfered packets
*/
-#ifdef DEBUG_TRANSPORTER
+//#ifdef DEBUG_TRANSPORTER
+#if 1
Uint32 i1024;
Uint32 i2048;
Uint32 i2049;
@@ -177,10 +180,8 @@ private:
struct {
Uint32 * m_buffer; // The buffer
Uint32 m_dataSize; // No of words in buffer
- Uint32 m_bufferSize; // Buffer size
+ Uint32 m_sendBufferSize; // Buffer size
Uint32 m_forceSendLimit; // Send when buffer is this full
-
- bool full() const { return (m_dataSize * 4) > m_forceSendLimit ;}
} m_sendBuffer;
SHM_Reader * reader;
@@ -196,7 +197,7 @@ private:
Uint32 m_adapters;
Uint32 m_numberOfRemoteNodes;
- Uint16* m_remoteNodes;
+ Uint16 m_remoteNodes[2];
typedef struct SciAdapter {
sci_desc_t scidesc;
@@ -296,12 +297,11 @@ private:
*/
bool sendIsPossible(struct timeval * timeout);
-
void getReceivePtr(Uint32 ** ptr, Uint32 ** eod){
reader->getReadPtr(* ptr, * eod);
}
- void updateReceivePtr(Uint32 * ptr){
+ void updateReceivePtr(Uint32 *ptr){
reader->updateReadPtr(ptr);
}
@@ -341,7 +341,9 @@ private:
*/
void failoverShmWriter();
-
+ bool init_local();
+ bool init_remote();
+
protected:
/** Perform a connection between segment
@@ -350,7 +352,8 @@ protected:
* retrying.
* @return Returns true on success, otherwize falser
*/
- bool connectImpl(Uint32 timeOutMillis);
+ bool connect_server_impl(NDB_SOCKET_TYPE sockfd);
+ bool connect_client_impl(NDB_SOCKET_TYPE sockfd);
/**
* We will disconnect if:
diff --git a/ndb/src/common/transporter/SHM_Buffer.hpp b/ndb/src/common/transporter/SHM_Buffer.hpp
index 32e59dd57a2..f49b4fe73cb 100644
--- a/ndb/src/common/transporter/SHM_Buffer.hpp
+++ b/ndb/src/common/transporter/SHM_Buffer.hpp
@@ -52,7 +52,7 @@ public:
}
void clear() {
- m_readIndex = * m_sharedReadIndex;
+ m_readIndex = 0;
}
/**
@@ -71,7 +71,7 @@ public:
/**
* Update read ptr
*/
- inline void updateReadPtr(Uint32 * readPtr);
+ inline void updateReadPtr(Uint32 *ptr);
private:
char * const m_startOfBuffer;
@@ -98,8 +98,8 @@ SHM_Reader::empty() const{
*/
inline
void
-SHM_Reader::getReadPtr(Uint32 * & ptr, Uint32 * & eod){
-
+SHM_Reader::getReadPtr(Uint32 * & ptr, Uint32 * & eod)
+{
Uint32 tReadIndex = m_readIndex;
Uint32 tWriteIndex = * m_sharedWriteIndex;
@@ -117,14 +117,14 @@ SHM_Reader::getReadPtr(Uint32 * & ptr, Uint32 * & eod){
*/
inline
void
-SHM_Reader::updateReadPtr(Uint32 * ptr){
-
- Uint32 tReadIndex = ((char *)ptr) - m_startOfBuffer;
+SHM_Reader::updateReadPtr(Uint32 *ptr)
+{
+ Uint32 tReadIndex = ((char*)ptr) - m_startOfBuffer;
assert(tReadIndex < m_totalBufferSize);
if(tReadIndex >= m_bufferSize){
- tReadIndex = 0; //-= m_bufferSize;
+ tReadIndex = 0;
}
m_readIndex = tReadIndex;
@@ -149,7 +149,7 @@ public:
}
void clear() {
- m_writeIndex = * m_sharedWriteIndex;
+ m_writeIndex = 0;
}
inline char * getWritePtr(Uint32 sz);
@@ -206,7 +206,7 @@ SHM_Writer::updateWritePtr(Uint32 sz){
assert(tWriteIndex < m_totalBufferSize);
if(tWriteIndex >= m_bufferSize){
- tWriteIndex = 0; //-= m_bufferSize;
+ tWriteIndex = 0;
}
m_writeIndex = tWriteIndex;
diff --git a/ndb/src/common/transporter/SHM_Transporter.cpp b/ndb/src/common/transporter/SHM_Transporter.cpp
index aa6b650afa8..ab161d8c18c 100644
--- a/ndb/src/common/transporter/SHM_Transporter.cpp
+++ b/ndb/src/common/transporter/SHM_Transporter.cpp
@@ -32,13 +32,12 @@ SHM_Transporter::SHM_Transporter(TransporterRegistry &t_reg,
int r_port,
NodeId lNodeId,
NodeId rNodeId,
- bool compression,
bool checksum,
bool signalId,
key_t _shmKey,
Uint32 _shmSize) :
Transporter(t_reg, lHostName, rHostName, r_port, lNodeId, rNodeId,
- 0, compression, checksum, signalId),
+ 0, false, checksum, signalId),
shmKey(_shmKey),
shmSize(_shmSize)
{
@@ -48,7 +47,7 @@ SHM_Transporter::SHM_Transporter(TransporterRegistry &t_reg,
shmBuf = 0;
reader = 0;
writer = 0;
-
+
setupBuffersDone=false;
#ifdef DEBUG_TRANSPORTER
printf("shm key (%d - %d) = %d\n", lNodeId, rNodeId, shmKey);
@@ -92,8 +91,6 @@ SHM_Transporter::setupBuffers(){
clientStatusFlag = base2 + 4;
char * startOfBuf2 = ((char *)base2)+sharedSize;
- * sharedReadIndex2 = * sharedWriteIndex2 = 0;
-
if(isServer){
* serverStatusFlag = 0;
reader = new SHM_Reader(startOfBuf1,
@@ -109,10 +106,10 @@ SHM_Transporter::setupBuffers(){
sharedWriteIndex2);
* sharedReadIndex1 = 0;
- * sharedWriteIndex2 = 0;
+ * sharedWriteIndex1 = 0;
* sharedReadIndex2 = 0;
- * sharedWriteIndex1 = 0;
+ * sharedWriteIndex2 = 0;
reader->clear();
writer->clear();
@@ -224,6 +221,7 @@ SHM_Transporter::prepareSend(const SignalHeader * const signalHeader,
bool
SHM_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd)
{
+ DBUG_ENTER("SHM_Transporter::connect_server_impl");
SocketOutputStream s_output(sockfd);
SocketInputStream s_input(sockfd);
char buf[256];
@@ -233,7 +231,7 @@ SHM_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd)
if (!ndb_shm_create()) {
report_error(TE_SHM_UNABLE_TO_CREATE_SEGMENT);
NDB_CLOSE_SOCKET(sockfd);
- return false;
+ DBUG_RETURN(false);
}
_shmSegCreated = true;
}
@@ -243,7 +241,7 @@ SHM_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd)
if (!ndb_shm_attach()) {
report_error(TE_SHM_UNABLE_TO_ATTACH_SEGMENT);
NDB_CLOSE_SOCKET(sockfd);
- return false;
+ DBUG_RETURN(false);
}
_attached = true;
}
@@ -254,7 +252,7 @@ SHM_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd)
// Wait for ok from client
if (s_input.gets(buf, 256) == 0) {
NDB_CLOSE_SOCKET(sockfd);
- return false;
+ DBUG_RETURN(false);
}
int r= connect_common(sockfd);
@@ -265,17 +263,20 @@ SHM_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd)
// Wait for ok from client
if (s_input.gets(buf, 256) == 0) {
NDB_CLOSE_SOCKET(sockfd);
- return false;
+ DBUG_RETURN(false);
}
+ DBUG_PRINT("info", ("Successfully connected server to node %d",
+ remoteNodeId));
}
NDB_CLOSE_SOCKET(sockfd);
- return r;
+ DBUG_RETURN(r);
}
bool
SHM_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd)
{
+ DBUG_ENTER("SHM_Transporter::connect_client_impl");
SocketInputStream s_input(sockfd);
SocketOutputStream s_output(sockfd);
char buf[256];
@@ -283,14 +284,18 @@ SHM_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd)
// Wait for server to create and attach
if (s_input.gets(buf, 256) == 0) {
NDB_CLOSE_SOCKET(sockfd);
- return false;
+ DBUG_PRINT("error", ("Server id %d did not attach",
+ remoteNodeId));
+ DBUG_RETURN(false);
}
// Create
if(!_shmSegCreated){
if (!ndb_shm_get()) {
NDB_CLOSE_SOCKET(sockfd);
- return false;
+ DBUG_PRINT("error", ("Failed create of shm seg to node %d",
+ remoteNodeId));
+ DBUG_RETURN(false);
}
_shmSegCreated = true;
}
@@ -300,7 +305,9 @@ SHM_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd)
if (!ndb_shm_attach()) {
report_error(TE_SHM_UNABLE_TO_ATTACH_SEGMENT);
NDB_CLOSE_SOCKET(sockfd);
- return false;
+ DBUG_PRINT("error", ("Failed attach of shm seg to node %d",
+ remoteNodeId));
+ DBUG_RETURN(false);
}
_attached = true;
}
@@ -314,21 +321,28 @@ SHM_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd)
// Wait for ok from server
if (s_input.gets(buf, 256) == 0) {
NDB_CLOSE_SOCKET(sockfd);
- return false;
+ DBUG_PRINT("error", ("No ok from server node %d",
+ remoteNodeId));
+ DBUG_RETURN(false);
}
// Send ok to server
s_output.println("shm client 2 ok");
+ DBUG_PRINT("info", ("Successfully connected client to node %d",
+ remoteNodeId));
}
NDB_CLOSE_SOCKET(sockfd);
- return r;
+ DBUG_RETURN(r);
}
bool
SHM_Transporter::connect_common(NDB_SOCKET_TYPE sockfd)
{
- if (!checkConnected())
+ if (!checkConnected()) {
+ DBUG_PRINT("error", ("Already connected to node %d",
+ remoteNodeId));
return false;
+ }
if(!setupBuffersDone) {
setupBuffers();
@@ -341,5 +355,7 @@ SHM_Transporter::connect_common(NDB_SOCKET_TYPE sockfd)
return true;
}
+ DBUG_PRINT("error", ("Failed to set up buffers to node %d",
+ remoteNodeId));
return false;
}
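
The connect_*_impl() hunks above add DBUG_ENTER() and convert every early return into DBUG_RETURN(). With MySQL's dbug facility the two must stay paired: each exit from a DBUG_ENTER'd function has to pop the trace frame again. A hedged sketch of the pattern; the include set and the function itself are assumptions, not taken from this patch:

#include <my_global.h>   // assumed: pulls in the dbug configuration
#include <my_dbug.h>     // DBUG_ENTER / DBUG_PRINT / DBUG_RETURN

static bool try_connect(int fd)
{
  DBUG_ENTER("try_connect");              // push a trace frame
  if (fd < 0)
  {
    DBUG_PRINT("error", ("bad fd %d", fd));
    DBUG_RETURN(false);                   // every exit must pop the frame
  }
  DBUG_PRINT("info", ("fd %d accepted", fd));
  DBUG_RETURN(true);                      // a plain 'return' here would unbalance the trace
}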
diff --git a/ndb/src/common/transporter/SHM_Transporter.hpp b/ndb/src/common/transporter/SHM_Transporter.hpp
index be54d0daa2a..27692209ffe 100644
--- a/ndb/src/common/transporter/SHM_Transporter.hpp
+++ b/ndb/src/common/transporter/SHM_Transporter.hpp
@@ -38,7 +38,6 @@ public:
int r_port,
NodeId lNodeId,
NodeId rNodeId,
- bool compression,
bool checksum,
bool signalId,
key_t shmKey,
@@ -127,6 +126,7 @@ protected:
private:
bool _shmSegCreated;
bool _attached;
+ bool m_connected;
key_t shmKey;
volatile Uint32 * serverStatusFlag;
diff --git a/ndb/src/common/transporter/TCP_Transporter.cpp b/ndb/src/common/transporter/TCP_Transporter.cpp
index 8833b51e236..524ecd653e0 100644
--- a/ndb/src/common/transporter/TCP_Transporter.cpp
+++ b/ndb/src/common/transporter/TCP_Transporter.cpp
@@ -70,11 +70,10 @@ TCP_Transporter::TCP_Transporter(TransporterRegistry &t_reg,
int r_port,
NodeId lNodeId,
NodeId rNodeId,
- int byte_order,
- bool compr, bool chksm, bool signalId,
+ bool chksm, bool signalId,
Uint32 _reportFreq) :
Transporter(t_reg, lHostName, rHostName, r_port, lNodeId, rNodeId,
- byte_order, compr, chksm, signalId),
+ 0, false, chksm, signalId),
m_sendBuffer(sendBufSize)
{
maxReceiveSize = maxRecvSize;
@@ -106,12 +105,14 @@ TCP_Transporter::~TCP_Transporter() {
bool TCP_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd)
{
- return connect_common(sockfd);
+ DBUG_ENTER("TCP_Transpporter::connect_server_impl");
+ DBUG_RETURN(connect_common(sockfd));
}
bool TCP_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd)
{
- return connect_common(sockfd);
+ DBUG_ENTER("TCP_Transpporter::connect_client_impl");
+ DBUG_RETURN(connect_common(sockfd));
}
bool TCP_Transporter::connect_common(NDB_SOCKET_TYPE sockfd)
@@ -119,6 +120,8 @@ bool TCP_Transporter::connect_common(NDB_SOCKET_TYPE sockfd)
theSocket = sockfd;
setSocketOptions();
setSocketNonBlocking(theSocket);
+ DBUG_PRINT("info", ("Successfully set-up TCP transporter to node %d",
+ remoteNodeId));
return true;
}
@@ -359,50 +362,56 @@ TCP_Transporter::doReceive() {
// Select-function must return the socket for read
// before this method is called
// It reads the external TCP/IP interface once
-
- const int nBytesRead = recv(theSocket,
- receiveBuffer.insertPtr, maxReceiveSize, 0);
-
- if (nBytesRead > 0) {
- receiveBuffer.sizeOfData += nBytesRead;
- receiveBuffer.insertPtr += nBytesRead;
+ Uint32 size = receiveBuffer.sizeOfBuffer - receiveBuffer.sizeOfData;
+ if(size > 0){
+ const int nBytesRead = recv(theSocket,
+ receiveBuffer.insertPtr,
+ size < maxReceiveSize ? size : maxReceiveSize,
+ 0);
- if(receiveBuffer.sizeOfData > receiveBuffer.sizeOfBuffer){
+ if (nBytesRead > 0) {
+ receiveBuffer.sizeOfData += nBytesRead;
+ receiveBuffer.insertPtr += nBytesRead;
+
+ if(receiveBuffer.sizeOfData > receiveBuffer.sizeOfBuffer){
#ifdef DEBUG_TRANSPORTER
- ndbout_c("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)",
- receiveBuffer.sizeOfData, receiveBuffer.sizeOfBuffer);
- ndbout_c("nBytesRead = %d", nBytesRead);
+ ndbout_c("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)",
+ receiveBuffer.sizeOfData, receiveBuffer.sizeOfBuffer);
+ ndbout_c("nBytesRead = %d", nBytesRead);
#endif
- ndbout_c("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)",
- receiveBuffer.sizeOfData, receiveBuffer.sizeOfBuffer);
- report_error(TE_INVALID_MESSAGE_LENGTH);
- return 0;
- }
-
- receiveCount ++;
- receiveSize += nBytesRead;
-
- if(receiveCount == reportFreq){
- reportReceiveLen(get_callback_obj(), remoteNodeId, receiveCount, receiveSize);
- receiveCount = 0;
- receiveSize = 0;
+ ndbout_c("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)",
+ receiveBuffer.sizeOfData, receiveBuffer.sizeOfBuffer);
+ report_error(TE_INVALID_MESSAGE_LENGTH);
+ return 0;
+ }
+
+ receiveCount ++;
+ receiveSize += nBytesRead;
+
+ if(receiveCount == reportFreq){
+ reportReceiveLen(get_callback_obj(), remoteNodeId, receiveCount, receiveSize);
+ receiveCount = 0;
+ receiveSize = 0;
+ }
+ return nBytesRead;
+ } else {
+#if defined DEBUG_TRANSPORTER
+ ndbout_c("Receive Failure(disconnect==%d) to node = %d nBytesSent = %d "
+ "errno = %d strerror = %s",
+ DISCONNECT_ERRNO(InetErrno, nBytesRead),
+ remoteNodeId, nBytesRead, InetErrno,
+ (char*)ndbstrerror(InetErrno));
+#endif
+ if(DISCONNECT_ERRNO(InetErrno, nBytesRead)){
+ // The remote node has closed down
+ doDisconnect();
+ report_disconnect(InetErrno);
+ }
}
return nBytesRead;
} else {
-#if defined DEBUG_TRANSPORTER
- ndbout_c("Receive Failure(disconnect==%d) to node = %d nBytesSent = %d "
- "errno = %d strerror = %s",
- DISCONNECT_ERRNO(InetErrno, nBytesRead),
- remoteNodeId, nBytesRead, InetErrno,
- (char*)ndbstrerror(InetErrno));
-#endif
- if(DISCONNECT_ERRNO(InetErrno, nBytesRead)){
- // The remote node has closed down
- doDisconnect();
- report_disconnect(InetErrno);
- }
+ return 0;
}
- return nBytesRead;
}
void
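
The rewritten doReceive() clamps each recv() to the free space remaining in receiveBuffer instead of always requesting maxReceiveSize, so the "sizeOfData > sizeOfBuffer" branch should no longer be reachable. A small POSIX sketch of that clamping with hypothetical names; RecvBuf is not the actual ReceiveBuffer type:

// Sketch: read at most the smaller of "free space" and "max chunk".
#include <sys/socket.h>
#include <algorithm>

struct RecvBuf {
  char*    insertPtr;
  unsigned sizeOfData;
  unsigned sizeOfBuffer;
};

int bounded_receive(int sock, RecvBuf& b, unsigned maxChunk)
{
  unsigned space = b.sizeOfBuffer - b.sizeOfData;
  if (space == 0)
    return 0;                                   // buffer full, read nothing
  int n = recv(sock, b.insertPtr, std::min(space, maxChunk), 0);
  if (n > 0) {
    b.sizeOfData += n;
    b.insertPtr  += n;
  }
  return n;                                     // <= 0 means error or disconnect
}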
diff --git a/ndb/src/common/transporter/TCP_Transporter.hpp b/ndb/src/common/transporter/TCP_Transporter.hpp
index 958cfde03a1..48046310bf8 100644
--- a/ndb/src/common/transporter/TCP_Transporter.hpp
+++ b/ndb/src/common/transporter/TCP_Transporter.hpp
@@ -52,8 +52,7 @@ private:
int r_port,
NodeId lHostId,
NodeId rHostId,
- int byteorder,
- bool compression, bool checksum, bool signalId,
+ bool checksum, bool signalId,
Uint32 reportFreq = 4096);
// Disconnect, delete send buffers and receive buffer
diff --git a/ndb/src/common/transporter/Transporter.cpp b/ndb/src/common/transporter/Transporter.cpp
index 7a469252c00..e68bc86718e 100644
--- a/ndb/src/common/transporter/Transporter.cpp
+++ b/ndb/src/common/transporter/Transporter.cpp
@@ -32,7 +32,7 @@ Transporter::Transporter(TransporterRegistry &t_reg,
NodeId rNodeId,
int _byteorder,
bool _compression, bool _checksum, bool _signalId)
- : m_r_port(r_port), localNodeId(lNodeId), remoteNodeId(rNodeId),
+ : m_r_port(r_port), remoteNodeId(rNodeId), localNodeId(lNodeId),
isServer(lNodeId < rNodeId),
m_packer(_signalId, _checksum),
m_transporter_registry(t_reg)
diff --git a/ndb/src/common/transporter/TransporterRegistry.cpp b/ndb/src/common/transporter/TransporterRegistry.cpp
index 01f1f74f053..cacbbed00f1 100644
--- a/ndb/src/common/transporter/TransporterRegistry.cpp
+++ b/ndb/src/common/transporter/TransporterRegistry.cpp
@@ -15,6 +15,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <ndb_global.h>
+#include <my_pthread.h>
#include <TransporterRegistry.hpp>
#include "TransporterInternalDefinitions.hpp"
@@ -48,9 +49,10 @@
SocketServer::Session * TransporterService::newSession(NDB_SOCKET_TYPE sockfd)
{
+ DBUG_ENTER("SocketServer::Session * TransporterService::newSession");
if (m_auth && !m_auth->server_authenticate(sockfd)){
NDB_CLOSE_SOCKET(sockfd);
- return 0;
+ DBUG_RETURN(0);
}
{
@@ -60,27 +62,32 @@ SocketServer::Session * TransporterService::newSession(NDB_SOCKET_TYPE sockfd)
char buf[256];
if (s_input.gets(buf, 256) == 0) {
NDB_CLOSE_SOCKET(sockfd);
- return 0;
+ DBUG_PRINT("error", ("Could not get node id from client"));
+ DBUG_RETURN(0);
}
if (sscanf(buf, "%d", &nodeId) != 1) {
NDB_CLOSE_SOCKET(sockfd);
- return 0;
+ DBUG_PRINT("error", ("Error in node id from client"));
+ DBUG_RETURN(0);
}
//check that nodeid is valid and that there is an allocated transporter
- if ( nodeId < 0 || nodeId >= m_transporter_registry->maxTransporters) {
- NDB_CLOSE_SOCKET(sockfd);
- return 0;
+ if ( nodeId < 0 || nodeId >= (int)m_transporter_registry->maxTransporters) {
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_PRINT("error", ("Node id out of range from client"));
+ DBUG_RETURN(0);
}
if (m_transporter_registry->theTransporters[nodeId] == 0) {
NDB_CLOSE_SOCKET(sockfd);
- return 0;
+ DBUG_PRINT("error", ("No transporter for this node id from client"));
+ DBUG_RETURN(0);
}
//check that the transporter should be connected
if (m_transporter_registry->performStates[nodeId] != TransporterRegistry::CONNECTING) {
NDB_CLOSE_SOCKET(sockfd);
- return 0;
+ DBUG_PRINT("error", ("Transporter in wrong state for this node id from client"));
+ DBUG_RETURN(0);
}
Transporter *t= m_transporter_registry->theTransporters[nodeId];
@@ -93,14 +100,13 @@ SocketServer::Session * TransporterService::newSession(NDB_SOCKET_TYPE sockfd)
t->connect_server(sockfd);
}
- return 0;
+ DBUG_RETURN(0);
}
TransporterRegistry::TransporterRegistry(void * callback,
unsigned _maxTransporters,
unsigned sizeOfLongSignalMemory) {
- m_transporter_service= 0;
nodeIdSpecified = false;
maxTransporters = _maxTransporters;
sendCounter = 1;
@@ -209,8 +215,6 @@ TransporterRegistry::createTransporter(TCP_TransporterConfiguration *config) {
config->port,
localNodeId,
config->remoteNodeId,
- config->byteOrder,
- config->compression,
config->checksum,
config->signalId);
if (t == NULL)
@@ -264,8 +268,6 @@ TransporterRegistry::createTransporter(OSE_TransporterConfiguration *conf) {
conf->localHostName,
conf->remoteNodeId,
conf->remoteHostName,
- conf->byteOrder,
- conf->compression,
conf->checksum,
conf->signalId);
if (t == NULL)
@@ -306,15 +308,17 @@ TransporterRegistry::createTransporter(SCI_TransporterConfiguration *config) {
if(theTransporters[config->remoteNodeId] != NULL)
return false;
- SCI_Transporter * t = new SCI_Transporter(config->sendLimit,
+ SCI_Transporter * t = new SCI_Transporter(*this,
+ config->localHostName,
+ config->remoteHostName,
+ config->port,
+ config->sendLimit,
config->bufferSize,
config->nLocalAdapters,
config->remoteSciNodeId0,
config->remoteSciNodeId1,
localNodeId,
config->remoteNodeId,
- config->byteOrder,
- config->compression,
config->checksum,
config->signalId);
@@ -357,7 +361,6 @@ TransporterRegistry::createTransporter(SHM_TransporterConfiguration *config) {
config->port,
localNodeId,
config->remoteNodeId,
- config->compression,
config->checksum,
config->signalId,
config->shmKey,
@@ -855,8 +858,8 @@ TransporterRegistry::performReceive(){
if(t->isConnected() && t->checkConnected()){
Uint32 * readPtr, * eodPtr;
t->getReceivePtr(&readPtr, &eodPtr);
- readPtr = unpack(readPtr, eodPtr, nodeId, ioStates[nodeId]);
- t->updateReceivePtr(readPtr);
+ Uint32 *newPtr = unpack(readPtr, eodPtr, nodeId, ioStates[nodeId]);
+ t->updateReceivePtr(newPtr);
}
}
}
@@ -870,8 +873,8 @@ TransporterRegistry::performReceive(){
if(t->isConnected() && t->checkConnected()){
Uint32 * readPtr, * eodPtr;
t->getReceivePtr(&readPtr, &eodPtr);
- readPtr = unpack(readPtr, eodPtr, nodeId, ioStates[nodeId]);
- t->updateReceivePtr(readPtr);
+ Uint32 *newPtr = unpack(readPtr, eodPtr, nodeId, ioStates[nodeId]);
+ t->updateReceivePtr(newPtr);
}
}
}
@@ -1023,7 +1026,9 @@ TransporterRegistry::setIOState(NodeId nodeId, IOState state) {
static void *
run_start_clients_C(void * me)
{
+ my_thread_init();
((TransporterRegistry*) me)->start_clients_thread();
+ my_thread_end();
NdbThread_Exit(0);
return me;
}
@@ -1106,6 +1111,7 @@ TransporterRegistry::update_connections()
void
TransporterRegistry::start_clients_thread()
{
+ DBUG_ENTER("TransporterRegistry::start_clients_thread");
while (m_run_start_clients_thread) {
NdbSleep_MilliSleep(100);
for (int i= 0, n= 0; n < nTransporters && m_run_start_clients_thread; i++){
@@ -1129,6 +1135,7 @@ TransporterRegistry::start_clients_thread()
}
}
}
+ DBUG_VOID_RETURN;
}
bool
@@ -1159,55 +1166,67 @@ TransporterRegistry::stop_clients()
return true;
}
-bool
-TransporterRegistry::start_service(SocketServer& socket_server)
+void
+TransporterRegistry::add_transporter_interface(const char *interface, unsigned short port)
{
-#if 0
- for (int i= 0, n= 0; n < nTransporters; i++){
- Transporter * t = theTransporters[i];
- if (!t)
+ DBUG_ENTER("TransporterRegistry::add_transporter_interface");
+ DBUG_PRINT("enter",("interface=%s, port= %d", interface, port));
+ if (interface && strlen(interface) == 0)
+ interface= 0;
+
+ for (unsigned i= 0; i < m_transporter_interface.size(); i++)
+ {
+ Transporter_interface &tmp= m_transporter_interface[i];
+ if (port != tmp.m_service_port)
continue;
- n++;
- if (t->isServer) {
- t->m_service = new TransporterService(new SocketAuthSimple("ndbd passwd"));
- if(!socket_server.setup(t->m_service, t->m_r_port, 0))
- {
- ndbout_c("Unable to setup transporter service port: %d!\n"
- "Please check if the port is already used,\n"
- "(perhaps a mgmtsrvrserver is already running)",
- m_service_port);
- delete t->m_service;
- return false;
- }
+ if (interface != 0 && tmp.m_interface != 0 &&
+ strcmp(interface, tmp.m_interface) == 0)
+ {
+ DBUG_VOID_RETURN; // found match, no need to insert
+ }
+ if (interface == 0 && tmp.m_interface == 0)
+ {
+ DBUG_VOID_RETURN; // found match, no need to insert
}
}
-#endif
-
- if (m_service_port != 0) {
+ Transporter_interface t;
+ t.m_service_port= port;
+ t.m_interface= interface;
+ m_transporter_interface.push_back(t);
+ DBUG_PRINT("exit",("interface and port added"));
+ DBUG_VOID_RETURN;
+}
- m_transporter_service = new TransporterService(new SocketAuthSimple("ndbd", "ndbd passwd"));
+bool
+TransporterRegistry::start_service(SocketServer& socket_server)
+{
+ if (m_transporter_interface.size() > 0 && nodeIdSpecified != true)
+ {
+ ndbout_c("TransporterRegistry::startReceiving: localNodeId not specified");
+ return false;
+ }
- if (nodeIdSpecified != true) {
- ndbout_c("TransporterRegistry::startReceiving: localNodeId not specified");
+ for (unsigned i= 0; i < m_transporter_interface.size(); i++)
+ {
+ Transporter_interface &t= m_transporter_interface[i];
+ if (t.m_service_port == 0)
+ {
+ continue;
+ }
+ TransporterService *transporter_service =
+ new TransporterService(new SocketAuthSimple("ndbd", "ndbd passwd"));
+ if(!socket_server.setup(transporter_service,
+ t.m_service_port, t.m_interface))
+ {
+ ndbout_c("Unable to setup transporter service port: %s:%d!\n"
+ "Please check if the port is already used,\n"
+ "(perhaps the node is already running)",
+ t.m_interface ? t.m_interface : "*", t.m_service_port);
+ delete transporter_service;
return false;
}
-
- //m_interface_name = "ndbd";
- m_interface_name = 0;
-
- if(!socket_server.setup(m_transporter_service, m_service_port, m_interface_name))
- {
- ndbout_c("Unable to setup transporter service port: %d!\n"
- "Please check if the port is already used,\n"
- "(perhaps a mgmtsrvrserver is already running)",
- m_service_port);
- delete m_transporter_service;
- return false;
- }
- m_transporter_service->setTransporterRegistry(this);
- } else
- m_transporter_service= 0;
-
+ transporter_service->setTransporterRegistry(this);
+ }
return true;
}
@@ -1281,3 +1300,5 @@ NdbOut & operator <<(NdbOut & out, SignalHeader & sh){
out << "trace: " << (int)sh.theTrace << endl;
return out;
}
+
+template class Vector<TransporterRegistry::Transporter_interface>;
diff --git a/ndb/src/common/transporter/perftest/perfTransporterTest.cpp b/ndb/src/common/transporter/perftest/perfTransporterTest.cpp
index d33221c2835..71df9f12a4c 100644
--- a/ndb/src/common/transporter/perftest/perfTransporterTest.cpp
+++ b/ndb/src/common/transporter/perftest/perfTransporterTest.cpp
@@ -276,7 +276,7 @@ printReport(TestPhase & p){
char buf[255];
if(p.signalSize != 0){
- snprintf(buf, 255,
+ BaseString::snprintf(buf, 255,
"%d\t%d\t%s\t%s\t%s\t%s\t%d\t%d",
p.noOfSignals,
4*p.signalSize,
@@ -287,7 +287,7 @@ printReport(TestPhase & p){
(int)(p.sendLenBytes / (p.sendCount == 0 ? 1 : p.sendCount)),
(int)(p.recvLenBytes / (p.recvCount == 0 ? 1 : p.recvCount)));
} else {
- snprintf(buf, 255,
+ BaseString::snprintf(buf, 255,
"%d\trand\t%s\t%s\t%s\t%s\t%d\t%d",
p.noOfSignals,
st,
diff --git a/ndb/src/common/transporter/priotest/prioTransporterTest.cpp b/ndb/src/common/transporter/priotest/prioTransporterTest.cpp
index 0fce6aaad39..6c5623a49a6 100644
--- a/ndb/src/common/transporter/priotest/prioTransporterTest.cpp
+++ b/ndb/src/common/transporter/priotest/prioTransporterTest.cpp
@@ -375,7 +375,7 @@ printReport(TestPhase & p){
char buf[255];
if(p.signalSize != 0){
- snprintf(buf, 255,
+ BaseString::snprintf(buf, 255,
"%d\t%d\t%d\t%s\t%s\t%s\t%d\t%d\t%d\t%d",
p.noOfSignals,
p.signalSize,
@@ -388,7 +388,7 @@ printReport(TestPhase & p){
(int)(p.totTimePrioA / p.loopCount),
(int)(p.bytesSentBeforePrioA));
} else {
- snprintf(buf, 255,
+ BaseString::snprintf(buf, 255,
"%d\trand\t4*rand\t%s\t%s\t%s\t%d\t%d\t%d\t%d",
p.noOfSignals,
st,
diff --git a/ndb/src/common/util/BaseString.cpp b/ndb/src/common/util/BaseString.cpp
index 8b7df485f77..dbff44c377d 100644
--- a/ndb/src/common/util/BaseString.cpp
+++ b/ndb/src/common/util/BaseString.cpp
@@ -17,6 +17,7 @@
/* -*- c-basic-offset: 4; -*- */
#include <ndb_global.h>
#include <BaseString.hpp>
+#include <basestring_vsnprintf.h>
BaseString::BaseString()
{
@@ -127,14 +128,14 @@ BaseString::assfmt(const char *fmt, ...)
* when called as vsnprintf(NULL, 0, ...).
*/
va_start(ap, fmt);
- l = vsnprintf(buf, sizeof(buf), fmt, ap) + 1;
+ l = basestring_vsnprintf(buf, sizeof(buf), fmt, ap) + 1;
va_end(ap);
if(l > (int)m_len) {
delete[] m_chr;
m_chr = new char[l];
}
va_start(ap, fmt);
- vsnprintf(m_chr, l, fmt, ap);
+ basestring_vsnprintf(m_chr, l, fmt, ap);
va_end(ap);
m_len = strlen(m_chr);
return *this;
@@ -152,11 +153,11 @@ BaseString::appfmt(const char *fmt, ...)
* when called as vsnprintf(NULL, 0, ...).
*/
va_start(ap, fmt);
- l = vsnprintf(buf, sizeof(buf), fmt, ap) + 1;
+ l = basestring_vsnprintf(buf, sizeof(buf), fmt, ap) + 1;
va_end(ap);
char *tmp = new char[l];
va_start(ap, fmt);
- vsnprintf(tmp, l, fmt, ap);
+ basestring_vsnprintf(tmp, l, fmt, ap);
va_end(ap);
append(tmp);
delete[] tmp;
@@ -335,6 +336,22 @@ BaseString::trim(char * str, const char * delim){
return str;
}
+int
+BaseString::vsnprintf(char *str, size_t size, const char *format, va_list ap)
+{
+ return(basestring_vsnprintf(str, size, format, ap));
+}
+
+int
+BaseString::snprintf(char *str, size_t size, const char *format, ...)
+{
+ va_list ap;
+ va_start(ap, format);
+ int ret= basestring_vsnprintf(str, size, format, ap);
+ va_end(ap);
+ return(ret);
+}
+
#ifdef TEST_BASE_STRING
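
assfmt()/appfmt() above now format twice through basestring_vsnprintf(): once against a tiny stack buffer just to learn the required length, then again into a correctly sized allocation, while the new BaseString::snprintf/vsnprintf statics give callers one portable entry point. A sketch of that measure-then-allocate idiom, assuming a C99-conformant vsnprintf; this is not the BaseString implementation itself:

#include <cstdarg>
#include <cstdio>

// Returns a heap-allocated formatted string; the caller must delete[] it.
char* format_alloc(const char* fmt, ...)
{
  char probe[1];                       // size probe only; contents discarded
  va_list ap;

  va_start(ap, fmt);
  int need = vsnprintf(probe, sizeof(probe), fmt, ap) + 1;  // length incl. NUL
  va_end(ap);

  char* out = new char[need];
  va_start(ap, fmt);                   // restart: ap cannot be reused after vsnprintf
  vsnprintf(out, need, fmt, ap);
  va_end(ap);
  return out;
}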
diff --git a/ndb/src/common/util/ConfigValues.cpp b/ndb/src/common/util/ConfigValues.cpp
index 8a14882550c..5c4b17c73ca 100644
--- a/ndb/src/common/util/ConfigValues.cpp
+++ b/ndb/src/common/util/ConfigValues.cpp
@@ -1,9 +1,6 @@
+
+#include <ndb_global.h>
#include <ConfigValues.hpp>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <assert.h>
-#include <new>
#include <NdbOut.hpp>
#include <NdbTCP.h>
diff --git a/ndb/src/common/util/File.cpp b/ndb/src/common/util/File.cpp
index 22d262a0d27..f3faa8c4f7f 100644
--- a/ndb/src/common/util/File.cpp
+++ b/ndb/src/common/util/File.cpp
@@ -83,7 +83,7 @@ File_class::File_class(const char* aFileName, const char* mode) :
m_file(NULL),
m_fileMode(mode)
{
- ::snprintf(m_fileName, MAX_FILE_NAME_SIZE, aFileName);
+ BaseString::snprintf(m_fileName, MAX_FILE_NAME_SIZE, aFileName);
}
bool
@@ -99,7 +99,7 @@ File_class::open(const char* aFileName, const char* mode)
/**
* Only copy if it's not the same string
*/
- ::snprintf(m_fileName, MAX_FILE_NAME_SIZE, aFileName);
+ BaseString::snprintf(m_fileName, MAX_FILE_NAME_SIZE, aFileName);
}
m_fileMode = mode;
bool rc = true;
diff --git a/ndb/src/common/util/Makefile.am b/ndb/src/common/util/Makefile.am
index 678added01e..0235adae7c9 100644
--- a/ndb/src/common/util/Makefile.am
+++ b/ndb/src/common/util/Makefile.am
@@ -9,7 +9,7 @@ libgeneral_la_SOURCES = \
NdbSqlUtil.cpp new.cpp \
uucode.c random.c getarg.c version.c \
strdup.c strlcat.c strlcpy.c \
- ConfigValues.cpp
+ ConfigValues.cpp ndb_init.c basestring_vsnprintf.c
include $(top_srcdir)/ndb/config/common.mk.am
include $(top_srcdir)/ndb/config/type_util.mk.am
diff --git a/ndb/src/common/util/NdbErrHnd.cpp b/ndb/src/common/util/NdbErrHnd.cpp
index f1c28a7bbdd..38a67f29853 100644
--- a/ndb/src/common/util/NdbErrHnd.cpp
+++ b/ndb/src/common/util/NdbErrHnd.cpp
@@ -346,53 +346,53 @@ extern "C" OSBOOLEAN ndb_err_hnd(bool user_called,
file_name = "ose_err.h";
}
- snprintf (error_message.header1,
+ BaseString::snprintf(error_message.header1,
BUFSIZE,
"This is the OSE Example System Error handler\r\n");
- snprintf (error_message.err_hnd_file,
+ BaseString::snprintf(error_message.err_hnd_file,
BUFSIZE,
"located in: " __FILE__ "\r\n");
- snprintf (error_message.header2,
+ BaseString::snprintf(error_message.header2,
BUFSIZE,
"An Error has been reported:\r\n");
if (user_called == (OSBOOLEAN) 0 ) {
- snprintf(error_message.user_called_line,
+ BaseString::snprintf(error_message.user_called_line,
BUFSIZE,
"user_called: 0x%x (Error detected by the kernel)\r\n",
user_called);
}
else {
- snprintf(error_message.user_called_line,
+ BaseString::snprintf(error_message.user_called_line,
BUFSIZE,
"user_called: 0x%x (Error detected by an application)\r\n",
user_called);
}
- snprintf (error_message.error_code_line,
+ BaseString::snprintf(error_message.error_code_line,
BUFSIZE,
"error code: 0x%08x\r\n",
error_code);
- snprintf (error_message.subcode_line,
+ BaseString::snprintf(error_message.subcode_line,
BUFSIZE,
" subcode: %s (0x%08x)\r\n",
subcode_mnemonic,
( subcode << 16));
- snprintf (error_message.product_line,
+ BaseString::snprintf(error_message.product_line,
BUFSIZE,
" product: %s\r\n",
product_name);
- snprintf (error_message.header_file_line,
+ BaseString::snprintf(error_message.header_file_line,
BUFSIZE,
" header file: %s\r\n",
file_name);
- snprintf (error_message.extra_line,
+ BaseString::snprintf(error_message.extra_line,
BUFSIZE,
"extra: 0x%08x\r\n",
extra);
@@ -401,22 +401,22 @@ extern "C" OSBOOLEAN ndb_err_hnd(bool user_called,
struct OS_pcb *pcb = get_pcb(current_process());
const char *process_name = &pcb->strings[pcb->name];
- snprintf(error_message.current_process_id_line,
+ BaseString::snprintf(error_message.current_process_id_line,
BUFSIZE,
"Current Process: 0x%08x\r\n",
current_process());
- snprintf(error_message.current_process_name_line,
+ BaseString::snprintf(error_message.current_process_name_line,
BUFSIZE,
"Process Name: %s\r\n",
process_name);
- snprintf(error_message.file_line,
+ BaseString::snprintf(error_message.file_line,
BUFSIZE,
"File: %s\r\n",
&pcb->strings[pcb->file]);
- snprintf(error_message.line_line,
+ BaseString::snprintf(error_message.line_line,
BUFSIZE,
"Line: %d\r\n",
pcb->line);
@@ -452,7 +452,7 @@ extern "C" OSBOOLEAN ndb_err_hnd(bool user_called,
char *expr = ((char **)extra)[0];
char *file = ((char **)extra)[1];
unsigned line = ((unsigned *)extra)[2];
- snprintf(assert_line, BUFSIZE, "Assertion Failed: %s:%u: %s\r\n", file, line, expr);
+ BaseString::snprintf(assert_line, BUFSIZE, "Assertion Failed: %s:%u: %s\r\n", file, line, expr);
ndbout << assert_line;
}
}
@@ -467,13 +467,13 @@ extern "C" OSBOOLEAN ndb_err_hnd(bool user_called,
const char *rcv_name = &rcv->strings[rcv->name];
struct OS_pcb *snd = get_pcb(snd_);
const char *snd_name = &snd->strings[snd->name];
- snprintf(unknown_signal_line, BUFSIZE,
+ BaseString::snprintf(unknown_signal_line, BUFSIZE,
"Unknown Signal Received\r\n");
- snprintf(unknown_signal_line, BUFSIZE,
+ BaseString::snprintf(unknown_signal_line, BUFSIZE,
"Signal Number: 0x%08lx\r\n", signo);
- snprintf(unknown_signal_line, BUFSIZE,
+ BaseString::snprintf(unknown_signal_line, BUFSIZE,
"Sending Process: 0x%08lx (%s))\r\n", snd_, snd_name);
- snprintf(unknown_signal_line, BUFSIZE,
+ BaseString::snprintf(unknown_signal_line, BUFSIZE,
"Receiving Process: 0x%08lx (%s))\r\n", rcv_, rcv_name);
free_buf((union SIGNAL **)&rcv);
free_buf((union SIGNAL **)&snd); }
diff --git a/ndb/src/common/util/NdbOut.cpp b/ndb/src/common/util/NdbOut.cpp
index 6d76cf22402..fa74cb364f3 100644
--- a/ndb/src/common/util/NdbOut.cpp
+++ b/ndb/src/common/util/NdbOut.cpp
@@ -102,7 +102,7 @@ NdbOut::print(const char * fmt, ...){
va_start(ap, fmt);
if (fmt != 0)
- vsnprintf(buf, sizeof(buf)-1, fmt, ap);
+ BaseString::vsnprintf(buf, sizeof(buf)-1, fmt, ap);
ndbout << buf;
va_end(ap);
}
@@ -114,7 +114,7 @@ NdbOut::println(const char * fmt, ...){
va_start(ap, fmt);
if (fmt != 0)
- vsnprintf(buf, sizeof(buf)-1, fmt, ap);
+ BaseString::vsnprintf(buf, sizeof(buf)-1, fmt, ap);
ndbout << buf << endl;
va_end(ap);
}
@@ -127,7 +127,7 @@ ndbout_c(const char * fmt, ...){
va_start(ap, fmt);
if (fmt != 0)
- vsnprintf(buf, sizeof(buf)-1, fmt, ap);
+ BaseString::vsnprintf(buf, sizeof(buf)-1, fmt, ap);
ndbout << buf << endl;
va_end(ap);
}
diff --git a/ndb/src/common/util/NdbSqlUtil.cpp b/ndb/src/common/util/NdbSqlUtil.cpp
index 9d05fc7fb02..6e4e5919e43 100644
--- a/ndb/src/common/util/NdbSqlUtil.cpp
+++ b/ndb/src/common/util/NdbSqlUtil.cpp
@@ -167,7 +167,7 @@ NdbSqlUtil::m_typeList[] = {
};
const NdbSqlUtil::Type&
-NdbSqlUtil::type(Uint32 typeId)
+NdbSqlUtil::getType(Uint32 typeId)
{
if (typeId < sizeof(m_typeList) / sizeof(m_typeList[0]) &&
m_typeList[typeId].m_typeId != Type::Undefined) {
@@ -176,132 +176,470 @@ NdbSqlUtil::type(Uint32 typeId)
return m_typeList[Type::Undefined];
}
+const NdbSqlUtil::Type&
+NdbSqlUtil::getTypeBinary(Uint32 typeId)
+{
+ switch (typeId) {
+ case Type::Char:
+ typeId = Type::Binary;
+ break;
+ case Type::Varchar:
+ typeId = Type::Varbinary;
+ break;
+ case Type::Text:
+ typeId = Type::Blob;
+ break;
+ default:
+ break;
+ }
+ return getType(typeId);
+}
+
// compare
int
-NdbSqlUtil::cmpTinyint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpTinyint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- return cmp(Type::Tinyint, p1, p2, full, size);
+ assert(full >= size && size > 0);
+ union { Uint32 p[1]; Int8 v; } u1, u2;
+ u1.p[0] = p1[0];
+ u2.p[0] = p2[0];
+ if (u1.v < u2.v)
+ return -1;
+ if (u1.v > u2.v)
+ return +1;
+ return 0;
}
int
-NdbSqlUtil::cmpTinyunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpTinyunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- return cmp(Type::Tinyunsigned, p1, p2, full, size);
+ assert(full >= size && size > 0);
+ union { Uint32 p[1]; Uint8 v; } u1, u2;
+ u1.p[0] = p1[0];
+ u2.p[0] = p2[0];
+ if (u1.v < u2.v)
+ return -1;
+ if (u1.v > u2.v)
+ return +1;
+ return 0;
}
int
-NdbSqlUtil::cmpSmallint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpSmallint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- return cmp(Type::Smallint, p1, p2, full, size);
+ assert(full >= size && size > 0);
+ union { Uint32 p[1]; Int16 v; } u1, u2;
+ u1.p[0] = p1[0];
+ u2.p[0] = p2[0];
+ if (u1.v < u2.v)
+ return -1;
+ if (u1.v > u2.v)
+ return +1;
+ return 0;
}
int
-NdbSqlUtil::cmpSmallunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpSmallunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- return cmp(Type::Smallunsigned, p1, p2, full, size);
+ assert(full >= size && size > 0);
+ union { Uint32 p[1]; Uint16 v; } u1, u2;
+ u1.p[0] = p1[0];
+ u2.p[0] = p2[0];
+ if (u1.v < u2.v)
+ return -1;
+ if (u1.v > u2.v)
+ return +1;
+ return 0;
}
int
-NdbSqlUtil::cmpMediumint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpMediumint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- return cmp(Type::Mediumint, p1, p2, full, size);
+ assert(full >= size && size > 0);
+ union { const Uint32* p; const unsigned char* v; } u1, u2;
+ u1.p = p1;
+ u2.p = p2;
+ Int32 v1 = sint3korr(u1.v);
+ Int32 v2 = sint3korr(u2.v);
+ if (v1 < v2)
+ return -1;
+ if (v1 > v2)
+ return +1;
+ return 0;
}
int
-NdbSqlUtil::cmpMediumunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpMediumunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- return cmp(Type::Mediumunsigned, p1, p2, full, size);
+ assert(full >= size && size > 0);
+ union { const Uint32* p; const unsigned char* v; } u1, u2;
+ u1.p = p1;
+ u2.p = p2;
+ Uint32 v1 = uint3korr(u1.v);
+ Uint32 v2 = uint3korr(u2.v);
+ if (v1 < v2)
+ return -1;
+ if (v1 > v2)
+ return +1;
+ return 0;
}
int
-NdbSqlUtil::cmpInt(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpInt(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- return cmp(Type::Int, p1, p2, full, size);
+ assert(full >= size && size > 0);
+ union { Uint32 p[1]; Int32 v; } u1, u2;
+ u1.p[0] = p1[0];
+ u2.p[0] = p2[0];
+ if (u1.v < u2.v)
+ return -1;
+ if (u1.v > u2.v)
+ return +1;
+ return 0;
}
int
-NdbSqlUtil::cmpUnsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpUnsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- return cmp(Type::Unsigned, p1, p2, full, size);
+ assert(full >= size && size > 0);
+ union { Uint32 p[1]; Uint32 v; } u1, u2;
+ u1.v = p1[0];
+ u2.v = p2[0];
+ if (u1.v < u2.v)
+ return -1;
+ if (u1.v > u2.v)
+ return +1;
+ return 0;
}
int
-NdbSqlUtil::cmpBigint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpBigint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- return cmp(Type::Bigint, p1, p2, full, size);
+ assert(full >= size && size > 0);
+ if (size >= 2) {
+ union { Uint32 p[2]; Int64 v; } u1, u2;
+ u1.p[0] = p1[0];
+ u1.p[1] = p1[1];
+ u2.p[0] = p2[0];
+ u2.p[1] = p2[1];
+ if (u1.v < u2.v)
+ return -1;
+ if (u1.v > u2.v)
+ return +1;
+ return 0;
+ }
+ return CmpUnknown;
}
int
-NdbSqlUtil::cmpBigunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpBigunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- return cmp(Type::Bigunsigned, p1, p2, full, size);
+ assert(full >= size && size > 0);
+ if (size >= 2) {
+ union { Uint32 p[2]; Uint64 v; } u1, u2;
+ u1.p[0] = p1[0];
+ u1.p[1] = p1[1];
+ u2.p[0] = p2[0];
+ u2.p[1] = p2[1];
+ if (u1.v < u2.v)
+ return -1;
+ if (u1.v > u2.v)
+ return +1;
+ return 0;
+ }
+ return CmpUnknown;
}
int
-NdbSqlUtil::cmpFloat(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpFloat(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- return cmp(Type::Float, p1, p2, full, size);
+ assert(full >= size && size > 0);
+ union { Uint32 p[1]; float v; } u1, u2;
+ u1.p[0] = p1[0];
+ u2.p[0] = p2[0];
+ // no format check
+ if (u1.v < u2.v)
+ return -1;
+ if (u1.v > u2.v)
+ return +1;
+ return 0;
}
int
-NdbSqlUtil::cmpDouble(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpDouble(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- return cmp(Type::Double, p1, p2, full, size);
+ assert(full >= size && size > 0);
+ if (size >= 2) {
+ union { Uint32 p[2]; double v; } u1, u2;
+ u1.p[0] = p1[0];
+ u1.p[1] = p1[1];
+ u2.p[0] = p2[0];
+ u2.p[1] = p2[1];
+ // no format check
+ if (u1.v < u2.v)
+ return -1;
+ if (u1.v > u2.v)
+ return +1;
+ return 0;
+ }
+ return CmpUnknown;
}
int
-NdbSqlUtil::cmpDecimal(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpDecimal(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- return cmp(Type::Decimal, p1, p2, full, size);
+ assert(full >= size && size > 0);
+ // not used by MySQL or NDB
+ assert(false);
+ return 0;
}
int
-NdbSqlUtil::cmpChar(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpChar(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- return cmp(Type::Char, p1, p2, full, size);
+ // collation does not work on prefix for some charsets
+ assert(full == size && size > 0);
+ /*
+ * Char is blank-padded to length and null-padded to word size.
+ */
+ union { const Uint32* p; const uchar* v; } u1, u2;
+ u1.p = p1;
+ u2.p = p2;
+ // not const in MySQL
+ CHARSET_INFO* cs = (CHARSET_INFO*)(info);
+ // length in bytes including null padding to Uint32
+ uint l1 = (full << 2);
+ int k = (*cs->coll->strnncollsp)(cs, u1.v, l1, u2.v, l1);
+ return k < 0 ? -1 : k > 0 ? +1 : 0;
}
int
-NdbSqlUtil::cmpVarchar(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpVarchar(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- return cmp(Type::Varchar, p1, p2, full, size);
+ assert(full >= size && size > 0);
+ /*
+ * Varchar is not allowed to contain a null byte and the value is
+ * null-padded. Therefore comparison does not need to use the length.
+ *
+ * Not used before MySQL 5.0. Format is likely to change. Handle
+ * only binary collation for now.
+ */
+ union { const Uint32* p; const char* v; } u1, u2;
+ u1.p = p1;
+ u2.p = p2;
+ // skip length in first 2 bytes
+ int k = strncmp(u1.v + 2, u2.v + 2, (size << 2) - 2);
+ return k < 0 ? -1 : k > 0 ? +1 : full == size ? 0 : CmpUnknown;
}
int
-NdbSqlUtil::cmpBinary(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpBinary(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- return cmp(Type::Binary, p1, p2, full, size);
+ assert(full >= size && size > 0);
+ /*
+ * Binary data of full length. Compare bytewise.
+ */
+ union { const Uint32* p; const unsigned char* v; } u1, u2;
+ u1.p = p1;
+ u2.p = p2;
+ int k = memcmp(u1.v, u2.v, size << 2);
+ return k < 0 ? -1 : k > 0 ? +1 : full == size ? 0 : CmpUnknown;
}
int
-NdbSqlUtil::cmpVarbinary(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpVarbinary(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- return cmp(Type::Varbinary, p1, p2, full, size);
+ assert(full >= size && size > 0);
+ /*
+ * Binary data of variable length padded with nulls. The comparison
+ * does not need to use the length.
+ *
+ * Not used before MySQL 5.0. Format is likely to change.
+ */
+ union { const Uint32* p; const unsigned char* v; } u1, u2;
+ u1.p = p1;
+ u2.p = p2;
+ // skip length in first 2 bytes
+ int k = memcmp(u1.v + 2, u2.v + 2, (size << 2) - 2);
+ return k < 0 ? -1 : k > 0 ? +1 : full == size ? 0 : CmpUnknown;
}
int
-NdbSqlUtil::cmpDatetime(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpDatetime(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- return cmp(Type::Datetime, p1, p2, full, size);
+ assert(full >= size && size > 0);
+ /*
+ * Datetime is CC YY MM DD hh mm ss \0
+ *
+ * Not used via MySQL.
+ */
+ union { const Uint32* p; const unsigned char* v; } u1, u2;
+ u1.p = p1;
+ u2.p = p2;
+ // no format check
+ int k = memcmp(u1.v, u2.v, 4);
+ if (k != 0)
+ return k < 0 ? -1 : +1;
+ if (size >= 2) {
+ k = memcmp(u1.v + 4, u2.v + 4, 4);
+ return k < 0 ? -1 : k > 0 ? +1 : 0;
+ }
+ return CmpUnknown;
}
int
-NdbSqlUtil::cmpTimespec(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpTimespec(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- return cmp(Type::Timespec, p1, p2, full, size);
+ assert(full >= size && size > 0);
+ /*
+ * Timespec is CC YY MM DD hh mm ss \0 NN NN NN NN
+ *
+ * Not used via MySQL.
+ */
+ union { const Uint32* p; const unsigned char* v; } u1, u2;
+ u1.p = p1;
+ u2.p = p2;
+ // no format check
+ int k = memcmp(u1.v, u2.v, 4);
+ if (k != 0)
+ return k < 0 ? -1 : +1;
+ if (size >= 2) {
+ k = memcmp(u1.v + 4, u2.v + 4, 4);
+ if (k != 0)
+ return k < 0 ? -1 : +1;
+ if (size >= 3) {
+ Uint32 n1 = *(const Uint32*)(u1.v + 8);
+ Uint32 n2 = *(const Uint32*)(u2.v + 8);
+ if (n1 < n2)
+ return -1;
+      if (n1 > n2)
+ return +1;
+ return 0;
+ }
+ }
+ return CmpUnknown;
}
int
-NdbSqlUtil::cmpBlob(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpBlob(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- return cmp(Type::Blob, p1, p2, full, size);
+ assert(full >= size && size > 0);
+ /*
+ * Blob comparison is on the inline bytes (null padded).
+ */
+ const unsigned head = NDB_BLOB_HEAD_SIZE;
+ // skip blob head
+ if (size >= head + 1) {
+ union { const Uint32* p; const unsigned char* v; } u1, u2;
+ u1.p = p1 + head;
+ u2.p = p2 + head;
+ int k = memcmp(u1.v, u2.v, (size - head) << 2);
+ return k < 0 ? -1 : k > 0 ? +1 : full == size ? 0 : CmpUnknown;
+ }
+ return CmpUnknown;
}
int
-NdbSqlUtil::cmpText(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpText(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+{
+ // collation does not work on prefix for some charsets
+ assert(full == size && size > 0);
+ /*
+ * Text comparison is on the inline bytes (blank padded). Currently
+ * not supported for multi-byte charsets.
+ */
+ const unsigned head = NDB_BLOB_HEAD_SIZE;
+ // skip blob head
+ if (size >= head + 1) {
+ union { const Uint32* p; const uchar* v; } u1, u2;
+ u1.p = p1 + head;
+ u2.p = p2 + head;
+ // not const in MySQL
+ CHARSET_INFO* cs = (CHARSET_INFO*)(info);
+ // length in bytes including null padding to Uint32
+ uint l1 = (full << 2);
+ int k = (*cs->coll->strnncollsp)(cs, u1.v, l1, u2.v, l1);
+ return k < 0 ? -1 : k > 0 ? +1 : 0;
+ }
+ return CmpUnknown;
+}
+
+// check charset
+
+bool
+NdbSqlUtil::usable_in_pk(Uint32 typeId, const void* info)
+{
+ const Type& type = getType(typeId);
+ switch (type.m_typeId) {
+ case Type::Undefined:
+ break;
+ case Type::Char:
+ {
+ const CHARSET_INFO *cs = (const CHARSET_INFO*)info;
+ return
+ cs != 0 &&
+ cs->cset != 0 &&
+ cs->coll != 0 &&
+ cs->coll->strnxfrm != 0 &&
+ cs->strxfrm_multiply == 1; // current limitation
+ }
+ break;
+ case Type::Varchar:
+ return true; // Varchar not used via MySQL
+ case Type::Blob:
+ case Type::Text:
+ break;
+ default:
+ return true;
+ }
+ return false;
+}
+
+bool
+NdbSqlUtil::usable_in_hash_index(Uint32 typeId, const void* info)
+{
+ return usable_in_pk(typeId, info);
+}
+
+bool
+NdbSqlUtil::usable_in_ordered_index(Uint32 typeId, const void* info)
{
- return cmp(Type::Text, p1, p2, full, size);
+ const Type& type = getType(typeId);
+ switch (type.m_typeId) {
+ case Type::Undefined:
+ break;
+ case Type::Char:
+ {
+ const CHARSET_INFO *cs = (const CHARSET_INFO*)info;
+ return
+ cs != 0 &&
+ cs->cset != 0 &&
+ cs->coll != 0 &&
+ cs->coll->strnxfrm != 0 &&
+ cs->coll->strnncollsp != 0 &&
+ cs->strxfrm_multiply == 1; // current limitation
+ }
+ break;
+ case Type::Varchar:
+ return true; // Varchar not used via MySQL
+ case Type::Text:
+ {
+ const CHARSET_INFO *cs = (const CHARSET_INFO*)info;
+ return
+ cs != 0 &&
+ cs->mbmaxlen == 1 && // extra limitation
+ cs->cset != 0 &&
+ cs->coll != 0 &&
+ cs->coll->strnxfrm != 0 &&
+ cs->coll->strnncollsp != 0 &&
+ cs->strxfrm_multiply == 1; // current limitation
+ }
+ break;
+ default:
+ return true;
+ }
+ return false;
}
#ifdef NDB_SQL_UTIL_TEST
@@ -331,6 +669,7 @@ const Testcase testcase[] = {
int
main(int argc, char** argv)
{
+ ndb_init(); // for charsets
unsigned count = argc > 1 ? atoi(argv[1]) : 1000000;
ndbout_c("count = %u", count);
assert(count != 0);
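
The new cmp* routines copy the word-aligned attribute data into a union and compare it as the proper C type, instead of dispatching through the old generic cmp(). A standalone sketch of that pattern for a signed 32-bit value; the full/size word-count convention is kept, everything else (names, CmpUnknown handling) is illustrative:

#include <cstdint>
#include <cassert>

// Compare two word-aligned Int32 values via a union, mirroring the
// style of NdbSqlUtil::cmpInt above. full/size count 32-bit words.
static int cmp_int32_words(const uint32_t* p1, const uint32_t* p2,
                           uint32_t full, uint32_t size)
{
  assert(full >= size && size > 0);
  union { uint32_t p[1]; int32_t v; } u1, u2;   // reinterpret one word as Int32
  u1.p[0] = p1[0];
  u2.p[0] = p2[0];
  if (u1.v < u2.v) return -1;
  if (u1.v > u2.v) return +1;
  return 0;
}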
diff --git a/ndb/src/common/util/OutputStream.cpp b/ndb/src/common/util/OutputStream.cpp
index bf3599dbac9..a41eef649dd 100644
--- a/ndb/src/common/util/OutputStream.cpp
+++ b/ndb/src/common/util/OutputStream.cpp
@@ -74,7 +74,7 @@ SoftOseOutputStream::print(const char * fmt, ...){
va_start(ap, fmt);
if (fmt != 0)
- vsnprintf(buf, sizeof(buf)-1, fmt, ap);
+ BaseString::vsnprintf(buf, sizeof(buf)-1, fmt, ap);
else
buf[0] = 0;
va_end(ap);
@@ -88,7 +88,7 @@ SoftOseOutputStream::println(const char * fmt, ...){
va_start(ap, fmt);
if (fmt != 0)
- vsnprintf(buf, sizeof(buf)-1, fmt, ap);
+ BaseString::vsnprintf(buf, sizeof(buf)-1, fmt, ap);
else
buf[0] = 0;
va_end(ap);
diff --git a/ndb/src/common/util/Properties.cpp b/ndb/src/common/util/Properties.cpp
index 80fb0027830..4443fd45bba 100644
--- a/ndb/src/common/util/Properties.cpp
+++ b/ndb/src/common/util/Properties.cpp
@@ -31,6 +31,7 @@ char * f_strdup(const char * s){
* Note has to be a multiple of 4 bytes
*/
const char Properties::version[] = { 2, 0, 0, 1, 1, 1, 1, 4 };
+const char Properties::delimiter = ':';
/**
* PropertyImpl
@@ -371,7 +372,7 @@ Properties::print(FILE * out, const char * prefix) const{
break;
case PropertiesType_Properties:
char buf2 [1024];
- snprintf(buf2, sizeof(buf2), "%s%s%c",buf, impl->content[i]->name,
+ BaseString::snprintf(buf2, sizeof(buf2), "%s%s%c",buf, impl->content[i]->name,
Properties::delimiter);
((Properties *)impl->content[i]->value)->print(out, buf2);
break;
@@ -994,7 +995,7 @@ bool
Properties::put(const char * name, Uint32 no, Uint32 val, bool replace){
size_t tmp_len = strlen(name)+20;
char * tmp = (char*)malloc(tmp_len);
- snprintf(tmp, tmp_len, "%s_%d", name, no);
+ BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no);
bool res = put(tmp, val, replace);
free(tmp);
return res;
@@ -1004,7 +1005,7 @@ bool
Properties::put64(const char * name, Uint32 no, Uint64 val, bool replace){
size_t tmp_len = strlen(name)+20;
char * tmp = (char*)malloc(tmp_len);
- snprintf(tmp, tmp_len, "%s_%d", name, no);
+ BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no);
bool res = put(tmp, val, replace);
free(tmp);
return res;
@@ -1015,7 +1016,7 @@ bool
Properties::put(const char * name, Uint32 no, const char * val, bool replace){
size_t tmp_len = strlen(name)+20;
char * tmp = (char*)malloc(tmp_len);
- snprintf(tmp, tmp_len, "%s_%d", name, no);
+ BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no);
bool res = put(tmp, val, replace);
free(tmp);
return res;
@@ -1027,7 +1028,7 @@ Properties::put(const char * name, Uint32 no, const Properties * val,
bool replace){
size_t tmp_len = strlen(name)+20;
char * tmp = (char*)malloc(tmp_len);
- snprintf(tmp, tmp_len, "%s_%d", name, no);
+ BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no);
bool res = put(tmp, val, replace);
free(tmp);
return res;
@@ -1039,7 +1040,7 @@ Properties::getTypeOf(const char * name, Uint32 no,
PropertiesType * type) const {
size_t tmp_len = strlen(name)+20;
char * tmp = (char*)malloc(tmp_len);
- snprintf(tmp, tmp_len, "%s_%d", name, no);
+ BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no);
bool res = getTypeOf(tmp, type);
free(tmp);
return res;
@@ -1049,7 +1050,7 @@ bool
Properties::contains(const char * name, Uint32 no) const {
size_t tmp_len = strlen(name)+20;
char * tmp = (char*)malloc(tmp_len);
- snprintf(tmp, tmp_len, "%s_%d", name, no);
+ BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no);
bool res = contains(tmp);
free(tmp);
return res;
@@ -1059,7 +1060,7 @@ bool
Properties::get(const char * name, Uint32 no, Uint32 * value) const{
size_t tmp_len = strlen(name)+20;
char * tmp = (char*)malloc(tmp_len);
- snprintf(tmp, tmp_len, "%s_%d", name, no);
+ BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no);
bool res = get(tmp, value);
free(tmp);
return res;
@@ -1069,7 +1070,7 @@ bool
Properties::get(const char * name, Uint32 no, Uint64 * value) const{
size_t tmp_len = strlen(name)+20;
char * tmp = (char*)malloc(tmp_len);
- snprintf(tmp, tmp_len, "%s_%d", name, no);
+ BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no);
bool res = get(tmp, value);
free(tmp);
return res;
@@ -1080,7 +1081,7 @@ bool
Properties::get(const char * name, Uint32 no, const char ** value) const {
size_t tmp_len = strlen(name)+20;
char * tmp = (char*)malloc(tmp_len);
- snprintf(tmp, tmp_len, "%s_%d", name, no);
+ BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no);
bool res = get(tmp, value);
free(tmp);
return res;
@@ -1091,7 +1092,7 @@ bool
Properties::get(const char * name, Uint32 no, const Properties ** value) const{
size_t tmp_len = strlen(name)+20;
char * tmp = (char*)malloc(tmp_len);
- snprintf(tmp, tmp_len, "%s_%d", name, no);
+ BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no);
bool res = get(tmp, value);
free(tmp);
return res;
@@ -1102,7 +1103,7 @@ bool
Properties::getCopy(const char * name, Uint32 no, char ** value) const {
size_t tmp_len = strlen(name)+20;
char * tmp = (char*)malloc(tmp_len);
- snprintf(tmp, tmp_len, "%s_%d", name, no);
+ BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no);
bool res = getCopy(tmp, value);
free(tmp);
return res;
@@ -1113,7 +1114,7 @@ bool
Properties::getCopy(const char * name, Uint32 no, Properties ** value) const {
size_t tmp_len = strlen(name)+20;
char * tmp = (char*)malloc(tmp_len);
- snprintf(tmp, tmp_len, "%s_%d", name, no);
+ BaseString::snprintf(tmp, tmp_len, "%s_%d", name, no);
bool res = getCopy(tmp, value);
free(tmp);
return res;
diff --git a/ndb/src/common/util/SimpleProperties.cpp b/ndb/src/common/util/SimpleProperties.cpp
index c3980f03c4d..00c440fcb4e 100644
--- a/ndb/src/common/util/SimpleProperties.cpp
+++ b/ndb/src/common/util/SimpleProperties.cpp
@@ -293,7 +293,7 @@ SimpleProperties::Reader::printAll(NdbOut& ndbout){
break;
default:
ndbout << "Unknown type for key: " << getKey()
- << " type: " << getValueType() << endl;
+ << " type: " << (Uint32)getValueType() << endl;
}
}
}
diff --git a/ndb/src/common/util/SocketClient.cpp b/ndb/src/common/util/SocketClient.cpp
index ec837babc24..50e60956b94 100644
--- a/ndb/src/common/util/SocketClient.cpp
+++ b/ndb/src/common/util/SocketClient.cpp
@@ -66,7 +66,9 @@ SocketClient::connect()
if (m_sockfd < 0)
{
if (!init()) {
+#ifdef VM_TRACE
ndbout << "SocketClient::connect() failed " << m_server_name << " " << m_port << endl;
+#endif
return -1;
}
}
diff --git a/ndb/src/common/util/SocketServer.cpp b/ndb/src/common/util/SocketServer.cpp
index 0cc06a54496..c3cffa1399b 100644
--- a/ndb/src/common/util/SocketServer.cpp
+++ b/ndb/src/common/util/SocketServer.cpp
@@ -16,6 +16,7 @@
#include <ndb_global.h>
+#include <my_pthread.h>
#include <SocketServer.hpp>
@@ -46,7 +47,7 @@ SocketServer::~SocketServer() {
}
bool
-SocketServer::tryBind(unsigned short port, const char * intface) const {
+SocketServer::tryBind(unsigned short port, const char * intface) {
struct sockaddr_in servaddr;
memset(&servaddr, 0, sizeof(servaddr));
servaddr.sin_family = AF_INET;
@@ -83,7 +84,8 @@ bool
SocketServer::setup(SocketServer::Service * service,
unsigned short port,
const char * intface){
-
+ DBUG_ENTER("SocketServer::setup");
+ DBUG_PRINT("enter",("interface=%s, port=%d", intface, port));
struct sockaddr_in servaddr;
memset(&servaddr, 0, sizeof(servaddr));
servaddr.sin_family = AF_INET;
@@ -92,36 +94,44 @@ SocketServer::setup(SocketServer::Service * service,
if(intface != 0){
if(Ndb_getInAddr(&servaddr.sin_addr, intface))
- return false;
+ DBUG_RETURN(false);
}
const NDB_SOCKET_TYPE sock = socket(AF_INET, SOCK_STREAM, 0);
if (sock == NDB_INVALID_SOCKET) {
- return false;
+ DBUG_PRINT("error",("socket() - %d - %s",
+ errno, strerror(errno)));
+ DBUG_RETURN(false);
}
const int on = 1;
if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
(const char*)&on, sizeof(on)) == -1) {
+ DBUG_PRINT("error",("getsockopt() - %d - %s",
+ errno, strerror(errno)));
NDB_CLOSE_SOCKET(sock);
- return false;
+ DBUG_RETURN(false);
}
if (bind(sock, (struct sockaddr*) &servaddr, sizeof(servaddr)) == -1) {
+ DBUG_PRINT("error",("bind() - %d - %s",
+ errno, strerror(errno)));
NDB_CLOSE_SOCKET(sock);
- return false;
+ DBUG_RETURN(false);
}
if (listen(sock, m_maxSessions) == -1){
+ DBUG_PRINT("error",("listen() - %d - %s",
+ errno, strerror(errno)));
NDB_CLOSE_SOCKET(sock);
- return false;
+ DBUG_RETURN(false);
}
ServiceInstance i;
i.m_socket = sock;
i.m_service = service;
m_services.push_back(i);
- return true;
+ DBUG_RETURN(true);
}
void
@@ -177,8 +187,9 @@ void*
socketServerThread_C(void* _ss){
SocketServer * ss = (SocketServer *)_ss;
+ my_thread_init();
ss->doRun();
-
+ my_thread_end();
NdbThread_Exit(0);
return 0;
}
@@ -287,8 +298,10 @@ void*
sessionThread_C(void* _sc){
SocketServer::Session * si = (SocketServer::Session *)_sc;
+ my_thread_init();
if(!transfer(si->m_socket)){
si->m_stopped = true;
+ my_thread_end();
NdbThread_Exit(0);
return 0;
}
@@ -301,6 +314,7 @@ sessionThread_C(void* _sc){
}
si->m_stopped = true;
+ my_thread_end();
NdbThread_Exit(0);
return 0;
}
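
socketServerThread_C() and sessionThread_C() now bracket their bodies with my_thread_init()/my_thread_end(), which mysys expects in every thread that touches its facilities (DBUG tracing, per-thread error state). A hedged sketch of the wrapper shape; the includes and the plain pthread-style signature are assumptions, and the NDB code itself exits through NdbThread_Exit(0):

#include <my_global.h>
#include <my_pthread.h>   // my_thread_init() / my_thread_end() from mysys

extern "C" void* worker_thread_C(void* arg)
{
  my_thread_init();        // per-thread mysys setup, needed before any DBUG use
  /* ... thread body that may use mysys/dbug ... */
  my_thread_end();         // release the per-thread mysys state again
  return arg;              // the NDB threads return via NdbThread_Exit(0) instead
}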
diff --git a/ndb/src/common/util/basestring_vsnprintf.c b/ndb/src/common/util/basestring_vsnprintf.c
new file mode 100644
index 00000000000..10932226d18
--- /dev/null
+++ b/ndb/src/common/util/basestring_vsnprintf.c
@@ -0,0 +1,37 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/* define on IRIX to get posix compliant vsnprintf */
+#define _XOPEN_SOURCE 500
+#include <stdio.h>
+#include <basestring_vsnprintf.h>
+
+int
+basestring_snprintf(char *str, size_t size, const char *format, ...)
+{
+ int ret;
+ va_list ap;
+ va_start(ap, format);
+ ret= basestring_vsnprintf(str, size, format, ap);
+ va_end(ap);
+ return(ret);
+}
+
+int
+basestring_vsnprintf(char *str, size_t size, const char *format, va_list ap)
+{
+ return(vsnprintf(str, size, format, ap));
+}
diff --git a/ndb/src/common/util/getarg.c b/ndb/src/common/util/getarg.c
index ae016746987..99b2840a5a6 100644
--- a/ndb/src/common/util/getarg.c
+++ b/ndb/src/common/util/getarg.c
@@ -494,7 +494,7 @@ arg_match_short (struct getargs *args, size_t num_args,
optarg = &argv[j + 1];
else {
++*optind;
- optarg = rargv[*optind];
+ optarg = (char *) rargv[*optind];
}
if(optarg == NULL) {
--*optind;
@@ -545,10 +545,10 @@ getarg(struct getargs *args, size_t num_args,
i++;
break;
}
- ret = arg_match_long (args, num_args, argv[i] + 2,
+ ret = arg_match_long (args, num_args, (char *) argv[i] + 2,
argc, argv, &i);
} else {
- ret = arg_match_short (args, num_args, argv[i],
+ ret = arg_match_short (args, num_args, (char *) argv[i],
argc, argv, &i);
}
if(ret)
diff --git a/ndb/src/common/debugger/LogLevel.cpp b/ndb/src/common/util/ndb_init.c
index f9e2f318432..f3aa734d7f9 100644
--- a/ndb/src/common/debugger/LogLevel.cpp
+++ b/ndb/src/common/util/ndb_init.c
@@ -14,17 +14,22 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#include <LogLevel.hpp>
+#include <ndb_global.h>
+#include <my_sys.h>
-const LogLevel::LogLevelCategoryName LogLevel::LOGLEVEL_CATEGORY_NAME[] = {
- { "LogLevelStartup" },
- { "LogLevelShutdown" },
- { "LogLevelStatistic" },
- { "LogLevelCheckpoint" },
- { "LogLevelNodeRestart" },
- { "LogLevelConnection" },
- { "LogLevelError" },
- { "LogLevelWarning" },
- { "LogLevelInfo" },
- { "LogLevelGrep" }
-};
+int
+ndb_init()
+{
+ if (my_init()) {
+ const char* err = "my_init() failed - exit\n";
+ write(2, err, strlen(err));
+ exit(1);
+ }
+ return 0;
+}
+
+void
+ndb_end(int flags)
+{
+ my_end(flags);
+}
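
ndb_init() wraps my_init() so that an NDB program sets up mysys (and, as the NdbSqlUtil test main above notes, charsets) once before anything else, and ndb_end() forwards its flags to my_end(). A sketch of the intended call order in an application; the header that declares the prototypes is an assumption:

#include <ndb_global.h>   // assumed to declare ndb_init() / ndb_end()

int main(int argc, char** argv)
{
  ndb_init();              // must run before mysys/charset-dependent code
  /* ... application work ... */
  ndb_end(0);              // flags are forwarded to my_end(); 0 = default cleanup
  return 0;
}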
diff --git a/ndb/src/common/util/new.cpp b/ndb/src/common/util/new.cpp
index b61541b7474..901f74bf979 100644
--- a/ndb/src/common/util/new.cpp
+++ b/ndb/src/common/util/new.cpp
@@ -1,5 +1,6 @@
#include <ndb_global.h>
+#include <NdbMem.h>
extern "C" {
void (* ndb_new_handler)() = 0;
@@ -9,7 +10,7 @@ extern "C" {
void *operator new (size_t sz)
{
- void * p = malloc (sz ? sz : 1);
+ void * p = NdbMem_Allocate(sz ? sz : 1);
if(p)
return p;
if(ndb_new_handler)
@@ -19,7 +20,7 @@ void *operator new (size_t sz)
void *operator new[] (size_t sz)
{
- void * p = (void *) malloc (sz ? sz : 1);
+ void * p = (void *) NdbMem_Allocate(sz ? sz : 1);
if(p)
return p;
if(ndb_new_handler)
@@ -30,13 +31,13 @@ void *operator new[] (size_t sz)
void operator delete (void *ptr)
{
if (ptr)
- free(ptr);
+ NdbMem_Free(ptr);
}
void operator delete[] (void *ptr) throw ()
{
if (ptr)
- free(ptr);
+ NdbMem_Free(ptr);
}
#endif // USE_MYSYS_NEW
diff --git a/ndb/src/common/util/random.c b/ndb/src/common/util/random.c
index 286ab093a26..21235763793 100644
--- a/ndb/src/common/util/random.c
+++ b/ndb/src/common/util/random.c
@@ -154,8 +154,8 @@ static void localRandom48(DRand48Data *buffer, long int *result)
static void shuffleSequence(RandomSequence *seq)
{
- int i;
- int j;
+ unsigned int i;
+ unsigned int j;
unsigned int tmp;
if( !seq ) return;
@@ -254,7 +254,7 @@ unsigned int getNextRandom(RandomSequence *seq)
void printSequence(RandomSequence *seq, unsigned int numPerRow)
{
- int i;
+ unsigned int i;
if( !seq ) return;
diff --git a/ndb/src/common/util/socket_io.cpp b/ndb/src/common/util/socket_io.cpp
index b2f4ef91031..6f4c7e63684 100644
--- a/ndb/src/common/util/socket_io.cpp
+++ b/ndb/src/common/util/socket_io.cpp
@@ -175,13 +175,13 @@ vprint_socket(NDB_SOCKET_TYPE socket, int timeout_millis,
size_t size = sizeof(buf);
if (fmt != 0) {
- size = vsnprintf(buf, sizeof(buf), fmt, ap);
+ size = BaseString::vsnprintf(buf, sizeof(buf), fmt, ap);
/* Check if the output was truncated */
if(size >= sizeof(buf)) {
buf2 = (char *)malloc(size+1);
if(buf2 == NULL)
return -1;
- vsnprintf(buf2, size, fmt, ap);
+ BaseString::vsnprintf(buf2, size, fmt, ap);
} else
size = sizeof(buf);
} else
@@ -202,13 +202,13 @@ vprintln_socket(NDB_SOCKET_TYPE socket, int timeout_millis,
size_t size = sizeof(buf);
if (fmt != 0) {
- size = vsnprintf(buf, sizeof(buf), fmt, ap);
+ size = BaseString::vsnprintf(buf, sizeof(buf), fmt, ap);
/* Check if the output was truncated */
if(size >= sizeof(buf)-1) {
buf2 = (char *)malloc(size+2);
if(buf2 == NULL)
return -1;
- vsnprintf(buf2, size+1, fmt, ap);
+ BaseString::vsnprintf(buf2, size+1, fmt, ap);
} else
size = sizeof(buf);
} else
diff --git a/ndb/src/common/util/version.c b/ndb/src/common/util/version.c
index 10f621d5db8..e2515b243b1 100644
--- a/ndb/src/common/util/version.c
+++ b/ndb/src/common/util/version.c
@@ -35,15 +35,21 @@ Uint32 makeVersion(Uint32 major, Uint32 minor, Uint32 build) {
}
-char * getVersionString(Uint32 version, char * status) {
+const char * getVersionString(Uint32 version, const char * status) {
char buff[100];
- snprintf(buff, sizeof(buff),
- "Version %d.%d.%d (%s)",
- getMajor(version),
- getMinor(version),
- getBuild(version),
- status);
-
+ if (status && status[0] != 0)
+ snprintf(buff, sizeof(buff),
+ "Version %d.%d.%d (%s)",
+ getMajor(version),
+ getMinor(version),
+ getBuild(version),
+ status);
+ else
+ snprintf(buff, sizeof(buff),
+ "Version %d.%d.%d",
+ getMajor(version),
+ getMinor(version),
+ getBuild(version));
return strdup(buff);
}
@@ -63,6 +69,7 @@ struct NdbUpGradeCompatible {
#ifndef TEST_VERSION
struct NdbUpGradeCompatible ndbCompatibleTable_full[] = {
+ { MAKE_VERSION(3,5,2), MAKE_VERSION(3,5,1), UG_Exact },
{ 0, 0, UG_Null }
};
@@ -135,7 +142,7 @@ ndbSearchUpgradeCompatibleTable(Uint32 ownVersion, Uint32 otherVersion,
int i;
for (i = 0; table[i].ownVersion != 0 && table[i].otherVersion != 0; i++) {
if (table[i].ownVersion == ownVersion ||
- table[i].ownVersion == ~0) {
+ table[i].ownVersion == (Uint32) ~0) {
switch (table[i].matchType) {
case UG_Range:
if (otherVersion >= table[i].otherVersion){
diff --git a/ndb/src/cw/cpcd/APIService.cpp b/ndb/src/cw/cpcd/APIService.cpp
index 46b043c7004..63d0aaafe86 100644
--- a/ndb/src/cw/cpcd/APIService.cpp
+++ b/ndb/src/cw/cpcd/APIService.cpp
@@ -47,7 +47,7 @@
ParserRow<CPCDAPISession>::IgnoreMinMax, \
0, 0, \
fun, \
- desc }
+ desc, 0 }
#define CPCD_ARG(name, type, opt, desc) \
{ name, \
@@ -58,7 +58,7 @@
ParserRow<CPCDAPISession>::IgnoreMinMax, \
0, 0, \
0, \
- desc }
+ desc, 0 }
#define CPCD_ARG2(name, type, opt, min, max, desc) \
{ name, \
@@ -69,7 +69,7 @@
ParserRow<CPCDAPISession>::IgnoreMinMax, \
min, max, \
0, \
- desc }
+ desc, 0 }
#define CPCD_END() \
{ 0, \
@@ -80,7 +80,7 @@
ParserRow<CPCDAPISession>::IgnoreMinMax, \
0, 0, \
0, \
- 0 }
+ 0, 0 }
#define CPCD_CMD_ALIAS(name, realName, fun) \
{ name, \
@@ -91,7 +91,7 @@
ParserRow<CPCDAPISession>::IgnoreMinMax, \
0, 0, \
0, \
- 0 }
+ 0, 0 }
#define CPCD_ARG_ALIAS(name, realName, fun) \
{ name, \
@@ -102,7 +102,7 @@
ParserRow<CPCDAPISession>::IgnoreMinMax, \
0, 0, \
0, \
- 0 }
+ 0, 0 }
const
ParserRow<CPCDAPISession> commands[] =
@@ -309,7 +309,7 @@ propToString(Properties *prop, const char *key) {
case PropertiesType_Uint32:
Uint32 val;
prop->get(key, &val);
- snprintf(buf, sizeof buf, "%d", val);
+ BaseString::snprintf(buf, sizeof buf, "%d", val);
retval = buf;
break;
case PropertiesType_char:
@@ -318,7 +318,7 @@ propToString(Properties *prop, const char *key) {
retval = str;
break;
default:
- snprintf(buf, sizeof buf, "(unknown)");
+ BaseString::snprintf(buf, sizeof buf, "(unknown)");
retval = buf;
}
return retval;
diff --git a/ndb/src/cw/cpcd/CPCD.cpp b/ndb/src/cw/cpcd/CPCD.cpp
index 44db10422b9..69a7b840528 100644
--- a/ndb/src/cw/cpcd/CPCD.cpp
+++ b/ndb/src/cw/cpcd/CPCD.cpp
@@ -237,9 +237,9 @@ CPCD::saveProcessList(){
FILE *f;
/* Create the filenames that we will use later */
- snprintf(newfile, sizeof(newfile), "%s.new", m_procfile.c_str());
- snprintf(oldfile, sizeof(oldfile), "%s.old", m_procfile.c_str());
- snprintf(curfile, sizeof(curfile), "%s", m_procfile.c_str());
+ BaseString::snprintf(newfile, sizeof(newfile), "%s.new", m_procfile.c_str());
+ BaseString::snprintf(oldfile, sizeof(oldfile), "%s.old", m_procfile.c_str());
+ BaseString::snprintf(curfile, sizeof(curfile), "%s", m_procfile.c_str());
f = fopen(newfile, "w");
@@ -378,9 +378,9 @@ CPCD::getProcessList() {
}
void
-CPCD::RequestStatus::err(enum RequestStatusCode status, char *msg) {
+CPCD::RequestStatus::err(enum RequestStatusCode status, const char *msg) {
m_status = status;
- snprintf(m_errorstring, sizeof(m_errorstring), "%s", msg);
+ BaseString::snprintf(m_errorstring, sizeof(m_errorstring), "%s", msg);
}
#if 0
diff --git a/ndb/src/cw/cpcd/CPCD.hpp b/ndb/src/cw/cpcd/CPCD.hpp
index 4a7cab23bab..a5c0bef1dac 100644
--- a/ndb/src/cw/cpcd/CPCD.hpp
+++ b/ndb/src/cw/cpcd/CPCD.hpp
@@ -91,7 +91,7 @@ public:
RequestStatus() { m_status = OK; m_errorstring[0] = '\0'; };
/** @brief Sets an errorcode and a printable message */
- void err(enum RequestStatusCode, char *);
+ void err(enum RequestStatusCode, const char *);
/** @brief Returns the error message */
char *getErrMsg() { return m_errorstring; };
diff --git a/ndb/src/cw/cpcd/Makefile.am b/ndb/src/cw/cpcd/Makefile.am
index 1f7b0d88448..6af44a359fc 100644
--- a/ndb/src/cw/cpcd/Makefile.am
+++ b/ndb/src/cw/cpcd/Makefile.am
@@ -3,7 +3,11 @@ ndbbin_PROGRAMS = ndb_cpcd
ndb_cpcd_SOURCES = main.cpp CPCD.cpp Process.cpp APIService.cpp Monitor.cpp common.cpp
-LDADD_LOC = $(top_builddir)/ndb/src/libndbclient.la
+LDADD_LOC = \
+ $(top_builddir)/ndb/src/libndbclient.la \
+ $(top_builddir)/dbug/libdbug.a \
+ $(top_builddir)/mysys/libmysys.a \
+ $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
include $(top_srcdir)/ndb/config/common.mk.am
include $(top_srcdir)/ndb/config/type_util.mk.am
diff --git a/ndb/src/cw/cpcd/Process.cpp b/ndb/src/cw/cpcd/Process.cpp
index 0a986f63fda..2509f34e882 100644
--- a/ndb/src/cw/cpcd/Process.cpp
+++ b/ndb/src/cw/cpcd/Process.cpp
@@ -140,7 +140,7 @@ CPCD::Process::readPid() {
memset(buf, 0, sizeof(buf));
- snprintf(filename, sizeof(filename), "%d", m_id);
+ BaseString::snprintf(filename, sizeof(filename), "%d", m_id);
f = fopen(filename, "r");
@@ -167,8 +167,8 @@ CPCD::Process::writePid(int pid) {
char filename[PATH_MAX*2+1];
FILE *f;
- snprintf(tmpfilename, sizeof(tmpfilename), "tmp.XXXXXX");
- snprintf(filename, sizeof(filename), "%d", m_id);
+ BaseString::snprintf(tmpfilename, sizeof(tmpfilename), "tmp.XXXXXX");
+ BaseString::snprintf(filename, sizeof(filename), "%d", m_id);
int fd = mkstemp(tmpfilename);
if(fd < 0) {
@@ -237,6 +237,7 @@ set_ulimit(const BaseString & pair){
} else if(list[0] == "t"){
_RLIMIT_FIX(RLIMIT_CPU);
} else {
+ res= -11;
errno = EINVAL;
}
if(res){
@@ -313,7 +314,7 @@ CPCD::Process::do_exec() {
}
/* Close all filedescriptors */
- for(i = STDERR_FILENO+1; i < getdtablesize(); i++)
+ for(i = STDERR_FILENO+1; (int)i < getdtablesize(); i++)
close(i);
execv(m_path.c_str(), argv);
@@ -353,6 +354,7 @@ CPCD::Process::start() {
setsid();
writePid(getpgrp());
if(runas(m_runas.c_str()) == 0){
+ signal(SIGCHLD, SIG_DFL);
do_exec();
}
_exit(1);
@@ -383,6 +385,7 @@ CPCD::Process::start() {
if(runas(m_runas.c_str()) != 0){
_exit(1);
}
+ signal(SIGCHLD, SIG_DFL);
do_exec();
_exit(1);
/* NOTREACHED */
@@ -437,7 +440,7 @@ void
CPCD::Process::stop() {
char filename[PATH_MAX*2+1];
- snprintf(filename, sizeof(filename), "%d", m_id);
+ BaseString::snprintf(filename, sizeof(filename), "%d", m_id);
unlink(filename);
if(m_pid <= 1){
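
The start() hunks above reset SIGCHLD to its default disposition in the child before exec; a hedged sketch of that fork/exec hygiene in isolation (the runas/do_exec helpers of the patch are not reproduced; the child merely resets the signal, closes inherited descriptors as do_exec does, and execs):

#include <csignal>
#include <cstdio>
#include <sys/types.h>
#include <unistd.h>

// Fork and exec 'path' with default signal dispositions and no stray
// descriptors. Returns the child pid in the parent, -1 on fork failure.
static pid_t spawn(const char* path, char* const argv[])
{
  pid_t pid = fork();
  if (pid != 0)
    return pid;                          // parent, or fork() failed

  // Child: an ignored SIGCHLD is inherited across exec and can confuse the
  // new program's own child handling, so restore the default first.
  signal(SIGCHLD, SIG_DFL);

  // Close everything above stderr, as the patch does right before execv.
  for (int fd = STDERR_FILENO + 1; fd < getdtablesize(); fd++)
    close(fd);

  execv(path, argv);
  perror("execv");                       // only reached if exec failed
  _exit(1);
}
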
diff --git a/ndb/src/cw/cpcd/main.cpp b/ndb/src/cw/cpcd/main.cpp
index 913c31de1f7..207b81bfa89 100644
--- a/ndb/src/cw/cpcd/main.cpp
+++ b/ndb/src/cw/cpcd/main.cpp
@@ -28,12 +28,12 @@
#include "common.hpp"
-static char *work_dir = CPCD_DEFAULT_WORK_DIR;
+static const char *work_dir = CPCD_DEFAULT_WORK_DIR;
static int port = CPCD_DEFAULT_TCP_PORT;
static int use_syslog = 0;
-static char *logfile = NULL;
-static char *config_file = CPCD_DEFAULT_CONFIG_FILE;
-static char *user = 0;
+static const char *logfile = NULL;
+static const char *config_file = CPCD_DEFAULT_CONFIG_FILE;
+static const char *user = 0;
static struct getargs args[] = {
{ "work-dir", 'w', arg_string, &work_dir,
diff --git a/ndb/src/kernel/Makefile.am b/ndb/src/kernel/Makefile.am
index 60284f6a369..493ab4f9982 100644
--- a/ndb/src/kernel/Makefile.am
+++ b/ndb/src/kernel/Makefile.am
@@ -52,7 +52,10 @@ LDADD += \
$(top_builddir)/ndb/src/common/mgmcommon/libmgmsrvcommon.la \
$(top_builddir)/ndb/src/mgmapi/libmgmapi.la \
$(top_builddir)/ndb/src/common/portlib/libportlib.la \
- $(top_builddir)/ndb/src/common/util/libgeneral.la
+ $(top_builddir)/ndb/src/common/util/libgeneral.la \
+ $(top_builddir)/dbug/libdbug.a \
+ $(top_builddir)/mysys/libmysys.a \
+ $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
# Don't update the files from bitkeeper
%::SCCS/s.%
diff --git a/ndb/src/kernel/blocks/ERROR_codes.txt b/ndb/src/kernel/blocks/ERROR_codes.txt
index af575de4f62..70f11c33cd7 100644
--- a/ndb/src/kernel/blocks/ERROR_codes.txt
+++ b/ndb/src/kernel/blocks/ERROR_codes.txt
@@ -3,9 +3,9 @@ Next NDBCNTR 1000
Next NDBFS 2000
Next DBACC 3001
Next DBTUP 4007
-Next DBLQH 5040
+Next DBLQH 5042
Next DBDICT 6006
-Next DBDIH 7173
+Next DBDIH 7174
Next DBTC 8035
Next CMVMI 9000
Next BACKUP 10022
@@ -193,6 +193,8 @@ Delay execution of ABORTREQ signal 2 seconds to generate time-out.
5038: Drop LQHKEYREQ + set 5039
5039: Drop ABORT + set 5003
+8048: Make TC not choose own node for simple/dirty read
+5041: Crash if receiving simple read from other TC on different node
ERROR CODES FOR TESTING TIME-OUT HANDLING IN DBTC
-------------------------------------------------
@@ -387,6 +389,11 @@ Backup Stuff:
5028: Crash when receiving LQHKEYREQ (in non-master)
+Failed Create Table:
+--------------------
+7173: Create table failed due to an insufficient number of fragment or
+ replica records.
+
Drop Table/Index:
-----------------
4001: Crash on REL_TABMEMREQ in TUP
diff --git a/ndb/src/kernel/blocks/backup/Backup.cpp b/ndb/src/kernel/blocks/backup/Backup.cpp
index 52a543dbcdc..e6fe63d9014 100644
--- a/ndb/src/kernel/blocks/backup/Backup.cpp
+++ b/ndb/src/kernel/blocks/backup/Backup.cpp
@@ -40,6 +40,7 @@
#include <signaldata/BackupImpl.hpp>
#include <signaldata/BackupSignalData.hpp>
#include <signaldata/BackupContinueB.hpp>
+#include <signaldata/EventReport.hpp>
#include <signaldata/UtilSequence.hpp>
@@ -885,7 +886,7 @@ Backup::execBACKUP_REQ(Signal* signal)
}//if
ndbrequire(ptr.p->pages.empty());
- ndbrequire(ptr.p->tables.empty());
+ ndbrequire(ptr.p->tables.isEmpty());
ptr.p->masterData.state.forceState(INITIAL);
ptr.p->masterData.state.setState(DEFINING);
@@ -944,6 +945,13 @@ Backup::sendBackupRef(BlockReference senderRef, Signal *signal,
ref->errorCode = errorCode;
ref->masterRef = numberToRef(BACKUP, getMasterNodeId());
sendSignal(senderRef, GSN_BACKUP_REF, signal, BackupRef::SignalLength, JBB);
+
+ if(errorCode != BackupRef::IAmNotMaster){
+ signal->theData[0] = EventReport::BackupFailedToStart;
+ signal->theData[1] = senderRef;
+ signal->theData[2] = errorCode;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
+ }
}
void
@@ -1226,7 +1234,13 @@ Backup::defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
conf->nodes = ptr.p->nodes;
sendSignal(ptr.p->clientRef, GSN_BACKUP_CONF, signal,
BackupConf::SignalLength, JBB);
-
+
+ signal->theData[0] = EventReport::BackupStarted;
+ signal->theData[1] = ptr.p->clientRef;
+ signal->theData[2] = ptr.p->backupId;
+ ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+3);
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3+NdbNodeBitmask::Size, JBB);
+
ptr.p->masterData.state.setState(DEFINED);
/**
* Prepare Trig
@@ -1293,7 +1307,7 @@ Backup::sendCreateTrig(Signal* signal,
for (int i=0; i < 3; i++) {
req->setTriggerEvent(triggerEventValues[i]);
- snprintf(triggerName, sizeof(triggerName), triggerNameFormat[i],
+ BaseString::snprintf(triggerName, sizeof(triggerName), triggerNameFormat[i],
ptr.p->backupId, tabPtr.p->tableId);
w.reset();
w.add(CreateTrigReq::TriggerNameKey, triggerName);
@@ -1931,7 +1945,7 @@ Backup::sendDropTrig(Signal* signal, BackupRecordPtr ptr, TablePtr tabPtr)
sendSignal(DBDICT_REF, GSN_DROP_TRIG_REQ,
signal, DropTrigReq::SignalLength, JBB);
} else {
- snprintf(triggerName, sizeof(triggerName), triggerNameFormat[i],
+ BaseString::snprintf(triggerName, sizeof(triggerName), triggerNameFormat[i],
ptr.p->backupId, tabPtr.p->tableId);
w.reset();
w.add(CreateTrigReq::TriggerNameKey, triggerName);
@@ -2069,6 +2083,18 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
rep->nodes = ptr.p->nodes;
sendSignal(ptr.p->clientRef, GSN_BACKUP_COMPLETE_REP, signal,
BackupCompleteRep::SignalLength, JBB);
+
+ signal->theData[0] = EventReport::BackupCompleted;
+ signal->theData[1] = ptr.p->clientRef;
+ signal->theData[2] = ptr.p->backupId;
+ signal->theData[3] = ptr.p->startGCP;
+ signal->theData[4] = ptr.p->stopGCP;
+ signal->theData[5] = ptr.p->noOfBytes;
+ signal->theData[6] = ptr.p->noOfRecords;
+ signal->theData[7] = ptr.p->noOfLogBytes;
+ signal->theData[8] = ptr.p->noOfLogRecords;
+ ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+9);
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 9+NdbNodeBitmask::Size, JBB);
}
/*****************************************************************************
@@ -2259,6 +2285,12 @@ Backup::masterSendAbortBackup(Signal* signal, BackupRecordPtr ptr)
rep->reason = ptr.p->errorCode;
sendSignal(ptr.p->clientRef, GSN_BACKUP_ABORT_REP, signal,
BackupAbortRep::SignalLength, JBB);
+
+ signal->theData[0] = EventReport::BackupAborted;
+ signal->theData[1] = ptr.p->clientRef;
+ signal->theData[2] = ptr.p->backupId;
+ signal->theData[3] = ptr.p->errorCode;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
}//if
// ptr.p->masterData.state.setState(INITIAL);
@@ -2484,8 +2516,7 @@ Backup::execLIST_TABLES_CONF(Signal* signal)
jam();
Uint32 tableId = ListTablesConf::getTableId(conf->tableData[i]);
Uint32 tableType = ListTablesConf::getTableType(conf->tableData[i]);
- if (tableType != DictTabInfo::SystemTable &&
- tableType != DictTabInfo::UserTable) {
+ if (!DictTabInfo::isTable(tableType) && !DictTabInfo::isIndex(tableType)){
jam();
continue;
}//if
@@ -2864,7 +2895,12 @@ Backup::execGET_TABINFO_CONF(Signal* signal)
return;
}//if
+ TablePtr tmp = tabPtr;
ptr.p->tables.next(tabPtr);
+ if(DictTabInfo::isIndex(tmp.p->tableType)){
+ ptr.p->tables.release(tmp);
+ }
+
if(tabPtr.i == RNIL) {
jam();
@@ -2906,7 +2942,11 @@ Backup::parseTableDescription(Signal* signal, BackupRecordPtr ptr, Uint32 len)
TablePtr tabPtr;
ndbrequire(findTable(ptr, tabPtr, tmpTab.TableId));
-
+ if(DictTabInfo::isIndex(tabPtr.p->tableType)){
+ jam();
+ return tabPtr;
+ }
+
/**
* Initialize table object
*/
@@ -3320,24 +3360,21 @@ Backup::execBACKUP_FRAGMENT_REQ(Signal* signal)
req->senderData = filePtr.i;
req->resultRef = reference();
req->schemaVersion = table.schemaVersion;
- req->fragmentNo = fragNo;
+ req->fragmentNoKeyLen = fragNo;
req->requestInfo = 0;
req->savePointId = 0;
req->tableId = table.tableId;
- ScanFragReq::setConcurrency(req->requestInfo, parallelism);
ScanFragReq::setLockMode(req->requestInfo, 0);
ScanFragReq::setHoldLockFlag(req->requestInfo, 0);
ScanFragReq::setKeyinfoFlag(req->requestInfo, 1);
ScanFragReq::setAttrLen(req->requestInfo,attrLen);
req->transId1 = 0;
req->transId2 = (BACKUP << 20) + (getOwnNodeId() << 8);
-
- Uint32 i;
- for(i = 0; i<parallelism; i++) {
- jam();
- req->clientOpPtr[i] = filePtr.i;
- }//for
- sendSignal(DBLQH_REF, GSN_SCAN_FRAGREQ, signal, 25, JBB);
+ req->clientOpPtr= filePtr.i;
+ req->batch_size_rows= 16;
+ req->batch_size_bytes= 0;
+ sendSignal(DBLQH_REF, GSN_SCAN_FRAGREQ, signal,
+ ScanFragReq::SignalLength, JBB);
signal->theData[0] = filePtr.i;
signal->theData[1] = 0;
@@ -3351,6 +3388,7 @@ Backup::execBACKUP_FRAGMENT_REQ(Signal* signal)
signal->theData[7] = 0;
Uint32 dataPos = 8;
+ Uint32 i;
for(i = 0; i<table.noOfAttributes; i++) {
jam();
AttributePtr attr;
@@ -3552,8 +3590,7 @@ Backup::OperationRecord::newFragment(Uint32 tableId, Uint32 fragNo)
head->FragmentNo = htonl(fragNo);
head->ChecksumType = htonl(0);
- opNoDone = opNoConf = 0;
- memset(attrLen, 0, sizeof(attrLen));
+ opNoDone = opNoConf = opLen = 0;
newRecord(tmp + headSz);
scanStart = tmp;
scanStop = (tmp + headSz);
@@ -3596,8 +3633,7 @@ Backup::OperationRecord::newScan()
ndbrequire(16 * maxRecordSize < dataBuffer.getMaxWrite());
if(dataBuffer.getWritePtr(&tmp, 16 * maxRecordSize)) {
jam();
- opNoDone = opNoConf = 0;
- memset(attrLen, 0, sizeof(attrLen));
+ opNoDone = opNoConf = opLen = 0;
newRecord(tmp);
scanStart = tmp;
scanStop = tmp;
@@ -3607,14 +3643,14 @@ Backup::OperationRecord::newScan()
}
bool
-Backup::OperationRecord::scanConf(Uint32 noOfOps, Uint32 opLen[])
+Backup::OperationRecord::scanConf(Uint32 noOfOps, Uint32 total_len)
{
const Uint32 done = opNoDone-opNoConf;
ndbrequire(noOfOps == done);
- ndbrequire(memcmp(&attrLen[opNoConf], opLen, done << 2) == 0);
+ ndbrequire(opLen == total_len);
opNoConf = opNoDone;
-
+
const Uint32 len = (scanStop - scanStart);
ndbrequire(len < dataBuffer.getMaxWrite());
dataBuffer.updateWritePtr(len);
@@ -3655,8 +3691,8 @@ Backup::execSCAN_FRAGCONF(Signal* signal)
c_backupFilePool.getPtr(filePtr, filePtrI);
OperationRecord & op = filePtr.p->operation;
- op.scanConf(conf->completedOps, conf->opReturnDataLen);
-
+
+ op.scanConf(conf->completedOps, conf->total_len);
const Uint32 completed = conf->fragmentCompleted;
if(completed != 2) {
jam();
@@ -3725,6 +3761,8 @@ Backup::checkScan(Signal* signal, BackupFilePtr filePtr)
req->closeFlag = 0;
req->transId1 = 0;
req->transId2 = (BACKUP << 20) + (getOwnNodeId() << 8);
+ req->batch_size_rows= 16;
+ req->batch_size_bytes= 0;
sendSignal(DBLQH_REF, GSN_SCAN_NEXTREQ, signal,
ScanFragNextReq::SignalLength, JBB);
return;
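
Several of the Backup.cpp hunks above follow one reporting pattern: the event type goes into the first signal word, then the client reference, backup id and event-specific counters, and the array is posted to CMVMI as an EVENT_REP. A minimal stand-alone sketch of that positional packing with plain structs (the real Signal/sendSignal machinery and the trailing node bitmask are left out):

#include <cstdint>
#include <cstdio>

enum EventType { BackupStarted = 1, BackupCompleted = 2, BackupAborted = 3 };

// Stand-in for the first words of a signal buffer.
struct EventReportWords {
  uint32_t data[25];
  uint32_t length;
};

// Pack a "backup completed" style report: type, client ref, backup id,
// then event-specific counters, in positional order.
static EventReportWords make_backup_completed(uint32_t clientRef,
                                              uint32_t backupId,
                                              uint32_t startGCP,
                                              uint32_t stopGCP,
                                              uint32_t bytes,
                                              uint32_t records)
{
  EventReportWords rep = {};
  rep.data[0] = BackupCompleted;
  rep.data[1] = clientRef;
  rep.data[2] = backupId;
  rep.data[3] = startGCP;
  rep.data[4] = stopGCP;
  rep.data[5] = bytes;
  rep.data[6] = records;
  rep.length  = 7;          // the real signal also appends a node bitmask
  return rep;
}

int main()
{
  EventReportWords r = make_backup_completed(0x10002, 42, 100, 104, 1 << 20, 5000);
  for (uint32_t i = 0; i < r.length; i++)
    printf("theData[%u] = %u\n", i, r.data[i]);
  return 0;
}
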
diff --git a/ndb/src/kernel/blocks/backup/Backup.hpp b/ndb/src/kernel/blocks/backup/Backup.hpp
index 1e2100251be..4dc2cd13ae0 100644
--- a/ndb/src/kernel/blocks/backup/Backup.hpp
+++ b/ndb/src/kernel/blocks/backup/Backup.hpp
@@ -236,7 +236,7 @@ public:
* Once per scan frag (next) req/conf
*/
bool newScan();
- bool scanConf(Uint32 noOfOps, Uint32 opLen[]);
+ bool scanConf(Uint32 noOfOps, Uint32 opLen);
/**
* Per record
@@ -268,7 +268,7 @@ public:
Uint32 opNoDone;
Uint32 opNoConf;
- Uint32 attrLen[16];
+ Uint32 opLen;
public:
Uint32* dst;
@@ -441,7 +441,7 @@ public:
Uint32 startGCP;
Uint32 currGCP;
Uint32 stopGCP;
- SLList<Table> tables;
+ DLList<Table> tables;
SLList<TriggerRecord> triggers;
SLList<BackupFile> files;
@@ -713,7 +713,7 @@ Backup::OperationRecord::finished(){
return false;
}
- attrLen[opNoDone] = attrSzTotal + sz_FixedKeys;
+ opLen += attrSzTotal + sz_FixedKeys;
opNoDone++;
scanStop = dst = (Uint32 *)dst_VariableData;
diff --git a/ndb/src/kernel/blocks/backup/BackupInit.cpp b/ndb/src/kernel/blocks/backup/BackupInit.cpp
index d8cbb36df62..8daad05558b 100644
--- a/ndb/src/kernel/blocks/backup/BackupInit.cpp
+++ b/ndb/src/kernel/blocks/backup/BackupInit.cpp
@@ -45,16 +45,9 @@ Backup::Backup(const Configuration & conf) :
ndb_mgm_get_int_parameter(p, CFG_DB_PARALLEL_BACKUPS, &noBackups);
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_TABLES, &noTables));
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_ATTRIBUTES, &noAttribs));
-
- // To allow for user tables AND SYSTAB
- // See ClusterConfig
- //TODO get this infor from NdbCntr
- noTables += 2;
-
- // Considering also TR527, this is a KISS work-around to be able to
- // continue testing the real thing
- noAttribs += 2 + 1;
-
+
+ noAttribs++; //RT 527 bug fix
+
c_backupPool.setSize(noBackups);
c_backupFilePool.setSize(3 * noBackups);
c_tablePool.setSize(noBackups * noTables);
diff --git a/ndb/src/kernel/blocks/backup/read.cpp b/ndb/src/kernel/blocks/backup/read.cpp
index 921c352ea13..89cc08ee9de 100644
--- a/ndb/src/kernel/blocks/backup/read.cpp
+++ b/ndb/src/kernel/blocks/backup/read.cpp
@@ -48,6 +48,7 @@ static Uint32 logEntryNo;
int
main(int argc, const char * argv[]){
+ ndb_init();
if(argc <= 1){
printf("Usage: %s <filename>", argv[0]);
exit(1);
diff --git a/ndb/src/kernel/blocks/backup/restore/Makefile.am b/ndb/src/kernel/blocks/backup/restore/Makefile.am
index e0429c60723..16550f13546 100644
--- a/ndb/src/kernel/blocks/backup/restore/Makefile.am
+++ b/ndb/src/kernel/blocks/backup/restore/Makefile.am
@@ -3,7 +3,11 @@ ndbtools_PROGRAMS = ndb_restore
ndb_restore_SOURCES = main.cpp consumer.cpp consumer_restore.cpp consumer_printer.cpp Restore.cpp
-LDADD_LOC = $(top_builddir)/ndb/src/libndbclient.la
+LDADD_LOC = \
+ $(top_builddir)/ndb/src/libndbclient.la \
+ $(top_builddir)/dbug/libdbug.a \
+ $(top_builddir)/mysys/libmysys.a \
+ $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
include $(top_srcdir)/ndb/config/common.mk.am
diff --git a/ndb/src/kernel/blocks/backup/restore/Restore.cpp b/ndb/src/kernel/blocks/backup/restore/Restore.cpp
index f0ca54884be..fb3bde6bdef 100644
--- a/ndb/src/kernel/blocks/backup/restore/Restore.cpp
+++ b/ndb/src/kernel/blocks/backup/restore/Restore.cpp
@@ -88,7 +88,7 @@ RestoreMetaData::~RestoreMetaData(){
allTables.clear();
}
-const TableS *
+TableS *
RestoreMetaData::getTable(Uint32 tableId) const {
for(Uint32 i= 0; i < allTables.size(); i++)
if(allTables[i]->getTableId() == tableId)
@@ -201,6 +201,8 @@ TableS::TableS(NdbTableImpl* tableImpl)
{
m_dictTable = tableImpl;
m_noOfNullable = m_nullBitmaskSize = 0;
+ m_auto_val_id= ~(Uint32)0;
+ m_max_auto_val= 0;
for (int i = 0; i < tableImpl->getNoOfColumns(); i++)
createAttr(tableImpl->getColumn(i));
@@ -240,6 +242,7 @@ RestoreMetaData::parseTableDescriptor(const Uint32 * data, Uint32 len)
debug << "Pushing table " << table->getTableName() << endl;
debug << " with " << table->getNoOfAttributes() << " attributes" << endl;
+
allTables.push_back(table);
return true;
@@ -268,7 +271,7 @@ int TupleS::getNoOfAttributes() const {
return m_currentTable->getNoOfAttributes();
};
-const TableS * TupleS::getTable() const {
+TableS * TupleS::getTable() const {
return m_currentTable;
};
@@ -281,7 +284,7 @@ AttributeData * TupleS::getData(int i) const{
};
bool
-TupleS::prepareRecord(const TableS & tab){
+TupleS::prepareRecord(TableS & tab){
if (allAttrData) {
if (getNoOfAttributes() == tab.getNoOfAttributes())
{
@@ -512,7 +515,7 @@ BackupFile::setCtlFile(Uint32 nodeId, Uint32 backupId, const char * path){
m_expectedFileHeader.FileType = BackupFormat::CTL_FILE;
char name[PATH_MAX]; const Uint32 sz = sizeof(name);
- snprintf(name, sz, "BACKUP-%d.%d.ctl", backupId, nodeId);
+ BaseString::snprintf(name, sz, "BACKUP-%d.%d.ctl", backupId, nodeId);
setName(path, name);
}
@@ -523,7 +526,7 @@ BackupFile::setDataFile(const BackupFile & bf, Uint32 no){
m_expectedFileHeader.FileType = BackupFormat::DATA_FILE;
char name[PATH_MAX]; const Uint32 sz = sizeof(name);
- snprintf(name, sz, "BACKUP-%d-%d.%d.Data",
+ BaseString::snprintf(name, sz, "BACKUP-%d-%d.%d.Data",
m_expectedFileHeader.BackupId, no, m_nodeId);
setName(bf.m_path, name);
}
@@ -535,7 +538,7 @@ BackupFile::setLogFile(const BackupFile & bf, Uint32 no){
m_expectedFileHeader.FileType = BackupFormat::LOG_FILE;
char name[PATH_MAX]; const Uint32 sz = sizeof(name);
- snprintf(name, sz, "BACKUP-%d.%d.log",
+ BaseString::snprintf(name, sz, "BACKUP-%d.%d.log",
m_expectedFileHeader.BackupId, m_nodeId);
setName(bf.m_path, name);
}
@@ -545,15 +548,15 @@ BackupFile::setName(const char * p, const char * n){
const Uint32 sz = sizeof(m_path);
if(p != 0 && strlen(p) > 0){
if(p[strlen(p)-1] == '/'){
- snprintf(m_path, sz, "%s", p);
+ BaseString::snprintf(m_path, sz, "%s", p);
} else {
- snprintf(m_path, sz, "%s%s", p, "/");
+ BaseString::snprintf(m_path, sz, "%s%s", p, "/");
}
} else {
m_path[0] = 0;
}
- snprintf(m_fileName, sizeof(m_fileName), "%s%s", m_path, n);
+ BaseString::snprintf(m_fileName, sizeof(m_fileName), "%s%s", m_path, n);
debug << "Filename = " << m_fileName << endl;
}
@@ -683,8 +686,8 @@ RestoreDataIterator::validateFragmentFooter() {
AttributeDesc::AttributeDesc(NdbDictionary::Column *c)
: m_column(c)
{
- size = c->getSize()*8;
- arraySize = c->getLength();
+ size = 8*NdbColumnImpl::getImpl(* c).m_attrSize;
+ arraySize = NdbColumnImpl::getImpl(* c).m_arraySize;
}
void TableS::createAttr(NdbDictionary::Column *column)
@@ -697,6 +700,9 @@ void TableS::createAttr(NdbDictionary::Column *column)
d->attrId = allAttributesDesc.size();
allAttributesDesc.push_back(d);
+ if (d->m_column->getAutoIncrement())
+ m_auto_val_id= d->attrId;
+
if(d->m_column->getPrimaryKey() /* && not variable */)
{
m_fixedKeys.push_back(d);
@@ -930,8 +936,9 @@ operator<<(NdbOut& ndbout, const TableS & table){
for (int j = 0; j < table.getNoOfAttributes(); j++)
{
const AttributeDesc * desc = table[j];
- ndbout << desc->m_column->getName() << ": " << desc->m_column->getType();
- ndbout << " key: " << desc->m_column->getPrimaryKey();
+ ndbout << desc->m_column->getName() << ": "
+ << (Uint32) desc->m_column->getType();
+ ndbout << " key: " << (Uint32) desc->m_column->getPrimaryKey();
ndbout << " array: " << desc->arraySize;
ndbout << " size: " << desc->size << endl;
} // for
diff --git a/ndb/src/kernel/blocks/backup/restore/Restore.hpp b/ndb/src/kernel/blocks/backup/restore/Restore.hpp
index 5a705740c69..0ec1ab852e9 100644
--- a/ndb/src/kernel/blocks/backup/restore/Restore.hpp
+++ b/ndb/src/kernel/blocks/backup/restore/Restore.hpp
@@ -91,9 +91,9 @@ class TupleS {
private:
friend class RestoreDataIterator;
- const TableS *m_currentTable;
+ class TableS *m_currentTable;
AttributeData *allAttrData;
- bool prepareRecord(const TableS &);
+ bool prepareRecord(TableS &);
public:
TupleS() {
@@ -108,7 +108,7 @@ public:
TupleS(const TupleS& tuple); // disable copy constructor
TupleS & operator=(const TupleS& tuple);
int getNoOfAttributes() const;
- const TableS * getTable() const;
+ TableS * getTable() const;
const AttributeDesc * getDesc(int i) const;
AttributeData * getData(int i) const;
}; // class TupleS
@@ -130,6 +130,9 @@ class TableS {
Uint32 m_noOfNullable;
Uint32 m_nullBitmaskSize;
+ Uint32 m_auto_val_id;
+ Uint64 m_max_auto_val;
+
int pos;
void createAttr(NdbDictionary::Column *column);
@@ -170,6 +173,42 @@ public:
return allAttributesDesc.size();
};
+ bool have_auto_inc() const {
+ return m_auto_val_id != ~(Uint32)0;
+ };
+
+ bool have_auto_inc(Uint32 id) const {
+ return m_auto_val_id == id;
+ };
+
+ Uint64 get_max_auto_val() const {
+ return m_max_auto_val;
+ };
+
+ void update_max_auto_val(const char *data, int size) {
+ Uint64 val= 0;
+ switch(size){
+ case 8:
+ val= *(Uint8*)data;
+ break;
+ case 16:
+ val= *(Uint16*)data;
+ break;
+ case 24:
+ val= (0xffffff)&*(Uint32*)data;
+ break;
+ case 32:
+ val= *(Uint32*)data;
+ break;
+ case 64:
+ val= *(Uint64*)data;
+ break;
+ default:
+ return;
+ };
+ if(val > m_max_auto_val)
+ m_max_auto_val= val;
+ };
/**
* Get attribute descriptor
*/
@@ -245,7 +284,7 @@ public:
Uint32 getNoOfTables() const { return allTables.size();}
const TableS * operator[](int i) const { return allTables[i];}
- const TableS * getTable(Uint32 tableId) const;
+ TableS * getTable(Uint32 tableId) const;
Uint32 getStopGCP() const;
}; // RestoreMetaData
@@ -254,7 +293,7 @@ public:
class RestoreDataIterator : public BackupFile {
const RestoreMetaData & m_metaData;
Uint32 m_count;
- const TableS* m_currentTable;
+ TableS* m_currentTable;
TupleS m_tuple;
public:
@@ -278,7 +317,7 @@ public:
LE_UPDATE
};
EntryType m_type;
- const TableS * m_table;
+ TableS * m_table;
Vector<AttributeS*> m_values;
Vector<AttributeS*> m_values_e;
AttributeS *add_attr() {
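
update_max_auto_val() above records the largest auto-increment value seen while reading backup data, switching on the column's declared bit width; below is a compact sketch of the same bookkeeping, assuming aligned little-endian storage much as the original helper effectively does. After the data and log passes, finalize_table() can then push max+1 as the table's next auto-increment value, which is what the consumer changes further down do.

#include <cstdint>
#include <cstring>

// Track the largest auto-increment value seen so far. 'size_bits' is the
// column width in bits; data is assumed aligned and little-endian.
static void update_max_auto_val(const char* data, int size_bits, uint64_t& max_val)
{
  uint64_t val = 0;
  switch (size_bits) {
  case 8:  { uint8_t  v;     memcpy(&v, data, 1); val = v; break; }
  case 16: { uint16_t v;     memcpy(&v, data, 2); val = v; break; }
  case 24: { uint32_t v = 0; memcpy(&v, data, 3); val = v & 0xFFFFFF; break; }
  case 32: { uint32_t v;     memcpy(&v, data, 4); val = v; break; }
  case 64: { uint64_t v;     memcpy(&v, data, 8); val = v; break; }
  default: return;                       // unsupported width: ignore
  }
  if (val > max_val)
    max_val = val;
}
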
diff --git a/ndb/src/kernel/blocks/backup/restore/consumer.hpp b/ndb/src/kernel/blocks/backup/restore/consumer.hpp
index e3ba2041a22..692c814159f 100644
--- a/ndb/src/kernel/blocks/backup/restore/consumer.hpp
+++ b/ndb/src/kernel/blocks/backup/restore/consumer.hpp
@@ -24,11 +24,13 @@ public:
virtual ~BackupConsumer() { }
virtual bool init() { return true;}
virtual bool table(const TableS &){return true;}
+ virtual bool endOfTables() { return true; }
virtual void tuple(const TupleS &){}
virtual void tuple_free(){}
virtual void endOfTuples(){}
virtual void logEntry(const LogEntry &){}
virtual void endOfLogEntrys(){}
+ virtual bool finalize_table(const TableS &){return true;}
};
#endif
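
The hunk above adds endOfTables() and finalize_table() hooks to the consumer interface; a rough sketch of how such a pipeline is typically driven, with every consumer seeing each phase in order and any rejection aborting the run. The driver below is illustrative only, not the actual restore main loop:

#include <vector>

struct TableInfo { int id; };
struct Tuple     { int table_id; };

// Mirrors the shape of the BackupConsumer hooks added in the patch.
class Consumer {
public:
  virtual ~Consumer() {}
  virtual bool table(const TableInfo&)          { return true; }
  virtual bool endOfTables()                    { return true; }
  virtual void tuple(const Tuple&)              {}
  virtual void endOfTuples()                    {}
  virtual bool finalize_table(const TableInfo&) { return true; }
};

// Drive every consumer through the same phases, failing fast if any
// consumer rejects a phase.
static bool run_restore(std::vector<Consumer*>& cons,
                        const std::vector<TableInfo>& tables,
                        const std::vector<Tuple>& tuples)
{
  for (size_t i = 0; i < tables.size(); i++)
    for (size_t c = 0; c < cons.size(); c++)
      if (!cons[c]->table(tables[i])) return false;

  for (size_t c = 0; c < cons.size(); c++)
    if (!cons[c]->endOfTables()) return false;

  for (size_t i = 0; i < tuples.size(); i++)
    for (size_t c = 0; c < cons.size(); c++)
      cons[c]->tuple(tuples[i]);

  for (size_t c = 0; c < cons.size(); c++)
    cons[c]->endOfTuples();

  for (size_t i = 0; i < tables.size(); i++)
    for (size_t c = 0; c < cons.size(); c++)
      if (!cons[c]->finalize_table(tables[i])) return false;

  return true;
}
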
diff --git a/ndb/src/kernel/blocks/backup/restore/consumer_restore.cpp b/ndb/src/kernel/blocks/backup/restore/consumer_restore.cpp
index 5731a9a3883..a35d9d22c65 100644
--- a/ndb/src/kernel/blocks/backup/restore/consumer_restore.cpp
+++ b/ndb/src/kernel/blocks/backup/restore/consumer_restore.cpp
@@ -16,6 +16,7 @@
#include "consumer_restore.hpp"
#include <NdbSleep.h>
+#include <NdbDictionaryImpl.hpp>
extern FilteredNdbOut err;
extern FilteredNdbOut info;
@@ -36,9 +37,6 @@ BackupRestore::init()
if (m_ndb == NULL)
return false;
- // Turn off table name completion
- m_ndb->useFullyQualifiedNames(false);
-
m_ndb->init(1024);
if (m_ndb->waitUntilReady(30) != 0)
{
@@ -102,19 +100,165 @@ BackupRestore::~BackupRestore()
release();
}
+static
+int
+match_blob(const char * name){
+ int cnt, id1, id2;
+ char buf[256];
+ if((cnt = sscanf(name, "%[^/]/%[^/]/NDB$BLOB_%d_%d", buf, buf, &id1, &id2)) == 4){
+ return id1;
+ }
+
+ return -1;
+}
+
+const NdbDictionary::Table*
+BackupRestore::get_table(const NdbDictionary::Table* tab){
+ if(m_cache.m_old_table == tab)
+ return m_cache.m_new_table;
+ m_cache.m_old_table = tab;
+
+ int cnt, id1, id2;
+ char buf[256];
+ if((cnt = sscanf(tab->getName(), "%[^/]/%[^/]/NDB$BLOB_%d_%d", buf, buf, &id1, &id2)) == 4){
+ BaseString::snprintf(buf, sizeof(buf), "NDB$BLOB_%d_%d", m_new_tables[id1]->getTableId(), id2);
+ m_cache.m_new_table = m_ndb->getDictionary()->getTable(buf);
+ } else {
+ m_cache.m_new_table = m_new_tables[tab->getTableId()];
+ }
+
+ return m_cache.m_new_table;
+}
+
+bool
+BackupRestore::finalize_table(const TableS & table){
+ bool ret= true;
+ if (!m_restore && !m_restore_meta)
+ return ret;
+ if (table.have_auto_inc())
+ {
+ Uint64 max_val= table.get_max_auto_val();
+ Uint64 auto_val= m_ndb->readAutoIncrementValue(get_table(table.m_dictTable));
+ if (max_val+1 > auto_val || auto_val == ~(Uint64)0)
+ ret= m_ndb->setAutoIncrementValue(get_table(table.m_dictTable), max_val+1, false);
+ }
+ return ret;
+}
+
bool
BackupRestore::table(const TableS & table){
- if (!m_restore_meta)
+ if (!m_restore && !m_restore_meta)
return true;
+ const char * name = table.getTableName();
+
+ /**
+ * Ignore blob tables
+ */
+ if(match_blob(name) >= 0)
+ return true;
+
+ const NdbTableImpl & tmptab = NdbTableImpl::getImpl(* table.m_dictTable);
+ if(tmptab.m_indexType != NdbDictionary::Index::Undefined){
+ m_indexes.push_back(table.m_dictTable);
+ return true;
+ }
+
+ BaseString tmp(name);
+ Vector<BaseString> split;
+ if(tmp.split(split, "/") != 3){
+ err << "Invalid table name format " << name << endl;
+ return false;
+ }
+
+ m_ndb->setDatabaseName(split[0].c_str());
+ m_ndb->setSchemaName(split[1].c_str());
+
NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
- if (dict->createTable(*table.m_dictTable) == -1)
- {
- err << "Create table " << table.getTableName() << " failed: "
- << dict->getNdbError() << endl;
+ if(m_restore_meta){
+ NdbDictionary::Table copy(*table.m_dictTable);
+
+ copy.setName(split[2].c_str());
+
+ if (dict->createTable(copy) == -1)
+ {
+ err << "Create table " << table.getTableName() << " failed: "
+ << dict->getNdbError() << endl;
+ return false;
+ }
+ info << "Successfully restored table " << table.getTableName()<< endl ;
+ }
+
+ const NdbDictionary::Table* tab = dict->getTable(split[2].c_str());
+ if(tab == 0){
+ err << "Unable to find table: " << split[2].c_str() << endl;
return false;
}
- info << "Successfully restored table " << table.getTableName()<< endl ;
+ if(m_restore_meta){
+ m_ndb->setAutoIncrementValue(tab, ~(Uint64)0, false);
+ }
+ const NdbDictionary::Table* null = 0;
+ m_new_tables.fill(table.m_dictTable->getTableId(), null);
+ m_new_tables[table.m_dictTable->getTableId()] = tab;
+ return true;
+}
+
+bool
+BackupRestore::endOfTables(){
+ if(!m_restore_meta)
+ return true;
+
+ NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
+ for(size_t i = 0; i<m_indexes.size(); i++){
+ const NdbTableImpl & indtab = NdbTableImpl::getImpl(* m_indexes[i]);
+
+ BaseString tmp(indtab.m_primaryTable.c_str());
+ Vector<BaseString> split;
+ if(tmp.split(split, "/") != 3){
+ err << "Invalid table name format " << indtab.m_primaryTable.c_str()
+ << endl;
+ return false;
+ }
+
+ m_ndb->setDatabaseName(split[0].c_str());
+ m_ndb->setSchemaName(split[1].c_str());
+
+ const NdbDictionary::Table * prim = dict->getTable(split[2].c_str());
+ if(prim == 0){
+ err << "Unable to find base table \"" << split[2].c_str()
+ << "\" for index "
+ << indtab.getName() << endl;
+ return false;
+ }
+ NdbTableImpl& base = NdbTableImpl::getImpl(*prim);
+ NdbIndexImpl* idx;
+ int id;
+ char idxName[255], buf[255];
+ if(sscanf(indtab.getName(), "%[^/]/%[^/]/%d/%s",
+ buf, buf, &id, idxName) != 4){
+ err << "Invalid index name format " << indtab.getName() << endl;
+ return false;
+ }
+ if(NdbDictInterface::create_index_obj_from_table(&idx, &indtab, &base))
+ {
+ err << "Failed to create index " << idxName
+ << " on " << split[2].c_str() << endl;
+ return false;
+ }
+ idx->setName(idxName);
+ if(dict->createIndex(* idx) != 0)
+ {
+ delete idx;
+ err << "Failed to create index " << idxName
+ << " on " << split[2].c_str() << endl
+ << dict->getNdbError() << endl;
+
+ return false;
+ }
+ delete idx;
+ info << "Successfully created index " << idxName
+ << " on " << split[2].c_str() << endl;
+ }
return true;
}
@@ -161,8 +305,9 @@ void BackupRestore::tuple_a(restore_callback_t *cb)
} // if
const TupleS &tup = *(cb->tup);
- const TableS * table = tup.getTable();
- NdbOperation * op = cb->connection->getNdbOperation(table->getTableName());
+ const NdbDictionary::Table * table = get_table(tup.getTable()->m_dictTable);
+
+ NdbOperation * op = cb->connection->getNdbOperation(table);
if (op == NULL)
{
@@ -189,6 +334,10 @@ void BackupRestore::tuple_a(restore_callback_t *cb)
int arraySize = attr_desc->arraySize;
char * dataPtr = attr_data->string_value;
Uint32 length = (size * arraySize) / 8;
+
+ if (j == 0 && tup.getTable()->have_auto_inc(i))
+ tup.getTable()->update_max_auto_val(dataPtr,size);
+
if (attr_desc->m_column->getPrimaryKey())
{
if (j == 1) continue;
@@ -203,8 +352,9 @@ void BackupRestore::tuple_a(restore_callback_t *cb)
ret = op->setValue(i, dataPtr, length);
}
if (ret < 0) {
- ndbout_c("Column: %d type %d",i,
- attr_desc->m_column->getType());
+ ndbout_c("Column: %d type %d %d %d %d",i,
+ attr_desc->m_column->getType(),
+ size, arraySize, attr_data->size);
break;
}
}
@@ -349,8 +499,8 @@ BackupRestore::logEntry(const LogEntry & tup)
exit(-1);
} // if
- const TableS * table = tup.m_table;
- NdbOperation * op = trans->getNdbOperation(table->getTableName());
+ const NdbDictionary::Table * table = get_table(tup.m_table->m_dictTable);
+ NdbOperation * op = trans->getNdbOperation(table);
if (op == NULL)
{
err << "Cannot get operation: " << trans->getNdbError() << endl;
@@ -382,8 +532,11 @@ BackupRestore::logEntry(const LogEntry & tup)
int arraySize = attr->Desc->arraySize;
const char * dataPtr = attr->Data.string_value;
+ if (tup.m_table->have_auto_inc(attr->Desc->attrId))
+ tup.m_table->update_max_auto_val(dataPtr,size);
+
const Uint32 length = (size / 8) * arraySize;
- if (attr->Desc->m_column->getPrimaryKey())
+ if (attr->Desc->m_column->getPrimaryKey())
op->equal(attr->Desc->attrId, dataPtr, length);
else
op->setValue(attr->Desc->attrId, dataPtr, length);
@@ -514,3 +667,6 @@ BackupRestore::tuple(const TupleS & tup)
m_dataCount++;
}
#endif
+
+template class Vector<NdbDictionary::Table*>;
+template class Vector<const NdbDictionary::Table*>;
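
get_table() above translates a table object from the backup into the table just created in the target cluster, and memoizes the last answer in a one-entry cache so a long run of tuples from the same table does a single lookup. A minimal sketch of that shape with plain types (the dictionary access and blob-table handling are left out):

#include <cstdint>
#include <map>

struct Table { uint32_t id; };

class TableMapper {
public:
  TableMapper() : m_cache_old(0), m_cache_new(0) {}

  // Register the table created in the target cluster for an old table id.
  void add(uint32_t old_id, const Table* new_tab) { m_new_tables[old_id] = new_tab; }

  // Map an old table to its new counterpart, caching the last hit so the
  // common case (consecutive tuples from one table) skips the map lookup.
  const Table* get(const Table* old_tab)
  {
    if (m_cache_old == old_tab)
      return m_cache_new;
    m_cache_old = old_tab;
    std::map<uint32_t, const Table*>::const_iterator it =
        m_new_tables.find(old_tab->id);
    m_cache_new = (it == m_new_tables.end()) ? 0 : it->second;
    return m_cache_new;
  }

private:
  std::map<uint32_t, const Table*> m_new_tables;
  const Table* m_cache_old;
  const Table* m_cache_new;
};
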
diff --git a/ndb/src/kernel/blocks/backup/restore/consumer_restore.hpp b/ndb/src/kernel/blocks/backup/restore/consumer_restore.hpp
index 2d36501bf40..59e2734ea1f 100644
--- a/ndb/src/kernel/blocks/backup/restore/consumer_restore.hpp
+++ b/ndb/src/kernel/blocks/backup/restore/consumer_restore.hpp
@@ -42,12 +42,14 @@ public:
m_tuples = 0;
m_free_callback = 0;
m_transactions = 0;
+ m_cache.m_old_table = 0;
}
virtual ~BackupRestore();
virtual bool init();
virtual void release();
virtual bool table(const TableS &);
+ virtual bool endOfTables();
virtual void tuple(const TupleS &);
virtual void tuple_free();
virtual void tuple_a(restore_callback_t *cb);
@@ -57,6 +59,7 @@ public:
virtual void endOfTuples();
virtual void logEntry(const LogEntry &);
virtual void endOfLogEntrys();
+ virtual bool finalize_table(const TableS &);
void connectToMysql();
Ndb * m_ndb;
bool m_restore;
@@ -70,6 +73,20 @@ public:
TupleS *m_tuples;
restore_callback_t *m_callback;
restore_callback_t *m_free_callback;
+
+ /**
+ * m_new_table_ids[X] = Y;
+ * X - old table id
+ * Y != 0 - new table
+ */
+ Vector<const NdbDictionary::Table*> m_new_tables;
+ struct {
+ const NdbDictionary::Table* m_old_table;
+ const NdbDictionary::Table* m_new_table;
+ } m_cache;
+ const NdbDictionary::Table* get_table(const NdbDictionary::Table* );
+
+ Vector<const NdbDictionary::Table*> m_indexes;
};
#endif
diff --git a/ndb/src/kernel/blocks/backup/restore/main.cpp b/ndb/src/kernel/blocks/backup/restore/main.cpp
index 23805173484..f7b1479cc93 100644
--- a/ndb/src/kernel/blocks/backup/restore/main.cpp
+++ b/ndb/src/kernel/blocks/backup/restore/main.cpp
@@ -206,6 +206,7 @@ free_data_callback()
int
main(int argc, const char** argv)
{
+ ndb_init();
if (!readArguments(argc, argv))
{
return -1;
@@ -276,81 +277,102 @@ main(int argc, const char** argv)
}
}
+ for(i= 0; i < g_consumers.size(); i++)
+ if (!g_consumers[i]->endOfTables())
+ {
+ ndbout_c("Restore: Failed while closing tables");
+ return -11;
+ }
+
if (ga_restore || ga_print)
{
- if (ga_restore)
+ if (ga_restore)
+ {
+ RestoreDataIterator dataIter(metaData, &free_data_callback);
+
+ // Read data file header
+ if (!dataIter.readHeader())
{
- RestoreDataIterator dataIter(metaData, &free_data_callback);
-
- // Read data file header
- if (!dataIter.readHeader())
- {
- ndbout << "Failed to read header of data file. Exiting..." ;
- return -11;
- }
-
-
- while (dataIter.readFragmentHeader(res= 0))
+ ndbout << "Failed to read header of data file. Exiting..." ;
+ return -11;
+ }
+
+
+ while (dataIter.readFragmentHeader(res= 0))
+ {
+ const TupleS* tuple;
+ while ((tuple = dataIter.getNextTuple(res= 1)) != 0)
{
- const TupleS* tuple;
- while ((tuple = dataIter.getNextTuple(res= 1)) != 0)
- {
- if (checkSysTable(tuple->getTable()->getTableName()))
- for(Uint32 i= 0; i < g_consumers.size(); i++)
- g_consumers[i]->tuple(* tuple);
- } // while (tuple != NULL);
-
- if (res < 0)
- {
- ndbout_c("Restore: An error occured while restoring data. "
- "Exiting...");
- return -1;
- }
- if (!dataIter.validateFragmentFooter()) {
- ndbout_c("Restore: Error validating fragment footer. "
- "Exiting...");
- return -1;
- }
- } // while (dataIter.readFragmentHeader(res))
+ if (checkSysTable(tuple->getTable()->getTableName()))
+ for(Uint32 i= 0; i < g_consumers.size(); i++)
+ g_consumers[i]->tuple(* tuple);
+ } // while (tuple != NULL);
if (res < 0)
{
- err << "Restore: An error occured while restoring data. Exiting... res=" << res << endl;
+ ndbout_c("Restore: An error occured while restoring data. "
+ "Exiting...");
return -1;
}
-
-
- dataIter.validateFooter(); //not implemented
-
- for (Uint32 i= 0; i < g_consumers.size(); i++)
- g_consumers[i]->endOfTuples();
-
- RestoreLogIterator logIter(metaData);
- if (!logIter.readHeader())
- {
- err << "Failed to read header of data file. Exiting..." << endl;
+ if (!dataIter.validateFragmentFooter()) {
+ ndbout_c("Restore: Error validating fragment footer. "
+ "Exiting...");
return -1;
}
-
- const LogEntry * logEntry = 0;
- while ((logEntry = logIter.getNextLogEntry(res= 0)) != 0)
- {
- if (checkSysTable(logEntry->m_table->getTableName()))
- for(Uint32 i= 0; i < g_consumers.size(); i++)
- g_consumers[i]->logEntry(* logEntry);
- }
- if (res < 0)
+ } // while (dataIter.readFragmentHeader(res))
+
+ if (res < 0)
+ {
+ err << "Restore: An error occured while restoring data. Exiting... res=" << res << endl;
+ return -1;
+ }
+
+
+ dataIter.validateFooter(); //not implemented
+
+ for (i= 0; i < g_consumers.size(); i++)
+ g_consumers[i]->endOfTuples();
+
+ RestoreLogIterator logIter(metaData);
+ if (!logIter.readHeader())
+ {
+ err << "Failed to read header of data file. Exiting..." << endl;
+ return -1;
+ }
+
+ const LogEntry * logEntry = 0;
+ while ((logEntry = logIter.getNextLogEntry(res= 0)) != 0)
+ {
+ if (checkSysTable(logEntry->m_table->getTableName()))
+ for(Uint32 i= 0; i < g_consumers.size(); i++)
+ g_consumers[i]->logEntry(* logEntry);
+ }
+ if (res < 0)
+ {
+ err << "Restore: An restoring the data log. Exiting... res=" << res << endl;
+ return -1;
+ }
+ logIter.validateFooter(); //not implemented
+ for (i= 0; i < g_consumers.size(); i++)
+ g_consumers[i]->endOfLogEntrys();
+ for(i = 0; i<metaData.getNoOfTables(); i++)
+ {
+ if (checkSysTable(metaData[i]->getTableName()))
{
- err << "Restore: An restoring the data log. Exiting... res=" << res << endl;
- return -1;
+ for(Uint32 j= 0; j < g_consumers.size(); j++)
+ if (!g_consumers[j]->finalize_table(* metaData[i]))
+ {
+ ndbout_c("Restore: Failed to finalize restore table: %s. "
+ "Exiting...",
+ metaData[i]->getTableName());
+ return -11;
+ }
}
- logIter.validateFooter(); //not implemented
- for (i= 0; i < g_consumers.size(); i++)
- g_consumers[i]->endOfLogEntrys();
}
+ }
}
clearConsumers();
- return 1;
+ return 0;
} // main
template class Vector<BackupConsumer*>;
diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
index 0f25391fccb..234d832655c 100644
--- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
+++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
@@ -46,6 +46,7 @@
// Used here only to print event reports on stdout/console.
EventLogger g_eventLogger;
+extern int simulate_error_during_shutdown;
Cmvmi::Cmvmi(const Configuration & conf) :
SimulatedBlock(CMVMI, conf)
@@ -54,6 +55,16 @@ Cmvmi::Cmvmi(const Configuration & conf) :
{
BLOCK_CONSTRUCTOR(Cmvmi);
+ Uint32 long_sig_buffer_size;
+ const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndb_mgm_get_int_parameter(p, CFG_DB_LONG_SIGNAL_BUFFER,
+ &long_sig_buffer_size);
+
+ long_sig_buffer_size= long_sig_buffer_size / 256;
+ g_sectionSegmentPool.setSize(long_sig_buffer_size);
+
// Add received signals
addRecSignal(GSN_CONNECT_REP, &Cmvmi::execCONNECT_REP);
addRecSignal(GSN_DISCONNECT_REP, &Cmvmi::execDISCONNECT_REP);
@@ -86,7 +97,7 @@ Cmvmi::Cmvmi(const Configuration & conf) :
const ndb_mgm_configuration_iterator * db = theConfig.getOwnConfigIterator();
for(unsigned j = 0; j<LogLevel::LOGLEVEL_CATEGORIES; j++){
Uint32 logLevel;
- if(!ndb_mgm_get_int_parameter(db, LogLevel::MIN_LOGLEVEL_ID+j, &logLevel)){
+ if(!ndb_mgm_get_int_parameter(db, CFG_MIN_LOGLEVEL+j, &logLevel)){
clogLevel.setLogLevel((LogLevel::EventCategory)j,
logLevel);
}
@@ -138,6 +149,16 @@ void Cmvmi::execNDB_TAMPER(Signal* signal)
if(ERROR_INSERTED(9997)){
ndbrequire(false);
}
+
+ if(ERROR_INSERTED(9996)){
+ simulate_error_during_shutdown= SIGSEGV;
+ ndbrequire(false);
+ }
+
+ if(ERROR_INSERTED(9995)){
+ simulate_error_during_shutdown= SIGSEGV;
+ kill(getpid(), SIGABRT);
+ }
}//execNDB_TAMPER()
void Cmvmi::execSET_LOGLEVELORD(Signal* signal)
@@ -148,9 +169,9 @@ void Cmvmi::execSET_LOGLEVELORD(Signal* signal)
jamEntry();
for(unsigned int i = 0; i<llOrd->noOfEntries; i++){
- category = (LogLevel::EventCategory)llOrd->theCategories[i];
- level = llOrd->theLevels[i];
-
+ category = (LogLevel::EventCategory)(llOrd->theData[i] >> 16);
+ level = llOrd->theData[i] & 0xFFFF;
+
clogLevel.setLogLevel(category, level);
}
}//execSET_LOGLEVELORD()
@@ -175,10 +196,10 @@ void Cmvmi::execEVENT_REP(Signal* signal)
Uint32 threshold = 16;
LogLevel::EventCategory eventCategory = (LogLevel::EventCategory)0;
- for(unsigned int i = 0; i< EventLogger::matrixSize; i++){
- if(EventLogger::matrix[i].eventType == eventType){
- eventCategory = EventLogger::matrix[i].eventCategory;
- threshold = EventLogger::matrix[i].threshold;
+ for(unsigned int i = 0; i< EventLoggerBase::matrixSize; i++){
+ if(EventLoggerBase::matrix[i].eventType == eventType){
+ eventCategory = EventLoggerBase::matrix[i].eventCategory;
+ threshold = EventLoggerBase::matrix[i].threshold;
break;
}
}
@@ -229,17 +250,7 @@ Cmvmi::execEVENT_SUBSCRIBE_REQ(Signal * signal){
sendSignal(subReq->blockRef, GSN_EVENT_SUBSCRIBE_REF, signal, 1, JBB);
return;
}
- /**
- * If it's a new subscription, clear the loglevel
- *
- * Clear only if noOfEntries is 0, this is needed beacuse we set
- * the default loglevels for the MGMT nodes during the inital connect phase.
- * See reportConnected().
- */
- if (subReq->noOfEntries == 0){
- ptr.p->logLevel.clear();
- }
-
+ ptr.p->logLevel.clear();
ptr.p->blockRef = subReq->blockRef;
}
@@ -255,10 +266,9 @@ Cmvmi::execEVENT_SUBSCRIBE_REQ(Signal * signal){
LogLevel::EventCategory category;
Uint32 level = 0;
for(Uint32 i = 0; i<subReq->noOfEntries; i++){
- category = (LogLevel::EventCategory)subReq->theCategories[i];
- level = subReq->theLevels[i];
- ptr.p->logLevel.setLogLevel(category,
- level);
+ category = (LogLevel::EventCategory)(subReq->theData[i] >> 16);
+ level = subReq->theData[i] & 0xFFFF;
+ ptr.p->logLevel.setLogLevel(category, level);
}
}
@@ -363,11 +373,6 @@ void Cmvmi::execCLOSE_COMREQ(Signal* signal)
globalTransporterRegistry.setIOState(i, HaltIO);
globalTransporterRegistry.do_disconnect(i);
-
- /**
- * Cancel possible event subscription
- */
- cancelSubscription(i);
}
}
if (failNo != 0) {
@@ -473,6 +478,8 @@ void Cmvmi::execDISCONNECT_REP(Signal *signal)
globalTransporterRegistry.do_connect(hostId);
}
+ cancelSubscription(hostId);
+
signal->theData[0] = EventReport::Disconnected;
signal->theData[1] = hostId;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
@@ -518,20 +525,6 @@ void Cmvmi::execCONNECT_REP(Signal *signal){
if(type == NodeInfo::MGM){
jam();
globalTransporterRegistry.setIOState(hostId, NoHalt);
-
- EventSubscribeReq* dst = (EventSubscribeReq *)&signal->theData[0];
-
- for (Uint32 i = 0; i < EventLogger::defEventLogMatrixSize; i++) {
- dst->theCategories[i] = EventLogger::defEventLogMatrix[i].eventCategory;
- dst->theLevels[i] = EventLogger::defEventLogMatrix[i].threshold;
- }
-
- dst->noOfEntries = EventLogger::defEventLogMatrixSize;
- /* The BlockNumber is hardcoded as 1 in MgmtSrvr */
- dst->blockRef = numberToRef(MIN_API_BLOCK_NO, hostId);
-
- execEVENT_SUBSCRIBE_REQ(signal);
-
}
//------------------------------------------
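
Two of the Cmvmi hunks above switch the log-level signals to carry each (category, level) pair packed into one 32-bit word, category in the upper half and level in the lower half; a tiny sketch of that encoding:

#include <cstdint>
#include <cassert>

// Pack a log-level entry: category in bits 31..16, level in bits 15..0.
static uint32_t pack_entry(uint32_t category, uint32_t level)
{
  return (category << 16) | (level & 0xFFFF);
}

static void unpack_entry(uint32_t word, uint32_t& category, uint32_t& level)
{
  category = word >> 16;
  level    = word & 0xFFFF;
}

int main()
{
  uint32_t word = pack_entry(7 /* category */, 15 /* level */);
  uint32_t cat, lvl;
  unpack_entry(word, cat, lvl);
  assert(cat == 7 && lvl == 15);
  return 0;
}
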
diff --git a/ndb/src/kernel/blocks/dbacc/Dbacc.hpp b/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
index cc3e646f219..169b77c0d85 100644
--- a/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
+++ b/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
@@ -194,7 +194,6 @@ ndbout << "Ptr: " << ptr.p->word32 << " \tIndex: " << tmp_string << " \tValue: "
#define ZTABLESIZE 16
#define ZTABMAXINDEX 3
#define ZUNDEFINED_OP 6
-#define ZUNDOPAGESIZE 64
#define ZUNDOHEADSIZE 7
#define ZUNLOCKED 1
#define ZUNDOPAGE_BASE_ADD 2
@@ -830,6 +829,7 @@ struct Rootfragmentrec {
Uint32 nextroot;
Uint32 roothashcheck;
Uint32 noOfElements;
+ Uint32 m_commit_count;
State rootState;
}; /* p2c: size = 72 bytes */
@@ -894,8 +894,8 @@ struct SrVersionRec {
/* TABREC */
/* --------------------------------------------------------------------------------- */
struct Tabrec {
- Uint32 fragholder[NO_OF_FRAG_PER_NODE];
- Uint32 fragptrholder[NO_OF_FRAG_PER_NODE];
+ Uint32 fragholder[MAX_FRAG_PER_NODE];
+ Uint32 fragptrholder[MAX_FRAG_PER_NODE];
Uint32 tabUserPtr;
BlockReference tabUserRef;
};
@@ -926,6 +926,7 @@ private:
void execACC_OVER_REC(Signal* signal);
void execACC_SAVE_PAGES(Signal* signal);
void execNEXTOPERATION(Signal* signal);
+ void execREAD_PSUEDO_REQ(Signal* signal);
// Received signals
void execSTTOR(Signal* signal);
@@ -1086,10 +1087,10 @@ private:
void deleteLongKey(Signal* signal);
void removeFromPageArrayList(Signal* signal);
void insertPageArrayList(Signal* signal);
- void checkPageArrayList(Signal* signal, char *);
- void checkPageB4Insert(Uint32, char *);
- void checkPageB4Remove(Uint32, char *);
- void checkIndexInLongKeyPage(Uint32, char *);
+ void checkPageArrayList(Signal* signal, const char *);
+ void checkPageB4Insert(Uint32, const char *);
+ void checkPageB4Remove(Uint32, const char *);
+ void checkIndexInLongKeyPage(Uint32, const char *);
void printoutInfoAndShutdown(LongKeyPage *);
void releaseLongPage(Signal* signal);
void abortOperation(Signal* signal);
diff --git a/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp b/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
index b22fd6ce641..2705f95f6dd 100644
--- a/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
+++ b/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
@@ -32,7 +32,6 @@ void Dbacc::initData()
crootfragmentsize = ZROOTFRAGMENTSIZE;
cdirrangesize = ZDIRRANGESIZE;
coverflowrecsize = ZOVERFLOWRECSIZE;
- cundopagesize = ZUNDOPAGESIZE;
cfsConnectsize = ZFS_CONNECTSIZE;
cfsOpsize = ZFS_OPSIZE;
cscanRecSize = ZSCAN_REC_SIZE;
@@ -136,8 +135,25 @@ void Dbacc::initRecords()
Dbacc::Dbacc(const class Configuration & conf):
SimulatedBlock(DBACC, conf)
{
+ Uint32 log_page_size= 0;
BLOCK_CONSTRUCTOR(Dbacc);
+ const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndb_mgm_get_int_parameter(p, CFG_DB_UNDO_INDEX_BUFFER,
+ &log_page_size);
+
+ /**
+ * Always set page size in half MBytes
+ */
+ cundopagesize= (log_page_size / sizeof(Undopage));
+ Uint32 mega_byte_part= cundopagesize & 15;
+ if (mega_byte_part != 0) {
+ jam();
+ cundopagesize+= (16 - mega_byte_part);
+ }
+
// Transit signals
addRecSignal(GSN_DUMP_STATE_ORD, &Dbacc::execDUMP_STATE_ORD);
addRecSignal(GSN_DEBUG_SIG, &Dbacc::execDEBUG_SIG);
@@ -148,6 +164,7 @@ Dbacc::Dbacc(const class Configuration & conf):
addRecSignal(GSN_ACC_OVER_REC, &Dbacc::execACC_OVER_REC);
addRecSignal(GSN_ACC_SAVE_PAGES, &Dbacc::execACC_SAVE_PAGES);
addRecSignal(GSN_NEXTOPERATION, &Dbacc::execNEXTOPERATION);
+ addRecSignal(GSN_READ_PSUEDO_REQ, &Dbacc::execREAD_PSUEDO_REQ);
// Received signals
addRecSignal(GSN_STTOR, &Dbacc::execSTTOR);
diff --git a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
index 9cfac0ad2a2..9a1bbd86562 100644
--- a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
+++ b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
@@ -17,6 +17,7 @@
#define DBACC_C
#include "Dbacc.hpp"
+#include <AttributeHeader.hpp>
#include <signaldata/AccFrag.hpp>
#include <signaldata/AccScan.hpp>
#include <signaldata/AccLock.hpp>
@@ -1021,7 +1022,7 @@ void Dbacc::initialiseTableRec(Signal* signal)
for (tabptr.i = 0; tabptr.i < ctablesize; tabptr.i++) {
refresh_watch_dog();
ptrAss(tabptr, tabrec);
- for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
+ for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
tabptr.p->fragholder[i] = RNIL;
tabptr.p->fragptrholder[i] = RNIL;
}//for
@@ -1051,6 +1052,7 @@ void Dbacc::initRootfragrec(Signal* signal)
rootfragrecptr.p->mytabptr = req->tableId;
rootfragrecptr.p->roothashcheck = req->kValue + req->lhFragBits;
rootfragrecptr.p->noOfElements = 0;
+ rootfragrecptr.p->m_commit_count = 0;
for (Uint32 i = 0; i < MAX_PARALLEL_SCANS_PER_FRAG; i++) {
rootfragrecptr.p->scan[i] = RNIL;
}//for
@@ -1187,7 +1189,7 @@ void Dbacc::releaseRootFragResources(Signal* signal, Uint32 tableId)
TabrecPtr tabPtr;
tabPtr.i = tableId;
ptrCheckGuard(tabPtr, ctablesize, tabrec);
- for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
+ for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
jam();
if (tabPtr.p->fragholder[i] != RNIL) {
jam();
@@ -1419,7 +1421,7 @@ void Dbacc::execFSREMOVEREF(Signal* signal)
/* -------------------------------------------------------------------------- */
bool Dbacc::addfragtotab(Signal* signal, Uint32 rootIndex, Uint32 fid)
{
- for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
+ for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
jam();
if (tabptr.p->fragholder[i] == RNIL) {
jam();
@@ -2335,46 +2337,51 @@ void Dbacc::execACC_COMMITREQ(Signal* signal)
Toperation = operationRecPtr.p->operation;
operationRecPtr.p->transactionstate = IDLE;
operationRecPtr.p->operation = ZUNDEFINED_OP;
- if (Toperation != ZINSERT) {
- if (Toperation != ZDELETE) {
- return;
+ if(Toperation != ZREAD){
+ rootfragrecptr.p->m_commit_count++;
+ if (Toperation != ZINSERT) {
+ if (Toperation != ZDELETE) {
+ return;
+ } else {
+ jam();
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ rootfragrecptr.p->noOfElements--;
+ fragrecptr.p->slack += operationRecPtr.p->insertDeleteLen;
+ if (fragrecptr.p->slack > fragrecptr.p->slackCheck) {
+ /* TIME FOR JOIN BUCKETS PROCESS */
+ if (fragrecptr.p->expandCounter > 0) {
+ if (fragrecptr.p->expandFlag < 2) {
+ jam();
+ signal->theData[0] = fragrecptr.i;
+ signal->theData[1] = fragrecptr.p->p;
+ signal->theData[2] = fragrecptr.p->maxp;
+ signal->theData[3] = fragrecptr.p->expandFlag;
+ fragrecptr.p->expandFlag = 2;
+ sendSignal(cownBlockref, GSN_SHRINKCHECK2, signal, 4, JBB);
+ }//if
+ }//if
+ }//if
+ }//if
} else {
- jam();
+ jam(); /* EXPAND PROCESS HANDLING */
rootfragrecptr.i = fragrecptr.p->myroot;
ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- rootfragrecptr.p->noOfElements--;
- fragrecptr.p->slack += operationRecPtr.p->insertDeleteLen;
- if (fragrecptr.p->slack > fragrecptr.p->slackCheck) { /* TIME FOR JOIN BUCKETS PROCESS */
- if (fragrecptr.p->expandCounter > 0) {
- if (fragrecptr.p->expandFlag < 2) {
- jam();
- signal->theData[0] = fragrecptr.i;
- signal->theData[1] = fragrecptr.p->p;
- signal->theData[2] = fragrecptr.p->maxp;
- signal->theData[3] = fragrecptr.p->expandFlag;
- fragrecptr.p->expandFlag = 2;
- sendSignal(cownBlockref, GSN_SHRINKCHECK2, signal, 4, JBB);
- }//if
- }//if
- }//if
- }//if
- } else {
- jam(); /* EXPAND PROCESS HANDLING */
- rootfragrecptr.i = fragrecptr.p->myroot;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
- rootfragrecptr.p->noOfElements++;
- fragrecptr.p->slack -= operationRecPtr.p->insertDeleteLen;
- if (fragrecptr.p->slack >= (Uint32)(1 << 31)) { /* IT MEANS THAT IF SLACK < ZERO */
- if (fragrecptr.p->expandFlag == 0) {
- jam();
- fragrecptr.p->expandFlag = 2;
- signal->theData[0] = fragrecptr.i;
- signal->theData[1] = fragrecptr.p->p;
- signal->theData[2] = fragrecptr.p->maxp;
- sendSignal(cownBlockref, GSN_EXPANDCHECK2, signal, 3, JBB);
+ rootfragrecptr.p->noOfElements++;
+ fragrecptr.p->slack -= operationRecPtr.p->insertDeleteLen;
+ if (fragrecptr.p->slack >= (1u << 31)) {
+ /* IT MEANS THAT IF SLACK < ZERO */
+ if (fragrecptr.p->expandFlag == 0) {
+ jam();
+ fragrecptr.p->expandFlag = 2;
+ signal->theData[0] = fragrecptr.i;
+ signal->theData[1] = fragrecptr.p->p;
+ signal->theData[2] = fragrecptr.p->maxp;
+ sendSignal(cownBlockref, GSN_EXPANDCHECK2, signal, 3, JBB);
+ }//if
}//if
}//if
- }//if
+ }
return;
}//Dbacc::execACC_COMMITREQ()
@@ -2435,7 +2442,7 @@ void Dbacc::execACC_LOCKREQ(Signal* signal)
ptrCheckGuard(tabptr, ctablesize, tabrec);
// find fragment (TUX will know it)
if (req->fragPtrI == RNIL) {
- for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
+ for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
jam();
if (tabptr.p->fragptrholder[i] != RNIL) {
rootfragrecptr.i = tabptr.p->fragptrholder[i];
@@ -2472,7 +2479,7 @@ void Dbacc::execACC_LOCKREQ(Signal* signal)
Uint32 opCode = ZSCAN_OP;
signal->theData[0] = operationRecPtr.i;
signal->theData[1] = fragrecptr.i;
- signal->theData[2] = opCode | (lockMode << 4) | (1 << 31);
+ signal->theData[2] = opCode | (lockMode << 4) | (1u << 31);
signal->theData[3] = req->hashValue;
signal->theData[4] = 1; // fake primKeyLen
signal->theData[5] = req->transId1;
@@ -4044,7 +4051,7 @@ void Dbacc::deleteLongKey(Signal* signal)
}//Dbacc::deleteLongKey()
-void Dbacc::checkIndexInLongKeyPage(Uint32 pageId, char *calledFrom) {
+void Dbacc::checkIndexInLongKeyPage(Uint32 pageId, const char *calledFrom) {
Page8Ptr pagePtr;
LongKeyPage *page;
Uint32 indexNo;
@@ -4199,7 +4206,7 @@ void Dbacc::insertPageArrayList(Signal* signal)
// --------------------------------------------------------------------------------- */
// Check the page array list.
// --------------------------------------------------------------------------------- */
-void Dbacc::checkPageArrayList(Signal* signal, char *calledFrom)
+void Dbacc::checkPageArrayList(Signal* signal, const char *calledFrom)
{
Page8Ptr pagePtr;
Uint32 pageArrayIndex;
@@ -4244,7 +4251,7 @@ void Dbacc::checkPageArrayList(Signal* signal, char *calledFrom)
// --------------------------------------------------------------------------------- */
// Check the page to put into the pageArrayList.
// --------------------------------------------------------------------------------- */
-void Dbacc::checkPageB4Insert(Uint32 pageId, char *calledFrom) {
+void Dbacc::checkPageB4Insert(Uint32 pageId, const char *calledFrom) {
Page8Ptr pagePtr;
Uint32 pageArrayIndex;
LongKeyPage *page;
@@ -4311,7 +4318,7 @@ void Dbacc::checkPageB4Insert(Uint32 pageId, char *calledFrom) {
// --------------------------------------------------------------------------------- */
// Check the page to remove from the pageArrayList.
// --------------------------------------------------------------------------------- */
-void Dbacc::checkPageB4Remove(Uint32 pageId, char *calledFrom) {
+void Dbacc::checkPageB4Remove(Uint32 pageId, const char *calledFrom) {
Page8Ptr pagePtr;
Uint32 pageArrayIndex;
Uint32 noOfOccurrence = 0;
@@ -6503,7 +6510,7 @@ void Dbacc::endofexpLab(Signal* signal)
Uint32 noOfBuckets = (fragrecptr.p->maxp + 1) + fragrecptr.p->p;
Uint32 Thysteres = fragrecptr.p->maxloadfactor - fragrecptr.p->minloadfactor;
fragrecptr.p->slackCheck = noOfBuckets * Thysteres;
- if (fragrecptr.p->slack > (Uint32)(1 << 31)) {
+ if (fragrecptr.p->slack > (1u << 31)) {
jam();
/* IT MEANS THAT IF SLACK < ZERO */
/* --------------------------------------------------------------------------------- */
@@ -6967,7 +6974,7 @@ void Dbacc::execSHRINKCHECK2(Signal* signal)
/*--------------------------------------------------------------*/
return;
}//if
- if (fragrecptr.p->slack > (Uint32)(1 << 31)) {
+ if (fragrecptr.p->slack > (1u << 31)) {
jam();
/*--------------------------------------------------------------*/
/* THE SLACK IS NEGATIVE, IN THIS CASE WE WILL NOT NEED ANY */
@@ -7206,7 +7213,7 @@ void Dbacc::endofshrinkbucketLab(Signal* signal)
expDirRangePtr.p->dirArray[fragrecptr.p->expSenderDirIndex >> 8] = RNIL;
}//if
}//if
- if (fragrecptr.p->slack < (Uint32)(1 << 31)) {
+ if (fragrecptr.p->slack < (1u << 31)) {
jam();
/*--------------------------------------------------------------*/
/* THE SLACK IS POSITIVE, IN THIS CASE WE WILL CHECK WHETHER */
@@ -12184,7 +12191,7 @@ void Dbacc::takeOutReadyScanQueue(Signal* signal)
bool Dbacc::getrootfragmentrec(Signal* signal, RootfragmentrecPtr& rootPtr, Uint32 fid)
{
- for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
+ for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
jam();
if (tabptr.p->fragholder[i] == fid) {
jam();
@@ -12562,7 +12569,7 @@ void Dbacc::releaseLcpConnectRec(Signal* signal)
/* --------------------------------------------------------------------------------- */
void Dbacc::releaseOpRec(Signal* signal)
{
-#ifdef VM_TRACE
+#if 0
// DEBUG CODE
// Check that the operation to be released isn't
// already in the list of free operations
@@ -13384,3 +13391,32 @@ void Dbacc::execSET_VAR_REQ(Signal* signal)
#endif
}//execSET_VAR_REQ()
+
+void
+Dbacc::execREAD_PSUEDO_REQ(Signal* signal){
+ jamEntry();
+ fragrecptr.i = signal->theData[0];
+ Uint32 attrId = signal->theData[1];
+ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
+ Uint64 tmp;
+ switch(attrId){
+ case AttributeHeader::ROW_COUNT:
+ tmp = rootfragrecptr.p->noOfElements;
+ break;
+ case AttributeHeader::COMMIT_COUNT:
+ tmp = rootfragrecptr.p->m_commit_count;
+ break;
+ default:
+ tmp = 0;
+ }
+ memcpy(signal->theData, &tmp, 8); /* must be memcpy, gives strange results on
+ * Itanium gcc (GCC) 3.4.1 SMP Linux 2.4
+ * otherwise
+ */
+ // Uint32 * src = (Uint32*)&tmp;
+ // signal->theData[0] = src[0];
+ // signal->theData[1] = src[1];
+}
+
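The comment in execREAD_PSUEDO_REQ explains why the 64-bit counter is copied into the two 32-bit signal words with memcpy rather than through a Uint32* cast of &tmp: the aliasing cast miscompiled on Itanium with gcc 3.4.1. A minimal sketch of the portable pattern, assuming plain <cstdint> types instead of the kernel typedefs:

    #include <cstdint>
    #include <cstring>
    #include <cassert>

    // Split a 64-bit value into two 32-bit words without violating
    // strict-aliasing rules; memcpy is the portable way to do it.
    static void store_u64(uint32_t out[2], uint64_t value) {
      memcpy(out, &value, sizeof(value));
    }

    int main() {
      uint32_t words[2];
      store_u64(words, 0x1122334455667788ULL);
      uint64_t back = 0;
      memcpy(&back, words, sizeof(back));
      assert(back == 0x1122334455667788ULL);
      return 0;
    }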
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index 7126842459e..d1a8128ea7f 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -15,6 +15,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <ndb_global.h>
+#include <my_sys.h>
#define DBDICT_C
#include "Dbdict.hpp"
@@ -2866,8 +2867,6 @@ Dbdict::execALTER_TABLE_REQ(Signal* signal)
if(parseRecord.errorCode != 0){
jam();
c_opCreateTable.release(alterTabPtr);
- parseRecord.tablePtr.p->tabState = TableRecord::NOT_DEFINED;
- releaseTableObject(parseRecord.tablePtr.i, false);
alterTableRef(signal, req,
(AlterTableRef::ErrorCode) parseRecord.errorCode,
aParseRecord);
@@ -3052,8 +3051,6 @@ Dbdict::execALTER_TAB_REQ(Signal * signal)
if(parseRecord.errorCode != 0){
jam();
c_opCreateTable.release(alterTabPtr);
- parseRecord.tablePtr.p->tabState = TableRecord::NOT_DEFINED;
- releaseTableObject(parseRecord.tablePtr.i, false);
alterTabRef(signal, req,
(AlterTableRef::ErrorCode) parseRecord.errorCode,
aParseRecord);
@@ -3438,7 +3435,6 @@ Dbdict::execALTER_TAB_CONF(Signal * signal){
// Release resources
TableRecordPtr tabPtr;
c_tableRecordPool.getPtr(tabPtr, regAlterTabPtr->m_tablePtrI);
- tabPtr.p->tabState = TableRecord::NOT_DEFINED;
releaseTableObject(tabPtr.i, false);
c_opCreateTable.release(alterTabPtr);
c_blockState = BS_IDLE;
@@ -3479,12 +3475,19 @@ int Dbdict::handleAlterTab(AlterTabReq * req,
jam();
// Table rename
// Remove from hashtable
+#ifdef VM_TRACE
+ TableRecordPtr tmp;
+ ndbrequire(c_tableRecordHash.find(tmp, *origTablePtr.p));
+#endif
c_tableRecordHash.remove(origTablePtr);
strcpy(regAlterTabPtr->previousTableName, origTablePtr.p->tableName);
strcpy(origTablePtr.p->tableName, newTablePtr.p->tableName);
// Set new schema version
origTablePtr.p->tableVersion = newTablePtr.p->tableVersion;
// Put it back
+#ifdef VM_TRACE
+ ndbrequire(!c_tableRecordHash.find(tmp, *origTablePtr.p));
+#endif
c_tableRecordHash.add(origTablePtr);
return 0;
@@ -3505,12 +3508,19 @@ void Dbdict::revertAlterTable(Signal * signal,
TableRecordPtr tablePtr;
c_tableRecordPool.getPtr(tablePtr, tableId);
// Remove from hashtable
+#ifdef VM_TRACE
+ TableRecordPtr tmp;
+ ndbrequire(c_tableRecordHash.find(tmp, * tablePtr.p));
+#endif
c_tableRecordHash.remove(tablePtr);
// Restore name
strcpy(tablePtr.p->tableName, regAlterTabPtr->previousTableName);
// Revert schema version
tablePtr.p->tableVersion = tablePtr.p->tableVersion - 1;
// Put it back
+#ifdef VM_TRACE
+ ndbrequire(!c_tableRecordHash.find(tmp, * tablePtr.p));
+#endif
c_tableRecordHash.add(tablePtr);
return;
@@ -3572,7 +3582,6 @@ Dbdict::alterTab_writeTableConf(Signal* signal,
jam();
// Release resources
c_tableRecordPool.getPtr(tabPtr, regAlterTabPtr->m_tablePtrI);
- tabPtr.p->tabState = TableRecord::NOT_DEFINED;
releaseTableObject(tabPtr.i, false);
c_opCreateTable.release(alterTabPtr);
c_blockState = BS_IDLE;
@@ -4100,6 +4109,8 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {
req->noOfKeyAttr = tabPtr.p->noOfPrimkey;
req->noOfNewAttr = 0;
+ // noOfCharsets passed to TUP in upper half
+ req->noOfNewAttr |= (tabPtr.p->noOfCharsets << 16);
req->checksumIndicator = 1;
req->noOfAttributeGroups = 1;
req->GCPIndicator = 0;
@@ -4161,6 +4172,8 @@ Dbdict::sendLQHADDATTRREQ(Signal* signal,
entry.attrId = attrPtr.p->attributeId;
entry.attrDescriptor = attrPtr.p->attributeDescriptor;
entry.extTypeInfo = attrPtr.p->extType;
+ // charset number passed to TUP, TUX in upper half
+ entry.extTypeInfo |= (attrPtr.p->extPrecision & ~0xFFFF);
if (tabPtr.p->isIndex()) {
Uint32 primaryAttrId;
if (attrPtr.p->nextAttrInTable != RNIL) {
@@ -4456,7 +4469,6 @@ Dbdict::createTab_dropComplete(Signal* signal,
TableRecordPtr tabPtr;
c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
- tabPtr.p->tabState = TableRecord::NOT_DEFINED;
releaseTableObject(tabPtr.i);
PageRecordPtr pagePtr;
@@ -4540,6 +4552,15 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
parseP->errorLine = __LINE__;
return;
}
+
+ if(parseP->requestType == DictTabInfo::AlterTableFromAPI)
+ {
+ ndbrequire(!checkExist);
+ }
+ if(!checkExist)
+ {
+ ndbrequire(parseP->requestType == DictTabInfo::AlterTableFromAPI);
+ }
/* ---------------------------------------------------------------- */
// Verify that table name is an allowed table name.
@@ -4554,14 +4575,15 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
TableRecordPtr tablePtr;
c_tableRecordHash.find(tablePtr, keyRecord);
-
- if (checkExist)
+
+ if (checkExist){
jam();
/* ---------------------------------------------------------------- */
// Check if table already existed.
/* ---------------------------------------------------------------- */
tabRequire(tablePtr.i == RNIL, CreateTableRef::TableAlreadyExist);
-
+ }
+
switch (parseP->requestType) {
case DictTabInfo::CreateTableFromAPI: {
jam();
@@ -4634,12 +4656,13 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
strcpy(tablePtr.p->tableName, keyRecord.tableName);
if (parseP->requestType != DictTabInfo::AlterTableFromAPI) {
jam();
- c_tableRecordHash.add(tablePtr);
- }
-
#ifdef VM_TRACE
- ndbout_c("Dbdict: name=%s,id=%u", tablePtr.p->tableName, tablePtr.i);
+ ndbout_c("Dbdict: name=%s,id=%u", tablePtr.p->tableName, tablePtr.i);
+ TableRecordPtr tmp;
+ ndbrequire(!c_tableRecordHash.find(tmp, * tablePtr.p));
#endif
+ c_tableRecordHash.add(tablePtr);
+ }
//tablePtr.p->noOfPrimkey = tableDesc.NoOfKeyAttr;
//tablePtr.p->noOfNullAttr = tableDesc.NoOfNullable;
@@ -4678,11 +4701,12 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
handleTabInfo(it, parseP);
- if(parseP->errorCode != 0){
+ if(parseP->errorCode != 0)
+ {
/**
* Release table
*/
- releaseTableObject(tablePtr.i);
+ releaseTableObject(tablePtr.i, checkExist);
}
}//handleTabInfoInit()
@@ -4697,6 +4721,8 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it,
Uint32 keyLength = 0;
Uint32 attrCount = tablePtr.p->noOfAttributes;
Uint32 nullCount = 0;
+ Uint32 noOfCharsets = 0;
+ Uint16 charsets[128];
Uint32 recordLength = 0;
AttributeRecordPtr attrPtr;
c_attributeRecordHash.removeAll();
@@ -4751,6 +4777,31 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it,
attrPtr.p->extPrecision = attrDesc.AttributeExtPrecision;
attrPtr.p->extScale = attrDesc.AttributeExtScale;
attrPtr.p->extLength = attrDesc.AttributeExtLength;
+ // charset in upper half of precision
+ unsigned csNumber = (attrPtr.p->extPrecision >> 16);
+ if (csNumber != 0) {
+ CHARSET_INFO* cs = get_charset(csNumber, MYF(0));
+ if (cs == NULL) {
+ parseP->errorCode = CreateTableRef::InvalidCharset;
+ parseP->errorLine = __LINE__;
+ return;
+ }
+ unsigned i = 0;
+ while (i < noOfCharsets) {
+ if (charsets[i] == csNumber)
+ break;
+ i++;
+ }
+ if (i == noOfCharsets) {
+ noOfCharsets++;
+ if (noOfCharsets > sizeof(charsets)/sizeof(charsets[0])) {
+ parseP->errorCode = CreateTableRef::InvalidFormat;
+ parseP->errorLine = __LINE__;
+ return;
+ }
+ charsets[i] = csNumber;
+ }
+ }
/**
* Ignore incoming old-style type and recompute it.
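The new block in handleTabInfo records each attribute's charset number (carried in the upper half of extPrecision) into a small fixed array of distinct values, failing the request when the charset is unknown or when more than 128 distinct charsets appear. A minimal sketch of the same collect-if-new pattern, with a plain unsigned charset number standing in for the MySQL charset lookup:

    #include <cstddef>
    #include <cassert>

    // Record csNumber only if it is not already in 'charsets'.
    // Returns false when the fixed-size array would overflow.
    static bool note_charset(unsigned short charsets[], size_t& count,
                             size_t capacity, unsigned csNumber) {
      for (size_t i = 0; i < count; i++)
        if (charsets[i] == csNumber)
          return true;              // already recorded, nothing to do
      if (count >= capacity)
        return false;               // too many distinct charsets
      charsets[count++] = (unsigned short)csNumber;
      return true;
    }

    int main() {
      unsigned short charsets[128];
      size_t count = 0;
      assert(note_charset(charsets, count, 128, 8));
      assert(note_charset(charsets, count, 128, 8));  // duplicate, count unchanged
      assert(count == 1);
      return 0;
    }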
@@ -4814,6 +4865,7 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it,
tablePtr.p->noOfPrimkey = keyCount;
tablePtr.p->noOfNullAttr = nullCount;
+ tablePtr.p->noOfCharsets = noOfCharsets;
tablePtr.p->tupKeyLength = keyLength;
tabRequire(recordLength<= MAX_TUPLE_SIZE_IN_WORDS,
@@ -5465,7 +5517,14 @@ void Dbdict::releaseTableObject(Uint32 tableId, bool removeFromHash)
AttributeRecordPtr attrPtr;
c_tableRecordPool.getPtr(tablePtr, tableId);
if (removeFromHash)
+ {
+#ifdef VM_TRACE
+ TableRecordPtr tmp;
+ ndbrequire(c_tableRecordHash.find(tmp, * tablePtr.p));
+#endif
c_tableRecordHash.remove(tablePtr);
+ }
+ tablePtr.p->tabState = TableRecord::NOT_DEFINED;
Uint32 nextAttrRecord = tablePtr.p->firstAttribute;
while (nextAttrRecord != RNIL) {
@@ -6317,6 +6376,8 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
w.add(DictTabInfo::AttributeStoredInd, (Uint32)DictTabInfo::Stored);
// ext type overrides
w.add(DictTabInfo::AttributeExtType, aRec->extType);
+ w.add(DictTabInfo::AttributeExtPrecision, aRec->extPrecision);
+ w.add(DictTabInfo::AttributeExtScale, aRec->extScale);
w.add(DictTabInfo::AttributeExtLength, aRec->extLength);
w.add(DictTabInfo::AttributeEnd, (Uint32)true);
}
@@ -6510,6 +6571,8 @@ Dbdict::execDROP_INDX_REQ(Signal* signal)
jamEntry();
DropIndxReq* const req = (DropIndxReq*)signal->getDataPtrSend();
OpDropIndexPtr opPtr;
+
+ int err = DropIndxRef::BadRequestType;
const Uint32 senderRef = signal->senderBlockRef();
const DropIndxReq::RequestType requestType = req->getRequestType();
if (requestType == DropIndxReq::RT_USER) {
@@ -6524,6 +6587,34 @@ Dbdict::execDROP_INDX_REQ(Signal* signal)
return;
}
// forward initial request plus operation key to all
+ Uint32 indexId= req->getIndexId();
+ Uint32 indexVersion= req->getIndexVersion();
+ TableRecordPtr tmp;
+ int res = getMetaTablePtr(tmp, indexId, indexVersion);
+ switch(res){
+ case MetaData::InvalidArgument:
+ err = DropIndxRef::IndexNotFound;
+ goto error;
+ case MetaData::TableNotFound:
+ case MetaData::InvalidTableVersion:
+ err = DropIndxRef::InvalidIndexVersion;
+ goto error;
+ }
+
+ if (! tmp.p->isIndex()) {
+ jam();
+ err = DropIndxRef::NotAnIndex;
+ goto error;
+ }
+
+ if (tmp.p->indexState == TableRecord::IS_DROPPING){
+ jam();
+ err = DropIndxRef::IndexNotFound;
+ goto error;
+ }
+
+ tmp.p->indexState = TableRecord::IS_DROPPING;
+
req->setOpKey(++c_opRecordSequence);
NodeReceiverGroup rg(DBDICT, c_aliveNodes);
sendSignal(rg, GSN_DROP_INDX_REQ,
@@ -6573,12 +6664,13 @@ Dbdict::execDROP_INDX_REQ(Signal* signal)
return;
}
}
+error:
jam();
// return to sender
OpDropIndex opBad;
opPtr.p = &opBad;
opPtr.p->save(req);
- opPtr.p->m_errorCode = DropIndxRef::BadRequestType;
+ opPtr.p->m_errorCode = (DropIndxRef::ErrorCode)err;
opPtr.p->m_errorLine = __LINE__;
dropIndex_sendReply(signal, opPtr, true);
}
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
index de1d9757b2a..19c03a86e22 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
+++ b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
@@ -1507,19 +1507,19 @@ private:
// Common operation record pool
public:
- static const size_t opCreateTableSize = sizeof(CreateTableRecord);
- static const size_t opDropTableSize = sizeof(DropTableRecord);
- static const size_t opCreateIndexSize = sizeof(OpCreateIndex);
- static const size_t opDropIndexSize = sizeof(OpDropIndex);
- static const size_t opAlterIndexSize = sizeof(OpAlterIndex);
- static const size_t opBuildIndexSize = sizeof(OpBuildIndex);
- static const size_t opCreateEventSize = sizeof(OpCreateEvent);
- static const size_t opSubEventSize = sizeof(OpSubEvent);
- static const size_t opDropEventSize = sizeof(OpDropEvent);
- static const size_t opSignalUtilSize = sizeof(OpSignalUtil);
- static const size_t opCreateTriggerSize = sizeof(OpCreateTrigger);
- static const size_t opDropTriggerSize = sizeof(OpDropTrigger);
- static const size_t opAlterTriggerSize = sizeof(OpAlterTrigger);
+ STATIC_CONST( opCreateTableSize = sizeof(CreateTableRecord) );
+ STATIC_CONST( opDropTableSize = sizeof(DropTableRecord) );
+ STATIC_CONST( opCreateIndexSize = sizeof(OpCreateIndex) );
+ STATIC_CONST( opDropIndexSize = sizeof(OpDropIndex) );
+ STATIC_CONST( opAlterIndexSize = sizeof(OpAlterIndex) );
+ STATIC_CONST( opBuildIndexSize = sizeof(OpBuildIndex) );
+ STATIC_CONST( opCreateEventSize = sizeof(OpCreateEvent) );
+ STATIC_CONST( opSubEventSize = sizeof(OpSubEvent) );
+ STATIC_CONST( opDropEventSize = sizeof(OpDropEvent) );
+ STATIC_CONST( opSignalUtilSize = sizeof(OpSignalUtil) );
+ STATIC_CONST( opCreateTriggerSize = sizeof(OpCreateTrigger) );
+ STATIC_CONST( opDropTriggerSize = sizeof(OpDropTrigger) );
+ STATIC_CONST( opAlterTriggerSize = sizeof(OpAlterTrigger) );
private:
#define PTR_ALIGN(n) ((((n)+sizeof(void*)-1)>>2)&~((sizeof(void*)-1)>>2))
union OpRecordUnion {
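The hunk above rewrites the static const size_t members with the STATIC_CONST macro used elsewhere in the NDB headers (the Sysfile.hpp hunks below get the same treatment), so the pool sizes stay usable as compile-time constants on compilers that mishandle in-class static const definitions. The usual workaround is an anonymous enum; a small illustration under that assumption (MY_STATIC_CONST below is a hypothetical stand-in, not the real macro):

    // Hypothetical stand-in for the portability macro; the real
    // STATIC_CONST lives in the NDB headers.
    #define MY_STATIC_CONST(x) enum { x }

    struct Op { int a; int b; };

    class Pool {
    public:
      // Usable as a compile-time constant without an out-of-line definition.
      MY_STATIC_CONST( opSize = sizeof(Op) );
    };

    int main() {
      char buf[Pool::opSize];
      return sizeof(buf) == sizeof(Op) ? 0 : 1;
    }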
diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index 0a8abe59aed..76aa745c3e0 100644
--- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -1438,7 +1438,7 @@ void Dbdih::execREAD_NODESCONF(Signal* signal)
continue;
}
char buf[255];
- snprintf(buf, sizeof(buf),
+ BaseString::snprintf(buf, sizeof(buf),
"Illegal configuration change."
" Initial start needs to be performed "
" when changing no of storage nodes (node %d)", i);
@@ -1638,7 +1638,7 @@ void Dbdih::execSTART_PERMREQ(Signal* signal)
}//if
if (getNodeStatus(nodeId) != NodeRecord::DEAD){
ndbout << "nodeStatus in START_PERMREQ = "
- << getNodeStatus(nodeId) << endl;
+ << (Uint32) getNodeStatus(nodeId) << endl;
ndbrequire(false);
}//if
@@ -3500,7 +3500,7 @@ void Dbdih::selectMasterCandidateAndSend(Signal* signal)
Uint32 count = node_groups[nodePtr.i];
if(count != 0 && count != cnoReplicas){
char buf[255];
- snprintf(buf, sizeof(buf),
+ BaseString::snprintf(buf, sizeof(buf),
"Illegal configuration change."
" Initial start needs to be performed "
" when changing no of replicas (%d != %d)",
@@ -4268,7 +4268,7 @@ void Dbdih::failedNodeLcpHandling(Signal* signal, NodeRecordPtr failedNodePtr)
failedNodePtr.p->activeStatus = Sysfile::NS_NotActive_NotTakenOver;
break;
default:
- ndbout << "activeStatus = " << failedNodePtr.p->activeStatus;
+ ndbout << "activeStatus = " << (Uint32) failedNodePtr.p->activeStatus;
ndbout << " at failure after NODE_FAILREP of node = ";
ndbout << failedNodePtr.i << endl;
ndbrequire(false);
@@ -4618,6 +4618,7 @@ void Dbdih::execMASTER_GCPREQ(Signal* signal)
/* BUT NOT YET COMPLETED. */
/*--------------------------------------------------*/
ndbrequire(false);
+ gcpState= MasterGCPConf::GCP_READY; // remove warning
break;
default:
/*------------------------------------------------*/
@@ -4627,6 +4628,7 @@ void Dbdih::execMASTER_GCPREQ(Signal* signal)
/* NODE WHICH WAS NOT A MASTER NODE. */
/*------------------------------------------------*/
ndbrequire(false);
+ gcpState= MasterGCPConf::GCP_READY; // remove warning
break;
}//switch
MasterGCPConf * const masterGCPConf = (MasterGCPConf *)&signal->theData[0];
@@ -5535,6 +5537,7 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){
* it is not allowed
*/
ndbrequire(false);
+ lcpState= MasterLCPConf::LCP_STATUS_IDLE; // remove warning
break;
case LCP_COPY_GCI:
case LCP_INIT_TABLES:
@@ -5543,6 +5546,7 @@ Dbdih::sendMASTER_LCPCONF(Signal * signal){
* These two states are handled by if statements above
*/
ndbrequire(false);
+ lcpState= MasterLCPConf::LCP_STATUS_IDLE; // remove warning
break;
}//switch
ndbrequire(ok);
@@ -6166,15 +6170,15 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){
switch(fragmentType){
case DictTabInfo::AllNodesSmallTable:
jam();
- noOfFragments = cnoOfNodeGroups;
+ noOfFragments = csystemnodes;
break;
case DictTabInfo::AllNodesMediumTable:
jam();
- noOfFragments = 2 * cnoOfNodeGroups;
+ noOfFragments = csystemnodes;
break;
case DictTabInfo::AllNodesLargeTable:
jam();
- noOfFragments = 8 * cnoOfNodeGroups;
+ noOfFragments = 4 * csystemnodes;
break;
case DictTabInfo::SingleFragment:
jam();
@@ -6192,13 +6196,14 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){
}
if(err)
break;
-
+
NodeGroupRecordPtr NGPtr;
TabRecordPtr primTabPtr;
if (primaryTableId == RNIL) {
if(fragmentNode == 0){
jam();
- NGPtr.i = c_nextNodeGroup;
+ // needs to be fixed for single fragment tables
+ NGPtr.i = 0; //c_nextNodeGroup;
c_nextNodeGroup = (NGPtr.i + 1 == cnoOfNodeGroups ? 0 : NGPtr.i + 1);
} else if(! (fragmentNode < MAX_NDB_NODES)) {
jam();
@@ -6255,20 +6260,22 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){
//@todo use section writer
Uint32 count = 2;
Uint32 fragments[2 + 8*MAX_REPLICAS*MAX_NDB_NODES];
+ Uint32 next_replica_node[MAX_NDB_NODES];
+ memset(next_replica_node,0,sizeof(next_replica_node));
if (primaryTableId == RNIL) {
jam();
for(Uint32 fragNo = 0; fragNo<noOfFragments; fragNo++){
jam();
ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
- Uint32 ind = NGPtr.p->nextReplicaNode;
+ Uint32 ind = next_replica_node[NGPtr.i];
const Uint32 max = NGPtr.p->nodeCount;
//-------------------------------------------------------------------
// We make an extra step to ensure that the primary replicas are
// spread among the nodes.
//-------------------------------------------------------------------
- NGPtr.p->nextReplicaNode = (ind + 1 >= max ? 0 : ind + 1);
+ next_replica_node[NGPtr.i] = (ind + 1 >= max ? 0 : ind + 1);
for(Uint32 replicaNo = 0; replicaNo<noOfReplicas; replicaNo++){
jam();
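The fragmentation loop now keeps its per-node-group round-robin position in the local next_replica_node array (zeroed just above) instead of advancing NGPtr.p->nextReplicaNode in place, keeping the rotation local to the request being processed; the rotation that spreads primary replicas over the nodes of a group is otherwise unchanged. A minimal sketch of that rotation, with a plain array standing in for the node-group records:

    #include <cstdint>
    #include <cstring>
    #include <cassert>

    // Advance a per-group cursor so that consecutive fragments in the same
    // node group get different primary replicas.
    static uint32_t next_primary(uint32_t cursor[], uint32_t group,
                                 uint32_t nodes_in_group) {
      uint32_t ind = cursor[group];
      cursor[group] = (ind + 1 >= nodes_in_group) ? 0 : ind + 1;
      return ind;
    }

    int main() {
      uint32_t cursor[4];
      memset(cursor, 0, sizeof(cursor));
      assert(next_primary(cursor, 0, 2) == 0);   // first fragment: node 0 primary
      assert(next_primary(cursor, 0, 2) == 1);   // second fragment: node 1 primary
      assert(next_primary(cursor, 0, 2) == 0);   // wraps around
      return 0;
    }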
@@ -6425,6 +6432,10 @@ void Dbdih::execDIADDTABREQ(Signal* signal)
tabPtr.p->totalfragments = noFragments;
ndbrequire(noReplicas == cnoReplicas); // Only allowed
+ if (ERROR_INSERTED(7173)) {
+ addtabrefuseLab(signal, connectPtr, ZREPLERROR1);
+ return;
+ }
if ((noReplicas * noFragments) > cnoFreeReplicaRec) {
jam();
addtabrefuseLab(signal, connectPtr, ZREPLERROR1);
@@ -6736,13 +6747,15 @@ void Dbdih::tableDeleteLab(Signal* signal, FileRecordPtr filePtr)
void Dbdih::releaseTable(TabRecordPtr tabPtr)
{
FragmentstorePtr fragPtr;
- for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) {
- jam();
- getFragstore(tabPtr.p, fragId, fragPtr);
- releaseReplicas(fragPtr.p->storedReplicas);
- releaseReplicas(fragPtr.p->oldStoredReplicas);
- }//for
- releaseFragments(tabPtr);
+ if (tabPtr.p->noOfFragChunks > 0) {
+ for (Uint32 fragId = 0; fragId < tabPtr.p->totalfragments; fragId++) {
+ jam();
+ getFragstore(tabPtr.p, fragId, fragPtr);
+ releaseReplicas(fragPtr.p->storedReplicas);
+ releaseReplicas(fragPtr.p->oldStoredReplicas);
+ }//for
+ releaseFragments(tabPtr);
+ }
if (tabPtr.p->tabFile[0] != RNIL) {
jam();
releaseFile(tabPtr.p->tabFile[0]);
@@ -6875,9 +6888,6 @@ Uint32 Dbdih::extractNodeInfo(const Fragmentstore * fragPtr, Uint32 nodes[])
return nodeCount;
}//Dbdih::extractNodeInfo()
-#define NO_OF_FRAGS_PER_CHUNK 16
-#define LOG_NO_OF_FRAGS_PER_CHUNK 4
-
void
Dbdih::getFragstore(TabRecord * tab, //In parameter
Uint32 fragNo, //In parameter
@@ -7124,7 +7134,7 @@ void Dbdih::checkGcpStopLab(Signal* signal)
jam();
#ifdef VM_TRACE
ndbout << "System crash due to GCP Stop in state = ";
- ndbout << cgcpStatus << endl;
+ ndbout << (Uint32) cgcpStatus << endl;
#endif
crashSystemAtGcpStop(signal);
return;
@@ -7138,7 +7148,7 @@ void Dbdih::checkGcpStopLab(Signal* signal)
jam();
#ifdef VM_TRACE
ndbout << "System crash due to GCP Stop in state = ";
- ndbout << cgcpStatus << endl;
+ ndbout << (Uint32) cgcpStatus << endl;
#endif
crashSystemAtGcpStop(signal);
return;
@@ -8628,7 +8638,7 @@ void Dbdih::startFragment(Signal* signal, Uint32 tableId, Uint32 fragId)
/* POSSIBLE TO RESTORE THE SYSTEM. */
/* --------------------------------------------------------------------- */
char buf[100];
- snprintf(buf, sizeof(buf),
+ BaseString::snprintf(buf, sizeof(buf),
"Unable to find restorable replica for "
"table: %d fragment: %d gci: %d",
tableId, fragId, SYSFILE->newestRestorableGCI);
@@ -9071,7 +9081,7 @@ void Dbdih::checkTcCounterLab(Signal* signal)
{
CRASH_INSERTION(7009);
if (c_lcpState.lcpStatus != LCP_STATUS_IDLE) {
- ndbout << "lcpStatus = " << c_lcpState.lcpStatus;
+ ndbout << "lcpStatus = " << (Uint32) c_lcpState.lcpStatus;
ndbout << "lcpStatusUpdatedPlace = " <<
c_lcpState.lcpStatusUpdatedPlace << endl;
ndbrequire(false);
@@ -11051,6 +11061,7 @@ void Dbdih::initRestorableGciFiles()
void Dbdih::initTable(TabRecordPtr tabPtr)
{
+ tabPtr.p->noOfFragChunks = 0;
tabPtr.p->method = TabRecord::NOTDEFINED;
tabPtr.p->tabStatus = TabRecord::TS_IDLE;
tabPtr.p->noOfWords = 0;
@@ -12731,6 +12742,7 @@ void Dbdih::setNodeRestartInfoBits()
break;
default:
ndbrequire(false);
+ tsnrNodeActiveStatus = Sysfile::NS_NotDefined; // remove warning
break;
}//switch
Sysfile::setNodeStatus(nodePtr.i, SYSFILE->nodeStatus,
@@ -12935,7 +12947,7 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
snprintf(buf, sizeof(buf), " Table %d Fragment %d - ", tabPtr.i, j);
for(Uint32 k = 0; k < noOfReplicas; k++){
char tmp[100];
- snprintf(tmp, sizeof(tmp), "%d ", nodeOrder[k]);
+ BaseString::snprintf(tmp, sizeof(tmp), "%d ", nodeOrder[k]);
strcat(buf, tmp);
}
infoEvent(buf);
@@ -13151,12 +13163,12 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
replicaPtr.i = fragPtr.p->storedReplicas;
do {
ptrCheckGuard(replicaPtr, creplicaFileSize, replicaRecord);
- snprintf(buf2, sizeof(buf2), "%s %d(on %d)=%d(%s)",
+ BaseString::snprintf(buf2, sizeof(buf2), "%s %d(on %d)=%d(%s)",
buf, num,
replicaPtr.p->procNode,
replicaPtr.p->lcpIdStarted,
replicaPtr.p->lcpOngoingFlag ? "Ongoing" : "Idle");
- snprintf(buf, sizeof(buf), "%s", buf2);
+ BaseString::snprintf(buf, sizeof(buf), "%s", buf2);
num++;
replicaPtr.i = replicaPtr.p->nextReplica;
diff --git a/ndb/src/kernel/blocks/dbdih/Sysfile.hpp b/ndb/src/kernel/blocks/dbdih/Sysfile.hpp
index a44992d6ad0..3e2f3b0dd48 100644
--- a/ndb/src/kernel/blocks/dbdih/Sysfile.hpp
+++ b/ndb/src/kernel/blocks/dbdih/Sysfile.hpp
@@ -63,7 +63,7 @@ public:
/**
* No of 32 bits words in the sysfile
*/
- static const Uint32 SYSFILE_SIZE32 = _SYSFILE_SIZE32;
+ STATIC_CONST( SYSFILE_SIZE32 = _SYSFILE_SIZE32 );
Uint32 systemRestartBits;
@@ -106,7 +106,7 @@ public:
,NS_NotDefined = 8
,NS_Standby = 9
};
- static const Uint32 NODE_STATUS_SIZE = NODE_ARRAY_SIZE(MAX_NDB_NODES, 4);
+ STATIC_CONST( NODE_STATUS_SIZE = NODE_ARRAY_SIZE(MAX_NDB_NODES, 4) );
Uint32 nodeStatus[NODE_STATUS_SIZE];
static Uint32 getNodeStatus(NodeId, const Uint32 nodeStatus[]);
@@ -116,8 +116,8 @@ public:
* The node group of each node
* Sizeof(NodeGroup) = 8 Bit
*/
- static const Uint32 NODE_GROUPS_SIZE = NODE_ARRAY_SIZE(MAX_NDB_NODES,
- NODEID_BITS);
+ STATIC_CONST( NODE_GROUPS_SIZE = NODE_ARRAY_SIZE(MAX_NDB_NODES,
+ NODEID_BITS) );
Uint32 nodeGroups[NODE_GROUPS_SIZE];
static Uint16 getNodeGroup(NodeId, const Uint32 nodeGroups[]);
@@ -126,8 +126,8 @@ public:
/**
* Any node can take over for any node
*/
- static const Uint32 TAKE_OVER_SIZE = NODE_ARRAY_SIZE(MAX_NDB_NODES,
- NODEID_BITS);
+ STATIC_CONST( TAKE_OVER_SIZE = NODE_ARRAY_SIZE(MAX_NDB_NODES,
+ NODEID_BITS) );
Uint32 takeOver[TAKE_OVER_SIZE];
static NodeId getTakeOverNode(NodeId, const Uint32 takeOver[]);
diff --git a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
index 9fcb6faf3e3..d6987f3e478 100644
--- a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
+++ b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
@@ -64,25 +64,12 @@
/* CONSTANTS OF THE LOG PAGES */
/* ------------------------------------------------------------------------- */
#define ZPAGE_HEADER_SIZE 32
-#if defined NDB_OSE
-/**
- * Set the fragment log file size to 2Mb in OSE
- * This is done in order to speed up the initial start
- */
-#define ZNO_MBYTES_IN_FILE 2
-#define ZPAGE_SIZE 2048
-#define ZPAGES_IN_MBYTE 128
-#define ZTWOLOG_NO_PAGES_IN_MBYTE 7
-#define ZTWOLOG_PAGE_SIZE 11
-#define ZMAX_MM_BUFFER_SIZE 32 // Main memory window during log execution
-#else
#define ZNO_MBYTES_IN_FILE 16
#define ZPAGE_SIZE 8192
#define ZPAGES_IN_MBYTE 32
#define ZTWOLOG_NO_PAGES_IN_MBYTE 5
#define ZTWOLOG_PAGE_SIZE 13
#define ZMAX_MM_BUFFER_SIZE 32 // Main memory window during log execution
-#endif
#define ZMAX_PAGES_WRITTEN 8 // Max pages before writing to disk (=> config)
#define ZMIN_READ_BUFFER_SIZE 2 // Minimum number of pages to execute log
@@ -247,10 +234,6 @@
#define ZNODE_UP 0
#define ZNODE_DOWN 1
/* ------------------------------------------------------------------------- */
-/* OPERATION TYPES */
-/* ------------------------------------------------------------------------- */
-#define ZSIMPLE_READ 1
-/* ------------------------------------------------------------------------- */
/* START PHASES */
/* ------------------------------------------------------------------------- */
#define ZLAST_START_PHASE 255
@@ -468,7 +451,7 @@ public:
Uint16 totalAttrReceived;
Uint16 fragCopyCreation;
Uint16 noOfKeyAttr;
- Uint16 noOfNewAttr;
+ Uint32 noOfNewAttr; // noOfCharsets in upper half
Uint16 noOfAttributeGroups;
Uint16 lh3DistrBits;
Uint16 tableType;
@@ -532,10 +515,21 @@ public:
SCAN = 1,
COPY = 2
};
- UintR scanAccOpPtr[MAX_PARALLEL_OP_PER_SCAN];
- UintR scanApiOpPtr[MAX_PARALLEL_OP_PER_SCAN];
- UintR scanOpLength[MAX_PARALLEL_OP_PER_SCAN];
+
+ UintR scan_acc_op_ptr[32];
+ Uint32 scan_acc_index;
+ Uint32 scan_acc_attr_recs;
+ UintR scanApiOpPtr;
UintR scanLocalref[2];
+
+ Uint32 m_max_batch_size_rows;
+ Uint32 m_max_batch_size_bytes;
+
+ Uint32 m_curr_batch_size_rows;
+ Uint32 m_curr_batch_size_bytes;
+
+ bool check_scan_batch_completed() const;
+
UintR copyPtr;
union {
Uint32 nextPool;
@@ -553,8 +547,6 @@ public:
UintR scanAccPtr;
UintR scanAiLength;
- UintR scanCompletedOperations;
- UintR scanConcurrentOperations;
UintR scanErrorCounter;
UintR scanLocalFragid;
UintR scanSchemaVersion;
@@ -565,16 +557,18 @@ public:
ScanType scanType;
BlockReference scanApiBlockref;
NodeId scanNodeId;
+ Uint16 scanReleaseCounter;
+ Uint16 scanNumber;
+
Uint8 scanCompletedStatus;
Uint8 scanFlag;
Uint8 scanLockHold;
Uint8 scanLockMode;
Uint8 readCommitted;
Uint8 rangeScan;
- Uint8 scanNumber;
- Uint8 scanReleaseCounter;
Uint8 scanTcWaiting;
Uint8 scanKeyinfoFlag;
+ Uint8 m_last_row;
}; // Size 272 bytes
typedef Ptr<ScanRecord> ScanRecordPtr;
@@ -1829,11 +1823,7 @@ public:
* - There is no more information needed.
* The next mbyte will always refer to the start of the next mbyte.
*/
-#ifdef NDB_OSE
- UintR logPageWord[2048]; // Size 8 kbytes
-#else
UintR logPageWord[8192]; // Size 32 kbytes
-#endif
};
typedef Ptr<LogPageRecord> LogPageRecordPtr;
@@ -1855,8 +1845,8 @@ public:
PREP_DROP_TABLE_DONE = 4
};
- UintR fragrec[NO_OF_FRAG_PER_NODE];
- Uint16 fragid[NO_OF_FRAG_PER_NODE];
+ UintR fragrec[MAX_FRAG_PER_NODE];
+ Uint16 fragid[MAX_FRAG_PER_NODE];
/**
* Status of the table
*/
@@ -2097,7 +2087,8 @@ private:
void execSTART_EXEC_SR(Signal* signal);
void execEXEC_SRREQ(Signal* signal);
void execEXEC_SRCONF(Signal* signal);
-
+ void execREAD_PSUEDO_REQ(Signal* signal);
+
void execDUMP_STATE_ORD(Signal* signal);
void execACC_COM_BLOCK(Signal* signal);
void execACC_COM_UNBLOCK(Signal* signal);
@@ -2217,6 +2208,14 @@ private:
void execTUX_ADD_ATTRREF(Signal* signal);
// Statement blocks
+
+ void init_acc_ptr_list(ScanRecord*);
+ bool seize_acc_ptr_list(ScanRecord*, Uint32);
+ void release_acc_ptr_list(ScanRecord*);
+ Uint32 get_acc_ptr_from_scan_record(ScanRecord*, Uint32, bool);
+ void set_acc_ptr_in_scan_record(ScanRecord*, Uint32, Uint32);
+ void i_get_acc_ptr(ScanRecord*, Uint32*&, Uint32);
+
void removeTable(Uint32 tableId);
void sendLCP_COMPLETE_REP(Signal* signal, Uint32 lcpId);
void sendEMPTY_LCP_CONF(Signal* signal, bool idle);
@@ -2245,8 +2244,7 @@ private:
void sendAttrinfoLoop(Signal* signal);
void sendAttrinfoSignal(Signal* signal);
void sendLqhAttrinfoSignal(Signal* signal);
- void sendKeyinfoAcc(Signal* signal);
- void initScanAccOp(Signal* signal);
+ void sendKeyinfoAcc(Signal* signal, Uint32 pos);
Uint32 initScanrec(const class ScanFragReq *);
void initScanTc(Signal* signal,
Uint32 transid1,
@@ -2383,6 +2381,8 @@ private:
int saveTupattrbuf(Signal* signal, Uint32* dataPtr, Uint32 length);
void seizeAddfragrec(Signal* signal);
void seizeAttrinbuf(Signal* signal);
+ Uint32 seize_attrinbuf();
+ Uint32 release_attrinbuf(Uint32);
void seizeFragmentrec(Signal* signal);
void seizePageRef(Signal* signal);
void seizeTcrec();
@@ -2433,6 +2433,7 @@ private:
void abortStateHandlerLab(Signal* signal);
void writeAttrinfoLab(Signal* signal);
void scanAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length);
+ void abort_scan(Signal* signal, Uint32 scan_ptr_i, Uint32 errcode);
void localAbortStateHandlerLab(Signal* signal);
void logLqhkeyreqLab(Signal* signal);
void lqhAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length);
@@ -2586,13 +2587,14 @@ private:
UintR cfirstfreeAddfragrec;
UintR caddfragrecFileSize;
-#define ZATTRINBUF_FILE_SIZE 10000 // 1.25 MByte
+#define ZATTRINBUF_FILE_SIZE 12288 // 1.5 MByte
#define ZINBUF_DATA_LEN 24 /* POSITION OF 'DATA LENGTH'-VARIABLE. */
#define ZINBUF_NEXT 25 /* POSITION OF 'NEXT'-VARIABLE. */
Attrbuf *attrbuf;
AttrbufPtr attrinbufptr;
UintR cfirstfreeAttrinbuf;
UintR cattrinbufFileSize;
+ Uint32 c_no_attrinbuf_recs;
#define ZDATABUF_FILE_SIZE 10000 // 200 kByte
Databuf *databuf;
@@ -2643,7 +2645,6 @@ private:
UintR cfirstfreeLfo;
UintR clfoFileSize;
-#define ZLOG_PAGE_FILE_SIZE 256 // 8 MByte
LogPageRecord *logPageRecord;
LogPageRecordPtr logPagePtr;
UintR cfirstfreeLogPage;
@@ -2913,4 +2914,15 @@ public:
DLHashTable<ScanRecord> c_scanTakeOverHash;
};
+inline
+bool
+Dblqh::ScanRecord::check_scan_batch_completed() const
+{
+ Uint32 max_rows = m_max_batch_size_rows;
+ Uint32 max_bytes = m_max_batch_size_bytes;
+
+ return (max_rows > 0 && (m_curr_batch_size_rows >= max_rows)) ||
+ (max_bytes > 0 && (m_curr_batch_size_bytes >= max_bytes));
+}
+
#endif
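The inline check_scan_batch_completed() added above declares a scan batch finished as soon as either configured limit is reached, with a limit of zero meaning "unlimited". A minimal standalone sketch of the same predicate, assuming plain 32-bit unsigned fields:

    #include <cstdint>
    #include <cassert>

    struct BatchState {
      uint32_t max_rows,  max_bytes;    // 0 means "no limit"
      uint32_t curr_rows, curr_bytes;

      // True once either non-zero limit has been reached.
      bool completed() const {
        return (max_rows  > 0 && curr_rows  >= max_rows) ||
               (max_bytes > 0 && curr_bytes >= max_bytes);
      }
    };

    int main() {
      BatchState s = {16, 32768, 16, 100};
      assert(s.completed());                    // row limit reached
      BatchState t = {0, 32768, 1000, 100};
      assert(!t.completed());                   // no row limit, bytes below limit
      return 0;
    }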
diff --git a/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp b/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
index 4bb31185cfe..d0fef8753cb 100644
--- a/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
+++ b/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
@@ -27,13 +27,13 @@ void Dblqh::initData()
{
caddfragrecFileSize = ZADDFRAGREC_FILE_SIZE;
cattrinbufFileSize = ZATTRINBUF_FILE_SIZE;
+ c_no_attrinbuf_recs= ZATTRINBUF_FILE_SIZE;
cdatabufFileSize = ZDATABUF_FILE_SIZE;
cfragrecFileSize = 0;
cgcprecFileSize = ZGCPREC_FILE_SIZE;
chostFileSize = MAX_NDB_NODES;
clcpFileSize = ZNO_CONCURRENT_LCP;
clcpLocrecFileSize = ZLCP_LOCREC_FILE_SIZE;
- clogPageFileSize = ZLOG_PAGE_FILE_SIZE;
clfoFileSize = ZLFO_FILE_SIZE;
clogFileFileSize = 0;
clogPartFileSize = ZLOG_PART_FILE_SIZE;
@@ -176,8 +176,25 @@ Dblqh::Dblqh(const class Configuration & conf):
m_commitAckMarkerHash(m_commitAckMarkerPool),
c_scanTakeOverHash(c_scanRecordPool)
{
+ Uint32 log_page_size= 0;
BLOCK_CONSTRUCTOR(Dblqh);
+ const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndb_mgm_get_int_parameter(p, CFG_DB_REDO_BUFFER,
+ &log_page_size);
+
+ /**
+ * Always round the log page count up to a whole half MByte
+ */
+ clogPageFileSize= (log_page_size / sizeof(LogPageRecord));
+ Uint32 mega_byte_part= clogPageFileSize & 15;
+ if (mega_byte_part != 0) {
+ jam();
+ clogPageFileSize+= (16 - mega_byte_part);
+ }
+
addRecSignal(GSN_PACKED_SIGNAL, &Dblqh::execPACKED_SIGNAL);
addRecSignal(GSN_DEBUG_SIG, &Dblqh::execDEBUG_SIG);
addRecSignal(GSN_ATTRINFO, &Dblqh::execATTRINFO);
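The constructor now sizes the REDO log page pool from the CFG_DB_REDO_BUFFER parameter: the configured byte count is divided by sizeof(LogPageRecord) (8192 words, 32 kB, per the Dblqh.hpp hunk above) and rounded up to a multiple of 16 pages, i.e. to a whole half MByte. A minimal sketch of that rounding, assuming the 32 kB page size:

    #include <cstdint>
    #include <cassert>

    // Convert a configured buffer size in bytes to a page count that is a
    // multiple of 16 pages (16 * 32 kB = 512 kB, half a MByte).
    static uint32_t redo_pages(uint32_t bytes, uint32_t page_size = 32 * 1024) {
      uint32_t pages = bytes / page_size;
      uint32_t rem = pages & 15;
      if (rem != 0)
        pages += 16 - rem;
      return pages;
    }

    int main() {
      assert(redo_pages(8 * 1024 * 1024) == 256);   // 8 MB is exactly 256 pages
      assert(redo_pages(1000000) == 32);            // ~1 MB rounds up to 32 pages
      return 0;
    }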
@@ -323,6 +340,8 @@ Dblqh::Dblqh(const class Configuration & conf):
addRecSignal(GSN_TUX_ADD_ATTRCONF, &Dblqh::execTUX_ADD_ATTRCONF);
addRecSignal(GSN_TUX_ADD_ATTRREF, &Dblqh::execTUX_ADD_ATTRREF);
+ addRecSignal(GSN_READ_PSUEDO_REQ, &Dblqh::execREAD_PSUEDO_REQ);
+
initData();
#ifdef VM_TRACE
diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index 6b4a78380be..cd15ad0c3b2 100644
--- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -890,7 +890,7 @@ void Dblqh::execREAD_CONFIG_REQ(Signal* signal)
&ctcConnectrecFileSize));
clogFileFileSize = 4 * cnoLogFiles;
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_LQH_SCAN, &cscanrecFileSize));
- cmaxAccOps = cscanrecFileSize * MAX_PARALLEL_SCANS_PER_FRAG;
+ cmaxAccOps = cscanrecFileSize * MAX_PARALLEL_OP_PER_SCAN;
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_DISCLESS, &c_diskless));
@@ -991,7 +991,7 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
ptrCheckGuard(tTablePtr, ctabrecFileSize, tablerec);
FragrecordPtr tFragPtr;
tFragPtr.i = RNIL;
- for (Uint32 i = 0; i < NO_OF_FRAG_PER_NODE; i++) {
+ for (Uint32 i = 0; i < MAX_FRAG_PER_NODE; i++) {
if (tTablePtr.p->fragid[i] == fragptr.p->fragId) {
jam();
tFragPtr.i = tTablePtr.p->fragrec[i];
@@ -1444,6 +1444,7 @@ Dblqh::sendAddAttrReq(Signal* signal)
tupreq->notused1 = 0;
tupreq->attrId = attrId;
tupreq->attrDescriptor = entry.attrDescriptor;
+ tupreq->extTypeInfo = entry.extTypeInfo;
sendSignal(fragptr.p->tupBlockref, GSN_TUP_ADD_ATTRREQ,
signal, TupAddAttrReq::SignalLength, JBB);
return;
@@ -1916,7 +1917,7 @@ void Dblqh::removeTable(Uint32 tableId)
tabptr.i = tableId;
ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
- for (Uint32 i = (NO_OF_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
+ for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
jam();
if (tabptr.p->fragid[i] != ZNIL) {
jam();
@@ -2100,18 +2101,15 @@ void Dblqh::execTIME_SIGNAL(Signal* signal)
ScanRecordPtr TscanPtr;
c_scanRecordPool.getPtr(TscanPtr, tTcConptr.p->tcScanRec);
ndbout << " scanState = " << TscanPtr.p->scanState << endl;
- //TscanPtr.p->scanAccOpPtr[16];
- //TscanPtr.p->scanApiOpPtr[16];
- //TscanPtr.p->scanOpLength[16];
//TscanPtr.p->scanLocalref[2];
ndbout << " copyPtr="<<TscanPtr.p->copyPtr
<< " scanAccPtr="<<TscanPtr.p->scanAccPtr
<< " scanAiLength="<<TscanPtr.p->scanAiLength
<< endl;
- ndbout << " scanCompletedOperations="<<
- TscanPtr.p->scanCompletedOperations
- << " scanConcurrentOperations="<<
- TscanPtr.p->scanConcurrentOperations
+ ndbout << " m_curr_batch_size_rows="<<
+ TscanPtr.p->m_curr_batch_size_rows
+ << " m_max_batch_size_rows="<<
+ TscanPtr.p->m_max_batch_size_rows
<< " scanErrorCounter="<<TscanPtr.p->scanErrorCounter
<< " scanLocalFragid="<<TscanPtr.p->scanLocalFragid
<< endl;
@@ -2511,6 +2509,21 @@ Dblqh::updatePackedList(Signal* signal, HostRecord * ahostptr, Uint16 hostId)
}//if
}//Dblqh::updatePackedList()
+void
+Dblqh::execREAD_PSUEDO_REQ(Signal* signal){
+ jamEntry();
+ TcConnectionrecPtr regTcPtr;
+ regTcPtr.i = signal->theData[0];
+ ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec);
+
+ FragrecordPtr regFragptr;
+ regFragptr.i = regTcPtr.p->fragmentptr;
+ ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
+
+ signal->theData[0] = regFragptr.p->accFragptr[regTcPtr.p->localFragptr];
+ EXECUTE_DIRECT(DBACC, GSN_READ_PSUEDO_REQ, signal, 2);
+}
+
/* ************>> */
/* TUPKEYCONF > */
/* ************>> */
@@ -2790,8 +2803,10 @@ void Dblqh::execKEYINFO(Signal* signal)
return;
}//if
TcConnectionrec * const regTcPtr = tcConnectptr.p;
- if (regTcPtr->transactionState !=
- TcConnectionrec::WAIT_TUPKEYINFO) {
+ TcConnectionrec::TransactionState state = regTcPtr->transactionState;
+ if (state != TcConnectionrec::WAIT_TUPKEYINFO &&
+ state != TcConnectionrec::WAIT_SCAN_AI)
+ {
jam();
/*****************************************************************************/
/* TRANSACTION WAS ABORTED, THIS IS MOST LIKELY A SIGNAL BELONGING TO THE */
@@ -2810,14 +2825,20 @@ void Dblqh::execKEYINFO(Signal* signal)
}//if
jam();
terrorCode = errorCode;
- abortErrorLab(signal);
+ if(state == TcConnectionrec::WAIT_TUPKEYINFO)
+ abortErrorLab(signal);
+ else
+ abort_scan(signal, regTcPtr->tcScanRec, errorCode);
return;
}//if
- FragrecordPtr regFragptr;
- regFragptr.i = regTcPtr->fragmentptr;
- ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
- fragptr = regFragptr;
- endgettupkeyLab(signal);
+ if(state == TcConnectionrec::WAIT_TUPKEYINFO)
+ {
+ FragrecordPtr regFragptr;
+ regFragptr.i = regTcPtr->fragmentptr;
+ ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
+ fragptr = regFragptr;
+ endgettupkeyLab(signal);
+ }
return;
}//Dblqh::execKEYINFO()
@@ -2825,9 +2846,9 @@ void Dblqh::execKEYINFO(Signal* signal)
/* FILL IN KEY DATA INTO DATA BUFFERS. */
/* ------------------------------------------------------------------------- */
Uint32 Dblqh::handleLongTupKey(Signal* signal,
- Uint32 keyLength,
- Uint32 primKeyLength,
- Uint32* dataPtr)
+ Uint32 keyLength,
+ Uint32 primKeyLength,
+ Uint32* dataPtr)
{
TcConnectionrec * const regTcPtr = tcConnectptr.p;
Uint32 dataPos = 0;
@@ -3091,10 +3112,9 @@ void Dblqh::seizeAttrinbuf(Signal* signal)
Attrbuf *regAttrbuf = attrbuf;
Uint32 tattrinbufFileSize = cattrinbufFileSize;
- regAttrinbufptr.i = cfirstfreeAttrinbuf;
+ regAttrinbufptr.i = seize_attrinbuf();
tmpAttrinbufptr.i = tcConnectptr.p->lastAttrinbuf;
ptrCheckGuard(regAttrinbufptr, tattrinbufFileSize, regAttrbuf);
- Uint32 nextFirst = regAttrinbufptr.p->attrbuf[ZINBUF_NEXT];
tcConnectptr.p->lastAttrinbuf = regAttrinbufptr.i;
regAttrinbufptr.p->attrbuf[ZINBUF_DATA_LEN] = 0;
if (tmpAttrinbufptr.i == RNIL) {
@@ -3106,7 +3126,6 @@ void Dblqh::seizeAttrinbuf(Signal* signal)
tmpAttrinbufptr.p->attrbuf[ZINBUF_NEXT] = regAttrinbufptr.i;
}//if
regAttrinbufptr.p->attrbuf[ZINBUF_NEXT] = RNIL;
- cfirstfreeAttrinbuf = nextFirst;
attrinbufptr = regAttrinbufptr;
}//Dblqh::seizeAttrinbuf()
@@ -3282,12 +3301,15 @@ void Dblqh::execLQHKEYREQ(Signal* signal)
regTcPtr->dirtyOp = LqhKeyReq::getDirtyFlag(Treqinfo);
regTcPtr->opExec = LqhKeyReq::getInterpretedFlag(Treqinfo);
regTcPtr->opSimple = LqhKeyReq::getSimpleFlag(Treqinfo);
- regTcPtr->simpleRead = ((Treqinfo >> 18) & 15);
regTcPtr->operation = LqhKeyReq::getOperation(Treqinfo);
+ regTcPtr->simpleRead = regTcPtr->operation == ZREAD && regTcPtr->opSimple;
regTcPtr->seqNoReplica = LqhKeyReq::getSeqNoReplica(Treqinfo);
UintR TreclenAiLqhkey = LqhKeyReq::getAIInLqhKeyReq(Treqinfo);
regTcPtr->apiVersionNo = 0;
+ CRASH_INSERTION2(5041, regTcPtr->simpleRead &&
+ refToNode(signal->senderBlockRef()) != cownNodeid);
+
regTcPtr->reclenAiLqhkey = TreclenAiLqhkey;
regTcPtr->currReclenAi = TreclenAiLqhkey;
UintR TitcKeyLen = LqhKeyReq::getKeyLen(Treqinfo);
@@ -3414,7 +3436,7 @@ void Dblqh::execLQHKEYREQ(Signal* signal)
if ((tfragDistKey != TdistKey) &&
(regTcPtr->seqNoReplica == 0) &&
(regTcPtr->dirtyOp == ZFALSE) &&
- (regTcPtr->simpleRead != ZSIMPLE_READ)) {
+ (regTcPtr->simpleRead == ZFALSE)) {
/* ----------------------------------------------------------------------
* WE HAVE DIFFERENT OPINION THAN THE DIH THAT STARTED THE TRANSACTION.
* THE REASON COULD BE THAT THIS IS AN OLD DISTRIBUTION WHICH IS NO LONGER
@@ -3422,7 +3444,7 @@ void Dblqh::execLQHKEYREQ(Signal* signal)
* ONE IS ADDED TO THE DISTRIBUTION KEY EVERY TIME WE ADD A NEW REPLICA.
* FAILED REPLICAS DO NOT AFFECT THE DISTRIBUTION KEY. THIS MEANS THAT THE
* MAXIMUM DEVIATION CAN BE ONE BETWEEN THOSE TWO VALUES.
- * ---------------------------------------------------------------------- */
+ * --------------------------------------------------------------------- */
Int32 tmp = TdistKey - tfragDistKey;
tmp = (tmp < 0 ? - tmp : tmp);
if ((tmp <= 1) || (tfragDistKey == 0)) {
@@ -3596,7 +3618,9 @@ void Dblqh::prepareContinueAfterBlockedLab(Signal* signal)
takeOverErrorLab(signal);
return;
}//if
- Uint32 accOpPtr = scanptr.p->scanAccOpPtr[ttcScanOp];
+ Uint32 accOpPtr= get_acc_ptr_from_scan_record(scanptr.p,
+ ttcScanOp,
+ true);
if (accOpPtr == RNIL) {
jam();
releaseActiveFrag(signal);
@@ -3673,7 +3697,7 @@ void Dblqh::prepareContinueAfterBlockedLab(Signal* signal)
signal->theData[9] = sig3;
signal->theData[10] = sig4;
if (regTcPtr->primKeyLen > 4) {
- sendKeyinfoAcc(signal);
+ sendKeyinfoAcc(signal, 11);
}//if
EXECUTE_DIRECT(refToBlock(regTcPtr->tcAccBlockref), GSN_ACCKEYREQ,
signal, 7 + regTcPtr->primKeyLen);
@@ -3695,9 +3719,8 @@ void Dblqh::prepareContinueAfterBlockedLab(Signal* signal)
/* ======= SEND KEYINFO TO ACC ======= */
/* */
/* ========================================================================== */
-void Dblqh::sendKeyinfoAcc(Signal* signal)
+void Dblqh::sendKeyinfoAcc(Signal* signal, Uint32 Ti)
{
- UintR Ti = 11;
DatabufPtr regDatabufptr;
regDatabufptr.i = tcConnectptr.p->firstTupkeybuf;
@@ -3861,7 +3884,7 @@ void Dblqh::tupkeyConfLab(Signal* signal)
/* ---- GET OPERATION TYPE AND CHECK WHAT KIND OF OPERATION IS REQUESTED ---- */
const TupKeyConf * const tupKeyConf = (TupKeyConf *)&signal->theData[0];
TcConnectionrec * const regTcPtr = tcConnectptr.p;
- if (regTcPtr->simpleRead == ZSIMPLE_READ) {
+ if (regTcPtr->simpleRead) {
jam();
/* ----------------------------------------------------------------------
* THE OPERATION IS A SIMPLE READ. WE WILL IMMEDIATELY COMMIT THE OPERATION.
@@ -4711,11 +4734,7 @@ void Dblqh::releaseOprec(Signal* signal)
* ####################################################################### */
while (regAttrinbufptr.i != RNIL) {
jam();
- ptrCheckGuard(regAttrinbufptr, cattrinbufFileSize, attrbuf);
- Tmpbuf = regAttrinbufptr.p->attrbuf[ZINBUF_NEXT];
- regAttrinbufptr.p->attrbuf[ZINBUF_NEXT] = cfirstfreeAttrinbuf;
- cfirstfreeAttrinbuf = regAttrinbufptr.i;
- regAttrinbufptr.i = Tmpbuf;
+ regAttrinbufptr.i= release_attrinbuf(regAttrinbufptr.i);
}//while
regTcPtr->firstAttrinbuf = RNIL;
regTcPtr->lastAttrinbuf = RNIL;
@@ -5453,6 +5472,8 @@ void Dblqh::commitContinueAfterBlockedLab(Signal* signal)
TcConnectionrec * const regTcPtr = tcConnectptr.p;
Fragrecord * const regFragptr = fragptr.p;
Uint32 operation = regTcPtr->operation;
+ Uint32 simpleRead = regTcPtr->simpleRead;
+ Uint32 dirtyOp = regTcPtr->dirtyOp;
if (regTcPtr->activeCreat == ZFALSE) {
if ((cCommitBlocked == true) &&
(regFragptr->fragActiveStatus == ZTRUE)) {
@@ -5490,13 +5511,18 @@ void Dblqh::commitContinueAfterBlockedLab(Signal* signal)
tupCommitReq->hashValue = regTcPtr->hashValue;
EXECUTE_DIRECT(tup, GSN_TUP_COMMITREQ, signal,
TupCommitReq::SignalLength);
- }//if
- Uint32 acc = refToBlock(regTcPtr->tcAccBlockref);
- signal->theData[0] = regTcPtr->accConnectrec;
- EXECUTE_DIRECT(acc, GSN_ACC_COMMITREQ, signal, 1);
- Uint32 simpleRead = regTcPtr->simpleRead;
+ Uint32 acc = refToBlock(regTcPtr->tcAccBlockref);
+ signal->theData[0] = regTcPtr->accConnectrec;
+ EXECUTE_DIRECT(acc, GSN_ACC_COMMITREQ, signal, 1);
+ } else {
+ if(!dirtyOp){
+ Uint32 acc = refToBlock(regTcPtr->tcAccBlockref);
+ signal->theData[0] = regTcPtr->accConnectrec;
+ EXECUTE_DIRECT(acc, GSN_ACC_COMMITREQ, signal, 1);
+ }
+ }
jamEntry();
- if (simpleRead == ZSIMPLE_READ) {
+ if (simpleRead) {
jam();
/* ------------------------------------------------------------------------- */
/*THE OPERATION WAS A SIMPLE READ THUS THE COMMIT PHASE IS ONLY NEEDED TO */
@@ -5509,7 +5535,6 @@ void Dblqh::commitContinueAfterBlockedLab(Signal* signal)
return;
}//if
}//if
- Uint32 dirtyOp = regTcPtr->dirtyOp;
Uint32 seqNoReplica = regTcPtr->seqNoReplica;
if (regTcPtr->gci > regFragptr->newestGci) {
jam();
@@ -6078,7 +6103,7 @@ void Dblqh::abortStateHandlerLab(Signal* signal)
/* ------------------------------------------------------------------------- */
return;
}//if
- if (regTcPtr->simpleRead == ZSIMPLE_READ) {
+ if (regTcPtr->simpleRead) {
jam();
/* ------------------------------------------------------------------------- */
/*A SIMPLE READ IS CURRENTLY RELEASING THE LOCKS OR WAITING FOR ACCESS TO */
@@ -6364,7 +6389,7 @@ void Dblqh::continueAbortLab(Signal* signal)
void Dblqh::continueAfterLogAbortWriteLab(Signal* signal)
{
TcConnectionrec * const regTcPtr = tcConnectptr.p;
- if (regTcPtr->simpleRead == ZSIMPLE_READ) {
+ if (regTcPtr->simpleRead) {
jam();
TcKeyRef * const tcKeyRef = (TcKeyRef *) signal->getDataPtrSend();
@@ -6982,6 +7007,15 @@ void Dblqh::execSCAN_NEXTREQ(Signal* signal)
fragptr.i = tcConnectptr.p->fragmentptr;
ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+ /**
+ * Change parameters while running
+ * (is currently not supported)
+ */
+ const Uint32 max_rows = nextReq->batch_size_rows;
+ const Uint32 max_bytes = nextReq->batch_size_bytes;
+ ndbrequire(scanptr.p->m_max_batch_size_rows == max_rows);
+ ndbrequire(scanptr.p->m_max_batch_size_bytes == max_bytes);
+
/* --------------------------------------------------------------------
* If scanLockHold = TRUE we need to unlock previous round of
* scanned records.
@@ -6991,7 +7025,7 @@ void Dblqh::execSCAN_NEXTREQ(Signal* signal)
* acquiring new locks.
* -------------------------------------------------------------------- */
if ((scanptr.p->scanLockHold == ZTRUE) &&
- (scanptr.p->scanCompletedOperations > 0)) {
+ (scanptr.p->m_curr_batch_size_rows > 0)) {
jam();
scanptr.p->scanReleaseCounter = 1;
scanReleaseLocksLab(signal);
@@ -7014,11 +7048,20 @@ void Dblqh::continueScanNextReqLab(Signal* signal)
return;
}//if
+ if(scanptr.p->m_last_row){
+ jam();
+ scanptr.p->scanCompletedStatus = ZTRUE;
+ scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
+ sendScanFragConf(signal, ZFALSE);
+ return;
+ }
+
// Update timer on tcConnectRecord
tcConnectptr.p->tcTimer = cLqhTimeOutCount;
- initScanAccOp(signal);
- scanptr.p->scanCompletedOperations = 0;
+ init_acc_ptr_list(scanptr.p);
+ scanptr.p->m_curr_batch_size_rows = 0;
+ scanptr.p->m_curr_batch_size_bytes= 0;
scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT;
scanNextLoopLab(signal);
}//Dblqh::continueScanNextReqLab()
@@ -7061,9 +7104,10 @@ void Dblqh::continueScanReleaseAfterBlockedLab(Signal* signal)
c_scanRecordPool.getPtr(scanptr);
scanptr.p->scanState = ScanRecord::WAIT_RELEASE_LOCK;
signal->theData[0] = scanptr.p->scanAccPtr;
- ndbrequire((scanptr.p->scanReleaseCounter -1) < MAX_PARALLEL_OP_PER_SCAN);
- signal->theData[1] =
- scanptr.p->scanAccOpPtr[scanptr.p->scanReleaseCounter -1];
+ signal->theData[1]=
+ get_acc_ptr_from_scan_record(scanptr.p,
+ scanptr.p->scanReleaseCounter -1,
+ false);
signal->theData[2] = NextScanReq::ZSCAN_COMMIT;
if (! scanptr.p->rangeScan)
sendSignal(tcConnectptr.p->tcAccBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
@@ -7141,7 +7185,7 @@ void Dblqh::closeScanRequestLab(Signal* signal)
ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
if (scanptr.p->scanLockHold == ZTRUE) {
- if (scanptr.p->scanCompletedOperations > 0) {
+ if (scanptr.p->m_curr_batch_size_rows > 0) {
jam();
scanptr.p->scanReleaseCounter = 1;
scanReleaseLocksLab(signal);
@@ -7172,7 +7216,8 @@ void Dblqh::closeScanRequestLab(Signal* signal)
return;
}//if
tcConnectptr.p->abortState = TcConnectionrec::ABORT_ACTIVE;
- scanptr.p->scanCompletedOperations = 0;
+ scanptr.p->m_curr_batch_size_rows = 0;
+ scanptr.p->m_curr_batch_size_bytes= 0;
sendScanFragConf(signal, ZTRUE);
break;
case TcConnectionrec::SCAN_TUPKEY:
@@ -7215,13 +7260,12 @@ void Dblqh::scanLockReleasedLab(Signal* signal)
tcConnectptr.i = scanptr.p->scanTcrec;
ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
releaseActiveFrag(signal);
- if (scanptr.p->scanReleaseCounter == scanptr.p->scanCompletedOperations) {
+ if (scanptr.p->scanReleaseCounter == scanptr.p->m_curr_batch_size_rows) {
if ((scanptr.p->scanErrorCounter > 0) ||
(scanptr.p->scanCompletedStatus == ZTRUE)) {
jam();
closeScanLab(signal);
- } else if ((scanptr.p->scanConcurrentOperations ==
- scanptr.p->scanCompletedOperations) &&
+ } else if (scanptr.p->check_scan_batch_completed() &&
scanptr.p->scanLockHold != ZTRUE) {
jam();
scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
@@ -7234,7 +7278,7 @@ void Dblqh::scanLockReleasedLab(Signal* signal)
*/
continueScanNextReqLab(signal);
}//if
- } else if (scanptr.p->scanReleaseCounter < scanptr.p->scanCompletedOperations) {
+ } else if (scanptr.p->scanReleaseCounter < scanptr.p->m_curr_batch_size_rows) {
jam();
scanptr.p->scanReleaseCounter++;
scanReleaseLocksLab(signal);
@@ -7242,7 +7286,7 @@ void Dblqh::scanLockReleasedLab(Signal* signal)
jam();
/*
We come here when we have been scanning for a long time and not been able
- to find scanConcurrentOperations records to return. We needed to release
+ to find m_max_batch_size_rows records to return. We needed to release
the record we didn't want, but now we are returning all found records to
the API.
*/
@@ -7251,12 +7295,126 @@ void Dblqh::scanLockReleasedLab(Signal* signal)
}//if
}//Dblqh::scanLockReleasedLab()
+bool
+Dblqh::seize_acc_ptr_list(ScanRecord* scanP, Uint32 batch_size)
+{
+ Uint32 i;
+ Uint32 attr_buf_recs= (batch_size + 30) / 32;
+
+ if (batch_size > 1) {
+ if (c_no_attrinbuf_recs < attr_buf_recs) {
+ jam();
+ return false;
+ }
+ for (i= 1; i <= attr_buf_recs; i++) {
+ scanP->scan_acc_op_ptr[i]= seize_attrinbuf();
+ }
+ }
+ scanP->scan_acc_attr_recs= attr_buf_recs;
+ scanP->scan_acc_index = 0;
+ return true;
+}
+
+void
+Dblqh::release_acc_ptr_list(ScanRecord* scanP)
+{
+ Uint32 i, attr_buf_recs;
+ attr_buf_recs= scanP->scan_acc_attr_recs;
+
+ for (i= 1; i <= attr_buf_recs; i++) {
+ release_attrinbuf(scanP->scan_acc_op_ptr[i]);
+ }
+ scanP->scan_acc_attr_recs= 0;
+ scanP->scan_acc_index = 0;
+}
+
+Uint32
+Dblqh::seize_attrinbuf()
+{
+ AttrbufPtr regAttrPtr;
+ Uint32 ret_attr_buf;
+ ndbrequire(c_no_attrinbuf_recs > 0);
+ c_no_attrinbuf_recs--;
+ ret_attr_buf= cfirstfreeAttrinbuf;
+ regAttrPtr.i= ret_attr_buf;
+ ptrCheckGuard(regAttrPtr, cattrinbufFileSize, attrbuf);
+ cfirstfreeAttrinbuf= regAttrPtr.p->attrbuf[ZINBUF_NEXT];
+ return ret_attr_buf;
+}
+
+Uint32
+Dblqh::release_attrinbuf(Uint32 attr_buf_i)
+{
+ Uint32 next_buf;
+ AttrbufPtr regAttrPtr;
+ c_no_attrinbuf_recs++;
+ regAttrPtr.i= attr_buf_i;
+ ptrCheckGuard(regAttrPtr, cattrinbufFileSize, attrbuf);
+ next_buf= regAttrPtr.p->attrbuf[ZINBUF_NEXT];
+ regAttrPtr.p->attrbuf[ZINBUF_NEXT]= cfirstfreeAttrinbuf;
+ cfirstfreeAttrinbuf= regAttrPtr.i;
+ return next_buf;
+}
+
+void
+Dblqh::init_acc_ptr_list(ScanRecord* scanP)
+{
+ scanP->scan_acc_index = 0;
+}
+
+inline
+void
+Dblqh::i_get_acc_ptr(ScanRecord* scanP, Uint32* &acc_ptr, Uint32 index)
+{
+ if (index == 0) {
+ acc_ptr= (Uint32*)&scanP->scan_acc_op_ptr[0];
+ } else {
+ Uint32 attr_buf_index, attr_buf_rec;
+
+ AttrbufPtr regAttrPtr;
+ jam();
+ attr_buf_rec= (index + 31) / 32;
+ attr_buf_index= (index - 1) & 31;
+ regAttrPtr.i= scanP->scan_acc_op_ptr[attr_buf_rec];
+ ptrCheckGuard(regAttrPtr, cattrinbufFileSize, attrbuf);
+ acc_ptr= (Uint32*)&regAttrPtr.p->attrbuf[attr_buf_index];
+ }
+}
+
+Uint32
+Dblqh::get_acc_ptr_from_scan_record(ScanRecord* scanP,
+ Uint32 index,
+ bool crash_flag)
+{
+ Uint32* acc_ptr;
+ Uint32 attr_buf_rec, attr_buf_index;
+ if (!((index < MAX_PARALLEL_OP_PER_SCAN) &&
+ index < scanP->scan_acc_index)) {
+ ndbrequire(crash_flag);
+ return RNIL;
+ }
+ i_get_acc_ptr(scanP, acc_ptr, index);
+ return *acc_ptr;
+}
+
+void
+Dblqh::set_acc_ptr_in_scan_record(ScanRecord* scanP,
+ Uint32 index, Uint32 acc)
+{
+ Uint32 *acc_ptr;
+ ndbrequire((index == 0 || scanP->scan_acc_index == index) &&
+ (index < MAX_PARALLEL_OP_PER_SCAN));
+ scanP->scan_acc_index= index + 1;
+ i_get_acc_ptr(scanP, acc_ptr, index);
+ *acc_ptr= acc;
+}
+
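The new scan_acc_op_ptr bookkeeping stores the ACC operation pointer for scan row 0 directly in the scan record and spills rows 1..n into attrinbuf records seized in seize_acc_ptr_list, with the index arithmetic in i_get_acc_ptr placing 32 entries in each spilled record. A minimal sketch of the same mapping, with a simple vector of 32-word blocks standing in for the attrinbuf pool:

    #include <cstdint>
    #include <vector>
    #include <cassert>

    // Row 0 lives inline; row i (i >= 1) lives in block (i + 31) / 32,
    // at position (i - 1) & 31 inside that block (32 entries per block).
    struct AccPtrList {
      uint32_t inline_slot;                       // index 0
      std::vector<std::vector<uint32_t>> blocks;  // block 0 unused, as in the patch

      uint32_t& slot(uint32_t index) {
        if (index == 0)
          return inline_slot;
        uint32_t block = (index + 31) / 32;       // 1-based block number
        uint32_t pos = (index - 1) & 31;
        if (blocks.size() <= block)
          blocks.resize(block + 1, std::vector<uint32_t>(32));
        return blocks[block][pos];
      }
    };

    int main() {
      AccPtrList l;
      l.slot(0) = 7;  l.slot(1) = 11;  l.slot(32) = 13;  l.slot(33) = 17;
      assert(l.slot(32) == 13 && l.slot(33) == 17);  // 32 fills block 1, 33 starts block 2
      return 0;
    }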
/* -------------------------------------------------------------------------
* SCAN_FRAGREQ: Request to start scanning the specified fragment of a table.
* ------------------------------------------------------------------------- */
void Dblqh::execSCAN_FRAGREQ(Signal* signal)
{
- const ScanFragReq * const scanFragReq = (ScanFragReq *)&signal->theData[0];
+ ScanFragReq * const scanFragReq = (ScanFragReq *)&signal->theData[0];
ScanFragRef * ref;
const Uint32 transid1 = scanFragReq->transId1;
const Uint32 transid2 = scanFragReq->transId2;
@@ -7267,9 +7425,10 @@ void Dblqh::execSCAN_FRAGREQ(Signal* signal)
jamEntry();
const Uint32 reqinfo = scanFragReq->requestInfo;
- const Uint32 fragId = scanFragReq->fragmentNo;
+ const Uint32 fragId = (scanFragReq->fragmentNoKeyLen & 0xFFFF);
+ const Uint32 keyLen = (scanFragReq->fragmentNoKeyLen >> 16);
tabptr.i = scanFragReq->tableId;
- const Uint32 scanConcurrentOperations = ScanFragReq::getConcurrency(reqinfo);
+ const Uint32 max_rows = scanFragReq->batch_size_rows;
const Uint32 scanLockMode = ScanFragReq::getLockMode(reqinfo);
const Uint8 keyinfo = ScanFragReq::getKeyinfoFlag(reqinfo);
const Uint8 rangeScan = ScanFragReq::getRangeScanFlag(reqinfo);
@@ -7287,9 +7446,9 @@ void Dblqh::execSCAN_FRAGREQ(Signal* signal)
tcConnectptr.p->savePointId = scanFragReq->savePointId;
} else {
jam();
- /* ---------------------------------------------------------------------
- * NO FREE TC RECORD AVAILABLE, THUS WE CANNOT HANDLE THE REQUEST.
- * --------------------------------------------------------------------- */
+ /* --------------------------------------------------------------------
+ * NO FREE TC RECORD AVAILABLE, THUS WE CANNOT HANDLE THE REQUEST.
+ * -------------------------------------------------------------------- */
errorCode = ZNO_TC_CONNECT_ERROR;
senderData = scanFragReq->senderData;
goto error_handler_early;
@@ -7299,8 +7458,7 @@ void Dblqh::execSCAN_FRAGREQ(Signal* signal)
*/
ndbrequire(scanLockMode == 0 || keyinfo);
- ndbrequire(scanConcurrentOperations <= MAX_PARALLEL_OP_PER_SCAN);
- ndbrequire(scanConcurrentOperations != 0);
+ ndbrequire(max_rows > 0 && max_rows <= MAX_PARALLEL_OP_PER_SCAN);
if (!getFragmentrec(signal, fragId)) {
errorCode = __LINE__;
goto error_handler;
@@ -7320,7 +7478,7 @@ void Dblqh::execSCAN_FRAGREQ(Signal* signal)
}
// XXX adjust cmaxAccOps for range scans and remove this comment
- if ((cbookedAccOps + scanConcurrentOperations) > cmaxAccOps) {
+ if ((cbookedAccOps + max_rows) > cmaxAccOps) {
jam();
errorCode = ScanFragRef::ZSCAN_BOOK_ACC_OP_ERROR;
goto error_handler;
@@ -7332,13 +7490,15 @@ void Dblqh::execSCAN_FRAGREQ(Signal* signal)
transid2,
fragId,
ZNIL);
+ tcConnectptr.p->save1 = 4;
+ tcConnectptr.p->primKeyLen = keyLen + 4; // hard coded in execKEYINFO
errorCode = initScanrec(scanFragReq);
if (errorCode != ZOK) {
jam();
goto error_handler2;
}//if
cscanNoFreeRec--;
- cbookedAccOps += scanConcurrentOperations;
+ cbookedAccOps += max_rows;
hashIndex = (tcConnectptr.p->transid[0] ^ tcConnectptr.p->tcOprec) & 1023;
nextHashptr.i = ctransidHash[hashIndex];
@@ -7432,9 +7592,9 @@ void Dblqh::continueAfterReceivingAllAiLab(Signal* signal)
void Dblqh::scanAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length)
{
+ scanptr.i = tcConnectptr.p->tcScanRec;
+ c_scanRecordPool.getPtr(scanptr);
if (saveTupattrbuf(signal, dataPtr, length) == ZOK) {
- scanptr.i = tcConnectptr.p->tcScanRec;
- c_scanRecordPool.getPtr(scanptr);
if (tcConnectptr.p->currTupAiLen < scanptr.p->scanAiLength) {
jam();
} else {
@@ -7444,23 +7604,29 @@ void Dblqh::scanAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length)
}//if
return;
}//if
- terrorCode = ZGET_ATTRINBUF_ERROR;
+ abort_scan(signal, scanptr.i, ZGET_ATTRINBUF_ERROR);
+}
+
+void Dblqh::abort_scan(Signal* signal, Uint32 scan_ptr_i, Uint32 errcode){
+ jam();
+ scanptr.i = scan_ptr_i;
+ c_scanRecordPool.getPtr(scanptr);
finishScanrec(signal);
releaseScanrec(signal);
tcConnectptr.p->transactionState = TcConnectionrec::IDLE;
tcConnectptr.p->abortState = TcConnectionrec::ABORT_ACTIVE;
-
+
ScanFragRef * ref = (ScanFragRef*)&signal->theData[0];
ref->senderData = tcConnectptr.p->clientConnectrec;
ref->transId1 = tcConnectptr.p->transid[0];
ref->transId2 = tcConnectptr.p->transid[1];
- ref->errorCode = terrorCode;
+ ref->errorCode = errcode;
sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGREF, signal,
ScanFragRef::SignalLength, JBB);
deleteTransidHash(signal);
releaseOprec(signal);
releaseTcrec(signal, tcConnectptr);
-}//Dblqh::scanAttrinfoLab()
+}
/*---------------------------------------------------------------------*/
/* Send this 'I am alive' signal to TC when it is received from ACC */
@@ -7531,34 +7697,18 @@ void Dblqh::accScanConfScanLab(Signal* signal)
return;
}//if
scanptr.p->scanAccPtr = accScanConf->accPtr;
- AttrbufPtr regAttrinbufptr;
- regAttrinbufptr.i = tcConnectptr.p->firstAttrinbuf;
- Uint32 boundAiLength = 0;
+ Uint32 boundAiLength = tcConnectptr.p->primKeyLen - 4;
if (scanptr.p->rangeScan) {
jam();
- // bound info length is in first of the 5 header words
- ptrCheckGuard(regAttrinbufptr, cattrinbufFileSize, attrbuf);
- boundAiLength = regAttrinbufptr.p->attrbuf[0];
TuxBoundInfo* const req = (TuxBoundInfo*)signal->getDataPtrSend();
req->errorCode = RNIL;
req->tuxScanPtrI = scanptr.p->scanAccPtr;
req->boundAiLength = boundAiLength;
- Uint32* out = (Uint32*)req + TuxBoundInfo::SignalLength;
- Uint32 sz = 0;
- while (sz < boundAiLength) {
- jam();
- ptrCheckGuard(regAttrinbufptr, cattrinbufFileSize, attrbuf);
- Uint32 dataLen = regAttrinbufptr.p->attrbuf[ZINBUF_DATA_LEN];
- MEMCOPY_NO_WORDS(&out[sz],
- &regAttrinbufptr.p->attrbuf[0],
- dataLen);
- sz += dataLen;
- regAttrinbufptr.i = regAttrinbufptr.p->attrbuf[ZINBUF_NEXT];
- ptrCheckGuard(regAttrinbufptr, cattrinbufFileSize, attrbuf);
- }
- ndbrequire(sz == boundAiLength);
+ if(boundAiLength > 0)
+ sendKeyinfoAcc(signal, TuxBoundInfo::SignalLength);
EXECUTE_DIRECT(DBTUX, GSN_TUX_BOUND_INFO,
- signal, TuxBoundInfo::SignalLength + boundAiLength);
+ signal, TuxBoundInfo::SignalLength + boundAiLength);
+ jamEntry();
if (req->errorCode != 0) {
jam();
/*
@@ -7574,12 +7724,14 @@ void Dblqh::accScanConfScanLab(Signal* signal)
signal->theData[1] = tcConnectptr.p->tableref;
signal->theData[2] = scanptr.p->scanSchemaVersion;
signal->theData[3] = ZSTORED_PROC_SCAN;
- ndbrequire(boundAiLength <= scanptr.p->scanAiLength);
- signal->theData[4] = scanptr.p->scanAiLength - boundAiLength;
+
+ signal->theData[4] = scanptr.p->scanAiLength;
sendSignal(tcConnectptr.p->tcTupBlockref,
GSN_STORED_PROCREQ, signal, 5, JBB);
signal->theData[0] = tcConnectptr.p->tupConnectrec;
+ AttrbufPtr regAttrinbufptr;
+ regAttrinbufptr.i = tcConnectptr.p->firstAttrinbuf;
while (regAttrinbufptr.i != RNIL) {
ptrCheckGuard(regAttrinbufptr, cattrinbufFileSize, attrbuf);
jam();
@@ -7646,7 +7798,7 @@ void Dblqh::continueFirstScanAfterBlockedLab(Signal* signal)
scanptr.i = tcConnectptr.p->tcScanRec;
c_scanRecordPool.getPtr(scanptr);
scanptr.p->scanState = ScanRecord::WAIT_NEXT_SCAN;
- initScanAccOp(signal);
+ init_acc_ptr_list(scanptr.p);
signal->theData[0] = scanptr.p->scanAccPtr;
signal->theData[1] = RNIL;
signal->theData[2] = NextScanReq::ZSCAN_NEXT;
@@ -7754,7 +7906,7 @@ void Dblqh::nextScanConfScanLab(Signal* signal)
************************************************************ */
if (scanptr.p->scanCompletedStatus == ZTRUE) {
if ((scanptr.p->scanLockHold == ZTRUE) &&
- (scanptr.p->scanCompletedOperations > 0)) {
+ (scanptr.p->m_curr_batch_size_rows > 0)) {
jam();
scanptr.p->scanReleaseCounter = 1;
scanReleaseLocksLab(signal);
@@ -7765,7 +7917,7 @@ void Dblqh::nextScanConfScanLab(Signal* signal)
return;
}//if
- if (scanptr.p->scanCompletedOperations > 0) {
+ if (scanptr.p->m_curr_batch_size_rows > 0) {
jam();
scanptr.p->scanCompletedStatus = ZTRUE;
scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
@@ -7785,7 +7937,7 @@ void Dblqh::nextScanConfScanLab(Signal* signal)
if (scanptr.p->scanCompletedStatus == ZTRUE) {
releaseActiveFrag(signal);
if ((scanptr.p->scanLockHold == ZTRUE) &&
- (scanptr.p->scanCompletedOperations > 0)) {
+ (scanptr.p->m_curr_batch_size_rows > 0)) {
jam();
scanptr.p->scanReleaseCounter = 1;
scanReleaseLocksLab(signal);
@@ -7796,7 +7948,7 @@ void Dblqh::nextScanConfScanLab(Signal* signal)
return;
}//if
- if (scanptr.p->scanCompletedOperations > 0) {
+ if (scanptr.p->m_curr_batch_size_rows > 0) {
jam();
releaseActiveFrag(signal);
scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
@@ -7814,10 +7966,11 @@ void Dblqh::nextScanConfScanLab(Signal* signal)
GSN_ACC_CHECK_SCAN, signal, 2, JBB);
return;
}//if
-
- ndbrequire(scanptr.p->scanCompletedOperations < MAX_PARALLEL_OP_PER_SCAN);
- scanptr.p->scanAccOpPtr[scanptr.p->scanCompletedOperations] =
- nextScanConf->accOperationPtr;
+ jam();
+ set_acc_ptr_in_scan_record(scanptr.p,
+ scanptr.p->m_curr_batch_size_rows,
+ nextScanConf->accOperationPtr);
+ jam();
scanptr.p->scanLocalref[0] = nextScanConf->localKey[0];
scanptr.p->scanLocalref[1] = nextScanConf->localKey[1];
scanptr.p->scanLocalFragid = nextScanConf->fragId;
@@ -7836,6 +7989,7 @@ void Dblqh::nextScanConfScanLab(Signal* signal)
return;
}//if
}//if
+ jam();
nextScanConfLoopLab(signal);
}//Dblqh::nextScanConfScanLab()
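The fixed scanAccOpPtr[] array in the scan record is gone; ACC operation pointers for the current batch are now stored and fetched through set_acc_ptr_in_scan_record()/get_acc_ptr_from_scan_record(), indexed by the row's position in the batch, with init_acc_ptr_list() resetting the list per batch. The sketch below is only an illustrative stand-in for that bookkeeping: the real Dblqh implementation seizes the storage from a record pool (seize_acc_ptr_list) and tracks it with scan_acc_index/scan_acc_attr_recs, whereas here a flat std::vector is assumed.

#include <cassert>
#include <cstdint>
#include <vector>

// Simplified stand-in for the per-scan list of ACC operation pointers.
struct ScanAccPtrList {
  std::vector<uint32_t> acc_op_ptrs;   // one slot per row of the batch
  uint32_t scan_acc_index = 0;         // next free slot

  explicit ScanAccPtrList(uint32_t max_rows) : acc_op_ptrs(max_rows, ~0u) {}

  // Record the ACC operation pointer returned for row 'row' of the batch.
  void set(uint32_t row, uint32_t acc_op_ptr) {
    assert(row < acc_op_ptrs.size());
    acc_op_ptrs[row] = acc_op_ptr;
    scan_acc_index = row + 1;
  }

  // Fetch the pointer for a row, e.g. when unlocking or taking it over.
  uint32_t get(uint32_t row) const {
    assert(row < acc_op_ptrs.size());
    return acc_op_ptrs[row];
  }
};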
@@ -7849,7 +8003,7 @@ void Dblqh::nextScanConfLoopLab(Signal* signal)
releaseActiveFrag(signal);
releaseOprec(signal);
if ((scanptr.p->scanLockHold == ZTRUE) &&
- (scanptr.p->scanCompletedOperations > 0)) {
+ (scanptr.p->m_curr_batch_size_rows > 0)) {
jam();
scanptr.p->scanReleaseCounter = 1;
scanReleaseLocksLab(signal);
@@ -7858,7 +8012,7 @@ void Dblqh::nextScanConfLoopLab(Signal* signal)
closeScanLab(signal);
return;
}//if
-
+ jam();
Uint32 tableRef;
Uint32 tupFragPtr;
Uint32 reqinfo = (scanptr.p->scanLockHold == ZFALSE);
@@ -7892,6 +8046,7 @@ void Dblqh::nextScanConfLoopLab(Signal* signal)
}//if
}
{
+ jam();
TupKeyReq * const tupKeyReq = (TupKeyReq *)signal->getDataPtrSend();
tupKeyReq->connectPtr = tcConnectptr.p->tupConnectrec;
@@ -7901,9 +8056,7 @@ void Dblqh::nextScanConfLoopLab(Signal* signal)
tupKeyReq->keyRef1 = scanptr.p->scanLocalref[0];
tupKeyReq->keyRef2 = scanptr.p->scanLocalref[1];
tupKeyReq->attrBufLen = 0;
- ndbrequire(scanptr.p->scanCompletedOperations < MAX_PARALLEL_OP_PER_SCAN);
- tupKeyReq->opRef =
- scanptr.p->scanApiOpPtr[scanptr.p->scanCompletedOperations];
+ tupKeyReq->opRef = scanptr.p->scanApiOpPtr;
tupKeyReq->applRef = scanptr.p->scanApiBlockref;
tupKeyReq->schemaVersion = scanptr.p->scanSchemaVersion;
tupKeyReq->storedProcedure = scanptr.p->scanStoredProcId;
@@ -7918,7 +8071,7 @@ void Dblqh::nextScanConfLoopLab(Signal* signal)
EXECUTE_DIRECT(blockNo, GSN_TUPKEYREQ, signal,
TupKeyReq::SignalLength);
}
-}//Dblqh::nextScanConfLoopLab()
+}
/* -------------------------------------------------------------------------
* RECEPTION OF FURTHER KEY INFORMATION WHEN KEY SIZE > 16 BYTES.
@@ -7959,13 +8112,10 @@ bool Dblqh::keyinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length)
* ------------------------------------------------------------------------- */
void Dblqh::scanTupkeyConfLab(Signal* signal)
{
- UintR tdata3;
- UintR tdata4;
- UintR tdata5;
+ const TupKeyConf * conf = (TupKeyConf *)signal->getDataPtr();
+ UintR tdata4 = conf->readLength;
+ UintR tdata5 = conf->lastRow;
- tdata3 = signal->theData[2];
- tdata4 = signal->theData[3];
- tdata5 = signal->theData[4];
tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
scanptr.i = tcConnectptr.p->tcScanRec;
releaseActiveFrag(signal);
@@ -7976,7 +8126,7 @@ void Dblqh::scanTupkeyConfLab(Signal* signal)
* --------------------------------------------------------------------- */
releaseOprec(signal);
if ((scanptr.p->scanLockHold == ZTRUE) &&
- (scanptr.p->scanCompletedOperations > 0)) {
+ (scanptr.p->m_curr_batch_size_rows > 0)) {
jam();
scanptr.p->scanReleaseCounter = 1;
scanReleaseLocksLab(signal);
@@ -7993,29 +8143,31 @@ void Dblqh::scanTupkeyConfLab(Signal* signal)
tdata4 += tcConnectptr.p->primKeyLen;// Inform API about keyinfo len aswell
}//if
- ndbrequire(scanptr.p->scanCompletedOperations < MAX_PARALLEL_OP_PER_SCAN);
- scanptr.p->scanOpLength[scanptr.p->scanCompletedOperations] = tdata4;
- scanptr.p->scanCompletedOperations++;
- if ((scanptr.p->scanCompletedOperations ==
- scanptr.p->scanConcurrentOperations) &&
- (scanptr.p->scanLockHold == ZTRUE)) {
- jam();
- scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
- sendScanFragConf(signal, ZFALSE);
- return;
- } else if (scanptr.p->scanCompletedOperations ==
- scanptr.p->scanConcurrentOperations) {
- jam();
- scanptr.p->scanReleaseCounter = scanptr.p->scanCompletedOperations;
- scanReleaseLocksLab(signal);
- return;
- } else if (scanptr.p->scanLockHold == ZTRUE) {
- jam();
- scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT;
+ ndbrequire(scanptr.p->m_curr_batch_size_rows < MAX_PARALLEL_OP_PER_SCAN);
+ scanptr.p->m_curr_batch_size_bytes+= tdata4;
+ scanptr.p->m_curr_batch_size_rows++;
+ scanptr.p->m_last_row = tdata5;
+ if (scanptr.p->check_scan_batch_completed() | tdata5){
+ if (scanptr.p->scanLockHold == ZTRUE) {
+ jam();
+ scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
+ sendScanFragConf(signal, ZFALSE);
+ return;
+ } else {
+ jam();
+ scanptr.p->scanReleaseCounter = scanptr.p->m_curr_batch_size_rows;
+ scanReleaseLocksLab(signal);
+ return;
+ }
} else {
- jam();
- scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT_COMMIT;
- }//if
+ if (scanptr.p->scanLockHold == ZTRUE) {
+ jam();
+ scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT;
+ } else {
+ jam();
+ scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT_COMMIT;
+ }
+ }
scanNextLoopLab(signal);
}//Dblqh::scanTupkeyConfLab()
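scanTupkeyConfLab now accumulates both a row count and a byte count for the current batch, records TUP's lastRow indication, and asks the scan record whether the batch is complete. The exact rule lives in ScanRecord::check_scan_batch_completed(); the minimal model below assumes it reports completion once either the configured row limit or byte limit is reached, which matches how m_max_batch_size_rows and m_max_batch_size_bytes are set up in initScanrec(). The batch is also cut short when TUP reports the last row of the fragment.

#include <cstdint>

// Simplified model of the per-batch accounting introduced here.
struct ScanBatch {
  uint32_t curr_rows = 0, curr_bytes = 0;   // m_curr_batch_size_*
  uint32_t max_rows = 0,  max_bytes = 0;    // m_max_batch_size_*

  // Assumed semantics: the batch is complete when either limit is reached.
  bool check_scan_batch_completed() const {
    return (max_rows  > 0 && curr_rows  >= max_rows) ||
           (max_bytes > 0 && curr_bytes >= max_bytes);
  }

  // Called once per row confirmed by TUP (cf. scanTupkeyConfLab).
  bool add_row(uint32_t read_length_bytes) {
    curr_rows  += 1;
    curr_bytes += read_length_bytes;
    return check_scan_batch_completed();
  }
};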
@@ -8056,10 +8208,15 @@ void Dblqh::continueScanAfterBlockedLab(Signal* signal)
if (scanptr.p->scanFlag == NextScanReq::ZSCAN_NEXT_ABORT) {
jam();
scanptr.p->scanFlag = NextScanReq::ZSCAN_NEXT_COMMIT;
- accOpPtr = scanptr.p->scanAccOpPtr[scanptr.p->scanCompletedOperations];
+ accOpPtr= get_acc_ptr_from_scan_record(scanptr.p,
+ scanptr.p->m_curr_batch_size_rows,
+ false);
+ scanptr.p->scan_acc_index--;
} else if (scanptr.p->scanFlag == NextScanReq::ZSCAN_NEXT_COMMIT) {
jam();
- accOpPtr = scanptr.p->scanAccOpPtr[scanptr.p->scanCompletedOperations - 1];
+ accOpPtr= get_acc_ptr_from_scan_record(scanptr.p,
+ scanptr.p->m_curr_batch_size_rows-1,
+ false);
} else {
jam();
accOpPtr = RNIL; // The value is not used in ACC
@@ -8069,9 +8226,9 @@ void Dblqh::continueScanAfterBlockedLab(Signal* signal)
signal->theData[1] = accOpPtr;
signal->theData[2] = scanptr.p->scanFlag;
if (! scanptr.p->rangeScan)
- sendSignal(tcConnectptr.p->tcAccBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
+ sendSignal(tcConnectptr.p->tcAccBlockref, GSN_NEXT_SCANREQ, signal, 3,JBB);
else
- sendSignal(tcConnectptr.p->tcTuxBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
+ sendSignal(tcConnectptr.p->tcTuxBlockref, GSN_NEXT_SCANREQ, signal, 3,JBB);
}//Dblqh::continueScanAfterBlockedLab()
/* -------------------------------------------------------------------------
@@ -8093,7 +8250,7 @@ void Dblqh::scanTupkeyRefLab(Signal* signal)
* STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
* --------------------------------------------------------------------- */
if ((scanptr.p->scanLockHold == ZTRUE) &&
- (scanptr.p->scanCompletedOperations > 0)) {
+ (scanptr.p->m_curr_batch_size_rows > 0)) {
jam();
scanptr.p->scanReleaseCounter = 1;
scanReleaseLocksLab(signal);
@@ -8114,8 +8271,8 @@ void Dblqh::scanTupkeyRefLab(Signal* signal)
scanptr.p->scanReleaseCounter = 1;
} else {
jam();
- scanptr.p->scanCompletedOperations++;
- scanptr.p->scanReleaseCounter = scanptr.p->scanCompletedOperations;
+ scanptr.p->m_curr_batch_size_rows++;
+ scanptr.p->scanReleaseCounter = scanptr.p->m_curr_batch_size_rows;
}//if
/* --------------------------------------------------------------------
* WE NEED TO RELEASE ALL LOCKS CURRENTLY
@@ -8125,7 +8282,7 @@ void Dblqh::scanTupkeyRefLab(Signal* signal)
return;
}//if
Uint32 time_passed= tcConnectptr.p->tcTimer - cLqhTimeOutCount;
- if (scanptr.p->scanCompletedOperations > 0) {
+ if (scanptr.p->m_curr_batch_size_rows > 0) {
if (time_passed > 1) {
/* -----------------------------------------------------------------------
* WE NEED TO ENSURE THAT WE DO NOT SEARCH FOR THE NEXT TUPLE FOR A
@@ -8133,7 +8290,7 @@ void Dblqh::scanTupkeyRefLab(Signal* signal)
* THE FOUND TUPLE IF FOUND TUPLES ARE RARE. If more than 10 ms passed we
* send the found tuples to the API.
* ----------------------------------------------------------------------- */
- scanptr.p->scanReleaseCounter = scanptr.p->scanCompletedOperations + 1;
+ scanptr.p->scanReleaseCounter = scanptr.p->m_curr_batch_size_rows + 1;
scanReleaseLocksLab(signal);
return;
}
@@ -8247,7 +8404,8 @@ void Dblqh::tupScanCloseConfLab(Signal* signal)
ScanFragRef::SignalLength, JBB);
} else {
jam();
- scanptr.p->scanCompletedOperations = 0;
+ scanptr.p->m_curr_batch_size_rows = 0;
+ scanptr.p->m_curr_batch_size_bytes= 0;
sendScanFragConf(signal, ZSCAN_FRAG_CLOSED);
}//if
finishScanrec(signal);
@@ -8258,20 +8416,6 @@ void Dblqh::tupScanCloseConfLab(Signal* signal)
releaseTcrec(signal, tcConnectptr);
}//Dblqh::tupScanCloseConfLab()
-/* =========================================================================
- * ======= INITIATE SCAN_ACC_OP_PTR TO RNIL IN SCAN RECORD =======
- *
- * SUBROUTINE SHORT NAME = ISA
- * ========================================================================= */
-void Dblqh::initScanAccOp(Signal* signal)
-{
- UintR tisaIndex;
-
- for (tisaIndex = 0; tisaIndex < MAX_PARALLEL_OP_PER_SCAN; tisaIndex++) {
- scanptr.p->scanAccOpPtr[tisaIndex] = RNIL;
- }//for
-}//Dblqh::initScanAccOp()
-
/* =========================================================================
* ======= INITIATE SCAN RECORD =======
*
@@ -8280,7 +8424,8 @@ void Dblqh::initScanAccOp(Signal* signal)
Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq)
{
const Uint32 reqinfo = scanFragReq->requestInfo;
- const Uint32 scanConcurrentOperations = ScanFragReq::getConcurrency(reqinfo);
+ const Uint32 max_rows = scanFragReq->batch_size_rows;
+ const Uint32 max_bytes = scanFragReq->batch_size_bytes;
const Uint32 scanLockMode = ScanFragReq::getLockMode(reqinfo);
const Uint32 scanLockHold = ScanFragReq::getHoldLockFlag(reqinfo);
const Uint32 keyinfo = ScanFragReq::getKeyinfoFlag(reqinfo);
@@ -8297,8 +8442,12 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq)
scanptr.p->scanAiLength = attrLen;
scanptr.p->scanTcrec = tcConnectptr.i;
scanptr.p->scanSchemaVersion = scanFragReq->schemaVersion;
- scanptr.p->scanCompletedOperations = 0;
- scanptr.p->scanConcurrentOperations = scanConcurrentOperations;
+
+ scanptr.p->m_curr_batch_size_rows = 0;
+ scanptr.p->m_curr_batch_size_bytes= 0;
+ scanptr.p->m_max_batch_size_rows = max_rows;
+ scanptr.p->m_max_batch_size_bytes = max_bytes;
+
scanptr.p->scanErrorCounter = 0;
scanptr.p->scanLockMode = scanLockMode;
scanptr.p->readCommitted = readCommitted;
@@ -8310,14 +8459,17 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq)
scanptr.p->scanLocalFragid = 0;
scanptr.p->scanTcWaiting = ZTRUE;
scanptr.p->scanNumber = ~0;
+ scanptr.p->scanApiOpPtr = scanFragReq->clientOpPtr;
+ scanptr.p->m_last_row = 0;
- for (Uint32 i = 0; i < scanConcurrentOperations; i++) {
+ if (max_rows == 0 || (max_bytes > 0 && max_rows > max_bytes)){
jam();
- scanptr.p->scanApiOpPtr[i] = scanFragReq->clientOpPtr[i];
- scanptr.p->scanOpLength[i] = 0;
- scanptr.p->scanAccOpPtr[i] = 0;
- }//for
-
+ return ScanFragRef::ZWRONG_BATCH_SIZE;
+ }
+ if (!seize_acc_ptr_list(scanptr.p, max_rows)){
+ jam();
+ return ScanFragRef::ZTOO_MANY_ACTIVE_SCAN_ERROR;
+ }
/**
* Used for scan take over
*/
@@ -8372,38 +8524,9 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq)
#endif
c_scanTakeOverHash.add(scanptr);
}
+ init_acc_ptr_list(scanptr.p);
return ZOK;
-
-#if 0
- if (! rangeScan) {
- jam();
- for (Int32 i = NR_ScanNo - 1; i >= 0; i--) {
- jam();
- if (fragptr.p->fragScanRec[i] == ZNIL) {
- jam();
- scanptr.p->scanNumber = i;
- fragptr.p->fragScanRec[i] = scanptr.i;
- return ZOK;
- }//if
- }//for
- } else {
- jam();
- // put in second half of fragScanRec of primary table fragment
- FragrecordPtr tFragPtr;
- tFragPtr.i = fragptr.p->tableFragptr;
- ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
- for (Uint32 i = NR_MinRangeScanNo; i < NR_MaxRangeScanNo; i++) {
- if (tFragPtr.p->fragScanRec[i] == ZNIL) {
- jam();
- scanptr.p->scanNumber = i;
- tFragPtr.p->fragScanRec[i] = scanptr.i;
- return ZOK;
- }
- }
- }
- return ZNO_FREE_FRAG_SCAN_REC_ERROR;
-#endif
-}//Dblqh::initScanrec()
+}
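initScanrec() now validates the requested batch shape before booking per-row resources: a batch of zero rows is rejected, and so is a row limit larger than a non-zero byte limit, apparently on the assumption that every returned row consumes at least one unit of the byte budget. Failure to seize the ACC pointer list is reported separately as ZTOO_MANY_ACTIVE_SCAN_ERROR; the standalone check below covers only the size validation, with the enum standing in for the real ScanFragRef error codes.

#include <cstdint>

enum BatchCheckResult { BATCH_OK, BATCH_WRONG_SIZE };

// Mirrors the guard added to Dblqh::initScanrec(): reject an empty batch,
// and reject a row limit that exceeds a non-zero byte limit.
BatchCheckResult check_batch_size(uint32_t max_rows, uint32_t max_bytes) {
  if (max_rows == 0 || (max_bytes > 0 && max_rows > max_bytes))
    return BATCH_WRONG_SIZE;
  return BATCH_OK;
}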
/* =========================================================================
* ======= INITIATE TC RECORD AT SCAN =======
@@ -8444,10 +8567,12 @@ void Dblqh::initScanTc(Signal* signal,
* ========================================================================= */
void Dblqh::finishScanrec(Signal* signal)
{
+ release_acc_ptr_list(scanptr.p);
+
FragrecordPtr tFragPtr;
tFragPtr.i = scanptr.p->fragPtrI;
ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
-
+
LocalDLFifoList<ScanRecord> queue(c_scanRecordPool,
tFragPtr.p->m_queuedScans);
@@ -8528,7 +8653,7 @@ void Dblqh::releaseScanrec(Signal* signal)
scanptr.p->scanState = ScanRecord::SCAN_FREE;
scanptr.p->scanType = ScanRecord::ST_IDLE;
scanptr.p->scanTcWaiting = ZFALSE;
- cbookedAccOps -= scanptr.p->scanConcurrentOperations;
+ cbookedAccOps -= scanptr.p->m_max_batch_size_rows;
cscanNoFreeRec++;
}//Dblqh::releaseScanrec()
@@ -8540,7 +8665,7 @@ void Dblqh::sendKeyinfo20(Signal* signal,
ScanRecord * scanP,
TcConnectionrec * tcConP)
{
- ndbrequire(scanP->scanCompletedOperations < MAX_PARALLEL_OP_PER_SCAN);
+ ndbrequire(scanP->m_curr_batch_size_rows < MAX_PARALLEL_OP_PER_SCAN);
KeyInfo20 * keyInfo = (KeyInfo20 *)&signal->theData[0];
DatabufPtr TdataBuf;
@@ -8554,7 +8679,7 @@ void Dblqh::sendKeyinfo20(Signal* signal,
*/
ndbrequire(keyLen * 4 <= sizeof(signal->theData));
const BlockReference ref = scanP->scanApiBlockref;
- const Uint32 scanOp = scanP->scanCompletedOperations;
+ const Uint32 scanOp = scanP->m_curr_batch_size_rows;
const Uint32 nodeId = refToNode(ref);
const bool connectedToNode = getNodeInfo(nodeId).m_connected;
const Uint32 type = getNodeInfo(nodeId).m_type;
@@ -8578,11 +8703,11 @@ void Dblqh::sendKeyinfo20(Signal* signal,
TdataBuf.i = TdataBuf.p->nextDatabuf;
}
- keyInfo->clientOpPtr = scanP->scanApiOpPtr[scanOp];
+ keyInfo->clientOpPtr = scanP->scanApiOpPtr;
keyInfo->keyLen = keyLen;
keyInfo->scanInfo_Node = KeyInfo20::setScanInfo(scanOp,
scanP->scanNumber)+
- (getOwnNodeId() << 16);
+ (getOwnNodeId() << 20);
keyInfo->transId1 = tcConP->transid[0];
keyInfo->transId2 = tcConP->transid[1];
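sendKeyinfo20() now shifts the node id to bit 20 instead of bit 16 when building scanInfo_Node, leaving more room for the value produced by KeyInfo20::setScanInfo(scanOp, scanNumber). The exact field widths are defined by that helper in the signal headers; the sketch below only illustrates the general packing idea with hypothetical widths (8 bits for the batch-row index, 12 bits for the scan number, node id from bit 20 upward).

#include <cstdint>

// Hypothetical layout for a 32-bit scanInfo_Node word; the real widths are
// fixed by KeyInfo20::setScanInfo() in the NDB signal definitions.
inline uint32_t pack_scan_info_node(uint32_t scanOp,      // row within batch
                                    uint32_t scanNumber,  // scan id on fragment
                                    uint32_t ownNodeId) {
  return (scanOp & 0xFF) | ((scanNumber & 0xFFF) << 8) | (ownNodeId << 20);
}

inline uint32_t unpack_node_id(uint32_t scanInfo_Node) {
  return scanInfo_Node >> 20;
}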
@@ -8663,23 +8788,27 @@ void Dblqh::sendKeyinfo20(Signal* signal,
* ------------------------------------------------------------------------ */
void Dblqh::sendScanFragConf(Signal* signal, Uint32 scanCompleted)
{
+ Uint32 completed_ops= scanptr.p->m_curr_batch_size_rows;
+ Uint32 total_len= scanptr.p->m_curr_batch_size_bytes;
+ scanptr.p->scanTcWaiting = ZFALSE;
+
if(ERROR_INSERTED(5037)){
CLEAR_ERROR_INSERT_VALUE;
return;
}
-
- scanptr.p->scanTcWaiting = ZFALSE;
ScanFragConf * conf = (ScanFragConf*)&signal->theData[0];
+ NodeId tc_node_id= refToNode(tcConnectptr.p->clientBlockref);
+ Uint32 trans_id1= tcConnectptr.p->transid[0];
+ Uint32 trans_id2= tcConnectptr.p->transid[1];
conf->senderData = tcConnectptr.p->clientConnectrec;
- conf->completedOps = scanptr.p->scanCompletedOperations;
+ conf->completedOps = completed_ops;
conf->fragmentCompleted = scanCompleted;
- for(Uint32 i = 0; i<MAX_PARALLEL_OP_PER_SCAN; i++)
- conf->opReturnDataLen[i] = scanptr.p->scanOpLength[i];
- conf->transId1 = tcConnectptr.p->transid[0];
- conf->transId2 = tcConnectptr.p->transid[1];
+ conf->transId1 = trans_id1;
+ conf->transId2 = trans_id2;
+ conf->total_len= total_len;
sendSignal(tcConnectptr.p->clientBlockref, GSN_SCAN_FRAGCONF,
- signal, ScanFragConf::SignalLength, JBB);
+ signal, ScanFragConf::SignalLength, JBB);
}//Dblqh::sendScanFragConf()
/* ######################################################################### */
@@ -8775,11 +8904,11 @@ void Dblqh::execCOPY_FRAGREQ(Signal* signal)
/* ------------------------------------------------------------------------- */
// We keep track of how many operation records in ACC that has been booked.
// Copy fragment has records always booked and thus need not book any. The
-// most operations in parallel use is the scanConcurrentOperations.
+// most operations in parallel use is the m_max_batch_size_rows.
// This variable has to be set-up here since it is used by releaseScanrec
// to unbook operation records in ACC.
/* ------------------------------------------------------------------------- */
- scanptr.p->scanConcurrentOperations = 0;
+ scanptr.p->m_max_batch_size_rows = 0;
scanptr.p->rangeScan = 0;
seizeTcrec();
@@ -8970,7 +9099,7 @@ void Dblqh::nextScanConfCopyLab(Signal* signal)
return;
}
- scanptr.p->scanAccOpPtr[0] = nextScanConf->accOperationPtr;
+ set_acc_ptr_in_scan_record(scanptr.p, 0, nextScanConf->accOperationPtr);
initCopyTc(signal);
if (tcConnectptr.p->primKeyLen > 4) {
jam();
@@ -9244,8 +9373,9 @@ void Dblqh::continueCopyAfterBlockedLab(Signal* signal)
scanptr.i = tcConnectptr.p->tcScanRec;
c_scanRecordPool.getPtr(scanptr);
tcConnectptr.p->errorCode = 0;
+ Uint32 acc_op_ptr= get_acc_ptr_from_scan_record(scanptr.p, 0, false);
signal->theData[0] = scanptr.p->scanAccPtr;
- signal->theData[1] = scanptr.p->scanAccOpPtr[0];
+ signal->theData[1] = acc_op_ptr;
signal->theData[2] = NextScanReq::ZSCAN_NEXT_COMMIT;
sendSignal(tcConnectptr.p->tcAccBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
return;
@@ -14677,7 +14807,7 @@ void Dblqh::execDEBUG_SIG(Signal* signal)
tdebug = logPagePtr.p->logPageWord[0];
char buf[100];
- snprintf(buf, 100,
+ BaseString::snprintf(buf, 100,
"Error while reading REDO log.\n"
"D=%d, F=%d Mb=%d FP=%d W1=%d W2=%d",
signal->theData[2], signal->theData[3], signal->theData[4],
@@ -15864,7 +15994,7 @@ void Dblqh::deleteFragrec(Uint32 fragId)
{
Uint32 indexFound= RNIL;
fragptr.i = RNIL;
- for (Uint32 i = (NO_OF_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
+ for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
jam();
if (tabptr.p->fragid[i] == fragId) {
fragptr.i = tabptr.p->fragrec[i];
@@ -15972,7 +16102,7 @@ void Dblqh::getFirstInLogQueue(Signal* signal)
/* ---------------------------------------------------------------- */
bool Dblqh::getFragmentrec(Signal* signal, Uint32 fragId)
{
- for (Uint32 i = (NO_OF_FRAG_PER_NODE - 1); (UintR)~i; i--) {
+ for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (UintR)~i; i--) {
jam();
if (tabptr.p->fragid[i] == fragId) {
fragptr.i = tabptr.p->fragrec[i];
@@ -16378,6 +16508,8 @@ void Dblqh::initialiseScanrec(Signal* signal)
scanptr.p->scanTcWaiting = ZFALSE;
scanptr.p->nextHash = RNIL;
scanptr.p->prevHash = RNIL;
+ scanptr.p->scan_acc_index= 0;
+ scanptr.p->scan_acc_attr_recs= 0;
}
tmp.release();
}//Dblqh::initialiseScanrec()
@@ -16394,7 +16526,7 @@ void Dblqh::initialiseTabrec(Signal* signal)
ptrAss(tabptr, tablerec);
tabptr.p->tableStatus = Tablerec::NOT_DEFINED;
tabptr.p->usageCount = 0;
- for (Uint32 i = 0; i <= (NO_OF_FRAG_PER_NODE - 1); i++) {
+ for (Uint32 i = 0; i <= (MAX_FRAG_PER_NODE - 1); i++) {
tabptr.p->fragid[i] = ZNIL;
tabptr.p->fragrec[i] = RNIL;
}//for
@@ -16716,7 +16848,7 @@ bool Dblqh::insertFragrec(Signal* signal, Uint32 fragId)
return false;
}//if
seizeFragmentrec(signal);
- for (Uint32 i = (NO_OF_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
+ for (Uint32 i = (MAX_FRAG_PER_NODE - 1); (Uint32)~i; i--) {
jam();
if (tabptr.p->fragid[i] == ZNIL) {
jam();
@@ -18173,8 +18305,8 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal)
infoEvent(" copyptr=%d, ailen=%d, complOps=%d, concurrOps=%d",
sp.p->copyPtr,
sp.p->scanAiLength,
- sp.p->scanCompletedOperations,
- sp.p->scanConcurrentOperations);
+ sp.p->m_curr_batch_size_rows,
+ sp.p->m_max_batch_size_rows);
infoEvent(" errCnt=%d, localFid=%d, schV=%d",
sp.p->scanErrorCounter,
sp.p->scanLocalFragid,
diff --git a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
index 6e32216557c..a209df24c44 100644
--- a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
+++ b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
@@ -33,6 +33,7 @@
#include <signaldata/TrigAttrInfo.hpp>
#include <signaldata/TcIndx.hpp>
#include <signaldata/TransIdAI.hpp>
+#include <signaldata/EventReport.hpp>
#include <trigger_definitions.h>
#include <SignalCounter.hpp>
@@ -191,7 +192,8 @@ public:
OS_WAIT_ATTR = 14,
OS_WAIT_COMMIT_CONF = 15,
OS_WAIT_ABORT_CONF = 16,
- OS_WAIT_COMPLETE_CONF = 17
+ OS_WAIT_COMPLETE_CONF = 17,
+ OS_WAIT_SCAN = 18
};
enum AbortState {
@@ -542,13 +544,11 @@ public:
attrInfo(abp),
expectedTransIdAI(0),
transIdAI(abp),
- tcIndxReq(new TcIndxReq()),
indexReadTcConnect(RNIL)
{}
~TcIndexOperation()
{
- delete tcIndxReq;
}
// Index data
@@ -561,7 +561,7 @@ public:
Uint32 expectedTransIdAI;
AttributeBuffer transIdAI; // For accumulating TransId_AI
- TcIndxReq* tcIndxReq;
+ TcIndxReq tcIndxReq;
UintR connectionIndex;
UintR indexReadTcConnect; //
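TcIndexOperation previously allocated its TcIndxReq with new in the constructor and deleted it in the destructor; the member is now held by value, so the record needs no manual memory management and nothing can leak when the pools in DbtcInit.cpp re-initialise records with placement new. A minimal illustration of the same change, outside the NDB class hierarchy:

#include <cstdint>

struct Request { uint32_t words[8]; };      // stand-in for TcIndxReq

// Before: owning raw pointer, explicit new/delete in ctor/dtor.
struct OpWithPointer {
  Request* req;
  OpWithPointer() : req(new Request()) {}
  ~OpWithPointer() { delete req; }
};

// After: the request is embedded by value; the default special members
// suffice, and placement-new re-construction leaves nothing dangling.
struct OpByValue {
  Request req;
};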
@@ -928,17 +928,22 @@ public:
UintR distributionGroup;
UintR nextCacheRec;
UintR distributionKeySize;
- Uint16 scanNode;
- unsigned scanTakeOverInd : 1;
- unsigned scanInfo : 15; // 12 bits used currently
+ Uint32 scanInfo;
//---------------------------------------------------
- // Third and fourth 16 byte cache line in second 64
- // byte cache line. Not used currently.
+ // Third 16 byte cache line in second 64
+ // byte cache line. Diverse use.
//---------------------------------------------------
+ Uint32 scanNode;
+ Uint32 scanTakeOverInd;
UintR firstKeybuf; /* POINTER THE LINKED LIST OF KEY BUFFERS */
UintR lastKeybuf; /* VARIABLE POINTING TO THE LAST KEY BUFFER */
- UintR packedCacheVar[6];
+
+ //---------------------------------------------------
+ // Fourth 16 byte cache line in second 64
+ // byte cache line. Not used currently.
+ //---------------------------------------------------
+ UintR packedCacheVar[4];
};
typedef Ptr<CacheRecord> CacheRecordPtr;
@@ -1151,7 +1156,6 @@ public:
union { Uint32 m_queued_count; Uint32 scanReceivedOperations; };
DLList<ScanFragRec>::Head m_queued_scan_frags; // In TC !sent to API
DLList<ScanFragRec>::Head m_delivered_scan_frags;// Delivered to API
- DLList<ScanFragRec>::Head m_completed_scan_frags;// Completed
// Id of the next fragment to be scanned. Used by scan fragment
// processes when they are ready for the next fragment
@@ -1166,6 +1170,8 @@ public:
// Length of expected attribute information
Uint32 scanAiLength;
+ Uint32 scanKeyLen;
+
// Reference to ApiConnectRecord
Uint32 scanApiRec;
@@ -1185,20 +1191,13 @@ public:
Uint32 scanTableref;
// Number of operation records per scanned fragment
+ // Number of operations in first batch
+ // Max number of bytes per batch
Uint16 noOprecPerFrag;
+ Uint16 first_batch_size;
+ Uint32 batch_byte_size;
- // Shall the locks be held until the application have read the
- // records
- Uint8 scanLockHold;
-
- // Shall the locks be read or write locks
- Uint8 scanLockMode;
-
- // Skip locks by other transactions and read latest committed
- Uint8 readCommitted;
-
- // Scan is on ordered index
- Uint8 rangeScan;
+ Uint32 scanRequestInfo; // ScanFrag format
// Close is ordered
bool m_close_scan_req;
@@ -1418,18 +1417,14 @@ private:
UintR anApiConnectPtr);
void handleScanStop(Signal* signal, UintR aFailedNode);
void initScanTcrec(Signal* signal);
- void initScanApirec(Signal* signal,
- Uint32 buddyPtr,
- UintR transid1,
- UintR transid2);
- void initScanrec(ScanRecordPtr, const class ScanTabReq*,
+ void initScanrec(ScanRecordPtr, const class ScanTabReq*,
const UintR scanParallel,
const UintR noOprecPerFrag);
void initScanfragrec(Signal* signal);
void releaseScanResources(ScanRecordPtr);
ScanRecordPtr seizeScanrec(Signal* signal);
- void sendScanFragReq(Signal* signal, ScanRecord*, ScanFragRec*);
- void sendScanTabConf(Signal* signal, ScanRecord*);
+ void sendScanFragReq(Signal*, ScanRecord*, ScanFragRec*);
+ void sendScanTabConf(Signal* signal, ScanRecordPtr);
void close_scan_req(Signal*, ScanRecordPtr, bool received_req);
void close_scan_req_send_conf(Signal*, ScanRecordPtr);
@@ -1464,7 +1459,7 @@ private:
void releaseAttrinfo();
void releaseGcp(Signal* signal);
void releaseKeys();
- void releaseSimpleRead(Signal* signal);
+ void releaseSimpleRead(Signal*, ApiConnectRecordPtr, TcConnectRecord*);
void releaseDirtyWrite(Signal* signal);
void releaseTcCon();
void releaseTcConnectFail(Signal* signal);
@@ -1568,7 +1563,7 @@ private:
void diFcountReqLab(Signal* signal, ScanRecordPtr);
void signalErrorRefuseLab(Signal* signal);
void abort080Lab(Signal* signal);
- void packKeyData000Lab(Signal* signal, BlockReference TBRef);
+ void packKeyData000Lab(Signal* signal, BlockReference TBRef, Uint32 len);
void abortScanLab(Signal* signal, ScanRecordPtr, Uint32 errCode);
void sendAbortedAfterTimeout(Signal* signal, int Tcheck);
void abort010Lab(Signal* signal);
@@ -1673,16 +1668,40 @@ private:
ApiConnectRecordPtr tmpApiConnectptr;
UintR tcheckGcpId;
- UintR cconcurrentOp;
-
- UintR cattrinfoCount;
- UintR ctransCount;
- UintR ccommitCount;
- UintR creadCount;
- UintR csimpleReadCount;
- UintR cwriteCount;
- UintR cabortCount;
+ struct TransCounters {
+ enum { Off, Timer, Started } c_trans_status;
+ UintR cattrinfoCount;
+ UintR ctransCount;
+ UintR ccommitCount;
+ UintR creadCount;
+ UintR csimpleReadCount;
+ UintR cwriteCount;
+ UintR cabortCount;
+ UintR cconcurrentOp;
+ Uint32 c_scan_count;
+ Uint32 c_range_scan_count;
+ void reset () {
+ cattrinfoCount = ctransCount = ccommitCount = creadCount =
+ csimpleReadCount = cwriteCount = cabortCount =
+ c_scan_count = c_range_scan_count = 0;
+ }
+ Uint32 report(Signal* signal){
+ signal->theData[0] = EventReport::TransReportCounters;
+ signal->theData[1] = ctransCount;
+ signal->theData[2] = ccommitCount;
+ signal->theData[3] = creadCount;
+ signal->theData[4] = csimpleReadCount;
+ signal->theData[5] = cwriteCount;
+ signal->theData[6] = cattrinfoCount;
+ signal->theData[7] = cconcurrentOp;
+ signal->theData[8] = cabortCount;
+ signal->theData[9] = c_scan_count;
+ signal->theData[10] = c_range_scan_count;
+ return 11;
+ }
+ } c_counters;
+
Uint16 cownNodeid;
Uint16 terrorCode;
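The loose transaction counters (ctransCount, ccommitCount, creadCount, ...) are gathered into a single TransCounters struct with reset() and report() helpers; DbtcMain.cpp fills a signal from report(), sends it as an EVENT_REP, resets the counters, and reschedules itself via CONTINUEB roughly every five seconds. The standalone model below keeps only a few counters to show the pattern; the real struct also carries attrinfo, simple-read, write, abort and concurrent-operation counts plus a status enum.

#include <cstdint>
#include <cstdio>

// Simplified model of the counter aggregation added to Dbtc.
struct Counters {
  uint32_t trans = 0, commit = 0, read = 0, scan = 0, range_scan = 0;

  void reset() { trans = commit = read = scan = range_scan = 0; }

  // Fill a signal-style word buffer and return the number of words used,
  // mirroring TransCounters::report().
  uint32_t report(uint32_t* data) const {
    data[0] = trans; data[1] = commit; data[2] = read;
    data[3] = scan;  data[4] = range_scan;
    return 5;
  }
};

int main() {
  Counters c;
  c.trans++; c.commit++;            // updated as operations execute
  uint32_t buf[16];
  uint32_t len = c.report(buf);     // sent as an event report periodically
  c.reset();                        // start the next reporting interval
  std::printf("reported %u words\n", len);
  return 0;
}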
diff --git a/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp b/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
index 6803c3609ed..e38089242c3 100644
--- a/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
+++ b/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
@@ -73,6 +73,7 @@ void Dbtc::initData()
void Dbtc::initRecords()
{
+ void *p;
// Records with dynamic sizes
cacheRecord = (CacheRecord*)allocRecord("CacheRecord",
sizeof(CacheRecord),
@@ -83,7 +84,7 @@ void Dbtc::initRecords()
capiConnectFilesize);
for(unsigned i = 0; i<capiConnectFilesize; i++) {
- void * p = &apiConnectRecord[i];
+ p = &apiConnectRecord[i];
new (p) ApiConnectRecord(c_theFiredTriggerPool,
c_theSeizedIndexOperationPool);
}
@@ -91,7 +92,8 @@ void Dbtc::initRecords()
DLFifoList<TcFiredTriggerData> triggers(c_theFiredTriggerPool);
FiredTriggerPtr tptr;
while(triggers.seize(tptr) == true) {
- new (tptr.p) TcFiredTriggerData();
+ p= tptr.p;
+ new (p) TcFiredTriggerData();
}
triggers.release();
@@ -109,7 +111,8 @@ void Dbtc::initRecords()
ArrayList<TcIndexOperation> indexOps(c_theIndexOperationPool);
TcIndexOperationPtr ioptr;
while(indexOps.seize(ioptr) == true) {
- new (ioptr.p) TcIndexOperation(c_theAttributeBufferPool);
+ p= ioptr.p;
+ new (p) TcIndexOperation(c_theAttributeBufferPool);
}
indexOps.release();
@@ -179,7 +182,6 @@ Dbtc::Dbtc(const class Configuration & conf):
c_maxNumberOfIndexOperations(0),
m_commitAckMarkerHash(m_commitAckMarkerPool)
{
-
BLOCK_CONSTRUCTOR(Dbtc);
const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
@@ -191,7 +193,7 @@ Dbtc::Dbtc(const class Configuration & conf):
ndb_mgm_get_int_parameter(p, CFG_DB_TRANS_BUFFER_MEM,
&transactionBufferMemory);
- ndb_mgm_get_int_parameter(p, CFG_DB_NO_INDEXES,
+ ndb_mgm_get_int_parameter(p, CFG_DB_NO_UNIQUE_HASH_INDEXES,
&maxNoOfIndexes);
ndb_mgm_get_int_parameter(p, CFG_DB_NO_INDEX_OPS,
&maxNoOfConcurrentIndexOperations);
diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
index 7e1db71faee..d8b3ee10532 100644
--- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -221,28 +221,14 @@ void Dbtc::execCONTINUEB(Signal* signal)
/* -------------------------------------------------------------------- */
// Report information about transaction activity once per second.
/* -------------------------------------------------------------------- */
- if (signal->theData[1] == 0) {
- signal->theData[0] = EventReport::TransReportCounters;
- signal->theData[1] = ctransCount;
- signal->theData[2] = ccommitCount;
- signal->theData[3] = creadCount;
- signal->theData[4] = csimpleReadCount;
- signal->theData[5] = cwriteCount;
- signal->theData[6] = cattrinfoCount;
- signal->theData[7] = cconcurrentOp;
- signal->theData[8] = cabortCount;
- sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 9, JBB);
- }//if
- ctransCount = 0;
- ccommitCount = 0;
- creadCount = 0;
- csimpleReadCount = 0;
- cwriteCount = 0;
- cattrinfoCount = 0;
- cabortCount = 0;
- signal->theData[0] = TcContinueB::ZTRANS_EVENT_REP;
- signal->theData[1] = 0;
- sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 5000, 2);
+ if (c_counters.c_trans_status == TransCounters::Timer){
+ Uint32 len = c_counters.report(signal);
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, len, JBB);
+
+ c_counters.reset();
+ signal->theData[0] = TcContinueB::ZTRANS_EVENT_REP;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 5000, 1);
+ }
return;
case TcContinueB::ZCONTINUE_TIME_OUT_FRAG_CONTROL:
jam();
@@ -288,6 +274,12 @@ void Dbtc::execCONTINUEB(Signal* signal)
transPtr.p->triggerPending = false;
executeTriggers(signal, &transPtr);
return;
+ case TcContinueB::DelayTCKEYCONF:
+ jam();
+ apiConnectptr.i = Tdata0;
+ ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+ sendtckeyconf(signal, Tdata1);
+ return;
default:
ndbrequire(false);
}//switch
@@ -693,9 +685,10 @@ void Dbtc::execNDB_STTOR(Signal* signal)
jam();
intstartphase3x010Lab(signal); /* SEIZE CONNECT RECORD IN EACH LQH*/
// Start transaction event reporting.
+ c_counters.c_trans_status = TransCounters::Timer;
+ c_counters.reset();
signal->theData[0] = TcContinueB::ZTRANS_EVENT_REP;
- signal->theData[1] = 1;
- sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 2);
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 10, 1);
return;
case ZINTSPH6:
jam();
@@ -1262,7 +1255,8 @@ void Dbtc::execTCRELEASEREQ(Signal* signal)
jam();
signal->theData[0] = tuserpointer;
signal->theData[1] = ZINVALID_CONNECTION;
- sendSignal(tapiBlockref, GSN_TCRELEASEREF, signal, 2, JBB);
+ signal->theData[2] = __LINE__;
+ sendSignal(tapiBlockref, GSN_TCRELEASEREF, signal, 3, JBB);
return;
} else {
jam();
@@ -1275,7 +1269,9 @@ void Dbtc::execTCRELEASEREQ(Signal* signal)
sendSignal(tapiBlockref, GSN_TCRELEASECONF, signal, 1, JBB);
} else {
if (tapiBlockref == apiConnectptr.p->ndbapiBlockref) {
- if (apiConnectptr.p->apiConnectstate == CS_CONNECTED) {
+ if (apiConnectptr.p->apiConnectstate == CS_CONNECTED ||
+ (apiConnectptr.p->apiConnectstate == CS_ABORTING &&
+ apiConnectptr.p->abortState == AS_IDLE)){
jam(); /* JUST REPLY OK */
releaseApiCon(signal, apiConnectptr.i);
signal->theData[0] = tuserpointer;
@@ -1285,14 +1281,19 @@ void Dbtc::execTCRELEASEREQ(Signal* signal)
jam();
signal->theData[0] = tuserpointer;
signal->theData[1] = ZINVALID_CONNECTION;
+ signal->theData[2] = __LINE__;
+ signal->theData[3] = apiConnectptr.p->apiConnectstate;
sendSignal(tapiBlockref,
- GSN_TCRELEASEREF, signal, 2, JBB);
+ GSN_TCRELEASEREF, signal, 4, JBB);
}
} else {
jam();
signal->theData[0] = tuserpointer;
signal->theData[1] = ZINVALID_CONNECTION;
- sendSignal(tapiBlockref, GSN_TCRELEASEREF, signal, 2, JBB);
+ signal->theData[2] = __LINE__;
+ signal->theData[3] = tapiBlockref;
+ signal->theData[4] = apiConnectptr.p->ndbapiBlockref;
+ sendSignal(tapiBlockref, GSN_TCRELEASEREF, signal, 5, JBB);
}//if
}//if
}//Dbtc::execTCRELEASEREQ()
@@ -1764,6 +1765,7 @@ void Dbtc::execKEYINFO(Signal* signal)
switch (apiConnectptr.p->apiConnectstate) {
case CS_RECEIVING:
case CS_REC_COMMITTING:
+ case CS_START_SCAN:
jam();
/*empty*/;
break;
@@ -1817,12 +1819,54 @@ void Dbtc::execKEYINFO(Signal* signal)
jam();
tckeyreq020Lab(signal);
return;
+ case OS_WAIT_SCAN:
+ break;
default:
jam();
terrorCode = ZSTATE_ERROR;
abortErrorLab(signal);
return;
}//switch
+
+ UintR TdataPos = 0;
+ UintR TkeyLen = regCachePtr->keylen;
+ UintR Tlen = regCachePtr->save1;
+
+ do {
+ if (cfirstfreeDatabuf == RNIL) {
+ jam();
+ abort();
+ seizeDatabuferrorLab(signal);
+ return;
+ }//if
+ linkKeybuf(signal);
+ arrGuard(TdataPos, 19);
+ databufptr.p->data[0] = signal->theData[TdataPos + 3];
+ databufptr.p->data[1] = signal->theData[TdataPos + 4];
+ databufptr.p->data[2] = signal->theData[TdataPos + 5];
+ databufptr.p->data[3] = signal->theData[TdataPos + 6];
+ Tlen = Tlen + 4;
+ TdataPos = TdataPos + 4;
+ if (Tlen < TkeyLen) {
+ jam();
+ if (TdataPos >= tmaxData) {
+ jam();
+ /*----------------------------------------------------*/
+ /** EXIT AND WAIT FOR SIGNAL KEYINFO OR KEYINFO9 **/
+ /** WHEN EITHER OF THE SIGNALS IS RECEIVED A JUMP **/
+ /** TO LABEL "KEYINFO_LABEL" IS DONE. THEN THE **/
+ /** PROGRAM RETURNS TO LABEL TCKEYREQ020 **/
+ /*----------------------------------------------------*/
+ setApiConTimer(apiConnectptr.i, ctcTimer, __LINE__);
+ regCachePtr->save1 = Tlen;
+ return;
+ }//if
+ } else {
+ jam();
+ return;
+ }//if
+ } while (1);
+ return;
}//Dbtc::execKEYINFO()
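The new branch reached from the OS_WAIT_SCAN state lets execKEYINFO buffer scan key words itself: each KEYINFO signal carries the key in 4-word groups starting at theData[3], regCachePtr->save1 tracks how many words have been stored so far, and the loop returns once keylen words have arrived or the signal's data (tmaxData words) is exhausted. A simplified standalone version of that accumulation, using a std::vector in place of the linked databuf records:

#include <cstdint>
#include <vector>

// Simplified model of the KEYINFO accumulation added for scans; 'saved'
// plays the role of regCachePtr->save1 and 'key_len' of regCachePtr->keylen.
struct KeyAccumulator {
  std::vector<uint32_t> key;   // replaces the linked 4-word databuf records
  uint32_t key_len = 0;        // total words expected
  uint32_t saved = 0;          // words received so far

  // Returns true once the whole key has been received.
  bool add_signal(const uint32_t* data, uint32_t words_in_signal) {
    uint32_t pos = 0;
    while (pos < words_in_signal && saved < key_len) {
      // key words always arrive in groups of four, like the databufs
      for (uint32_t i = 0; i < 4 && pos + i < words_in_signal; i++)
        key.push_back(data[pos + i]);
      pos += 4;
      saved += 4;
    }
    return saved >= key_len;
  }
};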
/*---------------------------------------------------------------------------*/
@@ -1831,45 +1875,45 @@ void Dbtc::execKEYINFO(Signal* signal)
/* WE WILL ALWAYS PACK 4 WORDS AT A TIME. */
/*---------------------------------------------------------------------------*/
void Dbtc::packKeyData000Lab(Signal* signal,
- BlockReference TBRef)
+ BlockReference TBRef,
+ Uint32 totalLen)
{
CacheRecord * const regCachePtr = cachePtr.p;
UintR Tmp;
- Uint16 tdataPos;
jam();
- tdataPos = 0;
- Tmp = regCachePtr->keylen;
+ Uint32 len = 0;
databufptr.i = regCachePtr->firstKeybuf;
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = apiConnectptr.p->transid[0];
+ signal->theData[2] = apiConnectptr.p->transid[1];
+ Uint32 * dst = signal->theData+3;
+ ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord);
+
do {
jam();
- if (tdataPos == 20) {
- jam();
- /*---------------------------------------------------------------------*/
- /* 4 MORE WORDS WILL NOT FIT IN THE 24 DATA WORDS IN A SIGNAL */
- /*---------------------------------------------------------------------*/
- sendKeyinfo(signal, TBRef, 20);
- tdataPos = 0;
- }//if
- Tmp = Tmp - 4;
- ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord);
- cdata[tdataPos ] = databufptr.p->data[0];
- cdata[tdataPos + 1] = databufptr.p->data[1];
- cdata[tdataPos + 2] = databufptr.p->data[2];
- cdata[tdataPos + 3] = databufptr.p->data[3];
- tdataPos = tdataPos + 4;
- if (Tmp <= 4) {
+ databufptr.i = databufptr.p->nextDatabuf;
+ dst[len + 0] = databufptr.p->data[0];
+ dst[len + 1] = databufptr.p->data[1];
+ dst[len + 2] = databufptr.p->data[2];
+ dst[len + 3] = databufptr.p->data[3];
+ len += 4;
+ if (totalLen <= 4) {
jam();
/*---------------------------------------------------------------------*/
/* LAST PACK OF KEY DATA HAVE BEEN SENT */
/*---------------------------------------------------------------------*/
/* THERE WERE UNSENT INFORMATION, SEND IT. */
/*---------------------------------------------------------------------*/
- sendKeyinfo(signal, TBRef, tdataPos);
- releaseKeys();
+ sendSignal(TBRef, GSN_KEYINFO, signal, 3 + len, JBB);
return;
- }//if
- databufptr.i = databufptr.p->nextDatabuf;
+ } else if(len == KeyInfo::DataLength){
+ jam();
+ len = 0;
+ sendSignal(TBRef, GSN_KEYINFO, signal, 3 + KeyInfo::DataLength, JBB);
+ }
+ totalLen -= 4;
+ ptrCheckGuard(databufptr, cdatabufFilesize, databufRecord);
} while (1);
}//Dbtc::packKeyData000Lab()
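packKeyData000Lab() now takes the remaining key length as a parameter and streams it straight out of the databuf chain as KEYINFO signals: a three-word header (the TC connect pointer and the two transaction-id words) followed by up to KeyInfo::DataLength key words per signal. The sketch below shows the same chunking over a flat array; sendKeyinfoWords() is a hypothetical stand-in for sendSignal(TBRef, GSN_KEYINFO, ...), and 20 words is assumed for KeyInfo::DataLength.

#include <algorithm>
#include <cstdint>
#include <cstdio>

const uint32_t KEYINFO_DATA_WORDS = 20;   // assumed KeyInfo::DataLength

// Hypothetical stand-in for sendSignal(TBRef, GSN_KEYINFO, signal, 3 + n, JBB).
void sendKeyinfoWords(const uint32_t header3[3], const uint32_t* words, uint32_t n) {
  std::printf("KEYINFO conn=%u words=%u\n", header3[0], 3 + n);
  (void)words;
}

// Stream 'totalLen' key words as KEYINFO signals of at most 20 data words
// each, every signal preceded by the same 3-word header, mirroring the
// rewritten packKeyData000Lab() loop.
void packKeyData(const uint32_t header3[3], const uint32_t* key, uint32_t totalLen) {
  while (totalLen > 0) {
    uint32_t chunk = std::min(totalLen, KEYINFO_DATA_WORDS);
    sendKeyinfoWords(header3, key, chunk);
    key += chunk;
    totalLen -= chunk;
  }
}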
@@ -2245,13 +2289,14 @@ void Dbtc::initApiConnectRec(Signal* signal,
{
const TcKeyReq * const tcKeyReq = (TcKeyReq *)&signal->theData[0];
UintR TfailureNr = cfailure_nr;
- UintR TtransCount = ctransCount;
+ UintR TtransCount = c_counters.ctransCount;
UintR Ttransid0 = tcKeyReq->transId1;
UintR Ttransid1 = tcKeyReq->transId2;
regApiPtr->m_exec_flag = 0;
regApiPtr->returncode = 0;
regApiPtr->returnsignal = RS_TCKEYCONF;
+ ndbassert(regApiPtr->firstTcConnect == RNIL);
regApiPtr->firstTcConnect = RNIL;
regApiPtr->lastTcConnect = RNIL;
regApiPtr->globalcheckpointid = 0;
@@ -2273,7 +2318,7 @@ void Dbtc::initApiConnectRec(Signal* signal,
if(releaseIndexOperations)
releaseAllSeizedIndexOperations(regApiPtr);
- ctransCount = TtransCount + 1;
+ c_counters.ctransCount = TtransCount + 1;
}//Dbtc::initApiConnectRec()
int
@@ -2298,7 +2343,7 @@ Dbtc::seizeTcRecord(Signal* signal)
TcConnectRecord * const regTcPtr =
&localTcConnectRecord[TfirstfreeTcConnect];
- UintR TconcurrentOp = cconcurrentOp;
+ UintR TconcurrentOp = c_counters.cconcurrentOp;
UintR TlastTcConnect = regApiPtr->lastTcConnect;
UintR TtcConnectptrIndex = tcConnectptr.i;
TcConnectRecordPtr tmpTcConnectptr;
@@ -2306,7 +2351,7 @@ Dbtc::seizeTcRecord(Signal* signal)
cfirstfreeTcConnect = regTcPtr->nextTcConnect;
tcConnectptr.p = regTcPtr;
- cconcurrentOp = TconcurrentOp + 1;
+ c_counters.cconcurrentOp = TconcurrentOp + 1;
regTcPtr->prevTcConnect = TlastTcConnect;
regTcPtr->nextTcConnect = RNIL;
regTcPtr->accumulatingTriggerData.i = RNIL;
@@ -2446,18 +2491,30 @@ void Dbtc::execTCKEYREQ(Signal* signal)
}
break;
case CS_STARTED:
- //------------------------------------------------------------------------
- // Transaction is started already. Check that the operation is on the same
- // transaction.
- //------------------------------------------------------------------------
- compare_transid1 = regApiPtr->transid[0] ^ tcKeyReq->transId1;
- compare_transid2 = regApiPtr->transid[1] ^ tcKeyReq->transId2;
- jam();
- compare_transid1 = compare_transid1 | compare_transid2;
- if (compare_transid1 != 0) {
- TCKEY_abort(signal, 1);
- return;
- }//if
+ if(TstartFlag == 1 && regApiPtr->firstTcConnect == RNIL)
+ {
+ /**
+ * If last operation in last transaction was a simple/dirty read
+ * it does not have to be committed or rollbacked hence,
+ * the state will be CS_STARTED
+ */
+ jam();
+ initApiConnectRec(signal, regApiPtr);
+ regApiPtr->m_exec_flag = TexecFlag;
+ } else {
+ //----------------------------------------------------------------------
+ // Transaction is started already.
+ // Check that the operation is on the same transaction.
+ //-----------------------------------------------------------------------
+ compare_transid1 = regApiPtr->transid[0] ^ tcKeyReq->transId1;
+ compare_transid2 = regApiPtr->transid[1] ^ tcKeyReq->transId2;
+ jam();
+ compare_transid1 = compare_transid1 | compare_transid2;
+ if (compare_transid1 != 0) {
+ TCKEY_abort(signal, 1);
+ return;
+ }//if
+ }
break;
case CS_ABORTING:
if (regApiPtr->abortState == AS_IDLE) {
@@ -2576,7 +2633,7 @@ void Dbtc::execTCKEYREQ(Signal* signal)
UintR TapiConnectptrIndex = apiConnectptr.i;
UintR TsenderData = tcKeyReq->senderData;
UintR TattrLen = tcKeyReq->getAttrinfoLen(tcKeyReq->attrLen);
- UintR TattrinfoCount = cattrinfoCount;
+ UintR TattrinfoCount = c_counters.cattrinfoCount;
regTcPtr->apiConnect = TapiConnectptrIndex;
regTcPtr->clientData = TsenderData;
@@ -2597,7 +2654,7 @@ void Dbtc::execTCKEYREQ(Signal* signal)
}
regCachePtr->attrlength = TattrLen;
- cattrinfoCount = TattrinfoCount + TattrLen;
+ c_counters.cattrinfoCount = TattrinfoCount + TattrLen;
UintR TtabptrIndex = localTabptr.i;
UintR TtableSchemaVersion = tcKeyReq->tableSchemaVersion;
@@ -2606,7 +2663,7 @@ void Dbtc::execTCKEYREQ(Signal* signal)
regCachePtr->schemaVersion = TtableSchemaVersion;
regTcPtr->operation = TOperationType;
- // Uint8 TSimpleFlag = tcKeyReq->getSimpleFlag(Treqinfo);
+ Uint8 TSimpleFlag = tcKeyReq->getSimpleFlag(Treqinfo);
Uint8 TDirtyFlag = tcKeyReq->getDirtyFlag(Treqinfo);
Uint8 TInterpretedFlag = tcKeyReq->getInterpretedFlag(Treqinfo);
Uint8 TDistrGroupFlag = tcKeyReq->getDistributionGroupFlag(Treqinfo);
@@ -2614,11 +2671,9 @@ void Dbtc::execTCKEYREQ(Signal* signal)
Uint8 TDistrKeyFlag = tcKeyReq->getDistributionKeyFlag(Treqinfo);
Uint8 TexecuteFlag = TexecFlag;
- //RONM_TEST Disable simple reads temporarily
- regCachePtr->opSimple = 0;
- // regCachePtr->opSimple = TSimpleFlag;
- regTcPtr->dirtyOp = TDirtyFlag;
+ regCachePtr->opSimple = TSimpleFlag;
regCachePtr->opExec = TInterpretedFlag;
+ regTcPtr->dirtyOp = TDirtyFlag;
regCachePtr->distributionGroupIndicator = TDistrGroupFlag;
regCachePtr->distributionGroupType = TDistrGroupTypeFlag;
@@ -2632,8 +2687,9 @@ void Dbtc::execTCKEYREQ(Signal* signal)
{
Uint32 TDistrGHIndex = tcKeyReq->getScanIndFlag(Treqinfo);
Uint32 TDistrKeyIndex = TDistrGHIndex + TDistrGroupFlag;
- Uint32 TscanNode = tcKeyReq->getTakeOverScanNode(TOptionalDataPtr[0]);
- Uint32 TscanInfo = tcKeyReq->getTakeOverScanInfo(TOptionalDataPtr[0]);
+
+ Uint32 TscanNode = tcKeyReq->getTakeOverScanNode(TOptionalDataPtr[0]);
+ Uint32 TscanInfo = tcKeyReq->getTakeOverScanInfo(TOptionalDataPtr[0]);
regCachePtr->scanTakeOverInd = TDistrGHIndex;
regCachePtr->scanNode = TscanNode;
@@ -2688,17 +2744,17 @@ void Dbtc::execTCKEYREQ(Signal* signal)
regCachePtr->attrinfo15[3] = Tdata5;
if (TOperationType == ZREAD) {
- Uint8 TreadCount = creadCount;
+ Uint32 TreadCount = c_counters.creadCount;
jam();
regCachePtr->opLock = 0;
- creadCount = TreadCount + 1;
+ c_counters.creadCount = TreadCount + 1;
} else if(TOperationType == ZREAD_EX){
- Uint8 TreadCount = creadCount;
+ Uint32 TreadCount = c_counters.creadCount;
jam();
TOperationType = ZREAD;
regTcPtr->operation = ZREAD;
regCachePtr->opLock = ZUPDATE;
- creadCount = TreadCount + 1;
+ c_counters.creadCount = TreadCount + 1;
} else {
if(regApiPtr->commitAckMarker == RNIL){
jam();
@@ -2718,8 +2774,7 @@ void Dbtc::execTCKEYREQ(Signal* signal)
}
}
- UintR Tattrlength = regCachePtr->attrlength;
- UintR TwriteCount = cwriteCount;
+ UintR TwriteCount = c_counters.cwriteCount;
UintR Toperationsize = coperationsize;
/* --------------------------------------------------------------------
* THIS IS A TEMPORARY TABLE, DON'T UPDATE coperationsize.
@@ -2727,13 +2782,13 @@ void Dbtc::execTCKEYREQ(Signal* signal)
* TEMP TABLES DON'T PARTICIPATE.
* -------------------------------------------------------------------- */
if (localTabptr.p->storedTable) {
- coperationsize = ((Toperationsize + Tattrlength) + TkeyLength) + 17;
+ coperationsize = ((Toperationsize + TattrLen) + TkeyLength) + 17;
}
- cwriteCount = TwriteCount + 1;
+ c_counters.cwriteCount = TwriteCount + 1;
switch (TOperationType) {
case ZUPDATE:
jam();
- if (Tattrlength == 0) {
+ if (TattrLen == 0) {
//TCKEY_abort(signal, 5);
//return;
}//if
@@ -2779,7 +2834,6 @@ void Dbtc::execTCKEYREQ(Signal* signal)
if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
jam();
// Trigger execution at commit
-
regApiPtr->apiConnectstate = CS_REC_COMMITTING;
} else {
jam();
@@ -2899,12 +2953,13 @@ void Dbtc::tckeyreq050Lab(Signal* signal)
regTcPtr->tcNodedata[3] = Tdata6;
Uint8 Toperation = regTcPtr->operation;
+ Uint8 Tdirty = regTcPtr->dirtyOp;
tnoOfBackup = tnodeinfo & 3;
tnoOfStandby = (tnodeinfo >> 8) & 3;
regCachePtr->distributionKey = (tnodeinfo >> 16) & 255;
if (Toperation == ZREAD) {
- if (regCachePtr->opSimple == 1) {
+ if (Tdirty == 1) {
jam();
/*-------------------------------------------------------------*/
/* A SIMPLE READ CAN SELECT ANY OF THE PRIMARY AND */
@@ -2914,23 +2969,28 @@ void Dbtc::tckeyreq050Lab(Signal* signal)
/*-------------------------------------------------------------*/
arrGuard(tnoOfBackup, 4);
UintR Tindex;
+ UintR TownNode = cownNodeid;
for (Tindex = 1; Tindex <= tnoOfBackup; Tindex++) {
UintR Tnode = regTcPtr->tcNodedata[Tindex];
- UintR TownNode = cownNodeid;
jam();
if (Tnode == TownNode) {
jam();
regTcPtr->tcNodedata[0] = Tnode;
}//if
}//for
- if (regCachePtr->attrlength == 0) {
- /*-------------------------------------------------------------*/
- // A simple read which does not read anything is a strange
- // creature and we abort rather than continue.
- /*-------------------------------------------------------------*/
- TCKEY_abort(signal, 12);
- return;
- }//if
+ if(ERROR_INSERTED(8048) || ERROR_INSERTED(8049))
+ {
+ for (Tindex = 0; Tindex <= tnoOfBackup; Tindex++)
+ {
+ UintR Tnode = regTcPtr->tcNodedata[Tindex];
+ jam();
+ if (Tnode != TownNode) {
+ jam();
+ regTcPtr->tcNodedata[0] = Tnode;
+ ndbout_c("Choosing %d", Tnode);
+ }//if
+ }//for
+ }
}//if
jam();
regTcPtr->lastReplicaNo = 0;
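For ZREAD with the dirty flag set, tckeyreq050Lab() now prefers a local replica: it walks the backup replicas and, if one of them is this node, promotes it to tcNodedata[0]; the 8048/8049 error inserts invert the choice to force a remote read for testing. A stripped-down version of the normal selection, with names chosen for illustration only:

#include <cstdint>

// Pick the replica a dirty read should use: keep the primary unless one of
// the backup replicas is the local node, in which case read locally.
uint32_t choose_dirty_read_node(const uint32_t* replicas, uint32_t n_replicas,
                                uint32_t own_node_id) {
  uint32_t chosen = replicas[0];               // primary by default
  for (uint32_t i = 1; i < n_replicas; i++) {
    if (replicas[i] == own_node_id)
      chosen = own_node_id;                    // local backup replica wins
  }
  return chosen;
}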
@@ -3018,7 +3078,8 @@ void Dbtc::packLqhkeyreq(Signal* signal,
UintR TfirstAttrbuf = regCachePtr->firstAttrbuf;
sendlqhkeyreq(signal, TBRef);
if (Tkeylen > 4) {
- packKeyData000Lab(signal, TBRef);
+ packKeyData000Lab(signal, TBRef, Tkeylen - 4);
+ releaseKeys();
}//if
packLqhkeyreq040Lab(signal,
TfirstAttrbuf,
@@ -3239,7 +3300,7 @@ void Dbtc::packLqhkeyreq040Lab(Signal* signal,
releaseAttrinfo();
if (Tboth) {
jam();
- releaseSimpleRead(signal);
+ releaseSimpleRead(signal, apiConnectptr, tcConnectptr.p);
return;
}//if
regTcPtr->tcConnectstate = OS_OPERATING;
@@ -3301,8 +3362,21 @@ void Dbtc::releaseAttrinfo()
/* ========================================================================= */
/* ------- RELEASE ALL RECORDS CONNECTED TO A SIMPLE OPERATION ------- */
/* ========================================================================= */
-void Dbtc::releaseSimpleRead(Signal* signal)
+void Dbtc::releaseSimpleRead(Signal* signal,
+ ApiConnectRecordPtr regApiPtr,
+ TcConnectRecord* regTcPtr)
{
+ Uint32 Ttckeyrec = regApiPtr.p->tckeyrec;
+ Uint32 TclientData = regTcPtr->clientData;
+ Uint32 Tnode = regTcPtr->tcNodedata[0];
+ Uint32 Tlqhkeyreqrec = regApiPtr.p->lqhkeyreqrec;
+ Uint32 TsimpleReadCount = c_counters.csimpleReadCount;
+ ConnectionState state = regApiPtr.p->apiConnectstate;
+
+ regApiPtr.p->tcSendArray[Ttckeyrec] = TclientData;
+ regApiPtr.p->tcSendArray[Ttckeyrec + 1] = TcKeyConf::SimpleReadBit | Tnode;
+ regApiPtr.p->tckeyrec = Ttckeyrec + 2;
+
unlinkReadyTcCon(signal);
releaseTcCon();
@@ -3310,31 +3384,27 @@ void Dbtc::releaseSimpleRead(Signal* signal)
* No LQHKEYCONF in Simple/Dirty read
* Therefore decrese no LQHKEYCONF(REF) we are waiting for
*/
- ApiConnectRecord * const regApiPtr = apiConnectptr.p;
- UintR TsimpleReadCount = csimpleReadCount;
- UintR Tlqhkeyreqrec = regApiPtr->lqhkeyreqrec;
-
- csimpleReadCount = TsimpleReadCount + 1;
- regApiPtr->lqhkeyreqrec = Tlqhkeyreqrec - 1;
-
- /**
- * If start committing and no operation in lists
- * simply return
- */
- if (regApiPtr->apiConnectstate == CS_START_COMMITTING &&
- regApiPtr->firstTcConnect == RNIL) {
+ c_counters.csimpleReadCount = TsimpleReadCount + 1;
+ regApiPtr.p->lqhkeyreqrec = --Tlqhkeyreqrec;
+
+ if(Tlqhkeyreqrec == 0)
+ {
+ /**
+ * Special case of lqhKeyConf_checkTransactionState:
+ * - commit with zero operations: handle only for simple read
+ */
+ sendtckeyconf(signal, state == CS_START_COMMITTING);
+ regApiPtr.p->apiConnectstate =
+ (state == CS_START_COMMITTING ? CS_CONNECTED : state);
+ setApiConTimer(regApiPtr.i, 0, __LINE__);
- jam();
- setApiConTimer(apiConnectptr.i, 0, __LINE__);
- regApiPtr->apiConnectstate = CS_CONNECTED;
return;
- }//if
-
+ }
+
/**
- * Else Emulate LQHKEYCONF
+ * Emulate LQHKEYCONF
*/
- lqhKeyConf_checkTransactionState(signal, regApiPtr);
-
+ lqhKeyConf_checkTransactionState(signal, regApiPtr.p);
}//Dbtc::releaseSimpleRead()
/* ------------------------------------------------------------------------- */
@@ -3372,7 +3442,7 @@ void Dbtc::releaseTcCon()
{
TcConnectRecord * const regTcPtr = tcConnectptr.p;
UintR TfirstfreeTcConnect = cfirstfreeTcConnect;
- UintR TconcurrentOp = cconcurrentOp;
+ UintR TconcurrentOp = c_counters.cconcurrentOp;
UintR TtcConnectptrIndex = tcConnectptr.i;
regTcPtr->tcConnectstate = OS_CONNECTED;
@@ -3381,7 +3451,7 @@ void Dbtc::releaseTcCon()
regTcPtr->isIndexOp = false;
regTcPtr->indexOp = RNIL;
cfirstfreeTcConnect = TtcConnectptrIndex;
- cconcurrentOp = TconcurrentOp - 1;
+ c_counters.cconcurrentOp = TconcurrentOp - 1;
}//Dbtc::releaseTcCon()
void Dbtc::execPACKED_SIGNAL(Signal* signal)
@@ -3770,6 +3840,15 @@ Dbtc::lqhKeyConf_checkTransactionState(Signal * signal,
void Dbtc::sendtckeyconf(Signal* signal, UintR TcommitFlag)
{
+ if(ERROR_INSERTED(8049)){
+ CLEAR_ERROR_INSERT_VALUE;
+ signal->theData[0] = TcContinueB::DelayTCKEYCONF;
+ signal->theData[1] = apiConnectptr.i;
+ signal->theData[2] = TcommitFlag;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 3000, 3);
+ return;
+ }
+
HostRecordPtr localHostptr;
ApiConnectRecord * const regApiPtr = apiConnectptr.p;
const UintR TopWords = (UintR)regApiPtr->tckeyrec;
@@ -4465,7 +4544,7 @@ void Dbtc::sendApiCommit(Signal* signal)
return;
}//if
UintR TapiConnectFilesize = capiConnectFilesize;
- UintR TcommitCount = ccommitCount;
+ UintR TcommitCount = c_counters.ccommitCount;
UintR TapiIndex = apiConnectptr.i;
UintR TnewApiIndex = regApiPtr->apiCopyRecord;
UintR TapiFailState = regApiPtr->apiFailState;
@@ -4473,7 +4552,7 @@ void Dbtc::sendApiCommit(Signal* signal)
tmpApiConnectptr.p = apiConnectptr.p;
tmpApiConnectptr.i = TapiIndex;
- ccommitCount = TcommitCount + 1;
+ c_counters.ccommitCount = TcommitCount + 1;
apiConnectptr.i = TnewApiIndex;
ptrCheckGuard(apiConnectptr, TapiConnectFilesize, localApiConnectRecord);
copyApi(signal);
@@ -5028,27 +5107,15 @@ void Dbtc::execLQHKEYREF(Signal* signal)
*---------------------------------------------------------------------*/
regApiPtr->lqhkeyreqrec--;
if (regApiPtr->lqhkeyconfrec == regApiPtr->lqhkeyreqrec) {
- if ((regApiPtr->lqhkeyconfrec == 0) &&
- (regApiPtr->apiConnectstate == CS_START_COMMITTING)) {
-
- if(abort == TcKeyReq::IgnoreError){
+ if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
+ if(regApiPtr->lqhkeyconfrec) {
jam();
- regApiPtr->returnsignal = RS_NO_RETURN;
- abort010Lab(signal);
- return;
+ diverify010Lab(signal);
+ } else {
+ jam();
+ sendtckeyconf(signal, 1);
+ regApiPtr->apiConnectstate = CS_CONNECTED;
}
-
- /*----------------------------------------------------------------
- * Not a single operation was successful.
- * This we report as an aborted transaction
- * to avoid performing a commit of zero operations.
- *----------------------------------------------------------------*/
- TCKEY_abort(signal, 54);
- return;
- }//if
- if (regApiPtr->apiConnectstate == CS_START_COMMITTING) {
- jam();
- diverify010Lab(signal);
return;
} else if (regApiPtr->tckeyrec > 0 || regApiPtr->m_exec_flag) {
jam();
@@ -6049,6 +6116,7 @@ void Dbtc::timeOutFoundLab(Signal* signal, Uint32 TapiConPtr)
<< " - place: " << c_apiConTimer_line[apiConnectptr.i]);
switch (apiConnectptr.p->apiConnectstate) {
case CS_STARTED:
+ ndbrequire(c_apiConTimer_line[apiConnectptr.i] != 3615);
if(apiConnectptr.p->lqhkeyreqrec == apiConnectptr.p->lqhkeyconfrec){
jam();
/*
@@ -6301,8 +6369,8 @@ void Dbtc::sendAbortedAfterTimeout(Signal* signal, int Tcheck)
snprintf(buf, sizeof(buf), "TC %d: %d ops:",
__LINE__, apiConnectptr.i);
for(Uint32 i = 0; i<TloopCount; i++){
- snprintf(buf2, sizeof(buf2), "%s %d", buf, tmp[i]);
- snprintf(buf, sizeof(buf), buf2);
+ BaseString::snprintf(buf2, sizeof(buf2), "%s %d", buf, tmp[i]);
+ BaseString::snprintf(buf, sizeof(buf), buf2);
}
warningEvent(buf);
ndbout_c(buf);
@@ -6598,10 +6666,8 @@ void Dbtc::timeOutFoundFragLab(Signal* signal, UintR TscanConPtr)
*/
ptr.p->scanFragState = ScanFragRec::COMPLETED;
ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
- ScanFragList comp(c_scan_frag_pool, scanptr.p->m_completed_scan_frags);
- run.remove(ptr);
- comp.add(ptr);
+ run.release(ptr);
ptr.p->stopFragTimer();
}
@@ -6877,7 +6943,6 @@ void Dbtc::checkScanActiveInFailedLqh(Signal* signal,
jam();
ScanFragRecPtr ptr;
ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
- ScanFragList comp(c_scan_frag_pool, scanptr.p->m_completed_scan_frags);
for(run.first(ptr); !ptr.isNull(); ){
jam();
@@ -6887,8 +6952,7 @@ void Dbtc::checkScanActiveInFailedLqh(Signal* signal,
refToNode(curr.p->lqhBlockref) == failedNodeId){
jam();
- run.remove(curr);
- comp.add(curr);
+ run.release(curr);
curr.p->scanFragState = ScanFragRec::COMPLETED;
curr.p->stopFragTimer();
found = true;
@@ -8413,11 +8477,12 @@ void Dbtc::systemErrorLab(Signal* signal)
void Dbtc::execSCAN_TABREQ(Signal* signal)
{
const ScanTabReq * const scanTabReq = (ScanTabReq *)&signal->theData[0];
- const UintR reqinfo = scanTabReq->requestInfo;
- const Uint32 aiLength = scanTabReq->attrLen;
+ const Uint32 reqinfo = scanTabReq->requestInfo;
+ const Uint32 aiLength = (scanTabReq->attrLenKeyLen & 0xFFFF);
+ const Uint32 keyLen = scanTabReq->attrLenKeyLen >> 16;
const Uint32 schemaVersion = scanTabReq->tableSchemaVersion;
- const UintR transid1 = scanTabReq->transId1;
- const UintR transid2 = scanTabReq->transId2;
+ const Uint32 transid1 = scanTabReq->transId1;
+ const Uint32 transid2 = scanTabReq->transId2;
const Uint32 tmpXX = scanTabReq->buddyConPtr;
const Uint32 buddyPtr = (tmpXX == 0xFFFFFFFF ? RNIL : tmpXX);
Uint32 currSavePointId = 0;
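SCAN_TABREQ now packs two lengths into the single attrLenKeyLen word: the attribute-info length in the low 16 bits and the key length in the high 16 bits, exactly as the new code unpacks it. The API operation pointers are no longer carried inline in the fixed signal either, but arrive in segmented section 0 and are copied into cdata[]. A small helper pair showing just the length packing:

#include <cstdint>

// attrLenKeyLen layout used by the new ScanTabReq handling:
// low 16 bits = attribute info length, high 16 bits = key length.
inline uint32_t pack_attr_key_len(uint32_t attrLen, uint32_t keyLen) {
  return (attrLen & 0xFFFF) | (keyLen << 16);
}

inline void unpack_attr_key_len(uint32_t attrLenKeyLen,
                                uint32_t& attrLen, uint32_t& keyLen) {
  attrLen = attrLenKeyLen & 0xFFFF;
  keyLen  = attrLenKeyLen >> 16;
}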
@@ -8428,17 +8493,15 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
Uint32 errCode;
ScanRecordPtr scanptr;
- if(noOprecPerFrag == 0){
- jam();
- scanParallel = (scanConcurrency + 15) / 16;
- noOprecPerFrag = (scanConcurrency >= 16 ? 16 : scanConcurrency & 15);
- }
+ jamEntry();
+
+ SegmentedSectionPtr api_op_ptr;
+ signal->getSection(api_op_ptr, 0);
+ copy(&cdata[0], api_op_ptr);
+ releaseSections(signal);
- jamEntry();
apiConnectptr.i = scanTabReq->apiConnectPtr;
tabptr.i = scanTabReq->tableId;
- for(int i=0; i<16; i++)
- cdata[i] = scanTabReq->apiOperationPtr[i];
if (apiConnectptr.i >= capiConnectFilesize ||
tabptr.i >= ctabrecFilesize) {
@@ -8448,13 +8511,16 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
}//if
ptrAss(apiConnectptr, apiConnectRecord);
ApiConnectRecord * transP = apiConnectptr.p;
-
if (transP->apiConnectstate != CS_CONNECTED) {
jam();
// could be left over from TCKEYREQ rollback
if (transP->apiConnectstate == CS_ABORTING &&
transP->abortState == AS_IDLE) {
jam();
+ } else if(transP->apiConnectstate == CS_STARTED &&
+ transP->firstTcConnect == RNIL){
+ jam();
+ // left over from simple/dirty read
} else {
jam();
errCode = ZSTATE_ERROR;
@@ -8462,50 +8528,15 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
}
}
ptrAss(tabptr, tableRecord);
-
- if (aiLength == 0) {
- jam()
- errCode = ZSCAN_AI_LEN_ERROR;
- goto SCAN_TAB_error;
- }//if
- if (!tabptr.p->checkTable(schemaVersion)){
- jam();
- goto SCAN_TAB_schema_error;
- }//if
- /*****************************************************************
- * THE CONCURRENCY LEVEL SPECIFIED BY THE APPLICATION. IT MUST BE
- * BETWEEN 1 AND 240. IF IT IS 16 OR GREATER IT MUST BE A MULTIPLE
- * OF 16. CONCURRENCY LEVELS UPTO 16 ONLY SCAN ONE FRAGMENT AT A
- * TIME. IF WE SPECIFY 32 IT WILL SCAN TWO FRAGMENTS AT A TIME AND
- * SO FORTH. MAXIMUM 15 PARALLEL SCANS ARE ALLOWED
- ******************************************************************/
- if (scanConcurrency == 0) {
- jam();
- errCode = ZNO_CONCURRENCY_ERROR;
- goto SCAN_TAB_error;
- }//if
-
- /**********************************************************
- * CALCULATE THE NUMBER OF SCAN_TABINFO SIGNALS THAT WILL
- * ARRIVE TO DEFINE THIS SCAN. THIS ALSO DEFINES THE NUMBER
- * OF PARALLEL SCANS AND IT ALSO DEFINES THE NUMBER OF SCAN
- * OPERATION POINTER RECORDS TO ALLOCATE.
- **********************************************************/
- if (cfirstfreeTcConnect == RNIL) {
- jam();
- errCode = ZNO_FREE_TC_CONNECTION;
- goto SCAN_TAB_error;
- }//if
-
- if (cfirstfreeScanrec == RNIL) {
- jam();
- errCode = ZNO_SCANREC_ERROR;
- goto SCAN_TAB_error;
- }//if
-
+ if ((aiLength == 0) ||
+ (!tabptr.p->checkTable(schemaVersion)) ||
+ (scanConcurrency == 0) ||
+ (cfirstfreeTcConnect == RNIL) ||
+ (cfirstfreeScanrec == RNIL)) {
+ goto SCAN_error_check;
+ }
if (buddyPtr != RNIL) {
jam();
-
ApiConnectRecordPtr buddyApiPtr;
buddyApiPtr.i = buddyPtr;
ptrCheckGuard(buddyApiPtr, capiConnectFilesize, apiConnectRecord);
@@ -8526,8 +8557,12 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
seizeTcConnect(signal);
tcConnectptr.p->apiConnect = apiConnectptr.i;
+ tcConnectptr.p->tcConnectstate = OS_WAIT_SCAN;
+ apiConnectptr.p->lastTcConnect = tcConnectptr.i;
seizeCacheRecord(signal);
+ cachePtr.p->keylen = keyLen;
+ cachePtr.p->save1 = 0;
scanptr = seizeScanrec(signal);
ndbrequire(transP->apiScanRec == RNIL);
@@ -8535,7 +8570,6 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
initScanrec(scanptr, scanTabReq, scanParallel, noOprecPerFrag);
- //initScanApirec(signal, buddyPtr, transid1, transid2);
transP->apiScanRec = scanptr.i;
transP->returncode = 0;
transP->transid[0] = transid1;
@@ -8561,10 +8595,32 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
scanptr.p->scanState = ScanRecord::WAIT_AI;
return;
- SCAN_TAB_schema_error:
+ SCAN_error_check:
+ if (aiLength == 0) {
+ jam()
+ errCode = ZSCAN_AI_LEN_ERROR;
+ goto SCAN_TAB_error;
+ }//if
+ if (!tabptr.p->checkTable(schemaVersion)){
+ jam();
+ errCode = tabptr.p->getErrorCode(schemaVersion);
+ goto SCAN_TAB_error;
+ }//if
+ if (scanConcurrency == 0) {
+ jam();
+ errCode = ZNO_CONCURRENCY_ERROR;
+ goto SCAN_TAB_error;
+ }//if
+ if (cfirstfreeTcConnect == RNIL) {
+ jam();
+ errCode = ZNO_FREE_TC_CONNECTION;
+ goto SCAN_TAB_error;
+ }//if
+ ndbrequire(cfirstfreeScanrec == RNIL);
jam();
- errCode = tabptr.p->getErrorCode(schemaVersion);
-
+ errCode = ZNO_SCANREC_ERROR;
+ goto SCAN_TAB_error;
+
SCAN_TAB_error:
jam();
ScanTabRef * ref = (ScanTabRef*)&signal->theData[0];
@@ -8575,39 +8631,39 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
ref->closeNeeded = 0;
sendSignal(transP->ndbapiBlockref, GSN_SCAN_TABREF,
signal, ScanTabRef::SignalLength, JBB);
-
return;
}//Dbtc::execSCAN_TABREQ()
-
-void Dbtc::initScanApirec(Signal* signal,
- Uint32 buddyPtr, UintR transid1, UintR transid2)
-{
-}//Dbtc::initScanApirec()
-
void Dbtc::initScanrec(ScanRecordPtr scanptr,
const ScanTabReq * scanTabReq,
UintR scanParallel,
UintR noOprecPerFrag)
{
- const UintR reqinfo = scanTabReq->requestInfo;
- ndbrequire(scanParallel < 16);
-
scanptr.p->scanTcrec = tcConnectptr.i;
scanptr.p->scanApiRec = apiConnectptr.i;
- scanptr.p->scanAiLength = scanTabReq->attrLen;
+ scanptr.p->scanAiLength = scanTabReq->attrLenKeyLen & 0xFFFF;
+ scanptr.p->scanKeyLen = scanTabReq->attrLenKeyLen >> 16;
scanptr.p->scanTableref = tabptr.i;
scanptr.p->scanSchemaVersion = scanTabReq->tableSchemaVersion;
scanptr.p->scanParallel = scanParallel;
scanptr.p->noOprecPerFrag = noOprecPerFrag;
- scanptr.p->scanLockMode = ScanTabReq::getLockMode(reqinfo);
- scanptr.p->scanLockHold = ScanTabReq::getHoldLockFlag(reqinfo);
- scanptr.p->readCommitted = ScanTabReq::getReadCommittedFlag(reqinfo);
- scanptr.p->rangeScan = ScanTabReq::getRangeScanFlag(reqinfo);
+ scanptr.p->first_batch_size= scanTabReq->first_batch_size;
+ scanptr.p->batch_byte_size= scanTabReq->batch_byte_size;
+
+ Uint32 tmp = 0;
+ const UintR ri = scanTabReq->requestInfo;
+ ScanFragReq::setLockMode(tmp, ScanTabReq::getLockMode(ri));
+ ScanFragReq::setHoldLockFlag(tmp, ScanTabReq::getHoldLockFlag(ri));
+ ScanFragReq::setKeyinfoFlag(tmp, ScanTabReq::getKeyinfoFlag(ri));
+ ScanFragReq::setReadCommittedFlag(tmp,ScanTabReq::getReadCommittedFlag(ri));
+ ScanFragReq::setRangeScanFlag(tmp, ScanTabReq::getRangeScanFlag(ri));
+ ScanFragReq::setAttrLen(tmp, scanTabReq->attrLenKeyLen & 0xFFFF);
+
+ scanptr.p->scanRequestInfo = tmp;
scanptr.p->scanStoredProcId = scanTabReq->storedProcId;
scanptr.p->scanState = ScanRecord::RUNNING;
scanptr.p->m_queued_count = 0;
-
+
ScanFragList list(c_scan_frag_pool,
scanptr.p->m_running_scan_frags);
for (Uint32 i = 0; i < scanParallel; i++) {
@@ -8619,6 +8675,10 @@ void Dbtc::initScanrec(ScanRecordPtr scanptr,
ptr.p->scanFragConcurrency = noOprecPerFrag;
ptr.p->m_apiPtr = cdata[i];
}//for
+
+ (* (ScanTabReq::getRangeScanFlag(ri) ?
+ &c_counters.c_range_scan_count :
+ &c_counters.c_scan_count))++;
}//Dbtc::initScanrec()
void Dbtc::scanTabRefLab(Signal* signal, Uint32 errCode)
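
A small standalone sketch of the pointer-select increment idiom initScanrec() now ends with, bumping either the range-scan or the table-scan counter; the variable names here are placeholders:

// Illustrative only: choose the counter by address with ?:, then
// increment through the selected pointer.
#include <cstdio>

int main()
{
  unsigned range_scan_count = 0, scan_count = 0;
  bool is_range_scan = true;
  (* (is_range_scan ? &range_scan_count : &scan_count))++;
  std::printf("range=%u table=%u\n", range_scan_count, scan_count);
  return 0;
}
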
@@ -8834,19 +8894,16 @@ void Dbtc::releaseScanResources(ScanRecordPtr scanPtr)
if (apiConnectptr.p->cachePtr != RNIL) {
cachePtr.i = apiConnectptr.p->cachePtr;
ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord);
+ releaseKeys();
releaseAttrinfo();
}//if
tcConnectptr.i = scanPtr.p->scanTcrec;
ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
releaseTcCon();
- ScanFragList x(c_scan_frag_pool,
- scanPtr.p->m_completed_scan_frags);
- x.release();
ndbrequire(scanPtr.p->m_running_scan_frags.isEmpty());
ndbrequire(scanPtr.p->m_queued_scan_frags.isEmpty());
ndbrequire(scanPtr.p->m_delivered_scan_frags.isEmpty());
-
ndbassert(scanPtr.p->scanApiRec == apiConnectptr.i);
ndbassert(apiConnectptr.p->apiScanRec == scanPtr.i);
@@ -8899,10 +8956,8 @@ void Dbtc::execDIGETPRIMCONF(Signal* signal)
if(tabPtr.p->checkTable(schemaVersion) == false){
jam();
ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
- ScanFragList comp(c_scan_frag_pool, scanptr.p->m_completed_scan_frags);
- run.remove(scanFragptr);
- comp.add(scanFragptr);
+ run.release(scanFragptr);
scanError(signal, scanptr, tabPtr.p->getErrorCode(schemaVersion));
return;
}
@@ -8920,10 +8975,8 @@ void Dbtc::execDIGETPRIMCONF(Signal* signal)
updateBuddyTimer(apiConnectptr);
{
ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
- ScanFragList comp(c_scan_frag_pool, scanptr.p->m_completed_scan_frags);
- run.remove(scanFragptr);
- comp.add(scanFragptr);
+ run.release(scanFragptr);
}
close_scan_req_send_conf(signal, scanptr);
return;
@@ -8976,10 +9029,8 @@ void Dbtc::execDIGETPRIMREF(Signal* signal)
ptrCheckGuard(scanptr, cscanrecFileSize, scanRecord);
ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
- ScanFragList comp(c_scan_frag_pool, scanptr.p->m_completed_scan_frags);
- run.remove(scanFragptr);
- comp.add(scanFragptr);
+ run.release(scanFragptr);
scanError(signal, scanptr, errCode);
}//Dbtc::execDIGETPRIMREF()
@@ -9024,10 +9075,8 @@ void Dbtc::execSCAN_FRAGREF(Signal* signal)
{
scanFragptr.p->scanFragState = ScanFragRec::COMPLETED;
ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
- ScanFragList comp(c_scan_frag_pool, scanptr.p->m_completed_scan_frags);
- run.remove(scanFragptr);
- comp.add(scanFragptr);
+ run.release(scanFragptr);
scanFragptr.p->stopFragTimer();
}
scanError(signal, scanptr, errCode);
@@ -9087,6 +9136,7 @@ void Dbtc::scanError(Signal* signal, ScanRecordPtr scanptr, Uint32 errorCode)
************************************************************/
void Dbtc::execSCAN_FRAGCONF(Signal* signal)
{
+ Uint32 transid1, transid2, total_len;
jamEntry();
const ScanFragConf * const conf = (ScanFragConf*)&signal->theData[0];
@@ -9102,8 +9152,9 @@ void Dbtc::execSCAN_FRAGCONF(Signal* signal)
apiConnectptr.i = scanptr.p->scanApiRec;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- Uint32 transid1 = apiConnectptr.p->transid[0] ^ conf->transId1;
- Uint32 transid2 = apiConnectptr.p->transid[1] ^ conf->transId2;
+ transid1 = apiConnectptr.p->transid[0] ^ conf->transId1;
+ transid2 = apiConnectptr.p->transid[1] ^ conf->transId2;
+ total_len= conf->total_len;
transid1 = transid1 | transid2;
if (transid1 != 0) {
jam();
@@ -9124,10 +9175,8 @@ void Dbtc::execSCAN_FRAGCONF(Signal* signal)
} else {
jam();
ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
- ScanFragList comp(c_scan_frag_pool, scanptr.p->m_completed_scan_frags);
- run.remove(scanFragptr);
- comp.add(scanFragptr);
+ run.release(scanFragptr);
scanFragptr.p->stopFragTimer();
scanFragptr.p->scanFragState = ScanFragRec::COMPLETED;
}
@@ -9153,15 +9202,13 @@ void Dbtc::execSCAN_FRAGCONF(Signal* signal)
sendSignal(cdihblockref, GSN_DIGETPRIMREQ, signal, 4, JBB);
return;
}
-
- Uint32 chksum = 0;
+ /*
Uint32 totalLen = 0;
for(Uint32 i = 0; i<noCompletedOps; i++){
Uint32 tmp = conf->opReturnDataLen[i];
- chksum += (tmp << i);
totalLen += tmp;
}
-
+ */
{
ScanFragList run(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
ScanFragList queued(c_scan_frag_pool, scanptr.p->m_queued_scan_frags);
@@ -9172,14 +9219,13 @@ void Dbtc::execSCAN_FRAGCONF(Signal* signal)
}
scanFragptr.p->m_ops = noCompletedOps;
- scanFragptr.p->m_chksum = chksum;
- scanFragptr.p->m_totalLen = totalLen;
+ scanFragptr.p->m_totalLen = total_len;
scanFragptr.p->scanFragState = ScanFragRec::QUEUED_FOR_DELIVERY;
scanFragptr.p->stopFragTimer();
if(scanptr.p->m_queued_count > /** Min */ 0){
jam();
- sendScanTabConf(signal, scanptr.p);
+ sendScanTabConf(signal, scanptr);
}
}//Dbtc::execSCAN_FRAGCONF()
@@ -9236,6 +9282,7 @@ void Dbtc::execSCAN_NEXTREQ(Signal* signal)
* We will send a SCAN_TABREF to indicate a time-out occurred.
*********************************************************************/
DEBUG("scanTabRefLab: ZSCANTIME_OUT_ERROR2");
+ ndbout_c("apiConnectptr(%d) -> abort", apiConnectptr.i);
ndbrequire(false); //B2 indication of strange things going on
scanTabRefLab(signal, ZSCANTIME_OUT_ERROR2);
return;
@@ -9287,7 +9334,8 @@ void Dbtc::execSCAN_NEXTREQ(Signal* signal)
nextReq->closeFlag = ZFALSE;
nextReq->transId1 = apiConnectptr.p->transid[0];
nextReq->transId2 = apiConnectptr.p->transid[1];
-
+ nextReq->batch_size_bytes= scanP->batch_byte_size;
+
ScanFragList running(c_scan_frag_pool, scanP->m_running_scan_frags);
ScanFragList delivered(c_scan_frag_pool, scanP->m_delivered_scan_frags);
for(Uint32 i = 0 ; i<len; i++){
@@ -9301,6 +9349,8 @@ void Dbtc::execSCAN_NEXTREQ(Signal* signal)
scanFragptr.p->m_ops = 0;
nextReq->senderData = scanFragptr.i;
+ nextReq->batch_size_rows= scanFragptr.p->scanFragConcurrency;
+
sendSignal(scanFragptr.p->lqhBlockref, GSN_SCAN_NEXTREQ, signal,
ScanFragNextReq::SignalLength, JBB);
delivered.remove(scanFragptr);
@@ -9336,7 +9386,6 @@ Dbtc::close_scan_req(Signal* signal, ScanRecordPtr scanPtr, bool req_received){
{
ScanFragRecPtr ptr;
ScanFragList running(c_scan_frag_pool, scanP->m_running_scan_frags);
- ScanFragList completed(c_scan_frag_pool, scanP->m_completed_scan_frags);
ScanFragList delivered(c_scan_frag_pool, scanP->m_delivered_scan_frags);
ScanFragList queued(c_scan_frag_pool, scanP->m_queued_scan_frags);
@@ -9378,7 +9427,7 @@ Dbtc::close_scan_req(Signal* signal, ScanRecordPtr scanPtr, bool req_received){
} else {
jam();
- completed.add(curr);
+ c_scan_frag_pool.release(curr);
curr.p->scanFragState = ScanFragRec::COMPLETED;
curr.p->stopFragTimer();
}
@@ -9406,7 +9455,7 @@ Dbtc::close_scan_req(Signal* signal, ScanRecordPtr scanPtr, bool req_received){
ScanFragNextReq::SignalLength, JBB);
} else {
jam();
- completed.add(curr);
+ c_scan_frag_pool.release(curr);
curr.p->scanFragState = ScanFragRec::COMPLETED;
curr.p->stopFragTimer();
}
@@ -9485,45 +9534,41 @@ Dbtc::seizeScanrec(Signal* signal) {
void Dbtc::sendScanFragReq(Signal* signal,
ScanRecord* scanP,
- ScanFragRec* scanFragP){
- Uint32 requestInfo = 0;
- ScanFragReq::setConcurrency(requestInfo, scanFragP->scanFragConcurrency);
- ScanFragReq::setLockMode(requestInfo, scanP->scanLockMode);
- ScanFragReq::setHoldLockFlag(requestInfo, scanP->scanLockHold);
- if(scanP->scanLockMode == 1){ // Not read -> keyinfo
- jam();
- ScanFragReq::setKeyinfoFlag(requestInfo, 1);
- }
- ScanFragReq::setReadCommittedFlag(requestInfo, scanP->readCommitted);
- ScanFragReq::setRangeScanFlag(requestInfo, scanP->rangeScan);
- ScanFragReq::setAttrLen(requestInfo, scanP->scanAiLength);
+ ScanFragRec* scanFragP)
+{
+ ScanFragReq * const req = (ScanFragReq *)&signal->theData[0];
+ Uint32 requestInfo = scanP->scanRequestInfo;
ScanFragReq::setScanPrio(requestInfo, 1);
apiConnectptr.i = scanP->scanApiRec;
+ req->tableId = scanP->scanTableref;
+ req->schemaVersion = scanP->scanSchemaVersion;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
- ScanFragReq * const req = (ScanFragReq *)&signal->theData[0];
req->senderData = scanFragptr.i;
- req->resultRef = apiConnectptr.p->ndbapiBlockref;
req->requestInfo = requestInfo;
+ req->fragmentNoKeyLen = scanFragP->scanFragId | (scanP->scanKeyLen << 16);
+ req->resultRef = apiConnectptr.p->ndbapiBlockref;
req->savePointId = apiConnectptr.p->currSavePointId;
- req->tableId = scanP->scanTableref;
- req->fragmentNo = scanFragP->scanFragId;
- req->schemaVersion = scanP->scanSchemaVersion;
req->transId1 = apiConnectptr.p->transid[0];
req->transId2 = apiConnectptr.p->transid[1];
- for(int i = 0; i<16; i++){
- req->clientOpPtr[i] = scanFragP->m_apiPtr;
+ req->clientOpPtr = scanFragP->m_apiPtr;
+ req->batch_size_rows= scanFragP->scanFragConcurrency;
+ req->batch_size_bytes= scanP->batch_byte_size;
+ sendSignal(scanFragP->lqhBlockref, GSN_SCAN_FRAGREQ, signal,
+ ScanFragReq::SignalLength, JBB);
+ if(scanP->scanKeyLen > 0)
+ {
+ tcConnectptr.i = scanFragptr.i;
+ packKeyData000Lab(signal, scanFragP->lqhBlockref, scanP->scanKeyLen);
}
- sendSignal(scanFragP->lqhBlockref, GSN_SCAN_FRAGREQ, signal, 25, JBB);
updateBuddyTimer(apiConnectptr);
scanFragP->startFragTimer(ctcTimer);
-
}//Dbtc::sendScanFragReq()
-void Dbtc::sendScanTabConf(Signal* signal, ScanRecord * scanP) {
+void Dbtc::sendScanTabConf(Signal* signal, ScanRecordPtr scanPtr) {
jam();
Uint32* ops = signal->getDataPtrSend()+4;
- Uint32 op_count = scanP->m_queued_count;
+ Uint32 op_count = scanPtr.p->m_queued_count;
if(4 + 3 * op_count > 25){
jam();
ops += 21;
@@ -9535,30 +9580,37 @@ void Dbtc::sendScanTabConf(Signal* signal, ScanRecord * scanP) {
conf->transId1 = apiConnectptr.p->transid[0];
conf->transId2 = apiConnectptr.p->transid[1];
ScanFragRecPtr ptr;
- ScanFragList queued(c_scan_frag_pool, scanP->m_queued_scan_frags);
- ScanFragList completed(c_scan_frag_pool, scanP->m_completed_scan_frags);
- ScanFragList delivered(c_scan_frag_pool, scanP->m_delivered_scan_frags);
- for(queued.first(ptr); !ptr.isNull(); ){
- ndbrequire(ptr.p->scanFragState == ScanFragRec::QUEUED_FOR_DELIVERY);
- ScanFragRecPtr curr = ptr; // Remove while iterating...
- queued.next(ptr);
-
- * ops++ = curr.p->m_apiPtr;
- * ops++ = curr.i;
- * ops++ = (curr.p->m_totalLen << 5) + curr.p->m_ops;
-
- queued.remove(curr);
- if(curr.p->m_ops > 0){
- delivered.add(curr);
- curr.p->scanFragState = ScanFragRec::DELIVERED;
- curr.p->stopFragTimer();
- } else {
- (* --ops) = ScanTabConf::EndOfData; ops++;
- completed.add(curr);
- curr.p->scanFragState = ScanFragRec::COMPLETED;
- curr.p->stopFragTimer();
+ {
+ ScanFragList queued(c_scan_frag_pool, scanPtr.p->m_queued_scan_frags);
+ ScanFragList delivered(c_scan_frag_pool,scanPtr.p->m_delivered_scan_frags);
+ for(queued.first(ptr); !ptr.isNull(); ){
+ ndbrequire(ptr.p->scanFragState == ScanFragRec::QUEUED_FOR_DELIVERY);
+ ScanFragRecPtr curr = ptr; // Remove while iterating...
+ queued.next(ptr);
+
+ * ops++ = curr.p->m_apiPtr;
+ * ops++ = curr.i;
+ * ops++ = (curr.p->m_totalLen << 10) + curr.p->m_ops;
+
+ queued.remove(curr);
+ if(curr.p->m_ops > 0){
+ delivered.add(curr);
+ curr.p->scanFragState = ScanFragRec::DELIVERED;
+ curr.p->stopFragTimer();
+ } else {
+ (* --ops) = ScanTabConf::EndOfData; ops++;
+ c_scan_frag_pool.release(curr);
+ curr.p->scanFragState = ScanFragRec::COMPLETED;
+ curr.p->stopFragTimer();
+ }
}
}
+
+ if(scanPtr.p->m_delivered_scan_frags.isEmpty() &&
+ scanPtr.p->m_running_scan_frags.isEmpty()){
+ conf->requestInfo = op_count | ScanTabConf::EndOfData;
+ releaseScanResources(scanPtr);
+ }
if(4 + 3 * op_count > 25){
jam();
@@ -9572,7 +9624,7 @@ void Dbtc::sendScanTabConf(Signal* signal, ScanRecord * scanP) {
sendSignal(apiConnectptr.p->ndbapiBlockref, GSN_SCAN_TABCONF, signal,
ScanTabConf::SignalLength + 3 * op_count, JBB);
}
- scanP->m_queued_count = 0;
+ scanPtr.p->m_queued_count = 0;
}//Dbtc::sendScanTabConf()
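
Each queued fragment is reported to the API as a triple whose last word packs result length and operation count as (m_totalLen << 10) + m_ops; a hedged sketch of how a receiver could decode it, assuming the operation count fits in the low 10 bits implied by the new shift:

// Illustrative only -- decodes the packed word written in
// sendScanTabConf() above (the shift changed from 5 to 10 here).
typedef unsigned int Uint32;

static void unpackOpsWord(Uint32 word, Uint32 & totalLen, Uint32 & ops)
{
  ops      = word & ((1u << 10) - 1);  // low 10 bits: completed operations
  totalLen = word >> 10;               // remaining bits: total result length
}
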
@@ -9618,6 +9670,7 @@ void Dbtc::initApiConnect(Signal* signal)
apiConnectptr.p->ndbapiBlockref = 0xFFFFFFFF; // Invalid ref
apiConnectptr.p->commitAckMarker = RNIL;
apiConnectptr.p->firstTcConnect = RNIL;
+ apiConnectptr.p->lastTcConnect = RNIL;
apiConnectptr.p->triggerPending = false;
apiConnectptr.p->isIndexOp = false;
apiConnectptr.p->accumulatingIndexOp = RNIL;
@@ -9644,6 +9697,7 @@ void Dbtc::initApiConnect(Signal* signal)
apiConnectptr.p->ndbapiBlockref = 0xFFFFFFFF; // Invalid ref
apiConnectptr.p->commitAckMarker = RNIL;
apiConnectptr.p->firstTcConnect = RNIL;
+ apiConnectptr.p->lastTcConnect = RNIL;
apiConnectptr.p->triggerPending = false;
apiConnectptr.p->isIndexOp = false;
apiConnectptr.p->accumulatingIndexOp = RNIL;
@@ -9670,6 +9724,7 @@ void Dbtc::initApiConnect(Signal* signal)
apiConnectptr.p->ndbapiBlockref = 0xFFFFFFFF; // Invalid ref
apiConnectptr.p->commitAckMarker = RNIL;
apiConnectptr.p->firstTcConnect = RNIL;
+ apiConnectptr.p->lastTcConnect = RNIL;
apiConnectptr.p->triggerPending = false;
apiConnectptr.p->isIndexOp = false;
apiConnectptr.p->accumulatingIndexOp = RNIL;
@@ -9902,7 +9957,7 @@ void Dbtc::initialiseTcConnect(Signal* signal)
ptrAss(tcConnectptr, tcConnectRecord);
tcConnectptr.p->nextTcConnect = RNIL;
cfirstfreeTcConnect = titcTmp;
- cconcurrentOp = 0;
+ c_counters.cconcurrentOp = 0;
}//Dbtc::initialiseTcConnect()
/* ------------------------------------------------------------------------- */
@@ -9973,7 +10028,7 @@ void Dbtc::releaseAbortResources(Signal* signal)
{
TcConnectRecordPtr rarTcConnectptr;
- cabortCount++;
+ c_counters.cabortCount++;
if (apiConnectptr.p->cachePtr != RNIL) {
cachePtr.i = apiConnectptr.p->cachePtr;
ptrCheckGuard(cachePtr, ccacheFilesize, cacheRecord);
@@ -10147,7 +10202,7 @@ void Dbtc::seizeTcConnect(Signal* signal)
tcConnectptr.i = cfirstfreeTcConnect;
ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
cfirstfreeTcConnect = tcConnectptr.p->nextTcConnect;
- cconcurrentOp++;
+ c_counters.cconcurrentOp++;
tcConnectptr.p->isIndexOp = false;
}//Dbtc::seizeTcConnect()
@@ -10454,9 +10509,6 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal)
sp.p->scanSchemaVersion,
sp.p->scanTableref,
sp.p->scanStoredProcId);
- infoEvent(" lhold=%d, lmode=%d",
- sp.p->scanLockHold,
- sp.p->scanLockMode);
infoEvent(" apiRec=%d, next=%d",
sp.p->scanApiRec, sp.p->nextScan);
@@ -10474,7 +10526,6 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal)
DUMP_SFR(sp.p->m_running_scan_frags);
DUMP_SFR(sp.p->m_queued_scan_frags);
DUMP_SFR(sp.p->m_delivered_scan_frags);
- DUMP_SFR(sp.p->m_completed_scan_frags);
// Request dump of ApiConnectRecord
dumpState->args[0] = DumpStateOrd::TcDumpOneApiConnectRec;
@@ -10559,6 +10610,25 @@ Dbtc::execDUMP_STATE_ORD(Signal* signal)
set_appl_timeout_value(signal->theData[1]);
}
}
+
+ if (dumpState->args[0] == DumpStateOrd::StartTcTimer){
+ c_counters.c_trans_status = TransCounters::Started;
+ c_counters.reset();
+ }
+
+ if (dumpState->args[0] == DumpStateOrd::StopTcTimer){
+ c_counters.c_trans_status = TransCounters::Off;
+ Uint32 len = c_counters.report(signal);
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, len, JBB);
+ c_counters.reset();
+ }
+
+ if (dumpState->args[0] == DumpStateOrd::StartPeriodicTcTimer){
+ c_counters.c_trans_status = TransCounters::Timer;
+ c_counters.reset();
+ signal->theData[0] = TcContinueB::ZTRANS_EVENT_REP;
+ sendSignalWithDelay(cownref, GSN_CONTINUEB, signal, 5000, 1);
+ }
}//Dbtc::execDUMP_STATE_ORD()
void Dbtc::execSET_VAR_REQ(Signal* signal)
@@ -11009,9 +11079,11 @@ void Dbtc::execTCINDXREQ(Signal* signal)
// Seize index operation
TcIndexOperationPtr indexOpPtr;
if ((startFlag == 1) &&
- ((regApiPtr->apiConnectstate == CS_CONNECTED) ||
- ((regApiPtr->apiConnectstate == CS_ABORTING) &&
- (regApiPtr->abortState == AS_IDLE)))) {
+ (regApiPtr->apiConnectstate == CS_CONNECTED ||
+ (regApiPtr->apiConnectstate == CS_STARTED &&
+ regApiPtr->firstTcConnect == RNIL)) ||
+ (regApiPtr->apiConnectstate == CS_ABORTING &&
+ regApiPtr->abortState == AS_IDLE)) {
jam();
// This is a newly started transaction, clean-up
releaseAllSeizedIndexOperations(regApiPtr);
@@ -11033,7 +11105,7 @@ void Dbtc::execTCINDXREQ(Signal* signal)
indexOp->indexOpId = indexOpPtr.i;
// Save original signal
- *indexOp->tcIndxReq = *tcIndxReq;
+ indexOp->tcIndxReq = *tcIndxReq;
indexOp->connectionIndex = TapiIndex;
regApiPtr->accumulatingIndexOp = indexOp->indexOpId;
@@ -11342,7 +11414,7 @@ void Dbtc::execTCKEYCONF(Signal* signal)
// Should never happen, abort
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
- tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
@@ -11361,7 +11433,7 @@ void Dbtc::execTCKEYCONF(Signal* signal)
// Double TCKEYCONF, should never happen, abort
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
- tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
@@ -11381,8 +11453,9 @@ void Dbtc::execTCKEYCONF(Signal* signal)
Uint32 Ttcindxrec = regApiPtr->tcindxrec;
// Copy reply from TcKeyConf
+ ndbassert(regApiPtr->noIndexOp);
regApiPtr->noIndexOp--; // Decrease count
- regApiPtr->tcIndxSendArray[Ttcindxrec] = indexOp->tcIndxReq->senderData;
+ regApiPtr->tcIndxSendArray[Ttcindxrec] = indexOp->tcIndxReq.senderData;
regApiPtr->tcIndxSendArray[Ttcindxrec + 1] =
tcKeyConf->operations[0].attrInfoLen;
regApiPtr->tcindxrec = Ttcindxrec + 2;
@@ -11415,7 +11488,7 @@ void Dbtc::execTCKEYREF(Signal* signal)
}
const UintR TconnectIndex = indexOp->connectionIndex;
ApiConnectRecord * const regApiPtr = &apiConnectRecord[TconnectIndex];
- Uint32 tcKeyRequestInfo = indexOp->tcIndxReq->requestInfo;
+ Uint32 tcKeyRequestInfo = indexOp->tcIndxReq.requestInfo;
Uint32 commitFlg = TcKeyReq::getCommitFlag(tcKeyRequestInfo);
switch(indexOp->indexOpState) {
@@ -11439,15 +11512,22 @@ void Dbtc::execTCKEYREF(Signal* signal)
abortErrorLab(signal);
break;
}
+ /**
+ * Increase count as it will be decreased below...
+ * (and the code is written to handle failing lookup on "real" table
+ * not lookup on index table)
+ */
+ regApiPtr->noIndexOp++;
// else continue
}
case(IOS_INDEX_OPERATION): {
// Send TCINDXREF
jam();
- TcIndxReq * const tcIndxReq = indexOp->tcIndxReq;
+ TcIndxReq * const tcIndxReq = &indexOp->tcIndxReq;
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
+ ndbassert(regApiPtr->noIndexOp);
regApiPtr->noIndexOp--; // Decrease count
tcIndxRef->connectPtr = tcIndxReq->senderData;
tcIndxRef->transId[0] = tcKeyRef->transId[0];
@@ -11523,7 +11603,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
// Failed to allocate space for TransIdAI
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
- tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4000;
@@ -11538,7 +11618,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
// Should never happen, abort
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
- tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
@@ -11566,7 +11646,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
// Too many TRANSID_AI
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
- tcIndexRef->connectPtr = indexOp->tcIndxReq->senderData;
+ tcIndexRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
@@ -11591,7 +11671,7 @@ void Dbtc::execTRANSID_AI(Signal* signal)
jam();
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
- tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
@@ -11611,7 +11691,7 @@ void Dbtc::execTCROLLBACKREP(Signal* signal)
TcIndexOperation* indexOp = c_theIndexOperations.getPtr(indexOpPtr.i);
indexOpPtr.p = indexOp;
tcRollbackRep = (TcRollbackRep *)signal->getDataPtrSend();
- tcRollbackRep->connectPtr = indexOp->tcIndxReq->senderData;
+ tcRollbackRep->connectPtr = indexOp->tcIndxReq.senderData;
sendSignal(apiConnectptr.p->ndbapiBlockref,
GSN_TCROLLBACKREP, signal, TcRollbackRep::SignalLength, JBB);
}
@@ -11628,23 +11708,23 @@ void Dbtc::readIndexTable(Signal* signal,
TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
Uint32 * dataPtr = &tcKeyReq->scanInfo;
Uint32 tcKeyLength = TcKeyReq::StaticLength;
- Uint32 tcKeyRequestInfo = indexOp->tcIndxReq->requestInfo;
+ Uint32 tcKeyRequestInfo = indexOp->tcIndxReq.requestInfo;
AttributeBuffer::DataBufferIterator keyIter;
Uint32 keyLength = TcKeyReq::getKeyLength(tcKeyRequestInfo);
TcIndexData* indexData;
- Uint32 transId1 = indexOp->tcIndxReq->transId1;
- Uint32 transId2 = indexOp->tcIndxReq->transId2;
+ Uint32 transId1 = indexOp->tcIndxReq.transId1;
+ Uint32 transId2 = indexOp->tcIndxReq.transId2;
const Operation_t opType =
(Operation_t)TcKeyReq::getOperationType(tcKeyRequestInfo);
// Find index table
- if ((indexData = c_theIndexes.getPtr(indexOp->tcIndxReq->indexId)) == NULL) {
+ if ((indexData = c_theIndexes.getPtr(indexOp->tcIndxReq.indexId)) == NULL) {
jam();
// Failed to find index record
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
- tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4000;
@@ -11656,9 +11736,9 @@ void Dbtc::readIndexTable(Signal* signal,
tcKeyReq->transId2 = transId2;
tcKeyReq->tableId = indexData->indexId;
tcKeyLength += MIN(keyLength, keyBufSize);
- tcKeyReq->tableSchemaVersion = indexOp->tcIndxReq->indexSchemaVersion;
+ tcKeyReq->tableSchemaVersion = indexOp->tcIndxReq.indexSchemaVersion;
TcKeyReq::setOperationType(tcKeyRequestInfo,
- opType == ZREAD ? opType : ZREAD_EX);
+ opType == ZREAD ? ZREAD : ZREAD_EX);
TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, 1); // Allways send one AttrInfo
TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, 0);
BlockReference originalReceiver = regApiPtr->ndbapiBlockref;
@@ -11683,6 +11763,9 @@ void Dbtc::readIndexTable(Signal* signal,
AttributeHeader::init(dataPtr, indexData->primaryKeyPos, 0);
tcKeyLength++;
tcKeyReq->requestInfo = tcKeyRequestInfo;
+
+ ndbassert(TcKeyReq::getDirtyFlag(tcKeyRequestInfo) == 0);
+ ndbassert(TcKeyReq::getSimpleFlag(tcKeyRequestInfo) == 0);
EXECUTE_DIRECT(DBTC, GSN_TCKEYREQ, signal, tcKeyLength);
/**
@@ -11705,7 +11788,7 @@ void Dbtc::readIndexTable(Signal* signal,
// Send KEYINFO sequence
KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend();
- keyInfo->connectPtr = indexOp->tcIndxReq->apiConnectPtr;
+ keyInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr;
keyInfo->transId[0] = transId1;
keyInfo->transId[1] = transId2;
dataPtr = (Uint32 *) &keyInfo->keyData;
@@ -11745,7 +11828,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
Uint32 keyBufSize = 8; // Maximum for key in TCKEYREQ
Uint32 attrBufSize = 5;
Uint32 dataPos = 0;
- TcIndxReq * const tcIndxReq = indexOp->tcIndxReq;
+ TcIndxReq * const tcIndxReq = &indexOp->tcIndxReq;
TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
Uint32 * dataPtr = &tcKeyReq->scanInfo;
Uint32 tcKeyLength = TcKeyReq::StaticLength;
@@ -11761,7 +11844,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
// Failed to find index record
TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
- tcIndxRef->connectPtr = indexOp->tcIndxReq->senderData;
+ tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
@@ -11834,6 +11917,9 @@ void Dbtc::executeIndexOperation(Signal* signal,
TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, 0);
tcKeyReq->requestInfo = tcKeyRequestInfo;
+ ndbassert(TcKeyReq::getDirtyFlag(tcKeyRequestInfo) == 0);
+ ndbassert(TcKeyReq::getSimpleFlag(tcKeyRequestInfo) == 0);
+
/**
* Decrease lqhkeyreqrec to compensate for addition
* during read of index table
@@ -11861,7 +11947,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
// Send KEYINFO sequence
KeyInfo * const keyInfo = (KeyInfo *)signal->getDataPtrSend();
- keyInfo->connectPtr = indexOp->tcIndxReq->apiConnectPtr;
+ keyInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr;
keyInfo->transId[0] = regApiPtr->transid[0];
keyInfo->transId[1] = regApiPtr->transid[1];
dataPtr = (Uint32 *) &keyInfo->keyData;
@@ -11897,7 +11983,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
AttrInfo * const attrInfo = (AttrInfo *)signal->getDataPtrSend();
Uint32 attrInfoPos = 0;
- attrInfo->connectPtr = indexOp->tcIndxReq->apiConnectPtr;
+ attrInfo->connectPtr = indexOp->tcIndxReq.apiConnectPtr;
attrInfo->transId[0] = regApiPtr->transid[0];
attrInfo->transId[1] = regApiPtr->transid[1];
dataPtr = (Uint32 *) &attrInfo->attrData;
diff --git a/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp b/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp
index 0f3881e9024..2c62adab3e5 100644
--- a/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp
+++ b/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp
@@ -22,26 +22,59 @@ class AttributeOffset {
private:
static void setOffset(Uint32 & desc, Uint32 offset);
+ static void setCharsetPos(Uint32 & desc, Uint32 offset);
static void setNullFlagPos(Uint32 & desc, Uint32 offset);
static Uint32 getOffset(const Uint32 &);
+ static bool getCharsetFlag(const Uint32 &);
+ static Uint32 getCharsetPos(const Uint32 &);
static Uint32 getNullFlagPos(const Uint32 &);
static Uint32 getNullFlagOffset(const Uint32 &);
static Uint32 getNullFlagBitOffset(const Uint32 &);
static bool isNULL(const Uint32 &, const Uint32 &);
};
-#define AO_ATTRIBUTE_OFFSET_MASK (0xffff)
-#define AO_NULL_FLAG_POS_MASK (0x7ff)
-#define AO_NULL_FLAG_POS_SHIFT (21)
-#define AO_NULL_FLAG_WORD_MASK (31)
-#define AO_NULL_FLAG_OFFSET_SHIFT (5)
+/**
+ * Allow for 4096 attributes, all nullable, and for 128 different
+ * character sets.
+ *
+ * a = Attribute offset - 11 bits 0-10 ( addr word in 8 kb )
+ * c = Has charset flag - 1 bit 11-11
+ * s = Charset pointer position - 7 bits 12-18 ( in table descriptor )
+ * f = Null flag offset in word - 5 bits 20-24 ( address 32 bits )
+ * w = Null word offset - 7 bits 25-31 ( f+w addr 4096 attrs )
+ *
+ * 1111111111222222222233
+ * 01234567890123456789012345678901
+ * aaaaaaaaaaacsssssss fffffwwwwwww
+ */
+
+#define AO_ATTRIBUTE_OFFSET_SHIFT 0
+#define AO_ATTRIBUTE_OFFSET_MASK 0x7ff
+
+#define AO_CHARSET_FLAG_SHIFT 11
+#define AO_CHARSET_POS_SHIFT 12
+#define AO_CHARSET_POS_MASK 127
+
+#define AO_NULL_FLAG_POS_MASK 0xfff // f+w
+#define AO_NULL_FLAG_POS_SHIFT 20
+
+#define AO_NULL_FLAG_WORD_MASK 31 // f
+#define AO_NULL_FLAG_OFFSET_SHIFT 5
inline
void
AttributeOffset::setOffset(Uint32 & desc, Uint32 offset){
ASSERT_MAX(offset, AO_ATTRIBUTE_OFFSET_MASK, "AttributeOffset::setOffset");
- desc |= offset;
+ desc |= (offset << AO_ATTRIBUTE_OFFSET_SHIFT);
+}
+
+inline
+void
+AttributeOffset::setCharsetPos(Uint32 & desc, Uint32 offset) {
+ ASSERT_MAX(offset, AO_CHARSET_POS_MASK, "AttributeOffset::setCharsetPos");
+ desc |= (1 << AO_CHARSET_FLAG_SHIFT);
+ desc |= (offset << AO_CHARSET_POS_SHIFT);
}
inline
@@ -55,7 +88,21 @@ inline
Uint32
AttributeOffset::getOffset(const Uint32 & desc)
{
- return desc & AO_ATTRIBUTE_OFFSET_MASK;
+ return (desc >> AO_ATTRIBUTE_OFFSET_SHIFT) & AO_ATTRIBUTE_OFFSET_MASK;
+}
+
+inline
+bool
+AttributeOffset::getCharsetFlag(const Uint32 & desc)
+{
+ return (desc >> AO_CHARSET_FLAG_SHIFT) & 1;
+}
+
+inline
+Uint32
+AttributeOffset::getCharsetPos(const Uint32 & desc)
+{
+ return (desc >> AO_CHARSET_POS_SHIFT) & AO_CHARSET_POS_MASK;
}
inline
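
Taken together, the new layout lets one 32-bit descriptor word carry the attribute offset, an optional charset position and the null-bit position; a minimal sketch of the packing, using the AO_* values introduced above (the helper names are illustrative only):

// Illustrative only: build and read a descriptor word with the
// masks/shifts defined in the patch above.
typedef unsigned int Uint32;

enum {
  AO_ATTRIBUTE_OFFSET_SHIFT = 0,  AO_ATTRIBUTE_OFFSET_MASK = 0x7ff,
  AO_CHARSET_FLAG_SHIFT     = 11,
  AO_CHARSET_POS_SHIFT      = 12, AO_CHARSET_POS_MASK      = 127,
  AO_NULL_FLAG_POS_SHIFT    = 20, AO_NULL_FLAG_POS_MASK    = 0xfff
};

static Uint32 makeDescriptor(Uint32 offset, Uint32 charsetPos, Uint32 nullPos)
{
  Uint32 desc = 0;
  desc |= (offset & AO_ATTRIBUTE_OFFSET_MASK) << AO_ATTRIBUTE_OFFSET_SHIFT;
  desc |= (1u << AO_CHARSET_FLAG_SHIFT);                     // has charset
  desc |= (charsetPos & AO_CHARSET_POS_MASK) << AO_CHARSET_POS_SHIFT;
  desc |= (nullPos & AO_NULL_FLAG_POS_MASK) << AO_NULL_FLAG_POS_SHIFT;
  return desc;
}

static Uint32 getOffsetFromDesc(Uint32 desc)
{
  return (desc >> AO_ATTRIBUTE_OFFSET_SHIFT) & AO_ATTRIBUTE_OFFSET_MASK;
}
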
diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
index b792edf9333..55ad1d0910a 100644
--- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
+++ b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -35,14 +35,6 @@
#define ZMIN_PAGE_LIMIT_TUPKEYREQ 5
#define ZTUP_VERSION_BITS 15
-typedef bool (Dbtup::* ReadFunction)(Uint32*,
- AttributeHeader*,
- Uint32,
- Uint32);
-typedef bool (Dbtup::* UpdateFunction)(Uint32*,
- Uint32,
- Uint32);
-
#ifdef DBTUP_C
//------------------------------------------------------------------
// Jam Handling:
@@ -85,21 +77,12 @@ typedef bool (Dbtup::* UpdateFunction)(Uint32*,
#define ZNO_OF_CONCURRENT_OPEN_OP 40 /* NUMBER OF CONCURRENT OPENS */
#define ZNO_OF_CONCURRENT_WRITE_OP 80 /* NUMBER OF CONCURRENT DISK WRITES*/
#define ZNO_OF_FRAGOPREC 20 /* NUMBER OF CONCURRENT ADD FRAG. */
-#define ZNO_OF_FRAGREC 64 /* SIZE OF FRAGMENT FILE. */
#define ZNO_OF_LCP_REC 10 /* NUMBER OF CONCURRENT CHECKPOINTS*/
-#define ZNO_OF_OPREC 116 /* SIZE OF OPERATION RECORD FILE */
#define TOT_PAGE_RECORD_SPACE 262144 /* SIZE OF PAGE RECORD FILE. */
#define ZNO_OF_PAGE TOT_PAGE_RECORD_SPACE/ZWORDS_ON_PAGE
#define ZNO_OF_PAGE_RANGE_REC 128 /* SIZE OF PAGE RANGE FILE */
#define ZNO_OF_PARALLELL_UNDO_FILES 16 /* NUMBER OF PARALLEL UNDO FILES */
#define ZNO_OF_RESTART_INFO_REC 10 /* MAXIMUM PARALLELL RESTART INFOS */
-#define ZNO_OF_TAB_DESCR_REC 484 /* SIZE OF TABLE DESCRIPTOR FILE */
-#define ZNO_OF_TABLEREC 16 /* SIZE OF TABLE RECORD FILE. */
-#ifdef NDB_OSE
-#define ZNO_OF_UNDO_PAGE 80 // Must be multiple of 8
-#else
-#define ZNO_OF_UNDO_PAGE 500 // Must be multiple of 8
-#endif
/* 24 SEGMENTS WITH 8 PAGES IN EACH*/
/* PLUS ONE UNDO BUFFER CACHE */
// Undo record identifiers are 32-bits with page index 13-bits
@@ -360,6 +343,14 @@ typedef bool (Dbtup::* UpdateFunction)(Uint32*,
class Dbtup: public SimulatedBlock {
public:
+
+ typedef bool (Dbtup::* ReadFunction)(Uint32*,
+ AttributeHeader*,
+ Uint32,
+ Uint32);
+ typedef bool (Dbtup::* UpdateFunction)(Uint32*,
+ Uint32,
+ Uint32);
// State values
enum State {
NOT_INITIALIZED = 0,
@@ -511,6 +502,7 @@ struct Fragoperrec {
Uint32 attributeCount;
Uint32 freeNullBit;
Uint32 noOfNewAttrCount;
+ Uint32 charsetIndex;
BlockReference lqhBlockrefFrag;
};
typedef Ptr<Fragoperrec> FragoperrecPtr;
@@ -523,6 +515,7 @@ struct Fragrecord {
Uint32 emptyPrimPage;
Uint32 firstusedOprec;
+ Uint32 lastusedOprec;
Uint32 thFreeFirst;
Uint32 thFreeCopyFirst;
@@ -622,7 +615,10 @@ struct Operationrec {
Uint32 tcOpIndex;
Uint32 gci;
Uint32 noFiredTriggers;
- Uint32 hashValue; // only used in TUP_COMMITREQ
+ union {
+ Uint32 hashValue; // only used in TUP_COMMITREQ
+ Uint32 lastRow;
+ };
Bitmask<MAXNROFATTRIBUTESINWORDS> changeMask;
};
typedef Ptr<Operationrec> OperationrecPtr;
@@ -791,6 +787,7 @@ struct Tablerec {
ReadFunction* readFunctionArray;
UpdateFunction* updateFunctionArray;
+ CHARSET_INFO** charsetArray;
Uint32 readKeyArray;
Uint32 tabDescriptor;
@@ -802,6 +799,7 @@ struct Tablerec {
Uint16 tupheadsize;
Uint16 noOfAttr;
Uint16 noOfKeyAttr;
+ Uint16 noOfCharsets;
Uint16 noOfNewAttr;
Uint16 noOfNullAttr;
Uint16 noOfAttributeGroups;
@@ -823,8 +821,8 @@ struct Tablerec {
// List of ordered indexes
ArrayList<TupTriggerData> tuxCustomTriggers;
- Uint32 fragid[2 * NO_OF_FRAG_PER_NODE];
- Uint32 fragrec[2 * NO_OF_FRAG_PER_NODE];
+ Uint32 fragid[2 * MAX_FRAG_PER_NODE];
+ Uint32 fragrec[2 * MAX_FRAG_PER_NODE];
struct {
Uint32 tabUserPtr;
@@ -1007,17 +1005,20 @@ public:
void tuxGetNode(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32*& node);
/*
- * TUX reads primary table attributes for index keys. Input is
- * attribute ids in AttributeHeader format. Output is pointers to
- * attribute data within tuple or 0 for NULL value.
+ * TUX reads primary table attributes for index keys. Tuple is
+ * specified by location of original tuple and version number. Input
+ * is attribute ids in AttributeHeader format. Output is attribute
+ * data with headers. Uses readAttributes with xfrm option set.
+ * Returns number of words or negative (-terrorCode) on error.
*/
- void tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, Uint32 numAttrs, const Uint32* attrIds, const Uint32** attrData);
+ int tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, const Uint32* attrIds, Uint32 numAttrs, Uint32* dataOut);
/*
* TUX reads primary key without headers into an array of words. Used
- * for md5 summing and when returning keyinfo.
+ * for md5 summing and when returning keyinfo. Returns number of
+ * words or negative (-terrorCode) on error.
*/
- void tuxReadKeys(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* pkSize, Uint32* pkData);
+ int tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* dataOut);
/*
* TUX checks if tuple is visible to scan.
@@ -1371,10 +1372,11 @@ private:
//------------------------------------------------------------------
int readAttributes(Page* const pagePtr,
Uint32 TupHeadOffset,
- Uint32* inBuffer,
+ const Uint32* inBuffer,
Uint32 inBufLen,
Uint32* outBuffer,
- Uint32 TmaxRead);
+ Uint32 TmaxRead,
+ bool xfrmFlag);
//------------------------------------------------------------------
//------------------------------------------------------------------
@@ -1620,9 +1622,24 @@ private:
Uint32 attrDescriptor,
Uint32 attrDes2);
+// *****************************************************************
+// Read char routines optionally (tXfrmFlag) apply strxfrm
+// *****************************************************************
+
+ bool readCharNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+ bool readCharNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
//------------------------------------------------------------------
//------------------------------------------------------------------
bool nullFlagCheck(Uint32 attrDes2);
+ Uint32 read_psuedo(Uint32 attrId, Uint32* outBuffer);
//------------------------------------------------------------------
//------------------------------------------------------------------
@@ -1637,7 +1654,7 @@ private:
//------------------------------------------------------------------
//------------------------------------------------------------------
- void initOpConnection(Operationrec* const regOperPtr);
+ void initOpConnection(Operationrec* regOperPtr, Fragrecord*);
//------------------------------------------------------------------
//------------------------------------------------------------------
@@ -1914,7 +1931,8 @@ private:
void updatePackedList(Signal* signal, Uint16 ahostIndex);
void setUpDescriptorReferences(Uint32 descriptorReference,
- Tablerec* const regTabPtr);
+ Tablerec* const regTabPtr,
+ const Uint32* offset);
void setUpKeyArray(Tablerec* const regTabPtr);
bool addfragtotab(Tablerec* const regTabPtr, Uint32 fragId, Uint32 fragIndex);
void deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId);
@@ -2103,7 +2121,8 @@ private:
//-----------------------------------------------------------------------------
// Public methods
- Uint32 allocTabDescr(Uint32 noOfAttributes, Uint32 noOfKeyAttr, Uint32 noOfAttributeGroups);
+ Uint32 getTabDescrOffsets(const Tablerec* regTabPtr, Uint32* offset);
+ Uint32 allocTabDescr(const Tablerec* regTabPtr, Uint32* offset);
void freeTabDescr(Uint32 retRef, Uint32 retNo);
Uint32 getTabDescrWord(Uint32 index);
void setTabDescrWord(Uint32 index, Uint32 word);
@@ -2222,6 +2241,7 @@ private:
Uint32 tMaxRead;
Uint32 tOutBufIndex;
Uint32* tTupleHeader;
+ bool tXfrmFlag;
// updateAttributes module
Uint32 tInBufIndex;
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp b/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
index 1ffc5f06754..e9043a8b52d 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
@@ -77,7 +77,7 @@ void Dbtup::execTUP_ABORTREQ(Signal* signal)
if (regOperPtr.p->optype == ZREAD) {
ljam();
freeAllAttrBuffers(regOperPtr.p);
- initOpConnection(regOperPtr.p);
+ initOpConnection(regOperPtr.p, 0);
return;
}//if
@@ -134,7 +134,7 @@ void Dbtup::execTUP_ABORTREQ(Signal* signal)
ndbrequire(regOperPtr.p->tupleState == ALREADY_ABORTED);
commitUpdate(signal, regOperPtr.p, regFragPtr.p, regTabPtr.p);
}//if
- initOpConnection(regOperPtr.p);
+ initOpConnection(regOperPtr.p, regFragPtr.p);
}//execTUP_ABORTREQ()
void Dbtup::setTupleStateOnPreviousOps(Uint32 prevOpIndex)
@@ -459,7 +459,7 @@ void Dbtup::tupkeyErrorLab(Signal* signal)
freeAllAttrBuffers(regOperPtr);
abortUpdate(signal, regOperPtr, fragptr.p, tabptr.p);
removeActiveOpList(regOperPtr);
- initOpConnection(regOperPtr);
+ initOpConnection(regOperPtr, fragptr.p);
regOperPtr->transstate = IDLE;
regOperPtr->tupleState = NO_OTHER_OP;
TupKeyRef * const tupKeyRef = (TupKeyRef *)signal->getDataPtrSend();
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp b/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp
index ea46ee94fdc..6527864135b 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupBuffer.cpp
@@ -134,6 +134,10 @@ void Dbtup::sendReadAttrinfo(Signal* signal,
const Operationrec * const regOperPtr)
{
const BlockReference recBlockref = regOperPtr->recBlockref;
+ const Uint32 sig0 = regOperPtr->tcOperationPtr;
+ const Uint32 sig1 = regOperPtr->transid1;
+ const Uint32 sig2 = regOperPtr->transid2;
+
const Uint32 block = refToBlock(recBlockref);
const Uint32 nodeId = refToNode(recBlockref);
@@ -141,6 +145,8 @@ void Dbtup::sendReadAttrinfo(Signal* signal,
const Uint32 type = getNodeInfo(nodeId).m_type;
bool is_api = (type >= NodeInfo::API && type <= NodeInfo::REP);
bool old_dest = (getNodeInfo(nodeId).m_version < MAKE_VERSION(3,5,0));
+ const Uint32 TpacketTA = hostBuffer[nodeId].noOfPacketsTA;
+ const Uint32 TpacketLen = hostBuffer[nodeId].packetLenTA;
if (ERROR_INSERTED(4006) && (nodeId != getOwnNodeId())){
// Use error insert to turn routing on
@@ -148,15 +154,11 @@ void Dbtup::sendReadAttrinfo(Signal* signal,
connectedToNode = false;
}
- Uint32 sig0 = regOperPtr->tcOperationPtr;
- Uint32 sig1 = regOperPtr->transid1;
- Uint32 sig2 = regOperPtr->transid2;
-
TransIdAI * transIdAI = (TransIdAI *)signal->getDataPtrSend();
transIdAI->connectPtr = sig0;
transIdAI->transId[0] = sig1;
transIdAI->transId[1] = sig2;
-
+
if (connectedToNode){
/**
* Own node -> execute direct
@@ -169,6 +171,22 @@ void Dbtup::sendReadAttrinfo(Signal* signal,
*/
if(ToutBufIndex >= 22 && is_api && !old_dest) {
ljam();
+ /**
+ * Flush buffer so that order is maintained
+ */
+ if (TpacketTA != 0) {
+ ljam();
+ BlockReference TBref = numberToRef(API_PACKED, nodeId);
+ MEMCOPY_NO_WORDS(&signal->theData[0],
+ &hostBuffer[nodeId].packetBufferTA[0],
+ TpacketLen);
+ sendSignal(TBref, GSN_TRANSID_AI, signal, TpacketLen, JBB);
+ hostBuffer[nodeId].noOfPacketsTA = 0;
+ hostBuffer[nodeId].packetLenTA = 0;
+ transIdAI->connectPtr = sig0;
+ transIdAI->transId[0] = sig1;
+ transIdAI->transId[1] = sig2;
+ }//if
LinearSectionPtr ptr[3];
ptr[0].p = &signal->theData[25];
ptr[0].sz = ToutBufIndex;
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp b/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
index fa3667b221e..cbd56c3281f 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
@@ -224,7 +224,8 @@ void Dbtup::removeActiveOpList(Operationrec* const regOperPtr)
/* ---------------------------------------------------------------- */
/* INITIALIZATION OF ONE CONNECTION RECORD TO PREPARE FOR NEXT OP. */
/* ---------------------------------------------------------------- */
-void Dbtup::initOpConnection(Operationrec* const regOperPtr)
+void Dbtup::initOpConnection(Operationrec* regOperPtr,
+ Fragrecord * fragPtrP)
{
Uint32 RinFragList = regOperPtr->inFragList;
regOperPtr->transstate = IDLE;
@@ -244,22 +245,18 @@ void Dbtup::initOpConnection(Operationrec* const regOperPtr)
regOperPtr->inFragList = ZFALSE;
if (tropPrevLinkPtr.i == RNIL) {
ljam();
- FragrecordPtr regFragPtr;
- regFragPtr.i = regOperPtr->fragmentPtr;
- ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord);
- regFragPtr.p->firstusedOprec = tropNextLinkPtr.i;
+ fragPtrP->firstusedOprec = tropNextLinkPtr.i;
} else {
ljam();
ptrCheckGuard(tropPrevLinkPtr, cnoOfOprec, operationrec);
tropPrevLinkPtr.p->nextOprecInList = tropNextLinkPtr.i;
}//if
if (tropNextLinkPtr.i == RNIL) {
- ;
+ fragPtrP->lastusedOprec = tropPrevLinkPtr.i;
} else {
- ljam();
ptrCheckGuard(tropNextLinkPtr, cnoOfOprec, operationrec);
tropNextLinkPtr.p->prevOprecInList = tropPrevLinkPtr.i;
- }//if
+ }
regOperPtr->prevOprecInList = RNIL;
regOperPtr->nextOprecInList = RNIL;
}//if
@@ -336,7 +333,7 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
commitUpdate(signal, regOperPtr.p, regFragPtr.p, regTabPtr.p);
removeActiveOpList(regOperPtr.p);
}//if
- initOpConnection(regOperPtr.p);
+ initOpConnection(regOperPtr.p, regFragPtr.p);
}//execTUP_COMMITREQ()
void
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp b/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp
index 768a61655b5..808cfd33696 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp
@@ -353,11 +353,11 @@ operator<<(NdbOut& out, const Dbtup::Operationrec& op)
out << " [interpretedExec " << dec << op.interpretedExec << "]";
out << " [opSimple " << dec << op.opSimple << "]";
// state
- out << " [tupleState " << dec << op.tupleState << "]";
- out << " [transstate " << dec << op.transstate << "]";
+ out << " [tupleState " << dec << (Uint32) op.tupleState << "]";
+ out << " [transstate " << dec << (Uint32) op.transstate << "]";
out << " [inFragList " << dec << op.inFragList << "]";
out << " [inActiveOpList " << dec << op.inActiveOpList << "]";
- out << " [undoLogged " << dec << op.undoLogged << "]";
+ out << " [undoLogged " << dec << (Uint32) op.undoLogged << "]";
// links
out << " [prevActiveOp " << hex << op.prevActiveOp << "]";
out << " [nextActiveOp " << hex << op.nextActiveOp << "]";
@@ -405,3 +405,7 @@ operator<<(NdbOut& out, const Dbtup::Th& th)
return out;
}
#endif
+
+#ifdef VM_TRACE
+template class Vector<Chunk>;
+#endif
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
index 0dc196d5f56..49de0d80bcd 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
@@ -319,24 +319,20 @@ void Dbtup::linkOpIntoFragList(OperationrecPtr regOperPtr,
Fragrecord* const regFragPtr)
{
OperationrecPtr sopTmpOperPtr;
-/* ----------------------------------------------------------------- */
-/* LINK THE OPERATION INTO A DOUBLY LINKED LIST ON THE FRAGMENT*/
-/* PUT IT FIRST IN THIS LIST SINCE IT DOESN'T MATTER WHERE IT */
-/* IS PUT. */
-/* ----------------------------------------------------------------- */
+ Uint32 tail = regFragPtr->lastusedOprec;
ndbrequire(regOperPtr.p->inFragList == ZFALSE);
regOperPtr.p->inFragList = ZTRUE;
- regOperPtr.p->prevOprecInList = RNIL;
- sopTmpOperPtr.i = regFragPtr->firstusedOprec;
- regFragPtr->firstusedOprec = regOperPtr.i;
- regOperPtr.p->nextOprecInList = sopTmpOperPtr.i;
- if (sopTmpOperPtr.i == RNIL) {
- return;
+ regOperPtr.p->prevOprecInList = tail;
+ regOperPtr.p->nextOprecInList = RNIL;
+ sopTmpOperPtr.i = tail;
+ if (tail == RNIL) {
+ regFragPtr->firstusedOprec = regOperPtr.i;
} else {
jam();
ptrCheckGuard(sopTmpOperPtr, cnoOfOprec, operationrec);
- sopTmpOperPtr.p->prevOprecInList = regOperPtr.i;
+ sopTmpOperPtr.p->nextOprecInList = regOperPtr.i;
}//if
+ regFragPtr->lastusedOprec = regOperPtr.i;
}//Dbtup::linkOpIntoFragList()
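
A hedged sketch of the tail-append pattern the patched linkOpIntoFragList() now follows, keeping both a first and a last pointer per fragment; the record types and the sentinel value here are placeholders, not the kernel's own definitions:

// Illustrative only: append at the tail of a doubly linked list kept
// as indices into a pool, with a sentinel marking "no record".
#include <vector>

typedef unsigned int Uint32;
static const Uint32 RNIL_SENTINEL = 0xFFFFFFFF;

struct Op   { Uint32 prevOprecInList, nextOprecInList; };
struct Frag { Uint32 firstusedOprec, lastusedOprec; };

static void appendOp(Frag & frag, std::vector<Op> & pool, Uint32 i)
{
  const Uint32 tail = frag.lastusedOprec;
  pool[i].prevOprecInList = tail;
  pool[i].nextOprecInList = RNIL_SENTINEL;
  if (tail == RNIL_SENTINEL)
    frag.firstusedOprec = i;          // list was empty
  else
    pool[tail].nextOprecInList = i;   // link behind the old tail
  frag.lastusedOprec = i;
}
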
/*
@@ -708,7 +704,7 @@ void Dbtup::execTUPKEYREQ(Signal* signal)
regOperPtr->tupleState = TUPLE_BLOCKED;
regOperPtr->changeMask.clear();
-
+
if (Rstoredid != ZNIL) {
ndbrequire(initStoredOperationrec(regOperPtr, Rstoredid) == ZOK);
}//if
@@ -844,20 +840,18 @@ void Dbtup::sendTUPKEYCONF(Signal* signal,
TupKeyConf * const tupKeyConf = (TupKeyConf *)signal->getDataPtrSend();
Uint32 RuserPointer = regOperPtr->userpointer;
- Uint32 RfragPageId = regOperPtr->fragPageId;
- Uint32 RpageIndex = regOperPtr->pageIndex;
Uint32 RattroutbufLen = regOperPtr->attroutbufLen;
Uint32 RnoFiredTriggers = regOperPtr->noFiredTriggers;
BlockReference Ruserblockref = regOperPtr->userblockref;
+ Uint32 lastRow = regOperPtr->lastRow;
regOperPtr->transstate = STARTED;
regOperPtr->tupleState = NO_OTHER_OP;
tupKeyConf->userPtr = RuserPointer;
- tupKeyConf->pageId = RfragPageId;
- tupKeyConf->pageIndex = RpageIndex;
tupKeyConf->readLength = RattroutbufLen;
tupKeyConf->writeLength = TlogSize;
tupKeyConf->noFiredTriggers = RnoFiredTriggers;
+ tupKeyConf->lastRow = lastRow;
EXECUTE_DIRECT(refToBlock(Ruserblockref), GSN_TUPKEYCONF, signal,
TupKeyConf::SignalLength);
@@ -899,18 +893,19 @@ int Dbtup::handleReadReq(Signal* signal,
if (regOperPtr->interpretedExec != 1) {
jam();
-
- Uint32 TnoOfDataRead = readAttributes(pagePtr,
- Ttupheadoffset,
- &cinBuffer[0],
- regOperPtr->attrinbufLen,
- dst,
- dstLen);
- if (TnoOfDataRead != (Uint32)-1) {
+ int ret = readAttributes(pagePtr,
+ Ttupheadoffset,
+ &cinBuffer[0],
+ regOperPtr->attrinbufLen,
+ dst,
+ dstLen,
+ false);
+ if (ret != -1) {
/* ------------------------------------------------------------------------- */
// We have read all data into coutBuffer. Now send it to the API.
/* ------------------------------------------------------------------------- */
jam();
+ Uint32 TnoOfDataRead= (Uint32) ret;
regOperPtr->attroutbufLen = TnoOfDataRead;
sendReadAttrinfo(signal, TnoOfDataRead, regOperPtr);
return 0;
@@ -920,6 +915,7 @@ int Dbtup::handleReadReq(Signal* signal,
return -1;
} else {
jam();
+ regOperPtr->lastRow = 0;
if (interpreterStartLab(signal, pagePtr, Ttupheadoffset) != -1) {
return 0;
}//if
@@ -1104,7 +1100,7 @@ Dbtup::updateStartLab(Signal* signal,
Tablerec* const regTabPtr,
Page* const pagePtr)
{
- Uint32 retValue;
+ int retValue;
if (regOperPtr->optype == ZINSERT) {
jam();
setNullBits(pagePtr, regTabPtr, regOperPtr->pageOffset);
@@ -1115,7 +1111,7 @@ Dbtup::updateStartLab(Signal* signal,
regOperPtr->pageOffset,
&cinBuffer[0],
regOperPtr->attrinbufLen);
- if (retValue == (Uint32)-1) {
+ if (retValue == -1) {
tupkeyErrorLab(signal);
}//if
} else {
@@ -1219,7 +1215,7 @@ int Dbtup::interpreterStartLab(Signal* signal,
{
Operationrec * const regOperPtr = operPtr.p;
Uint32 RtotalLen;
- Uint32 TnoDataRW;
+ int TnoDataRW;
Uint32 RinitReadLen = cinBuffer[0];
Uint32 RexecRegionLen = cinBuffer[1];
@@ -1275,8 +1271,9 @@ int Dbtup::interpreterStartLab(Signal* signal,
&cinBuffer[5],
RinitReadLen,
&dst[0],
- dstLen);
- if (TnoDataRW != (Uint32)-1) {
+ dstLen,
+ false);
+ if (TnoDataRW != -1) {
RattroutCounter = TnoDataRW;
RinstructionCounter += RinitReadLen;
} else {
@@ -1303,7 +1300,7 @@ int Dbtup::interpreterStartLab(Signal* signal,
RsubLen,
&coutBuffer[0],
sizeof(coutBuffer) / 4);
- if (TnoDataRW != (Uint32)-1) {
+ if (TnoDataRW != -1) {
RinstructionCounter += RexecRegionLen;
RlogSize = TnoDataRW;
} else {
@@ -1322,7 +1319,7 @@ int Dbtup::interpreterStartLab(Signal* signal,
TupHeadOffset,
&cinBuffer[RinstructionCounter],
RfinalUpdateLen);
- if (TnoDataRW != (Uint32)-1) {
+ if (TnoDataRW != -1) {
MEMCOPY_NO_WORDS(&clogMemBuffer[RlogSize],
&cinBuffer[RinstructionCounter],
RfinalUpdateLen);
@@ -1348,8 +1345,9 @@ int Dbtup::interpreterStartLab(Signal* signal,
&cinBuffer[RinstructionCounter],
RfinalRLen,
&dst[RattroutCounter],
- (dstLen - RattroutCounter));
- if (TnoDataRW != (Uint32)-1) {
+ (dstLen - RattroutCounter),
+ false);
+ if (TnoDataRW != -1) {
RattroutCounter += TnoDataRW;
} else {
jam();
@@ -1482,13 +1480,13 @@ int Dbtup::interpreterNextLab(Signal* signal,
/* ---------------------------------------------------------------- */
{
Uint32 theAttrinfo = theInstruction;
- Uint32 TnoDataRW;
- TnoDataRW = readAttributes(pagePtr,
- TupHeadOffset,
- &theAttrinfo,
- (Uint32)1,
- &TregMemBuffer[theRegister],
- (Uint32)3);
+ int TnoDataRW= readAttributes(pagePtr,
+ TupHeadOffset,
+ &theAttrinfo,
+ (Uint32)1,
+ &TregMemBuffer[theRegister],
+ (Uint32)3,
+ false);
if (TnoDataRW == 2) {
/* ------------------------------------------------------------- */
// Two words read means that we get the instruction plus one 32
@@ -1512,7 +1510,7 @@ int Dbtup::interpreterNextLab(Signal* signal,
TregMemBuffer[theRegister] = 0;
TregMemBuffer[theRegister + 2] = 0;
TregMemBuffer[theRegister + 3] = 0;
- } else if (TnoDataRW == (Uint32)-1) {
+ } else if (TnoDataRW == -1) {
jam();
tupkeyErrorLab(signal);
return -1;
@@ -1565,12 +1563,11 @@ int Dbtup::interpreterNextLab(Signal* signal,
ah.setNULL();
Tlen = 1;
}//if
- Uint32 TnoDataRW;
- TnoDataRW = updateAttributes(pagePtr,
- TupHeadOffset,
- &TdataForUpdate[0],
- Tlen);
- if (TnoDataRW != (Uint32)-1) {
+ int TnoDataRW= updateAttributes(pagePtr,
+ TupHeadOffset,
+ &TdataForUpdate[0],
+ Tlen);
+ if (TnoDataRW != -1) {
/* --------------------------------------------------------- */
// Write the written data also into the log buffer so that it
// will be logged.
@@ -1834,7 +1831,8 @@ int Dbtup::interpreterNextLab(Signal* signal,
Int32 TnoDataR = readAttributes(pagePtr,
TupHeadOffset,
&attrId, 1,
- tmpArea, tmpAreaSz);
+ tmpArea, tmpAreaSz,
+ false);
if (TnoDataR == -1) {
jam();
@@ -1930,7 +1928,8 @@ int Dbtup::interpreterNextLab(Signal* signal,
Int32 TnoDataR = readAttributes(pagePtr,
TupHeadOffset,
&attrId, 1,
- tmpArea, tmpAreaSz);
+ tmpArea, tmpAreaSz,
+ false);
if (TnoDataR == -1) {
jam();
@@ -1958,7 +1957,8 @@ int Dbtup::interpreterNextLab(Signal* signal,
Int32 TnoDataR = readAttributes(pagePtr,
TupHeadOffset,
&attrId, 1,
- tmpArea, tmpAreaSz);
+ tmpArea, tmpAreaSz,
+ false);
if (TnoDataR == -1) {
jam();
@@ -1978,12 +1978,19 @@ int Dbtup::interpreterNextLab(Signal* signal,
}
case Interpreter::EXIT_OK:
- case Interpreter::EXIT_OK_LAST:
jam();
#ifdef TRACE_INTERPRETER
ndbout_c(" - exit_ok");
#endif
return TdataWritten;
+
+ case Interpreter::EXIT_OK_LAST:
+ jam();
+#ifdef TRACE_INTERPRETER
+ ndbout_c(" - exit_ok_last");
+#endif
+ operPtr.p->lastRow = 1;
+ return TdataWritten;
case Interpreter::EXIT_REFUSE:
jam();
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
index f5c3e2b4128..d33adcd08e1 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
@@ -44,16 +44,10 @@ void Dbtup::initData()
cnoOfLcpRec = ZNO_OF_LCP_REC;
cnoOfConcurrentOpenOp = ZNO_OF_CONCURRENT_OPEN_OP;
cnoOfConcurrentWriteOp = ZNO_OF_CONCURRENT_WRITE_OP;
- cnoOfFragoprec = 2 * NO_OF_FRAG_PER_NODE;
- cnoOfFragrec = ZNO_OF_FRAGREC;
- cnoOfOprec = ZNO_OF_OPREC;
- cnoOfPage = ZNO_OF_PAGE;
+ cnoOfFragoprec = 2 * MAX_FRAG_PER_NODE;
cnoOfPageRangeRec = ZNO_OF_PAGE_RANGE_REC;
cnoOfParallellUndoFiles = ZNO_OF_PARALLELL_UNDO_FILES;
cnoOfRestartInfoRec = ZNO_OF_RESTART_INFO_REC;
- cnoOfTablerec = ZNO_OF_TABLEREC;
- cnoOfTabDescrRec = ZNO_OF_TAB_DESCR_REC;
- cnoOfUndoPage = ZNO_OF_UNDO_PAGE;
c_maxTriggersPerTable = ZDEFAULT_MAX_NO_TRIGGERS_PER_TABLE;
c_noOfBuildIndexRec = 32;
@@ -83,9 +77,25 @@ Dbtup::Dbtup(const class Configuration & conf)
c_storedProcPool(),
c_buildIndexList(c_buildIndexPool)
{
-
+ Uint32 log_page_size= 0;
BLOCK_CONSTRUCTOR(Dbtup);
+ const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndb_mgm_get_int_parameter(p, CFG_DB_UNDO_DATA_BUFFER,
+ &log_page_size);
+
+ /**
+ * Always set page size in half MBytes
+ */
+ cnoOfUndoPage= (log_page_size / sizeof(UndoPage));
+ Uint32 mega_byte_part= cnoOfUndoPage & 15;
+ if (mega_byte_part != 0) {
+ jam();
+ cnoOfUndoPage+= (16 - mega_byte_part);
+ }
+
addRecSignal(GSN_DEBUG_SIG, &Dbtup::execDEBUG_SIG);
addRecSignal(GSN_CONTINUEB, &Dbtup::execCONTINUEB);
@@ -628,6 +638,7 @@ void Dbtup::execREAD_CONFIG_REQ(Signal* signal)
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_PAGE, &tmp));
Uint64 pages = (tmp * 2048 + (ZWORDS_ON_PAGE - 1))/ (Uint64)ZWORDS_ON_PAGE;
cnoOfPage = (Uint32)pages;
+ Uint32 noOfTriggers= 0;
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_PAGE_RANGE, &tmp));
initPageRangeSize(tmp);
@@ -637,10 +648,13 @@ void Dbtup::execREAD_CONFIG_REQ(Signal* signal)
Uint32 noOfStoredProc;
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_STORED_PROC,
&noOfStoredProc));
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_DB_NO_TRIGGERS,
+ &noOfTriggers));
cnoOfTabDescrRec = (cnoOfTabDescrRec & 0xFFFFFFF0) + 16;
c_storedProcPool.setSize(noOfStoredProc);
c_buildIndexPool.setSize(c_noOfBuildIndexRec);
+ c_triggerPool.setSize(noOfTriggers);
initRecords();
czero = 0;
@@ -715,8 +729,6 @@ void Dbtup::initRecords()
sizeof(RestartInfoRecord),
cnoOfRestartInfoRec);
- // Trigger data
- c_triggerPool.setSize(cnoOfTablerec*c_maxTriggersPerTable);
tablerec = (Tablerec*)allocRecord("Tablerec",
sizeof(Tablerec),
@@ -951,6 +963,7 @@ void Dbtup::initializeFragrecord()
regFragPtr.p->nextfreefrag = regFragPtr.i + 1;
regFragPtr.p->checkpointVersion = RNIL;
regFragPtr.p->firstusedOprec = RNIL;
+ regFragPtr.p->lastusedOprec = RNIL;
regFragPtr.p->fragStatus = IDLE;
}//for
regFragPtr.i = cnoOfFragrec - 1;
@@ -1049,12 +1062,13 @@ void Dbtup::initializeTablerec()
void
Dbtup::initTab(Tablerec* const regTabPtr)
{
- for (Uint32 i = 0; i < (2 * NO_OF_FRAG_PER_NODE); i++) {
+ for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
regTabPtr->fragid[i] = RNIL;
regTabPtr->fragrec[i] = RNIL;
}//for
regTabPtr->readFunctionArray = NULL;
regTabPtr->updateFunctionArray = NULL;
+ regTabPtr->charsetArray = NULL;
regTabPtr->tabDescriptor = RNIL;
regTabPtr->attributeGroupDescriptor = RNIL;
@@ -1151,7 +1165,7 @@ void Dbtup::execTUPSEIZEREQ(Signal* signal)
return;
}//if
regOperPtr.p->optype = ZREAD;
- initOpConnection(regOperPtr.p);
+ initOpConnection(regOperPtr.p, 0);
regOperPtr.p->userpointer = userPtr;
regOperPtr.p->userblockref = userRef;
signal->theData[0] = regOperPtr.p->userpointer;
@@ -1160,7 +1174,7 @@ void Dbtup::execTUPSEIZEREQ(Signal* signal)
return;
}//Dbtup::execTUPSEIZEREQ()
-#define printFragment(t){ for(Uint32 i = 0; i < (2 * NO_OF_FRAG_PER_NODE);i++){\
+#define printFragment(t){ for(Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE);i++){\
ndbout_c("table = %d fragid[%d] = %d fragrec[%d] = %d", \
t.i, t.p->fragid[i], i, t.p->fragrec[i]); }}
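A standalone sketch of the undo-page sizing added to the Dbtup constructor above: the configured undo data buffer is converted to UndoPage units and rounded up to a whole multiple of 16 pages (half megabytes). The 32 KB page size and the byte values are assumptions for illustration.

#include <cstdint>
#include <cstdio>

// Round a buffer size in bytes up to a whole number of 16-page groups,
// assuming (hypothetically) a 32 KB UndoPage, so 16 pages = half a megabyte.
static uint32_t undoPages(uint32_t bufferBytes, uint32_t pageBytes)
{
  uint32_t pages = bufferBytes / pageBytes;
  uint32_t rem = pages & 15;         // pages modulo 16
  if (rem != 0)
    pages += 16 - rem;               // round up to the next multiple of 16
  return pages;
}

int main()
{
  // e.g. a ~1.3 MB buffer with 32 KB pages gives 41 pages, rounded to 48
  std::printf("%u\n", (unsigned)undoPages(1363148, 32768));
  return 0;
}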
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp b/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
index e7a431f17de..5a8642c4d2e 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
@@ -112,10 +112,11 @@ Dbtup::tuxGetNode(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32*& no
node = &pagePtr.p->pageWord[pageOffset] + attrDataOffset;
}
-void
-Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, Uint32 numAttrs, const Uint32* attrIds, const Uint32** attrData)
+int
+Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, const Uint32* attrIds, Uint32 numAttrs, Uint32* dataOut)
{
ljamEntry();
+ // use own variables instead of globals
FragrecordPtr fragPtr;
fragPtr.i = fragPtrI;
ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
@@ -134,6 +135,7 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tu
while (true) {
ptrCheckGuard(opPtr, cnoOfOprec, operationrec);
if (opPtr.p->realPageIdC != RNIL) {
+ // update page and offset
pagePtr.i = opPtr.p->realPageIdC;
pageOffset = opPtr.p->pageOffsetC;
ptrCheckGuard(pagePtr, cnoOfPage, page);
@@ -147,33 +149,34 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tu
ndbrequire(++loopGuard < (1 << ZTUP_VERSION_BITS));
}
}
- const Uint32 tabDescriptor = tablePtr.p->tabDescriptor;
- const Uint32* tupleHeader = &pagePtr.p->pageWord[pageOffset];
- for (Uint32 i = 0; i < numAttrs; i++) {
- AttributeHeader ah(attrIds[i]);
- const Uint32 attrId = ah.getAttributeId();
- const Uint32 index = tabDescriptor + (attrId << ZAD_LOG_SIZE);
- const Uint32 desc1 = tableDescriptor[index].tabDescr;
- const Uint32 desc2 = tableDescriptor[index + 1].tabDescr;
- if (AttributeDescriptor::getNullable(desc1)) {
- Uint32 offset = AttributeOffset::getNullFlagOffset(desc2);
- ndbrequire(offset < tablePtr.p->tupNullWords);
- offset += tablePtr.p->tupNullIndex;
- ndbrequire(offset < tablePtr.p->tupheadsize);
- if (AttributeOffset::isNULL(tupleHeader[offset], desc2)) {
- ljam();
- attrData[i] = 0;
- continue;
- }
- }
- attrData[i] = tupleHeader + AttributeOffset::getOffset(desc2);
+ // read key attributes from found tuple version
+ // save globals
+ TablerecPtr tabptr_old = tabptr;
+ FragrecordPtr fragptr_old = fragptr;
+ OperationrecPtr operPtr_old = operPtr;
+ // new globals
+ tabptr = tablePtr;
+ fragptr = fragPtr;
+ operPtr.i = RNIL;
+ operPtr.p = NULL;
+ // do it
+ int ret = readAttributes(pagePtr.p, pageOffset, attrIds, numAttrs, dataOut, ZNIL, true);
+ // restore globals
+ tabptr = tabptr_old;
+ fragptr = fragptr_old;
+ operPtr = operPtr_old;
+ // done
+ if (ret == -1) {
+ ret = terrorCode ? (-(int)terrorCode) : -1;
}
+ return ret;
}
-void
-Dbtup::tuxReadKeys(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* pkSize, Uint32* pkData)
+int
+Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* dataOut)
{
ljamEntry();
+ // use own variables instead of globals
FragrecordPtr fragPtr;
fragPtr.i = fragPtrI;
ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
@@ -184,25 +187,46 @@ Dbtup::tuxReadKeys(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* pk
pagePtr.i = pageId;
ptrCheckGuard(pagePtr, cnoOfPage, page);
const Uint32 tabDescriptor = tablePtr.p->tabDescriptor;
- const Uint32 numAttrs = tablePtr.p->noOfKeyAttr;
const Uint32* attrIds = &tableDescriptor[tablePtr.p->readKeyArray].tabDescr;
- const Uint32* tupleHeader = &pagePtr.p->pageWord[pageOffset];
- Uint32 size = 0;
- for (Uint32 i = 0; i < numAttrs; i++) {
- AttributeHeader ah(attrIds[i]);
- const Uint32 attrId = ah.getAttributeId();
- const Uint32 index = tabDescriptor + (attrId << ZAD_LOG_SIZE);
- const Uint32 desc1 = tableDescriptor[index].tabDescr;
- const Uint32 desc2 = tableDescriptor[index + 1].tabDescr;
- ndbrequire(! AttributeDescriptor::getNullable(desc1));
- const Uint32 attrSize = AttributeDescriptor::getSizeInWords(desc1);
- const Uint32* attrData = tupleHeader + AttributeOffset::getOffset(desc2);
- for (Uint32 j = 0; j < attrSize; j++) {
- pkData[size + j] = attrData[j];
+ const Uint32 numAttrs = tablePtr.p->noOfKeyAttr;
+ // read pk attributes from original tuple
+ // save globals
+ TablerecPtr tabptr_old = tabptr;
+ FragrecordPtr fragptr_old = fragptr;
+ OperationrecPtr operPtr_old = operPtr;
+ // new globals
+ tabptr = tablePtr;
+ fragptr = fragPtr;
+ operPtr.i = RNIL;
+ operPtr.p = NULL;
+ // do it
+ int ret = readAttributes(pagePtr.p, pageOffset, attrIds,
+ numAttrs, dataOut, ZNIL, true);
+ // restore globals
+ tabptr = tabptr_old;
+ fragptr = fragptr_old;
+ operPtr = operPtr_old;
+ // done
+ if (ret != -1) {
+ // remove headers
+ Uint32 n = 0;
+ Uint32 i = 0;
+ while (n < numAttrs) {
+ const AttributeHeader ah(dataOut[i]);
+ Uint32 size = ah.getDataSize();
+ ndbrequire(size != 0);
+ for (Uint32 j = 0; j < size; j++) {
+ dataOut[i + j - n] = dataOut[i + j + 1];
+ }
+ n += 1;
+ i += 1 + size;
}
- size += attrSize;
+ ndbrequire((int)i == ret);
+ ret -= numAttrs;
+ } else {
+ ret = terrorCode ? (-(int)terrorCode) : -1;
}
- *pkSize = size;
+ return ret;
}
bool
@@ -349,14 +373,14 @@ Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
do {
// get fragment
FragrecordPtr fragPtr;
- if (buildPtr.p->m_fragNo == 2 * NO_OF_FRAG_PER_NODE) {
+ if (buildPtr.p->m_fragNo == 2 * MAX_FRAG_PER_NODE) {
ljam();
// build ready
buildIndexReply(signal, buildPtr.p);
c_buildIndexList.release(buildPtr);
return;
}
- ndbrequire(buildPtr.p->m_fragNo < 2 * NO_OF_FRAG_PER_NODE);
+ ndbrequire(buildPtr.p->m_fragNo < 2 * MAX_FRAG_PER_NODE);
fragPtr.i = tablePtr.p->fragrec[buildPtr.p->m_fragNo];
if (fragPtr.i == RNIL) {
ljam();
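A hypothetical standalone sketch of the header-stripping loop added to tuxReadPk above: the read buffer holds (AttributeHeader, data...) pairs, and the loop compacts it in place so only the key data words remain. For simplicity the header word here is just the data size, whereas the real code reads it via AttributeHeader::getDataSize().

#include <cassert>
#include <cstdint>
#include <vector>

// Strip numAttrs headers from buf; each header word holds the data size.
// Returns the number of data words kept.
static uint32_t stripHeaders(std::vector<uint32_t>& buf, uint32_t numAttrs)
{
  uint32_t n = 0, i = 0;
  while (n < numAttrs) {
    uint32_t size = buf[i];               // simplified header = data size
    for (uint32_t j = 0; j < size; j++)
      buf[i + j - n] = buf[i + j + 1];    // shift data left over the headers
    n += 1;
    i += 1 + size;
  }
  return i - numAttrs;
}

int main()
{
  // Two attributes: sizes 2 and 1, data {10, 11} and {20}.
  std::vector<uint32_t> buf = {2, 10, 11, 1, 20};
  uint32_t words = stripHeaders(buf, 2);
  assert(words == 3 && buf[0] == 10 && buf[1] == 11 && buf[2] == 20);
  return 0;
}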
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp b/ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp
index f8f2b9bdbd2..370ef4c4ba5 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupLCP.cpp
@@ -265,7 +265,8 @@ void Dbtup::lcpSaveCopyListLab(Signal* signal, CheckpointInfoPtr ciPtr)
// We ensure that we have actually allocated the tuple header and
// also found it. Otherwise we will fill the undo log with garbage.
/* ---------------------------------------------------------------- */
- if (regOpPtr.p->optype == ZUPDATE) {
+ if (regOpPtr.p->optype == ZUPDATE ||
+ (regOpPtr.p->optype == ZINSERT && regOpPtr.p->deleteInsertFlag)) {
ljam();
if (regOpPtr.p->realPageIdC != RNIL) {
/* ---------------------------------------------------------------- */
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
index 54cb93e9736..efea312b865 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
@@ -20,12 +20,14 @@
#include <RefConvert.hpp>
#include <ndb_limits.h>
#include <pc.hpp>
+#include <signaldata/TupFrag.hpp>
#include <signaldata/FsConf.hpp>
#include <signaldata/FsRemoveReq.hpp>
#include <signaldata/DropTab.hpp>
#include <signaldata/AlterTab.hpp>
#include <AttributeDescriptor.hpp>
#include "AttributeOffset.hpp"
+#include <my_sys.h>
#define ljam() { jamLine(20000 + __LINE__); }
#define ljamEntry() { jamEntryLine(20000 + __LINE__); }
@@ -52,7 +54,10 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
/* Uint32 schemaVersion = signal->theData[8];*/
Uint32 noOfKeyAttr = signal->theData[9];
- Uint32 noOfNewAttr = signal->theData[10];
+ Uint32 noOfNewAttr = (signal->theData[10] & 0xFFFF);
+ /* DICT sends number of character sets in upper half */
+ Uint32 noOfCharsets = (signal->theData[10] >> 16);
+
Uint32 checksumIndicator = signal->theData[11];
Uint32 noOfAttributeGroups = signal->theData[12];
Uint32 globalCheckpointIdIndicator = signal->theData[13];
@@ -75,6 +80,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
fragOperPtr.p->attributeCount = noOfAttributes;
fragOperPtr.p->freeNullBit = noOfNullAttr;
fragOperPtr.p->noOfNewAttrCount = noOfNewAttr;
+ fragOperPtr.p->charsetIndex = 0;
ndbrequire(reqinfo == ZADDFRAG);
@@ -156,6 +162,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
regTabPtr.p->tupheadsize = regTabPtr.p->tupGCPIndex;
regTabPtr.p->noOfKeyAttr = noOfKeyAttr;
+ regTabPtr.p->noOfCharsets = noOfCharsets;
regTabPtr.p->noOfAttr = noOfAttributes;
regTabPtr.p->noOfNewAttr = noOfNewAttr;
regTabPtr.p->noOfNullAttr = noOfNullAttr;
@@ -163,13 +170,14 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
regTabPtr.p->notNullAttributeMask.clear();
- Uint32 tableDescriptorRef = allocTabDescr(noOfAttributes, noOfKeyAttr, noOfAttributeGroups);
+ Uint32 offset[10];
+ Uint32 tableDescriptorRef = allocTabDescr(regTabPtr.p, offset);
if (tableDescriptorRef == RNIL) {
ljam();
fragrefuse4Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId);
return;
}//if
- setUpDescriptorReferences(tableDescriptorRef, regTabPtr.p);
+ setUpDescriptorReferences(tableDescriptorRef, regTabPtr.p, offset);
} else {
ljam();
fragOperPtr.p->definingFragment = false;
@@ -188,7 +196,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
/* -------------------------------------------------------------------- */
bool Dbtup::addfragtotab(Tablerec* const regTabPtr, Uint32 fragId, Uint32 fragIndex)
{
- for (Uint32 i = 0; i < (2 * NO_OF_FRAG_PER_NODE); i++) {
+ for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
ljam();
if (regTabPtr->fragid[i] == RNIL) {
ljam();
@@ -202,7 +210,7 @@ bool Dbtup::addfragtotab(Tablerec* const regTabPtr, Uint32 fragId, Uint32 fragIn
void Dbtup::getFragmentrec(FragrecordPtr& regFragPtr, Uint32 fragId, Tablerec* const regTabPtr)
{
- for (Uint32 i = 0; i < (2 * NO_OF_FRAG_PER_NODE); i++) {
+ for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
ljam();
if (regTabPtr->fragid[i] == fragId) {
ljam();
@@ -251,6 +259,9 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec);
Uint32 attrId = signal->theData[2];
Uint32 attrDescriptor = signal->theData[3];
+ // DICT sends extended type (ignored) and charset number
+ Uint32 extType = (signal->theData[4] & 0xFF);
+ Uint32 csNumber = (signal->theData[4] >> 16);
regTabPtr.i = fragOperPtr.p->tableidFrag;
ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);
@@ -304,6 +315,29 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
} else {
ndbrequire(false);
}//if
+ if (csNumber != 0) {
+ CHARSET_INFO* cs = get_charset(csNumber, MYF(0));
+ if (cs == NULL) {
+ ljam();
+ terrorCode = TupAddAttrRef::InvalidCharset;
+ addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
+ return;
+ }
+ Uint32 i = 0;
+ while (i < fragOperPtr.p->charsetIndex) {
+ ljam();
+ if (regTabPtr.p->charsetArray[i] == cs)
+ break;
+ i++;
+ }
+ if (i == fragOperPtr.p->charsetIndex) {
+ ljam();
+ fragOperPtr.p->charsetIndex++;
+ }
+ ndbrequire(i < regTabPtr.p->noOfCharsets);
+ regTabPtr.p->charsetArray[i] = cs;
+ AttributeOffset::setCharsetPos(attrDes2, i);
+ }
setTabDescrWord(firstTabDesIndex + 1, attrDes2);
if (regTabPtr.p->tupheadsize > MAX_TUPLE_SIZE_IN_WORDS) {
@@ -340,20 +374,28 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
return;
}//Dbtup::execTUP_ADD_ATTRREQ()
+/*
+ * Descriptor has these parts:
+ *
+ * 0 readFunctionArray ( one for each attribute )
+ * 1 updateFunctionArray ( ditto )
+ * 2 charsetArray ( pointers to distinct CHARSET_INFO )
+ * 3 readKeyArray ( attribute ids of keys )
+ * 4 attributeGroupDescriptor ( currently size 1 but unused )
+ * 5 tabDescriptor ( attribute descriptors, each ZAD_SIZE )
+ */
+
void Dbtup::setUpDescriptorReferences(Uint32 descriptorReference,
- Tablerec* const regTabPtr)
+ Tablerec* const regTabPtr,
+ const Uint32* offset)
{
- Uint32 noOfAttributes = regTabPtr->noOfAttr;
- descriptorReference += ZTD_SIZE;
- ReadFunction * tmp = (ReadFunction*)&tableDescriptor[descriptorReference].tabDescr;
- regTabPtr->readFunctionArray = tmp;
- regTabPtr->updateFunctionArray = (UpdateFunction*)(tmp + noOfAttributes);
-
- TableDescriptor * start = &tableDescriptor[descriptorReference];
- TableDescriptor * end = (TableDescriptor*)(tmp + 2 * noOfAttributes);
- regTabPtr->readKeyArray = descriptorReference + (end - start);
- regTabPtr->attributeGroupDescriptor = regTabPtr->readKeyArray + regTabPtr->noOfKeyAttr;
- regTabPtr->tabDescriptor = regTabPtr->attributeGroupDescriptor + regTabPtr->noOfAttributeGroups;
+ Uint32* desc = &tableDescriptor[descriptorReference].tabDescr;
+ regTabPtr->readFunctionArray = (ReadFunction*)(desc + offset[0]);
+ regTabPtr->updateFunctionArray = (UpdateFunction*)(desc + offset[1]);
+ regTabPtr->charsetArray = (CHARSET_INFO**)(desc + offset[2]);
+ regTabPtr->readKeyArray = descriptorReference + offset[3];
+ regTabPtr->attributeGroupDescriptor = descriptorReference + offset[4];
+ regTabPtr->tabDescriptor = descriptorReference + offset[5];
}//Dbtup::setUpDescriptorReferences()
Uint32
@@ -456,7 +498,7 @@ void Dbtup::releaseFragoperrec(FragoperrecPtr fragOperPtr)
void Dbtup::deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId)
{
- for (Uint32 i = 0; i < (2 * NO_OF_FRAG_PER_NODE); i++) {
+ for (Uint32 i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
ljam();
if (regTabPtr->fragid[i] == fragId) {
ljam();
@@ -491,14 +533,18 @@ void Dbtup::releaseTabDescr(Tablerec* const regTabPtr)
Uint32 descriptor = regTabPtr->readKeyArray;
if (descriptor != RNIL) {
ljam();
+ Uint32 offset[10];
+ getTabDescrOffsets(regTabPtr, offset);
+
regTabPtr->tabDescriptor = RNIL;
regTabPtr->readKeyArray = RNIL;
regTabPtr->readFunctionArray = NULL;
regTabPtr->updateFunctionArray = NULL;
+ regTabPtr->charsetArray = NULL;
regTabPtr->attributeGroupDescriptor= RNIL;
- Uint32 sizeFunctionArrays = 2 * (regTabPtr->noOfAttr * sizeOfReadFunction());
- descriptor -= (sizeFunctionArrays + ZTD_SIZE);
+ // move to start of descriptor
+ descriptor -= offset[3];
Uint32 retNo = getTabDescrWord(descriptor + ZTD_DATASIZE);
ndbrequire(getTabDescrWord(descriptor + ZTD_HEADER) == ZTD_TYPE_NORMAL);
ndbrequire(retNo == getTabDescrWord((descriptor + retNo) - ZTD_TR_SIZE));
@@ -515,7 +561,7 @@ void Dbtup::releaseFragment(Signal* signal, Uint32 tableId)
Uint32 fragIndex = RNIL;
Uint32 fragId = RNIL;
Uint32 i = 0;
- for (i = 0; i < (2 * NO_OF_FRAG_PER_NODE); i++) {
+ for (i = 0; i < (2 * MAX_FRAG_PER_NODE); i++) {
ljam();
if (tabPtr.p->fragid[i] != RNIL) {
ljam();
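A small sketch of the word packing used by the TUPFRAGREQ change above: the new-attribute count travels in the low 16 bits of theData[10] and the character-set count in the high 16 bits. The pack() helper is hypothetical; only the unpacking side appears in the patch.

#include <cassert>
#include <cstdint>

// Hypothetical sender-side packing; execTUPFRAGREQ above does the unpacking.
static uint32_t pack(uint32_t noOfNewAttr, uint32_t noOfCharsets)
{
  assert(noOfNewAttr <= 0xFFFF && noOfCharsets <= 0xFFFF);
  return (noOfCharsets << 16) | noOfNewAttr;
}

int main()
{
  uint32_t word = pack(3, 2);
  uint32_t noOfNewAttr = (word & 0xFFFF);   // as read in execTUPFRAGREQ
  uint32_t noOfCharsets = (word >> 16);
  assert(noOfNewAttr == 3 && noOfCharsets == 2);
  return 0;
}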
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
index a5f56a356f9..e6cc6f68842 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
@@ -35,6 +35,7 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr)
for (Uint32 i = 0; i < regTabPtr->noOfAttr; i++) {
Uint32 attrDescriptorStart = startDescriptor + (i << ZAD_LOG_SIZE);
Uint32 attrDescriptor = tableDescriptor[attrDescriptorStart].tabDescr;
+ Uint32 attrOffset = tableDescriptor[attrDescriptorStart + 1].tabDescr;
if (!AttributeDescriptor::getDynamic(attrDescriptor)) {
if ((AttributeDescriptor::getArrayType(attrDescriptor) == ZNON_ARRAY) ||
(AttributeDescriptor::getArrayType(attrDescriptor) == ZFIXED_ARRAY)) {
@@ -54,6 +55,11 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr)
} else {
ndbrequire(false);
}//if
+ // replace read function of char attribute
+ if (AttributeOffset::getCharsetFlag(attrOffset)) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readCharNotNULL;
+ }
} else {
if (AttributeDescriptor::getSizeInWords(attrDescriptor) == 1) {
ljam();
@@ -72,6 +78,11 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr)
regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHZeroWordNULLable;
regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
}//if
+ // replace read function of char attribute
+ if (AttributeOffset::getCharsetFlag(attrOffset)) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readCharNULLable;
+ }
}//if
} else if (AttributeDescriptor::getArrayType(attrDescriptor) == ZVAR_ARRAY) {
if (!AttributeDescriptor::getNullable(attrDescriptor)) {
@@ -145,11 +156,12 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr)
// tabptr.p Table record pointer
/* ---------------------------------------------------------------- */
int Dbtup::readAttributes(Page* const pagePtr,
- Uint32 tupHeadOffset,
- Uint32* inBuffer,
- Uint32 inBufLen,
- Uint32* outBuffer,
- Uint32 maxRead)
+ Uint32 tupHeadOffset,
+ const Uint32* inBuffer,
+ Uint32 inBufLen,
+ Uint32* outBuffer,
+ Uint32 maxRead,
+ bool xfrmFlag)
{
Tablerec* const regTabPtr = tabptr.p;
Uint32 numAttributes = regTabPtr->noOfAttr;
@@ -162,6 +174,7 @@ int Dbtup::readAttributes(Page* const pagePtr,
tCheckOffset = regTabPtr->tupheadsize;
tMaxRead = maxRead;
tTupleHeader = &pagePtr->pageWord[tupHeadOffset];
+ tXfrmFlag = xfrmFlag;
ndbrequire(tupHeadOffset + tCheckOffset <= ZWORDS_ON_PAGE);
while (inBufIndex < inBufLen) {
@@ -185,16 +198,22 @@ int Dbtup::readAttributes(Page* const pagePtr,
attributeOffset)) {
continue;
} else {
- return (Uint32)-1;
+ return -1;
}//if
+ } else if(attributeId & AttributeHeader::PSUEDO){
+ Uint32 sz = read_psuedo(attributeId,
+ outBuffer+tmpAttrBufIndex+1);
+ AttributeHeader::init(&outBuffer[tmpAttrBufIndex], attributeId, sz);
+ tOutBufIndex = tmpAttrBufIndex + 1 + sz;
} else {
terrorCode = ZATTRIBUTE_ID_ERROR;
- return (Uint32)-1;
+ return -1;
}//if
}//while
return tOutBufIndex;
}//Dbtup::readAttributes()
+#if 0
int Dbtup::readAttributesWithoutHeader(Page* const pagePtr,
Uint32 tupHeadOffset,
Uint32* inBuffer,
@@ -237,16 +256,17 @@ int Dbtup::readAttributesWithoutHeader(Page* const pagePtr,
attributeOffset)) {
continue;
} else {
- return (Uint32)-1;
+ return -1;
}//if
} else {
terrorCode = ZATTRIBUTE_ID_ERROR;
- return (Uint32)-1;
+ return -1;
}//if
}//while
ndbrequire(attrBufIndex == inBufLen);
return tOutBufIndex;
}//Dbtup::readAttributes()
+#endif
bool
Dbtup::readFixedSizeTHOneWordNotNULL(Uint32* outBuffer,
@@ -535,6 +555,74 @@ Dbtup::readDynSmallVarSize(Uint32* outBuffer,
return false;
}//Dbtup::readDynSmallVarSize()
+
+bool
+Dbtup::readCharNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Uint32 indexBuf = tOutBufIndex;
+ Uint32 readOffset = AttributeOffset::getOffset(attrDes2);
+ Uint32 attrNoOfWords = AttributeDescriptor::getSizeInWords(attrDescriptor);
+ Uint32 newIndexBuf = indexBuf + attrNoOfWords;
+ Uint32 maxRead = tMaxRead;
+
+ ndbrequire((readOffset + attrNoOfWords - 1) < tCheckOffset);
+ if (newIndexBuf <= maxRead) {
+ ljam();
+ ahOut->setDataSize(attrNoOfWords);
+ if (! tXfrmFlag) {
+ MEMCOPY_NO_WORDS(&outBuffer[indexBuf],
+ &tTupleHeader[readOffset],
+ attrNoOfWords);
+ } else {
+ ljam();
+ Tablerec* regTabPtr = tabptr.p;
+ Uint32 i = AttributeOffset::getCharsetPos(attrDes2);
+ ndbrequire(i < tabptr.p->noOfCharsets);
+ // not const in MySQL
+ CHARSET_INFO* cs = tabptr.p->charsetArray[i];
+ // XXX should strip Uint32 null padding
+ const unsigned nBytes = attrNoOfWords << 2;
+ unsigned n =
+ (*cs->coll->strnxfrm)(cs,
+ (uchar*)&outBuffer[indexBuf],
+ nBytes,
+ (const uchar*)&tTupleHeader[readOffset],
+ nBytes);
+ // pad with ascii spaces
+ while (n < nBytes)
+ ((uchar*)&outBuffer[indexBuf])[n++] = 0x20;
+ }
+ tOutBufIndex = newIndexBuf;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
+ return false;
+ }
+}
+
+bool
+Dbtup::readCharNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ if (!nullFlagCheck(attrDes2)) {
+ ljam();
+ return readCharNotNULL(outBuffer,
+ ahOut,
+ attrDescriptor,
+ attrDes2);
+ } else {
+ ljam();
+ ahOut->setNULL();
+ return true;
+ }
+}
+
/* ---------------------------------------------------------------------- */
/* THIS ROUTINE IS USED TO UPDATE A NUMBER OF ATTRIBUTES. IT IS */
/* USED BY THE INSERT ROUTINE, THE UPDATE ROUTINE AND IT CAN BE */
@@ -590,12 +678,12 @@ int Dbtup::updateAttributes(Page* const pagePtr,
continue;
} else {
ljam();
- return (Uint32)-1;
+ return -1;
}//if
} else {
ljam();
terrorCode = ZATTRIBUTE_ID_ERROR;
- return (Uint32)-1;
+ return -1;
}//if
}//while
return 0;
@@ -893,4 +981,24 @@ Dbtup::updateDynSmallVarSize(Uint32* inBuffer,
return false;
}//Dbtup::updateDynSmallVarSize()
-
+Uint32
+Dbtup::read_psuedo(Uint32 attrId, Uint32* outBuffer){
+ Uint32 tmp[sizeof(SignalHeader)+25];
+ Signal * signal = (Signal*)&tmp;
+ switch(attrId){
+ case AttributeHeader::FRAGMENT:
+ * outBuffer = operPtr.p->fragId;
+ return 1;
+ case AttributeHeader::ROW_COUNT:
+ case AttributeHeader::COMMIT_COUNT:
+ signal->theData[0] = operPtr.p->userpointer;
+ signal->theData[1] = attrId;
+
+ EXECUTE_DIRECT(DBLQH, GSN_READ_PSUEDO_REQ, signal, 2);
+ outBuffer[0] = signal->theData[0];
+ outBuffer[1] = signal->theData[1];
+ return 2;
+ default:
+ return 0;
+ }
+}
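A minimal sketch of the idea behind the readCharNotNULL xfrm path above: transform a char key into a normalized, space-padded buffer of the full attribute width so that plain binary comparison works afterwards. std::toupper stands in for the collation's strnxfrm, which is charset-specific; the buffer width is illustrative.

#include <algorithm>
#include <cctype>
#include <cstring>
#include <string>

// Stand-in transform: uppercase and pad with 0x20 spaces, as the real code
// pads the strnxfrm output up to the attribute's full width.
static std::string xfrmKey(const std::string& src, size_t widthBytes)
{
  std::string out(widthBytes, ' ');
  size_t n = std::min(src.size(), widthBytes);
  for (size_t i = 0; i < n; i++)
    out[i] = (char)std::toupper((unsigned char)src[i]);
  return out;
}

int main()
{
  // After the transform, "abc" and "ABC" compare equal byte-for-byte.
  return std::memcmp(xfrmKey("abc", 8).data(), xfrmKey("ABC", 8).data(), 8);
}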
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp b/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp
index d31ab43f108..642ba270760 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp
@@ -31,12 +31,33 @@
/* memory attached to fragments (could be allocated per table */
/* instead. Performs its task by a buddy algorithm. */
/* **************************************************************** */
-Uint32 Dbtup::allocTabDescr(Uint32 noOfAttributes, Uint32 noOfKeyAttr, Uint32 noOfAttributeGroups)
+
+Uint32
+Dbtup::getTabDescrOffsets(const Tablerec* regTabPtr, Uint32* offset)
+{
+ // belongs to configure.in
+ unsigned sizeOfPointer = sizeof(CHARSET_INFO*);
+ ndbrequire((sizeOfPointer & 0x3) == 0);
+ sizeOfPointer = (sizeOfPointer >> 2);
+ // do in layout order and return offsets (see DbtupMeta.cpp)
+ Uint32 allocSize = 0;
+ // magically aligned to 8 bytes
+ offset[0] = allocSize += ZTD_SIZE;
+ offset[1] = allocSize += regTabPtr->noOfAttr * sizeOfReadFunction();
+ offset[2] = allocSize += regTabPtr->noOfAttr * sizeOfReadFunction();
+ offset[3] = allocSize += regTabPtr->noOfCharsets * sizeOfPointer;
+ offset[4] = allocSize += regTabPtr->noOfKeyAttr;
+ offset[5] = allocSize += regTabPtr->noOfAttributeGroups;
+ allocSize += regTabPtr->noOfAttr * ZAD_SIZE;
+ allocSize += ZTD_TRAILER_SIZE;
+ // return number of words
+ return allocSize;
+}
+
+Uint32 Dbtup::allocTabDescr(const Tablerec* regTabPtr, Uint32* offset)
{
Uint32 reference = RNIL;
- Uint32 allocSize = (ZTD_SIZE + ZTD_TRAILER_SIZE) + (noOfAttributes * ZAD_SIZE);
- allocSize += noOfAttributeGroups;
- allocSize += ((2 * noOfAttributes * sizeOfReadFunction()) + noOfKeyAttr);
+ Uint32 allocSize = getTabDescrOffsets(regTabPtr, offset);
/* ---------------------------------------------------------------- */
/* ALWAYS ALLOCATE A MULTIPLE OF 16 BYTES */
/* ---------------------------------------------------------------- */
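A worked, self-contained example of the running-sum layout computed by getTabDescrOffsets above: each descriptor section starts where the previous one ends, and the running total doubles as the allocation size. All sizes below (ZTD_SIZE, pointer widths, attribute counts) are stand-in values for illustration.

#include <cassert>
#include <cstdint>

int main()
{
  // Hypothetical table: 3 attributes, 1 charset, 2 key attributes, 1 group.
  const uint32_t ZTD_SIZE = 2, ZTD_TRAILER_SIZE = 2, ZAD_SIZE = 2;
  const uint32_t noOfAttr = 3, noOfCharsets = 1, noOfKeyAttr = 2, noOfGroups = 1;
  const uint32_t fnWords = 2;                 // words per function pointer
  const uint32_t csWords = sizeof(void*) / 4; // words per CHARSET_INFO pointer

  uint32_t offset[6];
  uint32_t allocSize = 0;
  offset[0] = allocSize += ZTD_SIZE;               // readFunctionArray
  offset[1] = allocSize += noOfAttr * fnWords;     // updateFunctionArray
  offset[2] = allocSize += noOfAttr * fnWords;     // charsetArray
  offset[3] = allocSize += noOfCharsets * csWords; // readKeyArray
  offset[4] = allocSize += noOfKeyAttr;            // attributeGroupDescriptor
  offset[5] = allocSize += noOfGroups;             // tabDescriptor
  allocSize += noOfAttr * ZAD_SIZE + ZTD_TRAILER_SIZE;

  // Each section starts where the previous one ends.
  assert(offset[1] == offset[0] + noOfAttr * fnWords);
  assert(offset[5] == offset[4] + noOfGroups);
  return 0;
}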
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp b/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
index a93ff4566e7..aac5c326cad 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
@@ -652,6 +652,7 @@ void Dbtup::executeTrigger(Signal* signal,
return;
default:
ndbrequire(false);
+ executeDirect= false; // remove warning
}//switch
regOperPtr->noFiredTriggers++;
@@ -746,13 +747,15 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
//--------------------------------------------------------------------
// Read Primary Key Values
//--------------------------------------------------------------------
- noPrimKey = readAttributes(pagep.p,
- tupheadoffset,
- &tableDescriptor[regTabPtr->readKeyArray].tabDescr,
- regTabPtr->noOfKeyAttr,
- keyBuffer,
- ZATTR_BUFFER_SIZE);
- ndbrequire(noPrimKey != (Uint32)-1);
+ int ret= readAttributes(pagep.p,
+ tupheadoffset,
+ &tableDescriptor[regTabPtr->readKeyArray].tabDescr,
+ regTabPtr->noOfKeyAttr,
+ keyBuffer,
+ ZATTR_BUFFER_SIZE,
+ true);
+ ndbrequire(ret != -1);
+ noPrimKey= ret;
Uint32 numAttrsToRead;
if ((regOperPtr->optype == ZUPDATE) &&
@@ -787,13 +790,15 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
if ((regOperPtr->optype != ZDELETE) ||
(trigPtr->sendBeforeValues)) {
ljam();
- noMainWords = readAttributes(pagep.p,
- tupheadoffset,
- &readBuffer[0],
- numAttrsToRead,
- mainBuffer,
- ZATTR_BUFFER_SIZE);
- ndbrequire(noMainWords != (Uint32)-1);
+ int ret= readAttributes(pagep.p,
+ tupheadoffset,
+ &readBuffer[0],
+ numAttrsToRead,
+ mainBuffer,
+ ZATTR_BUFFER_SIZE,
+ true);
+ ndbrequire(ret != -1);
+ noMainWords= ret;
} else {
ljam();
noMainWords = 0;
@@ -811,14 +816,16 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
pagep.i = regOperPtr->realPageIdC;
ptrCheckGuard(pagep, cnoOfPage, page);
- noCopyWords = readAttributes(pagep.p,
- tupheadoffset,
- &readBuffer[0],
- numAttrsToRead,
- copyBuffer,
- ZATTR_BUFFER_SIZE);
+ int ret= readAttributes(pagep.p,
+ tupheadoffset,
+ &readBuffer[0],
+ numAttrsToRead,
+ copyBuffer,
+ ZATTR_BUFFER_SIZE,
+ true);
- ndbrequire(noCopyWords != (Uint32)-1);
+ ndbrequire(ret != -1);
+ noCopyWords = ret;
if ((noMainWords == noCopyWords) &&
(memcmp(mainBuffer, copyBuffer, noMainWords << 2) == 0)) {
//--------------------------------------------------------------------
@@ -1071,6 +1078,7 @@ Dbtup::executeTuxCommitTriggers(Signal* signal,
ndbrequire(tupVersion == regOperPtr->tupVersion);
} else {
ndbrequire(false);
+ tupVersion= 0; // remove warning
}
// fill in constant part
req->tableId = regOperPtr->tableRef;
@@ -1115,6 +1123,7 @@ Dbtup::executeTuxAbortTriggers(Signal* signal,
return;
} else {
ndbrequire(false);
+ tupVersion= 0; // remove warning
}
// fill in constant part
req->tableId = regOperPtr->tableRef;
diff --git a/ndb/src/kernel/blocks/dbtux/Dbtux.hpp b/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
index 1a3c7f64ac3..8896324f793 100644
--- a/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
+++ b/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
@@ -32,7 +32,6 @@
// signal classes
#include <signaldata/DictTabInfo.hpp>
#include <signaldata/TuxContinueB.hpp>
-#include <signaldata/BuildIndx.hpp>
#include <signaldata/TupFrag.hpp>
#include <signaldata/AlterIndx.hpp>
#include <signaldata/DropTab.hpp>
@@ -108,16 +107,16 @@ public:
private:
// sizes are in words (Uint32)
- static const unsigned MaxIndexFragments = 2 * NO_OF_FRAG_PER_NODE;
- static const unsigned MaxIndexAttributes = MAX_ATTRIBUTES_IN_INDEX;
- static const unsigned MaxAttrDataSize = 2048;
+ STATIC_CONST( MaxIndexFragments = 2 * MAX_FRAG_PER_NODE );
+ STATIC_CONST( MaxIndexAttributes = MAX_ATTRIBUTES_IN_INDEX );
+ STATIC_CONST( MaxAttrDataSize = 2048 );
public:
- static const unsigned DescPageSize = 256;
+ STATIC_CONST( DescPageSize = 256 );
private:
- static const unsigned MaxTreeNodeSize = MAX_TTREE_NODE_SIZE;
- static const unsigned MaxPrefSize = MAX_TTREE_PREF_SIZE;
- static const unsigned ScanBoundSegmentSize = 7;
- static const unsigned MaxAccLockOps = MAX_PARALLEL_OP_PER_SCAN;
+ STATIC_CONST( MaxTreeNodeSize = MAX_TTREE_NODE_SIZE );
+ STATIC_CONST( MaxPrefSize = MAX_TTREE_PREF_SIZE );
+ STATIC_CONST( ScanBoundSegmentSize = 7 );
+ STATIC_CONST( MaxAccLockOps = MAX_PARALLEL_OP_PER_SCAN );
BLOCK_DEFINES(Dbtux);
// forward declarations
@@ -160,29 +159,33 @@ private:
};
// AttributeHeader size is assumed to be 1 word
- static const unsigned AttributeHeaderSize = 1;
-
- /*
-   * Array of pointers to TUP table attributes. Always read-only.
- */
- typedef const Uint32** TableData;
+ STATIC_CONST( AttributeHeaderSize = 1 );
/*
* Logical tuple address, "local key". Identifies table tuples.
*/
typedef Uint32 TupAddr;
- static const unsigned NullTupAddr = (Uint32)-1;
+ STATIC_CONST( NullTupAddr = (Uint32)-1 );
/*
* Physical tuple address in TUP. Provides fast access to table tuple
* or index node. Valid within the db node and across timeslices.
* Not valid between db nodes or across restarts.
+ *
+ * To avoid wasting an Uint16 the pageid is split in two.
*/
struct TupLoc {
- Uint32 m_pageId; // page i-value
+ private:
+ Uint16 m_pageId1; // page i-value (big-endian)
+ Uint16 m_pageId2;
Uint16 m_pageOffset; // page offset in words
+ public:
TupLoc();
TupLoc(Uint32 pageId, Uint16 pageOffset);
+ Uint32 getPageId() const;
+ void setPageId(Uint32 pageId);
+ Uint32 getPageOffset() const;
+ void setPageOffset(Uint32 pageOffset);
bool operator==(const TupLoc& loc) const;
bool operator!=(const TupLoc& loc) const;
};
@@ -213,7 +216,7 @@ private:
bool eq(const TreeEnt ent) const;
int cmp(const TreeEnt ent) const;
};
- static const unsigned TreeEntSize = sizeof(TreeEnt) >> 2;
+ STATIC_CONST( TreeEntSize = sizeof(TreeEnt) >> 2 );
static const TreeEnt NullTreeEnt;
/*
@@ -229,30 +232,25 @@ private:
* work entry part 5
*
* There are 3 links to other nodes: left child, right child, parent.
- * These are in TupLoc format but the pageIds and pageOffsets are
- * stored in separate arrays (saves 1 word).
- *
* Occupancy (number of entries) is at least 1 except temporarily when
- * a node is about to be removed. If occupancy is 1, only max entry
- * is present but both min and max prefixes are set.
+ * a node is about to be removed.
*/
struct TreeNode;
friend struct TreeNode;
struct TreeNode {
- Uint32 m_linkPI[3]; // link to 0-left child 1-right child 2-parent
- Uint16 m_linkPO[3]; // page offsets for above real page ids
+ TupLoc m_link[3]; // link to 0-left child 1-right child 2-parent
unsigned m_side : 2; // we are 0-left child 1-right child 2-root
- int m_balance : 2; // balance -1, 0, +1
+ unsigned m_balance : 2; // balance -1, 0, +1 plus 1 for Solaris CC
unsigned pad1 : 4;
Uint8 m_occup; // current number of entries
Uint32 m_nodeScan; // list of scans at this node
TreeNode();
};
- static const unsigned NodeHeadSize = sizeof(TreeNode) >> 2;
+ STATIC_CONST( NodeHeadSize = sizeof(TreeNode) >> 2 );
/*
- * Tree nodes are not always accessed fully, for cache reasons. There
- * are 3 access sizes.
+ * Tree node "access size" was for an early version with signal
+ * interface to TUP. It is now used only to compute sizes.
*/
enum AccSize {
AccNone = 0,
@@ -285,7 +283,7 @@ private:
* m_occup), and whether the position is at an existing entry or
* before one (if any). Position m_occup points past the node and is
* also represented by position 0 of next node. Includes direction
- * and copy of entry used by scan.
+ * used by scan.
*/
struct TreePos;
friend struct TreePos;
@@ -293,8 +291,7 @@ private:
TupLoc m_loc; // physical node address
Uint16 m_pos; // position 0 to m_occup
Uint8 m_match; // at an existing entry
- Uint8 m_dir; // from link (0-2) or within node (3)
- TreeEnt m_ent; // copy of current entry
+ Uint8 m_dir; // see scanNext()
TreePos();
};
@@ -326,17 +323,21 @@ private:
unsigned m_indexId : 24;
unsigned pad1 : 8;
};
- static const unsigned DescHeadSize = sizeof(DescHead) >> 2;
+ STATIC_CONST( DescHeadSize = sizeof(DescHead) >> 2 );
/*
* Attribute metadata. Size must be multiple of word size.
+ *
+ * Prefix comparison of char data must use strxfrm and binary
+ * comparison. The charset is currently unused.
*/
struct DescAttr {
Uint32 m_attrDesc; // standard AttributeDescriptor
Uint16 m_primaryAttrId;
- Uint16 m_typeId;
+ unsigned m_typeId : 6;
+ unsigned m_charset : 10;
};
- static const unsigned DescAttrSize = sizeof(DescAttr) >> 2;
+ STATIC_CONST( DescAttrSize = sizeof(DescAttr) >> 2 );
/*
* Complete metadata for one index. The array of attributes has
@@ -371,6 +372,10 @@ private:
* a separate lock wait flag. It may be for current entry or it may
* be for an entry we were moved away from. In any case nothing
* happens with current entry before lock wait flag is cleared.
+ *
+ * An unfinished scan is always linked to some tree node, and has
+ * current position and direction (see comments at scanNext). There
+ * is also a copy of latest entry found.
*/
struct ScanOp;
friend struct ScanOp;
@@ -399,8 +404,6 @@ private:
Uint32 m_savePointId;
// lock waited for or obtained and not yet passed to LQH
Uint32 m_accLockOp;
- // locks obtained and passed to LQH but not yet returned by LQH
- Uint32 m_accLockOps[MaxAccLockOps];
Uint8 m_readCommitted; // no locking
Uint8 m_lockMode;
Uint8 m_keyInfo;
@@ -409,13 +412,20 @@ private:
ScanBound* m_bound[2]; // pointers to above 2
Uint16 m_boundCnt[2]; // number of bounds in each
TreePos m_scanPos; // position
- TreeEnt m_lastEnt; // last entry returned
+ TreeEnt m_scanEnt; // latest entry found
Uint32 m_nodeScan; // next scan at node (single-linked)
union {
Uint32 nextPool;
Uint32 nextList;
};
Uint32 prevList;
+ /*
+ * Locks obtained and passed to LQH but not yet returned by LQH.
+ * The max was increased from 16 to 992 (default 64). Record max
+ * ever used in this scan. TODO fix quadratic behaviour
+ */
+ Uint32 m_maxAccLockOps;
+ Uint32 m_accLockOps[MaxAccLockOps];
ScanOp(ScanBoundPool& scanBoundPool);
};
typedef Ptr<ScanOp> ScanOpPtr;
@@ -472,7 +482,7 @@ private:
Uint16 m_numAttrs;
bool m_storeNullKey;
TreeHead m_tree;
- TupLoc m_freeLoc; // one node pre-allocated for insert
+ TupLoc m_freeLoc; // list of free index nodes
DLList<ScanOp> m_scanList; // current scans on this fragment
Uint32 m_tupIndexFragPtrI;
Uint32 m_tupTableFragPtrI[2];
@@ -516,7 +526,6 @@ private:
Frag& m_frag; // fragment using the node
TupLoc m_loc; // physical node address
TreeNode* m_node; // pointer to node storage
- AccSize m_acc; // accessed size
NodeHandle(Frag& frag);
NodeHandle(const NodeHandle& node);
NodeHandle& operator=(const NodeHandle& node);
@@ -553,9 +562,9 @@ private:
void execREAD_CONFIG_REQ(Signal* signal);
// utils
void setKeyAttrs(const Frag& frag);
- void readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, TableData keyData);
- void readTablePk(const Frag& frag, TreeEnt ent, unsigned& pkSize, Data pkData);
- void copyAttrs(const Frag& frag, TableData data1, Data data2, unsigned maxlen2 = MaxAttrDataSize);
+ void readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, Data keyData);
+ void readTablePk(const Frag& frag, TreeEnt ent, Data pkData, unsigned& pkSize);
+ void copyAttrs(const Frag& frag, ConstData data1, Data data2, unsigned maxlen2 = MaxAttrDataSize);
/*
* DbtuxMeta.cpp
@@ -577,18 +586,24 @@ private:
* DbtuxNode.cpp
*/
int allocNode(Signal* signal, NodeHandle& node);
- void accessNode(Signal* signal, NodeHandle& node, AccSize acc);
- void selectNode(Signal* signal, NodeHandle& node, TupLoc loc, AccSize acc);
- void insertNode(Signal* signal, NodeHandle& node, AccSize acc);
- void deleteNode(Signal* signal, NodeHandle& node);
- void setNodePref(Signal* signal, NodeHandle& node);
+ void selectNode(NodeHandle& node, TupLoc loc);
+ void insertNode(NodeHandle& node);
+ void deleteNode(NodeHandle& node);
+ void setNodePref(NodeHandle& node);
// node operations
- void nodePushUp(Signal* signal, NodeHandle& node, unsigned pos, const TreeEnt& ent);
- void nodePopDown(Signal* signal, NodeHandle& node, unsigned pos, TreeEnt& ent);
- void nodePushDown(Signal* signal, NodeHandle& node, unsigned pos, TreeEnt& ent);
- void nodePopUp(Signal* signal, NodeHandle& node, unsigned pos, TreeEnt& ent);
- void nodeSlide(Signal* signal, NodeHandle& dstNode, NodeHandle& srcNode, unsigned i);
+ void nodePushUp(NodeHandle& node, unsigned pos, const TreeEnt& ent, Uint32 scanList);
+ void nodePushUpScans(NodeHandle& node, unsigned pos);
+ void nodePopDown(NodeHandle& node, unsigned pos, TreeEnt& en, Uint32* scanList);
+ void nodePopDownScans(NodeHandle& node, unsigned pos);
+ void nodePushDown(NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32& scanList);
+ void nodePushDownScans(NodeHandle& node, unsigned pos);
+ void nodePopUp(NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32 scanList);
+ void nodePopUpScans(NodeHandle& node, unsigned pos);
+ void nodeSlide(NodeHandle& dstNode, NodeHandle& srcNode, unsigned cnt, unsigned i);
// scans linked to node
+ void addScanList(NodeHandle& node, unsigned pos, Uint32 scanList);
+ void removeScanList(NodeHandle& node, unsigned pos, Uint32& scanList);
+ void moveScanList(NodeHandle& node, unsigned pos);
void linkScan(NodeHandle& node, ScanOpPtr scanPtr);
void unlinkScan(NodeHandle& node, ScanOpPtr scanPtr);
bool islinkScan(NodeHandle& node, ScanOpPtr scanPtr);
@@ -596,10 +611,21 @@ private:
/*
* DbtuxTree.cpp
*/
- void treeAdd(Signal* signal, Frag& frag, TreePos treePos, TreeEnt ent);
- void treeRemove(Signal* signal, Frag& frag, TreePos treePos);
- void treeRotateSingle(Signal* signal, Frag& frag, NodeHandle& node, unsigned i);
- void treeRotateDouble(Signal* signal, Frag& frag, NodeHandle& node, unsigned i);
+ // add entry
+ void treeAdd(Frag& frag, TreePos treePos, TreeEnt ent);
+ void treeAddFull(Frag& frag, NodeHandle lubNode, unsigned pos, TreeEnt ent);
+ void treeAddNode(Frag& frag, NodeHandle lubNode, unsigned pos, TreeEnt ent, NodeHandle parentNode, unsigned i);
+ void treeAddRebalance(Frag& frag, NodeHandle node, unsigned i);
+ // remove entry
+ void treeRemove(Frag& frag, TreePos treePos);
+ void treeRemoveInner(Frag& frag, NodeHandle lubNode, unsigned pos);
+ void treeRemoveSemi(Frag& frag, NodeHandle node, unsigned i);
+ void treeRemoveLeaf(Frag& frag, NodeHandle node);
+ void treeRemoveNode(Frag& frag, NodeHandle node);
+ void treeRemoveRebalance(Frag& frag, NodeHandle node, unsigned i);
+ // rotate
+ void treeRotateSingle(Frag& frag, NodeHandle& node, unsigned i);
+ void treeRotateDouble(Frag& frag, NodeHandle& node, unsigned i);
/*
* DbtuxScan.cpp
@@ -611,9 +637,9 @@ private:
void execACCKEYCONF(Signal* signal);
void execACCKEYREF(Signal* signal);
void execACC_ABORTCONF(Signal* signal);
- void scanFirst(Signal* signal, ScanOpPtr scanPtr);
- void scanNext(Signal* signal, ScanOpPtr scanPtr);
- bool scanVisible(Signal* signal, ScanOpPtr scanPtr, TreeEnt ent);
+ void scanFirst(ScanOpPtr scanPtr);
+ void scanNext(ScanOpPtr scanPtr);
+ bool scanVisible(ScanOpPtr scanPtr, TreeEnt ent);
void scanClose(Signal* signal, ScanOpPtr scanPtr);
void addAccLockOp(ScanOp& scan, Uint32 accLockOp);
void removeAccLockOp(ScanOp& scan, Uint32 accLockOp);
@@ -622,17 +648,15 @@ private:
/*
* DbtuxSearch.cpp
*/
- void searchToAdd(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos);
- void searchToRemove(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos);
- void searchToScan(Signal* signal, Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos);
+ void searchToAdd(Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos);
+ void searchToRemove(Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos);
+ void searchToScan(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos);
/*
* DbtuxCmp.cpp
*/
- int cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, ConstData entryData, unsigned maxlen = MaxAttrDataSize);
- int cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, TableData entryKey);
+ int cmpSearchKey(const Frag& frag, unsigned& start, ConstData searchKey, ConstData entryData, unsigned maxlen = MaxAttrDataSize);
int cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, ConstData entryData, unsigned maxlen = MaxAttrDataSize);
- int cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, TableData entryKey);
/*
* DbtuxDebug.cpp
@@ -650,7 +674,7 @@ private:
PrintPar();
};
void printTree(Signal* signal, Frag& frag, NdbOut& out);
- void printNode(Signal* signal, Frag& frag, NdbOut& out, TupLoc loc, PrintPar& par);
+ void printNode(Frag& frag, NdbOut& out, TupLoc loc, PrintPar& par);
friend class NdbOut& operator<<(NdbOut&, const TupLoc&);
friend class NdbOut& operator<<(NdbOut&, const TreeEnt&);
friend class NdbOut& operator<<(NdbOut&, const TreeNode&);
@@ -670,8 +694,8 @@ private:
DebugTree = 4, // log and check tree after each op
DebugScan = 8 // log scans
};
- static const int DataFillByte = 0xa2;
- static const int NodeFillByte = 0xa4;
+ STATIC_CONST( DataFillByte = 0xa2 );
+ STATIC_CONST( NodeFillByte = 0xa4 );
#endif
// start up info
@@ -679,17 +703,27 @@ private:
Uint32 c_typeOfStart;
/*
- * Array of index key attribute ids in AttributeHeader format.
- * Includes fixed attribute sizes. This is global data set at
- * operation start and is not passed as a parameter.
+ * Global data set at operation start. Unpacked from index metadata.
+ * Not passed as parameter to methods. Invalid across timeslices.
+ *
+ * TODO inline all into index metadata
*/
+
+ // index key attr ids with sizes in AttributeHeader format
Data c_keyAttrs;
- // buffer for search key data as pointers to TUP storage
- TableData c_searchKey;
+ // pointers to index key comparison functions
+ NdbSqlUtil::Cmp** c_sqlCmp;
- // buffer for current entry key data as pointers to TUP storage
- TableData c_entryKey;
+ /*
+ * Other buffers used during the operation.
+ */
+
+ // buffer for search key data with headers
+ Data c_searchKey;
+
+ // buffer for current entry key data with headers
+ Data c_entryKey;
// buffer for scan bounds and keyinfo (primary key)
Data c_dataBuffer;
@@ -798,22 +832,52 @@ Dbtux::ConstData::operator=(Data data)
inline
Dbtux::TupLoc::TupLoc() :
- m_pageId(RNIL),
+ m_pageId1(RNIL >> 16),
+ m_pageId2(RNIL & 0xFFFF),
m_pageOffset(0)
{
}
inline
Dbtux::TupLoc::TupLoc(Uint32 pageId, Uint16 pageOffset) :
- m_pageId(pageId),
+ m_pageId1(pageId >> 16),
+ m_pageId2(pageId & 0xFFFF),
m_pageOffset(pageOffset)
{
}
+inline Uint32
+Dbtux::TupLoc::getPageId() const
+{
+ return (m_pageId1 << 16) | m_pageId2;
+}
+
+inline void
+Dbtux::TupLoc::setPageId(Uint32 pageId)
+{
+ m_pageId1 = (pageId >> 16);
+ m_pageId2 = (pageId & 0xFFFF);
+}
+
+inline Uint32
+Dbtux::TupLoc::getPageOffset() const
+{
+ return (Uint32)m_pageOffset;
+}
+
+inline void
+Dbtux::TupLoc::setPageOffset(Uint32 pageOffset)
+{
+ m_pageOffset = (Uint16)pageOffset;
+}
+
inline bool
Dbtux::TupLoc::operator==(const TupLoc& loc) const
{
- return m_pageId == loc.m_pageId && m_pageOffset == loc.m_pageOffset;
+ return
+ m_pageId1 == loc.m_pageId1 &&
+ m_pageId2 == loc.m_pageId2 &&
+ m_pageOffset == loc.m_pageOffset;
}
inline bool
@@ -844,13 +908,13 @@ Dbtux::TreeEnt::eq(const TreeEnt ent) const
inline int
Dbtux::TreeEnt::cmp(const TreeEnt ent) const
{
- if (m_tupLoc.m_pageId < ent.m_tupLoc.m_pageId)
+ if (m_tupLoc.getPageId() < ent.m_tupLoc.getPageId())
return -1;
- if (m_tupLoc.m_pageId > ent.m_tupLoc.m_pageId)
+ if (m_tupLoc.getPageId() > ent.m_tupLoc.getPageId())
return +1;
- if (m_tupLoc.m_pageOffset < ent.m_tupLoc.m_pageOffset)
+ if (m_tupLoc.getPageOffset() < ent.m_tupLoc.getPageOffset())
return -1;
- if (m_tupLoc.m_pageOffset > ent.m_tupLoc.m_pageOffset)
+ if (m_tupLoc.getPageOffset() > ent.m_tupLoc.getPageOffset())
return +1;
if (m_tupVersion < ent.m_tupVersion)
return -1;
@@ -868,17 +932,14 @@ Dbtux::TreeEnt::cmp(const TreeEnt ent) const
inline
Dbtux::TreeNode::TreeNode() :
m_side(2),
- m_balance(0),
+ m_balance(0 + 1),
pad1(0),
m_occup(0),
m_nodeScan(RNIL)
{
- m_linkPI[0] = NullTupLoc.m_pageId;
- m_linkPO[0] = NullTupLoc.m_pageOffset;
- m_linkPI[1] = NullTupLoc.m_pageId;
- m_linkPO[1] = NullTupLoc.m_pageOffset;
- m_linkPI[2] = NullTupLoc.m_pageId;
- m_linkPO[2] = NullTupLoc.m_pageOffset;
+ m_link[0] = NullTupLoc;
+ m_link[1] = NullTupLoc;
+ m_link[2] = NullTupLoc;
}
// Dbtux::TreeHead
@@ -906,7 +967,6 @@ Dbtux::TreeHead::getSize(AccSize acc) const
case AccFull:
return m_nodeSize;
}
- abort();
return 0;
}
@@ -931,8 +991,7 @@ Dbtux::TreePos::TreePos() :
m_loc(),
m_pos(ZNIL),
m_match(false),
- m_dir(255),
- m_ent()
+ m_dir(255)
{
}
@@ -973,16 +1032,19 @@ Dbtux::ScanOp::ScanOp(ScanBoundPool& scanBoundPool) :
m_boundMin(scanBoundPool),
m_boundMax(scanBoundPool),
m_scanPos(),
- m_lastEnt(),
- m_nodeScan(RNIL)
+ m_scanEnt(),
+ m_nodeScan(RNIL),
+ m_maxAccLockOps(0)
{
m_bound[0] = &m_boundMin;
m_bound[1] = &m_boundMax;
m_boundCnt[0] = 0;
m_boundCnt[1] = 0;
+#ifdef VM_TRACE
for (unsigned i = 0; i < MaxAccLockOps; i++) {
- m_accLockOps[i] = RNIL;
+ m_accLockOps[i] = 0x1f1f1f1f;
}
+#endif
}
// Dbtux::Index
@@ -1047,8 +1109,7 @@ inline
Dbtux::NodeHandle::NodeHandle(Frag& frag) :
m_frag(frag),
m_loc(),
- m_node(0),
- m_acc(AccNone)
+ m_node(0)
{
}
@@ -1056,8 +1117,7 @@ inline
Dbtux::NodeHandle::NodeHandle(const NodeHandle& node) :
m_frag(node.m_frag),
m_loc(node.m_loc),
- m_node(node.m_node),
- m_acc(node.m_acc)
+ m_node(node.m_node)
{
}
@@ -1067,7 +1127,6 @@ Dbtux::NodeHandle::operator=(const NodeHandle& node)
ndbassert(&m_frag == &node.m_frag);
m_loc = node.m_loc;
m_node = node.m_node;
- m_acc = node.m_acc;
return *this;
}
@@ -1081,13 +1140,13 @@ inline Dbtux::TupLoc
Dbtux::NodeHandle::getLink(unsigned i)
{
ndbrequire(i <= 2);
- return TupLoc(m_node->m_linkPI[i], m_node->m_linkPO[i]);
+ return m_node->m_link[i];
}
inline unsigned
Dbtux::NodeHandle::getChilds()
{
- return (getLink(0) != NullTupLoc) + (getLink(1) != NullTupLoc);
+ return (m_node->m_link[0] != NullTupLoc) + (m_node->m_link[1] != NullTupLoc);
}
inline unsigned
@@ -1105,7 +1164,7 @@ Dbtux::NodeHandle::getOccup()
inline int
Dbtux::NodeHandle::getBalance()
{
- return m_node->m_balance;
+ return (int)m_node->m_balance - 1;
}
inline Uint32
@@ -1118,8 +1177,7 @@ inline void
Dbtux::NodeHandle::setLink(unsigned i, TupLoc loc)
{
ndbrequire(i <= 2);
- m_node->m_linkPI[i] = loc.m_pageId;
- m_node->m_linkPO[i] = loc.m_pageOffset;
+ m_node->m_link[i] = loc;
}
inline void
@@ -1141,7 +1199,7 @@ inline void
Dbtux::NodeHandle::setBalance(int b)
{
ndbrequire(abs(b) <= 1);
- m_node->m_balance = b;
+ m_node->m_balance = (unsigned)(b + 1);
}
inline void
@@ -1154,7 +1212,6 @@ inline Dbtux::Data
Dbtux::NodeHandle::getPref()
{
TreeHead& tree = m_frag.m_tree;
- ndbrequire(m_acc >= AccPref);
return tree.getPref(m_node);
}
@@ -1165,11 +1222,6 @@ Dbtux::NodeHandle::getEnt(unsigned pos)
TreeEnt* entList = tree.getEntList(m_node);
const unsigned occup = m_node->m_occup;
ndbrequire(pos < occup);
- if (pos == 0 || pos == occup - 1) {
- ndbrequire(m_acc >= AccPref)
- } else {
- ndbrequire(m_acc == AccFull)
- }
return entList[(1 + pos) % occup];
}
@@ -1217,7 +1269,7 @@ Dbtux::getTupAddr(const Frag& frag, TreeEnt ent)
const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[ent.m_fragBit];
const TupLoc tupLoc = ent.m_tupLoc;
Uint32 tupAddr = NullTupAddr;
- c_tup->tuxGetTupAddr(tableFragPtrI, tupLoc.m_pageId, tupLoc.m_pageOffset, tupAddr);
+ c_tup->tuxGetTupAddr(tableFragPtrI, tupLoc.getPageId(), tupLoc.getPageOffset(), tupAddr);
jamEntry();
return tupAddr;
}
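A standalone sketch of the TupLoc packing introduced above: the 32-bit page id is split into two Uint16 halves so the struct packs tightly next to the 16-bit page offset. Field and method names mirror the patch; the test value is arbitrary.

#include <cassert>
#include <cstdint>

struct TupLoc {
  uint16_t m_pageId1;     // high half of page i-value
  uint16_t m_pageId2;     // low half of page i-value
  uint16_t m_pageOffset;  // page offset in words

  uint32_t getPageId() const {
    return ((uint32_t)m_pageId1 << 16) | m_pageId2;
  }
  void setPageId(uint32_t pageId) {
    m_pageId1 = (pageId >> 16);
    m_pageId2 = (pageId & 0xFFFF);
  }
};

int main()
{
  TupLoc loc;
  loc.setPageId(0x0012ABCD);
  loc.m_pageOffset = 42;
  assert(loc.getPageId() == 0x0012ABCD);   // round-trips through the halves
  return 0;
}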
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp
index 1b8755a1dc4..ddab77b97b5 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp
@@ -18,44 +18,47 @@
#include "Dbtux.hpp"
/*
- * Search key vs node prefix.
+ * Search key vs node prefix or entry
*
- * The comparison starts at given attribute position (in fact 0). The
- * position is updated by number of equal initial attributes found. The
- * prefix may be partial in which case CmpUnknown may be returned.
+ * The comparison starts at given attribute position. The position is
+ * updated by number of equal initial attributes found. The entry data
+ * may be partial in which case CmpUnknown may be returned.
*/
int
-Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, ConstData entryData, unsigned maxlen)
+Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, ConstData searchKey, ConstData entryData, unsigned maxlen)
{
const unsigned numAttrs = frag.m_numAttrs;
const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
// number of words of attribute data left
unsigned len2 = maxlen;
- // skip to right position in search key
- searchKey += start;
+ // skip to right position in search key only
+ for (unsigned i = 0; i < start; i++) {
+ jam();
+ searchKey += AttributeHeaderSize + searchKey.ah().getDataSize();
+ }
int ret = 0;
while (start < numAttrs) {
- if (len2 < AttributeHeaderSize) {
+ if (len2 <= AttributeHeaderSize) {
jam();
ret = NdbSqlUtil::CmpUnknown;
break;
}
len2 -= AttributeHeaderSize;
- if (*searchKey != 0) {
+ if (! searchKey.ah().isNULL()) {
if (! entryData.ah().isNULL()) {
jam();
// current attribute
const DescAttr& descAttr = descEnt.m_descAttr[start];
- const unsigned typeId = descAttr.m_typeId;
// full data size
const unsigned size1 = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc);
ndbrequire(size1 != 0 && size1 == entryData.ah().getDataSize());
const unsigned size2 = min(size1, len2);
len2 -= size2;
// compare
- const Uint32* const p1 = *searchKey;
+ NdbSqlUtil::Cmp* const cmp = c_sqlCmp[start];
+ const Uint32* const p1 = &searchKey[AttributeHeaderSize];
const Uint32* const p2 = &entryData[AttributeHeaderSize];
- ret = NdbSqlUtil::cmp(typeId, p1, p2, size1, size2);
+ ret = (*cmp)(0, p1, p2, size1, size2);
if (ret != 0) {
jam();
break;
@@ -74,87 +77,33 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, Cons
break;
}
}
- searchKey += 1;
+ searchKey += AttributeHeaderSize + searchKey.ah().getDataSize();
entryData += AttributeHeaderSize + entryData.ah().getDataSize();
start++;
}
- // XXX until data format errors are handled
- ndbrequire(ret != NdbSqlUtil::CmpError);
return ret;
}
/*
- * Search key vs tree entry.
+ * Scan bound vs node prefix or entry.
*
- * Start position is updated as in previous routine.
- */
-int
-Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, TableData entryKey)
-{
- const unsigned numAttrs = frag.m_numAttrs;
- const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
- // skip to right position
- searchKey += start;
- entryKey += start;
- int ret = 0;
- while (start < numAttrs) {
- if (*searchKey != 0) {
- if (*entryKey != 0) {
- jam();
- // current attribute
- const DescAttr& descAttr = descEnt.m_descAttr[start];
- const unsigned typeId = descAttr.m_typeId;
- // full data size
- const unsigned size1 = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc);
- // compare
- const Uint32* const p1 = *searchKey;
- const Uint32* const p2 = *entryKey;
- ret = NdbSqlUtil::cmp(typeId, p1, p2, size1, size1);
- if (ret != 0) {
- jam();
- break;
- }
- } else {
- jam();
- // not NULL > NULL
- ret = +1;
- break;
- }
- } else {
- if (*entryKey != 0) {
- jam();
- // NULL < not NULL
- ret = -1;
- break;
- }
- }
- searchKey += 1;
- entryKey += 1;
- start++;
- }
- // XXX until data format errors are handled
- ndbrequire(ret != NdbSqlUtil::CmpError);
- return ret;
-}
-
-/*
- * Scan bound vs node prefix.
+ * Compare lower or upper bound and index entry data. The entry data
+ * may be partial in which case CmpUnknown may be returned. Otherwise
+ * returns -1 if the bound is to the left of the entry and +1 if the
+ * bound is to the right of the entry.
*
- * Compare lower or upper bound and index attribute data. The attribute
- * data may be partial in which case CmpUnknown may be returned.
- * Returns -1 if the boundary is to the left of the compared key and +1
- * if the boundary is to the right of the compared key.
+ * The routine is similar to cmpSearchKey, but 0 is never returned.
+ * Suppose all attributes compare equal. Recall that all bounds except
+ * possibly the last one are non-strict. Use the given bound direction
+ * (0-lower 1-upper) and strictness of last bound to return -1 or +1.
*
- * To get this behaviour we treat equality a little bit special. If the
- * boundary is a lower bound then the boundary is to the left of all
- * equal keys and if it is an upper bound then the boundary is to the
- * right of all equal keys.
+ * Following example illustrates this. We are at (a=2, b=3).
*
- * When searching for the first key we are using the lower bound to try
- * to find the first key that is to the right of the boundary. Then we
- * start scanning from this tuple (including the tuple itself) until we
- * find the first key which is to the right of the boundary. Then we
- * stop and do not include that key in the scan result.
+ * dir bounds strict return
+ * 0 a >= 2 and b >= 3 no -1
+ * 0 a >= 2 and b > 3 yes +1
+ * 1 a <= 2 and b <= 3 no +1
+ * 1 a <= 2 and b < 3 yes -1
*/
int
Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, ConstData entryData, unsigned maxlen)
@@ -164,20 +113,15 @@ Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigne
ndbrequire(dir <= 1);
// number of words of data left
unsigned len2 = maxlen;
- /*
- * No boundary means full scan, low boundary is to the right of all
- * keys. Thus we should always return -1. For upper bound we are to
- * the right of all keys, thus we should always return +1. We achieve
- * this behaviour by initializing type to 4.
- */
+ // in case of no bounds, init last type to something non-strict
unsigned type = 4;
while (boundCount != 0) {
- if (len2 < AttributeHeaderSize) {
+ if (len2 <= AttributeHeaderSize) {
jam();
return NdbSqlUtil::CmpUnknown;
}
len2 -= AttributeHeaderSize;
- // get and skip bound type
+ // get and skip bound type (it is used after the loop)
type = boundInfo[0];
boundInfo += 1;
if (! boundInfo.ah().isNULL()) {
@@ -185,8 +129,8 @@ Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigne
jam();
// current attribute
const unsigned index = boundInfo.ah().getAttributeId();
+ ndbrequire(index < frag.m_numAttrs);
const DescAttr& descAttr = descEnt.m_descAttr[index];
- const unsigned typeId = descAttr.m_typeId;
ndbrequire(entryData.ah().getAttributeId() == descAttr.m_primaryAttrId);
// full data size
const unsigned size1 = boundInfo.ah().getDataSize();
@@ -194,11 +138,10 @@ Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigne
const unsigned size2 = min(size1, len2);
len2 -= size2;
// compare
+ NdbSqlUtil::Cmp* const cmp = c_sqlCmp[index];
const Uint32* const p1 = &boundInfo[AttributeHeaderSize];
const Uint32* const p2 = &entryData[AttributeHeaderSize];
- int ret = NdbSqlUtil::cmp(typeId, p1, p2, size1, size2);
- // XXX until data format errors are handled
- ndbrequire(ret != NdbSqlUtil::CmpError);
+ int ret = (*cmp)(0, p1, p2, size1, size2);
if (ret != 0) {
jam();
return ret;
@@ -220,100 +163,7 @@ Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigne
entryData += AttributeHeaderSize + entryData.ah().getDataSize();
boundCount -= 1;
}
- if (dir == 0) {
- jam();
- /*
- * Looking for the lower bound. If strict lower bound then the
- * boundary is to the right of the compared key and otherwise (equal
- * included in range) then the boundary is to the left of the key.
- */
- if (type == 1) {
- jam();
- return +1;
- }
- return -1;
- } else {
- jam();
- /*
- * Looking for the upper bound. If strict upper bound then the
- * boundary is to the left of all equal keys and otherwise (equal
- * included in the range) then the boundary is to the right of all
- * equal keys.
- */
- if (type == 3) {
- jam();
- return -1;
- }
- return +1;
- }
-}
-
-/*
- * Scan bound vs tree entry.
- */
-int
-Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, TableData entryKey)
-{
- const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
- // direction 0-lower 1-upper
- ndbrequire(dir <= 1);
- // initialize type to equality
- unsigned type = 4;
- while (boundCount != 0) {
- // get and skip bound type
- type = boundInfo[0];
- boundInfo += 1;
- if (! boundInfo.ah().isNULL()) {
- if (*entryKey != 0) {
- jam();
- // current attribute
- const unsigned index = boundInfo.ah().getAttributeId();
- const DescAttr& descAttr = descEnt.m_descAttr[index];
- const unsigned typeId = descAttr.m_typeId;
- // full data size
- const unsigned size1 = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc);
- // compare
- const Uint32* const p1 = &boundInfo[AttributeHeaderSize];
- const Uint32* const p2 = *entryKey;
- int ret = NdbSqlUtil::cmp(typeId, p1, p2, size1, size1);
- // XXX until data format errors are handled
- ndbrequire(ret != NdbSqlUtil::CmpError);
- if (ret != 0) {
- jam();
- return ret;
- }
- } else {
- jam();
- // not NULL > NULL
- return +1;
- }
- } else {
- jam();
- if (*entryKey != 0) {
- jam();
- // NULL < not NULL
- return -1;
- }
- }
- boundInfo += AttributeHeaderSize + boundInfo.ah().getDataSize();
- entryKey += 1;
- boundCount -= 1;
- }
- if (dir == 0) {
- // lower bound
- jam();
- if (type == 1) {
- jam();
- return +1;
- }
- return -1;
- } else {
- // upper bound
- jam();
- if (type == 3) {
- jam();
- return -1;
- }
- return +1;
- }
+ // all attributes were equal
+ const int strict = (type & 0x1);
+ return (dir == 0 ? (strict == 0 ? -1 : +1) : (strict == 0 ? +1 : -1));
}
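
The final return above can be exercised in isolation. A minimal standalone sketch (not part of the patch; the bound-type encoding 0 = >=, 1 = >, 2 = <=, 3 = <, 4 = =, with bit 0 as the strict bit, is inferred from the surrounding code):

    // sketch of the equal-keys rule in cmpScanBound
    #include <cassert>

    static int resolveEqual(unsigned dir, unsigned lastType)
    {
      const int strict = (lastType & 0x1);
      // a non-strict lower bound lies left of all equal keys, a strict one right;
      // mirrored for the upper bound (dir == 1)
      return (dir == 0 ? (strict == 0 ? -1 : +1) : (strict == 0 ? +1 : -1));
    }

    int main()
    {
      assert(resolveEqual(0, 0) == -1);  // a >= 2 and b >= 3
      assert(resolveEqual(0, 1) == +1);  // a >= 2 and b >  3
      assert(resolveEqual(1, 2) == +1);  // a <= 2 and b <= 3
      assert(resolveEqual(1, 3) == -1);  // a <= 2 and b <  3
      return 0;
    }
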
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
index 11f4f12b7f6..c5c22264460 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
@@ -98,7 +98,7 @@ Dbtux::printTree(Signal* signal, Frag& frag, NdbOut& out)
strcpy(par.m_path, ".");
par.m_side = 2;
par.m_parent = NullTupLoc;
- printNode(signal, frag, out, tree.m_root, par);
+ printNode(frag, out, tree.m_root, par);
out.m_out->flush();
if (! par.m_ok) {
if (debugFile == 0) {
@@ -114,7 +114,7 @@ Dbtux::printTree(Signal* signal, Frag& frag, NdbOut& out)
}
void
-Dbtux::printNode(Signal* signal, Frag& frag, NdbOut& out, TupLoc loc, PrintPar& par)
+Dbtux::printNode(Frag& frag, NdbOut& out, TupLoc loc, PrintPar& par)
{
if (loc == NullTupLoc) {
par.m_depth = 0;
@@ -122,7 +122,7 @@ Dbtux::printNode(Signal* signal, Frag& frag, NdbOut& out, TupLoc loc, PrintPar&
}
TreeHead& tree = frag.m_tree;
NodeHandle node(frag);
- selectNode(signal, node, loc, AccFull);
+ selectNode(node, loc);
out << par.m_path << " " << node << endl;
// check children
PrintPar cpar[2];
@@ -132,7 +132,7 @@ Dbtux::printNode(Signal* signal, Frag& frag, NdbOut& out, TupLoc loc, PrintPar&
cpar[i].m_side = i;
cpar[i].m_depth = 0;
cpar[i].m_parent = loc;
- printNode(signal, frag, out, node.getLink(i), cpar[i]);
+ printNode(frag, out, node.getLink(i), cpar[i]);
if (! cpar[i].m_ok) {
par.m_ok = false;
}
@@ -178,16 +178,19 @@ Dbtux::printNode(Signal* signal, Frag& frag, NdbOut& out, TupLoc loc, PrintPar&
out << "occupancy " << node.getOccup() << " of interior node";
out << " less than min " << tree.m_minOccup << endl;
}
- // check missed half-leaf/leaf merge
+#ifdef dbtux_totally_groks_t_trees
+ // check missed semi-leaf/leaf merge
for (unsigned i = 0; i <= 1; i++) {
if (node.getLink(i) != NullTupLoc &&
node.getLink(1 - i) == NullTupLoc &&
- node.getOccup() + cpar[i].m_occup <= tree.m_maxOccup) {
+ // our semi-leaf seems to satisfy interior minOccup condition
+ node.getOccup() < tree.m_minOccup) {
par.m_ok = false;
out << par.m_path << sep;
out << "missed merge with child " << i << endl;
}
}
+#endif
// check inline prefix
{ ConstData data1 = node.getPref();
Uint32 data2[MaxPrefSize];
@@ -207,14 +210,10 @@ Dbtux::printNode(Signal* signal, Frag& frag, NdbOut& out, TupLoc loc, PrintPar&
}
// check ordering within node
for (unsigned j = 1; j < node.getOccup(); j++) {
- unsigned start = 0;
const TreeEnt ent1 = node.getEnt(j - 1);
const TreeEnt ent2 = node.getEnt(j);
- if (j == 1) {
- readKeyAttrs(frag, ent1, start, c_searchKey);
- } else {
- memcpy(c_searchKey, c_entryKey, frag.m_numAttrs << 2);
- }
+ unsigned start = 0;
+ readKeyAttrs(frag, ent1, start, c_searchKey);
readKeyAttrs(frag, ent2, start, c_entryKey);
int ret = cmpSearchKey(frag, start, c_searchKey, c_entryKey);
if (ret == 0)
@@ -260,8 +259,8 @@ operator<<(NdbOut& out, const Dbtux::TupLoc& loc)
if (loc == Dbtux::NullTupLoc) {
out << "null";
} else {
- out << dec << loc.m_pageId;
- out << "." << dec << loc.m_pageOffset;
+ out << dec << loc.getPageId();
+ out << "." << dec << loc.getPageOffset();
}
return out;
}
@@ -278,16 +277,13 @@ operator<<(NdbOut& out, const Dbtux::TreeEnt& ent)
NdbOut&
operator<<(NdbOut& out, const Dbtux::TreeNode& node)
{
- Dbtux::TupLoc link0(node.m_linkPI[0], node.m_linkPO[0]);
- Dbtux::TupLoc link1(node.m_linkPI[1], node.m_linkPO[1]);
- Dbtux::TupLoc link2(node.m_linkPI[2], node.m_linkPO[2]);
out << "[TreeNode " << hex << &node;
- out << " [left " << link0 << "]";
- out << " [right " << link1 << "]";
- out << " [up " << link2 << "]";
+ out << " [left " << node.m_link[0] << "]";
+ out << " [right " << node.m_link[1] << "]";
+ out << " [up " << node.m_link[2] << "]";
out << " [side " << dec << node.m_side << "]";
out << " [occup " << dec << node.m_occup << "]";
- out << " [balance " << dec << (int)node.m_balance << "]";
+ out << " [balance " << dec << (int)node.m_balance - 1 << "]";
out << " [nodeScan " << hex << node.m_nodeScan << "]";
out << "]";
return out;
@@ -317,7 +313,6 @@ operator<<(NdbOut& out, const Dbtux::TreePos& pos)
out << " [pos " << dec << pos.m_pos << "]";
out << " [match " << dec << pos.m_match << "]";
out << " [dir " << dec << pos.m_dir << "]";
- out << " [ent " << pos.m_ent << "]";
out << "]";
return out;
}
@@ -354,6 +349,7 @@ operator<<(NdbOut& out, const Dbtux::ScanOp& scan)
out << " [lockMode " << dec << scan.m_lockMode << "]";
out << " [keyInfo " << dec << scan.m_keyInfo << "]";
out << " [pos " << scan.m_scanPos << "]";
+ out << " [ent " << scan.m_scanEnt << "]";
for (unsigned i = 0; i <= 1; i++) {
out << " [bound " << dec << i;
Dbtux::ScanBound& bound = *scan.m_bound[i];
@@ -414,27 +410,21 @@ operator<<(NdbOut& out, const Dbtux::NodeHandle& node)
const Dbtux::TreeHead& tree = frag.m_tree;
out << "[NodeHandle " << hex << &node;
out << " [loc " << node.m_loc << "]";
- out << " [acc " << dec << node.m_acc << "]";
out << " [node " << *node.m_node << "]";
- if (node.m_acc >= Dbtux::AccPref) {
- const Uint32* data;
- out << " [pref";
- data = (const Uint32*)node.m_node + Dbtux::NodeHeadSize;
- for (unsigned j = 0; j < tree.m_prefSize; j++)
- out << " " << hex << data[j];
- out << "]";
- out << " [entList";
- unsigned numpos = node.m_node->m_occup;
- if (node.m_acc < Dbtux::AccFull && numpos > 2) {
- numpos = 2;
- out << "(" << dec << numpos << ")";
- }
- data = (const Uint32*)node.m_node + Dbtux::NodeHeadSize + tree.m_prefSize;
- const Dbtux::TreeEnt* entList = (const Dbtux::TreeEnt*)data;
- for (unsigned pos = 0; pos < numpos; pos++)
- out << " " << entList[pos];
- out << "]";
- }
+ const Uint32* data;
+ out << " [pref";
+ data = (const Uint32*)node.m_node + Dbtux::NodeHeadSize;
+ for (unsigned j = 0; j < tree.m_prefSize; j++)
+ out << " " << hex << data[j];
+ out << "]";
+ out << " [entList";
+ unsigned numpos = node.m_node->m_occup;
+ data = (const Uint32*)node.m_node + Dbtux::NodeHeadSize + tree.m_prefSize;
+ const Dbtux::TreeEnt* entList = (const Dbtux::TreeEnt*)data;
+ // print entries in logical order
+ for (unsigned pos = 1; pos <= numpos; pos++)
+ out << " " << entList[pos % numpos];
+ out << "]";
out << "]";
return out;
}
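
The entList print above visits physical slots 1 .. numpos-1 and then slot 0, which the surrounding code treats as the logical entry order (slot 0 apparently serving as the rotation slot used by the node operations in DbtuxNode.cpp). A tiny illustrative-only sketch of the index sequence:

    #include <cstdio>

    int main()
    {
      const unsigned numpos = 4;          // assumed occupancy for the example
      for (unsigned pos = 1; pos <= numpos; pos++)
        printf("%u ", pos % numpos);      // prints: 1 2 3 0
      printf("\n");
      return 0;
    }
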
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
index f6f1610c8c1..ded02696a89 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
@@ -16,8 +16,6 @@
#define DBTUX_GEN_CPP
#include "Dbtux.hpp"
-#include <signaldata/TuxContinueB.hpp>
-#include <signaldata/TuxContinueB.hpp>
Dbtux::Dbtux(const Configuration& conf) :
SimulatedBlock(DBTUX, conf),
@@ -202,8 +200,9 @@ Dbtux::execREAD_CONFIG_REQ(Signal* signal)
}
// allocate buffers
c_keyAttrs = (Uint32*)allocRecord("c_keyAttrs", sizeof(Uint32), MaxIndexAttributes);
- c_searchKey = (TableData)allocRecord("c_searchKey", sizeof(Uint32*), MaxIndexAttributes);
- c_entryKey = (TableData)allocRecord("c_entryKey", sizeof(Uint32*), MaxIndexAttributes);
+ c_sqlCmp = (NdbSqlUtil::Cmp**)allocRecord("c_sqlCmp", sizeof(NdbSqlUtil::Cmp*), MaxIndexAttributes);
+ c_searchKey = (Uint32*)allocRecord("c_searchKey", sizeof(Uint32), MaxAttrDataSize);
+ c_entryKey = (Uint32*)allocRecord("c_entryKey", sizeof(Uint32), MaxAttrDataSize);
c_dataBuffer = (Uint32*)allocRecord("c_dataBuffer", sizeof(Uint64), (MaxAttrDataSize + 1) >> 1);
// ack
ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
@@ -218,7 +217,8 @@ Dbtux::execREAD_CONFIG_REQ(Signal* signal)
void
Dbtux::setKeyAttrs(const Frag& frag)
{
- Data keyAttrs = c_keyAttrs; // global
+ Data keyAttrs = c_keyAttrs; // global
+ NdbSqlUtil::Cmp** sqlCmp = c_sqlCmp; // global
const unsigned numAttrs = frag.m_numAttrs;
const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
for (unsigned i = 0; i < numAttrs; i++) {
@@ -227,75 +227,71 @@ Dbtux::setKeyAttrs(const Frag& frag)
// set attr id and fixed size
keyAttrs.ah() = AttributeHeader(descAttr.m_primaryAttrId, size);
keyAttrs += 1;
+ // set comparison method pointer
+ const NdbSqlUtil::Type& sqlType = NdbSqlUtil::getTypeBinary(descAttr.m_typeId);
+ ndbrequire(sqlType.m_cmp != 0);
+ *(sqlCmp++) = sqlType.m_cmp;
}
}
void
-Dbtux::readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, TableData keyData)
+Dbtux::readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, Data keyData)
{
ConstData keyAttrs = c_keyAttrs; // global
const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[ent.m_fragBit];
const TupLoc tupLoc = ent.m_tupLoc;
const Uint32 tupVersion = ent.m_tupVersion;
ndbrequire(start < frag.m_numAttrs);
- const unsigned numAttrs = frag.m_numAttrs - start;
- // start applies to both keys and output data
+ const Uint32 numAttrs = frag.m_numAttrs - start;
+ // skip to start position in keyAttrs only
keyAttrs += start;
- keyData += start;
- c_tup->tuxReadAttrs(tableFragPtrI, tupLoc.m_pageId, tupLoc.m_pageOffset, tupVersion, numAttrs, keyAttrs, keyData);
+ int ret = c_tup->tuxReadAttrs(tableFragPtrI, tupLoc.getPageId(), tupLoc.getPageOffset(), tupVersion, keyAttrs, numAttrs, keyData);
jamEntry();
+ // TODO handle error
+ ndbrequire(ret > 0);
}
void
-Dbtux::readTablePk(const Frag& frag, TreeEnt ent, unsigned& pkSize, Data pkData)
+Dbtux::readTablePk(const Frag& frag, TreeEnt ent, Data pkData, unsigned& pkSize)
{
const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[ent.m_fragBit];
const TupLoc tupLoc = ent.m_tupLoc;
- Uint32 size = 0;
- c_tup->tuxReadKeys(tableFragPtrI, tupLoc.m_pageId, tupLoc.m_pageOffset, &size, pkData);
- ndbrequire(size != 0);
- pkSize = size;
+ int ret = c_tup->tuxReadPk(tableFragPtrI, tupLoc.getPageId(), tupLoc.getPageOffset(), pkData);
+ jamEntry();
+ // TODO handle error
+ ndbrequire(ret > 0);
+ pkSize = ret;
}
/*
- * Input is pointers to table attributes. Output is array of attribute
- * data with headers. Copies whatever fits.
+ * Copy attribute data with headers. Input is all index key data.
+ * Copies whatever fits.
*/
void
-Dbtux::copyAttrs(const Frag& frag, TableData data1, Data data2, unsigned maxlen2)
+Dbtux::copyAttrs(const Frag& frag, ConstData data1, Data data2, unsigned maxlen2)
{
- ConstData keyAttrs = c_keyAttrs; // global
- const unsigned numAttrs = frag.m_numAttrs;
+ unsigned n = frag.m_numAttrs;
unsigned len2 = maxlen2;
- for (unsigned n = 0; n < numAttrs; n++) {
+ while (n != 0) {
jam();
- const unsigned attrId = keyAttrs.ah().getAttributeId();
- const unsigned dataSize = keyAttrs.ah().getDataSize();
- const Uint32* const p1 = *data1;
- if (p1 != 0) {
- if (len2 == 0)
- return;
- data2.ah() = AttributeHeader(attrId, dataSize);
- data2 += 1;
- len2 -= 1;
- unsigned n = dataSize;
- for (unsigned i = 0; i < dataSize; i++) {
- if (len2 == 0)
- return;
- *data2 = p1[i];
- data2 += 1;
- len2 -= 1;
- }
- } else {
+ const unsigned dataSize = data1.ah().getDataSize();
+ // copy header
+ if (len2 == 0)
+ return;
+ data2[0] = data1[0];
+ data1 += 1;
+ data2 += 1;
+ len2 -= 1;
+ // copy data
+ for (unsigned i = 0; i < dataSize; i++) {
if (len2 == 0)
return;
- data2.ah() = AttributeHeader(attrId, 0);
- data2.ah().setNULL();
- data2 += 1;
+ data2[i] = data1[i];
len2 -= 1;
}
- keyAttrs += 1;
- data1 += 1;
+ data1 += dataSize;
+ data2 += dataSize;
+ n -= 1;
}
#ifdef VM_TRACE
memset(data2, DataFillByte, len2 << 2);
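
The c_sqlCmp cache set up in setKeyAttrs() lets the compare loops call one function pointer per key attribute instead of switching on the type id each time. A rough sketch under assumed, simplified types (the signature here only mirrors the call `(*cmp)(0, p1, p2, size1, size2)` seen above, it is not the real NdbSqlUtil::Cmp declaration):

    // per-attribute comparator table, sketch only
    typedef int Cmp(const void* info, const unsigned* p1, const unsigned* p2,
                    unsigned full, unsigned size);

    static int cmpUint32(const void*, const unsigned* p1, const unsigned* p2,
                         unsigned, unsigned size)
    {
      for (unsigned i = 0; i < size; i++)
        if (p1[i] != p2[i])
          return p1[i] < p2[i] ? -1 : +1;
      return 0;
    }

    int main()
    {
      Cmp* table[2] = { cmpUint32, cmpUint32 };   // filled once per fragment
      const unsigned a[1] = { 7 }, b[1] = { 9 };
      return (*table[0])(0, a, b, 1, 1) < 0 ? 0 : 1;
    }
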
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp
index 24b030bf8ec..30afb51e7d7 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp
@@ -117,10 +117,10 @@ Dbtux::execTUX_MAINT_REQ(Signal* signal)
switch (opCode) {
case TuxMaintReq::OpAdd:
jam();
- searchToAdd(signal, frag, c_searchKey, ent, treePos);
+ searchToAdd(frag, c_searchKey, ent, treePos);
#ifdef VM_TRACE
if (debugFlags & DebugMaint) {
- debugOut << treePos << endl;
+ debugOut << treePos << (treePos.m_match ? " - error" : "") << endl;
}
#endif
if (treePos.m_match) {
@@ -133,8 +133,8 @@ Dbtux::execTUX_MAINT_REQ(Signal* signal)
break;
}
/*
- * At most one new node is inserted in the operation. We keep one
- * free node pre-allocated so the operation cannot fail.
+ * At most one new node is inserted in the operation. Pre-allocate
+ * it so that the operation cannot fail.
*/
if (frag.m_freeLoc == NullTupLoc) {
jam();
@@ -144,17 +144,19 @@ Dbtux::execTUX_MAINT_REQ(Signal* signal)
jam();
break;
}
+ // link to freelist
+ node.setLink(0, frag.m_freeLoc);
frag.m_freeLoc = node.m_loc;
ndbrequire(frag.m_freeLoc != NullTupLoc);
}
- treeAdd(signal, frag, treePos, ent);
+ treeAdd(frag, treePos, ent);
break;
case TuxMaintReq::OpRemove:
jam();
- searchToRemove(signal, frag, c_searchKey, ent, treePos);
+ searchToRemove(frag, c_searchKey, ent, treePos);
#ifdef VM_TRACE
if (debugFlags & DebugMaint) {
- debugOut << treePos << endl;
+ debugOut << treePos << (! treePos.m_match ? " - error" : "") << endl;
}
#endif
if (! treePos.m_match) {
@@ -166,7 +168,7 @@ Dbtux::execTUX_MAINT_REQ(Signal* signal)
}
break;
}
- treeRemove(signal, frag, treePos);
+ treeRemove(frag, treePos);
break;
default:
ndbrequire(false);
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
index b30b555ccad..1577c5045e0 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
@@ -53,11 +53,7 @@ Dbtux::execTUXFRAGREQ(Signal* signal)
}
// get new operation record
c_fragOpPool.seize(fragOpPtr);
- if (fragOpPtr.i == RNIL) {
- jam();
- errorCode = TuxFragRef::NoFreeFragmentOper;
- break;
- }
+ ndbrequire(fragOpPtr.i != RNIL);
new (fragOpPtr.p) FragOp();
fragOpPtr.p->m_userPtr = req->userPtr;
fragOpPtr.p->m_userRef = req->userRef;
@@ -66,11 +62,7 @@ Dbtux::execTUXFRAGREQ(Signal* signal)
fragOpPtr.p->m_fragNo = indexPtr.p->m_numFrags;
fragOpPtr.p->m_numAttrsRecvd = 0;
// check if index has place for more fragments
- if (indexPtr.p->m_numFrags == MaxIndexFragments) {
- jam();
- errorCode = TuxFragRef::NoFreeIndexFragment;
- break;
- }
+ ndbrequire(indexPtr.p->m_numFrags < MaxIndexFragments);
// seize new fragment record
FragPtr fragPtr;
c_fragPool.seize(fragPtr);
@@ -186,19 +178,31 @@ Dbtux::execTUX_ADD_ATTRREQ(Signal* signal)
descAttr.m_attrDesc = req->attrDescriptor;
descAttr.m_primaryAttrId = req->primaryAttrId;
descAttr.m_typeId = req->extTypeInfo & 0xFF;
+ descAttr.m_charset = (req->extTypeInfo >> 16);
#ifdef VM_TRACE
if (debugFlags & DebugMeta) {
debugOut << "Add frag " << fragPtr.i << " attr " << attrId << " " << descAttr << endl;
}
#endif
- // check if type is valid and has a comparison method
- const NdbSqlUtil::Type& type = NdbSqlUtil::type(descAttr.m_typeId);
+ // check that type is valid and has a binary comparison method
+ const NdbSqlUtil::Type& type = NdbSqlUtil::getTypeBinary(descAttr.m_typeId);
if (type.m_typeId == NdbSqlUtil::Type::Undefined ||
type.m_cmp == 0) {
jam();
errorCode = TuxAddAttrRef::InvalidAttributeType;
break;
}
+#ifdef dbtux_uses_charset
+ if (descAttr.m_charset != 0) {
+ CHARSET_INFO *cs = get_charset(descAttr.m_charset, MYF(0));
+ // here use the non-binary type
+ if (! NdbSqlUtil::usable_in_ordered_index(descAttr.m_typeId, cs)) {
+ jam();
+ errorCode = TuxAddAttrRef::InvalidCharset;
+ break;
+ }
+ }
+#endif
if (indexPtr.p->m_numAttrs == fragOpPtr.p->m_numAttrsRecvd) {
jam();
// initialize tree header
@@ -207,11 +211,7 @@ Dbtux::execTUX_ADD_ATTRREQ(Signal* signal)
// make these configurable later
tree.m_nodeSize = MAX_TTREE_NODE_SIZE;
tree.m_prefSize = MAX_TTREE_PREF_SIZE;
-#ifdef dbtux_min_occup_less_max_occup
const unsigned maxSlack = MAX_TTREE_NODE_SLACK;
-#else
- const unsigned maxSlack = 0;
-#endif
// size up to and including first 2 entries
const unsigned pref = tree.getSize(AccPref);
if (! (pref <= tree.m_nodeSize)) {
@@ -231,6 +231,20 @@ Dbtux::execTUX_ADD_ATTRREQ(Signal* signal)
tree.m_minOccup = tree.m_maxOccup - maxSlack;
// root node does not exist (also set by ctor)
tree.m_root = NullTupLoc;
+#ifdef VM_TRACE
+ if (debugFlags & DebugMeta) {
+ if (fragOpPtr.p->m_fragNo == 0) {
+ debugOut << "Index id=" << indexPtr.i;
+ debugOut << " nodeSize=" << tree.m_nodeSize;
+ debugOut << " headSize=" << NodeHeadSize;
+ debugOut << " prefSize=" << tree.m_prefSize;
+ debugOut << " entrySize=" << TreeEntSize;
+ debugOut << " minOccup=" << tree.m_minOccup;
+ debugOut << " maxOccup=" << tree.m_maxOccup;
+ debugOut << endl;
+ }
+ }
+#endif
// fragment is defined
c_fragOpPool.release(fragOpPtr);
}
@@ -295,6 +309,22 @@ Dbtux::execDROP_TAB_REQ(Signal* signal)
const DropTabReq reqCopy = *(const DropTabReq*)signal->getDataPtr();
const DropTabReq* const req = &reqCopy;
IndexPtr indexPtr;
+
+ Uint32 tableId = req->tableId;
+ Uint32 senderRef = req->senderRef;
+ Uint32 senderData = req->senderData;
+ if (tableId >= c_indexPool.getSize()) {
+ jam();
+ // reply to sender
+ DropTabConf* const conf = (DropTabConf*)signal->getDataPtrSend();
+ conf->senderRef = reference();
+ conf->senderData = senderData;
+ conf->tableId = tableId;
+ sendSignal(senderRef, GSN_DROP_TAB_CONF,
+ signal, DropTabConf::SignalLength, JBB);
+ return;
+ }
+
c_indexPool.getPtr(indexPtr, req->tableId);
// drop works regardless of index state
#ifdef VM_TRACE
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
index a1bfa2179bb..389192fd0cf 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
@@ -24,8 +24,8 @@ int
Dbtux::allocNode(Signal* signal, NodeHandle& node)
{
Frag& frag = node.m_frag;
- Uint32 pageId = NullTupLoc.m_pageId;
- Uint32 pageOffset = NullTupLoc.m_pageOffset;
+ Uint32 pageId = NullTupLoc.getPageId();
+ Uint32 pageOffset = NullTupLoc.getPageOffset();
Uint32* node32 = 0;
int errorCode = c_tup->tuxAllocNode(signal, frag.m_tupIndexFragPtrI, pageId, pageOffset, node32);
jamEntry();
@@ -33,55 +33,39 @@ Dbtux::allocNode(Signal* signal, NodeHandle& node)
jam();
node.m_loc = TupLoc(pageId, pageOffset);
node.m_node = reinterpret_cast<TreeNode*>(node32);
- node.m_acc = AccNone;
ndbrequire(node.m_loc != NullTupLoc && node.m_node != 0);
}
return errorCode;
}
/*
- * Access more of the node.
- */
-void
-Dbtux::accessNode(Signal* signal, NodeHandle& node, AccSize acc)
-{
- ndbrequire(node.m_loc != NullTupLoc && node.m_node != 0);
- if (node.m_acc >= acc)
- return;
- // XXX could do prefetch
- node.m_acc = acc;
-}
-
-/*
* Set handle to point to existing node.
*/
void
-Dbtux::selectNode(Signal* signal, NodeHandle& node, TupLoc loc, AccSize acc)
+Dbtux::selectNode(NodeHandle& node, TupLoc loc)
{
Frag& frag = node.m_frag;
ndbrequire(loc != NullTupLoc);
- Uint32 pageId = loc.m_pageId;
- Uint32 pageOffset = loc.m_pageOffset;
+ Uint32 pageId = loc.getPageId();
+ Uint32 pageOffset = loc.getPageOffset();
Uint32* node32 = 0;
c_tup->tuxGetNode(frag.m_tupIndexFragPtrI, pageId, pageOffset, node32);
jamEntry();
node.m_loc = loc;
node.m_node = reinterpret_cast<TreeNode*>(node32);
- node.m_acc = AccNone;
ndbrequire(node.m_loc != NullTupLoc && node.m_node != 0);
- accessNode(signal, node, acc);
}
/*
- * Set handle to point to new node. Uses the pre-allocated node.
+ * Set handle to point to new node. Uses a pre-allocated node.
*/
void
-Dbtux::insertNode(Signal* signal, NodeHandle& node, AccSize acc)
+Dbtux::insertNode(NodeHandle& node)
{
Frag& frag = node.m_frag;
- TupLoc loc = frag.m_freeLoc;
- frag.m_freeLoc = NullTupLoc;
- selectNode(signal, node, loc, acc);
+ // unlink from freelist
+ selectNode(node, frag.m_freeLoc);
+ frag.m_freeLoc = node.getLink(0);
new (node.m_node) TreeNode();
#ifdef VM_TRACE
TreeHead& tree = frag.m_tree;
@@ -92,20 +76,17 @@ Dbtux::insertNode(Signal* signal, NodeHandle& node, AccSize acc)
}
/*
- * Delete existing node.
+ * Delete existing node. Simply put it on the freelist.
*/
void
-Dbtux::deleteNode(Signal* signal, NodeHandle& node)
+Dbtux::deleteNode(NodeHandle& node)
{
Frag& frag = node.m_frag;
ndbrequire(node.getOccup() == 0);
- TupLoc loc = node.m_loc;
- Uint32 pageId = loc.m_pageId;
- Uint32 pageOffset = loc.m_pageOffset;
- Uint32* node32 = reinterpret_cast<Uint32*>(node.m_node);
- c_tup->tuxFreeNode(signal, frag.m_tupIndexFragPtrI, pageId, pageOffset, node32);
- jamEntry();
- // invalidate handle and storage
+ // link to freelist
+ node.setLink(0, frag.m_freeLoc);
+ frag.m_freeLoc = node.m_loc;
+ // invalidate the handle
node.m_loc = NullTupLoc;
node.m_node = 0;
}
@@ -115,7 +96,7 @@ Dbtux::deleteNode(Signal* signal, NodeHandle& node)
* attribute headers for now. XXX use null mask instead
*/
void
-Dbtux::setNodePref(Signal* signal, NodeHandle& node)
+Dbtux::setNodePref(NodeHandle& node)
{
const Frag& frag = node.m_frag;
const TreeHead& tree = frag.m_tree;
@@ -133,18 +114,45 @@ Dbtux::setNodePref(Signal* signal, NodeHandle& node)
* v
* A B C D E _ _ => A B C X D E _
* 0 1 2 3 4 5 6 0 1 2 3 4 5 6
+ *
+ * Add list of scans at the new entry.
*/
void
-Dbtux::nodePushUp(Signal* signal, NodeHandle& node, unsigned pos, const TreeEnt& ent)
+Dbtux::nodePushUp(NodeHandle& node, unsigned pos, const TreeEnt& ent, Uint32 scanList)
{
Frag& frag = node.m_frag;
TreeHead& tree = frag.m_tree;
const unsigned occup = node.getOccup();
ndbrequire(occup < tree.m_maxOccup && pos <= occup);
- // fix scans
+ // fix old scans
+ if (node.getNodeScan() != RNIL)
+ nodePushUpScans(node, pos);
+ // fix node
+ TreeEnt* const entList = tree.getEntList(node.m_node);
+ entList[occup] = entList[0];
+ TreeEnt* const tmpList = entList + 1;
+ for (unsigned i = occup; i > pos; i--) {
+ jam();
+ tmpList[i] = tmpList[i - 1];
+ }
+ tmpList[pos] = ent;
+ entList[0] = entList[occup + 1];
+ node.setOccup(occup + 1);
+ // add new scans
+ if (scanList != RNIL)
+ addScanList(node, pos, scanList);
+ // fix prefix
+ if (occup == 0 || pos == 0)
+ setNodePref(node);
+}
+
+void
+Dbtux::nodePushUpScans(NodeHandle& node, unsigned pos)
+{
+ const unsigned occup = node.getOccup();
ScanOpPtr scanPtr;
scanPtr.i = node.getNodeScan();
- while (scanPtr.i != RNIL) {
+ do {
jam();
c_scanOpPool.getPtr(scanPtr);
TreePos& scanPos = scanPtr.p->m_scanPos;
@@ -160,21 +168,7 @@ Dbtux::nodePushUp(Signal* signal, NodeHandle& node, unsigned pos, const TreeEnt&
scanPos.m_pos++;
}
scanPtr.i = scanPtr.p->m_nodeScan;
- }
- // fix node
- TreeEnt* const entList = tree.getEntList(node.m_node);
- entList[occup] = entList[0];
- TreeEnt* const tmpList = entList + 1;
- for (unsigned i = occup; i > pos; i--) {
- jam();
- tmpList[i] = tmpList[i - 1];
- }
- tmpList[pos] = ent;
- entList[0] = entList[occup + 1];
- node.setOccup(occup + 1);
- // fix prefix
- if (occup == 0 || pos == 0)
- setNodePref(signal, node);
+ } while (scanPtr.i != RNIL);
}
/*
@@ -185,42 +179,55 @@ Dbtux::nodePushUp(Signal* signal, NodeHandle& node, unsigned pos, const TreeEnt&
* ^ ^
* A B C D E F _ => A B C E F _ _
* 0 1 2 3 4 5 6 0 1 2 3 4 5 6
+ *
+ * Scans at the removed entry are returned if a non-zero list location is
+ * passed, or else they are moved forward.
*/
void
-Dbtux::nodePopDown(Signal* signal, NodeHandle& node, unsigned pos, TreeEnt& ent)
+Dbtux::nodePopDown(NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32* scanList)
{
Frag& frag = node.m_frag;
TreeHead& tree = frag.m_tree;
const unsigned occup = node.getOccup();
ndbrequire(occup <= tree.m_maxOccup && pos < occup);
- ScanOpPtr scanPtr;
- // move scans whose entry disappears
- scanPtr.i = node.getNodeScan();
- while (scanPtr.i != RNIL) {
+ if (node.getNodeScan() != RNIL) {
+ // remove or move scans at this position
+ if (scanList == 0)
+ moveScanList(node, pos);
+ else
+ removeScanList(node, pos, *scanList);
+ // fix other scans
+ if (node.getNodeScan() != RNIL)
+ nodePopDownScans(node, pos);
+ }
+ // fix node
+ TreeEnt* const entList = tree.getEntList(node.m_node);
+ entList[occup] = entList[0];
+ TreeEnt* const tmpList = entList + 1;
+ ent = tmpList[pos];
+ for (unsigned i = pos; i < occup - 1; i++) {
jam();
- c_scanOpPool.getPtr(scanPtr);
- TreePos& scanPos = scanPtr.p->m_scanPos;
- ndbrequire(scanPos.m_loc == node.m_loc && scanPos.m_pos < occup);
- const Uint32 nextPtrI = scanPtr.p->m_nodeScan;
- if (scanPos.m_pos == pos) {
- jam();
-#ifdef VM_TRACE
- if (debugFlags & DebugScan) {
- debugOut << "Move scan " << scanPtr.i << " " << *scanPtr.p << endl;
- debugOut << "At popDown pos=" << pos << " " << node << endl;
- }
-#endif
- scanNext(signal, scanPtr);
- }
- scanPtr.i = nextPtrI;
+ tmpList[i] = tmpList[i + 1];
}
- // fix other scans
+ entList[0] = entList[occup - 1];
+ node.setOccup(occup - 1);
+ // fix prefix
+ if (occup != 1 && pos == 0)
+ setNodePref(node);
+}
+
+void
+Dbtux::nodePopDownScans(NodeHandle& node, unsigned pos)
+{
+ const unsigned occup = node.getOccup();
+ ScanOpPtr scanPtr;
scanPtr.i = node.getNodeScan();
- while (scanPtr.i != RNIL) {
+ do {
jam();
c_scanOpPool.getPtr(scanPtr);
TreePos& scanPos = scanPtr.p->m_scanPos;
ndbrequire(scanPos.m_loc == node.m_loc && scanPos.m_pos < occup);
+ // handled before
ndbrequire(scanPos.m_pos != pos);
if (scanPos.m_pos > pos) {
jam();
@@ -233,21 +240,7 @@ Dbtux::nodePopDown(Signal* signal, NodeHandle& node, unsigned pos, TreeEnt& ent)
scanPos.m_pos--;
}
scanPtr.i = scanPtr.p->m_nodeScan;
- }
- // fix node
- TreeEnt* const entList = tree.getEntList(node.m_node);
- entList[occup] = entList[0];
- TreeEnt* const tmpList = entList + 1;
- ent = tmpList[pos];
- for (unsigned i = pos; i < occup - 1; i++) {
- jam();
- tmpList[i] = tmpList[i + 1];
- }
- entList[0] = entList[occup - 1];
- node.setOccup(occup - 1);
- // fix prefix
- if (occup != 1 && pos == 0)
- setNodePref(signal, node);
+ } while (scanPtr.i != RNIL);
}
/*
@@ -258,43 +251,52 @@ Dbtux::nodePopDown(Signal* signal, NodeHandle& node, unsigned pos, TreeEnt& ent)
* ^ v ^
* A B C D E _ _ => B C D X E _ _
* 0 1 2 3 4 5 6 0 1 2 3 4 5 6
+ *
+ * Return list of scans at the removed position 0.
*/
void
-Dbtux::nodePushDown(Signal* signal, NodeHandle& node, unsigned pos, TreeEnt& ent)
+Dbtux::nodePushDown(NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32& scanList)
{
Frag& frag = node.m_frag;
TreeHead& tree = frag.m_tree;
const unsigned occup = node.getOccup();
ndbrequire(occup <= tree.m_maxOccup && pos < occup);
- ScanOpPtr scanPtr;
- // move scans whose entry disappears
- scanPtr.i = node.getNodeScan();
- while (scanPtr.i != RNIL) {
+ if (node.getNodeScan() != RNIL) {
+ // remove scans at 0
+ removeScanList(node, 0, scanList);
+ // fix other scans
+ if (node.getNodeScan() != RNIL)
+ nodePushDownScans(node, pos);
+ }
+ // fix node
+ TreeEnt* const entList = tree.getEntList(node.m_node);
+ entList[occup] = entList[0];
+ TreeEnt* const tmpList = entList + 1;
+ TreeEnt oldMin = tmpList[0];
+ for (unsigned i = 0; i < pos; i++) {
jam();
- c_scanOpPool.getPtr(scanPtr);
- TreePos& scanPos = scanPtr.p->m_scanPos;
- ndbrequire(scanPos.m_loc == node.m_loc && scanPos.m_pos < occup);
- const Uint32 nextPtrI = scanPtr.p->m_nodeScan;
- if (scanPos.m_pos == 0) {
- jam();
-#ifdef VM_TRACE
- if (debugFlags & DebugScan) {
- debugOut << "Move scan " << scanPtr.i << " " << *scanPtr.p << endl;
- debugOut << "At pushDown pos=" << pos << " " << node << endl;
- }
-#endif
- // here we may miss a valid entry "X" XXX known bug
- scanNext(signal, scanPtr);
- }
- scanPtr.i = nextPtrI;
+ tmpList[i] = tmpList[i + 1];
}
- // fix other scans
+ tmpList[pos] = ent;
+ ent = oldMin;
+ entList[0] = entList[occup];
+ // fix prefix
+ if (true)
+ setNodePref(node);
+}
+
+void
+Dbtux::nodePushDownScans(NodeHandle& node, unsigned pos)
+{
+ const unsigned occup = node.getOccup();
+ ScanOpPtr scanPtr;
scanPtr.i = node.getNodeScan();
- while (scanPtr.i != RNIL) {
+ do {
jam();
c_scanOpPool.getPtr(scanPtr);
TreePos& scanPos = scanPtr.p->m_scanPos;
ndbrequire(scanPos.m_loc == node.m_loc && scanPos.m_pos < occup);
+ // handled before
ndbrequire(scanPos.m_pos != 0);
if (scanPos.m_pos <= pos) {
jam();
@@ -307,22 +309,7 @@ Dbtux::nodePushDown(Signal* signal, NodeHandle& node, unsigned pos, TreeEnt& ent
scanPos.m_pos--;
}
scanPtr.i = scanPtr.p->m_nodeScan;
- }
- // fix node
- TreeEnt* const entList = tree.getEntList(node.m_node);
- entList[occup] = entList[0];
- TreeEnt* const tmpList = entList + 1;
- TreeEnt oldMin = tmpList[0];
- for (unsigned i = 0; i < pos; i++) {
- jam();
- tmpList[i] = tmpList[i + 1];
- }
- tmpList[pos] = ent;
- ent = oldMin;
- entList[0] = entList[occup];
- // fix prefix
- if (true)
- setNodePref(signal, node);
+ } while (scanPtr.i != RNIL);
}
/*
@@ -334,39 +321,50 @@ Dbtux::nodePushDown(Signal* signal, NodeHandle& node, unsigned pos, TreeEnt& ent
* v ^ ^
* A B C D E _ _ => X A B C E _ _
* 0 1 2 3 4 5 6 0 1 2 3 4 5 6
+ *
+ * Move scans at removed entry and add scans at the new entry.
*/
void
-Dbtux::nodePopUp(Signal* signal, NodeHandle& node, unsigned pos, TreeEnt& ent)
+Dbtux::nodePopUp(NodeHandle& node, unsigned pos, TreeEnt& ent, Uint32 scanList)
{
Frag& frag = node.m_frag;
TreeHead& tree = frag.m_tree;
const unsigned occup = node.getOccup();
ndbrequire(occup <= tree.m_maxOccup && pos < occup);
- ScanOpPtr scanPtr;
- // move scans whose entry disappears
- scanPtr.i = node.getNodeScan();
- while (scanPtr.i != RNIL) {
+ if (node.getNodeScan() != RNIL) {
+ // move scans whose entry disappears
+ moveScanList(node, pos);
+ // fix other scans
+ if (node.getNodeScan() != RNIL)
+ nodePopUpScans(node, pos);
+ }
+ // fix node
+ TreeEnt* const entList = tree.getEntList(node.m_node);
+ entList[occup] = entList[0];
+ TreeEnt* const tmpList = entList + 1;
+ TreeEnt newMin = ent;
+ ent = tmpList[pos];
+ for (unsigned i = pos; i > 0; i--) {
jam();
- c_scanOpPool.getPtr(scanPtr);
- TreePos& scanPos = scanPtr.p->m_scanPos;
- ndbrequire(scanPos.m_loc == node.m_loc && scanPos.m_pos < occup);
- const Uint32 nextPtrI = scanPtr.p->m_nodeScan;
- if (scanPos.m_pos == pos) {
- jam();
-#ifdef VM_TRACE
- if (debugFlags & DebugScan) {
- debugOut << "Move scan " << scanPtr.i << " " << *scanPtr.p << endl;
- debugOut << "At popUp pos=" << pos << " " << node << endl;
- }
-#endif
- // here we may miss a valid entry "X" XXX known bug
- scanNext(signal, scanPtr);
- }
- scanPtr.i = nextPtrI;
+ tmpList[i] = tmpList[i - 1];
}
- // fix other scans
+ tmpList[0] = newMin;
+ entList[0] = entList[occup];
+ // add scans
+ if (scanList != RNIL)
+ addScanList(node, 0, scanList);
+ // fix prefix
+ if (true)
+ setNodePref(node);
+}
+
+void
+Dbtux::nodePopUpScans(NodeHandle& node, unsigned pos)
+{
+ const unsigned occup = node.getOccup();
+ ScanOpPtr scanPtr;
scanPtr.i = node.getNodeScan();
- while (scanPtr.i != RNIL) {
+ do {
jam();
c_scanOpPool.getPtr(scanPtr);
TreePos& scanPos = scanPtr.p->m_scanPos;
@@ -383,41 +381,123 @@ Dbtux::nodePopUp(Signal* signal, NodeHandle& node, unsigned pos, TreeEnt& ent)
scanPos.m_pos++;
}
scanPtr.i = scanPtr.p->m_nodeScan;
- }
- // fix node
- TreeEnt* const entList = tree.getEntList(node.m_node);
- entList[occup] = entList[0];
- TreeEnt* const tmpList = entList + 1;
- TreeEnt newMin = ent;
- ent = tmpList[pos];
- for (unsigned i = pos; i > 0; i--) {
- jam();
- tmpList[i] = tmpList[i - 1];
- }
- tmpList[0] = newMin;
- entList[0] = entList[occup];
- // fix prefix
- if (true)
- setNodePref(signal, node);
+ } while (scanPtr.i != RNIL);
}
/*
- * Move all possible entries from another node before the min (i=0) or
- * after the max (i=1). XXX can be optimized
+ * Move a number of entries from another node to this node before the min
+ * (i=0) or after the max (i=1). Expensive but not often used.
*/
void
-Dbtux::nodeSlide(Signal* signal, NodeHandle& dstNode, NodeHandle& srcNode, unsigned i)
+Dbtux::nodeSlide(NodeHandle& dstNode, NodeHandle& srcNode, unsigned cnt, unsigned i)
{
Frag& frag = dstNode.m_frag;
TreeHead& tree = frag.m_tree;
ndbrequire(i <= 1);
- while (dstNode.getOccup() < tree.m_maxOccup && srcNode.getOccup() != 0) {
+ while (cnt != 0) {
TreeEnt ent;
- nodePopDown(signal, srcNode, i == 0 ? srcNode.getOccup() - 1 : 0, ent);
- nodePushUp(signal, dstNode, i == 0 ? 0 : dstNode.getOccup(), ent);
+ Uint32 scanList = RNIL;
+ nodePopDown(srcNode, i == 0 ? srcNode.getOccup() - 1 : 0, ent, &scanList);
+ nodePushUp(dstNode, i == 0 ? 0 : dstNode.getOccup(), ent, scanList);
+ cnt--;
}
}
+// scans linked to node
+
+
+/*
+ * Add list of scans to node at given position.
+ */
+void
+Dbtux::addScanList(NodeHandle& node, unsigned pos, Uint32 scanList)
+{
+ ScanOpPtr scanPtr;
+ scanPtr.i = scanList;
+ do {
+ jam();
+ c_scanOpPool.getPtr(scanPtr);
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Add scan " << scanPtr.i << " " << *scanPtr.p << endl;
+ debugOut << "To pos=" << pos << " " << node << endl;
+ }
+#endif
+ const Uint32 nextPtrI = scanPtr.p->m_nodeScan;
+ scanPtr.p->m_nodeScan = RNIL;
+ linkScan(node, scanPtr);
+ TreePos& scanPos = scanPtr.p->m_scanPos;
+ // set position but leave direction alone
+ scanPos.m_loc = node.m_loc;
+ scanPos.m_pos = pos;
+ scanPtr.i = nextPtrI;
+ } while (scanPtr.i != RNIL);
+}
+
+/*
+ * Remove the list of scans from the node at the given position. The
+ * return location must point to an existing list (in fact always RNIL).
+ */
+void
+Dbtux::removeScanList(NodeHandle& node, unsigned pos, Uint32& scanList)
+{
+ ScanOpPtr scanPtr;
+ scanPtr.i = node.getNodeScan();
+ do {
+ jam();
+ c_scanOpPool.getPtr(scanPtr);
+ const Uint32 nextPtrI = scanPtr.p->m_nodeScan;
+ TreePos& scanPos = scanPtr.p->m_scanPos;
+ ndbrequire(scanPos.m_loc == node.m_loc);
+ if (scanPos.m_pos == pos) {
+ jam();
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Remove scan " << scanPtr.i << " " << *scanPtr.p << endl;
+ debugOut << "Fron pos=" << pos << " " << node << endl;
+ }
+#endif
+ unlinkScan(node, scanPtr);
+ scanPtr.p->m_nodeScan = scanList;
+ scanList = scanPtr.i;
+ // unset position but leave direction alone
+ scanPos.m_loc = NullTupLoc;
+ scanPos.m_pos = ZNIL;
+ }
+ scanPtr.i = nextPtrI;
+ } while (scanPtr.i != RNIL);
+}
+
+/*
+ * Move list of scans away from entry about to be removed. Uses scan
+ * method scanNext().
+ */
+void
+Dbtux::moveScanList(NodeHandle& node, unsigned pos)
+{
+ ScanOpPtr scanPtr;
+ scanPtr.i = node.getNodeScan();
+ do {
+ jam();
+ c_scanOpPool.getPtr(scanPtr);
+ TreePos& scanPos = scanPtr.p->m_scanPos;
+ const Uint32 nextPtrI = scanPtr.p->m_nodeScan;
+ ndbrequire(scanPos.m_loc == node.m_loc);
+ if (scanPos.m_pos == pos) {
+ jam();
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Move scan " << scanPtr.i << " " << *scanPtr.p << endl;
+ debugOut << "At pos=" << pos << " " << node << endl;
+ }
+#endif
+ scanNext(scanPtr);
+ ndbrequire(! (scanPos.m_loc == node.m_loc && scanPos.m_pos == pos));
+ }
+ scanPtr.i = nextPtrI;
+ } while (scanPtr.i != RNIL);
+}
+
/*
* Link scan to the list under the node. The list is single-linked and
* ordering does not matter.
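
The new addScanList/removeScanList/moveScanList helpers above pass scans between a node and the caller through a plain single-linked list head. A much simplified sketch of the splice (illustrative only: toy types, and it detaches every scan rather than only those at one position):

    #include <cassert>

    struct Scan { int next; };            // next scan in the per-node list
    static const int NIL = -1;
    static Scan pool[4];

    // unlink every scan from the node list and push it onto a detached list
    static void removeAll(int& nodeHead, int& detachedHead)
    {
      while (nodeHead != NIL) {
        const int i = nodeHead;
        nodeHead = pool[i].next;          // unlink from node
        pool[i].next = detachedHead;      // push onto detached list
        detachedHead = i;
      }
    }

    int main()
    {
      pool[0].next = 1; pool[1].next = NIL;
      int nodeHead = 0, detached = NIL;
      removeAll(nodeHead, detached);
      assert(nodeHead == NIL && detached == 1 && pool[1].next == 0);
      return 0;
    }
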
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
index c4c33ff931f..8677ae741b3 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
@@ -108,14 +108,23 @@ Dbtux::execACC_SCANREQ(Signal* signal)
/*
* Receive bounds for scan in single direct call. The bounds can arrive
* in any order. Attribute ids are those of index table.
+ *
+ * Replace EQ by equivalent LE + GE. Check for conflicting bounds.
+ * Check that sets of lower and upper bounds are on initial sequences of
+ * keys and that all bounds except possibly the last one are non-strict.
+ *
+ * Finally save the sets of lower and upper bounds (i.e. start key and
+ * end key). Full bound type (< 4) is included but only the strict bit
+ * is used since lower and upper have now been separated.
*/
void
Dbtux::execTUX_BOUND_INFO(Signal* signal)
{
+ jamEntry();
struct BoundInfo {
+ int type;
unsigned offset;
unsigned size;
- int type;
};
TuxBoundInfo* const sig = (TuxBoundInfo*)signal->getDataPtrSend();
const TuxBoundInfo reqCopy = *(const TuxBoundInfo*)sig;
@@ -123,19 +132,12 @@ Dbtux::execTUX_BOUND_INFO(Signal* signal)
// get records
ScanOp& scan = *c_scanOpPool.getPtr(req->tuxScanPtrI);
Index& index = *c_indexPool.getPtr(scan.m_indexId);
- // collect bound info for each index attribute
- BoundInfo boundInfo[MaxIndexAttributes][2];
+ // collect lower and upper bounds
+ BoundInfo boundInfo[2][MaxIndexAttributes];
// largest attrId seen plus one
- Uint32 maxAttrId = 0;
- // skip 5 words
- if (req->boundAiLength < 5) {
- jam();
- scan.m_state = ScanOp::Invalid;
- sig->errorCode = TuxBoundInfo::InvalidAttrInfo;
- return;
- }
+ Uint32 maxAttrId[2] = { 0, 0 };
+ unsigned offset = 0;
const Uint32* const data = (Uint32*)sig + TuxBoundInfo::SignalLength;
- unsigned offset = 5;
// walk through entries
while (offset + 2 <= req->boundAiLength) {
jam();
@@ -155,32 +157,35 @@ Dbtux::execTUX_BOUND_INFO(Signal* signal)
sig->errorCode = TuxBoundInfo::InvalidAttrInfo;
return;
}
- while (maxAttrId <= attrId) {
- BoundInfo* b = boundInfo[maxAttrId++];
- b[0].type = b[1].type = -1;
- }
- BoundInfo* b = boundInfo[attrId];
- if (type == 0 || type == 1 || type == 4) {
- if (b[0].type != -1) {
- jam();
- scan.m_state = ScanOp::Invalid;
- sig->errorCode = TuxBoundInfo::InvalidBounds;
- return;
+ for (unsigned j = 0; j <= 1; j++) {
+ // check if lower/upper bit matches
+ const unsigned luBit = (j << 1);
+ if ((type & 0x2) != luBit && type != 4)
+ continue;
+ // EQ -> LE, GE
+ const unsigned type2 = (type & 0x1) | luBit;
+ // fill in any gap
+ while (maxAttrId[j] <= attrId) {
+ BoundInfo& b = boundInfo[j][maxAttrId[j]++];
+ b.type = -1;
}
- b[0].offset = offset;
- b[0].size = 2 + dataSize;
- b[0].type = type;
- }
- if (type == 2 || type == 3 || type == 4) {
- if (b[1].type != -1) {
- jam();
- scan.m_state = ScanOp::Invalid;
- sig->errorCode = TuxBoundInfo::InvalidBounds;
- return;
+ BoundInfo& b = boundInfo[j][attrId];
+ if (b.type != -1) {
+ // compare with previous bound
+ if (b.type != (int)type2 ||
+ b.size != 2 + dataSize ||
+ memcmp(&data[b.offset + 2], &data[offset + 2], dataSize << 2) != 0) {
+ jam();
+ scan.m_state = ScanOp::Invalid;
+ sig->errorCode = TuxBoundInfo::InvalidBounds;
+ return;
+ }
+ } else {
+ // enter new bound
+ b.type = type2;
+ b.offset = offset;
+ b.size = 2 + dataSize;
}
- b[1].offset = offset;
- b[1].size = 2 + dataSize;
- b[1].type = type;
}
// jump to next
offset += 2 + dataSize;
@@ -191,34 +196,27 @@ Dbtux::execTUX_BOUND_INFO(Signal* signal)
sig->errorCode = TuxBoundInfo::InvalidAttrInfo;
return;
}
- // save the bounds in index attribute id order
- scan.m_boundCnt[0] = 0;
- scan.m_boundCnt[1] = 0;
- for (unsigned i = 0; i < maxAttrId; i++) {
- jam();
- const BoundInfo* b = boundInfo[i];
- // current limitation - check all but last is equality
- if (i + 1 < maxAttrId) {
- if (b[0].type != 4 || b[1].type != 4) {
+ for (unsigned j = 0; j <= 1; j++) {
+ // save lower/upper bound in index attribute id order
+ for (unsigned i = 0; i < maxAttrId[j]; i++) {
+ jam();
+ const BoundInfo& b = boundInfo[j][i];
+ // check for gap or strict bound before last
+ if (b.type == -1 || (i + 1 < maxAttrId[j] && (b.type & 0x1))) {
jam();
scan.m_state = ScanOp::Invalid;
sig->errorCode = TuxBoundInfo::InvalidBounds;
return;
}
- }
- for (unsigned j = 0; j <= 1; j++) {
- if (b[j].type != -1) {
+ bool ok = scan.m_bound[j]->append(&data[b.offset], b.size);
+ if (! ok) {
jam();
- bool ok = scan.m_bound[j]->append(&data[b[j].offset], b[j].size);
- if (! ok) {
- jam();
- scan.m_state = ScanOp::Invalid;
- sig->errorCode = TuxBoundInfo::OutOfBuffers;
- return;
- }
- scan.m_boundCnt[j]++;
+ scan.m_state = ScanOp::Invalid;
+ sig->errorCode = TuxBoundInfo::OutOfBuffers;
+ return;
}
}
+ scan.m_boundCnt[j] = maxAttrId[j];
}
// no error
sig->errorCode = 0;
@@ -277,7 +275,7 @@ Dbtux::execNEXT_SCANREQ(Signal* signal)
jam();
const TupLoc loc = scan.m_scanPos.m_loc;
NodeHandle node(frag);
- selectNode(signal, node, loc, AccHead);
+ selectNode(node, loc);
unlinkScan(node, scanPtr);
scan.m_scanPos.m_loc = NullTupLoc;
}
@@ -352,7 +350,7 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
if (scan.m_lockwait) {
jam();
// LQH asks if we are waiting for lock and we tell it to ask again
- const TreeEnt ent = scan.m_scanPos.m_ent;
+ const TreeEnt ent = scan.m_scanEnt;
NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
conf->scanPtr = scan.m_userPtr;
conf->accOperationPtr = RNIL; // no tuple returned
@@ -366,7 +364,7 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
if (scan.m_state == ScanOp::First) {
jam();
// search is done only once in single range scan
- scanFirst(signal, scanPtr);
+ scanFirst(scanPtr);
#ifdef VM_TRACE
if (debugFlags & DebugScan) {
debugOut << "First scan " << scanPtr.i << " " << scan << endl;
@@ -376,7 +374,7 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
if (scan.m_state == ScanOp::Next) {
jam();
// look for next
- scanNext(signal, scanPtr);
+ scanNext(scanPtr);
}
// for reading tuple key in Current or Locked state
Data pkData = c_dataBuffer;
@@ -387,9 +385,9 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
ndbrequire(scan.m_accLockOp == RNIL);
if (! scan.m_readCommitted) {
jam();
- const TreeEnt ent = scan.m_scanPos.m_ent;
+ const TreeEnt ent = scan.m_scanEnt;
// read tuple key
- readTablePk(frag, ent, pkSize, pkData);
+ readTablePk(frag, ent, pkData, pkSize);
// get read lock or exclusive lock
AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
lockReq->returnCode = RNIL;
@@ -475,12 +473,12 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
// we have lock or do not need one
jam();
// read keys if not already done (uses signal)
- const TreeEnt ent = scan.m_scanPos.m_ent;
+ const TreeEnt ent = scan.m_scanEnt;
if (scan.m_keyInfo) {
jam();
if (pkSize == 0) {
jam();
- readTablePk(frag, ent, pkSize, pkData);
+ readTablePk(frag, ent, pkData, pkSize);
}
}
// conf signal
@@ -538,8 +536,6 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
total += length;
}
}
- // remember last entry returned
- scan.m_lastEnt = ent;
// next time look for next entry
scan.m_state = ScanOp::Next;
return;
@@ -684,7 +680,7 @@ Dbtux::execACC_ABORTCONF(Signal* signal)
* by scanNext.
*/
void
-Dbtux::scanFirst(Signal* signal, ScanOpPtr scanPtr)
+Dbtux::scanFirst(ScanOpPtr scanPtr)
{
ScanOp& scan = *scanPtr.p;
Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI);
@@ -702,7 +698,7 @@ Dbtux::scanFirst(Signal* signal, ScanOpPtr scanPtr)
}
// search for scan start position
TreePos treePos;
- searchToScan(signal, frag, c_dataBuffer, scan.m_boundCnt[0], treePos);
+ searchToScan(frag, c_dataBuffer, scan.m_boundCnt[0], treePos);
if (treePos.m_loc == NullTupLoc) {
// empty tree
jam();
@@ -714,23 +710,27 @@ Dbtux::scanFirst(Signal* signal, ScanOpPtr scanPtr)
scan.m_state = ScanOp::Next;
// link the scan to node found
NodeHandle node(frag);
- selectNode(signal, node, treePos.m_loc, AccFull);
+ selectNode(node, treePos.m_loc);
linkScan(node, scanPtr);
}
/*
* Move to next entry. The scan is already linked to some node. When
- * we leave, if any entry was found, it will be linked to a possibly
- * different node. The scan has a direction, one of:
+ * we leave, if an entry was found, it will be linked to a possibly
+ * different node. The scan has a position, and a direction which tells
+ * from where we came to this position. This is one of:
+ *
+ * 0 - up from left child (scan this node next)
+ * 1 - up from right child (proceed to parent)
+ * 2 - up from root (the scan ends)
+ * 3 - left to right within node (at end proceed to right child)
+ * 4 - down from parent (proceed to left child)
*
- * 0 - coming up from left child
- * 1 - coming up from right child (proceed to parent immediately)
- * 2 - coming up from root (the scan ends)
- * 3 - left to right within node
- * 4 - coming down from parent to left or right child
+ * If an entry was found, scan direction is 3. Therefore tree
+ * re-organizations need not worry about scan direction.
*/
void
-Dbtux::scanNext(Signal* signal, ScanOpPtr scanPtr)
+Dbtux::scanNext(ScanOpPtr scanPtr)
{
ScanOp& scan = *scanPtr.p;
Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI);
@@ -739,22 +739,8 @@ Dbtux::scanNext(Signal* signal, ScanOpPtr scanPtr)
debugOut << "Next in scan " << scanPtr.i << " " << scan << endl;
}
#endif
- if (scan.m_state == ScanOp::Locked) {
- jam();
- // version of a tuple locked by us cannot disappear (assert only)
-#ifdef dbtux_wl_1942_is_done
- ndbassert(false);
-#endif
- AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
- lockReq->returnCode = RNIL;
- lockReq->requestInfo = AccLockReq::Unlock;
- lockReq->accOpPtr = scan.m_accLockOp;
- EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal, AccLockReq::UndoSignalLength);
- jamEntry();
- ndbrequire(lockReq->returnCode == AccLockReq::Success);
- scan.m_accLockOp = RNIL;
- scan.m_state = ScanOp::Current;
- }
+ // cannot be moved away from tuple we have locked
+ ndbrequire(scan.m_state != ScanOp::Locked);
// set up index keys for this operation
setKeyAttrs(frag);
// unpack upper bound into c_dataBuffer
@@ -770,10 +756,12 @@ Dbtux::scanNext(Signal* signal, ScanOpPtr scanPtr)
TreePos pos = scan.m_scanPos;
// get and remember original node
NodeHandle origNode(frag);
- selectNode(signal, origNode, pos.m_loc, AccHead);
+ selectNode(origNode, pos.m_loc);
ndbrequire(islinkScan(origNode, scanPtr));
// current node in loop
NodeHandle node = origNode;
+ // copy of entry found
+ TreeEnt ent;
while (true) {
jam();
if (pos.m_dir == 2) {
@@ -785,7 +773,7 @@ Dbtux::scanNext(Signal* signal, ScanOpPtr scanPtr)
}
if (node.m_loc != pos.m_loc) {
jam();
- selectNode(signal, node, pos.m_loc, AccHead);
+ selectNode(node, pos.m_loc);
}
if (pos.m_dir == 4) {
// coming down from parent proceed to left child
@@ -801,7 +789,7 @@ Dbtux::scanNext(Signal* signal, ScanOpPtr scanPtr)
pos.m_dir = 0;
}
if (pos.m_dir == 0) {
- // coming from left child scan current node
+ // coming up from left child scan current node
jam();
pos.m_pos = 0;
pos.m_match = false;
@@ -812,8 +800,6 @@ Dbtux::scanNext(Signal* signal, ScanOpPtr scanPtr)
jam();
unsigned occup = node.getOccup();
ndbrequire(occup >= 1);
- // access full node
- accessNode(signal, node, AccFull);
// advance position
if (! pos.m_match)
pos.m_match = true;
@@ -821,10 +807,10 @@ Dbtux::scanNext(Signal* signal, ScanOpPtr scanPtr)
pos.m_pos++;
if (pos.m_pos < occup) {
jam();
- pos.m_ent = node.getEnt(pos.m_pos);
+ ent = node.getEnt(pos.m_pos);
pos.m_dir = 3; // unchanged
// read and compare all attributes
- readKeyAttrs(frag, pos.m_ent, 0, c_entryKey);
+ readKeyAttrs(frag, ent, 0, c_entryKey);
int ret = cmpScanBound(frag, 1, c_dataBuffer, scan.m_boundCnt[1], c_entryKey);
ndbrequire(ret != NdbSqlUtil::CmpUnknown);
if (ret < 0) {
@@ -835,7 +821,7 @@ Dbtux::scanNext(Signal* signal, ScanOpPtr scanPtr)
break;
}
// can we see it
- if (! scanVisible(signal, scanPtr, pos.m_ent)) {
+ if (! scanVisible(scanPtr, ent)) {
jam();
continue;
}
@@ -855,7 +841,7 @@ Dbtux::scanNext(Signal* signal, ScanOpPtr scanPtr)
pos.m_dir = 1;
}
if (pos.m_dir == 1) {
- // coming from right child proceed to parent
+ // coming up from right child proceed to parent
jam();
pos.m_loc = node.getLink(2);
pos.m_dir = node.getSide();
@@ -867,12 +853,15 @@ Dbtux::scanNext(Signal* signal, ScanOpPtr scanPtr)
scan.m_scanPos = pos;
// relink
if (scan.m_state == ScanOp::Current) {
+ ndbrequire(pos.m_match == true && pos.m_dir == 3);
ndbrequire(pos.m_loc == node.m_loc);
if (origNode.m_loc != node.m_loc) {
jam();
unlinkScan(origNode, scanPtr);
linkScan(node, scanPtr);
}
+ // copy found entry
+ scan.m_scanEnt = ent;
} else if (scan.m_state == ScanOp::Last) {
jam();
ndbrequire(pos.m_loc == NullTupLoc);
@@ -890,12 +879,12 @@ Dbtux::scanNext(Signal* signal, ScanOpPtr scanPtr)
/*
* Check if an entry is visible to the scan.
*
- * There is a special check to never return same tuple twice in a row.
+ * There is a special check to never accept the same tuple twice in a row.
* This is faster than asking TUP. It also fixes some special cases
* which are not analyzed or handled yet.
*/
bool
-Dbtux::scanVisible(Signal* signal, ScanOpPtr scanPtr, TreeEnt ent)
+Dbtux::scanVisible(ScanOpPtr scanPtr, TreeEnt ent)
{
const ScanOp& scan = *scanPtr.p;
const Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI);
@@ -905,8 +894,8 @@ Dbtux::scanVisible(Signal* signal, ScanOpPtr scanPtr, TreeEnt ent)
Uint32 tupAddr = getTupAddr(frag, ent);
Uint32 tupVersion = ent.m_tupVersion;
// check for same tuple twice in row
- if (scan.m_lastEnt.m_tupLoc == ent.m_tupLoc &&
- scan.m_lastEnt.m_fragBit == fragBit) {
+ if (scan.m_scanEnt.m_tupLoc == ent.m_tupLoc &&
+ scan.m_scanEnt.m_fragBit == fragBit) {
jam();
return false;
}
@@ -928,7 +917,7 @@ Dbtux::scanClose(Signal* signal, ScanOpPtr scanPtr)
ScanOp& scan = *scanPtr.p;
ndbrequire(! scan.m_lockwait && scan.m_accLockOp == RNIL);
// unlock all not unlocked by LQH
- for (unsigned i = 0; i < MaxAccLockOps; i++) {
+ for (unsigned i = 0; i < scan.m_maxAccLockOps; i++) {
if (scan.m_accLockOps[i] != RNIL) {
jam();
AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
@@ -958,7 +947,7 @@ Dbtux::addAccLockOp(ScanOp& scan, Uint32 accLockOp)
ndbrequire(accLockOp != RNIL);
Uint32* list = scan.m_accLockOps;
bool ok = false;
- for (unsigned i = 0; i < MaxAccLockOps; i++) {
+ for (unsigned i = 0; i < scan.m_maxAccLockOps; i++) {
ndbrequire(list[i] != accLockOp);
if (! ok && list[i] == RNIL) {
list[i] = accLockOp;
@@ -966,6 +955,14 @@ Dbtux::addAccLockOp(ScanOp& scan, Uint32 accLockOp)
// continue check for duplicates
}
}
+ if (! ok) {
+ unsigned i = scan.m_maxAccLockOps;
+ if (i < MaxAccLockOps) {
+ list[i] = accLockOp;
+ ok = true;
+ scan.m_maxAccLockOps = i + 1;
+ }
+ }
ndbrequire(ok);
}
@@ -975,7 +972,7 @@ Dbtux::removeAccLockOp(ScanOp& scan, Uint32 accLockOp)
ndbrequire(accLockOp != RNIL);
Uint32* list = scan.m_accLockOps;
bool ok = false;
- for (unsigned i = 0; i < MaxAccLockOps; i++) {
+ for (unsigned i = 0; i < scan.m_maxAccLockOps; i++) {
if (list[i] == accLockOp) {
list[i] = RNIL;
ok = true;
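
The bound rewriting in execTUX_BOUND_INFO above can be tried in isolation. This sketch assumes the bound-type encoding inferred from the code (bit 0 = strict, bit 1 = upper side, 4 = equality), so an EQ bound lands once on each side as a non-strict bound:

    #include <cassert>

    static bool splitBound(unsigned type, unsigned j /* 0=lower 1=upper */,
                           unsigned& type2)
    {
      const unsigned luBit = (j << 1);
      if ((type & 0x2) != luBit && type != 4)
        return false;                     // bound does not apply to this side
      type2 = (type & 0x1) | luBit;       // EQ (4) becomes GE (0) or LE (2)
      return true;
    }

    int main()
    {
      unsigned t;
      assert(splitBound(4, 0, t) && t == 0);   // EQ -> lower, non-strict
      assert(splitBound(4, 1, t) && t == 2);   // EQ -> upper, non-strict
      assert(splitBound(1, 0, t) && t == 1);   // strict lower stays lower
      assert(! splitBound(1, 1, t));           // strict lower ignored on upper side
      return 0;
    }
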
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp
index 84048b308bc..7057d74c3ad 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp
@@ -25,16 +25,17 @@
* TODO optimize for initial equal attrs in node min/max
*/
void
-Dbtux::searchToAdd(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos)
+Dbtux::searchToAdd(Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos)
{
const TreeHead& tree = frag.m_tree;
const unsigned numAttrs = frag.m_numAttrs;
NodeHandle currNode(frag);
currNode.m_loc = tree.m_root;
+ // assume success
+ treePos.m_match = false;
if (currNode.m_loc == NullTupLoc) {
// empty tree
jam();
- treePos.m_match = false;
return;
}
NodeHandle glbNode(frag); // potential g.l.b of final node
@@ -45,7 +46,7 @@ Dbtux::searchToAdd(Signal* signal, Frag& frag, TableData searchKey, TreeEnt sear
NodeHandle bottomNode(frag);
while (true) {
jam();
- selectNode(signal, currNode, currNode.m_loc, AccPref);
+ selectNode(currNode, currNode.m_loc);
int ret;
// compare prefix
unsigned start = 0;
@@ -93,16 +94,22 @@ Dbtux::searchToAdd(Signal* signal, Frag& frag, TableData searchKey, TreeEnt sear
jam();
treePos.m_loc = currNode.m_loc;
treePos.m_pos = 0;
+ // failed
treePos.m_match = true;
return;
}
break;
}
- // access rest of current node
- accessNode(signal, currNode, AccFull);
- for (unsigned j = 0, occup = currNode.getOccup(); j < occup; j++) {
+ // anticipate
+ treePos.m_loc = currNode.m_loc;
+ // binary search
+ int lo = -1;
+ unsigned hi = currNode.getOccup();
+ int ret;
+ while (1) {
jam();
- int ret;
+ // hi - lo > 1 implies lo < j < hi
+ int j = (hi + lo) / 2;
// read and compare attributes
unsigned start = 0;
readKeyAttrs(frag, currNode.getEnt(j), start, c_entryKey);
@@ -113,25 +120,38 @@ Dbtux::searchToAdd(Signal* signal, Frag& frag, TableData searchKey, TreeEnt sear
// keys are equal, compare entry values
ret = searchEnt.cmp(currNode.getEnt(j));
}
- if (ret <= 0) {
- jam();
- treePos.m_loc = currNode.m_loc;
+ if (ret < 0)
+ hi = j;
+ else if (ret > 0)
+ lo = j;
+ else {
treePos.m_pos = j;
- treePos.m_match = (ret == 0);
+ // failed
+ treePos.m_match = true;
return;
}
+ if (hi - lo == 1)
+ break;
}
- if (! bottomNode.isNull()) {
+ if (ret < 0) {
jam();
- // backwards compatible for now
- treePos.m_loc = bottomNode.m_loc;
- treePos.m_pos = 0;
- treePos.m_match = false;
+ treePos.m_pos = hi;
return;
}
- treePos.m_loc = currNode.m_loc;
- treePos.m_pos = currNode.getOccup();
- treePos.m_match = false;
+ if (hi < currNode.getOccup()) {
+ jam();
+ treePos.m_pos = hi;
+ return;
+ }
+ if (bottomNode.isNull()) {
+ jam();
+ treePos.m_pos = hi;
+ return;
+ }
+ jam();
+ // backwards compatible for now
+ treePos.m_loc = bottomNode.m_loc;
+ treePos.m_pos = 0;
}
/*
@@ -139,27 +159,30 @@ Dbtux::searchToAdd(Signal* signal, Frag& frag, TableData searchKey, TreeEnt sear
*
* Compares search key to each node min. A move to right subtree can
* overshoot target node. The last such node is saved. The final node
- * is a half-leaf or leaf. If search key is less than final node min
+ * is a semi-leaf or leaf. If search key is less than final node min
* then the saved node is the g.l.b of the final node and we move back
* to it.
*/
void
-Dbtux::searchToRemove(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos)
+Dbtux::searchToRemove(Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos)
{
const TreeHead& tree = frag.m_tree;
const unsigned numAttrs = frag.m_numAttrs;
NodeHandle currNode(frag);
currNode.m_loc = tree.m_root;
+ // assume success
+ treePos.m_match = true;
if (currNode.m_loc == NullTupLoc) {
// empty tree
jam();
+ // failed
treePos.m_match = false;
return;
}
NodeHandle glbNode(frag); // potential g.l.b of final node
while (true) {
jam();
- selectNode(signal, currNode, currNode.m_loc, AccPref);
+ selectNode(currNode, currNode.m_loc);
int ret;
// compare prefix
unsigned start = 0;
@@ -206,27 +229,24 @@ Dbtux::searchToRemove(Signal* signal, Frag& frag, TableData searchKey, TreeEnt s
jam();
treePos.m_loc = currNode.m_loc;
treePos.m_pos = 0;
- treePos.m_match = true;
return;
}
break;
}
- // access rest of current node
- accessNode(signal, currNode, AccFull);
+ // anticipate
+ treePos.m_loc = currNode.m_loc;
// pos 0 was handled above
for (unsigned j = 1, occup = currNode.getOccup(); j < occup; j++) {
jam();
// compare only the entry
if (searchEnt.eq(currNode.getEnt(j))) {
jam();
- treePos.m_loc = currNode.m_loc;
treePos.m_pos = j;
- treePos.m_match = true;
return;
}
}
- treePos.m_loc = currNode.m_loc;
treePos.m_pos = currNode.getOccup();
+ // failed
treePos.m_match = false;
}
@@ -236,7 +256,7 @@ Dbtux::searchToRemove(Signal* signal, Frag& frag, TableData searchKey, TreeEnt s
* Similar to searchToAdd.
*/
void
-Dbtux::searchToScan(Signal* signal, Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos)
+Dbtux::searchToScan(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos)
{
const TreeHead& tree = frag.m_tree;
NodeHandle currNode(frag);
@@ -251,7 +271,7 @@ Dbtux::searchToScan(Signal* signal, Frag& frag, ConstData boundInfo, unsigned bo
NodeHandle bottomNode(frag);
while (true) {
jam();
- selectNode(signal, currNode, currNode.m_loc, AccPref);
+ selectNode(currNode, currNode.m_loc);
int ret;
// compare prefix
ret = cmpScanBound(frag, 0, boundInfo, boundCount, currNode.getPref(), tree.m_prefSize);
@@ -300,8 +320,6 @@ Dbtux::searchToScan(Signal* signal, Frag& frag, ConstData boundInfo, unsigned bo
}
break;
}
- // access rest of current node
- accessNode(signal, currNode, AccFull);
for (unsigned j = 0, occup = currNode.getOccup(); j < occup; j++) {
jam();
int ret;
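
The searchToAdd hunk above replaces the linear scan of the bounding node with a lo/hi binary search (the "binary search of bounding node" noted in Times.txt below). What follows is a minimal standalone sketch of that pattern on a sorted std::vector, not the DBTUX code itself; the names are hypothetical.

#include <cassert>
#include <vector>

// Return the position where 'key' belongs in the sorted vector 'node';
// 'match' is set when an equal entry already exists.  lo always points
// strictly below the key, hi at or above it, and the loop stops when
// they become adjacent.
static unsigned searchPos(const std::vector<int>& node, int key, bool& match)
{
  match = false;
  int lo = -1;                      // before the first entry
  int hi = (int)node.size();        // past the last entry
  while (hi - lo > 1) {
    int j = (lo + hi) / 2;          // probe the middle entry
    if (key < node[j])
      hi = j;
    else if (key > node[j])
      lo = j;
    else {
      match = true;                 // duplicate found
      return (unsigned)j;
    }
  }
  return (unsigned)hi;              // first entry >= key
}

int main()
{
  std::vector<int> node = {2, 4, 6, 8};
  bool match;
  assert(searchPos(node, 5, match) == 2 && !match);  // insert before 6
  assert(searchPos(node, 6, match) == 2 && match);   // exact hit
  return 0;
}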
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
index 3baa62998db..b9e3b593a00 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
@@ -18,74 +18,105 @@
#include "Dbtux.hpp"
/*
- * Add entry.
+ * Add entry. Handle the case when there is room for one more. This
+ * is the common case given slack in nodes.
*/
void
-Dbtux::treeAdd(Signal* signal, Frag& frag, TreePos treePos, TreeEnt ent)
+Dbtux::treeAdd(Frag& frag, TreePos treePos, TreeEnt ent)
{
TreeHead& tree = frag.m_tree;
- unsigned pos = treePos.m_pos;
NodeHandle node(frag);
- // check for empty tree
- if (treePos.m_loc == NullTupLoc) {
- jam();
- insertNode(signal, node, AccPref);
- nodePushUp(signal, node, 0, ent);
- node.setSide(2);
- tree.m_root = node.m_loc;
- return;
- }
- // access full node
- selectNode(signal, node, treePos.m_loc, AccFull);
- // check if it is bounding node
- if (pos != 0 && pos != node.getOccup()) {
+ if (treePos.m_loc != NullTupLoc) {
+ // non-empty tree
jam();
- // check if room for one more
+ selectNode(node, treePos.m_loc);
+ unsigned pos = treePos.m_pos;
if (node.getOccup() < tree.m_maxOccup) {
+ // node has room
jam();
- nodePushUp(signal, node, pos, ent);
+ nodePushUp(node, pos, ent, RNIL);
return;
}
- // returns min entry
- nodePushDown(signal, node, pos - 1, ent);
- // find position to add the removed min entry
- TupLoc childLoc = node.getLink(0);
- if (childLoc == NullTupLoc) {
+ treeAddFull(frag, node, pos, ent);
+ return;
+ }
+ jam();
+ insertNode(node);
+ nodePushUp(node, 0, ent, RNIL);
+ node.setSide(2);
+ tree.m_root = node.m_loc;
+}
+
+/*
+ * Add entry when node is full. Handle the case when there is g.l.b
+ * node in left subtree with room for one more. It will receive the min
+ * entry of this node. The min entry could be the entry to add.
+ */
+void
+Dbtux::treeAddFull(Frag& frag, NodeHandle lubNode, unsigned pos, TreeEnt ent)
+{
+ TreeHead& tree = frag.m_tree;
+ TupLoc loc = lubNode.getLink(0);
+ if (loc != NullTupLoc) {
+ // find g.l.b node
+ NodeHandle glbNode(frag);
+ do {
jam();
- // left child will be added
- pos = 0;
- } else {
+ selectNode(glbNode, loc);
+ loc = glbNode.getLink(1);
+ } while (loc != NullTupLoc);
+ if (glbNode.getOccup() < tree.m_maxOccup) {
+ // g.l.b node has room
jam();
- // find glb node
- while (childLoc != NullTupLoc) {
+ Uint32 scanList = RNIL;
+ if (pos != 0) {
jam();
- selectNode(signal, node, childLoc, AccHead);
- childLoc = node.getLink(1);
+ // add the new entry and return min entry
+ nodePushDown(lubNode, pos - 1, ent, scanList);
}
- // access full node again
- accessNode(signal, node, AccFull);
- pos = node.getOccup();
+ // g.l.b node receives min entry from l.u.b node
+ nodePushUp(glbNode, glbNode.getOccup(), ent, scanList);
+ return;
}
- // fall thru to next case
- }
- // adding new min or max
- unsigned i = (pos == 0 ? 0 : 1);
- ndbrequire(node.getLink(i) == NullTupLoc);
- // check if the half-leaf/leaf has room for one more
- if (node.getOccup() < tree.m_maxOccup) {
- jam();
- nodePushUp(signal, node, pos, ent);
+ treeAddNode(frag, lubNode, pos, ent, glbNode, 1);
return;
}
- // add a new node
- NodeHandle childNode(frag);
- insertNode(signal, childNode, AccPref);
- nodePushUp(signal, childNode, 0, ent);
+ treeAddNode(frag, lubNode, pos, ent, lubNode, 0);
+}
+
+/*
+ * Add entry when there is no g.l.b node in left subtree or the g.l.b
+ * node is full. We must add a new left or right child node which
+ * becomes the new g.l.b node.
+ */
+void
+Dbtux::treeAddNode(Frag& frag, NodeHandle lubNode, unsigned pos, TreeEnt ent, NodeHandle parentNode, unsigned i)
+{
+ NodeHandle glbNode(frag);
+ insertNode(glbNode);
// connect parent and child
- node.setLink(i, childNode.m_loc);
- childNode.setLink(2, node.m_loc);
- childNode.setSide(i);
- // re-balance tree at each node
+ parentNode.setLink(i, glbNode.m_loc);
+ glbNode.setLink(2, parentNode.m_loc);
+ glbNode.setSide(i);
+ Uint32 scanList = RNIL;
+ if (pos != 0) {
+ jam();
+ // add the new entry and return min entry
+ nodePushDown(lubNode, pos - 1, ent, scanList);
+ }
+ // g.l.b node receives min entry from l.u.b node
+ nodePushUp(glbNode, 0, ent, scanList);
+ // re-balance the tree
+ treeAddRebalance(frag, parentNode, i);
+}
+
+/*
+ * Re-balance tree after adding a node. The process starts with the
+ * parent of the added node.
+ */
+void
+Dbtux::treeAddRebalance(Frag& frag, NodeHandle node, unsigned i)
+{
while (true) {
// height of subtree i has increased by 1
int j = (i == 0 ? -1 : +1);
@@ -105,14 +136,14 @@ Dbtux::treeAdd(Signal* signal, Frag& frag, TreePos treePos, TreeEnt ent)
// height of longer subtree increased
jam();
NodeHandle childNode(frag);
- selectNode(signal, childNode, node.getLink(i), AccHead);
+ selectNode(childNode, node.getLink(i));
int b2 = childNode.getBalance();
if (b2 == b) {
jam();
- treeRotateSingle(signal, frag, node, i);
+ treeRotateSingle(frag, node, i);
} else if (b2 == -b) {
jam();
- treeRotateDouble(signal, frag, node, i);
+ treeRotateDouble(frag, node, i);
} else {
// height of subtree increased so it cannot be perfectly balanced
ndbrequire(false);
@@ -129,118 +160,169 @@ Dbtux::treeAdd(Signal* signal, Frag& frag, TreePos treePos, TreeEnt ent)
break;
}
i = node.getSide();
- selectNode(signal, node, parentLoc, AccHead);
+ selectNode(node, parentLoc);
}
}
/*
- * Remove entry.
+ * Remove entry. Optimize for nodes with slack. Handle the case when
+ * there is no underflow i.e. occupancy remains at least minOccup. For
+ * interior nodes this is a requirement. For others it means that we do
+ * not need to consider merge of semi-leaf and leaf.
*/
void
-Dbtux::treeRemove(Signal* signal, Frag& frag, TreePos treePos)
+Dbtux::treeRemove(Frag& frag, TreePos treePos)
{
TreeHead& tree = frag.m_tree;
unsigned pos = treePos.m_pos;
NodeHandle node(frag);
- // access full node
- selectNode(signal, node, treePos.m_loc, AccFull);
+ selectNode(node, treePos.m_loc);
TreeEnt ent;
- // check interior node first
- if (node.getChilds() == 2) {
+ if (node.getOccup() > tree.m_minOccup) {
+ // no underflow in any node type
jam();
- ndbrequire(node.getOccup() >= tree.m_minOccup);
- // check if no underflow
- if (node.getOccup() > tree.m_minOccup) {
- jam();
- nodePopDown(signal, node, pos, ent);
- return;
- }
- // save current handle
- NodeHandle parentNode = node;
- // find glb node
- TupLoc childLoc = node.getLink(0);
- while (childLoc != NullTupLoc) {
- jam();
- selectNode(signal, node, childLoc, AccHead);
- childLoc = node.getLink(1);
- }
- // access full node again
- accessNode(signal, node, AccFull);
- // use glb max as new parent min
- ent = node.getEnt(node.getOccup() - 1);
- nodePopUp(signal, parentNode, pos, ent);
- // set up to remove glb max
- pos = node.getOccup() - 1;
- // fall thru to next case
+ nodePopDown(node, pos, ent, 0);
+ return;
}
- // remove the element
- nodePopDown(signal, node, pos, ent);
- ndbrequire(node.getChilds() <= 1);
- // handle half-leaf
- unsigned i;
- for (i = 0; i <= 1; i++) {
+ if (node.getChilds() == 2) {
+ // underflow in interior node
jam();
- TupLoc childLoc = node.getLink(i);
- if (childLoc != NullTupLoc) {
- // move to child
- selectNode(signal, node, childLoc, AccFull);
- // balance of half-leaf parent requires child to be leaf
- break;
- }
+ treeRemoveInner(frag, node, pos);
+ return;
}
- ndbrequire(node.getChilds() == 0);
- // get parent if any
- TupLoc parentLoc = node.getLink(2);
- NodeHandle parentNode(frag);
- i = node.getSide();
- // move all that fits into parent
- if (parentLoc != NullTupLoc) {
+ // remove entry in semi/leaf
+ nodePopDown(node, pos, ent, 0);
+ if (node.getLink(0) != NullTupLoc) {
jam();
- selectNode(signal, parentNode, node.getLink(2), AccFull);
- nodeSlide(signal, parentNode, node, i);
- // fall thru to next case
+ treeRemoveSemi(frag, node, 0);
+ return;
}
- // non-empty leaf
- if (node.getOccup() >= 1) {
+ if (node.getLink(1) != NullTupLoc) {
jam();
+ treeRemoveSemi(frag, node, 1);
return;
}
- // remove empty leaf
- deleteNode(signal, node);
- if (parentLoc == NullTupLoc) {
+ treeRemoveLeaf(frag, node);
+}
+
+/*
+ * Remove entry when interior node underflows. There is g.l.b node in
+ * left subtree to borrow an entry from. The max entry of the g.l.b
+ * node becomes the min entry of this node.
+ */
+void
+Dbtux::treeRemoveInner(Frag& frag, NodeHandle lubNode, unsigned pos)
+{
+ TreeHead& tree = frag.m_tree;
+ TreeEnt ent;
+ // find g.l.b node
+ NodeHandle glbNode(frag);
+ TupLoc loc = lubNode.getLink(0);
+ do {
+ jam();
+ selectNode(glbNode, loc);
+ loc = glbNode.getLink(1);
+ } while (loc != NullTupLoc);
+ // borrow max entry from semi/leaf
+ Uint32 scanList = RNIL;
+ nodePopDown(glbNode, glbNode.getOccup() - 1, ent, &scanList);
+ nodePopUp(lubNode, pos, ent, scanList);
+ if (glbNode.getLink(0) != NullTupLoc) {
jam();
- // tree is now empty
- tree.m_root = NullTupLoc;
+ treeRemoveSemi(frag, glbNode, 0);
return;
}
- node = parentNode;
- node.setLink(i, NullTupLoc);
-#ifdef dbtux_min_occup_less_max_occup
- // check if we created a half-leaf
- if (node.getBalance() == 0) {
+ treeRemoveLeaf(frag, glbNode);
+}
+
+/*
+ * Handle semi-leaf after removing an entry. Move entries from leaf to
+ * semi-leaf to bring semi-leaf occupancy above minOccup, if possible.
+ * The leaf may become empty.
+ */
+void
+Dbtux::treeRemoveSemi(Frag& frag, NodeHandle semiNode, unsigned i)
+{
+ TreeHead& tree = frag.m_tree;
+ ndbrequire(semiNode.getChilds() < 2);
+ TupLoc leafLoc = semiNode.getLink(i);
+ NodeHandle leafNode(frag);
+ selectNode(leafNode, leafLoc);
+ if (semiNode.getOccup() < tree.m_minOccup) {
+ jam();
+ unsigned cnt = min(leafNode.getOccup(), tree.m_minOccup - semiNode.getOccup());
+ nodeSlide(semiNode, leafNode, cnt, i);
+ if (leafNode.getOccup() == 0) {
+ // remove empty leaf
+ jam();
+ treeRemoveNode(frag, leafNode);
+ }
+ }
+}
+
+/*
+ * Handle leaf after removing an entry. If parent is semi-leaf, move
+ * entries to it as in the semi-leaf case. If parent is interior node,
+ * do nothing.
+ */
+void
+Dbtux::treeRemoveLeaf(Frag& frag, NodeHandle leafNode)
+{
+ TreeHead& tree = frag.m_tree;
+ TupLoc parentLoc = leafNode.getLink(2);
+ if (parentLoc != NullTupLoc) {
jam();
- // move entries from the other child
- TupLoc childLoc = node.getLink(1 - i);
- NodeHandle childNode(frag);
- selectNode(signal, childNode, childLoc, AccFull);
- nodeSlide(signal, node, childNode, 1 - i);
- if (childNode.getOccup() == 0) {
+ NodeHandle parentNode(frag);
+ selectNode(parentNode, parentLoc);
+ unsigned i = leafNode.getSide();
+ if (parentNode.getLink(1 - i) == NullTupLoc) {
+ // parent is semi-leaf
jam();
- deleteNode(signal, childNode);
- node.setLink(1 - i, NullTupLoc);
- // we are balanced again but our parent balance changes by -1
- parentLoc = node.getLink(2);
- if (parentLoc == NullTupLoc) {
+ if (parentNode.getOccup() < tree.m_minOccup) {
jam();
- return;
+ unsigned cnt = min(leafNode.getOccup(), tree.m_minOccup - parentNode.getOccup());
+ nodeSlide(parentNode, leafNode, cnt, i);
}
- // fix side and become parent
- i = node.getSide();
- selectNode(signal, node, parentLoc, AccHead);
}
}
-#endif
- // re-balance tree at each node
+ if (leafNode.getOccup() == 0) {
+ jam();
+ // remove empty leaf
+ treeRemoveNode(frag, leafNode);
+ }
+}
+
+/*
+ * Remove empty leaf.
+ */
+void
+Dbtux::treeRemoveNode(Frag& frag, NodeHandle leafNode)
+{
+ TreeHead& tree = frag.m_tree;
+ ndbrequire(leafNode.getChilds() == 0);
+ TupLoc parentLoc = leafNode.getLink(2);
+ unsigned i = leafNode.getSide();
+ deleteNode(leafNode);
+ if (parentLoc != NullTupLoc) {
+ jam();
+ NodeHandle parentNode(frag);
+ selectNode(parentNode, parentLoc);
+ parentNode.setLink(i, NullTupLoc);
+ // re-balance the tree
+ treeRemoveRebalance(frag, parentNode, i);
+ return;
+ }
+ // tree is now empty
+ tree.m_root = NullTupLoc;
+}
+
+/*
+ * Re-balance tree after removing a node. The process starts with the
+ * parent of the removed node.
+ */
+void
+Dbtux::treeRemoveRebalance(Frag& frag, NodeHandle node, unsigned i)
+{
while (true) {
// height of subtree i has decreased by 1
int j = (i == 0 ? -1 : +1);
@@ -261,19 +343,19 @@ Dbtux::treeRemove(Signal* signal, Frag& frag, TreePos treePos)
jam();
// child on the other side
NodeHandle childNode(frag);
- selectNode(signal, childNode, node.getLink(1 - i), AccHead);
+ selectNode(childNode, node.getLink(1 - i));
int b2 = childNode.getBalance();
if (b2 == b) {
jam();
- treeRotateSingle(signal, frag, node, 1 - i);
+ treeRotateSingle(frag, node, 1 - i);
// height of tree decreased and propagates up
} else if (b2 == -b) {
jam();
- treeRotateDouble(signal, frag, node, 1 - i);
+ treeRotateDouble(frag, node, 1 - i);
// height of tree decreased and propagates up
} else {
jam();
- treeRotateSingle(signal, frag, node, 1 - i);
+ treeRotateSingle(frag, node, 1 - i);
// height of tree did not change - done
return;
}
@@ -287,7 +369,7 @@ Dbtux::treeRemove(Signal* signal, Frag& frag, TreePos treePos)
return;
}
i = node.getSide();
- selectNode(signal, node, parentLoc, AccHead);
+ selectNode(node, parentLoc);
}
}
@@ -308,10 +390,7 @@ Dbtux::treeRemove(Signal* signal, Frag& frag, TreePos treePos)
* all optional. If 4 are there it changes side.
*/
void
-Dbtux::treeRotateSingle(Signal* signal,
- Frag& frag,
- NodeHandle& node,
- unsigned i)
+Dbtux::treeRotateSingle(Frag& frag, NodeHandle& node, unsigned i)
{
ndbrequire(i <= 1);
/*
@@ -331,7 +410,7 @@ Dbtux::treeRotateSingle(Signal* signal,
*/
TupLoc loc3 = node5.getLink(i);
NodeHandle node3(frag);
- selectNode(signal, node3, loc3, AccHead);
+ selectNode(node3, loc3);
const int bal3 = node3.getBalance();
/*
2 must always be there but is not changed. Thus we merely check that it
@@ -348,7 +427,7 @@ Dbtux::treeRotateSingle(Signal* signal,
NodeHandle node4(frag);
if (loc4 != NullTupLoc) {
jam();
- selectNode(signal, node4, loc4, AccHead);
+ selectNode(node4, loc4);
ndbrequire(node4.getSide() == (1 - i) &&
node4.getLink(2) == loc3);
node4.setSide(i);
@@ -383,7 +462,7 @@ Dbtux::treeRotateSingle(Signal* signal,
if (loc0 != NullTupLoc) {
jam();
NodeHandle node0(frag);
- selectNode(signal, node0, loc0, AccHead);
+ selectNode(node0, loc0);
node0.setLink(side5, loc3);
} else {
jam();
@@ -520,8 +599,10 @@ Dbtux::treeRotateSingle(Signal* signal,
*
*/
void
-Dbtux::treeRotateDouble(Signal* signal, Frag& frag, NodeHandle& node, unsigned i)
+Dbtux::treeRotateDouble(Frag& frag, NodeHandle& node, unsigned i)
{
+ TreeHead& tree = frag.m_tree;
+
// old top node
NodeHandle node6 = node;
const TupLoc loc6 = node6.m_loc;
@@ -532,13 +613,13 @@ Dbtux::treeRotateDouble(Signal* signal, Frag& frag, NodeHandle& node, unsigned i
// level 1
TupLoc loc2 = node6.getLink(i);
NodeHandle node2(frag);
- selectNode(signal, node2, loc2, AccHead);
+ selectNode(node2, loc2);
const int bal2 = node2.getBalance();
// level 2
TupLoc loc4 = node2.getLink(1 - i);
NodeHandle node4(frag);
- selectNode(signal, node4, loc4, AccHead);
+ selectNode(node4, loc4);
const int bal4 = node4.getBalance();
ndbrequire(i <= 1);
@@ -555,25 +636,26 @@ Dbtux::treeRotateDouble(Signal* signal, Frag& frag, NodeHandle& node, unsigned i
// fill up leaf before it becomes internal
if (loc3 == NullTupLoc && loc5 == NullTupLoc) {
jam();
- TreeHead& tree = frag.m_tree;
- accessNode(signal, node2, AccFull);
- accessNode(signal, node4, AccFull);
- nodeSlide(signal, node4, node2, i);
- // implied by rule of merging half-leaves with leaves
- ndbrequire(node4.getOccup() >= tree.m_minOccup);
- ndbrequire(node2.getOccup() != 0);
+ if (node4.getOccup() < tree.m_minOccup) {
+ jam();
+ unsigned cnt = tree.m_minOccup - node4.getOccup();
+ ndbrequire(cnt < node2.getOccup());
+ nodeSlide(node4, node2, cnt, i);
+ ndbrequire(node4.getOccup() >= tree.m_minOccup);
+ ndbrequire(node2.getOccup() != 0);
+ }
} else {
if (loc3 != NullTupLoc) {
jam();
NodeHandle node3(frag);
- selectNode(signal, node3, loc3, AccHead);
+ selectNode(node3, loc3);
node3.setLink(2, loc2);
node3.setSide(1 - i);
}
if (loc5 != NullTupLoc) {
jam();
NodeHandle node5(frag);
- selectNode(signal, node5, loc5, AccHead);
+ selectNode(node5, loc5);
node5.setLink(2, node6.m_loc);
node5.setSide(i);
}
@@ -596,7 +678,7 @@ Dbtux::treeRotateDouble(Signal* signal, Frag& frag, NodeHandle& node, unsigned i
if (loc0 != NullTupLoc) {
jam();
- selectNode(signal, node0, loc0, AccHead);
+ selectNode(node0, loc0);
node0.setLink(side6, loc4);
} else {
jam();
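
Both treeAddFull and treeRemoveInner above walk to the greatest-lower-bound (g.l.b) node of the current node: one step down link 0, then link 1 until NullTupLoc. A toy sketch of the same walk on ordinary left/right pointers; the struct and names are hypothetical, not the NDB NodeHandle.

#include <cassert>
#include <cstddef>

struct Node {
  int val;
  Node* left;
  Node* right;
};

// Greatest lower bound of 'n': the maximum of its left subtree,
// or NULL when there is no left subtree at all.
static Node* glbNode(Node* n)
{
  Node* cur = n->left;            // one step into the left subtree
  if (cur == NULL)
    return NULL;
  while (cur->right != NULL)      // then follow right links to the max
    cur = cur->right;
  return cur;
}

int main()
{
  Node a = {1, NULL, NULL};
  Node b = {3, NULL, NULL};
  Node l = {2, &a, &b};           // left subtree whose max is 3
  Node root = {5, &l, NULL};
  assert(glbNode(&root) == &b);   // 3 is the in-order predecessor of 5
  assert(glbNode(&a) == NULL);    // a leaf has no left subtree
  return 0;
}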
diff --git a/ndb/src/kernel/blocks/dbtux/Times.txt b/ndb/src/kernel/blocks/dbtux/Times.txt
index c4744a23c07..1e6d0a0a329 100644
--- a/ndb/src/kernel/blocks/dbtux/Times.txt
+++ b/ndb/src/kernel/blocks/dbtux/Times.txt
@@ -13,7 +13,7 @@ case c: full scan: index on PK Unsigned
testOIBasic -case v -table 1 -index 1 -fragtype small -threads 10 -rows 100000 -subloop 1 -nologging
case d: scan 1 tuple via EQ: index on PK Unsigned
-testOIBasic -case w -table 1 -index 1 -fragtype small -threads 10 -rows 100000 -samples 10000 -subloop 1 -nologging -v2
+testOIBasic -case w -table 1 -index 1 -fragtype small -threads 10 -rows 100000 -samples 50000 -subloop 1 -nologging -v2
a, b
1 million rows, pk update without index, pk update with index
@@ -28,6 +28,9 @@ d
shows ms / 1000 rows for each and index time overhead
samples 10% of all PKs (100,000 pk reads, 100,000 scans)
+the "pct" values are from more accurate total times (not shown)
+comments [ ... ] are after the case
+
040616 mc02/a 40 ms 87 ms 114 pct
mc02/b 51 ms 128 ms 148 pct
@@ -74,12 +77,67 @@ optim 13 mc02/a 40 ms 57 ms 42 pct
mc02/c 9 ms 13 ms 50 pct
mc02/d 170 ms 256 ms 50 pct
-after wl-1884 store all-NULL keys (the tests have pctnull=10 per column)
-[ what happened to PK read performance? ]
-
optim 13 mc02/a 39 ms 59 ms 50 pct
mc02/b 47 ms 77 ms 61 pct
mc02/c 9 ms 12 ms 44 pct
mc02/d 246 ms 289 ms 17 pct
+[ after wl-1884 store all-NULL keys (the tests have pctnull=10 per column) ]
+[ case d: bug in testOIBasic killed PK read performance ]
+
+optim 14 mc02/a 41 ms 60 ms 44 pct
+ mc02/b 46 ms 81 ms 73 pct
+ mc02/c 9 ms 13 ms 37 pct
+ mc02/d 242 ms 285 ms 17 pct
+
+[ case b: do long keys suffer from many subroutine calls? ]
+[ case d: bug in testOIBasic killed PK read performance ]
+
+none mc02/a 35 ms 60 ms 71 pct
+ mc02/b 42 ms 75 ms 76 pct
+ mc02/c 5 ms 12 ms 106 pct
+ mc02/d 165 ms 238 ms 44 pct
+
+[ johan re-installed mc02 as fedora gcc-3.3.2, tux uses more C++ stuff than tup ]
+
+charsets mc02/a 35 ms 60 ms 71 pct
+ mc02/b 42 ms 84 ms 97 pct
+ mc02/c 5 ms 12 ms 109 pct
+ mc02/d 190 ms 236 ms 23 pct
+
+[ case b: TUX can no longer use pointers to TUP data ]
+
+optim 15 mc02/a 34 ms 60 ms 72 pct
+ mc02/b 42 ms 85 ms 100 pct
+ mc02/c 5 ms 12 ms 110 pct
+ mc02/d 178 ms 242 ms 35 pct
+
+[ corrected wasted space in index node ]
+
+optim 16 mc02/a 34 ms 53 ms 53 pct
+ mc02/b 42 ms 75 ms 75 pct
+
+[ binary search of bounding node when adding entry ]
+
+none mc02/a 35 ms 53 ms 51 pct
+ mc02/b 42 ms 75 ms 76 pct
+
+[ rewrote treeAdd / treeRemove ]
+
+optim 17 mc02/a 35 ms 52 ms 49 pct
+ mc02/b 43 ms 75 ms 75 pct
+
+[ allow slack (2) in interior nodes - almost no effect?? ]
+
+wl-1942 mc02/a 35 ms 52 ms 49 pct
+ mc02/b 42 ms 75 ms 76 pct
+
+before mc02/c 5 ms 13 ms 126 pct
+ mc02/d 134 ms 238 ms 78 pct
+
+after mc02/c 5 ms 10 ms 70 pct
+ mc02/d 178 ms 242 ms 69 pct
+
+[ prelim performance fix for max batch size 16 -> 992 ]
+
vim: set et:
diff --git a/ndb/src/kernel/blocks/dbutil/DbUtil.cpp b/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
index ecaead3ba5a..f7e8981e122 100644
--- a/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
+++ b/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
@@ -112,7 +112,7 @@ DbUtil::DbUtil(const Configuration & conf) :
addRecSignal(GSN_UTIL_RELEASE_CONF, &DbUtil::execUTIL_RELEASE_CONF);
addRecSignal(GSN_UTIL_RELEASE_REF, &DbUtil::execUTIL_RELEASE_REF);
- c_pagePool.setSize(100);
+ c_pagePool.setSize(10);
c_preparePool.setSize(1); // one parallel prepare at a time
c_preparedOperationPool.setSize(5); // three hardcoded, two for test
c_operationPool.setSize(64); // 64 parallel operations
@@ -1059,6 +1059,7 @@ DbUtil::prepareOperation(Signal* signal, PreparePtr prepPtr)
ndbrequire(prepPagesReader.getValueLen() <= MAX_ATTR_NAME_SIZE);
prepPagesReader.getString(attrNameRequested);
+ attrIdRequested= ~0u;
} else {
jam();
attrIdRequested = prepPagesReader.getUint32();
@@ -1083,7 +1084,7 @@ DbUtil::prepareOperation(Signal* signal, PreparePtr prepPtr)
************************/
DictTabInfo::Attribute attrDesc; attrDesc.init();
char attrName[MAX_ATTR_NAME_SIZE];
- Uint32 attrId;
+ Uint32 attrId= ~(Uint32)0;
bool attributeFound = false;
Uint32 noOfKeysFound = 0; // # PK attrs found before attr in DICTdata
Uint32 noOfNonKeysFound = 0; // # nonPK attrs found before attr in DICTdata
@@ -1093,11 +1094,13 @@ DbUtil::prepareOperation(Signal* signal, PreparePtr prepPtr)
ndbrequire(dictInfoReader.getKey() == DictTabInfo::AttributeName);
ndbrequire(dictInfoReader.getValueLen() <= MAX_ATTR_NAME_SIZE);
dictInfoReader.getString(attrName);
+ attrId= ~(Uint32)0; // attrId not used
} else { // (tableKey == UtilPrepareReq::TableId)
jam();
dictInfoReader.next(); // Skip name
ndbrequire(dictInfoReader.getKey() == DictTabInfo::AttributeId);
attrId = dictInfoReader.getUint32();
+ attrName[0]= '\0'; // attrName not used
}
unpackStatus = SimpleProperties::unpack(dictInfoReader, &attrDesc,
DictTabInfo::AttributeMapping,
@@ -1493,6 +1496,7 @@ DbUtil::execUTIL_SEQUENCE_REQ(Signal* signal){
break;
default:
ndbrequire(false);
+ prepOp = 0; // remove warning
}
/**
diff --git a/ndb/src/kernel/blocks/dbutil/DbUtil.hpp b/ndb/src/kernel/blocks/dbutil/DbUtil.hpp
index c6e15a3c539..5499970fde3 100644
--- a/ndb/src/kernel/blocks/dbutil/DbUtil.hpp
+++ b/ndb/src/kernel/blocks/dbutil/DbUtil.hpp
@@ -37,7 +37,7 @@
#include <signaldata/UtilLock.hpp>
#include <SimpleProperties.hpp>
-#define UTIL_WORDS_PER_PAGE 8191
+#define UTIL_WORDS_PER_PAGE 1023
/**
* @class DbUtil
diff --git a/ndb/src/kernel/blocks/grep/Grep.cpp b/ndb/src/kernel/blocks/grep/Grep.cpp
index 8b93ef9cd20..0e41182348f 100644
--- a/ndb/src/kernel/blocks/grep/Grep.cpp
+++ b/ndb/src/kernel/blocks/grep/Grep.cpp
@@ -619,6 +619,12 @@ Grep::PSCoord::execCREATE_SUBID_REF(Signal* signal) {
{
jam();
err = GrepError::SUBSCRIPTION_ID_SUMA_FAILED_CREATE;
+ } else {
+ jam();
    ndbrequire(false); /* Added since error code err was unhandled
                        * TODO: set the correct error code
+ */
+ err= GrepError::NO_ERROR; // remove compiler warning
}
SubCoordinatorPtr subPtr;
@@ -1819,6 +1825,7 @@ Grep::PSCoord::sendRefToSS(Signal * signal,
break;
default:
ndbrequire(false);
+ event= GrepEvent::Rep_Disconnect; // remove compiler warning
}
/**
* Finally, send an event.
@@ -1906,6 +1913,7 @@ Grep::PSPart::sendRefToPSCoord(Signal * signal,
break;
default:
ndbrequire(false);
+ event= GrepEvent::Rep_Disconnect; // remove compiler warning
}
/**
diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp b/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
index 1069cf93b06..43044eeebcd 100644
--- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
+++ b/ndb/src/kernel/blocks/ndbcntr/NdbcntrInit.cpp
@@ -37,10 +37,10 @@ void Ndbcntr::initRecords()
Ndbcntr::Ndbcntr(const class Configuration & conf):
SimulatedBlock(NDBCNTR, conf),
- c_stopRec(* this),
- c_missra(* this),
cnoWaitrep6(0),
- cnoWaitrep7(0)
+ cnoWaitrep7(0),
+ c_stopRec(* this),
+ c_missra(* this)
{
BLOCK_CONSTRUCTOR(Ndbcntr);
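
The NdbcntrInit.cpp hunk above only reorders the constructor's initializer list, presumably to match the members' declaration order. C++ initializes members in declaration order regardless of how the list is written, so an out-of-order list earns a -Wreorder style warning and can mislead about which member is ready first. A minimal illustration with a hypothetical class, unrelated to Ndbcntr:

#include <iostream>

struct Widget {
  int a;          // declared first, so initialized first
  int b;          // declared second, so initialized second
  // The initializers run in declaration order (a, then b) no matter how
  // the list is written; writing b before a in the list draws a warning.
  Widget() : a(1), b(a + 1) {}
};

int main()
{
  Widget w;
  std::cout << w.a << " " << w.b << std::endl;  // prints "1 2"
  return 0;
}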
diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
index ff4876b1506..089cf613b03 100644
--- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
+++ b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
@@ -103,7 +103,7 @@ void Ndbcntr::execCONTINUEB(Signal* signal)
}
Uint64 now = NdbTick_CurrentMillisecond();
- if(c_start.m_startFailureTimeout > now){
+ if(now > c_start.m_startFailureTimeout){
ndbrequire(false);
}
@@ -135,42 +135,42 @@ void Ndbcntr::execSYSTEM_ERROR(Signal* signal)
jamEntry();
switch (sysErr->errorCode){
case SystemError::StartInProgressError:
- snprintf(buf, sizeof(buf),
+ BaseString::snprintf(buf, sizeof(buf),
"Node %d killed this node because "
"master start in progress error",
killingNode);
break;
case SystemError::GCPStopDetected:
- snprintf(buf, sizeof(buf),
+ BaseString::snprintf(buf, sizeof(buf),
"Node %d killed this node because "
"GCP stop was detected",
killingNode);
break;
case SystemError::ScanfragTimeout:
- snprintf(buf, sizeof(buf),
+ BaseString::snprintf(buf, sizeof(buf),
"Node %d killed this node because "
"a fragment scan timed out and could not be stopped",
killingNode);
break;
case SystemError::ScanfragStateError:
- snprintf(buf, sizeof(buf),
+ BaseString::snprintf(buf, sizeof(buf),
"Node %d killed this node because "
"the state of a fragment scan was out of sync.",
killingNode);
break;
case SystemError::CopyFragRefError:
- snprintf(buf, sizeof(buf),
+ BaseString::snprintf(buf, sizeof(buf),
"Node %d killed this node because "
"it could not copy a fragment during node restart",
killingNode);
break;
default:
- snprintf(buf, sizeof(buf), "System error %d, "
+ BaseString::snprintf(buf, sizeof(buf), "System error %d, "
" this node was killed by node %d",
sysErr->errorCode, killingNode);
break;
@@ -446,13 +446,17 @@ void Ndbcntr::execREAD_NODESCONF(Signal* signal)
ndb_mgm_get_int_parameter(p, CFG_DB_START_PARTITION_TIMEOUT, &to_2);
ndb_mgm_get_int_parameter(p, CFG_DB_START_FAILURE_TIMEOUT, &to_3);
+ c_start.m_startTime = NdbTick_CurrentMillisecond();
c_start.m_startPartialTimeout = setTimeout(c_start.m_startTime, to_1);
c_start.m_startPartitionedTimeout = setTimeout(c_start.m_startTime, to_2);
c_start.m_startFailureTimeout = setTimeout(c_start.m_startTime, to_3);
-
+
UpgradeStartup::sendCmAppChg(* this, signal, 0); // ADD
sendCntrStartReq(signal);
+
+ signal->theData[0] = ZSTARTUP;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1000, 1);
return;
}
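
In the execCONTINUEB hunk above, m_startFailureTimeout is set via setTimeout(m_startTime, to_3), so it reads as an absolute deadline; the inverted comparison makes the failure path fire only once the current time has moved past that deadline, not before. A standalone sketch of the deadline pattern with made-up names (not the NDB setTimeout):

#include <cassert>

typedef unsigned long long Millis;

// Absolute deadline = start time + configured timeout.
static Millis setDeadline(Millis start, Millis timeoutMs)
{
  return start + timeoutMs;
}

// True only once the deadline has been passed.
static bool deadlineExpired(Millis now, Millis deadline)
{
  return now > deadline;
}

int main()
{
  Millis start = 1000;
  Millis deadline = setDeadline(start, 500);   // expires at 1500
  assert(!deadlineExpired(1200, deadline));    // still waiting
  assert(deadlineExpired(1600, deadline));     // timed out
  return 0;
}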
diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp b/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp
index 40e6aa2dcd7..2a65271a32a 100644
--- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp
+++ b/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp
@@ -46,7 +46,7 @@ Ndbcntr::g_sysTable_SYSTAB_0 = {
static const Ndbcntr::SysColumn
column_NDBEVENTS_0[] = {
{ 0, "NAME",
- DictTabInfo::ExtChar, MAX_TAB_NAME_SIZE,
+ DictTabInfo::ExtBinary, MAX_TAB_NAME_SIZE,
true, false
},
{ 1, "EVENT_TYPE",
@@ -54,7 +54,7 @@ column_NDBEVENTS_0[] = {
false, false
},
{ 2, "TABLE_NAME",
- DictTabInfo::ExtChar, MAX_TAB_NAME_SIZE,
+ DictTabInfo::ExtBinary, MAX_TAB_NAME_SIZE,
false, false
},
{ 3, "ATTRIBUTE_MASK",
diff --git a/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp b/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
index 18e46d2d004..a02bfd459b3 100644
--- a/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
+++ b/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
@@ -108,8 +108,10 @@ AsyncFile::AsyncFile() :
}
void
-AsyncFile::doStart(Uint32 nodeId, const char * filesystemPath) {
- theFileName.init(nodeId, filesystemPath);
+AsyncFile::doStart(Uint32 nodeId,
+ const char * filesystemPath,
+ const char * backup_path) {
+ theFileName.init(nodeId, filesystemPath, backup_path);
// Stacksize for filesystem threads
// An 8k stack should be enough
@@ -117,7 +119,7 @@ AsyncFile::doStart(Uint32 nodeId, const char * filesystemPath) {
char buf[16];
numAsyncFiles++;
- snprintf(buf, sizeof(buf), "AsyncFile%d", numAsyncFiles);
+ BaseString::snprintf(buf, sizeof(buf), "AsyncFile%d", numAsyncFiles);
theStartMutexPtr = NdbMutex_Create();
theStartConditionPtr = NdbCondition_Create();
@@ -510,7 +512,7 @@ AsyncFile::extendfile(Request* request) {
DEBUG(ndbout_c("extendfile: maxOffset=%d, size=%d", maxOffset, maxSize));
// Allocate a buffer and fill it with zeros
- void* pbuf = malloc(maxSize);
+ void* pbuf = NdbMem_Allocate(maxSize);
memset(pbuf, 0, maxSize);
for (int p = 0; p <= maxOffset; p = p + maxSize) {
int return_value;
@@ -814,7 +816,7 @@ AsyncFile::rmrfReq(Request * request, char * path, bool removePath){
struct dirent * dp;
while ((dp = readdir(dirp)) != NULL){
if ((strcmp(".", dp->d_name) != 0) && (strcmp("..", dp->d_name) != 0)) {
- snprintf(path_add, (size_t)path_max_copy, "%s%s",
+ BaseString::snprintf(path_add, (size_t)path_max_copy, "%s%s",
DIR_SEPARATOR, dp->d_name);
if(remove((const char*)path) == 0){
path[path_len] = 0;
diff --git a/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp b/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp
index 9a405bc1580..2176c93c5d5 100644
--- a/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp
+++ b/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp
@@ -181,7 +181,7 @@ public:
void execute( Request* request );
- void doStart(Uint32 nodeId, const char * fspath);
+ void doStart(Uint32 nodeId, const char * fspath, const char * backup_path);
// its a thread so its always running
void run();
diff --git a/ndb/src/kernel/blocks/ndbfs/Filename.cpp b/ndb/src/kernel/blocks/ndbfs/Filename.cpp
index 660fe6eee94..15158ec19ef 100644
--- a/ndb/src/kernel/blocks/ndbfs/Filename.cpp
+++ b/ndb/src/kernel/blocks/ndbfs/Filename.cpp
@@ -46,43 +46,31 @@ Filename::Filename() :
}
void
-Filename::init(Uint32 nodeid, const char * pFileSystemPath){
+Filename::init(Uint32 nodeid,
+ const char * pFileSystemPath,
+ const char * pBackupDirPath){
+ DBUG_ENTER("Filename::init");
+
if (pFileSystemPath == NULL) {
ERROR_SET(fatal, AFS_ERROR_NOPATH, ""," Filename::init()");
return;
}
- strncpy(theBaseDirectory, pFileSystemPath, PATH_MAX);
-
- // the environment variable is set,
- // check that it is pointing on a valid directory
- //
- char buf2[PATH_MAX]; memset(buf2, 0,sizeof(buf2));
-#ifdef NDB_WIN32
- char* szFilePart;
- if(!GetFullPathName(theBaseDirectory, sizeof(buf2), buf2, &szFilePart)
- || (::GetFileAttributes(theBaseDirectory)&FILE_ATTRIBUTE_READONLY))
-#else
- if((::realpath(theBaseDirectory, buf2) == NULL)||
- (::access(theBaseDirectory, W_OK) != 0))
-#endif
- {
- ERROR_SET(fatal, AFS_ERROR_INVALIDPATH, pFileSystemPath, " Filename::init()");
- }
- strncpy(theBaseDirectory, buf2, sizeof(theBaseDirectory));
- // path seems ok, add delimiter if missing
- if (strcmp(&theBaseDirectory[strlen(theBaseDirectory) - 1],
- DIR_SEPARATOR) != 0)
- strcat(theBaseDirectory, DIR_SEPARATOR);
-
- snprintf(buf2, sizeof(buf2), "ndb_%u_fs%s", nodeid, DIR_SEPARATOR);
- strcat(theBaseDirectory, buf2);
+ BaseString::snprintf(theFileSystemDirectory, sizeof(theFileSystemDirectory),
+ "%sndb_%u_fs%s", pFileSystemPath, nodeid, DIR_SEPARATOR);
+ strncpy(theBackupDirectory, pBackupDirPath, sizeof(theBackupDirectory));
+
+ DBUG_PRINT("info", ("theFileSystemDirectory=%s", theFileSystemDirectory));
+ DBUG_PRINT("info", ("theBackupDirectory=%s", theBackupDirectory));
#ifdef NDB_WIN32
- CreateDirectory(theBaseDirectory, 0);
+ CreateDirectory(theFileSystemDirectory, 0);
#else
- mkdir(theBaseDirectory, S_IRUSR | S_IWUSR | S_IXUSR | S_IXGRP | S_IRGRP);
+ mkdir(theFileSystemDirectory, S_IRUSR | S_IWUSR | S_IXUSR | S_IXGRP | S_IRGRP);
#endif
+ theBaseDirectory= 0;
+
+ DBUG_VOID_RETURN;
}
Filename::~Filename(){
@@ -94,10 +82,16 @@ Filename::set(BlockReference blockReference,
{
char buf[PATH_MAX];
theLevelDepth = 0;
- strncpy(theName, theBaseDirectory, PATH_MAX);
-
+
const Uint32 type = FsOpenReq::getSuffix(filenumber);
const Uint32 version = FsOpenReq::getVersion(filenumber);
+
+ if (version == 2)
+ theBaseDirectory= theBackupDirectory;
+ else
+ theBaseDirectory= theFileSystemDirectory;
+ strncpy(theName, theBaseDirectory, PATH_MAX);
+
switch(version){
case 1 :{
const Uint32 diskNo = FsOpenReq::v1_getDisk(filenumber);
@@ -107,7 +101,7 @@ Filename::set(BlockReference blockReference,
const Uint32 P_val = FsOpenReq::v1_getP(filenumber);
if (diskNo < 0xff){
- snprintf(buf, sizeof(buf), "D%d%s", diskNo, DIR_SEPARATOR);
+ BaseString::snprintf(buf, sizeof(buf), "D%d%s", diskNo, DIR_SEPARATOR);
strcat(theName, buf);
theLevelDepth++;
}
@@ -118,31 +112,31 @@ Filename::set(BlockReference blockReference,
ERROR_SET(ecError, AFS_ERROR_PARAMETER,"","No Block Name");
return;
}
- snprintf(buf, sizeof(buf), "%s%s", blockName, DIR_SEPARATOR);
+ BaseString::snprintf(buf, sizeof(buf), "%s%s", blockName, DIR_SEPARATOR);
strcat(theName, buf);
theLevelDepth++;
}
if (table < 0xffffffff){
- snprintf(buf, sizeof(buf), "T%d%s", table, DIR_SEPARATOR);
+ BaseString::snprintf(buf, sizeof(buf), "T%d%s", table, DIR_SEPARATOR);
strcat(theName, buf);
theLevelDepth++;
}
if (frag < 0xffffffff){
- snprintf(buf, sizeof(buf), "F%d%s", frag, DIR_SEPARATOR);
+ BaseString::snprintf(buf, sizeof(buf), "F%d%s", frag, DIR_SEPARATOR);
strcat(theName, buf);
theLevelDepth++;
}
if (S_val < 0xffffffff){
- snprintf(buf, sizeof(buf), "S%d", S_val);
+ BaseString::snprintf(buf, sizeof(buf), "S%d", S_val);
strcat(theName, buf);
}
if (P_val < 0xff){
- snprintf(buf, sizeof(buf), "P%d", P_val);
+ BaseString::snprintf(buf, sizeof(buf), "P%d", P_val);
strcat(theName, buf);
}
@@ -153,14 +147,14 @@ Filename::set(BlockReference blockReference,
const Uint32 nodeId = FsOpenReq::v2_getNodeId(filenumber);
const Uint32 count = FsOpenReq::v2_getCount(filenumber);
- snprintf(buf, sizeof(buf), "BACKUP%sBACKUP-%d%s",
+ BaseString::snprintf(buf, sizeof(buf), "BACKUP%sBACKUP-%d%s",
DIR_SEPARATOR, seq, DIR_SEPARATOR);
strcat(theName, buf);
if(count == 0xffffffff) {
- snprintf(buf, sizeof(buf), "BACKUP-%d.%d",
+ BaseString::snprintf(buf, sizeof(buf), "BACKUP-%d.%d",
seq, nodeId); strcat(theName, buf);
} else {
- snprintf(buf, sizeof(buf), "BACKUP-%d-%d.%d",
+ BaseString::snprintf(buf, sizeof(buf), "BACKUP-%d-%d.%d",
seq, count, nodeId); strcat(theName, buf);
}
theLevelDepth = 2;
@@ -174,7 +168,7 @@ Filename::set(BlockReference blockReference,
ERROR_SET(ecError, AFS_ERROR_PARAMETER,"","Invalid disk specification");
}
- snprintf(buf, sizeof(buf), "D%d%s", diskNo, DIR_SEPARATOR);
+ BaseString::snprintf(buf, sizeof(buf), "D%d%s", diskNo, DIR_SEPARATOR);
strcat(theName, buf);
theLevelDepth++;
}
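
Filename::set above now picks its base directory from the file number's version field: version 2 (backup files) resolves against the backup directory, everything else against the node's ndb_<nodeid>_fs tree. A tiny sketch of that dispatch with placeholder paths:

#include <cassert>
#include <cstring>

// Version 2 file numbers are backup files; they live under the backup
// directory instead of the node's filesystem directory.
static const char* baseDirFor(unsigned version,
                              const char* fsDir,
                              const char* backupDir)
{
  return (version == 2) ? backupDir : fsDir;
}

int main()
{
  const char* fs = "/data/ndb_2_fs/";
  const char* backup = "/backups/";
  assert(strcmp(baseDirFor(1, fs, backup), fs) == 0);
  assert(strcmp(baseDirFor(2, fs, backup), backup) == 0);
  return 0;
}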
diff --git a/ndb/src/kernel/blocks/ndbfs/Filename.hpp b/ndb/src/kernel/blocks/ndbfs/Filename.hpp
index 25c06092436..249c1b1ca10 100644
--- a/ndb/src/kernel/blocks/ndbfs/Filename.hpp
+++ b/ndb/src/kernel/blocks/ndbfs/Filename.hpp
@@ -67,13 +67,16 @@ public:
const char* directory(int level);
int levels() const;
const char* c_str() const;
-
- void init(Uint32 nodeid, const char * fileSystemPath);
+
+ void init(Uint32 nodeid, const char * fileSystemPath,
+ const char * backupDirPath);
private:
int theLevelDepth;
char theName[PATH_MAX];
- char theBaseDirectory[PATH_MAX];
+ char theFileSystemDirectory[PATH_MAX];
+ char theBackupDirectory[PATH_MAX];
+ char *theBaseDirectory;
char theDirectory[PATH_MAX];
};
diff --git a/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp b/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
index 3b8cb20fe5c..56e3d3abbed 100644
--- a/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
+++ b/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
@@ -58,6 +58,8 @@ Ndbfs::Ndbfs(const Configuration & conf) :
m_maxOpenedFiles(0)
{
theFileSystemPath = conf.fileSystemPath();
+ theBackupFilePath = conf.backupFilePath();
+
theRequestPool = new Pool<Request>;
const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
@@ -67,7 +69,7 @@ Ndbfs::Ndbfs(const Configuration & conf) :
//ndb_mgm_get_int_parameter(p, CFG_DB_MAX_OPEN_FILES, &m_maxFiles);
// Create idle AsyncFiles
- Uint32 noIdleFiles = 16;
+ Uint32 noIdleFiles = 27;
for (Uint32 i = 0; i < noIdleFiles; i++){
theIdleFiles.push_back(createAsyncFile());
}
@@ -559,7 +561,7 @@ Ndbfs::createAsyncFile(){
}
AsyncFile* file = new AsyncFile;
- file->doStart(getOwnNodeId(), theFileSystemPath);
+ file->doStart(getOwnNodeId(), theFileSystemPath, theBackupFilePath);
// Put the file in list of all files
theFiles.push_back(file);
diff --git a/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp b/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp
index 080196a9ea5..c5aaa4e5c49 100644
--- a/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp
+++ b/ndb/src/kernel/blocks/ndbfs/Ndbfs.hpp
@@ -80,6 +80,7 @@ private:
Vector<AsyncFile*> theIdleFiles; // List of idle AsyncFiles
OpenFiles theOpenFiles; // List of open AsyncFiles
const char * theFileSystemPath;
+ const char * theBackupFilePath;
// Statistics variables
Uint32 m_maxOpenedFiles;
diff --git a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
index 6017365a463..41deb3403c8 100644
--- a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
+++ b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
@@ -592,7 +592,7 @@ void Qmgr::execCM_REGCONF(Signal* signal)
if (!ndbCompatible_ndb_ndb(NDB_VERSION, cmRegConf->presidentVersion)) {
jam();
char buf[128];
- snprintf(buf,sizeof(buf),"incompatible version own=0x%x other=0x%x, shutting down", NDB_VERSION, cmRegConf->presidentVersion);
+ BaseString::snprintf(buf,sizeof(buf),"incompatible version own=0x%x other=0x%x, shutting down", NDB_VERSION, cmRegConf->presidentVersion);
systemErrorLab(signal, buf);
return;
}
@@ -1666,7 +1666,7 @@ void Qmgr::checkStartInterface(Signal* signal)
} else {
if(((nodePtr.p->alarmCount + 1) % 60) == 0){
char buf[100];
- snprintf(buf, sizeof(buf),
+ BaseString::snprintf(buf, sizeof(buf),
"Failure handling of node %d has not completed in %d min."
" - state = %d",
nodePtr.i,
@@ -1760,8 +1760,8 @@ void Qmgr::execAPI_FAILCONF(Signal* signal)
} else {
jam();
#ifdef VM_TRACE
- ndbout << "failedNodePtr.p->failState = " << failedNodePtr.p->failState
- << endl;
+ ndbout << "failedNodePtr.p->failState = "
+ << (Uint32)(failedNodePtr.p->failState) << endl;
#endif
systemErrorLab(signal);
}//if
@@ -1932,10 +1932,6 @@ void Qmgr::execAPI_REGREQ(Signal* signal)
bool compatability_check;
switch(getNodeInfo(apiNodePtr.i).getType()){
- case NodeInfo::DB:
- case NodeInfo::INVALID:
- sendApiRegRef(signal, ref, ApiRegRef::WrongType);
- return;
case NodeInfo::API:
compatability_check = ndbCompatible_ndb_api(NDB_VERSION, version);
break;
@@ -1945,6 +1941,11 @@ void Qmgr::execAPI_REGREQ(Signal* signal)
case NodeInfo::REP:
compatability_check = ndbCompatible_ndb_api(NDB_VERSION, version);
break;
+ case NodeInfo::DB:
+ case NodeInfo::INVALID:
+ default:
+ sendApiRegRef(signal, ref, ApiRegRef::WrongType);
+ return;
}
if (!compatability_check) {
@@ -2672,7 +2673,7 @@ void Qmgr::systemErrorBecauseOtherNodeFailed(Signal* signal,
failReport(signal, getOwnNodeId(), (UintR)ZTRUE, FailRep::ZOWN_FAILURE);
char buf[100];
- snprintf(buf, 100,
+ BaseString::snprintf(buf, 100,
"Node was shutdown during startup because node %d failed",
failedNodeId);
diff --git a/ndb/src/kernel/blocks/suma/Suma.cpp b/ndb/src/kernel/blocks/suma/Suma.cpp
index 24e264291e7..d11d5f7176a 100644
--- a/ndb/src/kernel/blocks/suma/Suma.cpp
+++ b/ndb/src/kernel/blocks/suma/Suma.cpp
@@ -98,7 +98,7 @@ Suma::getNodeGroupMembers(Signal* signal) {
}
// ndbout_c("c_noNodesInGroup=%d", c_noNodesInGroup);
- ndbrequire(c_noNodesInGroup >= 0); // at least 1 node in the nodegroup
+ ndbrequire(c_noNodesInGroup > 0); // at least 1 node in the nodegroup
#ifdef NODEFAIL_DEBUG
for (Uint32 i = 0; i < c_noNodesInGroup; i++) {
@@ -673,6 +673,7 @@ Suma::execDUMP_STATE_ORD(Signal* signal){
*
*/
+#if 0
void
SumaParticipant::convertNameToId(SubscriptionPtr subPtr, Signal * signal)
{
@@ -703,6 +704,7 @@ SumaParticipant::convertNameToId(SubscriptionPtr subPtr, Signal * signal)
sendSubCreateConf(signal, subPtr.p->m_subscriberRef, subPtr);
}
}
+#endif
void
@@ -719,6 +721,7 @@ SumaParticipant::addTableId(Uint32 tableId,
psyncRec->m_tableList.append(&tableId, 1);
}
+#if 0
void
SumaParticipant::execGET_TABLEID_CONF(Signal * signal)
{
@@ -766,6 +769,8 @@ SumaParticipant::execGET_TABLEID_REF(Signal * signal)
SubCreateRef::SignalLength,
JBB);
}
+#endif
+
/*************************************************************
*
@@ -999,6 +1004,7 @@ SumaParticipant::execSUB_CREATE_REQ(Signal* signal) {
}
}
break;
+#if 0
case SubCreateReq::SelectiveTableSnapshot:
/**
* Tables specified by the user that does not exist
@@ -1041,6 +1047,7 @@ SumaParticipant::execSUB_CREATE_REQ(Signal* signal) {
return;
}
break;
+#endif
case SubCreateReq::DatabaseSnapshot:
{
jam();
@@ -1880,20 +1887,19 @@ SumaParticipant::SyncRecord::nextScan(Signal* signal){
req->tableId = tabPtr.p->m_tableId;
req->requestInfo = 0;
req->savePointId = 0;
- ScanFragReq::setConcurrency(req->requestInfo, parallelism);
ScanFragReq::setLockMode(req->requestInfo, 0);
ScanFragReq::setHoldLockFlag(req->requestInfo, 0);
ScanFragReq::setKeyinfoFlag(req->requestInfo, 0);
ScanFragReq::setAttrLen(req->requestInfo, attrLen);
- req->fragmentNo = fd.m_fragDesc.m_fragmentNo;
+ req->fragmentNoKeyLen = fd.m_fragDesc.m_fragmentNo;
req->schemaVersion = tabPtr.p->m_schemaVersion;
req->transId1 = 0;
req->transId2 = (SUMA << 20) + (suma.getOwnNodeId() << 8);
-
- for(unsigned int i = 0; i<parallelism; i++){
- req->clientOpPtr[i] = (ptrI << 16) + (i + 1);
- }
- suma.sendSignal(DBLQH_REF, GSN_SCAN_FRAGREQ, signal, 25, JBB);
+ req->clientOpPtr = (ptrI << 16);
+ req->batch_size_rows= 16;
+ req->batch_size_bytes= 0;
+ suma.sendSignal(DBLQH_REF, GSN_SCAN_FRAGREQ, signal,
+ ScanFragReq::SignalLength, JBB);
signal->theData[0] = ptrI;
signal->theData[1] = 0;
@@ -1995,6 +2001,8 @@ SumaParticipant::execSUB_SYNC_CONTINUE_CONF(Signal* signal){
req->closeFlag = 0;
req->transId1 = 0;
req->transId2 = (SUMA << 20) + (getOwnNodeId() << 8);
+ req->batch_size_rows = 16;
+ req->batch_size_bytes = 0;
sendSignal(DBLQH_REF, GSN_SCAN_NEXTREQ, signal,
ScanFragNextReq::SignalLength, JBB);
}
@@ -2705,6 +2713,7 @@ Suma::getResponsibleSumaNodeId(Uint32 D)
id = RNIL;
} else {
jam();
+ id = RNIL;
const Uint32 n = c_noNodesInGroup; // Number nodes in node group
const Uint32 C1 = D / n;
const Uint32 C2 = D - C1*n; // = D % n;
diff --git a/ndb/src/kernel/blocks/suma/Suma.hpp b/ndb/src/kernel/blocks/suma/Suma.hpp
index 7bd58b30640..08987fa9420 100644
--- a/ndb/src/kernel/blocks/suma/Suma.hpp
+++ b/ndb/src/kernel/blocks/suma/Suma.hpp
@@ -62,9 +62,10 @@ protected:
void execLIST_TABLES_CONF(Signal* signal);
void execGET_TABINFOREF(Signal* signal);
void execGET_TABINFO_CONF(Signal* signal);
+#if 0
void execGET_TABLEID_CONF(Signal* signal);
void execGET_TABLEID_REF(Signal* signal);
-
+#endif
/**
* Scan interface
*/
@@ -275,7 +276,9 @@ public:
*/
// TODO we've got to fix this, this is too inefficient. Tomas
char m_tables[MAX_TABLES];
+#if 0
char m_tableNames[MAX_TABLES][MAX_TAB_NAME_SIZE];
+#endif
/**
* "Iterator" used to iterate through m_tableNames
*/
diff --git a/ndb/src/kernel/blocks/suma/SumaInit.cpp b/ndb/src/kernel/blocks/suma/SumaInit.cpp
index 9f0659942a2..255abd47c94 100644
--- a/ndb/src/kernel/blocks/suma/SumaInit.cpp
+++ b/ndb/src/kernel/blocks/suma/SumaInit.cpp
@@ -51,9 +51,10 @@ SumaParticipant::SumaParticipant(const Configuration & conf) :
//addRecSignal(GSN_GET_TABINFOREF, &SumaParticipant::execGET_TABINFO_REF);
addRecSignal(GSN_GET_TABINFO_CONF, &SumaParticipant::execGET_TABINFO_CONF);
addRecSignal(GSN_GET_TABINFOREF, &SumaParticipant::execGET_TABINFOREF);
+#if 0
addRecSignal(GSN_GET_TABLEID_CONF, &SumaParticipant::execGET_TABLEID_CONF);
addRecSignal(GSN_GET_TABLEID_REF, &SumaParticipant::execGET_TABLEID_REF);
-
+#endif
/**
* Dih interface
*/
@@ -93,9 +94,15 @@ SumaParticipant::SumaParticipant(const Configuration & conf) :
/**
* @todo: fix pool sizes
*/
+ Uint32 noTables;
+ const ndb_mgm_configuration_iterator * p = conf.getOwnConfigIterator();
+ ndbrequire(p != 0);
+
+ ndb_mgm_get_int_parameter(p, CFG_DB_NO_TABLES,
+ &noTables);
- c_tablePool_.setSize(MAX_TABLES);
- c_tables.setSize(MAX_TABLES);
+ c_tablePool_.setSize(noTables);
+ c_tables.setSize(noTables);
c_subscriptions.setSize(20); //10
c_subscriberPool.setSize(64);
diff --git a/ndb/src/kernel/blocks/trix/Trix.cpp b/ndb/src/kernel/blocks/trix/Trix.cpp
index 4088d55c76d..80cf9f88c0d 100644
--- a/ndb/src/kernel/blocks/trix/Trix.cpp
+++ b/ndb/src/kernel/blocks/trix/Trix.cpp
@@ -47,8 +47,7 @@ Trix::Trix(const Configuration & conf) :
c_masterTrixRef(0),
c_noNodesFailed(0),
c_noActiveNodes(0),
- c_theSubscriptions(c_theSubscriptionRecPool),
- c_thePages(c_thePagePool)
+ c_theSubscriptions(c_theSubscriptionRecPool)
{
BLOCK_CONSTRUCTOR(Trix);
@@ -90,7 +89,6 @@ Trix::Trix(const Configuration & conf) :
// Allocate pool sizes
c_theAttrOrderBufferPool.setSize(100);
c_theSubscriptionRecPool.setSize(100);
- c_thePagePool.setSize(16);
ArrayList<SubscriptionRecord> subscriptions(c_theSubscriptionRecPool);
SubscriptionRecPtr subptr;
diff --git a/ndb/src/kernel/blocks/trix/Trix.hpp b/ndb/src/kernel/blocks/trix/Trix.hpp
index f096e135094..8dc01375fa1 100644
--- a/ndb/src/kernel/blocks/trix/Trix.hpp
+++ b/ndb/src/kernel/blocks/trix/Trix.hpp
@@ -138,19 +138,6 @@ private:
*/
ArrayList<SubscriptionRecord> c_theSubscriptions;
- // Linear memory abstraction
-#define TRIX_WORDS_PER_PAGE 8191
- struct Page32 {
- Uint32 data[TRIX_WORDS_PER_PAGE];
- Uint32 nextPool;
- };
- typedef Ptr<Page32> Page32Ptr;
-
- ArrayPool<Page32> c_thePagePool;
- Array<Page32> c_thePages;
-
- // Private methods
-
// System start
void execSTTOR(Signal* signal);
void execNDB_STTOR(Signal* signal);
diff --git a/ndb/src/kernel/error/ErrorHandlingMacros.hpp b/ndb/src/kernel/error/ErrorHandlingMacros.hpp
index 416507fee23..d8bb7ff759b 100644
--- a/ndb/src/kernel/error/ErrorHandlingMacros.hpp
+++ b/ndb/src/kernel/error/ErrorHandlingMacros.hpp
@@ -22,6 +22,8 @@
extern const char programName[];
+#define ERROR_SET_SIGNAL(messageCategory, messageID, problemData, objectRef) \
+ ErrorReporter::handleError(messageCategory, messageID, problemData, objectRef, NST_ErrorHandlerSignal)
#define ERROR_SET(messageCategory, messageID, problemData, objectRef) \
ErrorReporter::handleError(messageCategory, messageID, problemData, objectRef)
// Description:
diff --git a/ndb/src/kernel/error/ErrorReporter.cpp b/ndb/src/kernel/error/ErrorReporter.cpp
index f1320c44e09..35c99b30994 100644
--- a/ndb/src/kernel/error/ErrorReporter.cpp
+++ b/ndb/src/kernel/error/ErrorReporter.cpp
@@ -60,7 +60,7 @@ ErrorReporter::formatTimeStampString(){
DateTime.setTimeStamp();
- snprintf(theDateTimeString, 39, "%s %d %s %d - %s:%s:%s",
+ BaseString::snprintf(theDateTimeString, 39, "%s %d %s %d - %s:%s:%s",
DateTime.getDayName(), DateTime.getDayOfMonth(),
DateTime.getMonthName(), DateTime.getYear(), DateTime.getHour(),
DateTime.getMinute(), DateTime.getSecond());
@@ -126,7 +126,7 @@ ErrorReporter::formatMessage(ErrorCategory type,
processId = NdbHost_GetProcessId();
- snprintf(messptr, MESSAGE_LENGTH,
+ BaseString::snprintf(messptr, MESSAGE_LENGTH,
"Date/Time: %s\nType of error: %s\n"
"Message: %s\nFault ID: %d\nProblem data: %s"
"\nObject of reference: %s\nProgramName: %s\n"
@@ -139,7 +139,7 @@ ErrorReporter::formatMessage(ErrorCategory type,
objRef,
programName,
processId,
- theNameOfTheTraceFile);
+ theNameOfTheTraceFile ? theNameOfTheTraceFile : "<no tracefile>");
// Add trailing blanks to get a fixed length of the message
while (strlen(messptr) <= MESSAGE_LENGTH-3){
@@ -157,13 +157,13 @@ ErrorReporter::handleAssert(const char* message, const char* file, int line)
char refMessage[100];
#ifdef NO_EMULATED_JAM
- snprintf(refMessage, 100, "file: %s lineNo: %d",
+ BaseString::snprintf(refMessage, 100, "file: %s lineNo: %d",
file, line);
#else
const Uint32 blockNumber = theEmulatedJamBlockNumber;
const char *blockName = getBlockName(blockNumber);
- snprintf(refMessage, 100, "%s line: %d (block: %s)",
+ BaseString::snprintf(refMessage, 100, "%s line: %d (block: %s)",
file, line, blockName);
#endif
WriteMessage(assert, ERR_ERROR_PRGERR, message, refMessage,
@@ -178,7 +178,7 @@ ErrorReporter::handleThreadAssert(const char* message,
int line)
{
char refMessage[100];
- snprintf(refMessage, 100, "file: %s lineNo: %d - %s",
+ BaseString::snprintf(refMessage, 100, "file: %s lineNo: %d - %s",
file, line, message);
NdbShutdown(NST_ErrorHandler);
@@ -217,8 +217,10 @@ WriteMessage(ErrorCategory thrdType, int thrdMessageID,
/**
* Format trace file name
*/
- int file_no= ErrorReporter::get_trace_no();
- char *theTraceFileName= NdbConfig_TraceFileName(globalData.ownId, file_no);
+ char *theTraceFileName= 0;
+ if (globalData.ownId > 0)
+ theTraceFileName= NdbConfig_TraceFileName(globalData.ownId,
+ ErrorReporter::get_trace_no());
NdbAutoPtr<char> tmp_aptr1(theTraceFileName);
// The first 69 bytes is info about the current offset
@@ -291,26 +293,28 @@ WriteMessage(ErrorCategory thrdType, int thrdMessageID,
fflush(stream);
fclose(stream);
- // Open the tracefile...
- FILE *jamStream = fopen(theTraceFileName, "w");
+ if (theTraceFileName) {
+ // Open the tracefile...
+ FILE *jamStream = fopen(theTraceFileName, "w");
- // ...and "dump the jam" there.
- // ErrorReporter::dumpJam(jamStream);
- if(thrdTheEmulatedJam != 0){
- dumpJam(jamStream, thrdTheEmulatedJamIndex, thrdTheEmulatedJam);
- }
-
- /* Dont print the jobBuffers until a way to copy them,
- like the other variables,
- is implemented. Otherwise when NDB keeps running,
- with this function running
- in the background, the jobBuffers will change during runtime. And when
- they're printed here, they will not be correct anymore.
- */
- globalScheduler.dumpSignalMemory(jamStream);
+ // ...and "dump the jam" there.
+ // ErrorReporter::dumpJam(jamStream);
+ if(thrdTheEmulatedJam != 0){
+ dumpJam(jamStream, thrdTheEmulatedJamIndex, thrdTheEmulatedJam);
+ }
- fclose(jamStream);
+ /* Dont print the jobBuffers until a way to copy them,
+ like the other variables,
+ is implemented. Otherwise when NDB keeps running,
+ with this function running
+ in the background, the jobBuffers will change during runtime. And when
+ they're printed here, they will not be correct anymore.
+ */
+ globalScheduler.dumpSignalMemory(jamStream);
+ fclose(jamStream);
+ }
+
return 0;
}
diff --git a/ndb/src/kernel/main.cpp b/ndb/src/kernel/main.cpp
index 4d3a0afe6ed..fa44704807d 100644
--- a/ndb/src/kernel/main.cpp
+++ b/ndb/src/kernel/main.cpp
@@ -15,9 +15,11 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <ndb_global.h>
+#include <my_pthread.h>
#include <ndb_version.h>
#include "Configuration.hpp"
+#include <LocalConfig.hpp>
#include <TransporterRegistry.hpp>
#include "vm/SimBlockList.hpp"
@@ -26,6 +28,7 @@
#include <NdbOut.hpp>
#include <NdbMain.h>
#include <NdbDaemon.h>
+#include <NdbSleep.h>
#include <NdbConfig.h>
#include <WatchDog.hpp>
@@ -39,6 +42,7 @@
#endif
extern EventLogger g_eventLogger;
+extern NdbMutex * theShutdownMutex;
void catchsigs(bool ignore); // for process signal handling
@@ -53,6 +57,7 @@ const char programName[] = "NDB Kernel";
NDB_MAIN(ndb_kernel){
+ ndb_init();
// Print to stdout/console
g_eventLogger.createConsoleHandler();
g_eventLogger.setCategory("NDB");
@@ -63,14 +68,23 @@ NDB_MAIN(ndb_kernel){
// Parse command line options
Configuration* theConfig = globalEmulatorData.theConfiguration;
if(!theConfig->init(argc, argv)){
- return 0;
+ return NRT_Default;
}
+ LocalConfig local_config;
+ if (!local_config.init(theConfig->getConnectString(),0)){
+ local_config.printError();
+ local_config.printUsage();
+ return NRT_Default;
+ }
+
{ // Do configuration
signal(SIGPIPE, SIG_IGN);
- theConfig->fetch_configuration();
+ theConfig->fetch_configuration(local_config);
}
+ chdir(NdbConfig_get_path(0));
+
if (theConfig->getDaemonMode()) {
// Become a daemon
char *lockfile= NdbConfig_PidFileName(globalData.ownId);
@@ -129,7 +143,7 @@ NDB_MAIN(ndb_kernel){
exit(0);
}
g_eventLogger.info("Ndb has terminated (pid %d) restarting", child);
- theConfig->fetch_configuration();
+ theConfig->fetch_configuration(local_config);
}
g_eventLogger.info("Angel pid: %d ndb pid: %d", getppid(), getpid());
@@ -238,6 +252,9 @@ systemInfo(const Configuration & config, const LogLevel & logLevel){
if(logLevel.getLogLevel(LogLevel::llStartUp) > 0){
g_eventLogger.info("NDB Cluster -- DB node %d", globalData.ownId);
g_eventLogger.info("%s --", NDB_VERSION_STRING);
+ if (config.get_mgmd_host())
+ g_eventLogger.info("Configuration fetched at %s port %d",
+ config.get_mgmd_host(), config.get_mgmd_port());
#ifdef NDB_SOLARIS // ok
g_eventLogger.info("NDB is running on a machine with %d processor(s) at %d MHz",
processor, speed);
@@ -328,16 +345,24 @@ handler_shutdown(int signum){
extern "C"
void
handler_error(int signum){
+ // only let one thread run shutdown
+ static long thread_id= 0;
+
+ if (thread_id != 0 && thread_id == my_thread_id())
+ {
+ // Shutdown thread received signal
+ signal(signum, SIG_DFL);
+ kill(getpid(), signum);
+ while(true)
+ NdbSleep_MilliSleep(10);
+ }
+ if(theShutdownMutex && NdbMutex_Trylock(theShutdownMutex) != 0)
+ while(true)
+ NdbSleep_MilliSleep(10);
+ thread_id= my_thread_id();
g_eventLogger.info("Received signal %d. Running error handler.", signum);
// restart the system
char errorData[40];
- snprintf(errorData, 40, "Signal %d received", signum);
- ERROR_SET(fatal, 0, errorData, __FILE__);
+ BaseString::snprintf(errorData, 40, "Signal %d received", signum);
+ ERROR_SET_SIGNAL(fatal, 0, errorData, __FILE__);
}
-
-
-
-
-
-
-
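
handler_error in main.cpp above lets only one thread run the error handler: the first caller takes the shutdown mutex and records its thread id; a later signal in that same thread re-raises with the default disposition, and any other thread simply parks. A stripped-down sketch of the same guard using plain pthreads; my_thread_id, theShutdownMutex and the event logger are replaced by hypothetical stand-ins.

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t shutdown_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_t handler_thread;
static volatile sig_atomic_t handler_taken = 0;

static void handler_error(int signum)
{
  if (handler_taken && pthread_equal(handler_thread, pthread_self())) {
    // The thread already running shutdown got another signal: restore the
    // default action, re-raise, and park so the handler is never re-entered.
    signal(signum, SIG_DFL);
    kill(getpid(), signum);
    for (;;) sleep(1);
  }
  if (pthread_mutex_trylock(&shutdown_mutex) != 0) {
    // Another thread is already shutting down; just wait here.
    for (;;) sleep(1);
  }
  handler_thread = pthread_self();
  handler_taken = 1;
  // Mirrors the patch's logging call; fprintf is not async-signal-safe in general.
  fprintf(stderr, "Received signal %d. Running error handler.\n", signum);
  // ... real error handling / restart would go here ...
}

int main()
{
  signal(SIGSEGV, handler_error);
  return 0;
}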
diff --git a/ndb/src/kernel/vm/ClusterConfiguration.cpp b/ndb/src/kernel/vm/ClusterConfiguration.cpp
index 3a6478380d1..d5bd03f69d5 100644
--- a/ndb/src/kernel/vm/ClusterConfiguration.cpp
+++ b/ndb/src/kernel/vm/ClusterConfiguration.cpp
@@ -358,7 +358,7 @@ void ClusterConfiguration::init(const Properties & p, const Properties & db){
for(int i = 0; i<sz; i++){
if(!db.get(tmp[i].attrib, tmp[i].storage)){
char buf[255];
- snprintf(buf, sizeof(buf), "%s not found", tmp[i].attrib);
+ BaseString::snprintf(buf, sizeof(buf), "%s not found", tmp[i].attrib);
ERROR_SET(fatal, ERR_INVALID_CONFIG, msg, buf);
}
}
@@ -406,7 +406,7 @@ void ClusterConfiguration::init(const Properties & p, const Properties & db){
for(unsigned j = 0; j<nodeNo; j++){
if(cd.nodeData[j].nodeId == nodeId){
char buf[255];
- snprintf(buf, sizeof(buf), "Two node can not have the same node id");
+ BaseString::snprintf(buf, sizeof(buf), "Two nodes can not have the same node id");
ERROR_SET(fatal, ERR_INVALID_CONFIG, msg, buf);
}
}
@@ -429,12 +429,12 @@ void ClusterConfiguration::init(const Properties & p, const Properties & db){
if(nodeId > MAX_NDB_NODES){
char buf[255];
- snprintf(buf, sizeof(buf), "Maximum node id for a ndb node is: %d", MAX_NDB_NODES);
+ BaseString::snprintf(buf, sizeof(buf), "Maximum node id for a ndb node is: %d", MAX_NDB_NODES);
ERROR_SET(fatal, ERR_INVALID_CONFIG, msg, buf);
}
if(cd.SizeAltData.noOfNDBNodes > MAX_NDB_NODES){
char buf[255];
- snprintf(buf, sizeof(buf),
+ BaseString::snprintf(buf, sizeof(buf),
"Maximum %d ndb nodes is allowed in the cluster",
MAX_NDB_NODES);
ERROR_SET(fatal, ERR_INVALID_CONFIG, msg, buf);
diff --git a/ndb/src/kernel/vm/Configuration.cpp b/ndb/src/kernel/vm/Configuration.cpp
index 257b7a098e0..706f60fd9cf 100644
--- a/ndb/src/kernel/vm/Configuration.cpp
+++ b/ndb/src/kernel/vm/Configuration.cpp
@@ -14,6 +14,9 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#include <ndb_global.h>
+
+#include <LocalConfig.hpp>
#include "Configuration.hpp"
#include <ErrorHandlingMacros.hpp>
#include "GlobalData.hpp"
@@ -53,9 +56,13 @@ Configuration::init(int argc, const char** argv){
int _no_start = 0;
int _initial = 0;
const char* _connect_str = NULL;
- int _deamon = 0;
+ int _daemon = 1;
+ int _no_daemon = 0;
int _help = 0;
int _print_version = 0;
+#ifndef DBUG_OFF
+ const char *debug_option= 0;
+#endif
/**
* Arguments to NDB process
@@ -65,8 +72,13 @@ Configuration::init(int argc, const char** argv){
{ "version", 'v', arg_flag, &_print_version, "Print ndbd version", "" },
{ "nostart", 'n', arg_flag, &_no_start,
"Don't start ndbd immediately. Ndbd will await command from ndb_mgmd", "" },
- { "daemon", 'd', arg_flag, &_deamon, "Start ndbd as daemon", "" },
- { "initial", 'i', arg_flag, &_initial,
+ { "daemon", 'd', arg_flag, &_daemon, "Start ndbd as daemon (default)", "" },
+ { "nodaemon", 0, arg_flag, &_no_daemon, "Do not start ndbd as daemon, provided for testing purposes", "" },
+#ifndef DBUG_OFF
+ { "debug", 0, arg_string, &debug_option,
+ "Specify debug options e.g. d:t:i:o,out.trace", "options" },
+#endif
+ { "initial", 0, arg_flag, &_initial,
"Perform initial start of ndbd, including cleaning the file system. Consult documentation before using this", "" },
{ "connect-string", 'c', arg_string, &_connect_str,
@@ -81,18 +93,28 @@ Configuration::init(int argc, const char** argv){
if(getarg(args, num_args, argc, argv, &optind) || _help) {
arg_printusage(args, num_args, argv[0], desc);
+ for (int i = 0; i < argc; i++) {
+ if (strcmp("-i",argv[i]) == 0) {
+ printf("flag deprecated %s, use %s\n", "-i", "--initial");
+ }
+ }
return false;
}
+ if (_no_daemon) {
+ _daemon= 0;
+ }
+ // check for deprecated flag '-i'
-#if 0
- ndbout << "no_start=" <<_no_start<< endl;
- ndbout << "initial=" <<_initial<< endl;
- ndbout << "deamon=" <<_deamon<< endl;
- ndbout << "connect_str="<<_connect_str<<endl;
- arg_printusage(args, num_args, argv[0], desc);
- return false;
+#ifndef DBUG_OFF
+ if (debug_option)
+ DBUG_PUSH(debug_option);
#endif
+ DBUG_PRINT("info", ("no_start=%d", _no_start));
+ DBUG_PRINT("info", ("initial=%d", _initial));
+ DBUG_PRINT("info", ("daemon=%d", _daemon));
+ DBUG_PRINT("info", ("connect_str=%s", _connect_str));
+
ndbSetOwnVersion();
if (_print_version) {
@@ -114,8 +136,8 @@ Configuration::init(int argc, const char** argv){
if (_connect_str)
_connectString = strdup(_connect_str);
- // Check deamon flag
- if (_deamon)
+ // Check daemon flag
+ if (_daemon)
_daemonMode = true;
// Save programname
@@ -132,9 +154,11 @@ Configuration::Configuration()
_programName = 0;
_connectString = 0;
_fsPath = 0;
+ _backupPath = 0;
_initialStart = false;
_daemonMode = false;
m_config_retriever= 0;
+ m_clusterConfig= 0;
}
Configuration::~Configuration(){
@@ -144,6 +168,9 @@ Configuration::~Configuration(){
if(_fsPath != NULL)
free(_fsPath);
+ if(_backupPath != NULL)
+ free(_backupPath);
+
if (m_config_retriever) {
delete m_config_retriever;
}
@@ -158,7 +185,7 @@ Configuration::closeConfiguration(){
}
void
-Configuration::fetch_configuration(){
+Configuration::fetch_configuration(LocalConfig &local_config){
/**
* Fetch configuration from management server
*/
@@ -166,27 +193,28 @@ Configuration::fetch_configuration(){
delete m_config_retriever;
}
- m_config_retriever= new ConfigRetriever(NDB_VERSION, NODE_TYPE_DB);
- m_config_retriever->setConnectString(_connectString ? _connectString : "");
- if(m_config_retriever->init() == -1 ||
- m_config_retriever->do_connect() == -1){
-
+ m_mgmd_port= 0;
+ m_mgmd_host= 0;
+ m_config_retriever= new ConfigRetriever(local_config, NDB_VERSION, NODE_TYPE_DB);
+ if(m_config_retriever->do_connect() == -1){
const char * s = m_config_retriever->getErrorString();
if(s == 0)
s = "No error given!";
-
/* Set stop on error to true otherwise NDB will
go into an restart loop...
*/
- ERROR_SET(fatal, ERR_INVALID_CONFIG, "Could connect to ndb_mgmd", s);
+ ERROR_SET(fatal, ERR_INVALID_CONFIG, "Could not connect to ndb_mgmd", s);
}
+ m_mgmd_port= m_config_retriever->get_mgmd_port();
+ m_mgmd_host= m_config_retriever->get_mgmd_host();
+
ConfigRetriever &cr= *m_config_retriever;
if((globalData.ownId = cr.allocNodeId()) == 0){
for(Uint32 i = 0; i<3; i++){
NdbSleep_SecSleep(3);
- if(globalData.ownId = cr.allocNodeId())
+ if((globalData.ownId = cr.allocNodeId()) != 0)
break;
}
}
@@ -225,8 +253,48 @@ Configuration::fetch_configuration(){
}
}
+static char * get_and_validate_path(ndb_mgm_configuration_iterator &iter,
+ Uint32 param, const char *param_string)
+{
+ const char* path = NULL;
+ if(iter.get(param, &path)){
+ ERROR_SET(fatal, ERR_INVALID_CONFIG, "Invalid configuration fetched missing ",
+ param_string);
+ }
+
+ if(path == 0 || strlen(path) == 0){
+ ERROR_SET(fatal, ERR_INVALID_CONFIG,
+ "Invalid configuration fetched. Configuration does not contain valid ",
+ param_string);
+ }
+
+ // check that it is pointing on a valid directory
+ //
+ char buf2[PATH_MAX];
+ memset(buf2, 0,sizeof(buf2));
+#ifdef NDB_WIN32
+ char* szFilePart;
+ if(!GetFullPathName(path, sizeof(buf2), buf2, &szFilePart)
+ || (::GetFileAttributes(buf2)&FILE_ATTRIBUTE_READONLY))
+#else
+ if((::realpath(path, buf2) == NULL)||
+ (::access(buf2, W_OK) != 0))
+#endif
+ {
+ ERROR_SET(fatal, AFS_ERROR_INVALIDPATH, path, " Filename::init()");
+ }
+
+ if (strcmp(&buf2[strlen(buf2) - 1], DIR_SEPARATOR))
+ strcat(buf2, DIR_SEPARATOR);
+
+ return strdup(buf2);
+}
+
void
Configuration::setupConfiguration(){
+
+ DBUG_ENTER("Configuration::setupConfiguration");
+
ndb_mgm_configuration * p = m_clusterConfig;
/**
@@ -272,29 +340,15 @@ Configuration::setupConfiguration(){
}
/**
- * Get filesystem path
+ * Get paths
*/
- {
- const char* pFileSystemPath = NULL;
- if(iter.get(CFG_DB_FILESYSTEM_PATH, &pFileSystemPath)){
- ERROR_SET(fatal, ERR_INVALID_CONFIG, "Invalid configuration fetched",
- "FileSystemPath missing");
- }
-
- if(pFileSystemPath == 0 || strlen(pFileSystemPath) == 0){
- ERROR_SET(fatal, ERR_INVALID_CONFIG, "Invalid configuration fetched",
- "Configuration does not contain valid filesystem path");
- }
-
- if(pFileSystemPath[strlen(pFileSystemPath) - 1] == '/')
- _fsPath = strdup(pFileSystemPath);
- else {
- _fsPath = (char *)malloc(strlen(pFileSystemPath) + 2);
- strcpy(_fsPath, pFileSystemPath);
- strcat(_fsPath, "/");
- }
- }
-
+ if (_fsPath)
+ free(_fsPath);
+ _fsPath= get_and_validate_path(iter, CFG_DB_FILESYSTEM_PATH, "FileSystemPath");
+ if (_backupPath)
+ free(_backupPath);
+ _backupPath= get_and_validate_path(iter, CFG_DB_BACKUP_DATADIR, "BackupDataDir");
+
if(iter.get(CFG_DB_STOP_ON_ERROR_INSERT, &m_restartOnErrorInsert)){
ERROR_SET(fatal, ERR_INVALID_CONFIG, "Invalid configuration fetched",
"RestartOnErrorInsert missing");
@@ -315,6 +369,8 @@ Configuration::setupConfiguration(){
(p, CFG_SECTION_NODE);
calcSizeAlt(cf);
+
+ DBUG_VOID_RETURN;
}
bool
@@ -362,6 +418,11 @@ Configuration::setRestartOnErrorInsert(int i){
m_restartOnErrorInsert = i;
}
+const char *
+Configuration::getConnectString() const {
+ return _connectString;
+}
+
char *
Configuration::getConnectStringCopy() const {
if(_connectString != 0)
@@ -385,7 +446,8 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
char buf[255];
unsigned int noOfTables = 0;
- unsigned int noOfIndexes = 0;
+ unsigned int noOfUniqueHashIndexes = 0;
+ unsigned int noOfOrderedIndexes = 0;
unsigned int noOfReplicas = 0;
unsigned int noOfDBNodes = 0;
unsigned int noOfAPINodes = 0;
@@ -393,33 +455,28 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
unsigned int noOfNodes = 0;
unsigned int noOfAttributes = 0;
unsigned int noOfOperations = 0;
+ unsigned int noOfLocalOperations = 0;
unsigned int noOfTransactions = 0;
unsigned int noOfIndexPages = 0;
unsigned int noOfDataPages = 0;
unsigned int noOfScanRecords = 0;
+ unsigned int noOfLocalScanRecords = 0;
+ unsigned int noBatchSize = 0;
m_logLevel = new LogLevel();
- /**
- * {"NoOfConcurrentCheckpointsDuringRestart", &cd.ispValues[1][5] },
- * {"NoOfConcurrentCheckpointsAfterRestart", &cd.ispValues[2][4] },
- * {"NoOfConcurrentProcessesHandleTakeover", &cd.ispValues[1][7] },
- * {"TimeToWaitAlive", &cd.ispValues[0][0] },
- */
- struct AttribStorage { int paramId; Uint32 * storage; };
+ struct AttribStorage { int paramId; Uint32 * storage; bool computable; };
AttribStorage tmp[] = {
- { CFG_DB_NO_SCANS, &noOfScanRecords },
- { CFG_DB_NO_TABLES, &noOfTables },
- { CFG_DB_NO_INDEXES, &noOfIndexes },
- { CFG_DB_NO_REPLICAS, &noOfReplicas },
- { CFG_DB_NO_ATTRIBUTES, &noOfAttributes },
- { CFG_DB_NO_OPS, &noOfOperations },
- { CFG_DB_NO_TRANSACTIONS, &noOfTransactions }
-#if 0
- { "NoOfDiskPagesToDiskDuringRestartTUP", &cd.ispValues[3][8] },
- { "NoOfDiskPagesToDiskAfterRestartTUP", &cd.ispValues[3][9] },
- { "NoOfDiskPagesToDiskDuringRestartACC", &cd.ispValues[3][10] },
- { "NoOfDiskPagesToDiskAfterRestartACC", &cd.ispValues[3][11] },
-#endif
+ { CFG_DB_NO_SCANS, &noOfScanRecords, false },
+ { CFG_DB_NO_LOCAL_SCANS, &noOfLocalScanRecords, true },
+ { CFG_DB_BATCH_SIZE, &noBatchSize, false },
+ { CFG_DB_NO_TABLES, &noOfTables, false },
+ { CFG_DB_NO_ORDERED_INDEXES, &noOfOrderedIndexes, false },
+ { CFG_DB_NO_UNIQUE_HASH_INDEXES, &noOfUniqueHashIndexes, false },
+ { CFG_DB_NO_REPLICAS, &noOfReplicas, false },
+ { CFG_DB_NO_ATTRIBUTES, &noOfAttributes, false },
+ { CFG_DB_NO_OPS, &noOfOperations, false },
+ { CFG_DB_NO_LOCAL_OPS, &noOfLocalOperations, true },
+ { CFG_DB_NO_TRANSACTIONS, &noOfTransactions, false }
};
ndb_mgm_configuration_iterator db(*(ndb_mgm_configuration*)ownConfig, 0);
@@ -427,8 +484,12 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
const int sz = sizeof(tmp)/sizeof(AttribStorage);
for(int i = 0; i<sz; i++){
if(ndb_mgm_get_int_parameter(&db, tmp[i].paramId, tmp[i].storage)){
- snprintf(buf, sizeof(buf), "ConfigParam: %d not found", tmp[i].paramId);
- ERROR_SET(fatal, ERR_INVALID_CONFIG, msg, buf);
+ if (tmp[i].computable) {
+ *tmp[i].storage = 0;
+ } else {
+ BaseString::snprintf(buf, sizeof(buf),"ConfigParam: %d not found", tmp[i].paramId);
+ ERROR_SET(fatal, ERR_INVALID_CONFIG, msg, buf);
+ }
}
}
@@ -436,12 +497,12 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
ndb_mgm_get_int64_parameter(&db, CFG_DB_DATA_MEM, &dataMem);
ndb_mgm_get_int64_parameter(&db, CFG_DB_INDEX_MEM, &indexMem);
if(dataMem == 0){
- snprintf(buf, sizeof(buf), "ConfigParam: %d not found", CFG_DB_DATA_MEM);
+ BaseString::snprintf(buf, sizeof(buf), "ConfigParam: %d not found", CFG_DB_DATA_MEM);
ERROR_SET(fatal, ERR_INVALID_CONFIG, msg, buf);
}
if(indexMem == 0){
- snprintf(buf, sizeof(buf), "ConfigParam: %d not found", CFG_DB_INDEX_MEM);
+ BaseString::snprintf(buf, sizeof(buf), "ConfigParam: %d not found", CFG_DB_INDEX_MEM);
ERROR_SET(fatal, ERR_INVALID_CONFIG, msg, buf);
}
@@ -450,7 +511,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
for(unsigned j = 0; j<LogLevel::LOGLEVEL_CATEGORIES; j++){
Uint32 tmp;
- if(!ndb_mgm_get_int_parameter(&db, LogLevel::MIN_LOGLEVEL_ID+j, &tmp)){
+ if(!ndb_mgm_get_int_parameter(&db, CFG_MIN_LOGLEVEL+j, &tmp)){
m_logLevel->setLogLevel((LogLevel::EventCategory)j, tmp);
}
}
@@ -474,13 +535,13 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
}
if(nodeId > MAX_NODES || nodeId == 0){
- snprintf(buf, sizeof(buf),
+ BaseString::snprintf(buf, sizeof(buf),
"Invalid node id: %d", nodeId);
ERROR_SET(fatal, ERR_INVALID_CONFIG, msg, buf);
}
if(nodes.get(nodeId)){
- snprintf(buf, sizeof(buf), "Two node can not have the same node id: %d",
+ BaseString::snprintf(buf, sizeof(buf), "Two node can not have the same node id: %d",
nodeId);
ERROR_SET(fatal, ERR_INVALID_CONFIG, msg, buf);
}
@@ -507,42 +568,47 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
case NODE_TYPE_EXT_REP:
break;
default:
- snprintf(buf, sizeof(buf), "Unknown node type: %d", nodeType);
+ BaseString::snprintf(buf, sizeof(buf), "Unknown node type: %d", nodeType);
ERROR_SET(fatal, ERR_INVALID_CONFIG, msg, buf);
}
}
noOfNodes = nodeNo;
-
+
+ noOfTables+= 2; // Add System tables
+ noOfAttributes += 9; // Add System table attributes
+
+ ConfigValues::Iterator it2(*ownConfig, db.m_config);
+ it2.set(CFG_DB_NO_TABLES, noOfTables);
+ it2.set(CFG_DB_NO_ATTRIBUTES, noOfAttributes);
+
/**
* Do size calculations
*/
ConfigValuesFactory cfg(ownConfig);
- noOfTables++; // Remove impact of system table
- noOfTables += noOfIndexes; // Indexes are tables too
- noOfAttributes += 2; // ---"----
- noOfTables *= 2; // Remove impact of Dict need 2 ids for each table
-
- if (noOfDBNodes > 15) {
- noOfDBNodes = 15;
- }//if
- Uint32 noOfLocalScanRecords = (noOfDBNodes * noOfScanRecords) + 1;
+ if (noOfLocalScanRecords == 0) {
+ noOfLocalScanRecords = (noOfDBNodes * noOfScanRecords) + 1;
+ }
+ if (noOfLocalOperations == 0) {
+ noOfLocalOperations= (11 * noOfOperations) / 10;
+ }
Uint32 noOfTCScanRecords = noOfScanRecords;
{
+ Uint32 noOfAccTables= noOfTables + noOfUniqueHashIndexes;
/**
* Acc Size Alt values
*/
// Can keep 65536 pages (= 0.5 GByte)
cfg.put(CFG_ACC_DIR_RANGE,
- 4 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
+ 4 * NO_OF_FRAG_PER_NODE * noOfAccTables* noOfReplicas);
cfg.put(CFG_ACC_DIR_ARRAY,
(noOfIndexPages >> 8) +
- 4 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
+ 4 * NO_OF_FRAG_PER_NODE * noOfAccTables* noOfReplicas);
cfg.put(CFG_ACC_FRAGMENT,
- 2 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
+ 2 * NO_OF_FRAG_PER_NODE * noOfAccTables* noOfReplicas);
/*-----------------------------------------------------------------------*/
// The extra operation records added are used by the scan and node
@@ -552,25 +618,27 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
// The remainder are allowed for use by the scan processes.
/*-----------------------------------------------------------------------*/
cfg.put(CFG_ACC_OP_RECS,
- ((11 * noOfOperations) / 10 + 50) +
- (noOfLocalScanRecords * MAX_PARALLEL_SCANS_PER_FRAG) +
+ (noOfLocalOperations + 50) +
+ (noOfLocalScanRecords * noBatchSize) +
NODE_RECOVERY_SCAN_OP_RECORDS);
cfg.put(CFG_ACC_OVERFLOW_RECS,
noOfIndexPages +
- 2 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
+ 2 * NO_OF_FRAG_PER_NODE * noOfAccTables* noOfReplicas);
cfg.put(CFG_ACC_PAGE8,
noOfIndexPages + 32);
cfg.put(CFG_ACC_ROOT_FRAG,
- NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
+ NO_OF_FRAG_PER_NODE * noOfAccTables* noOfReplicas);
- cfg.put(CFG_ACC_TABLE, noOfTables);
+ cfg.put(CFG_ACC_TABLE, noOfAccTables);
cfg.put(CFG_ACC_SCAN, noOfLocalScanRecords);
}
+ Uint32 noOfMetaTables= noOfTables + noOfOrderedIndexes +
+ noOfUniqueHashIndexes;
{
/**
* Dict Size Alt values
@@ -579,7 +647,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
noOfAttributes);
cfg.put(CFG_DICT_TABLE,
- noOfTables);
+ noOfMetaTables);
}
{
@@ -592,8 +660,12 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
cfg.put(CFG_DIH_CONNECT,
noOfOperations + noOfTransactions + 46);
+ Uint32 noFragPerTable= ((noOfDBNodes + NO_OF_FRAGS_PER_CHUNK - 1) >>
+ LOG_NO_OF_FRAGS_PER_CHUNK) <<
+ LOG_NO_OF_FRAGS_PER_CHUNK;
+
cfg.put(CFG_DIH_FRAG_CONNECT,
- NO_OF_FRAG_PER_NODE * noOfTables * noOfDBNodes);
+ noFragPerTable * noOfMetaTables);
int temp;
temp = noOfReplicas - 2;
@@ -603,14 +675,14 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
temp++;
cfg.put(CFG_DIH_MORE_NODES,
temp * NO_OF_FRAG_PER_NODE *
- noOfTables * noOfDBNodes);
-
+ noOfMetaTables * noOfDBNodes);
+
cfg.put(CFG_DIH_REPLICAS,
- NO_OF_FRAG_PER_NODE * noOfTables *
+ NO_OF_FRAG_PER_NODE * noOfMetaTables *
noOfDBNodes * noOfReplicas);
cfg.put(CFG_DIH_TABLE,
- noOfTables);
+ noOfMetaTables);
}
{
@@ -618,13 +690,13 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
* Lqh Size Alt values
*/
cfg.put(CFG_LQH_FRAG,
- NO_OF_FRAG_PER_NODE * noOfTables * noOfReplicas);
+ NO_OF_FRAG_PER_NODE * noOfMetaTables * noOfReplicas);
cfg.put(CFG_LQH_TABLE,
- noOfTables);
+ noOfMetaTables);
cfg.put(CFG_LQH_TC_CONNECT,
- (11 * noOfOperations) / 10 + 50);
+ noOfLocalOperations + 50);
cfg.put(CFG_LQH_SCAN,
noOfLocalScanRecords);
@@ -641,7 +713,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
(2 * noOfOperations) + 16 + noOfTransactions);
cfg.put(CFG_TC_TABLE,
- noOfTables);
+ noOfMetaTables);
cfg.put(CFG_TC_LOCAL_SCAN,
noOfLocalScanRecords);
@@ -655,23 +727,23 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
* Tup Size Alt values
*/
cfg.put(CFG_TUP_FRAG,
- 2 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
+ 2 * NO_OF_FRAG_PER_NODE * noOfMetaTables* noOfReplicas);
cfg.put(CFG_TUP_OP_RECS,
- (11 * noOfOperations) / 10 + 50);
+ noOfLocalOperations + 50);
cfg.put(CFG_TUP_PAGE,
noOfDataPages);
cfg.put(CFG_TUP_PAGE_RANGE,
- 4 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas);
+ 4 * NO_OF_FRAG_PER_NODE * noOfMetaTables* noOfReplicas);
cfg.put(CFG_TUP_TABLE,
- noOfTables);
+ noOfMetaTables);
cfg.put(CFG_TUP_TABLE_DESC,
4 * NO_OF_FRAG_PER_NODE * noOfAttributes* noOfReplicas +
- 12 * NO_OF_FRAG_PER_NODE * noOfTables* noOfReplicas );
+ 12 * NO_OF_FRAG_PER_NODE * noOfMetaTables* noOfReplicas );
cfg.put(CFG_TUP_STORED_PROC,
noOfLocalScanRecords);
@@ -682,13 +754,13 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
* Tux Size Alt values
*/
cfg.put(CFG_TUX_INDEX,
- noOfTables);
+ noOfOrderedIndexes);
cfg.put(CFG_TUX_FRAGMENT,
- 2 * NO_OF_FRAG_PER_NODE * noOfTables * noOfReplicas);
+ 2 * NO_OF_FRAG_PER_NODE * noOfOrderedIndexes * noOfReplicas);
cfg.put(CFG_TUX_ATTRIBUTE,
- noOfIndexes * 4);
+ noOfOrderedIndexes * 4);
cfg.put(CFG_TUX_SCAN_OP, noOfLocalScanRecords);
}
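The calcSizeAlt() hunks above introduce "computable" parameters: NoOfLocalScanRecords and NoOfLocalOperations may be absent from the fetched configuration, in which case they are derived from the values that are present. A self-contained sketch of that fallback, using the same formulas as the hunk; the function name is illustrative only:

    // Sketch: a zero value means "not configured", so derive it here.
    static void derive_local_records(Uint32 noOfDBNodes,
                                     Uint32 noOfScanRecords,
                                     Uint32 noOfOperations,
                                     Uint32 &noOfLocalScanRecords,
                                     Uint32 &noOfLocalOperations)
    {
      if (noOfLocalScanRecords == 0)
        noOfLocalScanRecords = (noOfDBNodes * noOfScanRecords) + 1;
      if (noOfLocalOperations == 0)
        noOfLocalOperations = (11 * noOfOperations) / 10;  // 10% headroom
    }

The derived values then feed the ACC/LQH/TUP record counts in place of the old hard-coded (11 * noOfOperations) / 10 expressions.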
diff --git a/ndb/src/kernel/vm/Configuration.hpp b/ndb/src/kernel/vm/Configuration.hpp
index bd91f3fa74b..2ea32ffea37 100644
--- a/ndb/src/kernel/vm/Configuration.hpp
+++ b/ndb/src/kernel/vm/Configuration.hpp
@@ -21,6 +21,7 @@
#include <ndb_types.h>
class ConfigRetriever;
+class LocalConfig;
class Configuration {
public:
@@ -32,7 +33,7 @@ public:
*/
bool init(int argc, const char** argv);
- void fetch_configuration();
+ void fetch_configuration(LocalConfig &local_config);
void setupConfiguration();
void closeConfiguration();
@@ -53,6 +54,8 @@ public:
// Cluster configuration
const char * programName() const;
const char * fileSystemPath() const;
+ const char * backupFilePath() const;
+ const char * getConnectString() const;
char * getConnectStringCopy() const;
/**
@@ -64,6 +67,9 @@ public:
const ndb_mgm_configuration_iterator * getOwnConfigIterator() const;
+ Uint32 get_mgmd_port() const {return m_mgmd_port;};
+ const char *get_mgmd_host() const {return m_mgmd_host;};
+
class LogLevel * m_logLevel;
private:
friend class Cmvmi;
@@ -89,8 +95,11 @@ private:
*/
char * _programName;
char * _fsPath;
+ char * _backupPath;
bool _initialStart;
char * _connectString;
+ Uint32 m_mgmd_port;
+ const char *m_mgmd_host;
bool _daemonMode;
void calcSizeAlt(class ConfigValues * );
@@ -109,6 +118,12 @@ Configuration::fileSystemPath() const {
}
inline
+const char *
+Configuration::backupFilePath() const {
+ return _backupPath;
+}
+
+inline
bool
Configuration::getInitialStart() const {
return _initialStart;
diff --git a/ndb/src/kernel/vm/Emulator.cpp b/ndb/src/kernel/vm/Emulator.cpp
index 75aea2bda7f..a5897cd4064 100644
--- a/ndb/src/kernel/vm/Emulator.cpp
+++ b/ndb/src/kernel/vm/Emulator.cpp
@@ -61,6 +61,7 @@ Uint32 theEmulatedJamBlockNumber = 0;
EmulatorData globalEmulatorData;
NdbMutex * theShutdownMutex = 0;
+int simulate_error_during_shutdown= 0;
EmulatorData::EmulatorData(){
theConfiguration = 0;
@@ -117,7 +118,8 @@ NdbShutdown(NdbShutdownType type,
}
}
- if(NdbMutex_Trylock(theShutdownMutex) == 0){
+ if((type == NST_ErrorHandlerSignal) || // Signal handler has already locked mutex
+ (NdbMutex_Trylock(theShutdownMutex) == 0)){
globalData.theRestartFlag = perform_stop;
bool restart = false;
@@ -145,12 +147,15 @@ NdbShutdown(NdbShutdownType type,
case NST_ErrorHandler:
ndbout << "Error handler " << shutting << " system" << endl;
break;
+ case NST_ErrorHandlerSignal:
+ ndbout << "Error handler signal " << shutting << " system" << endl;
+ break;
case NST_Restart:
ndbout << "Restarting system" << endl;
break;
default:
ndbout << "Error handler " << shutting << " system"
- << " (unknown type: " << type << ")" << endl;
+ << " (unknown type: " << (unsigned)type << ")" << endl;
type = NST_ErrorHandler;
break;
}
@@ -175,6 +180,12 @@ NdbShutdown(NdbShutdownType type,
#endif
}
+ if (simulate_error_during_shutdown) {
+ kill(getpid(), simulate_error_during_shutdown);
+ while(true)
+ NdbSleep_MilliSleep(10);
+ }
+
globalEmulatorData.theWatchDog->doStop();
#ifdef VM_TRACE
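Two behaviours are added in this file: NST_ErrorHandlerSignal skips the trylock because the signal handler already owns theShutdownMutex, and simulate_error_during_shutdown lets tests re-raise a signal in the middle of shutdown. A hedged sketch of the locking decision on its own; the helper name is hypothetical, the real check lives inline in NdbShutdown():

    // Sketch: only attempt the trylock when the caller does not already
    // hold the mutex (the signal-handler path acquired it earlier).
    static bool may_perform_shutdown(NdbShutdownType type,
                                     NdbMutex *theShutdownMutex)
    {
      if (type == NST_ErrorHandlerSignal)
        return true;                                   // already locked
      return NdbMutex_Trylock(theShutdownMutex) == 0;  // 0 == acquired
    }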
diff --git a/ndb/src/kernel/vm/Emulator.hpp b/ndb/src/kernel/vm/Emulator.hpp
index 8c4504b9ba7..bd240f8679b 100644
--- a/ndb/src/kernel/vm/Emulator.hpp
+++ b/ndb/src/kernel/vm/Emulator.hpp
@@ -79,6 +79,7 @@ enum NdbShutdownType {
NST_Normal,
NST_Watchdog,
NST_ErrorHandler,
+ NST_ErrorHandlerSignal,
NST_Restart,
NST_ErrorInsert
};
diff --git a/ndb/src/kernel/vm/FastScheduler.hpp b/ndb/src/kernel/vm/FastScheduler.hpp
index 9749dab5d85..dc707e47eef 100644
--- a/ndb/src/kernel/vm/FastScheduler.hpp
+++ b/ndb/src/kernel/vm/FastScheduler.hpp
@@ -141,7 +141,7 @@ int
FastScheduler::checkDoJob()
{
/*
- * Joob buffer overload protetction
+ * Job buffer overload protection
* If the job buffer B is filled over a certain limit start
* to execute the signals in the job buffer's
*/
diff --git a/ndb/src/kernel/vm/LongSignal.hpp b/ndb/src/kernel/vm/LongSignal.hpp
index dfbfdb456da..f9ed443d995 100644
--- a/ndb/src/kernel/vm/LongSignal.hpp
+++ b/ndb/src/kernel/vm/LongSignal.hpp
@@ -25,7 +25,7 @@
*/
struct SectionSegment {
- static const Uint32 DataLength = 60;
+ STATIC_CONST( DataLength = 60 );
Uint32 m_ownerRef;
Uint32 m_sz;
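Replacing the in-class "static const Uint32 DataLength = 60;" with STATIC_CONST() works around compilers of that era which either reject in-class initialisation of static integral members or require an out-of-line definition. The macro is typically an enum wrapper; a hedged sketch of the idea (the real definition lives in the ndb headers and may differ):

    // Sketch of the usual portability trick: an anonymous enum gives a
    // compile-time integral constant on every compiler, with no storage
    // and no out-of-line definition needed.
    #define STATIC_CONST_SKETCH(x) enum { x }

    struct SectionSegmentSketch {
      STATIC_CONST_SKETCH( DataLength = 60 );
      Uint32 m_data[DataLength];  // usable wherever a constant expression is required
    };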
diff --git a/ndb/src/kernel/vm/MetaData.hpp b/ndb/src/kernel/vm/MetaData.hpp
index f6a941e8f9f..11e262664c1 100644
--- a/ndb/src/kernel/vm/MetaData.hpp
+++ b/ndb/src/kernel/vm/MetaData.hpp
@@ -107,6 +107,9 @@ public:
/* Number of primary key attributes (should be computed) */
Uint16 noOfPrimkey;
+ /* Number of distinct character sets (computed) */
+ Uint16 noOfCharsets;
+
/* Length of primary key in words (should be computed) */
/* For ordered index this is tree node size in words */
Uint16 tupKeyLength;
diff --git a/ndb/src/kernel/vm/SignalCounter.hpp b/ndb/src/kernel/vm/SignalCounter.hpp
index ea770324aa6..62242cb65bd 100644
--- a/ndb/src/kernel/vm/SignalCounter.hpp
+++ b/ndb/src/kernel/vm/SignalCounter.hpp
@@ -151,7 +151,7 @@ const char *
SignalCounter::getText() const {
static char buf[255];
static char nodes[NodeBitmask::TextLength+1];
- snprintf(buf, sizeof(buf), "[SignalCounter: m_count=%d %s]", m_count, m_nodes.getText(nodes));
+ BaseString::snprintf(buf, sizeof(buf), "[SignalCounter: m_count=%d %s]", m_count, m_nodes.getText(nodes));
return buf;
}
diff --git a/ndb/src/kernel/vm/SimulatedBlock.cpp b/ndb/src/kernel/vm/SimulatedBlock.cpp
index 18b7f474ddc..e6b97771d36 100644
--- a/ndb/src/kernel/vm/SimulatedBlock.cpp
+++ b/ndb/src/kernel/vm/SimulatedBlock.cpp
@@ -68,25 +68,25 @@ SimulatedBlock::SimulatedBlock(BlockNumber blockNumber,
char buf[255];
count = 10;
- snprintf(buf, 255, "%s.FragmentSendPool", getBlockName(blockNumber));
+ BaseString::snprintf(buf, 255, "%s.FragmentSendPool", getBlockName(blockNumber));
if(!p->get(buf, &count))
p->get("FragmentSendPool", &count);
c_fragmentSendPool.setSize(count);
count = 10;
- snprintf(buf, 255, "%s.FragmentInfoPool", getBlockName(blockNumber));
+ BaseString::snprintf(buf, 255, "%s.FragmentInfoPool", getBlockName(blockNumber));
if(!p->get(buf, &count))
p->get("FragmentInfoPool", &count);
c_fragmentInfoPool.setSize(count);
count = 10;
- snprintf(buf, 255, "%s.FragmentInfoHash", getBlockName(blockNumber));
+ BaseString::snprintf(buf, 255, "%s.FragmentInfoHash", getBlockName(blockNumber));
if(!p->get(buf, &count))
p->get("FragmentInfoHash", &count);
c_fragmentInfoHash.setSize(count);
count = 5;
- snprintf(buf, 255, "%s.ActiveMutexes", getBlockName(blockNumber));
+ BaseString::snprintf(buf, 255, "%s.ActiveMutexes", getBlockName(blockNumber));
if(!p->get(buf, &count))
p->get("ActiveMutexes", &count);
c_mutexMgr.setSize(count);
@@ -147,7 +147,7 @@ SimulatedBlock::addRecSignalImpl(GlobalSignalNumber gsn,
ExecFunction f, bool force){
if(gsn > MAX_GSN || (!force && theExecArray[gsn] != 0)){
char errorMsg[255];
- snprintf(errorMsg, 255,
+ BaseString::snprintf(errorMsg, 255,
"Illeagal signal (%d %d)", gsn, MAX_GSN);
ERROR_SET(fatal, ERR_ERROR_PRGERR, errorMsg, errorMsg);
}
@@ -159,9 +159,9 @@ SimulatedBlock::signal_error(Uint32 gsn, Uint32 len, Uint32 recBlockNo,
const char* filename, int lineno) const
{
char objRef[255];
- snprintf(objRef, 255, "%s:%d", filename, lineno);
+ BaseString::snprintf(objRef, 255, "%s:%d", filename, lineno);
char probData[255];
- snprintf(probData, 255,
+ BaseString::snprintf(probData, 255,
"Signal (GSN: %d, Length: %d, Rec Block No: %d)",
gsn, len, recBlockNo);
@@ -664,9 +664,9 @@ SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear)
if (p == NULL){
char buf1[255];
char buf2[255];
- snprintf(buf1, sizeof(buf1), "%s could not allocate memory for %s",
+ BaseString::snprintf(buf1, sizeof(buf1), "%s could not allocate memory for %s",
getBlockName(number()), type);
- snprintf(buf2, sizeof(buf2), "Requested: %ux%u = %u bytes",
+ BaseString::snprintf(buf2, sizeof(buf2), "Requested: %ux%u = %u bytes",
(Uint32)s, (Uint32)n, (Uint32)size);
ERROR_SET(fatal, ERR_MEMALLOC, buf1, buf2);
}
@@ -722,7 +722,7 @@ SimulatedBlock::progError(int line, int err_code, const char* extra) const {
/* Add line number to block name */
char buf[100];
- snprintf(&buf[0], 100, "%s (Line: %d) 0x%.8x",
+ BaseString::snprintf(&buf[0], 100, "%s (Line: %d) 0x%.8x",
aBlockName, line, magicStatus);
ErrorReporter::handleError(ecError, err_code, extra, buf);
@@ -740,7 +740,7 @@ SimulatedBlock::infoEvent(const char * msg, ...) const {
va_list ap;
va_start(ap, msg);
- vsnprintf(buf, 96, msg, ap); // 96 = 100 - 4
+ BaseString::vsnprintf(buf, 96, msg, ap); // 96 = 100 - 4
va_end(ap);
int len = strlen(buf) + 1;
@@ -781,7 +781,7 @@ SimulatedBlock::warningEvent(const char * msg, ...) const {
va_list ap;
va_start(ap, msg);
- vsnprintf(buf, 96, msg, ap); // 96 = 100 - 4
+ BaseString::vsnprintf(buf, 96, msg, ap); // 96 = 100 - 4
va_end(ap);
int len = strlen(buf) + 1;
diff --git a/ndb/src/kernel/vm/SimulatedBlock.hpp b/ndb/src/kernel/vm/SimulatedBlock.hpp
index 6d46e9cc377..7972cb39746 100644
--- a/ndb/src/kernel/vm/SimulatedBlock.hpp
+++ b/ndb/src/kernel/vm/SimulatedBlock.hpp
@@ -472,11 +472,11 @@ SimulatedBlock::executeFunction(GlobalSignalNumber gsn, Signal* signal){
*/
char errorMsg[255];
if (!(gsn <= MAX_GSN)) {
- snprintf(errorMsg, 255, "Illegal signal received (GSN %d too high)", gsn);
+ BaseString::snprintf(errorMsg, 255, "Illegal signal received (GSN %d too high)", gsn);
ERROR_SET(fatal, ERR_ERROR_PRGERR, errorMsg, errorMsg);
}
if (!(theExecArray[gsn] != 0)) {
- snprintf(errorMsg, 255, "Illegal signal received (GSN %d not added)", gsn);
+ BaseString::snprintf(errorMsg, 255, "Illegal signal received (GSN %d not added)", gsn);
ERROR_SET(fatal, ERR_ERROR_PRGERR, errorMsg, errorMsg);
}
ndbrequire(false);
diff --git a/ndb/src/kernel/vm/TransporterCallback.cpp b/ndb/src/kernel/vm/TransporterCallback.cpp
index 158de64c87f..ba929b7ea7a 100644
--- a/ndb/src/kernel/vm/TransporterCallback.cpp
+++ b/ndb/src/kernel/vm/TransporterCallback.cpp
@@ -38,13 +38,6 @@
*/
SectionSegmentPool g_sectionSegmentPool;
-static int f(int v){
- g_sectionSegmentPool.setSize(v);
- return v;
-}
-
-static int v = f(2048);
-
bool
import(Ptr<SectionSegment> & first, const Uint32 * src, Uint32 len){
/**
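The deleted f()/v pair sized g_sectionSegmentPool as a side effect of static initialisation, whose order relative to other globals is unspecified; its removal implies the pool is now sized explicitly during startup instead. A hedged sketch of the explicit form (the real call site is not part of this hunk):

    // Sketch: size the pool from a well-defined point in node startup
    // rather than from a static initialiser.
    void init_section_segment_pool(Uint32 noOfSegments)
    {
      g_sectionSegmentPool.setSize(noOfSegments);
    }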
diff --git a/ndb/src/kernel/vm/WatchDog.cpp b/ndb/src/kernel/vm/WatchDog.cpp
index a90f63aff37..4e07dc1df90 100644
--- a/ndb/src/kernel/vm/WatchDog.cpp
+++ b/ndb/src/kernel/vm/WatchDog.cpp
@@ -15,6 +15,9 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#include <ndb_global.h>
+#include <my_pthread.h>
+
#include "WatchDog.hpp"
#include "GlobalData.hpp"
#include <NdbOut.hpp>
@@ -24,7 +27,9 @@
extern "C"
void*
runWatchDog(void* w){
+ my_thread_init();
((WatchDog*)w)->run();
+ my_thread_end();
NdbThread_Exit(0);
return NULL;
}
diff --git a/ndb/src/kernel/vm/pc.hpp b/ndb/src/kernel/vm/pc.hpp
index bc74adfc8f6..2d745d26b1c 100644
--- a/ndb/src/kernel/vm/pc.hpp
+++ b/ndb/src/kernel/vm/pc.hpp
@@ -147,7 +147,15 @@
// in future version since small tables want small value and large tables
// need large value.
/* ------------------------------------------------------------------------- */
-#define NO_OF_FRAG_PER_NODE 8
+#define NO_OF_FRAG_PER_NODE 1
+#define MAX_FRAG_PER_NODE 8
+
+/**
+* DIH allocates fragments in chunk for fast find of fragment record.
+* These parameters define chunk size and log of chunk size.
+*/
+#define NO_OF_FRAGS_PER_CHUNK 4
+#define LOG_NO_OF_FRAGS_PER_CHUNK 2
/* ---------------------------------------------------------------- */
// To avoid synching too big chunks at a time we synch after writing
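The new chunk constants pair with the noFragPerTable computation added in Configuration.cpp: the data-node count is rounded up to a whole number of DIH fragment chunks (chunk size 4, log 2) so fragment records can be located quickly. A worked sketch of that rounding; the helper name is hypothetical:

    // Sketch: round n up to the next multiple of the chunk size
    // (NO_OF_FRAGS_PER_CHUNK == 1 << LOG_NO_OF_FRAGS_PER_CHUNK).
    static inline Uint32 round_up_to_chunk(Uint32 n)
    {
      return ((n + NO_OF_FRAGS_PER_CHUNK - 1) >> LOG_NO_OF_FRAGS_PER_CHUNK)
             << LOG_NO_OF_FRAGS_PER_CHUNK;
    }
    // e.g. 1..4 data nodes -> 4 fragments per table, 5..8 -> 8.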
diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp
index e78b0d41cf2..4b62df968b3 100644
--- a/ndb/src/mgmapi/mgmapi.cpp
+++ b/ndb/src/mgmapi/mgmapi.cpp
@@ -15,6 +15,9 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <ndb_global.h>
+#include <my_sys.h>
+
+#include <NdbAutoPtr.hpp>
#include <NdbTCP.h>
#include "mgmapi.h"
@@ -107,7 +110,7 @@ setError(NdbMgmHandle h, int error, int error_line, const char * msg, ...){
va_list ap;
va_start(ap, msg);
- vsnprintf(h->last_error_desc, sizeof(h->last_error_desc), msg, ap);
+ BaseString::vsnprintf(h->last_error_desc, sizeof(h->last_error_desc), msg, ap);
va_end(ap);
}
@@ -137,7 +140,8 @@ extern "C"
NdbMgmHandle
ndb_mgm_create_handle()
{
- NdbMgmHandle h = (NdbMgmHandle)malloc(sizeof(ndb_mgm_handle));
+ NdbMgmHandle h =
+ (NdbMgmHandle)my_malloc(sizeof(ndb_mgm_handle),MYF(MY_WME));
h->connected = 0;
h->last_error = 0;
h->last_error_line = 0;
@@ -166,16 +170,14 @@ ndb_mgm_destroy_handle(NdbMgmHandle * handle)
if((* handle)->connected){
ndb_mgm_disconnect(* handle);
}
- if((* handle)->hostname != 0){
- free((* handle)->hostname);
- }
+ my_free((* handle)->hostname,MYF(MY_ALLOW_ZERO_PTR));
#ifdef MGMAPI_LOG
if ((* handle)->logfile != 0){
fclose((* handle)->logfile);
(* handle)->logfile = 0;
}
#endif
- free(* handle);
+ my_free((char*)* handle,MYF(MY_ALLOW_ZERO_PTR));
* handle = 0;
}
@@ -228,7 +230,8 @@ parse_connect_string(const char * connect_string,
return -1;
}
- char * line = strdup(connect_string);
+ char * line = my_strdup(connect_string,MYF(MY_WME));
+ My_auto_ptr<char> ap1(line);
if(line == 0){
SET_ERROR(handle, NDB_MGM_OUT_OF_MEMORY, "");
return -1;
@@ -236,7 +239,6 @@ parse_connect_string(const char * connect_string,
char * tmp = strchr(line, ':');
if(tmp == 0){
- free(line);
SET_ERROR(handle, NDB_MGM_OUT_OF_MEMORY, "");
return -1;
}
@@ -244,17 +246,13 @@ parse_connect_string(const char * connect_string,
int port = 0;
if(sscanf(tmp, "%d", &port) != 1){
- free(line);
SET_ERROR(handle, NDB_MGM_ILLEGAL_PORT_NUMBER, "");
return -1;
}
- if(handle->hostname != 0)
- free(handle->hostname);
-
- handle->hostname = strdup(line);
+ my_free(handle->hostname,MYF(MY_ALLOW_ZERO_PTR));
+ handle->hostname = my_strdup(line,MYF(MY_WME));
handle->port = port;
- free(line);
return 0;
}
@@ -361,7 +359,7 @@ ndb_mgm_connect(NdbMgmHandle handle, const char * mgmsrv)
* Open the log file
*/
char logname[64];
- snprintf(logname, 64, "mgmapi.log");
+ BaseString::snprintf(logname, 64, "mgmapi.log");
handle->logfile = fopen(logname, "w");
#endif
@@ -403,14 +401,15 @@ ndb_mgm_disconnect(NdbMgmHandle handle)
struct ndb_mgm_type_atoi
{
const char * str;
+ const char * alias;
enum ndb_mgm_node_type value;
};
static struct ndb_mgm_type_atoi type_values[] =
{
- { "NDB", NDB_MGM_NODE_TYPE_NDB},
- { "API", NDB_MGM_NODE_TYPE_API },
- { "MGM", NDB_MGM_NODE_TYPE_MGM }
+ { "NDB", "ndbd", NDB_MGM_NODE_TYPE_NDB},
+ { "API", "mysqld", NDB_MGM_NODE_TYPE_API },
+ { "MGM", "ndb_mgmd", NDB_MGM_NODE_TYPE_MGM }
};
const int no_of_type_values = (sizeof(type_values) /
@@ -440,6 +439,20 @@ ndb_mgm_get_node_type_string(enum ndb_mgm_node_type type)
return 0;
}
+extern "C"
+const char *
+ndb_mgm_get_node_type_alias_string(enum ndb_mgm_node_type type, const char** str)
+{
+ for(int i = 0; i<no_of_type_values; i++)
+ if(type_values[i].value == type)
+ {
+ if (str)
+ *str= type_values[i].str;
+ return type_values[i].alias;
+ }
+ return 0;
+}
+
struct ndb_mgm_status_atoi {
const char * str;
enum ndb_mgm_node_status value;
@@ -509,6 +522,9 @@ status_ackumulate(struct ndb_mgm_node_state * state,
state->version = atoi(value);
} else if(strcmp("connect_count", field) == 0){
state->connect_count = atoi(value);
+ } else if(strcmp("address", field) == 0){
+ strncpy(state->connect_address, value, sizeof(state->connect_address));
+ state->connect_address[sizeof(state->connect_address)-1]= 0;
} else {
ndbout_c("Unknown field: %s", field);
}
@@ -575,22 +591,27 @@ ndb_mgm_get_status(NdbMgmHandle handle)
ndb_mgm_cluster_state *state = (ndb_mgm_cluster_state*)
malloc(sizeof(ndb_mgm_cluster_state)+
- noOfNodes*sizeof(ndb_mgm_node_state));
+ noOfNodes*(sizeof(ndb_mgm_node_state)+sizeof("000.000.000.000#")));
- state->no_of_nodes = noOfNodes;
+ state->hostname= 0;
+ state->no_of_nodes= noOfNodes;
ndb_mgm_node_state * ptr = &state->node_states[0];
int nodeId = 0;
- int i = -1; ptr--;
+ int i;
+ for (i= 0; i < noOfNodes; i++) {
+ state->node_states[i].connect_address[0]= 0;
+ }
+ i = -1; ptr--;
for(; i<noOfNodes; ){
in.gets(buf, sizeof(buf));
tmp.assign(buf);
-
+
if(tmp.trim() == ""){
break;
}
Vector<BaseString> split;
- tmp.split(split, ":.");
+ tmp.split(split, ":.", 4);
if(split.size() != 4)
break;
@@ -931,13 +952,52 @@ struct ndb_mgm_event_categories
{
const char* name;
enum ndb_mgm_event_category category;
+} categories[] = {
+ { "STARTUP", NDB_MGM_EVENT_CATEGORY_STARTUP },
+ { "SHUTDOWN", NDB_MGM_EVENT_CATEGORY_SHUTDOWN },
+ { "STATISTICS", NDB_MGM_EVENT_CATEGORY_STATISTIC },
+ { "NODERESTART", NDB_MGM_EVENT_CATEGORY_NODE_RESTART },
+ { "CONNECTION", NDB_MGM_EVENT_CATEGORY_CONNECTION },
+ { "CHECKPOINT", NDB_MGM_EVENT_CATEGORY_CHECKPOINT },
+ { "DEBUG", NDB_MGM_EVENT_CATEGORY_DEBUG },
+ { "INFO", NDB_MGM_EVENT_CATEGORY_INFO },
+ { "ERROR", NDB_MGM_EVENT_CATEGORY_ERROR },
+ { "GREP", NDB_MGM_EVENT_CATEGORY_GREP },
+ { "BACKUP", NDB_MGM_EVENT_CATEGORY_BACKUP },
+ { 0, NDB_MGM_ILLEGAL_EVENT_CATEGORY }
};
extern "C"
+ndb_mgm_event_category
+ndb_mgm_match_event_category(const char * status)
+{
+ if(status == 0)
+ return NDB_MGM_ILLEGAL_EVENT_CATEGORY;
+
+ for(int i = 0; categories[i].name !=0 ; i++)
+ if(strcmp(status, categories[i].name) == 0)
+ return categories[i].category;
+
+ return NDB_MGM_ILLEGAL_EVENT_CATEGORY;
+}
+
+extern "C"
+const char *
+ndb_mgm_get_event_category_string(enum ndb_mgm_event_category status)
+{
+ int i;
+ for(i = 0; categories[i].name != 0; i++)
+ if(categories[i].category == status)
+ return categories[i].name;
+
+ return 0;
+}
+
+extern "C"
int
ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle, int nodeId,
- /*enum ndb_mgm_event_category*/
- char * category, int level,
+ enum ndb_mgm_event_category cat,
+ int level,
struct ndb_mgm_reply* /*reply*/)
{
SET_ERROR(handle, NDB_MGM_NO_ERROR,
@@ -952,14 +1012,14 @@ ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle, int nodeId,
Properties args;
args.put("node", nodeId);
- args.put("category", category);
+ args.put("category", cat);
args.put("level", level);
-
+
const Properties *reply;
reply = ndb_mgm_call(handle, clusterlog_reply,
"set cluster loglevel", &args);
CHECK_REPLY(reply, -1);
-
+
BaseString result;
reply->get("result", result);
if(strcmp(result.c_str(), "Ok") != 0) {
@@ -974,8 +1034,8 @@ ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle, int nodeId,
extern "C"
int
ndb_mgm_set_loglevel_node(NdbMgmHandle handle, int nodeId,
- /*enum ndb_mgm_event_category category*/
- char * category, int level,
+ enum ndb_mgm_event_category category,
+ int level,
struct ndb_mgm_reply* /*reply*/)
{
SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_set_loglevel_node");
@@ -1008,6 +1068,48 @@ ndb_mgm_set_loglevel_node(NdbMgmHandle handle, int nodeId,
}
extern "C"
+int
+ndb_mgm_listen_event(NdbMgmHandle handle, int filter[])
+{
+ SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_listen_event");
+ const ParserRow<ParserDummy> stat_reply[] = {
+ MGM_CMD("listen event", NULL, ""),
+ MGM_ARG("result", Int, Mandatory, "Error message"),
+ MGM_ARG("msg", String, Optional, "Error message"),
+ MGM_END()
+ };
+ CHECK_HANDLE(handle, -1);
+
+ SocketClient s(handle->hostname, handle->port);
+ const NDB_SOCKET_TYPE sockfd = s.connect();
+ if (sockfd < 0) {
+ setError(handle, NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, __LINE__,
+ "Unable to connect to");
+ return -1;
+ }
+
+ Properties args;
+ {
+ BaseString tmp;
+ for(int i = 0; filter[i] != 0; i += 2){
+ tmp.appfmt("%d=%d ", filter[i+1], filter[i]);
+ }
+ args.put("filter", tmp.c_str());
+ }
+
+ int tmp = handle->socket;
+ handle->socket = sockfd;
+
+ const Properties *reply;
+ reply = ndb_mgm_call(handle, stat_reply, "listen event", &args);
+
+ handle->socket = tmp;
+
+ CHECK_REPLY(reply, -1);
+ return sockfd;
+}
+
+extern "C"
int
ndb_mgm_get_stat_port(NdbMgmHandle handle, struct ndb_mgm_reply* /*reply*/)
{
@@ -1049,11 +1151,14 @@ ndb_mgm_dump_state(NdbMgmHandle handle, int nodeId, int* _args,
CHECK_CONNECTED(handle, -1);
char buf[256];
- char buf2[6];
buf[0] = 0;
for (int i = 0; i < _num_args; i++){
- snprintf(buf2, 6, "%d ", _args[i]);
- strncat(buf, buf2, 256);
+ unsigned n = strlen(buf);
+ if (n + 20 > sizeof(buf)) {
+ SET_ERROR(handle, NDB_MGM_USAGE_ERROR, "arguments too long");
+ return -1;
+ }
+ sprintf(buf + n, "%s%d", i ? " " : "", _args[i]);
}
Properties args;
@@ -1509,6 +1614,16 @@ ndb_mgm_get_configuration(NdbMgmHandle handle, unsigned int version) {
}
extern "C"
+void
+ndb_mgm_destroy_configuration(struct ndb_mgm_configuration *cfg)
+{
+ if (cfg) {
+ ((ConfigValues *)cfg)->~ConfigValues();
+ free((void *)cfg);
+ }
+}
+
+extern "C"
int
ndb_mgm_alloc_nodeid(NdbMgmHandle handle, unsigned int version, unsigned *pnodeid, int nodetype)
{
@@ -1539,8 +1654,11 @@ ndb_mgm_alloc_nodeid(NdbMgmHandle handle, unsigned int version, unsigned *pnodei
do {
const char * buf;
if(!prop->get("result", &buf) || strcmp(buf, "Ok") != 0){
+ BaseString err;
+ err.assfmt("Could not alloc node id at %s port %d: %s",
+ handle->hostname, handle->port, buf);
setError(handle, NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, __LINE__,
- "Could not alloc node id: %s",buf);
+ err.c_str());
break;
}
if(!prop->get("nodeid", pnodeid) != 0){
diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/ndb/src/mgmclient/CommandInterpreter.cpp
index 141a0be0eff..e0935c2104e 100644
--- a/ndb/src/mgmclient/CommandInterpreter.cpp
+++ b/ndb/src/mgmclient/CommandInterpreter.cpp
@@ -14,13 +14,18 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#include <ndb_global.h>
+#include <my_sys.h>
+
#include "CommandInterpreter.hpp"
#include <mgmapi.h>
#include <mgmapi_debug.h>
#include <version.h>
+#include <NdbAutoPtr.hpp>
#include <NdbOut.hpp>
#include <NdbSleep.h>
+#include <NdbMem.h>
#include <EventLogger.hpp>
#include <signaldata/SetLogLevelOrd.hpp>
#include <signaldata/GrepImpl.hpp>
@@ -47,10 +52,13 @@ static const char* helpText =
"HELP DEBUG Help for debug compiled version\n"
#endif
"SHOW Print information about cluster\n"
+#if 0
"SHOW CONFIG Print configuration\n"
"SHOW PARAMETERS Print configuration parameters\n"
+#endif
"START BACKUP Start backup\n"
"ABORT BACKUP <backup id> Abort backup\n"
+"SHUTDOWN Shutdown all processes in cluster and quit\n"
"CLUSTERLOG ON Enable Cluster logging\n"
"CLUSTERLOG OFF Disable Cluster logging\n"
"CLUSTERLOG FILTER <severity> Toggle severity filter on/off\n"
@@ -62,7 +70,9 @@ static const char* helpText =
"EXIT SINGLE USER MODE Exit single user mode\n"
"<id> STATUS Print status\n"
"<id> CLUSTERLOG {<category>=<level>}+ Set log level for cluster log\n"
+#ifdef HAVE_GLOBAL_REPLICATION
"REP CONNECT <host:port> Connect to REP server on host:port\n"
+#endif
"QUIT Quit management client\n"
;
@@ -72,8 +82,10 @@ static const char* helpTextShow =
"---------------------------------------------------------------------------\n"
"SHOW prints NDB Cluster information\n\n"
"SHOW Print information about cluster\n"
+#if 0
"SHOW CONFIG Print configuration (in initial config file format)\n"
"SHOW PARAMETERS Print information about configuration parameters\n\n"
+#endif
;
#ifdef HAVE_GLOBAL_REPLICATION
@@ -170,7 +182,7 @@ CommandInterpreter::CommandInterpreter(const char *_host)
connected = false;
try_reconnect = 0;
- host = strdup(_host);
+ host = my_strdup(_host,MYF(MY_WME));
#ifdef HAVE_GLOBAL_REPLICATION
rep_host = NULL;
m_repserver = NULL;
@@ -185,7 +197,7 @@ CommandInterpreter::~CommandInterpreter()
{
connected = false;
ndb_mgm_destroy_handle(&m_mgmsrv);
- free((char *)host);
+ my_free((char *)host,MYF(0));
host = NULL;
}
@@ -205,16 +217,6 @@ emptyString(const char* s)
return true;
}
-class AutoPtr
-{
-public:
- AutoPtr(void * ptr) : m_ptr(ptr) {}
- ~AutoPtr() { free(m_ptr);}
-private:
- void * m_ptr;
-};
-
-
void
CommandInterpreter::printError()
{
@@ -274,9 +276,8 @@ CommandInterpreter::readAndExecute(int _try_reconnect)
return false;
}
- line = strdup(_line);
-
- AutoPtr ptr(line);
+ line = my_strdup(_line,MYF(MY_WME));
+ My_auto_ptr<char> ptr(line);
if (emptyString(line)) {
return true;
@@ -299,6 +300,10 @@ CommandInterpreter::readAndExecute(int _try_reconnect)
executeShow(allAfterFirstToken);
return true;
}
+ else if (strcmp(firstToken, "SHUTDOWN") == 0) {
+ executeShutdown(allAfterFirstToken);
+ return true;
+ }
else if (strcmp(firstToken, "CLUSTERLOG") == 0){
executeClusterLog(allAfterFirstToken);
return true;
@@ -498,20 +503,19 @@ CommandInterpreter::executeForAll(const char * cmd, ExecuteFunction fun,
ndbout_c("Use ALL STATUS to see the system start-up phases.");
} else {
connect();
- struct ndb_mgm_cluster_state *cl;
- cl = ndb_mgm_get_status(m_mgmsrv);
+ struct ndb_mgm_cluster_state *cl= ndb_mgm_get_status(m_mgmsrv);
if(cl == 0){
ndbout_c("Unable get status from management server");
printError();
return;
}
+ NdbAutoPtr<char> ap1((char*)cl);
while(get_next_nodeid(cl, &nodeId, NDB_MGM_NODE_TYPE_NDB)) {
if(strcmp(cmd, "STATUS") != 0)
ndbout_c("Executing %s on node %d.", cmd, nodeId);
(this->*fun)(nodeId, allAfterSecondToken, true);
ndbout << endl;
} // while
- free(cl);
}
}
@@ -528,7 +532,8 @@ CommandInterpreter::parseBlockSpecification(const char* allAfterLog,
}
// Copy allAfterLog since strtok will modify it
- char* newAllAfterLog = strdup(allAfterLog);
+ char* newAllAfterLog = my_strdup(allAfterLog,MYF(MY_WME));
+ My_auto_ptr<char> ap1(newAllAfterLog);
char* firstTokenAfterLog = strtok(newAllAfterLog, " ");
for (unsigned int i = 0; i < strlen(firstTokenAfterLog); ++i) {
firstTokenAfterLog[i] = toupper(firstTokenAfterLog[i]);
@@ -537,14 +542,12 @@ CommandInterpreter::parseBlockSpecification(const char* allAfterLog,
if (strcmp(firstTokenAfterLog, "BLOCK") != 0) {
ndbout << "Unexpected value: " << firstTokenAfterLog
<< ". Expected BLOCK." << endl;
- free(newAllAfterLog);
return false;
}
char* allAfterFirstToken = strtok(NULL, "\0");
if (emptyString(allAfterFirstToken)) {
ndbout << "Expected =." << endl;
- free(newAllAfterLog);
return false;
}
@@ -552,7 +555,6 @@ CommandInterpreter::parseBlockSpecification(const char* allAfterLog,
if (strcmp(secondTokenAfterLog, "=") != 0) {
ndbout << "Unexpected value: " << secondTokenAfterLog
<< ". Expected =." << endl;
- free(newAllAfterLog);
return false;
}
@@ -568,17 +570,14 @@ CommandInterpreter::parseBlockSpecification(const char* allAfterLog,
if (blocks.size() == 0) {
ndbout << "No block specified." << endl;
- free(newAllAfterLog);
return false;
}
if (blocks.size() > 1 && all) {
// More than "ALL" specified
ndbout << "Nothing expected after ALL." << endl;
- free(newAllAfterLog);
return false;
}
- free(newAllAfterLog);
return true;
}
@@ -599,10 +598,12 @@ CommandInterpreter::executeHelp(char* parameters)
<< endl;
ndbout << "<category> = ";
- for(Uint32 i = 0; i<EventLogger::noOfEventCategoryNames; i++){
- ndbout << EventLogger::eventCategoryNames[i].name;
- if (i < EventLogger::noOfEventCategoryNames - 1) {
- ndbout << " | ";
+ for(int i = CFG_MIN_LOGLEVEL; i <= CFG_MAX_LOGLEVEL; i++){
+ const char *str= ndb_mgm_get_event_category_string((ndb_mgm_event_category)i);
+ if (str) {
+ if (i != CFG_MIN_LOGLEVEL)
+ ndbout << " | ";
+ ndbout << str;
}
}
ndbout << endl;
@@ -629,9 +630,141 @@ CommandInterpreter::executeHelp(char* parameters)
/*****************************************************************************
+ * SHUTDOWN
+ *****************************************************************************/
+
+void
+CommandInterpreter::executeShutdown(char* parameters)
+{
+ connect();
+
+ ndb_mgm_cluster_state *state = ndb_mgm_get_status(m_mgmsrv);
+ if(state == NULL) {
+ ndbout_c("Could not get status");
+ printError();
+ return;
+ }
+ NdbAutoPtr<char> ap1((char*)state);
+
+ int result = 0;
+ result = ndb_mgm_stop(m_mgmsrv, 0, 0);
+ if (result < 0) {
+ ndbout << "Shutdown failed." << endl;
+ printError();
+ return;
+ }
+
+ ndbout << result << " NDB Cluster storage node(s) have shutdown." << endl;
+
+ int mgm_id= 0;
+ for(int i=0; i < state->no_of_nodes; i++) {
+ if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_MGM &&
+ state->node_states[i].version != 0){
+ if (mgm_id == 0)
+ mgm_id= state->node_states[i].node_id;
+ else {
+ ndbout << "Unable to locate management server, "
+ << "shutdown manually with <id> STOP"
+ << endl;
+ return;
+ }
+ }
+ }
+
+ result = 0;
+ result = ndb_mgm_stop(m_mgmsrv, 1, &mgm_id);
+ if (result <= 0) {
+ ndbout << "Shutdown failed." << endl;
+ printError();
+ return;
+ }
+
+ ndbout << "NDB Cluster management server shutdown." << endl;
+ exit(0);
+}
+
+/*****************************************************************************
* SHOW
*****************************************************************************/
+
+static
+const char *status_string(ndb_mgm_node_status status)
+{
+ switch(status){
+ case NDB_MGM_NODE_STATUS_NO_CONTACT:
+ return "not connected";
+ case NDB_MGM_NODE_STATUS_NOT_STARTED:
+ return "not started";
+ case NDB_MGM_NODE_STATUS_STARTING:
+ return "starting";
+ case NDB_MGM_NODE_STATUS_STARTED:
+ return "started";
+ case NDB_MGM_NODE_STATUS_SHUTTING_DOWN:
+ return "shutting down";
+ case NDB_MGM_NODE_STATUS_RESTARTING:
+ return "restarting";
+ case NDB_MGM_NODE_STATUS_SINGLEUSER:
+ return "single user mode";
+ default:
+ return "unknown state";
+ }
+}
+
+static void
+print_nodes(ndb_mgm_cluster_state *state, ndb_mgm_configuration_iterator *it,
+ const char *proc_name, int no_proc, ndb_mgm_node_type type,
+ int master_id)
+{
+ int i;
+ ndbout << "[" << proc_name
+ << "(" << ndb_mgm_get_node_type_string(type) << ")]\t"
+ << no_proc << " node(s)" << endl;
+ for(i=0; i < state->no_of_nodes; i++) {
+ struct ndb_mgm_node_state *node_state= &(state->node_states[i]);
+ if(node_state->node_type == type) {
+ int node_id= node_state->node_id;
+ ndbout << "id=" << node_id;
+ if(node_state->version != 0) {
+ const char *hostname= node_state->connect_address;
+ if (hostname == 0
+ || strlen(hostname) == 0
+ || strcmp(hostname,"0.0.0.0") == 0)
+ ndbout << " ";
+ else
+ ndbout << "\t@" << hostname;
+ ndbout << " (Version: "
+ << getMajor(node_state->version) << "."
+ << getMinor(node_state->version) << "."
+ << getBuild(node_state->version);
+ if (type == NDB_MGM_NODE_TYPE_NDB) {
+ if (node_state->node_status != NDB_MGM_NODE_STATUS_STARTED) {
+ ndbout << ", " << status_string(node_state->node_status);
+ }
+ if (node_state->node_group >= 0) {
+ ndbout << ", Nodegroup: " << node_state->node_group;
+ if (node_state->dynamic_id == master_id)
+ ndbout << ", Master";
+ }
+ }
+ ndbout << ")" << endl;
+ } else {
+ if(ndb_mgm_find(it, CFG_NODE_ID, node_id) != 0){
+ ndbout_c("Unable to find node with id: %d", node_id);
+ return;
+ }
+ const char *config_hostname= 0;
+ ndb_mgm_get_string_parameter(it, CFG_NODE_HOST, &config_hostname);
+ if (config_hostname == 0 || config_hostname[0] == 0)
+ config_hostname= "any host";
+ ndbout << " (not connected, accepting connect from "
+ << config_hostname << ")" << endl;
+ }
+ }
+ }
+ ndbout << endl;
+}
+
void
CommandInterpreter::executeShow(char* parameters)
{
@@ -647,6 +780,23 @@ CommandInterpreter::executeShow(char* parameters)
printError();
return;
}
+ NdbAutoPtr<char> ap1((char*)state);
+
+ ndb_mgm_configuration * conf = ndb_mgm_get_configuration(m_mgmsrv,0);
+ if(conf == 0){
+ ndbout_c("Could not get configuration");
+ printError();
+ return;
+ }
+
+ ndb_mgm_configuration_iterator * it;
+ it = ndb_mgm_create_configuration_iterator((struct ndb_mgm_configuration *)conf, CFG_SECTION_NODE);
+
+ if(it == 0){
+ ndbout_c("Unable to create config iterator");
+ return;
+ }
+ NdbAutoPtr<ndb_mgm_configuration_iterator> ptr(it);
int
master_id= 0,
@@ -678,76 +828,14 @@ CommandInterpreter::executeShow(char* parameters)
case NDB_MGM_NODE_TYPE_UNKNOWN:
ndbout << "Error: Unknown Node Type" << endl;
return;
+ case NDB_MGM_NODE_TYPE_REP:
+ abort();
}
}
- ndbout << ndb_nodes
- << " NDB Node(s)"
- << endl;
-
- for(i=0; i < state->no_of_nodes; i++) {
- if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_NDB) {
- ndbout << "DB node:\t" << state->node_states[i].node_id;
- if(state->node_states[i].version != 0) {
- ndbout << " (Version: "
- << getMajor(state->node_states[i].version) << "."
- << getMinor(state->node_states[i].version) << "."
- << getBuild(state->node_states[i].version) << ","
- << " Nodegroup: " << state->node_states[i].node_group;
- if (state->node_states[i].dynamic_id == master_id)
- ndbout << ", Master";
- ndbout << ")" << endl;
- } else
- {
- ndbout << " (not connected) " << endl;
- }
-
- }
- }
- ndbout << endl;
-
- ndbout << mgm_nodes
- << " MGM Node(s)"
- << endl;
-
- for(i=0; i < state->no_of_nodes; i++) {
- if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_MGM) {
- ndbout << "MGM node:\t" << state->node_states[i].node_id;
- if(state->node_states[i].version != 0) {
- ndbout << " (Version: "
- << getMajor(state->node_states[i].version) << "."
- << getMinor(state->node_states[i].version) << "."
- << getBuild(state->node_states[i].version) << ")" << endl;
-
- } else
- {
- ndbout << " (no version information available) " << endl;
- }
- }
- }
- ndbout << endl;
-
- ndbout << api_nodes
- << " API Node(s)"
- << endl;
-
- for(i=0; i < state->no_of_nodes; i++) {
- if(state->node_states[i].node_type == NDB_MGM_NODE_TYPE_API) {
- ndbout << "API node:\t" << state->node_states[i].node_id;
- if(state->node_states[i].version != 0) {
- ndbout << " (Version: "
- << getMajor(state->node_states[i].version) << "."
- << getMinor(state->node_states[i].version) << "."
- << getBuild(state->node_states[i].version) << ")" << endl;
-
- } else
- {
- ndbout << " (not connected) " << endl;
- }
- }
- }
- ndbout << endl;
-
+ print_nodes(state, it, "ndbd", ndb_nodes, NDB_MGM_NODE_TYPE_NDB, master_id);
+ print_nodes(state, it, "ndb_mgmd", mgm_nodes, NDB_MGM_NODE_TYPE_MGM, 0);
+ print_nodes(state, it, "mysqld", api_nodes, NDB_MGM_NODE_TYPE_API, 0);
// ndbout << helpTextShow;
return;
} else if (strcmp(parameters, "PROPERTIES") == 0 ||
@@ -778,12 +866,13 @@ CommandInterpreter::executeClusterLog(char* parameters)
int i;
connect();
if (parameters != 0 && strlen(parameters) != 0) {
- enum ndb_mgm_clusterlog_level severity = NDB_MGM_CLUSTERLOG_ALL;
+ enum ndb_mgm_clusterlog_level severity = NDB_MGM_CLUSTERLOG_ALL;
int isOk = true;
char name[12];
bool noArgs = false;
- char * tmpString = strdup(parameters);
+ char * tmpString = my_strdup(parameters,MYF(MY_WME));
+ My_auto_ptr<char> ap1(tmpString);
char * tmpPtr = 0;
char * item = strtok_r(tmpString, " ", &tmpPtr);
@@ -821,7 +910,6 @@ CommandInterpreter::executeClusterLog(char* parameters)
item = strtok_r(NULL, " ", &tmpPtr);
} // while(item != NULL){
- free(tmpString);
if (noArgs) {
ndbout << "Missing argument(s)." << endl;
@@ -929,7 +1017,7 @@ CommandInterpreter::executeStop(int processId, const char *, bool all)
} else {
result = ndb_mgm_stop(m_mgmsrv, 1, &processId);
}
- if (result <= 0) {
+ if (result < 0) {
ndbout << "Shutdown failed." << endl;
printError();
} else
@@ -1017,7 +1105,8 @@ CommandInterpreter::executeRestart(int processId, const char* parameters,
int abort = 0;
if(parameters != 0 && strlen(parameters) != 0){
- char * tmpString = strdup(parameters);
+ char * tmpString = my_strdup(parameters,MYF(MY_WME));
+ My_auto_ptr<char> ap1(tmpString);
char * tmpPtr = 0;
char * item = strtok_r(tmpString, " ", &tmpPtr);
while(item != NULL){
@@ -1029,7 +1118,6 @@ CommandInterpreter::executeRestart(int processId, const char* parameters,
abort = 1;
item = strtok_r(NULL, " ", &tmpPtr);
}
- free(tmpString);
}
if(all) {
@@ -1065,7 +1153,8 @@ CommandInterpreter::executeDumpState(int processId, const char* parameters,
Uint32 no = 0;
int pars[25];
- char * tmpString = strdup(parameters);
+ char * tmpString = my_strdup(parameters,MYF(MY_WME));
+ My_auto_ptr<char> ap1(tmpString);
char * tmpPtr = 0;
char * item = strtok_r(tmpString, " ", &tmpPtr);
while(item != NULL){
@@ -1085,7 +1174,6 @@ CommandInterpreter::executeDumpState(int processId, const char* parameters,
ndbout.setHexFormat(1) << pars[i] << " ";
if (!(i+1 & 0x3)) ndbout << endl;
}
- free(tmpString);
struct ndb_mgm_reply reply;
ndb_mgm_dump_state(m_mgmsrv, processId, pars, no, &reply);
@@ -1112,6 +1200,7 @@ CommandInterpreter::executeStatus(int processId,
printError();
return;
}
+ NdbAutoPtr<char> ap1((char*)cl);
int i = 0;
while((i < cl->no_of_nodes) && cl->node_states[i].node_id != processId)
@@ -1124,33 +1213,15 @@ CommandInterpreter::executeStatus(int processId,
startPhase = cl->node_states[i].start_phase;
version = cl->node_states[i].version;
- ndbout << "Node " << processId << ": ";
+ ndbout << "Node " << processId << ": " << status_string(status);
switch(status){
- case NDB_MGM_NODE_STATUS_NO_CONTACT:
- ndbout << "No contact" << endl;
- break;
- case NDB_MGM_NODE_STATUS_NOT_STARTED:
- ndbout << "Not started" ;
- break;
case NDB_MGM_NODE_STATUS_STARTING:
- ndbout << "Starting (Start phase " << startPhase << ")" ;
- break;
- case NDB_MGM_NODE_STATUS_STARTED:
- ndbout << "Started" ;
+ ndbout << " (Phase " << startPhase << ")" ;
break;
case NDB_MGM_NODE_STATUS_SHUTTING_DOWN:
- ndbout << "Shutting down " << (system == false ? "node" : "system")
- << " (Phase " << startPhase << ")"
- ;
- break;
- case NDB_MGM_NODE_STATUS_RESTARTING:
- ndbout << "Restarting" ;
- break;
- case NDB_MGM_NODE_STATUS_SINGLEUSER:
- ndbout << "Single user mode" ;
+ ndbout << " (Phase " << startPhase << ")";
break;
default:
- ndbout << "Unknown state" ;
break;
}
if(status != NDB_MGM_NODE_STATUS_NO_CONTACT)
@@ -1170,55 +1241,40 @@ CommandInterpreter::executeLogLevel(int processId, const char* parameters,
{
connect();
(void) all;
- (void) parameters;
- SetLogLevelOrd logLevel; logLevel.clear();
- LogLevel::EventCategory cat;
- int level;
- if (emptyString(parameters) || (strcmp(parameters, "ALL") == 0)) {
- for(Uint32 i = 0; i<EventLogger::noOfEventCategoryNames; i++)
- logLevel.setLogLevel(EventLogger::eventCategoryNames[i].category, 7);
- } else {
-
- char * tmpString = strdup(parameters);
- char * tmpPtr = 0;
- char * item = strtok_r(tmpString, ", ", &tmpPtr);
- while(item != NULL){
- char categoryTxt[255];
- const int m = sscanf(item, "%[^=]=%d", categoryTxt, &level);
- if(m != 2){
- free(tmpString);
- ndbout << "Invalid loglevel specification category=level" << endl;
- return;
- }
+ BaseString tmp(parameters);
+ Vector<BaseString> spec;
+ tmp.split(spec, "=");
+ if(spec.size() != 2){
+ ndbout << "Invalid loglevel specification: " << parameters << endl;
+ return;
+ }
- if(!EventLogger::matchEventCategory(categoryTxt,
- &cat)){
- ndbout << "Invalid loglevel specification, unknown category: "
- << categoryTxt << endl;
- free(tmpString);
- return ;
- }
- if(level < 0 || level > 15){
- ndbout << "Invalid loglevel specification row, level 0-15" << endl;
- free(tmpString);
- return ;
- }
- logLevel.setLogLevel(cat, level);
-
- item = strtok_r(NULL, ", ", &tmpPtr);
+ spec[0].trim().ndb_toupper();
+ int category = ndb_mgm_match_event_category(spec[0].c_str());
+ if(category == NDB_MGM_ILLEGAL_EVENT_CATEGORY){
+ category = atoi(spec[0].c_str());
+ if(category < NDB_MGM_MIN_EVENT_CATEGORY ||
+ category > NDB_MGM_MAX_EVENT_CATEGORY){
+ ndbout << "Unknown category: \"" << spec[0].c_str() << "\"" << endl;
+ return;
}
- free(tmpString);
}
-
+
+ int level = atoi(spec[1].c_str());
+ if(level < 0 || level > 15){
+ ndbout << "Invalid level: " << spec[1].c_str() << endl;
+ return;
+ }
+
struct ndb_mgm_reply reply;
int result;
result = ndb_mgm_set_loglevel_node(m_mgmsrv,
- processId, // fast fix - pekka
- (char*)EventLogger::getEventCategoryName(cat),
+ processId,
+ (ndb_mgm_event_category)category,
level,
&reply);
-
+
if (result < 0) {
ndbout_c("Executing LOGLEVEL on node %d failed.", processId);
printError();
@@ -1226,7 +1282,7 @@ CommandInterpreter::executeLogLevel(int processId, const char* parameters,
ndbout << "Executing LOGLEVEL on node " << processId << " OK!"
<< endl;
}
-
+
}
//*****************************************************************************
@@ -1241,26 +1297,23 @@ void CommandInterpreter::executeError(int processId,
connect();
// Copy parameters since strtok will modify it
- char* newpar = strdup(parameters);
+ char* newpar = my_strdup(parameters,MYF(MY_WME));
+ My_auto_ptr<char> ap1(newpar);
char* firstParameter = strtok(newpar, " ");
int errorNo;
if (! convert(firstParameter, errorNo)) {
ndbout << "Expected an integer." << endl;
- free(newpar);
return;
}
char* allAfterFirstParameter = strtok(NULL, "\0");
if (! emptyString(allAfterFirstParameter)) {
ndbout << "Nothing expected after error number." << endl;
- free(newpar);
return;
}
ndb_mgm_insert_error(m_mgmsrv, processId, errorNo, NULL);
-
- free(newpar);
}
//*****************************************************************************
@@ -1275,21 +1328,20 @@ CommandInterpreter::executeTrace(int /*processId*/,
return;
}
- char* newpar = strdup(parameters);
+ char* newpar = my_strdup(parameters,MYF(MY_WME));
+ My_auto_ptr<char> ap1(newpar);
char* firstParameter = strtok(newpar, " ");
int traceNo;
if (! convert(firstParameter, traceNo)) {
ndbout << "Expected an integer." << endl;
- free(newpar);
return;
}
char* allAfterFirstParameter = strtok(NULL, "\0");
if (! emptyString(allAfterFirstParameter)) {
ndbout << "Nothing expected after trace number." << endl;
- free(newpar);
return;
}
@@ -1297,7 +1349,6 @@ CommandInterpreter::executeTrace(int /*processId*/,
if (result != 0) {
ndbout << _mgmtSrvr.getErrorText(result) << endl;
}
- free(newpar);
#endif
}
@@ -1314,35 +1365,29 @@ CommandInterpreter::executeLog(int processId,
if (! parseBlockSpecification(parameters, blocks)) {
return;
}
- int len=0;
+ int len=1;
Uint32 i;
for(i=0; i<blocks.size(); i++) {
- ndbout_c("blocks %s %d",blocks[i], strlen(blocks[i]));
- len += strlen(blocks[i]);
+ len += strlen(blocks[i]) + 1;
}
- len += blocks.size()*2;
- char * blockNames = (char*)malloc(len);
+ char * blockNames = (char*)my_malloc(len,MYF(MY_WME));
+ My_auto_ptr<char> ap1(blockNames);
+ blockNames[0] = 0;
for(i=0; i<blocks.size(); i++) {
strcat(blockNames, blocks[i]);
strcat(blockNames, "|");
}
- strcat(blockNames, "\0");
- ndbout_c("blocknames %s", blockNames);
- /*int res =*/ndb_mgm_log_signals(m_mgmsrv,
+ int result = ndb_mgm_log_signals(m_mgmsrv,
processId,
NDB_MGM_SIGNAL_LOG_MODE_INOUT,
blockNames,
&reply);
-
-#if 0
- int result =
- _mgmtSrvr.setSignalLoggingMode(processId, MgmtSrvr::InOut, blocks);
if (result != 0) {
- ndbout << _mgmtSrvr.getErrorText(result) << endl;
+ ndbout_c("Execute LOG on node %d failed.", processId);
+ printError();
}
-#endif
}
//*****************************************************************************
@@ -1351,17 +1396,7 @@ void
CommandInterpreter::executeLogIn(int /* processId */,
const char* parameters, bool /* all */)
{
- Vector<const char*> blocks;
- if (! parseBlockSpecification(parameters, blocks)) {
- return;
- }
-
-#if 0
- int result = _mgmtSrvr.setSignalLoggingMode(processId, MgmtSrvr::In, blocks);
- if (result != 0) {
- ndbout << _mgmtSrvr.getErrorText(result) << endl;
- }
-#endif
+ ndbout << "Command LOGIN not implemented." << endl;
}
//*****************************************************************************
@@ -1370,19 +1405,7 @@ void
CommandInterpreter::executeLogOut(int /*processId*/,
const char* parameters, bool /*all*/)
{
- Vector<const char*> blocks;
- if (! parseBlockSpecification(parameters, blocks)) {
- return;
- }
-
-
-#if 0
- int result = _mgmtSrvr.setSignalLoggingMode(processId, MgmtSrvr::Out,
- blocks);
- if (result != 0) {
- ndbout << _mgmtSrvr.getErrorText(result) << endl;
- }
-#endif
+ ndbout << "Command LOGOUT not implemented." << endl;
}
//*****************************************************************************
@@ -1391,57 +1414,45 @@ void
CommandInterpreter::executeLogOff(int /*processId*/,
const char* parameters, bool /*all*/)
{
- Vector<const char*> blocks;
- if (! parseBlockSpecification(parameters, blocks)) {
- return;
- }
-
-
-#if 0
- int result = _mgmtSrvr.setSignalLoggingMode(processId, MgmtSrvr::Off,
- blocks);
- if (result != 0) {
- ndbout << _mgmtSrvr.getErrorText(result) << endl;
- }
-#endif
+ ndbout << "Command LOGOFF not implemented." << endl;
}
//*****************************************************************************
//*****************************************************************************
void
-CommandInterpreter::executeTestOn(int /*processId*/,
+CommandInterpreter::executeTestOn(int processId,
const char* parameters, bool /*all*/)
{
if (! emptyString(parameters)) {
ndbout << "No parameters expected to this command." << endl;
return;
}
-
-#if 0
- int result = _mgmtSrvr.startSignalTracing(processId);
+ connect();
+ struct ndb_mgm_reply reply;
+ int result = ndb_mgm_start_signallog(m_mgmsrv, processId, &reply);
if (result != 0) {
- ndbout << _mgmtSrvr.getErrorText(result) << endl;
+ ndbout_c("Execute TESTON failed.");
+ printError();
}
-#endif
}
//*****************************************************************************
//*****************************************************************************
void
-CommandInterpreter::executeTestOff(int /*processId*/,
+CommandInterpreter::executeTestOff(int processId,
const char* parameters, bool /*all*/)
{
if (! emptyString(parameters)) {
ndbout << "No parameters expected to this command." << endl;
return;
}
-
-#if 0
- int result = _mgmtSrvr.stopSignalTracing(processId);
+ connect();
+ struct ndb_mgm_reply reply;
+ int result = ndb_mgm_stop_signallog(m_mgmsrv, processId, &reply);
if (result != 0) {
- ndbout << _mgmtSrvr.getErrorText(result) << endl;
+ ndbout_c("Execute TESTOFF failed.");
+ printError();
}
-#endif
}
@@ -1457,13 +1468,13 @@ CommandInterpreter::executeSet(int /*processId*/,
}
#if 0
// Copy parameters since strtok will modify it
- char* newpar = strdup(parameters);
+ char* newpar = my_strdup(parameters,MYF(MY_WME));
+ My_auto_ptr<char> ap1(newpar);
char* configParameterName = strtok(newpar, " ");
char* allAfterParameterName = strtok(NULL, "\0");
if (emptyString(allAfterParameterName)) {
ndbout << "Missing parameter value." << endl;
- free(newpar);
return;
}
@@ -1472,7 +1483,6 @@ CommandInterpreter::executeSet(int /*processId*/,
char* allAfterValue = strtok(NULL, "\0");
if (! emptyString(allAfterValue)) {
ndbout << "Nothing expected after parameter value." << endl;
- free(newpar);
return;
}
@@ -1518,7 +1528,6 @@ CommandInterpreter::executeSet(int /*processId*/,
abort();
}
}
- free(newpar);
#endif
}
@@ -1556,54 +1565,41 @@ CommandInterpreter::executeEventReporting(int processId,
bool all)
{
connect();
- SetLogLevelOrd logLevel; logLevel.clear();
- char categoryTxt[255];
- int level;
- LogLevel::EventCategory cat;
- if (emptyString(parameters) || (strcmp(parameters, "ALL") == 0)) {
- for(Uint32 i = 0; i<EventLogger::noOfEventCategoryNames; i++)
- logLevel.setLogLevel(EventLogger::eventCategoryNames[i].category, 7);
- } else {
- char * tmpString = strdup(parameters);
- char * tmpPtr = 0;
- char * item = strtok_r(tmpString, ", ", &tmpPtr);
- while(item != NULL){
- const int m = sscanf(item, "%[^=]=%d", categoryTxt, &level);
- if(m != 2){
- free(tmpString);
- ndbout << "Invalid loglevel specification category=level" << endl;
- return;
- }
-
- if(!EventLogger::matchEventCategory(categoryTxt,
- &cat)){
- ndbout << "Invalid loglevel specification, unknown category: "
- << categoryTxt << endl;
- free(tmpString);
- return ;
- }
- if(level < 0 || level > 15){
- ndbout << "Invalid loglevel specification row, level 0-15" << endl;
- free(tmpString);
- return ;
- }
- logLevel.setLogLevel(cat, level);
-
- item = strtok_r(NULL, ", ", &tmpPtr);
+ BaseString tmp(parameters);
+ Vector<BaseString> spec;
+ tmp.split(spec, "=");
+ if(spec.size() != 2){
+ ndbout << "Invalid loglevel specification: " << parameters << endl;
+ return;
+ }
+
+ spec[0].trim().ndb_toupper();
+ int category = ndb_mgm_match_event_category(spec[0].c_str());
+ if(category == NDB_MGM_ILLEGAL_EVENT_CATEGORY){
+ category = atoi(spec[0].c_str());
+ if(category < NDB_MGM_MIN_EVENT_CATEGORY ||
+ category > NDB_MGM_MAX_EVENT_CATEGORY){
+ ndbout << "Unknown category: \"" << spec[0].c_str() << "\"" << endl;
+ return;
}
- free(tmpString);
}
+
+ int level = atoi(spec[1].c_str());
+ if(level < 0 || level > 15){
+ ndbout << "Invalid level: " << spec[1].c_str() << endl;
+ return;
+ }
+
+
struct ndb_mgm_reply reply;
int result;
- result =
- ndb_mgm_set_loglevel_clusterlog(m_mgmsrv,
- processId, // fast fix - pekka
- (char*)
- EventLogger::getEventCategoryName(cat),
- level,
- &reply);
+ result = ndb_mgm_set_loglevel_clusterlog(m_mgmsrv,
+ processId, // fast fix - pekka
+ (ndb_mgm_event_category)category,
+ level,
+ &reply);
if (result != 0) {
ndbout_c("Executing CLUSTERLOG on node %d failed", processId);
@@ -1623,13 +1619,45 @@ CommandInterpreter::executeStartBackup(char* /*parameters*/)
connect();
struct ndb_mgm_reply reply;
unsigned int backupId;
+
+ int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 };
+ int fd = ndb_mgm_listen_event(m_mgmsrv, filter);
int result = ndb_mgm_start_backup(m_mgmsrv, &backupId, &reply);
if (result != 0) {
ndbout << "Start of backup failed" << endl;
printError();
- } else {
- ndbout << "Backup started. Backup id " << backupId << "." << endl;
+ close(fd);
+ return;
}
+
+ char *tmp;
+ char buf[1024];
+ {
+ SocketInputStream in(fd);
+ int count = 0;
+ do {
+ tmp = in.gets(buf, 1024);
+ if(tmp)
+ {
+ ndbout << tmp;
+ unsigned int id;
+ if(sscanf(tmp, "%*[^:]: Backup %d ", &id) == 1 && id == backupId){
+ count++;
+ }
+ }
+ } while(count < 2);
+ }
+
+ SocketInputStream in(fd, 10);
+ do {
+ tmp = in.gets(buf, 1024);
+ if(tmp && tmp[0] != 0)
+ {
+ ndbout << tmp;
+ }
+ } while(tmp && tmp[0] != 0);
+
+ close(fd);
}
void
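executeStartBackup now subscribes to BACKUP-category events with ndb_mgm_listen_event() before issuing the backup, reads the event stream until it has seen two lines mentioning the new backup id (presumably the "started" and "completed" messages), and then drains whatever is still buffered through a second SocketInputStream with a short timeout. The line match uses the scanf set conversion "%*[^:]": everything up to the first colon is consumed without being stored. A tiny self-contained demonstration (the sample log line is made up for illustration; the exact cluster-log wording may differ):

    #include <cstdio>

    int main()
    {
        const char* line = "Node 2: Backup 5 started from node 1";
        int id = 0;
        // "%*[^:]" skips the node prefix without assigning it, then the literal
        // ": Backup " is matched and the id is extracted; the return value is 1
        // because the suppressed conversion does not count.
        if (std::sscanf(line, "%*[^:]: Backup %d ", &id) == 1)
            std::printf("matched backup id %d\n", id);   // prints 5
        return 0;
    }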
@@ -1690,7 +1718,8 @@ CommandInterpreter::executeRep(char* parameters)
}
connect();
- char * line = strdup(parameters);
+ char * line = my_strdup(parameters,MYF(MY_WME));
+ My_auto_ptr<char> ap1((char*)line);
char * firstToken = strtok(line, " ");
struct ndb_rep_reply reply;
@@ -1991,46 +2020,46 @@ CmdBackupCallback(const MgmtSrvr::BackupEvent & event){
switch(event.Event){
case MgmtSrvr::BackupEvent::BackupStarted:
ok = true;
- snprintf(str, sizeof(str),
+ BaseString::snprintf(str, sizeof(str),
"Backup %d started", event.Started.BackupId);
break;
case MgmtSrvr::BackupEvent::BackupFailedToStart:
ok = true;
- snprintf(str, sizeof(str),
+ BaseString::snprintf(str, sizeof(str),
"Backup failed to start (Error %d)",
event.FailedToStart.ErrorCode);
break;
case MgmtSrvr::BackupEvent::BackupCompleted:
ok = true;
- snprintf(str, sizeof(str),
+ BaseString::snprintf(str, sizeof(str),
"Backup %d completed",
event.Completed.BackupId);
ndbout << str << endl;
- snprintf(str, sizeof(str),
+ BaseString::snprintf(str, sizeof(str),
" StartGCP: %d StopGCP: %d",
event.Completed.startGCP, event.Completed.stopGCP);
ndbout << str << endl;
- snprintf(str, sizeof(str),
+ BaseString::snprintf(str, sizeof(str),
" #Records: %d #LogRecords: %d",
event.Completed.NoOfRecords, event.Completed.NoOfLogRecords);
ndbout << str << endl;
- snprintf(str, sizeof(str),
+ BaseString::snprintf(str, sizeof(str),
" Data: %d bytes Log: %d bytes",
event.Completed.NoOfBytes, event.Completed.NoOfLogBytes);
break;
case MgmtSrvr::BackupEvent::BackupAborted:
ok = true;
- snprintf(str, sizeof(str),
+ BaseString::snprintf(str, sizeof(str),
"Backup %d has been aborted reason %d",
event.Aborted.BackupId,
event.Aborted.Reason);
break;
}
if(!ok){
- snprintf(str, sizeof(str),
+ BaseString::snprintf(str, sizeof(str),
"Unknown backup event: %d",
event.Event);
diff --git a/ndb/src/mgmclient/CommandInterpreter.hpp b/ndb/src/mgmclient/CommandInterpreter.hpp
index 478e03d129a..eecc48a739e 100644
--- a/ndb/src/mgmclient/CommandInterpreter.hpp
+++ b/ndb/src/mgmclient/CommandInterpreter.hpp
@@ -127,6 +127,7 @@ private:
*/
void executeHelp(char* parameters);
void executeShow(char* parameters);
+ void executeShutdown(char* parameters);
void executeRun(char* parameters);
void executeInfo(char* parameters);
void executeClusterLog(char* parameters);
diff --git a/ndb/src/mgmclient/CpcClient.cpp b/ndb/src/mgmclient/CpcClient.cpp
index 0291573a704..d407ba65312 100644
--- a/ndb/src/mgmclient/CpcClient.cpp
+++ b/ndb/src/mgmclient/CpcClient.cpp
@@ -44,7 +44,7 @@
ParserRow_t::IgnoreMinMax, \
0, 0, \
0, \
- (desc) }
+ (desc), 0 }
#define CPC_END() \
{ 0, \
@@ -55,7 +55,7 @@
ParserRow_t::IgnoreMinMax, \
0, 0, \
0, \
- 0 }
+ 0, 0 }
#ifdef DEBUG_PRINT_PROPERTIES
static void printprop(const Properties &p) {
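The two macro changes above append one more 0 to the ParserRow_t brace initializers, which suggests the row struct gained a trailing member; spelling it out keeps the CPC_ARG/CPC_END macros in step with the struct instead of relying on implicit value-initialization, and silences missing-field-initializer warnings. Illustration only (the struct and the new member's name are hypothetical):

    #include <cstdio>

    struct Row {
        const char* name;
        int         minimum;
        int         maximum;
        const char* description;
        void*       user_data;      // newly added trailing member (hypothetical name)
    };

    static const Row end_marker = { 0, 0, 0, 0, 0 };   // was { 0, 0, 0, 0 }

    int main() { return end_marker.minimum; }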
diff --git a/ndb/src/mgmclient/Makefile.am b/ndb/src/mgmclient/Makefile.am
index f3eaaaa68bd..e271c7bed53 100644
--- a/ndb/src/mgmclient/Makefile.am
+++ b/ndb/src/mgmclient/Makefile.am
@@ -13,7 +13,10 @@ INCLUDES += -I$(top_srcdir)/ndb/include/mgmapi -I$(top_srcdir)/ndb/src/common/mg
LDADD_LOC = $(top_builddir)/ndb/src/libndbclient.la \
$(top_builddir)/ndb/src/common/editline/libeditline.a \
- @TERMCAP_LIB@
+ $(top_builddir)/dbug/libdbug.a \
+ $(top_builddir)/mysys/libmysys.a \
+ $(top_builddir)/strings/libmystrings.a \
+ @TERMCAP_LIB@ @NDB_SCI_LIBS@
ndb_mgm_LDFLAGS = @ndb_bin_am_ldflags@
diff --git a/ndb/src/mgmclient/main.cpp b/ndb/src/mgmclient/main.cpp
index df6659df0b1..cc6d4bf600e 100644
--- a/ndb/src/mgmclient/main.cpp
+++ b/ndb/src/mgmclient/main.cpp
@@ -44,6 +44,7 @@ handler(int sig){
}
int main(int argc, const char** argv){
+ ndb_init();
int optind = 0;
const char *_host = 0;
int _port = 0;
@@ -84,7 +85,7 @@ int main(int argc, const char** argv){
}
char buf[MAXHOSTNAMELEN+10];
- snprintf(buf, sizeof(buf), "%s:%d", _host, _port);
+ BaseString::snprintf(buf, sizeof(buf), "%s:%d", _host, _port);
ndbout << "-- NDB Cluster -- Management Client --" << endl;
printf("Connecting to Management Server: %s\n", buf);
diff --git a/ndb/src/mgmsrv/CommandInterpreter.cpp b/ndb/src/mgmsrv/CommandInterpreter.cpp
index 316b6d5795e..2a054a01f1e 100644
--- a/ndb/src/mgmsrv/CommandInterpreter.cpp
+++ b/ndb/src/mgmsrv/CommandInterpreter.cpp
@@ -52,7 +52,7 @@ static const char* helpTexts[] = {
"{<id>|ALL} CLUSTERLOG {<category>=<level>}+ Set log level for cluster log",
"QUIT Quit management server",
};
-static const int noOfHelpTexts = sizeof(helpTexts)/sizeof(const char*);
+static const unsigned noOfHelpTexts = sizeof(helpTexts)/sizeof(const char*);
static const char* helpTextShow =
"SHOW prints NDB Cluster information\n\n"
@@ -389,14 +389,14 @@ void CommandInterpreter::executeHelp(char* parameters) {
<< endl;
ndbout << "<category> = ";
- for(i = 0; i<EventLogger::noOfEventCategoryNames; i++){
- ndbout << EventLogger::eventCategoryNames[i].name;
- if (i < EventLogger::noOfEventCategoryNames - 1) {
+ for(i = 0; i<CFG_MIN_LOGLEVEL; i++){
+ ndbout << ndb_mgm_get_event_category_string((ndb_mgm_event_category)i);
+ if (i < CFG_MIN_LOGLEVEL - 1) {
ndbout << " | ";
}
}
ndbout << endl;
-
+
ndbout << "<level> = " << "0 - 15"
<< endl;
@@ -647,6 +647,7 @@ versionCallback(int nodeId, int version, void * anyData, int errCode){
}
break;
case NDB_MGM_NODE_TYPE_UNKNOWN:
+ case NDB_MGM_NODE_TYPE_REP:
abort();
};
@@ -831,12 +832,13 @@ void CommandInterpreter::executeStatus(int processId,
//*****************************************************************************
void CommandInterpreter::executeLogLevel(int processId,
const char* parameters, bool all) {
+#if 0
(void)all; // Don't want compiler warning
SetLogLevelOrd logLevel; logLevel.clear();
if (emptyString(parameters) || (strcmp(parameters, "ALL") == 0)) {
- for(Uint32 i = 0; i<EventLogger::noOfEventCategoryNames; i++)
- logLevel.setLogLevel(EventLogger::eventCategoryNames[i].category, 7);
+ for(Uint32 i = 0; i<EventLoggerBase::noOfEventCategoryNames; i++)
+ logLevel.setLogLevel(EventLoggerBase::eventCategoryNames[i].category, 7);
} else {
char * tmpString = strdup(parameters);
@@ -852,7 +854,7 @@ void CommandInterpreter::executeLogLevel(int processId,
return;
}
LogLevel::EventCategory cat;
- if(!EventLogger::matchEventCategory(categoryTxt,
+ if(!EventLoggerBase::matchEventCategory(categoryTxt,
&cat)){
ndbout << "Invalid loglevel specification, unknown category: "
<< categoryTxt << endl;
@@ -875,6 +877,7 @@ void CommandInterpreter::executeLogLevel(int processId,
if (result != 0) {
ndbout << _mgmtSrvr.getErrorText(result) << endl;
}
+#endif
}
@@ -1080,12 +1083,13 @@ void CommandInterpreter::executeTestOff(int processId,
void CommandInterpreter::executeEventReporting(int processId,
const char* parameters,
bool all) {
+#if 0
(void)all; // Don't want compiler warning
SetLogLevelOrd logLevel; logLevel.clear();
if (emptyString(parameters) || (strcmp(parameters, "ALL") == 0)) {
- for(Uint32 i = 0; i<EventLogger::noOfEventCategoryNames; i++)
- logLevel.setLogLevel(EventLogger::eventCategoryNames[i].category, 7);
+ for(Uint32 i = 0; i<EventLoggerBase::noOfEventCategoryNames; i++)
+ logLevel.setLogLevel(EventLoggerBase::eventCategoryNames[i].category, 7);
} else {
char * tmpString = strdup(parameters);
@@ -1101,7 +1105,7 @@ void CommandInterpreter::executeEventReporting(int processId,
return;
}
LogLevel::EventCategory cat;
- if(!EventLogger::matchEventCategory(categoryTxt,
+ if(!EventLoggerBase::matchEventCategory(categoryTxt,
&cat)){
ndbout << "Invalid loglevel specification, unknown category: "
<< categoryTxt << endl;
@@ -1124,6 +1128,7 @@ void CommandInterpreter::executeEventReporting(int processId,
if (result != 0) {
ndbout << _mgmtSrvr.getErrorText(result) << endl;
}
+#endif
}
void
diff --git a/ndb/src/common/mgmcommon/Config.cpp b/ndb/src/mgmsrv/Config.cpp
index c0819b9f463..f9c6a23f909 100644
--- a/ndb/src/common/mgmcommon/Config.cpp
+++ b/ndb/src/mgmsrv/Config.cpp
@@ -19,7 +19,6 @@
#include <string.h>
#include "MgmtErrorReporter.hpp"
#include <Properties.hpp>
-#include "ConfigInfo.hpp"
//*****************************************************************************
// Ctor / Dtor
@@ -86,6 +85,9 @@ Config::printAllNameValuePairs(NdbOut &out,
MGM_REQUIRE(prop->get(n, &str_value));
out << str_value;
break;
+ case ConfigInfo::SECTION:
+ out << "SECTION";
+ break;
}
out << endl;
}
diff --git a/ndb/src/common/mgmcommon/Config.hpp b/ndb/src/mgmsrv/Config.hpp
index 26fd53dbed2..b5e1e17b027 100644
--- a/ndb/src/common/mgmcommon/Config.hpp
+++ b/ndb/src/mgmsrv/Config.hpp
@@ -24,7 +24,7 @@
#include <NdbOut.hpp>
#include <ndb_limits.h>
#include <Properties.hpp>
-#include "ConfigInfo.hpp"
+#include <ConfigInfo.hpp>
class ConfigInfo;
diff --git a/ndb/src/common/mgmcommon/ConfigInfo.cpp b/ndb/src/mgmsrv/ConfigInfo.cpp
index 552b49727fb..ad346b30ead 100644
--- a/ndb/src/common/mgmcommon/ConfigInfo.cpp
+++ b/ndb/src/mgmsrv/ConfigInfo.cpp
@@ -14,23 +14,38 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#include <ndb_global.h>
+
#include <NdbTCP.h>
#include "ConfigInfo.hpp"
#include <mgmapi_config_parameters.h>
#include <ndb_limits.h>
+#include "InitConfigFileParser.hpp"
#define MAX_LINE_LENGTH 255
#define KEY_INTERNAL 0
-#define MAX_INT_RNIL (RNIL - 1)
+#define MAX_INT_RNIL 0xfffffeff
+
+#define _STR_VALUE(x) #x
+#define STR_VALUE(x) _STR_VALUE(x)
+
/****************************************************************************
* Section names
****************************************************************************/
+#define DB_TOKEN_PRINT "ndbd(DB)"
+#define MGM_TOKEN_PRINT "ndb_mgmd(MGM)"
+#define API_TOKEN_PRINT "mysqld(API)"
+
+#define DB_TOKEN "DB"
+#define MGM_TOKEN "MGM"
+#define API_TOKEN "API"
+
const ConfigInfo::AliasPair
ConfigInfo::m_sectionNameAliases[]={
- {"API", "MYSQLD"},
- {"DB", "NDBD"},
- {"MGM", "NDB_MGMD"},
+ {API_TOKEN, "MYSQLD"},
+ {DB_TOKEN, "NDBD"},
+ {MGM_TOKEN, "NDB_MGMD"},
{0, 0}
};
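Two small preprocessor idioms introduced above carry much of the rest of this file's diff. The *_TOKEN_PRINT macros are plain string literals, so writing "...("DB_TOKEN_PRINT")" inside a description relies on adjacent string-literal concatenation; STR_VALUE is the usual two-level stringification trick, needed so the argument (e.g. MAX_INT_RNIL) is macro-expanded before # turns it into a string. A compilable demonstration:

    #include <cstdio>

    #define DB_TOKEN_PRINT  "ndbd(DB)"                 // copied from the hunk above

    #define _STR_VALUE(x)   #x
    #define STR_VALUE(x)    _STR_VALUE(x)              // extra level expands x first
    #define MAX_INT_RNIL    0xfffffeff

    int main()
    {
        // Adjacent literals are concatenated into one string by the compiler:
        std::puts("Number identifying the database node (" DB_TOKEN_PRINT ")");

        // Without the indirection the argument is not expanded before '#':
        std::puts(_STR_VALUE(MAX_INT_RNIL));           // prints MAX_INT_RNIL
        std::puts(STR_VALUE(MAX_INT_RNIL));            // prints 0xfffffeff
        return 0;
    }

This is what lets the parameter table further down express limits such as STR_VALUE(MAX_INT_RNIL) or STR_VALUE(MAX_NODES) as strings.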
@@ -40,9 +55,9 @@ ConfigInfo::m_sectionNames[]={
"EXTERNAL SYSTEM",
"COMPUTER",
- "DB",
- "MGM",
- "API",
+ DB_TOKEN,
+ MGM_TOKEN,
+ API_TOKEN,
"REP",
"EXTERNAL REP",
@@ -77,6 +92,8 @@ static bool fixNodeId(InitConfigFileParser::Context & ctx, const char * data);
static bool fixExtConnection(InitConfigFileParser::Context & ctx, const char * data);
static bool fixDepricated(InitConfigFileParser::Context & ctx, const char *);
static bool saveInConfigValues(InitConfigFileParser::Context & ctx, const char *);
+static bool fixFileSystemPath(InitConfigFileParser::Context & ctx, const char * data);
+static bool fixBackupDataDir(InitConfigFileParser::Context & ctx, const char * data);
const ConfigInfo::SectionRule
ConfigInfo::m_SectionRules[] = {
@@ -84,9 +101,9 @@ ConfigInfo::m_SectionRules[] = {
{ "EXTERNAL SYSTEM", transformExternalSystem, 0 },
{ "COMPUTER", transformComputer, 0 },
- { "DB", transformNode, 0 },
- { "API", transformNode, 0 },
- { "MGM", transformNode, 0 },
+ { DB_TOKEN, transformNode, 0 },
+ { API_TOKEN, transformNode, 0 },
+ { MGM_TOKEN, transformNode, 0 },
{ "REP", transformNode, 0 },
{ "EXTERNAL REP", transformExtNode, 0 },
@@ -95,9 +112,9 @@ ConfigInfo::m_SectionRules[] = {
{ "SCI", transformConnection, 0 },
{ "OSE", transformConnection, 0 },
- { "DB", fixNodeHostname, 0 },
- { "API", fixNodeHostname, 0 },
- { "MGM", fixNodeHostname, 0 },
+ { DB_TOKEN, fixNodeHostname, 0 },
+ { API_TOKEN, fixNodeHostname, 0 },
+ { MGM_TOKEN, fixNodeHostname, 0 },
{ "REP", fixNodeHostname, 0 },
//{ "EXTERNAL REP", fixNodeHostname, 0 },
@@ -112,11 +129,14 @@ ConfigInfo::m_SectionRules[] = {
{ "TCP", fixHostname, "HostName1" },
{ "TCP", fixHostname, "HostName2" },
+ { "SCI", fixHostname, "HostName1" },
+ { "SCI", fixHostname, "HostName2" },
{ "OSE", fixHostname, "HostName1" },
{ "OSE", fixHostname, "HostName2" },
{ "TCP", fixPortNumber, 0 }, // has to come after fixHostName
{ "SHM", fixPortNumber, 0 }, // has to come after fixHostName
+ { "SCI", fixPortNumber, 0 }, // has to come after fixHostName
//{ "SHM", fixShmKey, 0 },
/**
@@ -131,7 +151,10 @@ ConfigInfo::m_SectionRules[] = {
{ "*", fixDepricated, 0 },
{ "*", applyDefaultValues, "system" },
- { "DB", checkDbConstraints, 0 },
+ { DB_TOKEN, fixFileSystemPath, 0 },
+ { DB_TOKEN, fixBackupDataDir, 0 },
+
+ { DB_TOKEN, checkDbConstraints, 0 },
/**
* checkConnectionConstraints must be after fixExtConnection
@@ -143,12 +166,14 @@ ConfigInfo::m_SectionRules[] = {
{ "TCP", checkTCPConstraints, "HostName1" },
{ "TCP", checkTCPConstraints, "HostName2" },
+ { "SCI", checkTCPConstraints, "HostName1" },
+ { "SCI", checkTCPConstraints, "HostName2" },
{ "*", checkMandatory, 0 },
- { "DB", saveInConfigValues, 0 },
- { "API", saveInConfigValues, 0 },
- { "MGM", saveInConfigValues, 0 },
+ { DB_TOKEN, saveInConfigValues, 0 },
+ { API_TOKEN, saveInConfigValues, 0 },
+ { MGM_TOKEN, saveInConfigValues, 0 },
{ "REP", saveInConfigValues, 0 },
{ "TCP", saveInConfigValues, 0 },
@@ -161,18 +186,22 @@ const int ConfigInfo::m_NoOfRules = sizeof(m_SectionRules)/sizeof(SectionRule);
/****************************************************************************
* Config Rules declarations
****************************************************************************/
+static bool sanity_checks(Vector<ConfigInfo::ConfigRuleSection>&sections,
+ struct InitConfigFileParser::Context &ctx,
+ const char * rule_data);
static bool add_node_connections(Vector<ConfigInfo::ConfigRuleSection>&sections,
struct InitConfigFileParser::Context &ctx,
const char * rule_data);
static bool add_server_ports(Vector<ConfigInfo::ConfigRuleSection>&sections,
- struct InitConfigFileParser::Context &ctx,
- const char * rule_data);
+ struct InitConfigFileParser::Context &ctx,
+ const char * rule_data);
static bool check_node_vs_replicas(Vector<ConfigInfo::ConfigRuleSection>&sections,
- struct InitConfigFileParser::Context &ctx,
- const char * rule_data);
+ struct InitConfigFileParser::Context &ctx,
+ const char * rule_data);
const ConfigInfo::ConfigRule
ConfigInfo::m_ConfigRules[] = {
+ { sanity_checks, 0 },
{ add_node_connections, 0 },
{ add_server_ports, 0 },
{ check_node_vs_replicas, 0 },
@@ -189,7 +218,7 @@ struct DepricationTransform {
static
const DepricationTransform f_deprication[] = {
- { "DB", "Discless", "Diskless", 0, 1 },
+ { DB_TOKEN, "Discless", "Diskless", 0, 1 },
{ 0, 0, 0, 0, 0}
};
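From here on, the ParamInfo entries change their default/min/max fields from integer expressions to string literals: "8", "32k", "1M", "18M", "false"/"true", or STR_VALUE(MAX_INT_RNIL). One char* triple can then describe INT, INT64, BOOL and STRING parameters alike, and sizes can be written with k/M/G suffixes. How those suffixed strings are turned back into numbers is up to the config parser and is not shown in this diff; the following is only a hedged, standalone sketch of the suffix convention, not NDB's actual conversion routine:

    #include <cstdio>
    #include <cstdlib>

    // Convert "32k", "1M", "4096", "0x..." to a number of units.  Standalone
    // illustration of the suffix convention seen in the table below.
    static bool parse_size(const char* s, unsigned long& out)
    {
        char* end = 0;
        out = std::strtoul(s, &end, 0);        // base 0 also accepts 0x... values
        if (end == s)
            return false;                      // no digits at all
        switch (*end) {
        case 'k': case 'K': out <<= 10; ++end; break;
        case 'm': case 'M': out <<= 20; ++end; break;
        case 'g': case 'G': out <<= 30; ++end; break;
        default: break;
        }
        return *end == '\0';
    }

    int main()
    {
        const char* samples[] = { "32k", "1M", "18M", "0xfffffeff" };
        for (unsigned i = 0; i < sizeof(samples)/sizeof(samples[0]); i++) {
            unsigned long v;
            if (parse_size(samples[i], v))
                std::printf("%-10s -> %lu\n", samples[i], v);
        }
        return 0;
    }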
@@ -216,6 +245,7 @@ const DepricationTransform f_deprication[] = {
* Parameters used under development should be marked "NOTIMPLEMENTED"
* *******************************************************************
*/
+
const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
/****************************************************************************
@@ -241,8 +271,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::STRING,
MANDATORY,
- 0,
- 0 },
+ 0, 0 },
{
KEY_INTERNAL,
@@ -253,8 +282,19 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::STRING,
MANDATORY,
+ 0, 0 },
+
+ {
+ KEY_INTERNAL,
+ "ByteOrder",
+ "COMPUTER",
+ 0,
+ ConfigInfo::DEPRICATED,
+ false,
+ ConfigInfo::STRING,
+ UNDEFINED,
0,
- MAX_INT_RNIL },
+ 0 },
/****************************************************************************
* SYSTEM
@@ -267,9 +307,8 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::SECTION,
- CFG_SECTION_SYSTEM,
- 0,
- 0 },
+ (const char *)CFG_SECTION_SYSTEM,
+ 0, 0 },
{
CFG_SYS_NAME,
@@ -280,8 +319,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::STRING,
MANDATORY,
- 0,
- 0 },
+ 0, 0 },
{
CFG_SYS_REPLICATION_ROLE,
@@ -292,20 +330,19 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- 0 },
+ 0, 0 },
{
CFG_SYS_PRIMARY_MGM_NODE,
"PrimaryMGMNode",
"SYSTEM",
- "Node id of Primary MGM node",
+ "Node id of Primary "MGM_TOKEN_PRINT" node",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 0,
- 0,
- MAX_INT_RNIL },
+ "0",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_SYS_CONFIG_GENERATION,
@@ -315,427 +352,532 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 0,
- 0,
- MAX_INT_RNIL },
+ "0",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
/***************************************************************************
* DB
***************************************************************************/
{
CFG_SECTION_NODE,
- "DB",
- "DB",
+ DB_TOKEN,
+ DB_TOKEN,
"Node section",
ConfigInfo::USED,
false,
ConfigInfo::SECTION,
- NODE_TYPE_DB,
- 0, 0
+ (const char *)NODE_TYPE_DB,
+ 0, 0
},
{
CFG_NODE_HOST,
"HostName",
- "DB",
+ DB_TOKEN,
"Name of computer for this node",
ConfigInfo::INTERNAL,
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
{
CFG_NODE_SYSTEM,
"System",
- "DB",
+ DB_TOKEN,
"Name of system for this node",
ConfigInfo::INTERNAL,
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
{
CFG_NODE_ID,
"Id",
- "DB",
- "Number identifying the database node (DB)",
+ DB_TOKEN,
+ "Number identifying the database node ("DB_TOKEN_PRINT")",
ConfigInfo::USED,
false,
ConfigInfo::INT,
MANDATORY,
- 1,
- (MAX_NODES - 1) },
+ "1",
+ STR_VALUE(MAX_NODES) },
{
KEY_INTERNAL,
"ServerPort",
- "DB",
+ DB_TOKEN,
"Port used to setup transporter",
ConfigInfo::USED,
false,
ConfigInfo::INT,
UNDEFINED,
- 1,
- 65535 },
+ "1",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_NO_REPLICAS,
"NoOfReplicas",
- "DB",
+ DB_TOKEN,
"Number of copies of all data in the database (1-4)",
ConfigInfo::USED,
false,
ConfigInfo::INT,
MANDATORY,
- 1,
- 4 },
+ "1",
+ "4" },
{
CFG_DB_NO_ATTRIBUTES,
"MaxNoOfAttributes",
- "DB",
+ DB_TOKEN,
"Total number of attributes stored in database. I.e. sum over all tables",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 1000,
- 32,
- MAX_INT_RNIL/16 },
+ "1000",
+ "32",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_NO_TABLES,
"MaxNoOfTables",
- "DB",
+ DB_TOKEN,
"Total number of tables stored in the database",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 128,
- 8,
- MAX_INT_RNIL },
+ "128",
+ "8",
+ STR_VALUE(MAX_INT_RNIL) },
{
+ CFG_DB_NO_ORDERED_INDEXES,
+ "MaxNoOfOrderedIndexes",
+ DB_TOKEN,
+ "Total number of ordered indexes that can be defined in the system",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::INT,
+ "128",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_NO_UNIQUE_HASH_INDEXES,
+ "MaxNoOfUniqueHashIndexes",
+ DB_TOKEN,
+ "Total number of unique hash indexes that can be defined in the system",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::INT,
+ "64",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
CFG_DB_NO_INDEXES,
"MaxNoOfIndexes",
- "DB",
+ DB_TOKEN,
"Total number of indexes that can be defined in the system",
- ConfigInfo::USED,
+ ConfigInfo::DEPRICATED,
false,
ConfigInfo::INT,
- 128,
- 0,
- MAX_INT_RNIL },
+ "128",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_NO_INDEX_OPS,
"MaxNoOfConcurrentIndexOperations",
- "DB",
- "Total number of index operations that can execute simultaneously on one DB node",
+ DB_TOKEN,
+ "Total number of index operations that can execute simultaneously on one "DB_TOKEN_PRINT" node",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 8192,
- 0,
- MAX_INT_RNIL
- },
+ "8K",
+ "0",
+ STR_VALUE(MAX_INT_RNIL)
+ },
{
CFG_DB_NO_TRIGGERS,
"MaxNoOfTriggers",
- "DB",
+ DB_TOKEN,
"Total number of triggers that can be defined in the system",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 768,
- 0,
- MAX_INT_RNIL },
+ "768",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_NO_TRIGGER_OPS,
"MaxNoOfFiredTriggers",
- "DB",
- "Total number of triggers that can fire simultaneously in one DB node",
+ DB_TOKEN,
+ "Total number of triggers that can fire simultaneously in one "DB_TOKEN_PRINT" node",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 4000,
- 0,
- MAX_INT_RNIL },
+ "4000",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
KEY_INTERNAL,
"ExecuteOnComputer",
- "DB",
+ DB_TOKEN,
"String referencing an earlier defined COMPUTER",
ConfigInfo::USED,
false,
ConfigInfo::STRING,
- MANDATORY,
- 0,
- MAX_INT_RNIL },
+ UNDEFINED,
+ 0, 0 },
{
CFG_DB_NO_SAVE_MSGS,
"MaxNoOfSavedMessages",
- "DB",
+ DB_TOKEN,
"Max number of error messages in error log and max number of trace files",
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 25,
- 0,
- MAX_INT_RNIL },
+ "25",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_MEMLOCK,
"LockPagesInMainMemory",
- "DB",
+ DB_TOKEN,
"If set to yes, then NDB Cluster data will not be swapped out to disk",
ConfigInfo::USED,
true,
ConfigInfo::BOOL,
- false,
- 0,
- MAX_INT_RNIL },
+ "false",
+ "false",
+ "true" },
{
CFG_DB_WATCHDOG_INTERVAL,
"TimeBetweenWatchDogCheck",
- "DB",
+ DB_TOKEN,
"Time between execution checks inside a database node",
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 6000,
- 70,
- MAX_INT_RNIL },
+ "6000",
+ "70",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_STOP_ON_ERROR,
"StopOnError",
- "DB",
- "If set to N, the DB automatically restarts/recovers in case of node failure",
+ DB_TOKEN,
+ "If set to N, "DB_TOKEN_PRINT" automatically restarts/recovers in case of node failure",
ConfigInfo::USED,
true,
ConfigInfo::BOOL,
- true,
- 0,
- MAX_INT_RNIL },
+ "true",
+ "false",
+ "true" },
{
CFG_DB_STOP_ON_ERROR_INSERT,
"RestartOnErrorInsert",
- "DB",
+ DB_TOKEN,
"See src/kernel/vm/Emulator.hpp NdbRestartType for details",
ConfigInfo::INTERNAL,
true,
ConfigInfo::INT,
- 2,
- 0,
- 4 },
+ "2",
+ "0",
+ "4" },
{
CFG_DB_NO_OPS,
"MaxNoOfConcurrentOperations",
- "DB",
- "Max no of op:s on DB (op:s within a transaction are concurrently executed)",
+ DB_TOKEN,
+ "Max number of operation records in transaction coordinator",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::INT,
+ "32k",
+ "32",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_NO_LOCAL_OPS,
+ "MaxNoOfLocalOperations",
+ DB_TOKEN,
+ "Max number of operation records defined in the local storage node",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::INT,
+ UNDEFINED,
+ "32",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_NO_LOCAL_SCANS,
+ "MaxNoOfLocalScans",
+ DB_TOKEN,
+ "Max number of fragment scans in parallel in the local storage node",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 32768,
- 32,
- MAX_INT_RNIL },
+ UNDEFINED,
+ "32",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_BATCH_SIZE,
+ "BatchSizePerLocalScan",
+ DB_TOKEN,
+ "Used to calculate the number of lock records for scan with hold lock",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::INT,
+ STR_VALUE(DEF_BATCH_SIZE),
+ "1",
+ STR_VALUE(MAX_PARALLEL_OP_PER_SCAN) },
{
CFG_DB_NO_TRANSACTIONS,
"MaxNoOfConcurrentTransactions",
- "DB",
- "Max number of transaction executing concurrently on the DB node",
+ DB_TOKEN,
+ "Max number of transaction executing concurrently on the "DB_TOKEN_PRINT" node",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 4096,
- 32,
- MAX_INT_RNIL },
+ "4096",
+ "32",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_NO_SCANS,
"MaxNoOfConcurrentScans",
- "DB",
- "Max number of scans executing concurrently on the DB node",
+ DB_TOKEN,
+ "Max number of scans executing concurrently on the "DB_TOKEN_PRINT" node",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 256,
- 2,
- 500 },
+ "256",
+ "2",
+ "500" },
{
CFG_DB_TRANS_BUFFER_MEM,
"TransactionBufferMemory",
- "DB",
- "Dynamic buffer space (in bytes) for key and attribute data allocated for each DB node",
+ DB_TOKEN,
+ "Dynamic buffer space (in bytes) for key and attribute data allocated for each "DB_TOKEN_PRINT" node",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- (1024 * 1024),
- 1024,
- MAX_INT_RNIL },
+ "1M",
+ "1K",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_INDEX_MEM,
"IndexMemory",
- "DB",
- "Number bytes on each DB node allocated for storing indexes",
+ DB_TOKEN,
+ "Number bytes on each "DB_TOKEN_PRINT" node allocated for storing indexes",
ConfigInfo::USED,
false,
ConfigInfo::INT64,
- 3 * 1024 * 8192,
- 128 * 8192,
- ((Uint64)MAX_INT_RNIL) * ((Uint64)8192) },
+ "18M",
+ "1M",
+ "1024G" },
{
CFG_DB_DATA_MEM,
"DataMemory",
- "DB",
- "Number bytes on each DB node allocated for storing data",
+ DB_TOKEN,
+ "Number bytes on each "DB_TOKEN_PRINT" node allocated for storing data",
ConfigInfo::USED,
false,
ConfigInfo::INT64,
- 10 * 1024 * 8192,
- 128 * 8192,
- ((Uint64)MAX_INT_RNIL) * ((Uint64)8192) },
+ "80M",
+ "1M",
+ "1024G" },
+
+ {
+ CFG_DB_UNDO_INDEX_BUFFER,
+ "UndoIndexBuffer",
+ DB_TOKEN,
+ "Number bytes on each "DB_TOKEN_PRINT" node allocated for writing UNDO logs for index part",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::INT,
+ "2M",
+ "1M",
+ STR_VALUE(MAX_INT_RNIL)},
+
+ {
+ CFG_DB_UNDO_DATA_BUFFER,
+ "UndoDataBuffer",
+ DB_TOKEN,
+ "Number bytes on each "DB_TOKEN_PRINT" node allocated for writing UNDO logs for data part",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::INT,
+ "16M",
+ "1M",
+ STR_VALUE(MAX_INT_RNIL)},
+
+ {
+ CFG_DB_REDO_BUFFER,
+ "RedoBuffer",
+ DB_TOKEN,
+ "Number bytes on each "DB_TOKEN_PRINT" node allocated for writing REDO logs",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::INT,
+ "8M",
+ "1M",
+ STR_VALUE(MAX_INT_RNIL)},
+
+ {
+ CFG_DB_LONG_SIGNAL_BUFFER,
+ "LongMessageBuffer",
+ DB_TOKEN,
+ "Number bytes on each "DB_TOKEN_PRINT" node allocated for internal long messages",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::INT,
+ "1M",
+ "512k",
+ STR_VALUE(MAX_INT_RNIL)},
{
CFG_DB_START_PARTIAL_TIMEOUT,
"StartPartialTimeout",
- "DB",
+ DB_TOKEN,
"Time to wait before trying to start wo/ all nodes. 0=Wait forever",
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 30000,
- 0,
- ~0 },
+ "30000",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_START_PARTITION_TIMEOUT,
"StartPartitionedTimeout",
- "DB",
+ DB_TOKEN,
"Time to wait before trying to start partitioned. 0=Wait forever",
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 60000,
- 0,
- ~0 },
+ "60000",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_START_FAILURE_TIMEOUT,
"StartFailureTimeout",
- "DB",
+ DB_TOKEN,
"Time to wait before terminating. 0=Wait forever",
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 0,
- 0,
- ~0 },
+ "0",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_HEARTBEAT_INTERVAL,
"HeartbeatIntervalDbDb",
- "DB",
- "Time between DB-DB heartbeats. DB considered dead after 3 missed HBs",
+ DB_TOKEN,
+ "Time between "DB_TOKEN_PRINT"-"DB_TOKEN_PRINT" heartbeats. "DB_TOKEN_PRINT" considered dead after 3 missed HBs",
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 1500,
- 10,
- MAX_INT_RNIL },
+ "1500",
+ "10",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_API_HEARTBEAT_INTERVAL,
"HeartbeatIntervalDbApi",
- "DB",
- "Time between API-DB heartbeats. API connection closed after 3 missed HBs",
+ DB_TOKEN,
+ "Time between "API_TOKEN_PRINT"-"DB_TOKEN_PRINT" heartbeats. "API_TOKEN_PRINT" connection closed after 3 missed HBs",
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 1500,
- 100,
- MAX_INT_RNIL },
+ "1500",
+ "100",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_LCP_INTERVAL,
"TimeBetweenLocalCheckpoints",
- "DB",
+ DB_TOKEN,
"Time between taking snapshots of the database (expressed in 2log of bytes)",
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 20,
- 0,
- 31 },
+ "20",
+ "0",
+ "31" },
{
CFG_DB_GCP_INTERVAL,
"TimeBetweenGlobalCheckpoints",
- "DB",
+ DB_TOKEN,
"Time between doing group commit of transactions to disk",
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 2000,
- 10,
- 32000 },
+ "2000",
+ "10",
+ "32000" },
{
CFG_DB_NO_REDOLOG_FILES,
"NoOfFragmentLogFiles",
- "DB",
- "No of 16 Mbyte Redo log files in each of 4 file sets belonging to DB node",
+ DB_TOKEN,
+ "No of 16 Mbyte Redo log files in each of 4 file sets belonging to "DB_TOKEN_PRINT" node",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 8,
- 1,
- MAX_INT_RNIL },
+ "8",
+ "1",
+ STR_VALUE(MAX_INT_RNIL) },
{
KEY_INTERNAL,
"MaxNoOfOpenFiles",
- "DB",
- "Max number of files open per DB node.(One thread is created per file)",
+ DB_TOKEN,
+ "Max number of files open per "DB_TOKEN_PRINT" node.(One thread is created per file)",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 40,
- 20,
- 256 },
+ "40",
+ "20",
+ "256" },
{
CFG_DB_TRANSACTION_CHECK_INTERVAL,
"TimeBetweenInactiveTransactionAbortCheck",
- "DB",
+ DB_TOKEN,
"Time between inactive transaction checks",
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 1000,
- 1000,
- MAX_INT_RNIL },
+ "1000",
+ "1000",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_TRANSACTION_INACTIVE_TIMEOUT,
"TransactionInactiveTimeout",
- "DB",
+ DB_TOKEN,
"Time application can wait before executing another transaction part (ms).\n"
"This is the time the transaction coordinator waits for the application\n"
"to execute or send another part (query, statement) of the transaction.\n"
@@ -744,14 +886,14 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
true,
ConfigInfo::INT,
- MAX_INT_RNIL,
- 0,
- MAX_INT_RNIL },
+ STR_VALUE(MAX_INT_RNIL),
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_TRANSACTION_DEADLOCK_TIMEOUT,
"TransactionDeadlockDetectionTimeout",
- "DB",
+ DB_TOKEN,
"Time transaction can be executing in a DB node (ms).\n"
"This is the time the transaction coordinator waits for each database node\n"
"of the transaction to execute a request. If the database node takes too\n"
@@ -759,204 +901,214 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 1200,
- 50,
- MAX_INT_RNIL },
+ "1200",
+ "50",
+ STR_VALUE(MAX_INT_RNIL) },
{
KEY_INTERNAL,
"NoOfDiskPagesToDiskDuringRestartTUP",
- "DB",
+ DB_TOKEN,
"?",
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 40,
- 1,
- MAX_INT_RNIL },
+ "40",
+ "1",
+ STR_VALUE(MAX_INT_RNIL) },
{
KEY_INTERNAL,
"NoOfDiskPagesToDiskAfterRestartTUP",
- "DB",
+ DB_TOKEN,
"?",
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 40,
- 1,
- MAX_INT_RNIL },
+ "40",
+ "1",
+ STR_VALUE(MAX_INT_RNIL) },
{
KEY_INTERNAL,
"NoOfDiskPagesToDiskDuringRestartACC",
- "DB",
+ DB_TOKEN,
"?",
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 20,
- 1,
- MAX_INT_RNIL },
+ "20",
+ "1",
+ STR_VALUE(MAX_INT_RNIL) },
{
KEY_INTERNAL,
"NoOfDiskPagesToDiskAfterRestartACC",
- "DB",
+ DB_TOKEN,
"?",
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 20,
- 1,
- MAX_INT_RNIL },
+ "20",
+ "1",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_DISCLESS,
"Diskless",
- "DB",
+ DB_TOKEN,
"Run wo/ disk",
ConfigInfo::USED,
true,
ConfigInfo::BOOL,
- 0,
- 0,
- 1},
+ "false",
+ "false",
+ "true"},
{
KEY_INTERNAL,
"Discless",
- "DB",
+ DB_TOKEN,
"Diskless",
ConfigInfo::DEPRICATED,
true,
ConfigInfo::BOOL,
- 0,
- 0,
- 1},
+ "false",
+ "false",
+ "true"},
{
CFG_DB_ARBIT_TIMEOUT,
"ArbitrationTimeout",
- "DB",
+ DB_TOKEN,
"Max time (milliseconds) database partion waits for arbitration signal",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 3000,
- 10,
- MAX_INT_RNIL },
+ "3000",
+ "10",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_NODE_DATADIR,
+ "DataDir",
+ DB_TOKEN,
+ "Data directory for this node",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::STRING,
+ MYSQLCLUSTERDIR,
+ 0, 0 },
{
CFG_DB_FILESYSTEM_PATH,
"FileSystemPath",
- "DB",
- "Path to directory where the DB node stores its data (directory must exist)",
+ DB_TOKEN,
+ "Path to directory where the "DB_TOKEN_PRINT" node stores its data (directory must exist)",
ConfigInfo::USED,
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
{
CFG_LOGLEVEL_STARTUP,
"LogLevelStartup",
- "DB",
+ DB_TOKEN,
"Node startup info printed on stdout",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 1,
- 0,
- 15 },
+ "1",
+ "0",
+ "15" },
{
CFG_LOGLEVEL_SHUTDOWN,
"LogLevelShutdown",
- "DB",
+ DB_TOKEN,
"Node shutdown info printed on stdout",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 0,
- 0,
- 15 },
+ "0",
+ "0",
+ "15" },
{
CFG_LOGLEVEL_STATISTICS,
"LogLevelStatistic",
- "DB",
+ DB_TOKEN,
"Transaction, operation, transporter info printed on stdout",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 0,
- 0,
- 15 },
+ "0",
+ "0",
+ "15" },
{
CFG_LOGLEVEL_CHECKPOINT,
"LogLevelCheckpoint",
- "DB",
+ DB_TOKEN,
"Local and Global checkpoint info printed on stdout",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 0,
- 0,
- 15 },
+ "0",
+ "0",
+ "15" },
{
CFG_LOGLEVEL_NODERESTART,
"LogLevelNodeRestart",
- "DB",
+ DB_TOKEN,
"Node restart, node failure info printed on stdout",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 0,
- 0,
- 15 },
+ "0",
+ "0",
+ "15" },
{
CFG_LOGLEVEL_CONNECTION,
"LogLevelConnection",
- "DB",
+ DB_TOKEN,
"Node connect/disconnect info printed on stdout",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 0,
- 0,
- 15 },
+ "0",
+ "0",
+ "15" },
{
CFG_LOGLEVEL_ERROR,
"LogLevelError",
- "DB",
+ DB_TOKEN,
"Transporter, heartbeat errors printed on stdout",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 0,
- 0,
- 15 },
+ "0",
+ "0",
+ "15" },
{
CFG_LOGLEVEL_INFO,
"LogLevelInfo",
- "DB",
+ DB_TOKEN,
"Heartbeat and log info printed on stdout",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 0,
- 0,
- 15 },
+ "0",
+ "0",
+ "15" },
/**
* Backup
@@ -964,62 +1116,73 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
{
CFG_DB_PARALLEL_BACKUPS,
"ParallelBackups",
- "DB",
+ DB_TOKEN,
"Maximum number of parallel backups",
ConfigInfo::NOTIMPLEMENTED,
false,
ConfigInfo::INT,
- 1,
- 1,
- 1 },
+ "1",
+ "1",
+ "1" },
+
+ {
+ CFG_DB_BACKUP_DATADIR,
+ "BackupDataDir",
+ DB_TOKEN,
+ "Path to where to store backups",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::STRING,
+ UNDEFINED,
+ 0, 0 },
{
CFG_DB_BACKUP_MEM,
"BackupMemory",
- "DB",
+ DB_TOKEN,
"Total memory allocated for backups per node (in bytes)",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- (2 * 1024 * 1024) + (2 * 1024 * 1024), // sum of BackupDataBufferSize and BackupLogBufferSize
- 0,
- MAX_INT_RNIL },
+ "4M", // sum of BackupDataBufferSize and BackupLogBufferSize
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_BACKUP_DATA_BUFFER_MEM,
"BackupDataBufferSize",
- "DB",
+ DB_TOKEN,
"Default size of databuffer for a backup (in bytes)",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- (2 * 1024 * 1024), // remember to change BackupMemory
- 0,
- MAX_INT_RNIL },
+ "2M", // remember to change BackupMemory
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_BACKUP_LOG_BUFFER_MEM,
"BackupLogBufferSize",
- "DB",
+ DB_TOKEN,
"Default size of logbuffer for a backup (in bytes)",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- (2 * 1024 * 1024), // remember to change BackupMemory
- 0,
- MAX_INT_RNIL },
+ "2M", // remember to change BackupMemory
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_DB_BACKUP_WRITE_SIZE,
"BackupWriteSize",
- "DB",
+ DB_TOKEN,
"Default size of filesystem writes made by backup (in bytes)",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 32768,
- 0,
- MAX_INT_RNIL },
+ "32K",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
/***************************************************************************
* REP
@@ -1032,8 +1195,8 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::SECTION,
- NODE_TYPE_REP,
- 0, 0
+ (const char *)NODE_TYPE_REP,
+ 0, 0
},
{
@@ -1045,8 +1208,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
{
CFG_NODE_SYSTEM,
@@ -1057,8 +1219,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
{
CFG_NODE_ID,
@@ -1069,8 +1230,8 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::INT,
MANDATORY,
- 1,
- (MAX_NODES - 1) },
+ "1",
+ STR_VALUE(MAX_NODES) },
{
KEY_INTERNAL,
@@ -1081,8 +1242,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::STRING,
MANDATORY,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
{
CFG_REP_HEARTBEAT_INTERVAL,
@@ -1092,231 +1252,271 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
true,
ConfigInfo::INT,
- 3000,
- 100,
- MAX_INT_RNIL },
+ "3000",
+ "100",
+ STR_VALUE(MAX_INT_RNIL) },
/***************************************************************************
* API
***************************************************************************/
{
CFG_SECTION_NODE,
- "API",
- "API",
+ API_TOKEN,
+ API_TOKEN,
"Node section",
ConfigInfo::USED,
false,
ConfigInfo::SECTION,
- NODE_TYPE_API,
- 0, 0
+ (const char *)NODE_TYPE_API,
+ 0, 0
},
{
CFG_NODE_HOST,
"HostName",
- "API",
+ API_TOKEN,
"Name of computer for this node",
ConfigInfo::INTERNAL,
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
{
CFG_NODE_SYSTEM,
"System",
- "API",
+ API_TOKEN,
"Name of system for this node",
ConfigInfo::INTERNAL,
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
{
CFG_NODE_ID,
"Id",
- "API",
- "Number identifying application node (API)",
+ API_TOKEN,
+ "Number identifying application node ("API_TOKEN_PRINT")",
ConfigInfo::USED,
false,
ConfigInfo::INT,
MANDATORY,
- 1,
- (MAX_NODES - 1) },
+ "1",
+ STR_VALUE(MAX_NODES) },
{
KEY_INTERNAL,
"ExecuteOnComputer",
- "API",
+ API_TOKEN,
"String referencing an earlier defined COMPUTER",
ConfigInfo::USED,
false,
ConfigInfo::STRING,
- 0,
- 0,
- MAX_INT_RNIL },
+ UNDEFINED,
+ 0, 0 },
{
CFG_NODE_ARBIT_RANK,
"ArbitrationRank",
- "API",
- "If 0, then API is not arbitrator. Kernel selects arbitrators in order 1, 2",
+ API_TOKEN,
+ "If 0, then "API_TOKEN_PRINT" is not arbitrator. Kernel selects arbitrators in order 1, 2",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 0,
- 0,
- 2 },
+ "0",
+ "0",
+ "2" },
{
CFG_NODE_ARBIT_DELAY,
"ArbitrationDelay",
- "API",
+ API_TOKEN,
"When asked to arbitrate, arbitrator waits this long before voting (msec)",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 0,
- 0,
- MAX_INT_RNIL },
+ "0",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_MAX_SCAN_BATCH_SIZE,
+ "MaxScanBatchSize",
+ "API",
+ "The maximum collective batch size for one scan",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::INT,
+ STR_VALUE(MAX_SCAN_BATCH_SIZE),
+ "32k",
+ "16M" },
+
+ {
+ CFG_BATCH_BYTE_SIZE,
+ "BatchByteSize",
+ "API",
+ "The default batch size in bytes",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::INT,
+ STR_VALUE(SCAN_BATCH_SIZE),
+ "1k",
+ "1M" },
+
+ {
+ CFG_BATCH_SIZE,
+ "BatchSize",
+ "API",
+ "The default batch size in number of records",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::INT,
+ STR_VALUE(DEF_BATCH_SIZE),
+ "1",
+ STR_VALUE(MAX_PARALLEL_OP_PER_SCAN) },
/****************************************************************************
* MGM
***************************************************************************/
{
CFG_SECTION_NODE,
- "MGM",
- "MGM",
+ MGM_TOKEN,
+ MGM_TOKEN,
"Node section",
ConfigInfo::USED,
false,
ConfigInfo::SECTION,
- NODE_TYPE_MGM,
- 0, 0
+ (const char *)NODE_TYPE_MGM,
+ 0, 0
},
{
CFG_NODE_HOST,
"HostName",
- "MGM",
+ MGM_TOKEN,
"Name of computer for this node",
ConfigInfo::INTERNAL,
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
+
+ {
+ CFG_NODE_DATADIR,
+ "DataDir",
+ MGM_TOKEN,
+ "Data directory for this node",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::STRING,
+ MYSQLCLUSTERDIR,
+ 0, 0 },
{
CFG_NODE_SYSTEM,
"System",
- "MGM",
+ MGM_TOKEN,
"Name of system for this node",
ConfigInfo::INTERNAL,
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
{
CFG_NODE_ID,
"Id",
- "MGM",
- "Number identifying the management server node (MGM)",
+ MGM_TOKEN,
+ "Number identifying the management server node ("MGM_TOKEN_PRINT")",
ConfigInfo::USED,
false,
ConfigInfo::INT,
MANDATORY,
- 1,
- (MAX_NODES - 1) },
+ "1",
+ STR_VALUE(MAX_NODES) },
{
CFG_LOG_DESTINATION,
"LogDestination",
- "MGM",
+ MGM_TOKEN,
"String describing where logmessages are sent",
ConfigInfo::USED,
false,
ConfigInfo::STRING,
0,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
{
KEY_INTERNAL,
"ExecuteOnComputer",
- "MGM",
+ MGM_TOKEN,
"String referencing an earlier defined COMPUTER",
ConfigInfo::USED,
false,
ConfigInfo::STRING,
0,
- 0,
- MAX_INT_RNIL },
-
+ 0, 0 },
+
{
KEY_INTERNAL,
"MaxNoOfSavedEvents",
- "MGM",
+ MGM_TOKEN,
"",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 100,
- 0,
- MAX_INT_RNIL },
+ "100",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_MGM_PORT,
"PortNumber",
- "MGM",
+ MGM_TOKEN,
"Port number to give commands to/fetch configurations from management server",
ConfigInfo::USED,
false,
ConfigInfo::INT,
NDB_BASE_PORT,
- 0,
- MAX_INT_RNIL },
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
KEY_INTERNAL,
"PortNumberStats",
- "MGM",
+ MGM_TOKEN,
"Port number used to get statistical information from a management server",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 2199,
- 0,
- MAX_INT_RNIL },
+ "2199",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_NODE_ARBIT_RANK,
"ArbitrationRank",
- "MGM",
- "If 0, then MGM is not arbitrator. Kernel selects arbitrators in order 1, 2",
+ MGM_TOKEN,
+ "If 0, then "MGM_TOKEN_PRINT" is not arbitrator. Kernel selects arbitrators in order 1, 2",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 1,
- 0,
- 2 },
+ "1",
+ "0",
+ "2" },
{
CFG_NODE_ARBIT_DELAY,
"ArbitrationDelay",
- "MGM",
+ MGM_TOKEN,
"",
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 0,
- 0,
- MAX_INT_RNIL },
+ "0",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
/****************************************************************************
* TCP
@@ -1329,12 +1529,12 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::SECTION,
- CONNECTION_TYPE_TCP,
- 0, 0
+ (const char *)CONNECTION_TYPE_TCP,
+ 0, 0
},
{
- CFG_TCP_HOSTNAME_1,
+ CFG_CONNECTION_HOSTNAME_1,
"HostName1",
"TCP",
"Name/IP of computer on one side of the connection",
@@ -1342,11 +1542,10 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
{
- CFG_TCP_HOSTNAME_2,
+ CFG_CONNECTION_HOSTNAME_2,
"HostName2",
"TCP",
"Name/IP of computer on one side of the connection",
@@ -1354,32 +1553,29 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
{
CFG_CONNECTION_NODE_1,
"NodeId1",
"TCP",
- "Id of node (DB, API or MGM) on one side of the connection",
+ "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
ConfigInfo::USED,
false,
ConfigInfo::STRING,
MANDATORY,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
{
CFG_CONNECTION_NODE_2,
"NodeId2",
"TCP",
- "Id of node (DB, API or MGM) on one side of the connection",
+ "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
ConfigInfo::USED,
false,
ConfigInfo::STRING,
MANDATORY,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
{
CFG_CONNECTION_SEND_SIGNAL_ID,
@@ -1389,9 +1585,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::BOOL,
- true,
- 0,
- MAX_INT_RNIL },
+ "true",
+ "false",
+ "true" },
{
@@ -1402,9 +1598,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::BOOL,
- false,
- 0,
- MAX_INT_RNIL },
+ "false",
+ "false",
+ "true" },
{
CFG_CONNECTION_SERVER_PORT,
@@ -1415,8 +1611,8 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::INT,
MANDATORY,
- 0,
- MAX_INT_RNIL },
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_TCP_SEND_BUFFER_SIZE,
@@ -1426,9 +1622,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 16 * 16384,
- 1 * 16384,
- MAX_INT_RNIL },
+ "256K",
+ "16K",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_TCP_RECEIVE_BUFFER_SIZE,
@@ -1438,9 +1634,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 4 * 16384,
- 1 * 16384,
- MAX_INT_RNIL },
+ "64K",
+ "16K",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_TCP_PROXY,
@@ -1451,8 +1647,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- 0 },
+ 0, 0 },
{
CFG_CONNECTION_NODE_1_SYSTEM,
@@ -1463,8 +1658,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
{
CFG_CONNECTION_NODE_2_SYSTEM,
@@ -1475,8 +1669,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
/****************************************************************************
@@ -1490,21 +1683,19 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::SECTION,
- CONNECTION_TYPE_SHM,
- 0, 0
- },
+ (const char *)CONNECTION_TYPE_SHM,
+ 0, 0 },
{
CFG_CONNECTION_NODE_1,
"NodeId1",
"SHM",
- "Id of node (DB, API or MGM) on one side of the connection",
+ "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
ConfigInfo::USED,
false,
ConfigInfo::STRING,
MANDATORY,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
{
CFG_CONNECTION_SERVER_PORT,
@@ -1515,20 +1706,19 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::INT,
MANDATORY,
- 0,
- MAX_INT_RNIL },
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_CONNECTION_NODE_2,
"NodeId2",
"SHM",
- "Id of node (DB, API or MGM) on one side of the connection",
+ "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
ConfigInfo::USED,
false,
ConfigInfo::STRING,
MANDATORY,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
{
CFG_CONNECTION_SEND_SIGNAL_ID,
@@ -1538,9 +1728,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::BOOL,
- false,
- 0,
- MAX_INT_RNIL },
+ "false",
+ "false",
+ "true" },
{
@@ -1551,9 +1741,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::BOOL,
- true,
- 0,
- MAX_INT_RNIL },
+ "true",
+ "false",
+ "true" },
{
CFG_SHM_KEY,
@@ -1564,8 +1754,8 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::INT,
MANDATORY,
- 0,
- MAX_INT_RNIL },
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_SHM_BUFFER_MEM,
@@ -1575,10 +1765,10 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 1048576,
- 4096,
- MAX_INT_RNIL },
-
+ "1M",
+ "4K",
+ STR_VALUE(MAX_INT_RNIL) },
+
{
CFG_CONNECTION_NODE_1_SYSTEM,
"NodeId1_System",
@@ -1588,8 +1778,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
{
CFG_CONNECTION_NODE_2_SYSTEM,
@@ -1600,8 +1789,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
/****************************************************************************
* SCI
@@ -1614,7 +1802,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::SECTION,
- CONNECTION_TYPE_SCI,
+ (const char *)CONNECTION_TYPE_SCI,
0, 0
},
@@ -1622,49 +1810,107 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
CFG_CONNECTION_NODE_1,
"NodeId1",
"SCI",
- "Id of node (DB, API or MGM) on one side of the connection",
+ "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
ConfigInfo::USED,
false,
- ConfigInfo::INT,
+ ConfigInfo::STRING,
MANDATORY,
- 0,
- MAX_INT_RNIL },
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_CONNECTION_NODE_2,
"NodeId2",
"SCI",
- "Id of node (DB, API or MGM) on one side of the connection",
+ "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::STRING,
+ MANDATORY,
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_CONNECTION_HOSTNAME_1,
+ "HostName1",
+ "SCI",
+ "Name/IP of computer on one side of the connection",
+ ConfigInfo::INTERNAL,
+ false,
+ ConfigInfo::STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_HOSTNAME_2,
+ "HostName2",
+ "SCI",
+ "Name/IP of computer on one side of the connection",
+ ConfigInfo::INTERNAL,
+ false,
+ ConfigInfo::STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_SERVER_PORT,
+ "PortNumber",
+ "SCI",
+ "Port used for this transporter",
ConfigInfo::USED,
false,
ConfigInfo::INT,
MANDATORY,
- 0,
- MAX_INT_RNIL },
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
- CFG_SCI_ID_0,
- "SciId0",
+ CFG_SCI_HOST1_ID_0,
+ "Host1SciId0",
"SCI",
- "Local SCI-node id for adapter 0 (a computer can have two adapters)",
+ "SCI-node id for adapter 0 on Host1 (a computer can have two adapters)",
ConfigInfo::USED,
false,
ConfigInfo::INT,
MANDATORY,
- 0,
- MAX_INT_RNIL },
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
- CFG_SCI_ID_1,
- "SciId1",
+ CFG_SCI_HOST1_ID_1,
+ "Host1SciId1",
"SCI",
- "Local SCI-node id for adapter 1 (a computer can have two adapters)",
+ "SCI-node id for adapter 1 on Host1 (a computer can have two adapters)",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::INT,
+ "0",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_SCI_HOST2_ID_0,
+ "Host2SciId0",
+ "SCI",
+ "SCI-node id for adapter 0 on Host2 (a computer can have two adapters)",
ConfigInfo::USED,
false,
ConfigInfo::INT,
MANDATORY,
- 0,
- MAX_INT_RNIL },
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_SCI_HOST2_ID_1,
+ "Host2SciId1",
+ "SCI",
+ "SCI-node id for adapter 1 on Host2 (a computer can have two adapters)",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::INT,
+ "0",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_CONNECTION_SEND_SIGNAL_ID,
@@ -1674,9 +1920,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::BOOL,
- true,
- 0,
- MAX_INT_RNIL },
+ "true",
+ "false",
+ "true" },
{
CFG_CONNECTION_CHECKSUM,
@@ -1686,9 +1932,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::BOOL,
- false,
- 0,
- MAX_INT_RNIL },
+ "false",
+ "false",
+ "true" },
{
CFG_SCI_SEND_LIMIT,
@@ -1698,9 +1944,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 2048,
- 512,
- MAX_INT_RNIL },
+ "8K",
+ "128",
+ "32K" },
{
CFG_SCI_BUFFER_MEM,
@@ -1710,9 +1956,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 1048576,
- 262144,
- MAX_INT_RNIL },
+ "1M",
+ "64K",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_CONNECTION_NODE_1_SYSTEM,
@@ -1723,8 +1969,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
{
CFG_CONNECTION_NODE_2_SYSTEM,
@@ -1735,8 +1980,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
/****************************************************************************
* OSE
@@ -1749,12 +1993,12 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::SECTION,
- CONNECTION_TYPE_OSE,
+ (const char *)CONNECTION_TYPE_OSE,
0, 0
},
{
- CFG_OSE_HOSTNAME_1,
+ CFG_CONNECTION_HOSTNAME_1,
"HostName1",
"OSE",
"Name of computer on one side of the connection",
@@ -1762,11 +2006,10 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
{
- CFG_OSE_HOSTNAME_2,
+ CFG_CONNECTION_HOSTNAME_2,
"HostName2",
"OSE",
"Name of computer on one side of the connection",
@@ -1774,32 +2017,31 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
{
CFG_CONNECTION_NODE_1,
"NodeId1",
"OSE",
- "Id of node (DB, API or MGM) on one side of the connection",
+ "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
ConfigInfo::USED,
false,
ConfigInfo::INT,
MANDATORY,
- 0,
- MAX_INT_RNIL },
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_CONNECTION_NODE_2,
"NodeId2",
"OSE",
- "Id of node (DB, API or MGM) on one side of the connection",
+ "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
ConfigInfo::USED,
false,
ConfigInfo::INT,
UNDEFINED,
- 0,
- MAX_INT_RNIL },
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_CONNECTION_SEND_SIGNAL_ID,
@@ -1809,9 +2051,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::BOOL,
- true,
- 0,
- MAX_INT_RNIL },
+ "true",
+ "false",
+ "true" },
{
CFG_CONNECTION_CHECKSUM,
@@ -1821,9 +2063,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::BOOL,
- false,
- 0,
- MAX_INT_RNIL },
+ "false",
+ "false",
+ "true" },
{
CFG_OSE_PRIO_A_SIZE,
@@ -1833,9 +2075,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 1000,
- 0,
- MAX_INT_RNIL },
+ "1000",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_OSE_PRIO_B_SIZE,
@@ -1845,9 +2087,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 1000,
- 0,
- MAX_INT_RNIL },
+ "1000",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_OSE_RECEIVE_ARRAY_SIZE,
@@ -1857,9 +2099,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT,
- 10,
- 0,
- MAX_INT_RNIL },
+ "10",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
{
CFG_CONNECTION_NODE_1_SYSTEM,
@@ -1870,8 +2112,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- MAX_INT_RNIL},
+ 0, 0 },
{
CFG_CONNECTION_NODE_2_SYSTEM,
@@ -1882,8 +2123,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::STRING,
UNDEFINED,
- 0,
- MAX_INT_RNIL },
+ 0, 0 },
};
const int ConfigInfo::m_NoOfParams = sizeof(m_ParamInfo) / sizeof(ParamInfo);
@@ -1903,6 +2143,8 @@ ConfigInfo::ConfigInfo()
for (i=0; i<m_NoOfParams; i++) {
const ParamInfo & param = m_ParamInfo[i];
+ Uint64 default_uint64;
+ bool default_bool;
// Create new section if it did not exist
if (!m_info.getCopy(param._section, &section)) {
@@ -1921,9 +2163,37 @@ ConfigInfo::ConfigInfo()
pinfo.put("Updateable", param._updateable);
pinfo.put("Type", param._type);
pinfo.put("Status", param._status);
- pinfo.put64("Default", param._default);
- pinfo.put64("Min", param._min);
- pinfo.put64("Max", param._max);
+
+ if(param._default == MANDATORY){
+ pinfo.put("Mandatory", (Uint32)1);
+ }
+
+ switch (param._type) {
+ case BOOL:
+ {
+ bool tmp_bool;
+ require(InitConfigFileParser::convertStringToBool(param._min, tmp_bool));
+ pinfo.put64("Min", tmp_bool);
+ require(InitConfigFileParser::convertStringToBool(param._max, tmp_bool));
+ pinfo.put64("Max", tmp_bool);
+ break;
+ }
+ case INT:
+ case INT64:
+ {
+ Uint64 tmp_uint64;
+ require(InitConfigFileParser::convertStringToUint64(param._min, tmp_uint64));
+ pinfo.put64("Min", tmp_uint64);
+ require(InitConfigFileParser::convertStringToUint64(param._max, tmp_uint64));
+ pinfo.put64("Max", tmp_uint64);
+ break;
+ }
+ case SECTION:
+ pinfo.put("SectionType", (Uint32)UintPtr(param._default));
+ break;
+ case STRING:
+ break;
+ }
// Check that pinfo is really new
if (section->get(param._fname, &oldpinfo)) {
@@ -1944,10 +2214,31 @@ ConfigInfo::ConfigInfo()
if(!m_systemDefaults.getCopy(param._section, &p)){
p = new Properties(true);
}
- if(param._type != STRING &&
- param._default != UNDEFINED &&
+ if(param._default != UNDEFINED &&
param._default != MANDATORY){
- require(p->put(param._fname, param._default));
+ switch (param._type)
+ {
+ case SECTION:
+ break;
+ case STRING:
+ require(p->put(param._fname, param._default));
+ break;
+ case BOOL:
+ {
+ bool tmp_bool;
+ require(InitConfigFileParser::convertStringToBool(param._default, default_bool));
+ require(p->put(param._fname, default_bool));
+ break;
+ }
+ case INT:
+ case INT64:
+ {
+ Uint64 tmp_uint64;
+ require(InitConfigFileParser::convertStringToUint64(param._default, default_uint64));
+ require(p->put(param._fname, default_uint64));
+ break;
+ }
+ }
}
require(m_systemDefaults.put(param._section, p, true));
delete p;
@@ -1989,7 +2280,8 @@ const Properties *
ConfigInfo::getInfo(const char * section) const {
const Properties * p;
if(!m_info.get(section, &p)){
- warning("getInfo", section);
+ return 0;
+ // warning("getInfo", section);
}
return p;
}
@@ -1998,7 +2290,8 @@ const Properties *
ConfigInfo::getDefaults(const char * section) const {
const Properties * p;
if(!m_systemDefaults.get(section, &p)){
- warning("getDefaults", section);
+ return 0;
+ //warning("getDefaults", section);
}
return p;
}
@@ -2064,7 +2357,7 @@ ConfigInfo::getDescription(const Properties * section,
bool
ConfigInfo::isSection(const char * section) const {
for (int i = 0; i<m_noOfSectionNames; i++) {
- if(!strcmp(section, m_sectionNames[i])) return true;
+ if(!strcasecmp(section, m_sectionNames[i])) return true;
}
return false;
}
@@ -2072,7 +2365,7 @@ ConfigInfo::isSection(const char * section) const {
const char*
ConfigInfo::getAlias(const char * section) const {
for (int i = 0; m_sectionNameAliases[i].name != 0; i++)
- if(!strcmp(section, m_sectionNameAliases[i].alias))
+ if(!strcasecmp(section, m_sectionNameAliases[i].alias))
return m_sectionNameAliases[i].name;
return 0;
}
@@ -2080,7 +2373,7 @@ ConfigInfo::getAlias(const char * section) const {
bool
ConfigInfo::verify(const Properties * section, const char* fname,
Uint64 value) const {
- Uint64 min, max; min = max + 1;
+ Uint64 min, max;
min = getInfoInt(section, fname, "Min");
max = getInfoInt(section, fname, "Max");
@@ -2139,7 +2432,7 @@ void ConfigInfo::print(const Properties * section,
ndbout << "Default: N (Legal values: Y, N)" << endl;
} else if (getDefault(section, parameter) == true) {
ndbout << "Default: Y (Legal values: Y, N)" << endl;
- } else if (getDefault(section, parameter) == MANDATORY) {
+ } else if (getDefault(section, parameter) == (UintPtr)MANDATORY) {
ndbout << "MANDATORY (Legal values: Y, N)" << endl;
} else {
ndbout << "UNKNOWN" << endl;
@@ -2151,9 +2444,9 @@ void ConfigInfo::print(const Properties * section,
case ConfigInfo::INT64:
ndbout << " (Non-negative Integer)" << endl;
ndbout << getDescription(section, parameter) << endl;
- if (getDefault(section, parameter) == MANDATORY) {
+ if (getDefault(section, parameter) == (UintPtr)MANDATORY) {
ndbout << "MANDATORY (";
- } else if (getDefault(section, parameter) == UNDEFINED) {
+ } else if (getDefault(section, parameter) == (UintPtr)UNDEFINED) {
ndbout << "UNDEFINED (";
} else {
ndbout << "Default: " << getDefault(section, parameter) << " (";
@@ -2166,7 +2459,7 @@ void ConfigInfo::print(const Properties * section,
case ConfigInfo::STRING:
ndbout << " (String)" << endl;
ndbout << getDescription(section, parameter) << endl;
- if (getDefault(section, parameter) == MANDATORY) {
+ if (getDefault(section, parameter) == (UintPtr)MANDATORY) {
ndbout << "MANDATORY" << endl;
} else {
ndbout << "No default value" << endl;
@@ -2211,7 +2504,7 @@ transformNode(InitConfigFileParser::Context & ctx, const char * data){
}
ctx.m_userProperties.put("AllocatedNodeId_", id, id);
- snprintf(ctx.pname, sizeof(ctx.pname), "Node_%d", id);
+ BaseString::snprintf(ctx.pname, sizeof(ctx.pname), "Node_%d", id);
ctx.m_currentSection->put("Type", ctx.fname);
@@ -2229,26 +2522,54 @@ transformNode(InitConfigFileParser::Context & ctx, const char * data){
return true;
}
+static bool checkLocalhostHostnameMix(InitConfigFileParser::Context & ctx)
+{
+ DBUG_ENTER("checkLocalhostHostnameMix");
+ const char * hostname= 0;
+ ctx.m_currentSection->get("HostName", &hostname);
+ if (hostname == 0 || hostname[0] == 0)
+ DBUG_RETURN(true);
+
+ Uint32 localhost_used= 0;
+ if(!strcmp(hostname, "localhost") || !strcmp(hostname, "127.0.0.1")){
+ localhost_used= 1;
+ ctx.m_userProperties.put("$computer-localhost-used", localhost_used);
+ if(!ctx.m_userProperties.get("$computer-localhost", &hostname))
+ DBUG_RETURN(true);
+ } else {
+ ctx.m_userProperties.get("$computer-localhost-used", &localhost_used);
+ ctx.m_userProperties.put("$computer-localhost", hostname);
+ }
+
+ if (localhost_used) {
+ ctx.reportError("Mixing of localhost with other hostname(%s) is illegal",
+ hostname);
+ DBUG_RETURN(false);
+ }
+
+ DBUG_RETURN(true);
+}
+
bool
fixNodeHostname(InitConfigFileParser::Context & ctx, const char * data){
+ const char * hostname;
+ if (ctx.m_currentSection->get("HostName", &hostname))
+ return checkLocalhostHostnameMix(ctx);
+
const char * compId;
if(!ctx.m_currentSection->get("ExecuteOnComputer", &compId)){
- require(ctx.m_currentSection->put("HostName", ""));
-
const char * type;
- if(ctx.m_currentSection->get("Type", &type) && strcmp(type,"DB") == 0) {
- ctx.reportError("Parameter \"ExecuteOnComputer\" missing from DB section"
- " [%s] starting at line: %d",
- ctx.fname, ctx.m_sectionLineno);
- return false;
- }
- return true;
+ if(ctx.m_currentSection->get("Type", &type) && strcmp(type,DB_TOKEN) == 0)
+ require(ctx.m_currentSection->put("HostName", "localhost"));
+ else
+ require(ctx.m_currentSection->put("HostName", ""));
+ return checkLocalhostHostnameMix(ctx);
}
const Properties * computer;
char tmp[255];
- snprintf(tmp, sizeof(tmp), "Computer_%s", compId);
+ BaseString::snprintf(tmp, sizeof(tmp), "Computer_%s", compId);
if(!ctx.m_config->get(tmp, &computer)){
ctx.reportError("Computer \"%s\" not declared"
"- [%s] starting at line: %d",
@@ -2256,7 +2577,6 @@ fixNodeHostname(InitConfigFileParser::Context & ctx, const char * data){
return false;
}
- const char * hostname;
if(!computer->get("HostName", &hostname)){
ctx.reportError("HostName missing in [COMPUTER] (Id: %d) "
" - [%s] starting at line: %d",
@@ -2265,7 +2585,40 @@ fixNodeHostname(InitConfigFileParser::Context & ctx, const char * data){
}
require(ctx.m_currentSection->put("HostName", hostname));
- return true;
+ return checkLocalhostHostnameMix(ctx);
+}
+
+bool
+fixFileSystemPath(InitConfigFileParser::Context & ctx, const char * data){
+ DBUG_ENTER("fixFileSystemPath");
+
+ const char * path;
+ if (ctx.m_currentSection->get("FileSystemPath", &path))
+ DBUG_RETURN(true);
+
+ if (ctx.m_currentSection->get("DataDir", &path)) {
+ require(ctx.m_currentSection->put("FileSystemPath", path));
+ DBUG_RETURN(true);
+ }
+
+ require(false);
+ DBUG_RETURN(false);
+}
+
+bool
+fixBackupDataDir(InitConfigFileParser::Context & ctx, const char * data){
+
+ const char * path;
+ if (ctx.m_currentSection->get("BackupDataDir", &path))
+ return true;
+
+ if (ctx.m_currentSection->get("FileSystemPath", &path)) {
+ require(ctx.m_currentSection->put("BackupDataDir", path));
+ return true;
+ }
+
+ require(false);
+ return false;
}
bool
@@ -2294,7 +2647,7 @@ transformExtNode(InitConfigFileParser::Context & ctx, const char * data){
ctx.m_userProperties.get("ExtNoOfNodes", &nodes);
require(ctx.m_userProperties.put("ExtNoOfNodes",++nodes, true));
- snprintf(ctx.pname, sizeof(ctx.pname), "EXTERNAL SYSTEM_%s:Node_%d",
+ BaseString::snprintf(ctx.pname, sizeof(ctx.pname), "EXTERNAL SYSTEM_%s:Node_%d",
systemName, id);
return true;
@@ -2308,7 +2661,7 @@ transformConnection(InitConfigFileParser::Context & ctx, const char * data){
Uint32 connections = 0;
ctx.m_userProperties.get("NoOfConnections", &connections);
- snprintf(ctx.pname, sizeof(ctx.pname), "Connection_%d", connections);
+ BaseString::snprintf(ctx.pname, sizeof(ctx.pname), "Connection_%d", connections);
ctx.m_userProperties.put("NoOfConnections", ++connections, true);
ctx.m_currentSection->put("Type", ctx.fname);
@@ -2331,7 +2684,7 @@ transformSystem(InitConfigFileParser::Context & ctx, const char * data){
ndbout << "transformSystem " << name << endl;
- snprintf(ctx.pname, sizeof(ctx.pname), "SYSTEM_%s", name);
+ BaseString::snprintf(ctx.pname, sizeof(ctx.pname), "SYSTEM_%s", name);
return true;
}
@@ -2348,7 +2701,7 @@ transformExternalSystem(InitConfigFileParser::Context & ctx, const char * data){
ctx.fname, ctx.m_sectionLineno);
return false;
}
- snprintf(ctx.pname, sizeof(ctx.pname), "EXTERNAL SYSTEM_%s", name);
+ BaseString::snprintf(ctx.pname, sizeof(ctx.pname), "EXTERNAL SYSTEM_%s", name);
return true;
}
@@ -2365,7 +2718,7 @@ transformComputer(InitConfigFileParser::Context & ctx, const char * data){
ctx.fname, ctx.m_sectionLineno);
return false;
}
- snprintf(ctx.pname, sizeof(ctx.pname), "Computer_%s", id);
+ BaseString::snprintf(ctx.pname, sizeof(ctx.pname), "Computer_%s", id);
Uint32 computers = 0;
ctx.m_userProperties.get("NoOfComputers", &computers);
@@ -2377,17 +2730,7 @@ transformComputer(InitConfigFileParser::Context & ctx, const char * data){
return true;
}
- if(!strcmp(hostname, "localhost") || !strcmp(hostname, "127.0.0.1")){
- if(ctx.m_userProperties.get("$computer-localhost", &hostname)){
- ctx.reportError("Mixing of localhost with other hostname(%s) is illegal",
- hostname);
- return false;
- }
- } else {
- ctx.m_userProperties.put("$computer-localhost", hostname);
- }
-
- return true;
+ return checkLocalhostHostnameMix(ctx);
}
/**
@@ -2454,7 +2797,7 @@ checkMandatory(InitConfigFileParser::Context & ctx, const char * data){
const Properties * info = NULL;
::require(ctx.m_currentInfo->get(name, &info));
Uint32 val;
- if(info->get("Default", &val) && val == MANDATORY){
+ if(info->get("Mandatory", &val)){
const char * fname;
::require(info->get("Fname", &fname));
if(!ctx.m_currentSection->contains(fname)){
@@ -2551,7 +2894,7 @@ fixExtConnection(InitConfigFileParser::Context & ctx, const char * data){
require(ctx.m_userProperties.put("ExtNoOfConnections",++connections, true));
char tmpLine1[MAX_LINE_LENGTH];
- snprintf(tmpLine1, MAX_LINE_LENGTH, "Connection_%d", connections-1);
+ BaseString::snprintf(tmpLine1, MAX_LINE_LENGTH, "Connection_%d", connections-1);
/**
* Section: EXTERNAL SYSTEM_<Ext System Name>
@@ -2619,23 +2962,41 @@ fixHostname(InitConfigFileParser::Context & ctx, const char * data){
static bool
fixPortNumber(InitConfigFileParser::Context & ctx, const char * data){
+ DBUG_ENTER("fixPortNumber");
+
Uint32 id1= 0, id2= 0;
+ const char *hostName1;
+ const char *hostName2;
require(ctx.m_currentSection->get("NodeId1", &id1));
require(ctx.m_currentSection->get("NodeId2", &id2));
- id1 = id1 < id2 ? id1 : id2;
+ require(ctx.m_currentSection->get("HostName1", &hostName1));
+ require(ctx.m_currentSection->get("HostName2", &hostName2));
+ DBUG_PRINT("info",("NodeId1=%d HostName1=\"%s\"",id1,hostName1));
+ DBUG_PRINT("info",("NodeId2=%d HostName2=\"%s\"",id2,hostName2));
+
+ if (id1 > id2) {
+ Uint32 tmp= id1;
+ const char *tmp_name= hostName1;
+ hostName1= hostName2;
+ id1= id2;
+ hostName2= tmp_name;
+ id2= tmp;
+ }
const Properties * node;
require(ctx.m_config->get("Node", id1, &node));
- BaseString hostname;
- require(node->get("HostName", hostname));
+ BaseString hostname(hostName1);
+ // require(node->get("HostName", hostname));
if (hostname.c_str()[0] == 0) {
- ctx.reportError("Hostname required on nodeid %d since it will act as server.", id1);
- return false;
+ ctx.reportError("Hostname required on nodeid %d since it will "
+ "act as server.", id1);
+ DBUG_RETURN(false);
}
Uint32 port= 0;
- if (!node->get("ServerPort", &port) && !ctx.m_userProperties.get("ServerPort_", id1, &port)) {
+ if (!node->get("ServerPort", &port) &&
+ !ctx.m_userProperties.get("ServerPort_", id1, &port)) {
Uint32 adder= 0;
{
BaseString server_port_adder(hostname);
@@ -2646,9 +3007,10 @@ fixPortNumber(InitConfigFileParser::Context & ctx, const char * data){
Uint32 base= 0;
if (!ctx.m_userProperties.get("ServerPortBase", &base)){
- if(!(ctx.m_userDefaults && ctx.m_userDefaults->get("PortNumber", &base)) &&
+ if(!(ctx.m_userDefaults &&
+ ctx.m_userDefaults->get("PortNumber", &base)) &&
!ctx.m_systemDefaults->get("PortNumber", &base)) {
- base= NDB_BASE_PORT+2;
+ base= strtoll(NDB_BASE_PORT,0,0)+2;
// ctx.reportError("Cannot retrieve base port number");
// return false;
}
@@ -2659,12 +3021,17 @@ fixPortNumber(InitConfigFileParser::Context & ctx, const char * data){
}
if(ctx.m_currentSection->contains("PortNumber")) {
- ndbout << "PortNumber should no longer be specificied per connection, please remove from config. Will be changed to " << port << endl;
+ ndbout << "PortNumber should no longer be specificied "
+ << "per connection, please remove from config. "
+ << "Will be changed to " << port << endl;
ctx.m_currentSection->put("PortNumber", port, true);
} else
ctx.m_currentSection->put("PortNumber", port);
- return true;
+ DBUG_PRINT("info", ("connection %d-%d port %d host %s",
+ id1, id2, port, hostname.c_str()));
+
+ DBUG_RETURN(true);
}
/**
@@ -2749,8 +3116,8 @@ checkConnectionConstraints(InitConfigFileParser::Context & ctx, const char *){
* -# Not both of them are MGMs
* -# None of them contain a "SystemX" name
*/
- if((strcmp(type1, "DB") != 0 && strcmp(type2, "DB") != 0) &&
- !(strcmp(type1, "MGM") == 0 && strcmp(type2, "MGM") == 0) &&
+ if((strcmp(type1, DB_TOKEN) != 0 && strcmp(type2, DB_TOKEN) != 0) &&
+ !(strcmp(type1, MGM_TOKEN) == 0 && strcmp(type2, MGM_TOKEN) == 0) &&
!ctx.m_currentSection->contains("System1") &&
!ctx.m_currentSection->contains("System2")){
ctx.reportError("Invalid connection between node %d (%s) and node %d (%s)"
@@ -2900,7 +3267,7 @@ saveInConfigValues(InitConfigFileParser::Context & ctx, const char * data){
require(sec->get("Fname", &secName));
require(sec->get("Id", &id));
require(sec->get("Status", &status));
- require(sec->get("Default", &typeVal));
+ require(sec->get("SectionType", &typeVal));
if(id == KEY_INTERNAL || status == ConfigInfo::INTERNAL){
ndbout_c("skipping section %s", ctx.fname);
@@ -2959,6 +3326,29 @@ saveInConfigValues(InitConfigFileParser::Context & ctx, const char * data){
}
static bool
+sanity_checks(Vector<ConfigInfo::ConfigRuleSection>&sections,
+ struct InitConfigFileParser::Context &ctx,
+ const char * rule_data)
+{
+ Uint32 db_nodes = 0;
+ Uint32 mgm_nodes = 0;
+ Uint32 api_nodes = 0;
+ if (!ctx.m_userProperties.get("DB", &db_nodes)) {
+ ctx.reportError("At least one database node should be defined in config file");
+ return false;
+ }
+ if (!ctx.m_userProperties.get("MGM", &mgm_nodes)) {
+ ctx.reportError("At least one management server node should be defined in config file");
+ return false;
+ }
+ if (!ctx.m_userProperties.get("API", &api_nodes)) {
+ ctx.reportError("At least one application node (for the mysqld) should be defined in config file");
+ return false;
+ }
+ return true;
+}
+
+static bool
add_node_connections(Vector<ConfigInfo::ConfigRuleSection>&sections,
struct InitConfigFileParser::Context &ctx,
const char * rule_data)
@@ -2998,10 +3388,10 @@ add_node_connections(Vector<ConfigInfo::ConfigRuleSection>&sections,
const char * type;
if(!tmp->get("Type", &type)) continue;
- if (strcmp(type,"DB") == 0)
+ if (strcmp(type,DB_TOKEN) == 0)
p_db_nodes.put("", i_db++, i);
- else if (strcmp(type,"API") == 0 ||
- strcmp(type,"MGM") == 0)
+ else if (strcmp(type,API_TOKEN) == 0 ||
+ strcmp(type,MGM_TOKEN) == 0)
p_api_mgm_nodes.put("", i_api_mgm++, i);
}
@@ -3015,9 +3405,9 @@ add_node_connections(Vector<ConfigInfo::ConfigRuleSection>&sections,
s.m_sectionType= BaseString("TCP");
s.m_sectionData= new Properties(true);
char buf[16];
- snprintf(buf, sizeof(buf), "%u", nodeId1);
+ BaseString::snprintf(buf, sizeof(buf), "%u", nodeId1);
s.m_sectionData->put("NodeId1", buf);
- snprintf(buf, sizeof(buf), "%u", nodeId2);
+ BaseString::snprintf(buf, sizeof(buf), "%u", nodeId2);
s.m_sectionData->put("NodeId2", buf);
sections.push_back(s);
}
@@ -3032,9 +3422,9 @@ add_node_connections(Vector<ConfigInfo::ConfigRuleSection>&sections,
s.m_sectionType= BaseString("TCP");
s.m_sectionData= new Properties(true);
char buf[16];
- snprintf(buf, sizeof(buf), "%u", nodeId1);
+ BaseString::snprintf(buf, sizeof(buf), "%u", nodeId1);
s.m_sectionData->put("NodeId1", buf);
- snprintf(buf, sizeof(buf), "%u", nodeId2);
+ BaseString::snprintf(buf, sizeof(buf), "%u", nodeId2);
s.m_sectionData->put("NodeId2", buf);
sections.push_back(s);
}
@@ -3074,7 +3464,7 @@ static bool add_server_ports(Vector<ConfigInfo::ConfigRuleSection>&sections,
Uint32 adder= 0;
computers.get("",computer, &adder);
- if (strcmp(type,"DB") == 0) {
+ if (strcmp(type,DB_TOKEN) == 0) {
adder++;
tmp->put("ServerPort", port_base+adder);
computers.put("",computer, adder);
@@ -3089,16 +3479,126 @@ check_node_vs_replicas(Vector<ConfigInfo::ConfigRuleSection>&sections,
struct InitConfigFileParser::Context &ctx,
const char * rule_data)
{
- Uint32 db_nodes = 0;
- Uint32 replicas = 0;
- ctx.m_userProperties.get("DB", &db_nodes);
+ Uint32 db_nodes= 0;
+ Uint32 replicas= 0;
+ Uint32 db_host_count= 0;
+ ctx.m_userProperties.get(DB_TOKEN, &db_nodes);
ctx.m_userProperties.get("NoOfReplicas", &replicas);
if((db_nodes % replicas) != 0){
ctx.reportError("Invalid no of db nodes wrt no of replicas.\n"
"No of nodes must be dividable with no or replicas");
return false;
}
-
+ // check that node groups and arbitrators are ok
+ // just issue warning if not
+ if(replicas > 1){
+ Properties * props= ctx.m_config;
+ Properties p_db_hosts(true); // store hosts which db nodes run on
+ Properties p_arbitrators(true); // store hosts which arbitrators run on
+ // arbitrator should not run together with db node on same host
+ Uint32 i, n, group= 0, i_group= 0;
+ Uint32 n_nodes;
+ BaseString node_group_warning, arbitration_warning;
+ const char *arbit_warn_fmt=
+ "\n arbitrator with id %d and db node with id %d on same host %s";
+ const char *arbit_warn_fmt2=
+ "\n arbitrator with id %d has no hostname specified";
+
+ ctx.m_userProperties.get("NoOfNodes", &n_nodes);
+ for (i= 0, n= 0; n < n_nodes; i++){
+ const Properties * tmp;
+ if(!props->get("Node", i, &tmp)) continue;
+ n++;
+
+ const char * type;
+ if(!tmp->get("Type", &type)) continue;
+
+ const char* host= 0;
+ tmp->get("HostName", &host);
+
+ if (strcmp(type,DB_TOKEN) == 0)
+ {
+ {
+ Uint32 ii;
+ if (!p_db_hosts.get(host,&ii))
+ db_host_count++;
+ p_db_hosts.put(host,i);
+ if (p_arbitrators.get(host,&ii))
+ {
+ arbitration_warning.appfmt(arbit_warn_fmt, ii, i, host);
+ p_arbitrators.remove(host); // only one warning per db node
+ }
+ }
+ {
+ unsigned j;
+ BaseString str, str2;
+ str.assfmt("#group%d_",group);
+ p_db_hosts.put(str.c_str(),i_group,host);
+ str2.assfmt("##group%d_",group);
+ p_db_hosts.put(str2.c_str(),i_group,i);
+ for (j= 0; j < i_group; j++)
+ {
+ const char *other_host;
+ p_db_hosts.get(str.c_str(),j,&other_host);
+ if (strcmp(host,other_host) == 0) {
+ unsigned int other_i, c= 0;
+ p_db_hosts.get(str2.c_str(),j,&other_i);
+ p_db_hosts.get(str.c_str(),&c);
+ if (c == 0) // first warning in this node group
+ node_group_warning.appfmt(" Node group %d", group);
+ c|= 1 << j;
+ p_db_hosts.put(str.c_str(),c);
+
+ node_group_warning.appfmt(",\n db node with id %d and id %d "
+ "on same host %s", other_i, i, host);
+ }
+ }
+ i_group++;
+ DBUG_ASSERT(i_group <= replicas);
+ if (i_group == replicas)
+ {
+ unsigned c= 0;
+ p_db_hosts.get(str.c_str(),&c);
+ if (c+1 == (1u << (replicas-1))) // all nodes on same machine
+ node_group_warning.append(".\n Host failure will "
+ "cause complete cluster shutdown.");
+ else if (c > 0)
+ node_group_warning.append(".\n Host failure may "
+ "cause complete cluster shutdown.");
+ group++;
+ i_group= 0;
+ }
+ }
+ }
+ else if (strcmp(type,API_TOKEN) == 0 ||
+ strcmp(type,MGM_TOKEN) == 0)
+ {
+ Uint32 rank;
+ if(tmp->get("ArbitrationRank", &rank) && rank > 0)
+ {
+ if(host && host[0] != 0)
+ {
+ Uint32 ii;
+ p_arbitrators.put(host,i);
+ if (p_db_hosts.get(host,&ii))
+ {
+ arbitration_warning.appfmt(arbit_warn_fmt, i, ii, host);
+ }
+ }
+ else
+ {
+ arbitration_warning.appfmt(arbit_warn_fmt2, i);
+ }
+ }
+ }
+ }
+ if (db_host_count > 1 && node_group_warning.length() > 0)
+ ndbout_c("Cluster configuration warning:\n%s",node_group_warning.c_str());
+ if (db_host_count > 1 && arbitration_warning.length() > 0)
+ ndbout_c("Cluster configuration warning:%s%s",arbitration_warning.c_str(),
+ "\n Running arbitrator on the same host as a database node may"
+ "\n cause complete cluster shutdown in case of host failure.");
+ }
return true;
}
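
A minimal stand-alone sketch (not the real InitConfigFileParser code) of how string-form limits such as "4K", "1M" and "true" in the parameter table above can be turned back into numbers and booleans; parse_size() and parse_bool() are hypothetical helper names chosen for illustration only.

/* Illustrative sketch only -- approximates what convertStringToUint64()
 * and convertStringToBool() have to do now that Default/Min/Max are strings. */
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

static bool parse_size(const char* s, uint64_t& val)
{
  if (s == 0 || *s == '\0')
    return false;
  char* end = 0;
  errno = 0;
  unsigned long long v = strtoull(s, &end, 0);    /* base 0 also accepts 0x... */
  if (errno != 0 || end == s)
    return false;
  switch (*end) {                                 /* optional size suffix */
  case 'k': case 'K': v *= 1024ULL;               end++; break;
  case 'm': case 'M': v *= 1024ULL * 1024;        end++; break;
  case 'g': case 'G': v *= 1024ULL * 1024 * 1024; end++; break;
  default: break;
  }
  if (*end != '\0')                               /* reject trailing junk */
    return false;
  val = v;
  return true;
}

static bool parse_bool(const char* s, bool& val)
{
  if (strcmp(s, "true") == 0)  { val = true;  return true; }
  if (strcmp(s, "false") == 0) { val = false; return true; }
  return false;
}

int main()
{
  uint64_t v; bool b;
  if (parse_size("1M", v))   printf("1M   -> %llu\n", (unsigned long long)v); /* 1048576 */
  if (parse_size("4K", v))   printf("4K   -> %llu\n", (unsigned long long)v); /* 4096 */
  if (parse_bool("true", b)) printf("true -> %d\n", (int)b);
  return 0;
}
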
diff --git a/ndb/src/common/mgmcommon/ConfigInfo.hpp b/ndb/src/mgmsrv/ConfigInfo.hpp
index 9a954fe78d5..512505cbd30 100644
--- a/ndb/src/common/mgmcommon/ConfigInfo.hpp
+++ b/ndb/src/mgmsrv/ConfigInfo.hpp
@@ -27,8 +27,8 @@
* A MANDATORY parameters must be specified in the config file
* An UNDEFINED parameter may or may not be specified in the config file
*/
-static const Uint64 MANDATORY = ~0; // Default value for mandatory params.
-static const Uint64 UNDEFINED = (~0)-1; // Default value for undefined params.
+static const char* MANDATORY = (char*)~(UintPtr)0;// Default value for mandatory params.
+static const char* UNDEFINED = 0; // Default value for undefined params.
/**
* @class ConfigInfo
@@ -56,9 +56,9 @@ public:
Status _status;
bool _updateable;
Type _type;
- Uint64 _default;
- Uint64 _min;
- Uint64 _max;
+ const char* _default;
+ const char* _min;
+ const char* _max;
};
struct AliasPair{
diff --git a/ndb/src/common/mgmcommon/InitConfigFileParser.cpp b/ndb/src/mgmsrv/InitConfigFileParser.cpp
index 7c842508491..fdfe7823fc2 100644
--- a/ndb/src/common/mgmcommon/InitConfigFileParser.cpp
+++ b/ndb/src/mgmsrv/InitConfigFileParser.cpp
@@ -31,7 +31,6 @@ static void require(bool v) { if(!v) abort();}
// Ctor / Dtor
//****************************************************************************
InitConfigFileParser::InitConfigFileParser(){
-
m_info = new ConfigInfo();
}
@@ -43,7 +42,7 @@ InitConfigFileParser::~InitConfigFileParser() {
// Read Config File
//****************************************************************************
InitConfigFileParser::Context::Context(const ConfigInfo * info)
- : m_configValues(1000, 20), m_userProperties(true) {
+ : m_userProperties(true), m_configValues(1000, 20) {
m_config = new Properties(true);
m_defaults = new Properties(true);
@@ -111,14 +110,13 @@ InitConfigFileParser::parseConfig(FILE * file) {
"of configuration file.");
return 0;
}
-
- snprintf(ctx.fname, sizeof(ctx.fname), section); free(section);
+ BaseString::snprintf(ctx.fname, sizeof(ctx.fname), section); free(section);
ctx.type = InitConfigFileParser::DefaultSection;
ctx.m_sectionLineno = ctx.m_lineno;
ctx.m_currentSection = new Properties(true);
ctx.m_userDefaults = NULL;
- ctx.m_currentInfo = m_info->getInfo(ctx.fname);
- ctx.m_systemDefaults = m_info->getDefaults(ctx.fname);
+ require((ctx.m_currentInfo = m_info->getInfo(ctx.fname)) != 0);
+ require((ctx.m_systemDefaults = m_info->getDefaults(ctx.fname)) != 0);
continue;
}
@@ -132,15 +130,14 @@ InitConfigFileParser::parseConfig(FILE * file) {
"of configuration file.");
return 0;
}
-
- snprintf(ctx.fname, sizeof(ctx.fname), section);
+ BaseString::snprintf(ctx.fname, sizeof(ctx.fname), section);
free(section);
ctx.type = InitConfigFileParser::Section;
ctx.m_sectionLineno = ctx.m_lineno;
ctx.m_currentSection = new Properties(true);
ctx.m_userDefaults = getSection(ctx.fname, ctx.m_defaults);
- ctx.m_currentInfo = m_info->getInfo(ctx.fname);
- ctx.m_systemDefaults = m_info->getDefaults(ctx.fname);
+ require((ctx.m_currentInfo = m_info->getInfo(ctx.fname)) != 0);
+ require((ctx.m_systemDefaults = m_info->getDefaults(ctx.fname)) != 0);
continue;
}
@@ -162,7 +159,6 @@ InitConfigFileParser::parseConfig(FILE * file) {
ctx.reportError("Could not store section of configuration file.");
return 0;
}
-
for(size_t i = 0; ConfigInfo::m_ConfigRules[i].m_configRule != 0; i++){
ctx.type = InitConfigFileParser::Undefined;
ctx.m_currentSection = 0;
@@ -176,12 +172,12 @@ InitConfigFileParser::parseConfig(FILE * file) {
return 0;
for(size_t j = 0; j<tmp.size(); j++){
- snprintf(ctx.fname, sizeof(ctx.fname), tmp[j].m_sectionType.c_str());
+ BaseString::snprintf(ctx.fname, sizeof(ctx.fname), tmp[j].m_sectionType.c_str());
ctx.type = InitConfigFileParser::Section;
ctx.m_currentSection = tmp[j].m_sectionData;
ctx.m_userDefaults = getSection(ctx.fname, ctx.m_defaults);
- ctx.m_currentInfo = m_info->getInfo(ctx.fname);
- ctx.m_systemDefaults = m_info->getDefaults(ctx.fname);
+ require((ctx.m_currentInfo = m_info->getInfo(ctx.fname)) != 0);
+ require((ctx.m_systemDefaults = m_info->getDefaults(ctx.fname)) != 0);
if(!storeSection(ctx))
return 0;
}
@@ -202,7 +198,7 @@ InitConfigFileParser::parseConfig(FILE * file) {
ctx.m_config->put("NoOfNodes", nNodes);
char tmpLine[MAX_LINE_LENGTH];
- snprintf(tmpLine, MAX_LINE_LENGTH, "EXTERNAL SYSTEM_");
+ BaseString::snprintf(tmpLine, MAX_LINE_LENGTH, "EXTERNAL SYSTEM_");
strncat(tmpLine, system, MAX_LINE_LENGTH);
strncat(tmpLine, ":NoOfConnections", MAX_LINE_LENGTH);
ctx.m_config->put(tmpLine, nExtConnections);
@@ -353,6 +349,8 @@ InitConfigFileParser::storeNameValuePair(Context& ctx,
case ConfigInfo::STRING:
MGM_REQUIRE(ctx.m_currentSection->put(pname, value));
break;
+ case ConfigInfo::SECTION:
+ abort();
}
return true;
}
@@ -387,7 +385,7 @@ bool InitConfigFileParser::convertStringToUint64(const char* s,
errno = 0;
char* p;
- long long v = strtoll(s, &p, 10);
+ long long v = strtoll(s, &p, log10base);
if (errno != 0)
return false;
@@ -484,10 +482,6 @@ InitConfigFileParser::parseSectionHeader(const char* line) const {
tmp[0] = ' ';
trim(tmp);
- // Convert section header to upper
- for(int i= strlen(tmp)-1; i >= 0; i--)
- tmp[i]= toupper(tmp[i]);
-
// Get the correct header name if an alias
{
const char *tmp_alias= m_info->getAlias(tmp);
@@ -516,7 +510,7 @@ char*
InitConfigFileParser::parseDefaultSectionHeader(const char* line) const {
static char token1[MAX_LINE_LENGTH], token2[MAX_LINE_LENGTH];
- int no = sscanf(line, "[%120[A-Za-z] %120[A-Za-z]]", token1, token2);
+ int no = sscanf(line, "[%120[A-Z_a-z] %120[A-Z_a-z]]", token1, token2);
// Not correct no of tokens
if (no != 2) return NULL;
@@ -524,8 +518,12 @@ InitConfigFileParser::parseDefaultSectionHeader(const char* line) const {
// Not correct keyword at end
if (!strcasecmp(token2, "DEFAULT") == 0) return NULL;
- if(m_info->getInfo(token1)){
- return strdup(token1);
+ const char *token1_alias= m_info->getAlias(token1);
+ if (token1_alias == 0)
+ token1_alias= token1;
+
+ if(m_info->getInfo(token1_alias)){
+ return strdup(token1_alias);
}
// Did not find section
@@ -551,30 +549,28 @@ InitConfigFileParser::storeSection(Context& ctx){
for(int i = strlen(ctx.fname) - 1; i>=0; i--){
ctx.fname[i] = toupper(ctx.fname[i]);
}
- snprintf(ctx.pname, sizeof(ctx.pname), ctx.fname);
+ BaseString::snprintf(ctx.pname, sizeof(ctx.pname), ctx.fname);
char buf[255];
if(ctx.type == InitConfigFileParser::Section)
- snprintf(buf, sizeof(buf), "%s", ctx.fname);
+ BaseString::snprintf(buf, sizeof(buf), "%s", ctx.fname);
if(ctx.type == InitConfigFileParser::DefaultSection)
- snprintf(buf, sizeof(buf), "%s DEFAULT", ctx.fname);
- snprintf(ctx.fname, sizeof(ctx.fname), buf);
+ BaseString::snprintf(buf, sizeof(buf), "%s DEFAULT", ctx.fname);
+ BaseString::snprintf(ctx.fname, sizeof(ctx.fname), buf);
if(ctx.type == InitConfigFileParser::Section){
for(int i = 0; i<m_info->m_NoOfRules; i++){
const ConfigInfo::SectionRule & rule = m_info->m_SectionRules[i];
- if(!strcmp(rule.m_section, "*") || !strcmp(rule.m_section, ctx.fname))
- if(!(* rule.m_sectionRule)(ctx, rule.m_ruleData))
+ if(!strcmp(rule.m_section, "*") || !strcmp(rule.m_section, ctx.fname)){
+ if(!(* rule.m_sectionRule)(ctx, rule.m_ruleData)){
return false;
+ }
+ }
}
}
-
if(ctx.type == InitConfigFileParser::DefaultSection)
require(ctx.m_defaults->put(ctx.pname, ctx.m_currentSection));
-
if(ctx.type == InitConfigFileParser::Section)
require(ctx.m_config->put(ctx.pname, ctx.m_currentSection));
-
delete ctx.m_currentSection; ctx.m_currentSection = NULL;
-
return true;
}
@@ -585,7 +581,7 @@ InitConfigFileParser::Context::reportError(const char * fmt, ...){
va_start(ap, fmt);
if (fmt != 0)
- vsnprintf(buf, sizeof(buf)-1, fmt, ap);
+ BaseString::vsnprintf(buf, sizeof(buf)-1, fmt, ap);
ndbout << "Error line " << m_lineno << ": " << buf << endl;
va_end(ap);
@@ -599,7 +595,7 @@ InitConfigFileParser::Context::reportWarning(const char * fmt, ...){
va_start(ap, fmt);
if (fmt != 0)
- vsnprintf(buf, sizeof(buf)-1, fmt, ap);
+ BaseString::vsnprintf(buf, sizeof(buf)-1, fmt, ap);
ndbout << "Warning line " << m_lineno << ": " << buf << endl;
va_end(ap);
}
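
A small hedged sketch of the case-insensitive section/alias matching that the strcasecmp()-based isSection()/getAlias() lookups above rely on; the alias table and resolve_section() helper below are illustrative stand-ins, not the real ConfigInfo tables.

/* Illustrative sketch only. strcasecmp() is POSIX. */
#include <cstdio>
#include <strings.h>

struct AliasPair { const char* name; const char* alias; };

static const AliasPair aliases[] = {
  { "DB",  "NDBD" },        /* example entries, not the actual alias list */
  { "MGM", "NDB_MGMD" },
  { "API", "MYSQLD" },
  { 0, 0 }
};

/* Return the canonical section name for `header`, or the header itself
 * if it is not a known alias. */
static const char* resolve_section(const char* header)
{
  for (int i = 0; aliases[i].name != 0; i++)
    if (strcasecmp(header, aliases[i].alias) == 0)
      return aliases[i].name;
  return header;
}

int main()
{
  printf("%s\n", resolve_section("ndbd"));      /* DB  */
  printf("%s\n", resolve_section("NDB_MGMD"));  /* MGM */
  printf("%s\n", resolve_section("TCP"));       /* TCP (no alias) */
  return 0;
}
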
diff --git a/ndb/src/common/mgmcommon/InitConfigFileParser.hpp b/ndb/src/mgmsrv/InitConfigFileParser.hpp
index 6b7482c12ae..1ea0a094ccd 100644
--- a/ndb/src/common/mgmcommon/InitConfigFileParser.hpp
+++ b/ndb/src/mgmsrv/InitConfigFileParser.hpp
@@ -86,6 +86,9 @@ public:
void reportWarning(const char * msg, ...);
};
+ static bool convertStringToUint64(const char* s, Uint64& val, Uint32 log10base = 0);
+ static bool convertStringToBool(const char* s, bool& val);
+
private:
/**
* Check if line only contains space/comments
@@ -111,8 +114,6 @@ private:
bool parseNameValuePair(Context&, const char* line);
bool storeNameValuePair(Context&, const char* fname, const char* value);
- bool convertStringToUint64(const char* s, Uint64& val, Uint32 log10base = 0);
- bool convertStringToBool(const char* s, bool& val);
bool storeSection(Context&);
const Properties* getSection(const char * name, const Properties* src);
diff --git a/ndb/src/mgmsrv/Makefile.am b/ndb/src/mgmsrv/Makefile.am
index fc493fe10c7..3b57b027827 100644
--- a/ndb/src/mgmsrv/Makefile.am
+++ b/ndb/src/mgmsrv/Makefile.am
@@ -1,3 +1,8 @@
+MYSQLDATAdir = $(localstatedir)
+MYSQLSHAREdir = $(pkgdatadir)
+MYSQLBASEdir= $(prefix)
+#MYSQLCLUSTERdir= $(prefix)/mysql-cluster
+MYSQLCLUSTERdir= .
ndbbin_PROGRAMS = ndb_mgmd
@@ -7,10 +12,11 @@ ndb_mgmd_SOURCES = \
main.cpp \
Services.cpp \
convertStrToInt.cpp \
- NodeLogLevel.cpp \
- NodeLogLevelList.cpp \
SignalQueue.cpp \
MgmtSrvrConfig.cpp \
+ ConfigInfo.cpp \
+ InitConfigFileParser.cpp \
+ Config.cpp \
CommandInterpreter.cpp
INCLUDES_LOC = -I$(top_srcdir)/ndb/src/ndbapi \
@@ -19,8 +25,17 @@ INCLUDES_LOC = -I$(top_srcdir)/ndb/src/ndbapi \
LDADD_LOC = $(top_builddir)/ndb/src/libndbclient.la \
$(top_builddir)/ndb/src/common/editline/libeditline.a \
+ $(top_builddir)/dbug/libdbug.a \
+ $(top_builddir)/mysys/libmysys.a \
+ $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
@TERMCAP_LIB@
+DEFS_LOC = -DDEFAULT_MYSQL_HOME="\"$(MYSQLBASEdir)\"" \
+ -DDATADIR="\"$(MYSQLDATAdir)\"" \
+ -DSHAREDIR="\"$(MYSQLSHAREdir)\"" \
+ -DMYSQLCLUSTERDIR="\"$(MYSQLCLUSTERdir)\"" \
+ -DNDB_BASE_PORT="\"@ndb_port_base@\""
+
include $(top_srcdir)/ndb/config/common.mk.am
include $(top_srcdir)/ndb/config/type_ndbapi.mk.am
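
A short sketch, under the assumption that NDB_BASE_PORT now expands to a quoted string as defined in DEFS_LOC above, of why the port-base arithmetic in fixPortNumber() needs strtoll(); the fallback value "2186" below is a placeholder, not the actual @ndb_port_base@ value.

/* Illustrative sketch only. */
#include <cstdio>
#include <cstdlib>

#ifndef NDB_BASE_PORT
#define NDB_BASE_PORT "2186"   /* normally injected via -DNDB_BASE_PORT="\"...\"" */
#endif

int main()
{
  /* NDB_BASE_PORT + 2 would be pointer arithmetic on a string literal,
   * so the text is converted first, as in base = strtoll(NDB_BASE_PORT,0,0)+2. */
  long long base = strtoll(NDB_BASE_PORT, 0, 0) + 2;
  printf("first dynamically assigned server port: %lld\n", base);
  return 0;
}
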
diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp
index 0936ec234cf..29df10630f3 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.cpp
@@ -15,10 +15,11 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <ndb_global.h>
-#include <pthread.h>
+#include <my_pthread.h>
#include "MgmtSrvr.hpp"
#include "MgmtErrorReporter.hpp"
+#include <ConfigRetriever.hpp>
#include <NdbOut.hpp>
#include <NdbApiSignal.hpp>
@@ -44,7 +45,6 @@
#include <ndb_version.h>
#include <SocketServer.hpp>
-#include "NodeLogLevel.hpp"
#include <NdbConfig.h>
#include <NdbAutoPtr.hpp>
@@ -60,70 +60,17 @@
#define DEBUG(x)
#endif
-static
-void
-CmdBackupCallback(const MgmtSrvr::BackupEvent & event)
-{
- char str[255];
-
- ndbout << endl;
-
- bool ok = false;
- switch(event.Event){
- case MgmtSrvr::BackupEvent::BackupStarted:
- ok = true;
- snprintf(str, sizeof(str),
- "Backup %d started", event.Started.BackupId);
- break;
- case MgmtSrvr::BackupEvent::BackupFailedToStart:
- ok = true;
- snprintf(str, sizeof(str),
- "Backup failed to start (Error %d)",
- event.FailedToStart.ErrorCode);
- break;
- case MgmtSrvr::BackupEvent::BackupCompleted:
- ok = true;
- snprintf(str, sizeof(str),
- "Backup %d completed",
- event.Completed.BackupId);
- ndbout << str << endl;
-
- snprintf(str, sizeof(str),
- " StartGCP: %d StopGCP: %d",
- event.Completed.startGCP, event.Completed.stopGCP);
- ndbout << str << endl;
-
- snprintf(str, sizeof(str),
- " #Records: %d #LogRecords: %d",
- event.Completed.NoOfRecords, event.Completed.NoOfLogRecords);
- ndbout << str << endl;
-
- snprintf(str, sizeof(str),
- " Data: %d bytes Log: %d bytes",
- event.Completed.NoOfBytes, event.Completed.NoOfLogBytes);
- break;
- case MgmtSrvr::BackupEvent::BackupAborted:
- ok = true;
- snprintf(str, sizeof(str),
- "Backup %d has been aborted reason %d",
- event.Aborted.BackupId,
- event.Aborted.Reason);
- break;
- }
- if(!ok){
- snprintf(str, sizeof(str), "Unknown backup event: %d", event.Event);
- }
- ndbout << str << endl;
-}
-
+extern int global_flag_send_heartbeat_now;
+extern int g_no_nodeid_checks;
void *
MgmtSrvr::logLevelThread_C(void* m)
{
MgmtSrvr *mgm = (MgmtSrvr*)m;
-
+ my_thread_init();
mgm->logLevelThreadRun();
+ my_thread_end();
NdbThread_Exit(0);
/* NOTREACHED */
return 0;
@@ -133,9 +80,10 @@ void *
MgmtSrvr::signalRecvThread_C(void *m)
{
MgmtSrvr *mgm = (MgmtSrvr*)m;
-
+ my_thread_init();
mgm->signalRecvThreadRun();
+ my_thread_end();
NdbThread_Exit(0);
/* NOTREACHED */
return 0;
@@ -175,7 +123,7 @@ MgmtSrvr::signalRecvThreadRun()
while(!_isStopThread) {
SigMatch *handler = NULL;
NdbApiSignal *signal = NULL;
- if(m_signalRecvQueue.waitFor(siglist, handler, signal)) {
+ if(m_signalRecvQueue.waitFor(siglist, handler, signal, DEFAULT_TIMEOUT)) {
if(handler->function != 0)
(this->*handler->function)(signal);
}
@@ -185,44 +133,65 @@ MgmtSrvr::signalRecvThreadRun()
EventLogger g_EventLogger;
+static NdbOut&
+operator<<(NdbOut& out, const LogLevel & ll)
+{
+ out << "[LogLevel: ";
+ for(size_t i = 0; i<LogLevel::LOGLEVEL_CATEGORIES; i++)
+ out << ll.getLogLevel((LogLevel::EventCategory)i) << " ";
+ out << "]";
+ return out;
+}
+
void
MgmtSrvr::logLevelThreadRun()
{
- NdbMutex* threadMutex = NdbMutex_Create();
-
while (!_isStopThread) {
- if (_startedNodeId != 0) {
- NdbMutex_Lock(threadMutex);
-
- // Local node
- NodeLogLevel* n = NULL;
- while ((n = _nodeLogLevelList->next()) != NULL) {
- if (n->getNodeId() == _startedNodeId) {
- setNodeLogLevel(_startedNodeId, n->getLogLevelOrd(), true);
- }
- }
- // Cluster log
- while ((n = _clusterLogLevelList->next()) != NULL) {
- if (n->getNodeId() == _startedNodeId) {
- setEventReportingLevel(_startedNodeId, n->getLogLevelOrd(), true);
- }
- }
- _startedNodeId = 0;
-
- NdbMutex_Unlock(threadMutex);
+ /**
+ * Handle started nodes
+ */
+ EventSubscribeReq req;
+ req = m_statisticsListner.m_clients[0].m_logLevel;
+ req.blockRef = _ownReference;
- } // if (_startedNodeId != 0) {
+ SetLogLevelOrd ord;
+
+ m_started_nodes.lock();
+ while(m_started_nodes.size() > 0){
+ Uint32 node = m_started_nodes[0];
+ m_started_nodes.erase(0, false);
+ m_started_nodes.unlock();
+ setEventReportingLevelImpl(node, req);
+
+ ord = m_nodeLogLevel[node];
+ setNodeLogLevelImpl(node, ord);
+
+ m_started_nodes.lock();
+ }
+ m_started_nodes.unlock();
+
+ m_log_level_requests.lock();
+ while(m_log_level_requests.size() > 0){
+ req = m_log_level_requests[0];
+ m_log_level_requests.erase(0, false);
+ m_log_level_requests.unlock();
+
+ LogLevel tmp;
+ tmp = req;
+
+ if(req.blockRef == 0){
+ req.blockRef = _ownReference;
+ setEventReportingLevelImpl(0, req);
+ } else {
+ ord = req;
+ setNodeLogLevelImpl(req.blockRef, ord);
+ }
+ m_log_level_requests.lock();
+ }
+ m_log_level_requests.unlock();
NdbSleep_MilliSleep(_logLevelThreadSleep);
- } // while (!_isStopThread)
-
- NdbMutex_Destroy(threadMutex);
-}
-
-void
-MgmtSrvr::setStatisticsListner(StatisticsListner* listner)
-{
- m_statisticsListner = listner;
+ }
}
void
@@ -269,7 +238,7 @@ class ErrorItem
{
public:
int _errorCode;
- const BaseString _errorText;
+ const char * _errorText;
};
bool
@@ -426,108 +395,37 @@ MgmtSrvr::getPort() const {
ndb_mgm_destroy_iterator(iter);
- /*****************
- * Set Stat Port *
- *****************/
-#if 0
- if (!mgmProps->get("PortNumberStats", &tmp)){
- ndbout << "Could not find PortNumberStats in the configuration file."
- << endl;
- return false;
- }
- glob.port_stats = tmp;
-#endif
-
-#if 0
- const char * host;
- if(ndb_mgm_get_string_parameter(iter, mgmProps->get("ExecuteOnComputer", host)){
- ndbout << "Failed to find \"ExecuteOnComputer\" for my node" << endl;
- ndbout << "Unable to verify own hostname" << endl;
- return false;
- }
-
- const char * hostname;
- {
- const Properties * p;
- char buf[255];
- snprintf(buf, sizeof(buf), "Computer_%s", host.c_str());
- if(!glob.cluster_config->get(buf, &p)){
- ndbout << "Failed to find computer " << host << " in config" << endl;
- ndbout << "Unable to verify own hostname" << endl;
- return false;
- }
- if(!p->get("HostName", &hostname)){
- ndbout << "Failed to find \"HostName\" for computer " << host
- << " in config" << endl;
- ndbout << "Unable to verify own hostname" << endl;
- return false;
- }
- if(NdbHost_GetHostName(buf) != 0){
- ndbout << "Unable to get own hostname" << endl;
- ndbout << "Unable to verify own hostname" << endl;
- return false;
- }
- }
-
- const char * ip_address;
- if(mgmProps->get("IpAddress", &ip_address)){
- glob.use_specific_ip = true;
- glob.interface_name = strdup(ip_address);
- return true;
- }
-
- glob.interface_name = strdup(hostname);
-#endif
-
return port;
}
-int
-MgmtSrvr::getStatPort() const {
-#if 0
- const Properties *mgmProps;
- if(!getConfig()->get("Node", _ownNodeId, &mgmProps))
- return -1;
-
- int tmp = -1;
- if(!mgmProps->get("PortNumberStats", (Uint32 *)&tmp))
- return -1;
-
- return tmp;
-#else
- return -1;
-#endif
-}
-
/* Constructor */
MgmtSrvr::MgmtSrvr(NodeId nodeId,
const BaseString &configFilename,
- const BaseString &ndb_config_filename,
+ LocalConfig &local_config,
Config * config):
_blockNumber(1), // Hard coded block number since it makes it easy to send
// signals to other management servers.
_ownReference(0),
+ m_local_config(local_config),
+ m_allocated_resources(*this),
theSignalIdleList(NULL),
theWaitState(WAIT_SUBSCRIBE_CONF),
- theConfCount(0),
- m_allocated_resources(*this) {
+ m_statisticsListner(this)
+{
+
+ DBUG_ENTER("MgmtSrvr::MgmtSrvr");
_config = NULL;
- _isStatPortActive = false;
- _isClusterLogStatActive = false;
_isStopThread = false;
_logLevelThread = NULL;
_logLevelThreadSleep = 500;
m_signalRecvThread = NULL;
- _startedNodeId = 0;
theFacade = 0;
m_newConfig = NULL;
m_configFilename = configFilename;
- setCallback(CmdBackupCallback);
- m_localNdbConfigFilename = ndb_config_filename;
m_nextConfigGenerationNumber = 0;
@@ -540,55 +438,109 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId,
/**
* Fill the nodeTypes array
*/
- for(Uint32 i = 0; i<MAX_NODES; i++)
+ for(Uint32 i = 0; i<MAX_NODES; i++) {
nodeTypes[i] = (enum ndb_mgm_node_type)-1;
-
- ndb_mgm_configuration_iterator * iter = ndb_mgm_create_configuration_iterator
- (config->m_configValues, CFG_SECTION_NODE);
- for(ndb_mgm_first(iter); ndb_mgm_valid(iter); ndb_mgm_next(iter)){
- unsigned type, id;
- if(ndb_mgm_get_int_parameter(iter, CFG_TYPE_OF_SECTION, &type) != 0)
- continue;
-
- if(ndb_mgm_get_int_parameter(iter, CFG_NODE_ID, &id) != 0)
- continue;
-
- MGM_REQUIRE(id < MAX_NODES);
+ m_connect_address[i].s_addr= 0;
+ }
+ {
+ ndb_mgm_configuration_iterator * iter = ndb_mgm_create_configuration_iterator
+ (config->m_configValues, CFG_SECTION_NODE);
+ for(ndb_mgm_first(iter); ndb_mgm_valid(iter); ndb_mgm_next(iter)){
+ unsigned type, id;
+ if(ndb_mgm_get_int_parameter(iter, CFG_TYPE_OF_SECTION, &type) != 0)
+ continue;
- switch(type){
- case NODE_TYPE_DB:
- nodeTypes[id] = NDB_MGM_NODE_TYPE_NDB;
- break;
- case NODE_TYPE_API:
- nodeTypes[id] = NDB_MGM_NODE_TYPE_API;
- break;
- case NODE_TYPE_MGM:
- nodeTypes[id] = NDB_MGM_NODE_TYPE_MGM;
- break;
- case NODE_TYPE_REP:
- nodeTypes[id] = NDB_MGM_NODE_TYPE_REP;
- break;
- case NODE_TYPE_EXT_REP:
- default:
- break;
+ if(ndb_mgm_get_int_parameter(iter, CFG_NODE_ID, &id) != 0)
+ continue;
+
+ MGM_REQUIRE(id < MAX_NODES);
+
+ switch(type){
+ case NODE_TYPE_DB:
+ nodeTypes[id] = NDB_MGM_NODE_TYPE_NDB;
+ break;
+ case NODE_TYPE_API:
+ nodeTypes[id] = NDB_MGM_NODE_TYPE_API;
+ break;
+ case NODE_TYPE_MGM:
+ nodeTypes[id] = NDB_MGM_NODE_TYPE_MGM;
+ break;
+ case NODE_TYPE_REP:
+ nodeTypes[id] = NDB_MGM_NODE_TYPE_REP;
+ break;
+ case NODE_TYPE_EXT_REP:
+ default:
+ break;
+ }
}
+ ndb_mgm_destroy_iterator(iter);
}
- ndb_mgm_destroy_iterator(iter);
-
- m_statisticsListner = NULL;
-
- _nodeLogLevelList = new NodeLogLevelList();
- _clusterLogLevelList = new NodeLogLevelList();
_props = NULL;
-
_ownNodeId= 0;
NodeId tmp= nodeId;
- if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM, 0, 0)){
- ndbout << "Unable to obtain requested nodeid " << nodeId;
+ BaseString error_string;
+#if 0
+ char my_hostname[256];
+ struct sockaddr_in tmp_addr;
+ SOCKET_SIZE_TYPE addrlen= sizeof(tmp_addr);
+ if (!g_no_nodeid_checks) {
+ if (gethostname(my_hostname, sizeof(my_hostname))) {
+ ndbout << "error: gethostname() - " << strerror(errno) << endl;
+ exit(-1);
+ }
+ if (Ndb_getInAddr(&(((sockaddr_in*)&tmp_addr)->sin_addr),my_hostname)) {
+ ndbout << "error: Ndb_getInAddr(" << my_hostname << ") - "
+ << strerror(errno) << endl;
+ exit(-1);
+ }
+ }
+ if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM,
+ (struct sockaddr *)&tmp_addr,
+ &addrlen, error_string)){
+ ndbout << "Unable to obtain requested nodeid: "
+ << error_string.c_str() << endl;
+ exit(-1);
+ }
+#else
+ if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM,
+ 0, 0, error_string)){
+ ndbout << "Unable to obtain requested nodeid: "
+ << error_string.c_str() << endl;
exit(-1);
}
+#endif
_ownNodeId = tmp;
+
+
+ {
+ DBUG_PRINT("info", ("verifyConfig"));
+ ConfigRetriever cr(m_local_config, NDB_VERSION, NDB_MGM_NODE_TYPE_MGM);
+ if (!cr.verifyConfig(config->m_configValues, _ownNodeId)) {
+ ndbout << cr.getErrorString() << endl;
+ exit(-1);
+ }
+ }
+
+ {
+ MgmStatService::StatListener se;
+ se.m_socket = -1;
+ for(size_t t = 0; t<LogLevel::LOGLEVEL_CATEGORIES; t++){
+ se.m_logLevel.setLogLevel((LogLevel::EventCategory)t, 7);
+ }
+ se.m_logLevel.setLogLevel(LogLevel::llError, 15);
+ se.m_logLevel.setLogLevel(LogLevel::llBackup, 15);
+ m_statisticsListner.m_clients.push_back(se);
+ m_statisticsListner.m_logLevel = se.m_logLevel;
+ }
+
+ if ((m_node_id_mutex = NdbMutex_Create()) == 0)
+ {
+ ndbout << "mutex creation failed line = " << __LINE__ << endl;
+ exit(-1);
+ }
+
+ DBUG_VOID_RETURN;
}
@@ -606,19 +558,26 @@ MgmtSrvr::check_start()
}
bool
-MgmtSrvr::start()
+MgmtSrvr::start(BaseString &error_string)
{
if (_props == NULL) {
- if (!check_start())
+ if (!check_start()) {
+ error_string.append("MgmtSrvr.cpp: check_start() failed.");
return false;
+ }
}
- theFacade = TransporterFacade::start_instance
- (_ownNodeId,(ndb_mgm_configuration*)_config->m_configValues);
+ theFacade= TransporterFacade::theFacadeInstance= new TransporterFacade();
if(theFacade == 0) {
DEBUG("MgmtSrvr.cpp: theFacade is NULL.");
+ error_string.append("MgmtSrvr.cpp: theFacade is NULL.");
return false;
}
+ if ( theFacade->start_instance
+ (_ownNodeId, (ndb_mgm_configuration*)_config->m_configValues) < 0) {
+ DEBUG("MgmtSrvr.cpp: TransporterFacade::start_instance < 0.");
+ return false;
+ }
MGM_REQUIRE(_blockNumber == 1);
@@ -630,6 +589,7 @@ MgmtSrvr::start()
if(_blockNumber == -1){
DEBUG("MgmtSrvr.cpp: _blockNumber is -1.");
+ error_string.append("MgmtSrvr.cpp: _blockNumber is -1.");
theFacade->stop_instance();
theFacade = 0;
return false;
@@ -641,8 +601,6 @@ MgmtSrvr::start()
// Set the initial confirmation count for subscribe requests confirm
// from NDB nodes in the cluster.
//
- theConfCount = getNodeCount(NDB_MGM_NODE_TYPE_NDB);
-
// Loglevel thread
_logLevelThread = NdbThread_Create(logLevelThread_C,
(void**)this,
@@ -675,7 +633,9 @@ MgmtSrvr::~MgmtSrvr()
stopEventLog();
- NdbCondition_Destroy(theMgmtWaitForResponseCondPtr); NdbMutex_Destroy(m_configMutex);
+ NdbMutex_Destroy(m_node_id_mutex);
+ NdbCondition_Destroy(theMgmtWaitForResponseCondPtr);
+ NdbMutex_Destroy(m_configMutex);
if(m_newConfig != NULL)
free(m_newConfig);
@@ -683,9 +643,6 @@ MgmtSrvr::~MgmtSrvr()
if(_config != NULL)
delete _config;
- delete _nodeLogLevelList;
- delete _clusterLogLevelList;
-
// End set log level thread
void* res = 0;
_isStopThread = true;
@@ -706,6 +663,9 @@ MgmtSrvr::~MgmtSrvr()
int MgmtSrvr::okToSendTo(NodeId processId, bool unCond)
{
+ if(processId == 0)
+ return 0;
+
if (getNodeType(processId) != NDB_MGM_NODE_TYPE_NDB)
return WRONG_PROCESS_TYPE;
@@ -940,7 +900,7 @@ MgmtSrvr::restart(bool nostart, bool initalStart, bool abort,
while(getNextNodeId(&nodeId, NDB_MGM_NODE_TYPE_NDB) && nodes.get(nodeId)) {
enum ndb_mgm_node_status s;
s = NDB_MGM_NODE_STATUS_NO_CONTACT;
- while (s == NDB_MGM_NODE_STATUS_NO_CONTACT && waitTime > 0) {
+ while (s != NDB_MGM_NODE_STATUS_NOT_STARTED && waitTime > 0) {
Uint32 startPhase = 0, version = 0, dynamicId = 0, nodeGroup = 0;
Uint32 connectCount = 0;
bool system;
@@ -990,36 +950,41 @@ int
MgmtSrvr::versionNode(int processId, bool abort,
VersionCallback callback, void * anyData)
{
+ int version;
+
if(m_versionRec.inUse)
return OPERATION_IN_PROGRESS;
m_versionRec.callback = callback;
m_versionRec.inUse = true ;
- ClusterMgr::Node node;
- int version;
- if (getNodeType(processId) == NDB_MGM_NODE_TYPE_MGM) {
- if(m_versionRec.callback != 0)
- m_versionRec.callback(processId, NDB_VERSION, this,0);
- }
- if (getNodeType(processId) == NDB_MGM_NODE_TYPE_NDB) {
- node = theFacade->theClusterMgr->getNodeInfo(processId);
- version = node.m_info.m_version;
- if(theFacade->theClusterMgr->getNodeInfo(processId).connected)
- if(m_versionRec.callback != 0)
- m_versionRec.callback(processId, version, this,0);
- else
- if(m_versionRec.callback != 0)
- m_versionRec.callback(processId, 0, this,0);
-
+ if (getOwnNodeId() == processId)
+ {
+ version= NDB_VERSION;
}
-
- if (getNodeType(processId) == NDB_MGM_NODE_TYPE_API) {
+ else if (getNodeType(processId) == NDB_MGM_NODE_TYPE_NDB)
+ {
+ ClusterMgr::Node node= theFacade->theClusterMgr->getNodeInfo(processId);
+ if(node.connected)
+ version= node.m_info.m_version;
+ else
+ version= 0;
+ }
+ else if (getNodeType(processId) == NDB_MGM_NODE_TYPE_API ||
+ getNodeType(processId) == NDB_MGM_NODE_TYPE_MGM)
+ {
return sendVersionReq(processId);
}
+ else
+ version= 0;
+
+ if(m_versionRec.callback != 0)
+ m_versionRec.callback(processId, version, this,0);
m_versionRec.inUse = false ;
- return 0;
+ m_versionRec.version[processId]= version;
+
+ return 0;
}
int
@@ -1430,17 +1395,14 @@ MgmtSrvr::status(int processId,
Uint32 * nodegroup,
Uint32 * connectCount)
{
- if (getNodeType(processId) == NDB_MGM_NODE_TYPE_API) {
+ if (getNodeType(processId) == NDB_MGM_NODE_TYPE_API ||
+ getNodeType(processId) == NDB_MGM_NODE_TYPE_MGM) {
if(versionNode(processId, false,0,0) ==0)
* version = m_versionRec.version[processId];
else
* version = 0;
}
- if (getNodeType(processId) == NDB_MGM_NODE_TYPE_MGM) {
- * version = NDB_VERSION;
- }
-
const ClusterMgr::Node node =
theFacade->theClusterMgr->getNodeInfo(processId);
@@ -1510,175 +1472,72 @@ MgmtSrvr::status(int processId,
return -1;
}
-
-
-//****************************************************************************
-//****************************************************************************
-int
-MgmtSrvr::startStatisticEventReporting(int level)
-{
- SetLogLevelOrd ll;
- NodeId nodeId = 0;
-
- ll.clear();
- ll.setLogLevel(LogLevel::llStatistic, level);
-
- if (level > 0) {
- _isStatPortActive = true;
- } else {
- _isStatPortActive = false;
-
- if (_isClusterLogStatActive) {
- return 0;
- }
- }
-
- while (getNextNodeId(&nodeId, NDB_MGM_NODE_TYPE_NDB)) {
- setEventReportingLevelImpl(nodeId, ll);
- }
-
- return 0;
-}
-
-int
-MgmtSrvr::setEventReportingLevel(int processId, const SetLogLevelOrd & ll,
- bool isResend)
-{
- for (Uint32 i = 0; i < ll.noOfEntries; i++) {
- if (ll.theCategories[i] == LogLevel::llStatistic) {
- if (ll.theLevels[i] > 0) {
- _isClusterLogStatActive = true;
- break;
- } else {
- _isClusterLogStatActive = false;
-
- if (_isStatPortActive) {
- return 0;
- }
- break;
- }
- } // if (ll.theCategories
- } // for (int i = 0
-
- return setEventReportingLevelImpl(processId, ll, isResend);
-}
int
MgmtSrvr::setEventReportingLevelImpl(int processId,
- const SetLogLevelOrd & ll,
- bool isResend)
+ const EventSubscribeReq& ll)
{
- Uint32 i;
- for(i = 0; i<ll.noOfEntries; i++){
- // Save log level for the cluster log
- if (!isResend) {
- NodeLogLevel* n = NULL;
- bool found = false;
- while ((n = _clusterLogLevelList->next()) != NULL) {
- if (n->getNodeId() == processId &&
- n->getCategory() == ll.theCategories[i]) {
-
- n->setLevel(ll.theLevels[i]);
- found = true;
- }
- }
- if (!found) {
- _clusterLogLevelList->add(new NodeLogLevel(processId, ll));
- }
- }
- }
-
+
int result = okToSendTo(processId, true);
if (result != 0) {
return result;
}
- NdbApiSignal* signal = getSignal();
- if (signal == NULL) {
- return COULD_NOT_ALLOCATE_MEMORY;
- }
+ NdbApiSignal signal(_ownReference);
EventSubscribeReq * dst =
- CAST_PTR(EventSubscribeReq, signal->getDataPtrSend());
- for(i = 0; i<ll.noOfEntries; i++){
- dst->theCategories[i] = ll.theCategories[i];
- dst->theLevels[i] = ll.theLevels[i];
- }
-
- dst->noOfEntries = ll.noOfEntries;
- dst->blockRef = _ownReference;
+ CAST_PTR(EventSubscribeReq, signal.getDataPtrSend());
- signal->set(TestOrd::TraceAPI, CMVMI, GSN_EVENT_SUBSCRIBE_REQ,
- EventSubscribeReq::SignalLength);
+ * dst = ll;
+
+ signal.set(TestOrd::TraceAPI, CMVMI, GSN_EVENT_SUBSCRIBE_REQ,
+ EventSubscribeReq::SignalLength);
+
+ theFacade->lock_mutex();
+ send(&signal, processId, NODE_TYPE_DB);
+ theFacade->unlock_mutex();
- result = sendSignal(processId, WAIT_SUBSCRIBE_CONF, signal, true);
- if (result == -1) {
- return SEND_OR_RECEIVE_FAILED;
- }
- else {
- // Increment the conf counter
- theConfCount++;
- }
-
return 0;
}
//****************************************************************************
//****************************************************************************
int
-MgmtSrvr::setNodeLogLevel(int processId, const SetLogLevelOrd & ll,
- bool isResend)
+MgmtSrvr::setNodeLogLevelImpl(int processId, const SetLogLevelOrd & ll)
{
- Uint32 i;
- for(i = 0; i<ll.noOfEntries; i++){
- // Save log level for the cluster log
- if (!isResend) {
- NodeLogLevel* n = NULL;
- bool found = false;
- while ((n = _clusterLogLevelList->next()) != NULL) {
- if (n->getNodeId() == processId &&
- n->getCategory() == ll.theCategories[i]) {
-
- n->setLevel(ll.theLevels[i]);
- found = true;
- }
- }
- if (!found) {
- _clusterLogLevelList->add(new NodeLogLevel(processId, ll));
- }
- }
- }
-
int result = okToSendTo(processId, true);
if (result != 0) {
return result;
}
- NdbApiSignal* signal = getSignal();
- if (signal == NULL) {
- return COULD_NOT_ALLOCATE_MEMORY;
- }
-
- SetLogLevelOrd * dst = CAST_PTR(SetLogLevelOrd, signal->getDataPtrSend());
-
- for(i = 0; i<ll.noOfEntries; i++){
- dst->theCategories[i] = ll.theCategories[i];
- dst->theLevels[i] = ll.theLevels[i];
- }
+ NdbApiSignal signal(_ownReference);
- dst->noOfEntries = ll.noOfEntries;
+ SetLogLevelOrd * dst = CAST_PTR(SetLogLevelOrd, signal.getDataPtrSend());
- signal->set(TestOrd::TraceAPI, CMVMI, GSN_SET_LOGLEVELORD,
- SetLogLevelOrd::SignalLength);
-
- result = sendSignal(processId, NO_WAIT, signal, true);
- if (result == -1) {
- return SEND_OR_RECEIVE_FAILED;
- }
+ * dst = ll;
+
+ signal.set(TestOrd::TraceAPI, CMVMI, GSN_SET_LOGLEVELORD,
+ SetLogLevelOrd::SignalLength);
+
+ theFacade->lock_mutex();
+ theFacade->sendSignalUnCond(&signal, processId);
+ theFacade->unlock_mutex();
return 0;
}
+int
+MgmtSrvr::send(NdbApiSignal* signal, Uint32 node, Uint32 node_type){
+ Uint32 max = (node == 0) ? MAX_NODES : node + 1;
+
+ for(; node < max; node++){
+ while(node < max && nodeTypes[node] != (int)node_type) node++;
+ if(node == max)
+ break;
+ theFacade->sendSignalUnCond(signal, node);
+ }
+ return 0;
+}
//****************************************************************************
//****************************************************************************
@@ -1805,7 +1664,10 @@ MgmtSrvr::setSignalLoggingMode(int processId, LogMode mode,
logSpec = TestOrd::InputOutputSignals;
break;
default:
- assert("Unexpected value, MgmtSrvr::setSignalLoggingMode" == 0);
+ ndbout_c("Unexpected value %d, MgmtSrvr::setSignalLoggingMode, line %d",
+ (unsigned)mode, __LINE__);
+ assert(false);
+ return -1;
}
NdbApiSignal* signal = getSignal();
@@ -1973,29 +1835,14 @@ const char* MgmtSrvr::getErrorText(int errorCode)
for (int i = 0; i < noOfErrorCodes; ++i) {
if (errorCode == errorTable[i]._errorCode) {
- return errorTable[i]._errorText.c_str();
+ return errorTable[i]._errorText;
}
}
- snprintf(text, 255, "Unknown management server error code %d", errorCode);
+ BaseString::snprintf(text, 255, "Unknown management server error code %d", errorCode);
return text;
}
-/*****************************************************************************
- * Handle reception of various signals
- *****************************************************************************/
-
-int
-MgmtSrvr::handleSTATISTICS_CONF(NdbApiSignal* signal)
-{
- //ndbout << "MgmtSrvr::handleSTATISTICS_CONF" << endl;
-
- int x = signal->readData(1);
- //ndbout << "MgmtSrvr::handleSTATISTICS_CONF, x: " << x << endl;
- _statistics._test1 = x;
- return 0;
-}
-
void
MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal)
{
@@ -2019,51 +1866,7 @@ MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal)
}
break;
- case GSN_STATISTICS_CONF:
- if (theWaitState != WAIT_STATISTICS) {
- g_EventLogger.warning("MgmtSrvr::handleReceivedSignal, unexpected "
- "signal received, gsn %d, theWaitState = %d",
- gsn, theWaitState);
-
- return;
- }
- returnCode = handleSTATISTICS_CONF(signal);
- if (returnCode != -1) {
- theWaitState = NO_WAIT;
- }
- break;
-
-
- case GSN_SET_VAR_CONF:
- if (theWaitState != WAIT_SET_VAR) {
- g_EventLogger.warning("MgmtSrvr::handleReceivedSignal, unexpected "
- "signal received, gsn %d, theWaitState = %d",
- gsn, theWaitState);
- return;
- }
- theWaitState = NO_WAIT;
- _setVarReqResult = 0;
- break;
-
- case GSN_SET_VAR_REF:
- if (theWaitState != WAIT_SET_VAR) {
- g_EventLogger.warning("MgmtSrvr::handleReceivedSignal, unexpected "
- "signal received, gsn %d, theWaitState = %d",
- gsn, theWaitState);
- return;
- }
- theWaitState = NO_WAIT;
- _setVarReqResult = -1;
- break;
-
case GSN_EVENT_SUBSCRIBE_CONF:
- theConfCount--; // OK, we've received a conf message
- if (theConfCount < 0) {
- g_EventLogger.warning("MgmtSrvr::handleReceivedSignal, unexpected "
- "signal received, gsn %d, theWaitState = %d",
- gsn, theWaitState);
- theConfCount = 0;
- }
break;
case GSN_EVENT_REP:
@@ -2143,7 +1946,6 @@ MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal)
event.Completed.NoOfLogBytes = rep->noOfLogBytes;
event.Completed.NoOfRecords = rep->noOfRecords;
event.Completed.NoOfLogRecords = rep->noOfLogRecords;
-
event.Completed.stopGCP = rep->stopGCP;
event.Completed.startGCP = rep->startGCP;
event.Nodes = rep->nodes;
@@ -2246,20 +2048,19 @@ void
MgmtSrvr::handleStatus(NodeId nodeId, bool alive)
{
if (alive) {
- _startedNodeId = nodeId; // Used by logLevelThreadRun()
+ m_started_nodes.push_back(nodeId);
Uint32 theData[25];
theData[0] = EventReport::Connected;
theData[1] = nodeId;
+ eventReport(_ownNodeId, theData);
} else {
handleStopReply(nodeId, 0);
- theConfCount++; // Increment the event subscr conf count because
-
+
Uint32 theData[25];
theData[0] = EventReport::Disconnected;
theData[1] = nodeId;
-
+
eventReport(_ownNodeId, theData);
- g_EventLogger.info("Lost connection to node %d", nodeId);
}
}
@@ -2278,7 +2079,7 @@ MgmtSrvr::signalReceivedNotification(void* mgmtSrvr,
//****************************************************************************
//****************************************************************************
void
-MgmtSrvr::nodeStatusNotification(void* mgmSrv, NodeId nodeId,
+MgmtSrvr::nodeStatusNotification(void* mgmSrv, Uint32 nodeId,
bool alive, bool nfComplete)
{
if(!(!alive && nfComplete))
@@ -2294,87 +2095,196 @@ MgmtSrvr::getNodeType(NodeId nodeId) const
return nodeTypes[nodeId];
}
-#ifdef NDB_WIN32
-static NdbMutex & f_node_id_mutex = * NdbMutex_Create();
-#else
-static NdbMutex f_node_id_mutex = NDB_MUTEX_INITIALIZER;
-#endif
-
bool
MgmtSrvr::alloc_node_id(NodeId * nodeId,
enum ndb_mgm_node_type type,
struct sockaddr *client_addr,
- SOCKET_SIZE_TYPE *client_addr_len)
+ SOCKET_SIZE_TYPE *client_addr_len,
+ BaseString &error_string)
{
- Guard g(&f_node_id_mutex);
-#if 0
- ndbout << "MgmtSrvr::getFreeNodeId type=" << type
- << " *nodeid=" << *nodeId << endl;
-#endif
-
+ DBUG_ENTER("MgmtSrvr::alloc_node_id");
+ DBUG_PRINT("enter", ("nodeid=%d, type=%d, client_addr=%d",
+ *nodeId, type, client_addr));
+ if (g_no_nodeid_checks) {
+ if (*nodeId == 0) {
+ error_string.appfmt("no-nodeid-checks set in management server.\n"
+ "node id must be set explicitly in connectstring");
+ DBUG_RETURN(false);
+ }
+ DBUG_RETURN(true);
+ }
+ Guard g(m_node_id_mutex);
+ int no_mgm= 0;
NodeBitmask connected_nodes(m_reserved_nodes);
- if (theFacade && theFacade->theClusterMgr) {
- for(Uint32 i = 0; i < MAX_NODES; i++)
- if (getNodeType(i) == NDB_MGM_NODE_TYPE_NDB) {
- const ClusterMgr::Node &node= theFacade->theClusterMgr->getNodeInfo(i);
- if (node.connected)
- connected_nodes.bitOR(node.m_state.m_connected_nodes);
+ for(Uint32 i = 0; i < MAX_NODES; i++)
+ {
+ if (getNodeType(i) == NDB_MGM_NODE_TYPE_NDB &&
+ theFacade && theFacade->theClusterMgr) {
+ const ClusterMgr::Node &node= theFacade->theClusterMgr->getNodeInfo(i);
+ if (node.connected) {
+ connected_nodes.bitOR(node.m_state.m_connected_nodes);
}
- }
-
- ndb_mgm_configuration_iterator iter(*(ndb_mgm_configuration *)_config->m_configValues,
- CFG_SECTION_NODE);
+ } else if (getNodeType(i) == NDB_MGM_NODE_TYPE_MGM)
+ no_mgm++;
+ }
+ bool found_matching_id= false;
+ bool found_matching_type= false;
+ bool found_free_node= false;
+ unsigned id_found= 0;
+ const char *config_hostname= 0;
+ struct in_addr config_addr= {0};
+ int r_config_addr= -1;
+ unsigned type_c= 0;
+
+ ndb_mgm_configuration_iterator
+ iter(*(ndb_mgm_configuration *)_config->m_configValues, CFG_SECTION_NODE);
for(iter.first(); iter.valid(); iter.next()) {
unsigned tmp= 0;
if(iter.get(CFG_NODE_ID, &tmp)) abort();
- if (connected_nodes.get(tmp))
- continue;
if (*nodeId && *nodeId != tmp)
continue;
- unsigned type_c;
+ found_matching_id= true;
if(iter.get(CFG_TYPE_OF_SECTION, &type_c)) abort();
- if(type_c != type)
+ if(type_c != (unsigned)type)
+ continue;
+ found_matching_type= true;
+ if (connected_nodes.get(tmp))
continue;
- const char *config_hostname = 0;
+ found_free_node= true;
if(iter.get(CFG_NODE_HOST, &config_hostname)) abort();
-
- if (config_hostname && config_hostname[0] != 0 && client_addr) {
+ if (config_hostname && config_hostname[0] == 0)
+ config_hostname= 0;
+ else if (client_addr) {
// check hostname compatibility
- struct in_addr config_addr;
- const void *tmp= &(((sockaddr_in*)client_addr)->sin_addr);
- if(Ndb_getInAddr(&config_addr, config_hostname) != 0
- || memcmp(&config_addr, tmp, sizeof(config_addr)) != 0) {
+ const void *tmp_in= &(((sockaddr_in*)client_addr)->sin_addr);
+ if((r_config_addr= Ndb_getInAddr(&config_addr, config_hostname)) != 0
+ || memcmp(&config_addr, tmp_in, sizeof(config_addr)) != 0) {
struct in_addr tmp_addr;
if(Ndb_getInAddr(&tmp_addr, "localhost") != 0
- || memcmp(&tmp_addr, tmp, sizeof(config_addr)) != 0) {
+ || memcmp(&tmp_addr, tmp_in, sizeof(config_addr)) != 0) {
// not localhost
#if 0
- ndbout << "MgmtSrvr::getFreeNodeId compare failed for \"" << config_hostname
- << "\" id=" << tmp << endl;
+ ndbout << "MgmtSrvr::getFreeNodeId compare failed for \""
+ << config_hostname
+ << "\" id=" << tmp << endl;
#endif
continue;
}
// connecting through localhost
- // check if config_hostname match hostname
- char my_hostname[256];
- if (gethostname(my_hostname, sizeof(my_hostname)) != 0)
- continue;
- if(Ndb_getInAddr(&tmp_addr, my_hostname) != 0
- || memcmp(&tmp_addr, &config_addr, sizeof(config_addr)) != 0) {
- // no match
+ // check if config_hostname is local
+ if (!SocketServer::tryBind(0,config_hostname)) {
continue;
}
}
+ } else { // client_addr == 0
+ if (!SocketServer::tryBind(0,config_hostname)) {
+ continue;
+ }
}
- *nodeId= tmp;
- m_reserved_nodes.set(tmp);
-#if 0
- ndbout << "MgmtSrvr::getFreeNodeId found type=" << type
- << " *nodeid=" << *nodeId << endl;
-#endif
- return true;
+ if (*nodeId != 0 ||
+ type != NDB_MGM_NODE_TYPE_MGM ||
+ no_mgm == 1) { // any match is ok
+ id_found= tmp;
+ break;
+ }
+ if (id_found) { // mgmt server may only have one match
+ error_string.appfmt("Ambiguous node ids %d and %d.\n"
+ "Suggest specifying node id in connectstring,\n"
+ "or specifying unique host names in config file.",
+ id_found, tmp);
+ DBUG_RETURN(false);
+ }
+ if (config_hostname == 0) {
+ error_string.appfmt("Ambiguity for node id %d.\n"
+ "Suggest specifying node id in connectstring,\n"
+ "or specifying unique host names in config file,\n"
+ "or specifying just one mgmt server in config file.",
+ tmp);
+ DBUG_RETURN(false);
+ }
+ id_found= tmp; // mgmt server matched, check for more matches
+ }
+
+ if (id_found)
+ {
+ *nodeId= id_found;
+ DBUG_PRINT("info", ("allocating node id %d",*nodeId));
+ {
+ int r= 0;
+ if (client_addr)
+ m_connect_address[id_found]=
+ ((struct sockaddr_in *)client_addr)->sin_addr;
+ else if (config_hostname)
+ r= Ndb_getInAddr(&(m_connect_address[id_found]), config_hostname);
+ else {
+ char name[256];
+ r= gethostname(name, sizeof(name));
+ if (r == 0) {
+ name[sizeof(name)-1]= 0;
+ r= Ndb_getInAddr(&(m_connect_address[id_found]), name);
+ }
+ }
+ if (r)
+ m_connect_address[id_found].s_addr= 0;
+ }
+ m_reserved_nodes.set(id_found);
+ DBUG_RETURN(true);
+ }
+
+ if (found_matching_type && !found_free_node) {
+ // temporary failure: we may not yet have the latest connect status
+ // from the db nodes, so force a heartbeat to refresh it
+ global_flag_send_heartbeat_now= 1;
}
- return false;
+
+ BaseString type_string, type_c_string;
+ {
+ const char *alias, *str;
+ alias= ndb_mgm_get_node_type_alias_string(type, &str);
+ type_string.assfmt("%s(%s)", alias, str);
+ alias= ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type)type_c,
+ &str);
+ type_c_string.assfmt("%s(%s)", alias, str);
+ }
+
+ if (*nodeId == 0) {
+ if (found_matching_id)
+ if (found_matching_type)
+ if (found_free_node)
+ error_string.appfmt("Connection done from wrong host ip %s.",
+ inet_ntoa(((struct sockaddr_in *)
+ (client_addr))->sin_addr));
+ else
+ error_string.appfmt("No free node id found for %s.",
+ type_string.c_str());
+ else
+ error_string.appfmt("No %s node defined in config file.",
+ type_string.c_str());
+ else
+ error_string.append("No nodes defined in config file.");
+ } else {
+ if (found_matching_id)
+ if (found_matching_type)
+ if (found_free_node) {
+ // have to split these into two since inet_ntoa overwrites itself
+ error_string.appfmt("Connection with id %d done from wrong host ip %s,",
+ *nodeId, inet_ntoa(((struct sockaddr_in *)
+ (client_addr))->sin_addr));
+ error_string.appfmt(" expected %s(%s).", config_hostname,
+ r_config_addr ?
+ "lookup failed" : inet_ntoa(config_addr));
+ } else
+ error_string.appfmt("Id %d already allocated by another node.",
+ *nodeId);
+ else
+ error_string.appfmt("Id %d configured as %s, connect attempted as %s.",
+ *nodeId, type_c_string.c_str(),
+ type_string.c_str());
+ else
+ error_string.appfmt("No node defined with id=%d in config file.",
+ *nodeId);
+ }
+ DBUG_RETURN(false);
}
bool
@@ -2394,91 +2304,23 @@ MgmtSrvr::getNextNodeId(NodeId * nodeId, enum ndb_mgm_node_type type) const
return true;
}
+#include "Services.hpp"
+
void
MgmtSrvr::eventReport(NodeId nodeId, const Uint32 * theData)
{
const EventReport * const eventReport = (EventReport *)&theData[0];
-
+
EventReport::EventType type = eventReport->getEventType();
-
- if (type == EventReport::TransReportCounters ||
- type == EventReport::OperationReportCounters) {
-
- if (_isClusterLogStatActive) {
- g_EventLogger.log(type, theData, nodeId);
- }
-
- if (_isStatPortActive) {
- char theTime[128];
- struct tm* tm_now;
- time_t now;
- now = time((time_t*)NULL);
-#ifdef NDB_WIN32
- tm_now = localtime(&now);
-#else
- tm_now = gmtime(&now);
-#endif
-
- snprintf(theTime, sizeof(theTime),
- STATISTIC_DATE,
- tm_now->tm_year + 1900,
- tm_now->tm_mon,
- tm_now->tm_mday,
- tm_now->tm_hour,
- tm_now->tm_min,
- tm_now->tm_sec);
-
- char str[255];
-
- if (type == EventReport::TransReportCounters) {
- snprintf(str, sizeof(str),
- STATISTIC_LINE,
- theTime,
- (int)now,
- nodeId,
- theData[1],
- theData[2],
- theData[3],
- // theData[4], simple reads
- theData[5],
- theData[6],
- theData[7],
- theData[8]);
- } else if (type == EventReport::OperationReportCounters) {
- snprintf(str, sizeof(str),
- OP_STATISTIC_LINE,
- theTime,
- (int)now,
- nodeId,
- theData[1]);
- }
-
- if(m_statisticsListner != 0){
- m_statisticsListner->println_statistics(str);
- }
- }
-
- return;
-
- } // if (type ==
-
// Log event
- g_EventLogger.log(type, theData, nodeId);
-
+ g_EventLogger.log(type, theData, nodeId,
+ &m_statisticsListner.m_clients[0].m_logLevel);
+ m_statisticsListner.log(type, theData, nodeId);
}
/***************************************************************************
* Backup
***************************************************************************/
-
-MgmtSrvr::BackupCallback
-MgmtSrvr::setCallback(BackupCallback aCall)
-{
- BackupCallback ret = m_backupCallback;
- m_backupCallback = aCall;
- return ret;
-}
-
int
MgmtSrvr::startBackup(Uint32& backupId, bool waitCompleted)
{
@@ -2585,102 +2427,18 @@ MgmtSrvr::abortBackup(Uint32 backupId)
void
MgmtSrvr::backupCallback(BackupEvent & event)
{
- char str[255];
-
- bool ok = false;
+ m_lastBackupEvent = event;
switch(event.Event){
- case BackupEvent::BackupStarted:
- ok = true;
- snprintf(str, sizeof(str),
- "Backup %d started", event.Started.BackupId);
- break;
case BackupEvent::BackupFailedToStart:
- ok = true;
- snprintf(str, sizeof(str),
- "Backup failed to start (Backup error %d)",
- event.FailedToStart.ErrorCode);
- break;
- case BackupEvent::BackupCompleted:
- ok = true;
- snprintf(str, sizeof(str),
- "Backup %d completed",
- event.Completed.BackupId);
- g_EventLogger.info(str);
-
- snprintf(str, sizeof(str),
- " StartGCP: %d StopGCP: %d",
- event.Completed.startGCP, event.Completed.stopGCP);
- g_EventLogger.info(str);
-
- snprintf(str, sizeof(str),
- " #Records: %d #LogRecords: %d",
- event.Completed.NoOfRecords, event.Completed.NoOfLogRecords);
- g_EventLogger.info(str);
-
- snprintf(str, sizeof(str),
- " Data: %d bytes Log: %d bytes",
- event.Completed.NoOfBytes, event.Completed.NoOfLogBytes);
- break;
case BackupEvent::BackupAborted:
- ok = true;
- snprintf(str, sizeof(str),
- "Backup %d has been aborted reason %d",
- event.Aborted.BackupId,
- event.Aborted.Reason);
- break;
- }
- if(!ok){
- snprintf(str, sizeof(str),
- "Unknown backup event: %d",
- event.Event);
-
- }
- g_EventLogger.info(str);
-
- switch (theWaitState){
- case WAIT_BACKUP_STARTED:
- switch(event.Event){
- case BackupEvent::BackupStarted:
- case BackupEvent::BackupFailedToStart:
- m_lastBackupEvent = event;
- theWaitState = NO_WAIT;
- break;
- default:
- snprintf(str, sizeof(str),
- "Received event %d in unexpected state WAIT_BACKUP_STARTED",
- event.Event);
- g_EventLogger.info(str);
- return;
- }
-
+ case BackupEvent::BackupCompleted:
+ theWaitState = NO_WAIT;
break;
- case WAIT_BACKUP_COMPLETED:
- switch(event.Event){
- case BackupEvent::BackupCompleted:
- case BackupEvent::BackupAborted:
- case BackupEvent::BackupFailedToStart:
- m_lastBackupEvent = event;
+ case BackupEvent::BackupStarted:
+ if(theWaitState == WAIT_BACKUP_STARTED)
theWaitState = NO_WAIT;
- break;
- default:
- snprintf(str, sizeof(str),
- "Received event %d in unexpected state WAIT_BACKUP_COMPLETED",
- event.Event);
- g_EventLogger.info(str);
- return;
- }
- break;
- default:
- snprintf(str, sizeof(str), "Received event %d in unexpected state = %d",
- event.Event, theWaitState);
- g_EventLogger.info(str);
- return;
-
- }
-
- if(m_backupCallback != 0){
- (* m_backupCallback)(event);
}
+ return;
}
@@ -2772,7 +2530,11 @@ MgmtSrvr::Allocated_resources::Allocated_resources(MgmtSrvr &m)
MgmtSrvr::Allocated_resources::~Allocated_resources()
{
- Guard g(&f_node_id_mutex);
+ Guard g(m_mgmsrv.m_node_id_mutex);
+ if (!m_reserved_nodes.isclear()) {
+ // node has been reserved, force update signal to ndb nodes
+ global_flag_send_heartbeat_now= 1;
+ }
m_mgmsrv.m_reserved_nodes.bitANDC(m_reserved_nodes);
}
@@ -2864,15 +2626,15 @@ MgmtSrvr::setDbParameter(int node, int param, const char * value,
switch(p_type){
case 0:
res = i2.set(param, val_32);
- ndbout_c("Updateing node %d param: %d to %d", node, param, val_32);
+ ndbout_c("Updating node %d param: %d to %d", node, param, val_32);
break;
case 1:
res = i2.set(param, val_64);
- ndbout_c("Updateing node %d param: %d to %Ld", node, param, val_32);
+ ndbout_c("Updating node %d param: %d to %Ld", node, param, val_64);
break;
case 2:
res = i2.set(param, val_char);
- ndbout_c("Updateing node %d param: %d to %s", node, param, val_char);
+ ndbout_c("Updating node %d param: %d to %s", node, param, val_char);
break;
default:
abort();
@@ -2888,3 +2650,7 @@ template class Vector<SigMatch>;
#if __SUNPRO_CC != 0x560
template bool SignalQueue::waitFor<SigMatch>(Vector<SigMatch>&, SigMatch*&, NdbApiSignal*&, unsigned);
#endif
+
+template class MutexVector<unsigned short>;
+template class MutexVector<MgmStatService::StatListener>;
+template class MutexVector<EventSubscribeReq>;
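
The MgmtSrvr::send() helper added above follows one addressing rule: node == 0 means "every node of the given type", any other value targets only that node id. Below is a minimal standalone sketch of that selection loop; the nodeTypes layout and the deliver() stub (standing in for theFacade->sendSignalUnCond()) are assumptions for illustration only, not part of the patch.

#include <cstdio>

// illustrative layout only; real node types come from the cluster configuration
enum { MAX_NODES = 6, NODE_TYPE_DB = 1, NODE_TYPE_MGM = 2 };
static int nodeTypes[MAX_NODES] = { 0, NODE_TYPE_MGM, NODE_TYPE_DB,
                                    NODE_TYPE_DB, NODE_TYPE_MGM, NODE_TYPE_DB };

// stand-in for theFacade->sendSignalUnCond(signal, node)
static void deliver(unsigned node) { std::printf("signal -> node %u\n", node); }

static void send_to(unsigned node, int node_type) {
  unsigned max = (node == 0) ? MAX_NODES : node + 1;
  for (; node < max; node++) {
    // skip nodes of the wrong type without indexing past the end
    while (node < max && nodeTypes[node] != node_type) node++;
    if (node == max)
      break;
    deliver(node);
  }
}

int main() {
  send_to(0, NODE_TYPE_DB);   // broadcast: nodes 2, 3 and 5 above
  send_to(4, NODE_TYPE_MGM);  // single target: node 4 only
  return 0;
}
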
diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp
index b26eaeb4ab9..a5f21b6bc4a 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.hpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.hpp
@@ -28,8 +28,8 @@
#include <signaldata/ManagementServer.hpp>
#include "SignalQueue.hpp"
#include <ndb_version.h>
-
-#include "NodeLogLevelList.hpp"
+#include <EventLogger.hpp>
+#include <signaldata/EventSubscribeReq.hpp>
/**
* @desc Block number for Management server.
@@ -43,6 +43,29 @@ class Config;
class SetLogLevelOrd;
class SocketServer;
+class MgmStatService : public EventLoggerBase
+{
+ friend class MgmtSrvr;
+public:
+ struct StatListener : public EventLoggerBase {
+ NDB_SOCKET_TYPE m_socket;
+ };
+
+private:
+ class MgmtSrvr * m_mgmsrv;
+ MutexVector<StatListener> m_clients;
+public:
+ MgmStatService(class MgmtSrvr * m) : m_clients(5) {
+ m_mgmsrv = m;
+ }
+
+ void add_listener(const StatListener&);
+
+ void log(int eventType, const Uint32* theData, NodeId nodeId);
+
+ void stopSessions();
+};
+
/**
* @class MgmtSrvr
* @brief Main class for the management server.
@@ -63,11 +86,6 @@ class SocketServer;
class MgmtSrvr {
public:
- class StatisticsListner {
- public:
- virtual void println_statistics(const BaseString &s) = 0;
- };
-
// some compilers need all of this
class Allocated_resources;
friend class Allocated_resources;
@@ -83,11 +101,7 @@ public:
MgmtSrvr &m_mgmsrv;
NodeBitmask m_reserved_nodes;
};
-
- /**
- * Set a reference to the socket server.
- */
- void setStatisticsListner(StatisticsListner* listner);
+ NdbMutex *m_node_id_mutex;
/**
* Start/initate the event log.
@@ -151,15 +165,6 @@ public:
STATIC_CONST( NO_CONTACT_WITH_DB_NODES = 5030 );
/**
- * This class holds all statistical variables fetched with
- * the getStatistics methods.
- */
- class Statistics { // TODO, Real statistic data to be added
- public:
- int _test1;
- };
-
- /**
* This enum specifies the different signal logging modes possible to set
* with the setSignalLoggingMode method.
*/
@@ -169,7 +174,7 @@ public:
MgmtSrvr(NodeId nodeId, /* Local nodeid */
const BaseString &config_filename, /* Where to save config */
- const BaseString &ndb_config_filename, /* Ndb.cfg filename */
+ LocalConfig &local_config, /* Ndb.cfg filename */
Config * config);
NodeId getOwnNodeId() const {return _ownNodeId;};
@@ -179,7 +184,7 @@ public:
* @return true if succeeded, otherwise false
*/
bool check_start(); // may be run before start to check that some things are ok
- bool start();
+ bool start(BaseString &error_string);
~MgmtSrvr();
@@ -206,7 +211,7 @@ public:
typedef void (* EnterSingleCallback)(int nodeId, void * anyData,
int errorCode);
typedef void (* ExitSingleCallback)(int nodeId, void * anyData,
- int errorCode);
+ int errorCode);
/**
* Lock configuration
@@ -313,13 +318,6 @@ public:
bool abort = false,
int * stopCount = 0, StopCallback = 0, void * anyData = 0);
- int setEventReportingLevel(int processId,
- const class SetLogLevelOrd & logLevel,
- bool isResend = false);
-
- int startStatisticEventReporting(int level = 5);
-
-
struct BackupEvent {
enum Event {
BackupStarted = 1,
@@ -356,8 +354,6 @@ public:
/**
* Backup functionality
*/
- typedef void (* BackupCallback)(const BackupEvent& Event);
- BackupCallback setCallback(BackupCallback);
int startBackup(Uint32& backupId, bool waitCompleted = false);
int abortBackup(Uint32 backupId);
int performBackup(Uint32* backupId);
@@ -377,22 +373,8 @@ public:
// INVALID_LEVEL
//**************************************************************************
- /**
- * Sets the Node's log level, i.e., its local event reporting.
- *
- * @param processId the DB node id.
- * @param logLevel the log level.
- * @param isResend Flag to indicate for resending log levels
- * during node restart
-
- * @return 0 if successful or NO_CONTACT_WITH_PROCESS,
- * SEND_OR_RECEIVE_FAILED,
- * COULD_NOT_ALLOCATE_MEMORY
- */
- int setNodeLogLevel(int processId,
- const class SetLogLevelOrd & logLevel,
- bool isResend = false);
-
+ int setEventReportingLevelImpl(int processId, const EventSubscribeReq& ll);
+ int setNodeLogLevelImpl(int processId, const SetLogLevelOrd & ll);
/**
* Insert an error in a DB process.
@@ -467,7 +449,8 @@ public:
*/
bool getNextNodeId(NodeId * _nodeId, enum ndb_mgm_node_type type) const ;
bool alloc_node_id(NodeId * _nodeId, enum ndb_mgm_node_type type,
- struct sockaddr *client_addr, SOCKET_SIZE_TYPE *client_addr_len);
+ struct sockaddr *client_addr, SOCKET_SIZE_TYPE *client_addr_len,
+ BaseString &error_string);
/**
*
@@ -508,11 +491,6 @@ public:
NodeId getPrimaryNode() const;
/**
- * Returns the statistics port number.
- * @return statistic port number.
- */
- int getStatPort() const;
- /**
* Returns the port number.
* @return port number.
*/
@@ -520,14 +498,12 @@ public:
int setDbParameter(int node, int parameter, const char * value, BaseString&);
+ const char *get_connect_address(Uint32 node_id) { return inet_ntoa(m_connect_address[node_id]); }
//**************************************************************************
private:
//**************************************************************************
- int setEventReportingLevelImpl(int processId,
- const class SetLogLevelOrd & logLevel,
- bool isResend = false);
-
+ int setEventReportingLevel(int processId, LogLevel::EventCategory, Uint32);
/**
* Check if it is possible to send a signal to a (DB) process
@@ -553,16 +529,13 @@ private:
NdbMutex *m_configMutex;
const Config * _config;
Config * m_newConfig;
+ LocalConfig &m_local_config;
BaseString m_configFilename;
- BaseString m_localNdbConfigFilename;
Uint32 m_nextConfigGenerationNumber;
NodeBitmask m_reserved_nodes;
Allocated_resources m_allocated_resources;
-
- int _setVarReqResult; // The result of the SET_VAR_REQ response
- Statistics _statistics; // handleSTATISTICS_CONF store the result here,
- // and getStatistics reads it.
+ struct in_addr m_connect_address[MAX_NODES];
//**************************************************************************
// Specific signal handling methods
@@ -595,14 +568,6 @@ private:
// Returns: -
//**************************************************************************
- int handleSTATISTICS_CONF(NdbApiSignal* signal);
- //**************************************************************************
- // Description: Handle reception of signal STATISTICS_CONF
- // Parameters:
- // signal: The recieved signal
- // Returns: TODO, to be defined
- //**************************************************************************
-
void handle_MGM_LOCK_CONFIG_REQ(NdbApiSignal *signal);
void handle_MGM_UNLOCK_CONFIG_REQ(NdbApiSignal *signal);
@@ -628,7 +593,6 @@ private:
*/
enum WaitSignalType {
NO_WAIT, // We don't expect to receive any signal
- WAIT_STATISTICS, // Accept STATISTICS_CONF
WAIT_SET_VAR, // Accept SET_VAR_CONF and SET_VAR_REF
WAIT_SUBSCRIBE_CONF, // Accept event subscription confirmation
WAIT_STOP,
@@ -699,7 +663,7 @@ private:
* shall receive the notification.
* @param processId: Id of the dead process.
*/
- static void nodeStatusNotification(void* mgmSrv, NodeId nodeId,
+ static void nodeStatusNotification(void* mgmSrv, Uint32 nodeId,
bool alive, bool nfCompleted);
/**
@@ -712,7 +676,7 @@ private:
//**************************************************************************
// General signal handling data
- static const unsigned int WAIT_FOR_RESPONSE_TIMEOUT = 300000; // Milliseconds
+ STATIC_CONST( WAIT_FOR_RESPONSE_TIMEOUT = 300000 ); // Milliseconds
// Max time to wait for a signal to arrive
NdbApiSignal* theSignalIdleList;
@@ -730,14 +694,6 @@ private:
class SignalQueue m_signalRecvQueue;
- enum ndb_mgm_node_type nodeTypes[MAX_NODES];
-
- int theConfCount; // The number of expected conf signals
-
- StatisticsListner * m_statisticsListner; // Used for sending statistics info
- bool _isStatPortActive;
- bool _isClusterLogStatActive;
-
struct StopRecord {
StopRecord(){ inUse = false; callback = 0; singleUserMode = false;}
bool inUse;
@@ -762,10 +718,16 @@ private:
void handleStopReply(NodeId nodeId, Uint32 errCode);
int translateStopRef(Uint32 errCode);
-
+
bool _isStopThread;
int _logLevelThreadSleep;
- int _startedNodeId;
+ MutexVector<NodeId> m_started_nodes;
+ MutexVector<EventSubscribeReq> m_log_level_requests;
+ LogLevel m_nodeLogLevel[MAX_NODES];
+ enum ndb_mgm_node_type nodeTypes[MAX_NODES];
+ friend class MgmApiSession;
+ friend class MgmStatService;
+ MgmStatService m_statisticsListner;
/**
* Handles the thread which upon a 'Node is started' event will
@@ -779,15 +741,12 @@ private:
static void *signalRecvThread_C(void *);
void signalRecvThreadRun();
- NodeLogLevelList* _nodeLogLevelList;
- NodeLogLevelList* _clusterLogLevelList;
-
void backupCallback(BackupEvent &);
- BackupCallback m_backupCallback;
BackupEvent m_lastBackupEvent;
Config *_props;
+ int send(class NdbApiSignal* signal, Uint32 node, Uint32 node_type);
public:
/**
* This method does not exist
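
The MgmStatService declared in this header keeps one effective LogLevel for the whole set of event listeners: the per-category maximum over every client's level, pushed to the data nodes as a new EventSubscribeReq only when that maximum actually changes. A minimal sketch of the merge step follows; the fixed-size array standing in for the real LogLevel class and the category count are assumptions for illustration.

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdio>
#include <vector>

// stand-in for LogLevel: one level (0..15) per event category
constexpr std::size_t CATEGORIES = 12;
using Level = std::array<unsigned, CATEGORIES>;

// per-category maximum over all listeners, i.e. repeated LogLevel::set_max()
static Level merged(const std::vector<Level>& clients) {
  Level out{};  // all categories start at 0
  for (const Level& c : clients)
    for (std::size_t i = 0; i < CATEGORIES; i++)
      out[i] = std::max(out[i], c[i]);
  return out;
}

int main() {
  std::vector<Level> clients(2, Level{});
  clients[0][0] = 7;   // first listener wants category 0 at level 7
  clients[1][0] = 15;  // second listener wants category 0 at level 15
  std::printf("effective level for category 0: %u\n", merged(clients)[0]);
  return 0;
}
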
diff --git a/ndb/src/mgmsrv/MgmtSrvrConfig.cpp b/ndb/src/mgmsrv/MgmtSrvrConfig.cpp
index 44c2aadd1e2..1d51061e909 100644
--- a/ndb/src/mgmsrv/MgmtSrvrConfig.cpp
+++ b/ndb/src/mgmsrv/MgmtSrvrConfig.cpp
@@ -288,8 +288,7 @@ MgmtSrvr::readConfig() {
Config *
MgmtSrvr::fetchConfig() {
- ConfigRetriever cr(NDB_VERSION, NODE_TYPE_MGM);
- cr.setLocalConfigFileName(m_localNdbConfigFilename.c_str());
+ ConfigRetriever cr(m_local_config, NDB_VERSION, NODE_TYPE_MGM);
struct ndb_mgm_configuration * tmp = cr.getConfig();
if(tmp != 0){
Config * conf = new Config();
diff --git a/ndb/src/mgmsrv/NodeLogLevel.cpp b/ndb/src/mgmsrv/NodeLogLevel.cpp
deleted file mode 100644
index 5271cdb0f2b..00000000000
--- a/ndb/src/mgmsrv/NodeLogLevel.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include "NodeLogLevel.hpp"
-// TODO_RONM: Clearly getCategory and getLevel is not correctly coded. Must be taken care of.
-
-NodeLogLevel::NodeLogLevel(int nodeId, const SetLogLevelOrd& ll)
-{
- m_nodeId = nodeId;
- m_logLevel = ll;
-}
-
-NodeLogLevel::~NodeLogLevel()
-{
-}
-
-int
-NodeLogLevel::getNodeId() const
-{
- return m_nodeId;
-}
-
-Uint32
-NodeLogLevel::getCategory() const
-{
- for (Uint32 i = 0; i < m_logLevel.noOfEntries; i++)
- {
- return m_logLevel.theCategories[i];
- }
- return 0;
-}
-
-int
-NodeLogLevel::getLevel() const
-{
- for (Uint32 i = 0; i < m_logLevel.noOfEntries; i++)
- {
- return m_logLevel.theLevels[i];
- }
- return 0;
-}
-
-void
-NodeLogLevel::setLevel(int level)
-{
- for (Uint32 i = 0; i < m_logLevel.noOfEntries; i++)
- {
- m_logLevel.theLevels[i] = level;
- }
-
-}
-
-SetLogLevelOrd
-NodeLogLevel::getLogLevelOrd() const
-{
- return m_logLevel;
-}
diff --git a/ndb/src/mgmsrv/NodeLogLevel.hpp b/ndb/src/mgmsrv/NodeLogLevel.hpp
deleted file mode 100644
index 3ad758cde99..00000000000
--- a/ndb/src/mgmsrv/NodeLogLevel.hpp
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef NODELOGLEVEL_H
-#define NODELOGLEVEL_H
-
-#include <ndb_global.h>
-
-#include <signaldata/SetLogLevelOrd.hpp>
-
-/**
- * Holds a DB node's log level settings for both local and event log levels.
- * It only holds one log level setting even though SetLogLevelOrd can handle
- * multiple log levels at once, it is not used in that way in the managment
- * server.
- *
- * @version #@ $Id: NodeLogLevel.hpp,v 1.2 2003/07/05 17:40:22 elathal Exp $
- */
-class NodeLogLevel
-{
-public:
- NodeLogLevel(int nodeId, const SetLogLevelOrd& ll);
- ~NodeLogLevel();
-
- int getNodeId() const;
- Uint32 getCategory() const;
- int getLevel() const;
- void setLevel(int level);
- SetLogLevelOrd getLogLevelOrd() const;
-
-private:
- NodeLogLevel();
- NodeLogLevel(const NodeLogLevel&);
- bool operator == (const NodeLogLevel&);
- NodeLogLevel operator = (const NodeLogLevel&);
-
- int m_nodeId;
- SetLogLevelOrd m_logLevel;
-};
-
-#endif
diff --git a/ndb/src/mgmsrv/NodeLogLevelList.cpp b/ndb/src/mgmsrv/NodeLogLevelList.cpp
deleted file mode 100644
index 6c7c091c1a8..00000000000
--- a/ndb/src/mgmsrv/NodeLogLevelList.cpp
+++ /dev/null
@@ -1,182 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include <ndb_global.h>
-
-#include "NodeLogLevelList.hpp"
-#include "NodeLogLevel.hpp"
-
-//
-// PUBLIC
-//
-
-NodeLogLevelList::NodeLogLevelList() :
- m_size(0),
- m_pHeadNode(NULL),
- m_pTailNode(NULL),
- m_pCurrNode(NULL)
-{
-}
-
-NodeLogLevelList::~NodeLogLevelList()
-{
- removeAll();
-}
-
-void
-NodeLogLevelList::add(NodeLogLevel* pNewNode)
-{
- NodeLogLevelNode* pNode = new NodeLogLevelNode();
-
- if (m_pHeadNode == NULL)
- {
- m_pHeadNode = pNode;
- pNode->pPrev = NULL;
- }
- else
- {
- m_pTailNode->pNext = pNode;
- pNode->pPrev = m_pTailNode;
- }
- m_pTailNode = pNode;
- pNode->pNext = NULL;
- pNode->pHandler = pNewNode;
-
- m_size++;
-}
-
-bool
-NodeLogLevelList::remove(NodeLogLevel* pRemoveNode)
-{
- NodeLogLevelNode* pNode = m_pHeadNode;
- bool removed = false;
- do
- {
- if (pNode->pHandler == pRemoveNode)
- {
- removeNode(pNode);
- removed = true;
- break;
- }
- } while ( (pNode = next(pNode)) != NULL);
-
- return removed;
-}
-
-void
-NodeLogLevelList::removeAll()
-{
- while (m_pHeadNode != NULL)
- {
- removeNode(m_pHeadNode);
- }
-}
-
-NodeLogLevel*
-NodeLogLevelList::next()
-{
- NodeLogLevel* pHandler = NULL;
- if (m_pCurrNode == NULL)
- {
- m_pCurrNode = m_pHeadNode;
- if (m_pCurrNode != NULL)
- {
- pHandler = m_pCurrNode->pHandler;
- }
- }
- else
- {
- m_pCurrNode = next(m_pCurrNode); // Next node
- if (m_pCurrNode != NULL)
- {
- pHandler = m_pCurrNode->pHandler;
- }
- }
-
- return pHandler;
-}
-
-int
-NodeLogLevelList::size() const
-{
- return m_size;
-}
-
-//
-// PRIVATE
-//
-
-NodeLogLevelList::NodeLogLevelNode*
-NodeLogLevelList::next(NodeLogLevelNode* pNode)
-{
- NodeLogLevelNode* pCurr = pNode;
- if (pNode->pNext != NULL)
- {
- pCurr = pNode->pNext;
- }
- else
- {
- // Tail
- pCurr = NULL;
- }
- return pCurr;
-}
-
-NodeLogLevelList::NodeLogLevelNode*
-NodeLogLevelList::prev(NodeLogLevelNode* pNode)
-{
- NodeLogLevelNode* pCurr = pNode;
- if (pNode->pPrev != NULL) // head
- {
- pCurr = pNode->pPrev;
- }
- else
- {
- // Head
- pCurr = NULL;
- }
-
- return pCurr;
-}
-
-void
-NodeLogLevelList::removeNode(NodeLogLevelNode* pNode)
-{
- if (pNode->pPrev == NULL) // If head
- {
- m_pHeadNode = pNode->pNext;
- }
- else
- {
- pNode->pPrev->pNext = pNode->pNext;
- }
-
- if (pNode->pNext == NULL) // if tail
- {
- m_pTailNode = pNode->pPrev;
- }
- else
- {
- pNode->pNext->pPrev = pNode->pPrev;
- }
-
- pNode->pNext = NULL;
- pNode->pPrev = NULL;
- delete pNode->pHandler; // Delete log handler
- delete pNode;
-
- m_size--;
-}
diff --git a/ndb/src/mgmsrv/NodeLogLevelList.hpp b/ndb/src/mgmsrv/NodeLogLevelList.hpp
deleted file mode 100644
index 4a55ee211e2..00000000000
--- a/ndb/src/mgmsrv/NodeLogLevelList.hpp
+++ /dev/null
@@ -1,93 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef NODELOGLEVELLIST_H
-#define NODELOGLEVELLIST_H
-
-class NodeLogLevel;
-
-/**
- * Provides a simple linked list of NodeLogLevel.
- *
- * @see NodeLogLevel
- * @version #@ $Id: NodeLogLevelList.hpp,v 1.1 2002/08/09 12:53:50 eyualex Exp $
- */
-class NodeLogLevelList
-{
-public:
- /**
- * Default Constructor.
- */
- NodeLogLevelList();
-
- /**
- * Destructor.
- */
- ~NodeLogLevelList();
-
- /**
- * Adds a new node.
- *
- * @param pNewHandler a new NodeLogLevel.
- */
- void add(NodeLogLevel* pNewNode);
-
- /**
- * Removes a NodeLogLevel from the list and call its destructor.
- *
- * @param pRemoveHandler the NodeLogLevel to remove
- */
- bool remove(NodeLogLevel* pRemoveNode);
-
- /**
- * Removes all items.
- */
- void removeAll();
-
- /**
- * Returns the next node in the list.
- * returns a node or NULL.
- */
- NodeLogLevel* next();
-
- /**
- * Returns the size of the list.
- */
- int size() const;
-private:
- /** List node */
- struct NodeLogLevelNode
- {
- NodeLogLevelNode* pPrev;
- NodeLogLevelNode* pNext;
- NodeLogLevel* pHandler;
- };
-
- NodeLogLevelNode* next(NodeLogLevelNode* pNode);
- NodeLogLevelNode* prev(NodeLogLevelNode* pNode);
-
- void removeNode(NodeLogLevelNode* pNode);
-
- int m_size;
-
- NodeLogLevelNode* m_pHeadNode;
- NodeLogLevelNode* m_pTailNode;
- NodeLogLevelNode* m_pCurrNode;
-};
-
-#endif
-
-
diff --git a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp
index 7bf408583de..5b552836955 100644
--- a/ndb/src/mgmsrv/Services.cpp
+++ b/ndb/src/mgmsrv/Services.cpp
@@ -23,6 +23,7 @@
#include <mgmapi.h>
#include <EventLogger.hpp>
#include <signaldata/SetLogLevelOrd.hpp>
+#include <LogLevel.hpp>
#include <BaseString.hpp>
#include <Base64.hpp>
@@ -133,7 +134,7 @@ ParserRow<MgmApiSession> commands[] = {
MGM_ARG("public key", String, Mandatory, "Public key"),
MGM_CMD("get version", &MgmApiSession::getVersion, ""),
-
+
MGM_CMD("get status", &MgmApiSession::getStatus, ""),
MGM_CMD("get info clusterlog", &MgmApiSession::getInfoClusterLog, ""),
@@ -236,7 +237,11 @@ ParserRow<MgmApiSession> commands[] = {
MGM_ARG("node", String, Mandatory, "Node"),
MGM_ARG("parameter", String, Mandatory, "Parameter"),
MGM_ARG("value", String, Mandatory, "Value"),
-
+
+ MGM_CMD("listen event", &MgmApiSession::listen_event, ""),
+ MGM_ARG("node", Int, Optional, "Node"),
+ MGM_ARG("filter", String, Mandatory, "Event category"),
+
MGM_END()
};
@@ -289,7 +294,8 @@ MgmApiSession::runSession() {
break;
}
}
- NDB_CLOSE_SOCKET(m_socket);
+ if(m_socket >= 0)
+ NDB_CLOSE_SOCKET(m_socket);
}
#ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT
@@ -413,11 +419,15 @@ MgmApiSession::get_nodeid(Parser_t::Context &,
NodeId tmp= nodeid;
if(tmp == 0 || !m_allocated_resources->is_reserved(tmp)){
+ BaseString error_string;
if (!m_mgmsrv.alloc_node_id(&tmp, (enum ndb_mgm_node_type)nodetype,
- &addr, &addrlen)){
+ &addr, &addrlen, error_string)){
+ const char *alias;
+ const char *str;
+ alias= ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type)
+ nodetype, &str);
m_output->println(cmd);
- m_output->println("result: no free nodeid %d for nodetype %d",
- nodeid, nodetype);
+ m_output->println("result: %s", error_string.c_str());
m_output->println("");
return;
}
@@ -551,7 +561,7 @@ MgmApiSession::getStatPort(Parser_t::Context &,
const class Properties &) {
m_output->println("get statport reply");
- m_output->println("tcpport: %d", m_mgmsrv.getStatPort());
+ m_output->println("tcpport: %d", 0);
m_output->println("");
}
@@ -753,13 +763,12 @@ MgmApiSession::bye(Parser<MgmApiSession>::Context &,
void
MgmApiSession::setClusterLogLevel(Parser<MgmApiSession>::Context &,
Properties const &args) {
- Uint32 node, level;
- BaseString categoryName, errorString;
+ Uint32 node, level, category;
+ BaseString errorString;
SetLogLevelOrd logLevel;
int result;
- logLevel.clear();
args.get("node", &node);
- args.get("category", categoryName);
+ args.get("category", &category);
args.get("level", &level);
/* XXX should use constants for this value */
@@ -768,25 +777,17 @@ MgmApiSession::setClusterLogLevel(Parser<MgmApiSession>::Context &,
goto error;
}
- categoryName.ndb_toupper();
-
- LogLevel::EventCategory category;
- if(!EventLogger::matchEventCategory(categoryName.c_str(), &category)) {
- errorString.assign("Unknown category");
- goto error;
- }
-
- logLevel.setLogLevel(category, level);
- result = m_mgmsrv.setEventReportingLevel(node, logLevel);
-
+ EventSubscribeReq req;
+ req.blockRef = 0;
+ req.noOfEntries = 1;
+ req.theData[0] = (category << 16) | level;
+ m_mgmsrv.m_log_level_requests.push_back(req);
+
m_output->println("set cluster loglevel reply");
- if(result != 0)
- m_output->println("result: %s", m_mgmsrv.getErrorText(result));
- else
- m_output->println("result: Ok");
+ m_output->println("result: Ok");
m_output->println("");
return;
- error:
+error:
m_output->println("set cluster loglevel reply");
m_output->println("result: %s", errorString.c_str());
m_output->println("");
@@ -795,13 +796,13 @@ MgmApiSession::setClusterLogLevel(Parser<MgmApiSession>::Context &,
void
MgmApiSession::setLogLevel(Parser<MgmApiSession>::Context &,
Properties const &args) {
- Uint32 node = 0, level = 0;
- BaseString categoryName, errorString;
+ Uint32 node = 0, level = 0, category;
+ BaseString errorString;
SetLogLevelOrd logLevel;
int result;
logLevel.clear();
args.get("node", &node);
- args.get("category", categoryName);
+ args.get("category", &category);
args.get("level", &level);
/* XXX should use constants for this value */
@@ -810,23 +811,14 @@ MgmApiSession::setLogLevel(Parser<MgmApiSession>::Context &,
goto error;
}
- categoryName.ndb_toupper();
-
- LogLevel::EventCategory category;
- if(!EventLogger::matchEventCategory(categoryName.c_str(), &category)) {
- errorString.assign("Unknown category");
- goto error;
- }
-
- logLevel.setLogLevel(category, level);
-
- result = m_mgmsrv.setNodeLogLevel(node, logLevel);
-
+ EventSubscribeReq req;
+ req.blockRef = node;
+ req.noOfEntries = 1;
+ req.theData[0] = (category << 16) | level;
+ m_mgmsrv.m_log_level_requests.push_back(req);
+
m_output->println("set loglevel reply");
- if(result != 0)
- m_output->println("result: %s", m_mgmsrv.getErrorText(result));
- else
- m_output->println("result: Ok");
+ m_output->println("result: Ok");
m_output->println("");
return;
error:
@@ -943,6 +935,7 @@ printNodeStatus(OutputStream *output,
output->println("node.%d.dynamic_id: %d", nodeId, dynamicId);
output->println("node.%d.node_group: %d", nodeId, nodeGroup);
output->println("node.%d.connect_count: %d", nodeId, connectCount);
+ output->println("node.%d.address: %s", nodeId, mgmsrv.get_connect_address(nodeId));
}
}
@@ -1015,8 +1008,9 @@ MgmApiSession::stop(Parser<MgmApiSession>::Context &,
}
int stop_self= 0;
+ size_t i;
- for(size_t i=0; i < nodes.size(); i++) {
+ for(i=0; i < nodes.size(); i++) {
if (nodes[i] == m_mgmsrv.getOwnNodeId()) {
stop_self= 1;
if (i != nodes.size()-1) {
@@ -1030,13 +1024,13 @@ MgmApiSession::stop(Parser<MgmApiSession>::Context &,
int stopped = 0, result = 0;
- for(size_t i=0; i < nodes.size(); i++)
+ for(i=0; i < nodes.size(); i++)
if (nodes[i] != m_mgmsrv.getOwnNodeId()) {
if((result = m_mgmsrv.stopNode(nodes[i], abort != 0)) == 0)
stopped++;
} else
stopped++;
-
+
m_output->println("stop reply");
if(result != 0)
m_output->println("result: %s", m_mgmsrv.getErrorText(result));
@@ -1129,7 +1123,7 @@ MgmApiSession::logSignals(Parser<MgmApiSession>::Context &,
args.get("blocks", blockList);
// fast fix - pekka
char buf[200];
- snprintf(buf, 200, "%s", blockList.c_str());
+ BaseString::snprintf(buf, 200, "%s", blockList.c_str());
Vector<BaseString> blocks;
blockName=strtok(buf,"|");
@@ -1244,33 +1238,94 @@ MgmApiSession::configChange(Parser_t::Context &,
m_output->println("");
}
-void
-MgmStatService::println_statistics(const BaseString &line){
- MutexVector<NDB_SOCKET_TYPE> copy(m_sockets.size());
- m_sockets.lock();
+static NdbOut&
+operator<<(NdbOut& out, const LogLevel & ll)
+{
+ out << "[LogLevel: ";
+ for(size_t i = 0; i<LogLevel::LOGLEVEL_CATEGORIES; i++)
+ out << ll.getLogLevel((LogLevel::EventCategory)i) << " ";
+ out << "]";
+ return out;
+}
+
+void
+MgmStatService::log(int eventType, const Uint32* theData, NodeId nodeId){
+
+ Uint32 threshold = 0;
+ LogLevel::EventCategory cat= LogLevel::llInvalid;
int i;
- for(i = m_sockets.size() - 1; i >= 0; i--){
- if(println_socket(m_sockets[i], MAX_WRITE_TIMEOUT, line.c_str()) == -1){
- copy.push_back(m_sockets[i]);
- m_sockets.erase(i, false);
+
+ for(i = 0; (unsigned)i<EventLogger::matrixSize; i++){
+ if(EventLogger::matrix[i].eventType == eventType){
+ cat = EventLogger::matrix[i].eventCategory;
+ threshold = EventLogger::matrix[i].threshold;
+ break;
+ }
+ }
+ if (cat == LogLevel::llInvalid)
+ return;
+
+ char m_text[256];
+ EventLogger::getText(m_text, sizeof(m_text), eventType, theData, nodeId);
+
+ Vector<NDB_SOCKET_TYPE> copy;
+ m_clients.lock();
+ for(i = m_clients.size() - 1; i >= 0; i--){
+ if(threshold <= m_clients[i].m_logLevel.getLogLevel(cat)){
+ if(m_clients[i].m_socket >= 0 &&
+ println_socket(m_clients[i].m_socket,
+ MAX_WRITE_TIMEOUT, m_text) == -1){
+ copy.push_back(m_clients[i].m_socket);
+ m_clients.erase(i, false);
+ }
}
}
- m_sockets.unlock();
+ m_clients.unlock();
- for(i = copy.size() - 1; i >= 0; i--){
+ for(i = 0; (unsigned)i < copy.size(); i++){
NDB_CLOSE_SOCKET(copy[i]);
- copy.erase(i);
}
- if(m_sockets.size() == 0 || false){
- m_mgmsrv->startStatisticEventReporting(0);
+
+ if(copy.size()){
+ LogLevel tmp; tmp.clear();
+ m_clients.lock();
+ for(i = 0; (unsigned)i < m_clients.size(); i++){
+ tmp.set_max(m_clients[i].m_logLevel);
+ }
+ m_clients.unlock();
+
+ if(!(tmp == m_logLevel)){
+ m_logLevel = tmp;
+ EventSubscribeReq req;
+ req = tmp;
+ req.blockRef = 0;
+ m_mgmsrv->m_log_level_requests.push_back(req);
+ }
+ }
+}
+
+void
+MgmStatService::add_listener(const StatListener& client){
+ m_clients.push_back(client);
+ LogLevel tmp = m_logLevel;
+ tmp.set_max(client.m_logLevel);
+
+ if(!(tmp == m_logLevel)){
+ m_logLevel = tmp;
+ EventSubscribeReq req;
+ req = tmp;
+ req.blockRef = 0;
+ m_mgmsrv->m_log_level_requests.push_back(req);
}
}
void
MgmStatService::stopSessions(){
- for(int i = m_sockets.size() - 1; i >= 0; i--){
- NDB_CLOSE_SOCKET(m_sockets[i]);
- m_sockets.erase(i);
+ for(int i = m_clients.size() - 1; i >= 0; i--){
+ if(m_clients[i].m_socket >= 0){
+ NDB_CLOSE_SOCKET(m_clients[i].m_socket);
+ m_clients.erase(i);
+ }
}
}
@@ -1294,6 +1349,75 @@ MgmApiSession::setParameter(Parser_t::Context &,
m_output->println("");
}
+void
+MgmApiSession::listen_event(Parser<MgmApiSession>::Context & ctx,
+ Properties const & args) {
+
+ BaseString node, param, value;
+ args.get("node", node);
+ args.get("filter", param);
+
+ int result = 0;
+ BaseString msg;
+
+ MgmStatService::StatListener le;
+ le.m_socket = m_socket;
+
+ Vector<BaseString> list;
+ param.trim();
+ param.split(list, " ,");
+ for(size_t i = 0; i<list.size(); i++){
+ Vector<BaseString> spec;
+ list[i].trim();
+ list[i].split(spec, "=:");
+ if(spec.size() != 2){
+ msg.appfmt("Invalid filter specification: >%s< >%s< %d",
+ param.c_str(), list[i].c_str(), spec.size());
+ result = -1;
+ goto done;
+ }
+
+ spec[0].trim().ndb_toupper();
+ int category = ndb_mgm_match_event_category(spec[0].c_str());
+ if(category == NDB_MGM_ILLEGAL_EVENT_CATEGORY){
+ category = atoi(spec[0].c_str());
+ if(category < NDB_MGM_MIN_EVENT_CATEGORY ||
+ category > NDB_MGM_MAX_EVENT_CATEGORY){
+ msg.appfmt("Unknown category: >%s<", spec[0].c_str());
+ result = -1;
+ goto done;
+ }
+ }
+
+ int level = atoi(spec[1].c_str());
+ if(level < 0 || level > 15){
+ msg.appfmt("Invalid level: >%s<", spec[1].c_str());
+ result = -1;
+ goto done;
+ }
+ category -= CFG_MIN_LOGLEVEL;
+ le.m_logLevel.setLogLevel((LogLevel::EventCategory)category, level);
+ }
+
+ if(list.size() == 0){
+ msg.appfmt("Empty filter specification");
+ result = -1;
+ goto done;
+ }
+
+ m_mgmsrv.m_statisticsListner.add_listener(le);
+
+ m_stop = true;
+ m_socket = -1;
+
+done:
+ m_output->println("listen event");
+ m_output->println("result: %d", result);
+ if(result != 0)
+ m_output->println("msg: %s", msg.c_str());
+ m_output->println("");
+}
+
template class MutexVector<int>;
template class Vector<ParserRow<MgmApiSession> const*>;
template class Vector<unsigned short>;
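
The listen_event handler above expects a filter of comma or space separated category=level (or category:level) pairs, with levels 0 to 15 and the category given as a name or a number. Here is a standalone sketch of that parse using plain std::string handling instead of BaseString::split(); the category names in the example filter are illustrative assumptions.

#include <cstdio>
#include <cstdlib>
#include <sstream>
#include <string>

int main() {
  std::string filter = "BACKUP=15 STARTUP=7";  // example filter string
  std::istringstream in(filter);
  std::string item;
  while (in >> item) {                         // split pairs on whitespace
    std::string::size_type eq = item.find_first_of("=:");
    if (eq == std::string::npos) {
      std::printf("Invalid filter specification: >%s<\n", item.c_str());
      return 1;
    }
    std::string category = item.substr(0, eq);
    int level = std::atoi(item.c_str() + eq + 1);
    if (level < 0 || level > 15) {
      std::printf("Invalid level: >%s<\n", item.c_str() + eq + 1);
      return 1;
    }
    std::printf("subscribe %s at level %d\n", category.c_str(), level);
  }
  return 0;
}
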
diff --git a/ndb/src/mgmsrv/Services.hpp b/ndb/src/mgmsrv/Services.hpp
index 9cf8b59be8f..e47820826b6 100644
--- a/ndb/src/mgmsrv/Services.hpp
+++ b/ndb/src/mgmsrv/Services.hpp
@@ -83,7 +83,8 @@ public:
void configChange(Parser_t::Context &ctx, const class Properties &args);
void setParameter(Parser_t::Context &ctx, const class Properties &args);
-
+ void listen_event(Parser_t::Context &ctx, const class Properties &args);
+
void repCommand(Parser_t::Context &ctx, const class Properties &args);
};
@@ -103,28 +104,4 @@ public:
}
};
-class MgmStatService : public SocketServer::Service,
- public MgmtSrvr::StatisticsListner
-{
- class MgmtSrvr * m_mgmsrv;
- MutexVector<NDB_SOCKET_TYPE> m_sockets;
-public:
- MgmStatService() : m_sockets(5) {
- m_mgmsrv = 0;
- }
-
- void setMgm(class MgmtSrvr * mgmsrv){
- m_mgmsrv = mgmsrv;
- }
-
- SocketServer::Session * newSession(NDB_SOCKET_TYPE socket){
- m_sockets.push_back(socket);
- m_mgmsrv->startStatisticEventReporting(5);
- return 0;
- }
-
- void stopSessions();
-
- void println_statistics(const BaseString &line);
-};
#endif
diff --git a/ndb/src/mgmsrv/SignalQueue.cpp b/ndb/src/mgmsrv/SignalQueue.cpp
index 7003f5c0a89..08ad5f363a6 100644
--- a/ndb/src/mgmsrv/SignalQueue.cpp
+++ b/ndb/src/mgmsrv/SignalQueue.cpp
@@ -14,8 +14,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#include <string.h>
-
+#include <ndb_global.h>
#include "SignalQueue.hpp"
SignalQueue::SignalQueue() {
diff --git a/ndb/src/mgmsrv/main.cpp b/ndb/src/mgmsrv/main.cpp
index 719226b51df..5ee48e4cfcc 100644
--- a/ndb/src/mgmsrv/main.cpp
+++ b/ndb/src/mgmsrv/main.cpp
@@ -60,6 +60,7 @@ struct MgmGlobals {
/** Command line arguments */
int daemon; // NOT bool, bool need not be int
int non_interactive;
+ int interactive;
const char * config_filename;
const char * local_config_filename;
@@ -68,7 +69,6 @@ struct MgmGlobals {
bool use_specific_ip;
char * interface_name;
int port;
- int port_stats;
/** The configuration of the cluster */
Config * cluster_config;
@@ -80,6 +80,7 @@ struct MgmGlobals {
SocketServer * socketServer;
};
+int g_no_nodeid_checks= 0;
static MgmGlobals glob;
@@ -97,19 +98,31 @@ extern EventLogger g_EventLogger;
extern int global_mgmt_server_check;
int _print_version = 0;
+#ifndef DBUG_OFF
+const char *debug_option= 0;
+#endif
struct getargs args[] = {
{ "version", 'v', arg_flag, &_print_version,
- "Print ndb_mgmd version"},
+ "Print ndb_mgmd version",""},
{ "config-file", 'c', arg_string, &glob.config_filename,
- "Specify cluster configuration file", "filename" },
+ "Specify cluster configuration file (default config.ini if available)",
+ "filename"},
+#ifndef DBUG_OFF
+ { "debug", 0, arg_string, &debug_option,
+ "Specify debug options e.g. d:t:i:o,out.trace", "options"},
+#endif
{ "daemon", 'd', arg_flag, &glob.daemon,
- "Run ndb_mgmd in daemon mode" },
+ "Run ndb_mgmd in daemon mode (default)",""},
{ NULL, 'l', arg_string, &glob.local_config_filename,
- "Specify configuration file connect string (will default use Ndb.cfg if available)",
- "filename" },
- { "nodaemon", 'n', arg_flag, &glob.non_interactive,
- "Don't run as daemon, but don't read from stdin", "non-interactive" }
+ "Specify configuration file connect string (default Ndb.cfg if available)",
+ "filename"},
+ { "interactive", 0, arg_flag, &glob.interactive,
+ "Run interactively. Not supported but provided for testing purposes", ""},
+ { "no-nodeid-checks", 0, arg_flag, &g_no_nodeid_checks,
+ "Do not provide any node id checks", ""},
+ { "nodaemon", 0, arg_flag, &glob.non_interactive,
+ "Don't run as daemon, but don't read from stdin", "non-interactive"}
};
int num_args = sizeof(args) / sizeof(args[0]);
@@ -118,6 +131,8 @@ int num_args = sizeof(args) / sizeof(args[0]);
* MAIN
*/
NDB_MAIN(mgmsrv){
+ ndb_init();
+
/**
* OSE specific. Enable shared ownership of file system resources.
* This is needed in order to use the cluster log since the events
@@ -135,35 +150,48 @@ NDB_MAIN(mgmsrv){
exit(1);
}
+ if (glob.interactive ||
+ glob.non_interactive) {
+ glob.daemon= 0;
+ }
+
+#ifndef DBUG_OFF
+ if (debug_option)
+ DBUG_PUSH(debug_option);
+#endif
+
if (_print_version) {
ndbPrintVersion();
exit(0);
}
if(glob.config_filename == NULL) {
- fprintf(stderr, "No configuration file specified\n");
- exit(1);
+ glob.config_filename= "config.ini";
}
glob.socketServer = new SocketServer();
MgmApiService * mapi = new MgmApiService();
- MgmStatService * mstat = new MgmStatService();
-
/****************************
* Read configuration files *
****************************/
- if (!readLocalConfig())
+ LocalConfig local_config;
+ if(!local_config.init(0,glob.local_config_filename)){
+ local_config.printError();
goto error_end;
+ }
+ glob.localNodeId = local_config._ownNodeId;
+
if (!readGlobalConfig())
goto error_end;
glob.mgmObject = new MgmtSrvr(glob.localNodeId,
BaseString(glob.config_filename),
- BaseString(glob.local_config_filename == 0 ?
- "" : glob.local_config_filename),
+ local_config,
glob.cluster_config);
+ chdir(NdbConfig_get_path(0));
+
glob.cluster_config = 0;
glob.localNodeId= glob.mgmObject->getOwnNodeId();
@@ -180,10 +208,15 @@ NDB_MAIN(mgmsrv){
glob.use_specific_ip = false;
if(!glob.use_specific_ip){
- if(!glob.socketServer->tryBind(glob.port, glob.interface_name)){
+ int count= 5; // no of retries for tryBind
+ while(!glob.socketServer->tryBind(glob.port, glob.interface_name)){
+ if (--count > 0) {
+ NdbSleep_MilliSleep(1000);
+ continue;
+ }
ndbout_c("Unable to setup port: %s:%d!\n"
"Please check if the port is already used,\n"
- "(perhaps a mgmtsrvr is already running),\n"
+ "(perhaps an ndb_mgmd is already running),\n"
"and if you are executing on the correct computer",
(glob.interface_name ? glob.interface_name : "*"), glob.port);
goto error_end;
@@ -195,20 +228,13 @@ NDB_MAIN(mgmsrv){
if(!glob.socketServer->setup(mapi, glob.port, glob.interface_name)){
ndbout_c("Unable to setup management port: %d!\n"
"Please check if the port is already used,\n"
- "(perhaps a mgmtsrvr is already running),\n"
+ "(perhaps an ndb_mgmd is already running),\n"
"and if you are executing on the correct computer",
glob.port);
delete mapi;
goto error_end;
}
- if(!glob.socketServer->setup(mstat, glob.port_stats, glob.interface_name)){
- ndbout_c("Unable to setup statistic port: %d!\nPlease check if the port"
- " is already used.", glob.port_stats);
- delete mstat;
- goto error_end;
- }
-
if(!glob.mgmObject->check_start()){
ndbout_c("Unable to check start management server.");
ndbout_c("Probably caused by illegal initial configuration file.");
@@ -228,26 +254,27 @@ NDB_MAIN(mgmsrv){
}
signal(SIGPIPE, SIG_IGN);
- if(!glob.mgmObject->start()){
- ndbout_c("Unable to start management server.");
- ndbout_c("Probably caused by illegal initial configuration file.");
- goto error_end;
+ {
+ BaseString error_string;
+ if(!glob.mgmObject->start(error_string)){
+ ndbout_c("Unable to start management server.");
+ ndbout_c("Probably caused by illegal initial configuration file.");
+ ndbout_c("%s", error_string.c_str());
+ goto error_end;
+ }
}
//glob.mgmObject->saveConfig();
-
- mstat->setMgm(glob.mgmObject);
mapi->setMgm(glob.mgmObject);
- glob.mgmObject->setStatisticsListner(mstat);
char msg[256];
- snprintf(msg, sizeof(msg),
+ BaseString::snprintf(msg, sizeof(msg),
"NDB Cluster Management Server. %s", NDB_VERSION_STRING);
ndbout_c(msg);
g_EventLogger.info(msg);
- snprintf(msg, 256, "Id: %d, Command port: %d, Statistics port: %d",
- glob.localNodeId, glob.port, glob.port_stats);
+ BaseString::snprintf(msg, 256, "Id: %d, Command port: %d",
+ glob.localNodeId, glob.port);
ndbout_c(msg);
g_EventLogger.info(msg);
@@ -255,7 +282,7 @@ NDB_MAIN(mgmsrv){
glob.socketServer->startServer();
#if ! defined NDB_OSE && ! defined NDB_SOFTOSE
- if(!glob.daemon && !glob.non_interactive){
+ if(glob.interactive) {
CommandInterpreter com(* glob.mgmObject);
while(com.readAndExecute());
} else
@@ -265,9 +292,10 @@ NDB_MAIN(mgmsrv){
NdbSleep_MilliSleep(500);
}
+ g_EventLogger.info("Shutting down server...");
glob.socketServer->stopServer();
glob.socketServer->stopSessions();
-
+ g_EventLogger.info("Shutdown complete");
return 0;
error_end:
return 1;
@@ -276,13 +304,13 @@ NDB_MAIN(mgmsrv){
MgmGlobals::MgmGlobals(){
// Default values
port = 0;
- port_stats = 0;
config_filename = NULL;
local_config_filename = NULL;
interface_name = 0;
cluster_config = 0;
- daemon = false;
+ daemon = 1;
non_interactive = 0;
+ interactive = 0;
socketServer = 0;
mgmObject = 0;
}
@@ -299,35 +327,9 @@ MgmGlobals::~MgmGlobals(){
}
/**
- * @fn readLocalConfig
- * @param glob : Global variables
- * @return true if success, false otherwise.
- *
- * How to get LOCAL CONFIGURATION FILE:
- * 1. Use local config file name (-l)
- * 2. Use environment NDB_HOME + Ndb.cfg
- * If NDB_HOME is not set this results in reading from local dir
- */
-static bool
-readLocalConfig(){
- // Read local config file
- LocalConfig lc;
- if(!lc.init(glob.local_config_filename))
- return false;
-
- glob.localNodeId = lc._ownNodeId;
- return true;
-}
-
-
-/**
* @fn readGlobalConfig
* @param glob : Global variables
* @return true if success, false otherwise.
- *
- * How to get the GLOBAL CONFIGURATION:
- * 1. Use config file name (this is a text file)(-c)
- * 2. Use name from line 2 of local config file, ex: file:///c/ndb/Ndb_cfg.bin
*/
static bool
readGlobalConfig() {
diff --git a/ndb/src/mgmsrv/mkconfig/mkconfig.cpp b/ndb/src/mgmsrv/mkconfig/mkconfig.cpp
index 3b2046d7b49..28823aaa35e 100644
--- a/ndb/src/mgmsrv/mkconfig/mkconfig.cpp
+++ b/ndb/src/mgmsrv/mkconfig/mkconfig.cpp
@@ -32,6 +32,7 @@ void usage(const char * prg){
NDB_COMMAND(mkconfig,
"mkconfig", "mkconfig",
"Make a binary configuration from a config file", 16384){
+ ndb_init();
if(argc < 3){
usage(argv[0]);
return 0;
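
The one-line mkconfig change adds an ndb_init() call at the top of the tool's NDB_COMMAND body, matching the new ndb_init.c in common/util; utilities are expected to initialise the library before any other NDB call. A hedged sketch of the pattern for a plain main() (the header that declares ndb_init() is an assumption; check ndb_init.c and ndb_global.h in this tree):

    #include <ndb_global.h>   // assumed to declare ndb_init() in this tree

    int main(int argc, char** argv)
    {
      ndb_init();             // must run before any other NDB library call
      /* ... parse argv, build and write the binary configuration ... */
      return 0;
    }
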
diff --git a/ndb/src/ndbapi/ClusterMgr.cpp b/ndb/src/ndbapi/ClusterMgr.cpp
index b9947fcf0e7..e10b2e1d82c 100644
--- a/ndb/src/ndbapi/ClusterMgr.cpp
+++ b/ndb/src/ndbapi/ClusterMgr.cpp
@@ -15,6 +15,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <ndb_global.h>
+#include <my_pthread.h>
#include <ndb_limits.h>
#include <ndb_version.h>
@@ -36,6 +37,8 @@
#include <mgmapi_configuration.hpp>
#include <mgmapi_config_parameters.h>
+int global_flag_send_heartbeat_now= 0;
+
// Just a C wrapper for threadMain
extern "C"
void*
@@ -64,7 +67,8 @@ ClusterMgr::ClusterMgr(TransporterFacade & _facade):
{
ndbSetOwnVersion();
clusterMgrThreadMutex = NdbMutex_Create();
- noOfConnectedNodes = 0;
+ noOfConnectedNodes= 0;
+ theClusterMgrThread= 0;
}
ClusterMgr::~ClusterMgr(){
@@ -137,20 +141,21 @@ ClusterMgr::startThread() {
void
ClusterMgr::doStop( ){
+ DBUG_ENTER("ClusterMgr::doStop");
NdbMutex_Lock(clusterMgrThreadMutex);
-
if(theStop){
NdbMutex_Unlock(clusterMgrThreadMutex);
- return;
+ DBUG_VOID_RETURN;
}
-
void *status;
theStop = 1;
-
- NdbThread_WaitFor(theClusterMgrThread, &status);
- NdbThread_Destroy(&theClusterMgrThread);
-
+ if (theClusterMgrThread) {
+ NdbThread_WaitFor(theClusterMgrThread, &status);
+ NdbThread_Destroy(&theClusterMgrThread);
+ theClusterMgrThread= 0;
+ }
NdbMutex_Unlock(clusterMgrThreadMutex);
+ DBUG_VOID_RETURN;
}
void
@@ -174,6 +179,9 @@ ClusterMgr::threadMain( ){
/**
* Start of Secure area for use of Transporter
*/
+ int send_heartbeat_now= global_flag_send_heartbeat_now;
+ global_flag_send_heartbeat_now= 0;
+
theFacade.lock_mutex();
for (int i = 1; i < MAX_NODES; i++){
/**
@@ -196,12 +204,16 @@ ClusterMgr::threadMain( ){
}
theNode.hbCounter += timeSlept;
- if (theNode.hbCounter >= theNode.hbFrequency){
+ if (theNode.hbCounter >= theNode.hbFrequency ||
+ send_heartbeat_now) {
/**
* It is now time to send a new Heartbeat
*/
- theNode.hbSent++;
- theNode.hbCounter = 0;
+ if (theNode.hbCounter >= theNode.hbFrequency) {
+ theNode.hbSent++;
+ theNode.hbCounter = 0;
+ }
+
/**
* If the node is of type REP,
* then the receiver of the signal should be API_CLUSTERMGR
@@ -440,13 +452,11 @@ ClusterMgr::reportNodeFailed(NodeId nodeId){
theNode.nfCompleteRep = false;
if(noOfConnectedNodes == 0){
- Uint32 theData[1];
- NFCompleteRep * rep = (NFCompleteRep *)&theData[0];
-
+ NFCompleteRep rep;
for(Uint32 i = 1; i<MAX_NODES; i++){
if(theNodes[i].defined && theNodes[i].nfCompleteRep == false){
- rep->failedNodeId = i;
- execNF_COMPLETEREP(theData);
+ rep.failedNodeId = i;
+ execNF_COMPLETEREP((Uint32*)&rep);
}
}
}
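
The reportNodeFailed() hunk replaces a one-word Uint32 buffer that was reinterpreted as an NFCompleteRep with a properly sized stack object, casting only at the call site. The sketch below illustrates the hazard the old shape carried, using a hypothetical report struct rather than the real signal layout:

    #include <cstdint>

    struct Report { uint32_t failedNodeId; uint32_t extra[3]; };  // hypothetical, larger than 4 bytes

    void old_shape()
    {
      uint32_t theData[1];                                 // 4 bytes of storage
      Report* rep = reinterpret_cast<Report*>(&theData[0]);
      rep->failedNodeId = 7;                               // happens to fit
      rep->extra[0] = 0;                                   // writes past theData: undefined behaviour
    }

    void new_shape()
    {
      Report rep;                                          // full-size object, no aliasing
      rep.failedNodeId = 7;
      rep.extra[0] = 0;
    }
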
@@ -524,6 +534,7 @@ ArbitMgr::doChoose(const Uint32* theData)
void
ArbitMgr::doStop(const Uint32* theData)
{
+ DBUG_ENTER("ArbitMgr::doStop");
ArbitSignal aSignal;
NdbMutex_Lock(theThreadMutex);
if (theThread != NULL) {
@@ -540,6 +551,7 @@ ArbitMgr::doStop(const Uint32* theData)
theState = StateInit;
}
NdbMutex_Unlock(theThreadMutex);
+ DBUG_VOID_RETURN;
}
// private methods
@@ -548,7 +560,9 @@ extern "C"
void*
runArbitMgr_C(void* me)
{
+ my_thread_init();
((ArbitMgr*) me)->threadMain();
+ my_thread_end();
NdbThread_Exit(0);
return NULL;
}
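
Besides the my_thread_init()/my_thread_end() bracketing of the arbitration thread, ClusterMgr::threadMain() now also consults a process-global global_flag_send_heartbeat_now, so other code can force an immediate heartbeat round without waiting for a node's counter to expire; only a counter-driven expiry bumps hbSent and resets hbCounter. A standalone sketch of that latch-and-clear polling shape over a single node (the original flag is a plain int; std::atomic here is just the sketch's choice):

    #include <atomic>

    static std::atomic<bool> g_send_heartbeat_now{false};   // set by any thread wanting an immediate round

    struct Node { unsigned hbCounter = 0, hbFrequency = 100, hbSent = 0; };

    // One pass of the monitoring loop over one node.
    void poll_node(Node& n, unsigned timeSlept)
    {
      bool send_now = g_send_heartbeat_now.exchange(false); // latch the request and clear it
      n.hbCounter += timeSlept;
      if (n.hbCounter >= n.hbFrequency || send_now) {
        if (n.hbCounter >= n.hbFrequency) {                 // only a real expiry resets the bookkeeping
          n.hbSent++;
          n.hbCounter = 0;
        }
        /* ... build and send the heartbeat signal to this node ... */
      }
    }
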
diff --git a/ndb/src/ndbapi/DictCache.cpp b/ndb/src/ndbapi/DictCache.cpp
index 5f620f77906..12300ce216f 100644
--- a/ndb/src/ndbapi/DictCache.cpp
+++ b/ndb/src/ndbapi/DictCache.cpp
@@ -21,6 +21,31 @@
#include <NdbCondition.h>
#include <NdbSleep.h>
+Ndb_local_table_info *
+Ndb_local_table_info::create(NdbTableImpl *table_impl, Uint32 sz)
+{
+ void *data= malloc(sizeof(NdbTableImpl)+sz-1);
+ if (data == 0)
+ return 0;
+ memset(data,0,sizeof(NdbTableImpl)+sz-1);
+ new (data) Ndb_local_table_info(table_impl);
+ return (Ndb_local_table_info *) data;
+}
+
+void Ndb_local_table_info::destroy(Ndb_local_table_info *info)
+{
+ free((void *)info);
+}
+
+Ndb_local_table_info::Ndb_local_table_info(NdbTableImpl *table_impl)
+{
+ m_table_impl= table_impl;
+}
+
+Ndb_local_table_info::~Ndb_local_table_info()
+{
+}
+
LocalDictCache::LocalDictCache(){
m_tableHash.createHashTable();
}
@@ -29,22 +54,24 @@ LocalDictCache::~LocalDictCache(){
m_tableHash.releaseHashTable();
}
-NdbTableImpl *
+Ndb_local_table_info *
LocalDictCache::get(const char * name){
const Uint32 len = strlen(name);
return m_tableHash.getData(name, len);
}
void
-LocalDictCache::put(const char * name, NdbTableImpl * tab){
- const Uint32 id = tab->m_tableId;
+LocalDictCache::put(const char * name, Ndb_local_table_info * tab_info){
+ const Uint32 id = tab_info->m_table_impl->m_tableId;
- m_tableHash.insertKey(name, strlen(name), id, tab);
+ m_tableHash.insertKey(name, strlen(name), id, tab_info);
}
void
LocalDictCache::drop(const char * name){
- m_tableHash.deleteKey(name, strlen(name));
+ Ndb_local_table_info *info= m_tableHash.deleteKey(name, strlen(name));
+ DBUG_ASSERT(info != 0);
+ Ndb_local_table_info::destroy(info);
}
/*****************************************************************
diff --git a/ndb/src/ndbapi/DictCache.hpp b/ndb/src/ndbapi/DictCache.hpp
index 098acc9006a..0dc853306fa 100644
--- a/ndb/src/ndbapi/DictCache.hpp
+++ b/ndb/src/ndbapi/DictCache.hpp
@@ -27,6 +27,17 @@
#include <Ndb.hpp>
#include "NdbLinHash.hpp"
+class Ndb_local_table_info {
+public:
+ static Ndb_local_table_info *create(NdbTableImpl *table_impl, Uint32 sz=0);
+ static void destroy(Ndb_local_table_info *);
+ NdbTableImpl *m_table_impl;
+ char m_local_data[1];
+private:
+ Ndb_local_table_info(NdbTableImpl *table_impl);
+ ~Ndb_local_table_info();
+};
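
Ndb_local_table_info pairs the cached NdbTableImpl pointer with a caller-sized block of local data: create() allocates the object plus sz-1 extra bytes in one malloc, constructs it with placement new, and destroy() simply frees the block. A minimal standalone sketch of that over-allocation pattern with generic names:

    #include <cstdlib>
    #include <cstring>
    #include <new>

    // A small header plus a caller-sized trailing byte array in one allocation.
    struct LocalInfo {
      void* impl;              // stands in for the cached table pointer
      char  data[1];           // grows into the extra bytes allocated below

      static LocalInfo* create(void* table, size_t sz) {
        void* mem = std::malloc(sizeof(LocalInfo) + sz - 1);
        if (mem == 0) return 0;
        std::memset(mem, 0, sizeof(LocalInfo) + sz - 1);
        LocalInfo* info = new (mem) LocalInfo();   // placement new on the raw block
        info->impl = table;
        return info;
      }
      static void destroy(LocalInfo* info) {
        std::free(info);       // trivially destructible, so free() is enough
      }
    };
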
+
/**
* A non thread safe dict cache
*/
@@ -35,12 +46,12 @@ public:
LocalDictCache();
~LocalDictCache();
- NdbTableImpl * get(const char * name);
+ Ndb_local_table_info * get(const char * name);
- void put(const char * name, NdbTableImpl *);
+ void put(const char * name, Ndb_local_table_info *);
void drop(const char * name);
- NdbLinHash<NdbTableImpl> m_tableHash; // On name
+ NdbLinHash<Ndb_local_table_info> m_tableHash; // On name
};
/**
diff --git a/ndb/src/ndbapi/Makefile.am b/ndb/src/ndbapi/Makefile.am
index 14badb0c62f..06128e047b6 100644
--- a/ndb/src/ndbapi/Makefile.am
+++ b/ndb/src/ndbapi/Makefile.am
@@ -34,6 +34,7 @@ libndbapi_la_SOURCES = \
NdbDictionary.cpp \
NdbDictionaryImpl.cpp \
DictCache.cpp \
+ ndb_cluster_connection.cpp \
NdbBlob.cpp
INCLUDES_LOC = -I$(top_srcdir)/ndb/src/mgmapi
diff --git a/ndb/src/ndbapi/Ndb.cpp b/ndb/src/ndbapi/Ndb.cpp
index f09a7481d2d..d7b8a695fe2 100644
--- a/ndb/src/ndbapi/Ndb.cpp
+++ b/ndb/src/ndbapi/Ndb.cpp
@@ -207,9 +207,11 @@ Remark: Disconnect all connections to the database.
void
Ndb::doDisconnect()
{
+ DBUG_ENTER("Ndb::doDisconnect");
NdbConnection* tNdbCon;
CHECK_STATUS_MACRO_VOID;
+ DBUG_PRINT("info", ("theNoOfDBnodes=%d", theNoOfDBnodes));
Uint32 tNoOfDbNodes = theNoOfDBnodes;
UintR i;
for (i = 0; i < tNoOfDbNodes; i++) {
@@ -227,6 +229,7 @@ Ndb::doDisconnect()
tNdbCon = tNdbCon->theNext;
releaseConnectToNdb(tmpNdbCon);
}//while
+ DBUG_VOID_RETURN;
}//Ndb::disconnect()
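
Many of the Ndb.cpp and NdbConnection.cpp hunks in this change are pure instrumentation: DBUG_ENTER names the frame and every exit path is rewritten to DBUG_RETURN/DBUG_VOID_RETURN so the mysys trace nesting stays balanced (a bare return after DBUG_ENTER would corrupt it). A small sketch of the convention on a hypothetical function (the include mirrors the one added to NdbConnection.cpp below; the exact header layering is an assumption):

    #include <ndb_global.h>   // assumed to pull in the mysys DBUG macros in this tree

    static int lookup(int key)
    {
      DBUG_ENTER("lookup");                     // one per function, at the top
      DBUG_PRINT("enter", ("key: %d", key));
      if (key < 0)
        DBUG_RETURN(-1);                        // never a bare `return` once entered
      DBUG_RETURN(key * 2);
    }
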
/*****************************************************************************
@@ -239,51 +242,62 @@ Remark: Waits until a node has status != 0
int
Ndb::waitUntilReady(int timeout)
{
+ DBUG_ENTER("Ndb::waitUntilReady");
int secondsCounter = 0;
int milliCounter = 0;
int noChecksSinceFirstAliveFound = 0;
+ int id;
if (theInitState != Initialised) {
// Ndb::init is not called
theError.code = 4256;
- return -1;
+ DBUG_RETURN(-1);
}
do {
- unsigned int foundAliveNode = 0;
- TransporterFacade *tp = TransporterFacade::instance();
- tp->lock_mutex();
- for (unsigned int i = 0; i < theNoOfDBnodes; i++) {
- const NodeId nodeId = theDBnodes[i];
- //************************************************
- // If any node is answering, ndb is answering
- //************************************************
- if (tp->get_node_alive(nodeId) != 0) {
- foundAliveNode++;
+ if ((id = theNode) != 0) {
+ unsigned int foundAliveNode = 0;
+ TransporterFacade *tp = TransporterFacade::instance();
+ tp->lock_mutex();
+ for (unsigned int i = 0; i < theNoOfDBnodes; i++) {
+ const NodeId nodeId = theDBnodes[i];
+ //************************************************
+ // If any node is answering, ndb is answering
+ //************************************************
+ if (tp->get_node_alive(nodeId) != 0) {
+ foundAliveNode++;
+ }//if
+ }//for
+
+ tp->unlock_mutex();
+ if (foundAliveNode == theNoOfDBnodes) {
+ DBUG_RETURN(0);
}//if
- }//for
-
- tp->unlock_mutex();
- if (foundAliveNode == theNoOfDBnodes) {
- return 0;
- }//if
- if (foundAliveNode > 0) {
- noChecksSinceFirstAliveFound++;
- }//if
- if (noChecksSinceFirstAliveFound > 30) {
- return 0;
- }//if
+ if (foundAliveNode > 0) {
+ noChecksSinceFirstAliveFound++;
+ }//if
+ if (noChecksSinceFirstAliveFound > 30) {
+ DBUG_RETURN(0);
+ }//if
+ }//if theNode != 0
+ if (secondsCounter >= timeout)
+ break;
NdbSleep_MilliSleep(100);
milliCounter += 100;
if (milliCounter >= 1000) {
secondsCounter++;
milliCounter = 0;
}//if
- } while ( secondsCounter < timeout );
+ } while (1);
+ if (id == 0) {
+ theError.code = 4269;
+ DBUG_RETURN(-1);
+ }
if (noChecksSinceFirstAliveFound > 0) {
- return 0;
+ DBUG_RETURN(0);
}//if
- return -1;
+ theError.code = 4009;
+ DBUG_RETURN(-1);
}
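
The rewritten waitUntilReady() keeps its own seconds/milliseconds accounting, breaks out of the loop explicitly when the timeout is reached, and distinguishes "no node id yet" (error 4269) from "nodes never all answered" (error 4009). A standalone sketch of the loop skeleton, with the cluster probe reduced to a callable:

    #include <chrono>
    #include <functional>
    #include <thread>

    // Poll `all_alive` every 100 ms until it succeeds or `timeout` seconds pass.
    // Returns 0 on success, -1 on timeout (the caller sets the error code).
    static int wait_until_ready(const std::function<bool()>& all_alive, int timeout)
    {
      int secondsCounter = 0, milliCounter = 0;
      do {
        if (all_alive())
          return 0;
        if (secondsCounter >= timeout)
          break;
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        milliCounter += 100;
        if (milliCounter >= 1000) { secondsCounter++; milliCounter = 0; }
      } while (true);
      return -1;
    }
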
/*****************************************************************************
@@ -296,6 +310,7 @@ Remark: Start transaction. Synchronous.
NdbConnection*
Ndb::startTransaction(Uint32 aPriority, const char * keyData, Uint32 keyLen)
{
+ DBUG_ENTER("Ndb::startTransaction");
if (theInitState == Initialised) {
theError.code = 0;
@@ -312,9 +327,14 @@ Ndb::startTransaction(Uint32 aPriority, const char * keyData, Uint32 keyLen)
} else {
nodeId = 0;
}//if
- return startTransactionLocal(aPriority, nodeId);
+ {
+ NdbConnection *trans= startTransactionLocal(aPriority, nodeId);
+ DBUG_PRINT("exit",("start trans: 0x%x transid: 0x%llx",
+ trans, trans ? trans->getTransactionId() : 0));
+ DBUG_RETURN(trans);
+ }
} else {
- return NULL;
+ DBUG_RETURN(NULL);
}//if
}//Ndb::startTransaction()
@@ -329,9 +349,13 @@ Remark: Start transaction. Synchronous.
NdbConnection*
Ndb::hupp(NdbConnection* pBuddyTrans)
{
+ DBUG_ENTER("Ndb::hupp");
+
+ DBUG_PRINT("enter", ("trans: 0x%x",pBuddyTrans));
+
Uint32 aPriority = 0;
if (pBuddyTrans == NULL){
- return startTransaction();
+ DBUG_RETURN(startTransaction());
}
if (theInitState == Initialised) {
@@ -341,19 +365,22 @@ Ndb::hupp(NdbConnection* pBuddyTrans)
Uint32 nodeId = pBuddyTrans->getConnectedNodeId();
NdbConnection* pCon = startTransactionLocal(aPriority, nodeId);
if(pCon == NULL)
- return NULL;
+ DBUG_RETURN(NULL);
if (pCon->getConnectedNodeId() != nodeId){
// We could not get a connection to the desired node
// release the connection and return NULL
closeTransaction(pCon);
- return NULL;
+ theError.code = 4006;
+ DBUG_RETURN(NULL);
}
pCon->setTransactionId(pBuddyTrans->getTransactionId());
pCon->setBuddyConPtr((Uint32)pBuddyTrans->getTC_ConnectPtr());
- return pCon;
+ DBUG_PRINT("exit", ("hupp trans: 0x%x transid: 0x%llx",
+ pCon, pCon ? pCon->getTransactionId() : 0));
+ DBUG_RETURN(pCon);
} else {
- return NULL;
+ DBUG_RETURN(NULL);
}//if
}//Ndb::hupp()
@@ -387,7 +414,10 @@ Ndb::startTransactionDGroup(Uint32 aPriority, const char * keyData, int type)
fragmentId = getFragmentId(hashValue);
}//if
Uint32 nodeId = guessPrimaryNode(fragmentId);
- return startTransactionLocal(aPriority, nodeId);
+ NdbConnection* trans= startTransactionLocal(aPriority, nodeId);
+ DBUG_PRINT("exit", ("start DGroup trans: 0x%x transid: 0x%llx",
+ trans, trans ? trans->getTransactionId() : 0));
+ return trans;
} else {
return NULL;
}//if
@@ -404,11 +434,14 @@ Ndb::startTransactionLocal(Uint32 aPriority, Uint32 nodeId)
}
#endif
+ DBUG_ENTER("Ndb::startTransactionLocal");
+ DBUG_PRINT("enter", ("nodeid: %d", nodeId));
+
NdbConnection* tConnection;
Uint64 tFirstTransId = theFirstTransId;
tConnection = doConnect(nodeId);
if (tConnection == NULL) {
- return NULL;
+ DBUG_RETURN(NULL);
}//if
NdbConnection* tConNext = theTransactionList;
tConnection->init();
@@ -431,7 +464,7 @@ Ndb::startTransactionLocal(Uint32 aPriority, Uint32 nodeId)
abort();
}
#endif
- return tConnection;
+ DBUG_RETURN(tConnection);
}//Ndb::startTransactionLocal()
/*****************************************************************************
@@ -443,6 +476,7 @@ Remark: Close transaction by releasing the connection and all operations
void
Ndb::closeTransaction(NdbConnection* aConnection)
{
+ DBUG_ENTER("Ndb::closeTransaction");
NdbConnection* tCon;
NdbConnection* tPreviousCon;
@@ -454,12 +488,18 @@ Ndb::closeTransaction(NdbConnection* aConnection)
#ifdef VM_TRACE
printf("NULL into closeTransaction\n");
#endif
- return;
+ DBUG_VOID_RETURN;
}//if
CHECK_STATUS_MACRO_VOID;
tCon = theTransactionList;
+ DBUG_PRINT("info",("close trans: 0x%x transid: 0x%llx",
+ aConnection, aConnection->getTransactionId()));
+ DBUG_PRINT("info",("magic number: 0x%x TCConPtr: 0x%x theMyRef: 0x%x 0x%x",
+ aConnection->theMagicNumber, aConnection->theTCConPtr,
+ aConnection->theMyRef, getReference()));
+
if (aConnection == tCon) { // Remove the active connection object
theTransactionList = tCon->next(); // from the transaction list.
} else {
@@ -479,14 +519,14 @@ Ndb::closeTransaction(NdbConnection* aConnection)
printf("Scan timeout:ed NdbConnection-> "
"not returning it-> memory leak\n");
#endif
- return;
+ DBUG_VOID_RETURN;
}
#ifdef VM_TRACE
printf("Non-existing transaction into closeTransaction\n");
abort();
#endif
- return;
+ DBUG_VOID_RETURN;
}//if
tPreviousCon = tCon;
tCon = tCon->next();
@@ -505,7 +545,7 @@ Ndb::closeTransaction(NdbConnection* aConnection)
#ifdef VM_TRACE
printf("Con timeout:ed NdbConnection-> not returning it-> memory leak\n");
#endif
- return;
+ DBUG_VOID_RETURN;
}
if (aConnection->theReleaseOnClose == false) {
@@ -515,11 +555,12 @@ Ndb::closeTransaction(NdbConnection* aConnection)
Uint32 nodeId = aConnection->getConnectedNodeId();
aConnection->theNext = theConnectionArray[nodeId];
theConnectionArray[nodeId] = aConnection;
- return;
+ DBUG_VOID_RETURN;
} else {
aConnection->theReleaseOnClose = false;
releaseNdbCon(aConnection);
}//if
+ DBUG_VOID_RETURN;
}//Ndb::closeTransaction()
/*****************************************************************************
@@ -729,15 +770,18 @@ Uint64
Ndb::getAutoIncrementValue(const char* aTableName, Uint32 cacheSize)
{
DEBUG_TRACE("getAutoIncrementValue");
- const NdbTableImpl* table = theDictionary->getTable(aTableName);
- if (table == 0)
+ const char * internalTableName = internalizeTableName(aTableName);
+ Ndb_local_table_info *info=
+ theDictionary->get_local_table_info(internalTableName, false);
+ if (info == 0)
return ~0;
+ const NdbTableImpl *table= info->m_table_impl;
Uint64 tupleId = getTupleIdFromNdb(table->m_tableId, cacheSize);
return tupleId;
}
Uint64
-Ndb::getAutoIncrementValue(NdbDictionary::Table * aTable, Uint32 cacheSize)
+Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable, Uint32 cacheSize)
{
DEBUG_TRACE("getAutoIncrementValue");
if (aTable == 0)
@@ -775,14 +819,16 @@ Ndb::readAutoIncrementValue(const char* aTableName)
{
DEBUG_TRACE("readtAutoIncrementValue");
const NdbTableImpl* table = theDictionary->getTable(aTableName);
- if (table == 0)
+ if (table == 0) {
+ theError= theDictionary->getNdbError();
return ~0;
+ }
Uint64 tupleId = readTupleIdFromNdb(table->m_tableId);
return tupleId;
}
Uint64
-Ndb::readAutoIncrementValue(NdbDictionary::Table * aTable)
+Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable)
{
DEBUG_TRACE("readtAutoIncrementValue");
if (aTable == 0)
@@ -806,14 +852,19 @@ bool
Ndb::setAutoIncrementValue(const char* aTableName, Uint64 val, bool increase)
{
DEBUG_TRACE("setAutoIncrementValue " << val);
- const NdbTableImpl* table = theDictionary->getTable(aTableName);
- if (table == 0)
+ const char * internalTableName= internalizeTableName(aTableName);
+ Ndb_local_table_info *info=
+ theDictionary->get_local_table_info(internalTableName, false);
+ if (info == 0) {
+ theError= theDictionary->getNdbError();
return false;
+ }
+ const NdbTableImpl* table= info->m_table_impl;
return setTupleIdInNdb(table->m_tableId, val, increase);
}
bool
-Ndb::setAutoIncrementValue(NdbDictionary::Table * aTable, Uint64 val, bool increase)
+Ndb::setAutoIncrementValue(const NdbDictionary::Table * aTable, Uint64 val, bool increase)
{
DEBUG_TRACE("setAutoIncrementValue " << val);
if (aTable == 0)
@@ -827,8 +878,10 @@ Ndb::setTupleIdInNdb(const char* aTableName, Uint64 val, bool increase )
{
DEBUG_TRACE("setTupleIdInNdb");
const NdbTableImpl* table = theDictionary->getTable(aTableName);
- if (table == 0)
+ if (table == 0) {
+ theError= theDictionary->getNdbError();
return false;
+ }
return setTupleIdInNdb(table->m_tableId, val, increase);
}
@@ -1050,6 +1103,9 @@ Ndb::StartTransactionNodeSelectionData::init(Uint32 noOfNodes,
* This algorithm should be implemented in Dbdih
*/
{
+ if (fragment2PrimaryNodeMap != 0)
+ abort();
+
fragment2PrimaryNodeMap = new Uint32[noOfFragments];
Uint32 i;
for(i = 0; i<noOfNodes; i++){
@@ -1102,13 +1158,13 @@ const char * Ndb::getCatalogName() const
void Ndb::setCatalogName(const char * a_catalog_name)
{
if (a_catalog_name) {
- snprintf(theDataBase, sizeof(theDataBase), "%s",
+ BaseString::snprintf(theDataBase, sizeof(theDataBase), "%s",
a_catalog_name ? a_catalog_name : "");
-
- int len = snprintf(prefixName, sizeof(prefixName), "%s%c%s%c",
+
+ int len = BaseString::snprintf(prefixName, sizeof(prefixName), "%s%c%s%c",
theDataBase, table_name_separator,
theDataBaseSchema, table_name_separator);
- prefixEnd = prefixName + (len < sizeof(prefixName) ? len :
+ prefixEnd = prefixName + (len < (int) sizeof(prefixName) ? len :
sizeof(prefixName) - 1);
}
}
@@ -1121,13 +1177,13 @@ const char * Ndb::getSchemaName() const
void Ndb::setSchemaName(const char * a_schema_name)
{
if (a_schema_name) {
- snprintf(theDataBaseSchema, sizeof(theDataBase), "%s",
+ BaseString::snprintf(theDataBaseSchema, sizeof(theDataBase), "%s",
a_schema_name ? a_schema_name : "");
- int len = snprintf(prefixName, sizeof(prefixName), "%s%c%s%c",
+ int len = BaseString::snprintf(prefixName, sizeof(prefixName), "%s%c%s%c",
theDataBase, table_name_separator,
theDataBaseSchema, table_name_separator);
- prefixEnd = prefixName + (len < sizeof(prefixName) ? len :
+ prefixEnd = prefixName + (len < (int) sizeof(prefixName) ? len :
sizeof(prefixName) - 1);
}
}
@@ -1155,11 +1211,6 @@ void Ndb::setDatabaseSchemaName(const char * a_schema_name)
setSchemaName(a_schema_name);
}
-void Ndb::useFullyQualifiedNames(bool turnNamingOn)
-{
- fullyQualifiedNames = turnNamingOn;
-}
-
bool Ndb::usingFullyQualifiedNames()
{
return fullyQualifiedNames;
@@ -1314,7 +1365,8 @@ Ndb::pollEvents(int aMillisecondNumber)
#ifdef VM_TRACE
#include <NdbMutex.h>
-static NdbMutex print_state_mutex = NDB_MUTEX_INITIALIZER;
+extern NdbMutex *ndb_print_state_mutex;
+
static bool
checkdups(NdbConnection** list, unsigned no)
{
@@ -1332,7 +1384,7 @@ Ndb::printState(const char* fmt, ...)
va_start(ap, fmt);
vsprintf(buf, fmt, ap);
va_end(ap);
- NdbMutex_Lock(&print_state_mutex);
+ NdbMutex_Lock(ndb_print_state_mutex);
bool dups = false;
ndbout << buf << " ndb=" << hex << this << dec;
#ifndef NDB_WIN32
@@ -1370,7 +1422,7 @@ Ndb::printState(const char* fmt, ...)
}
for (unsigned i = 0; i < theNoOfCompletedTransactions; i++)
theCompletedTransactionsArray[i]->printState();
- NdbMutex_Unlock(&print_state_mutex);
+ NdbMutex_Unlock(ndb_print_state_mutex);
}
#endif
diff --git a/ndb/src/ndbapi/NdbApiSignal.cpp b/ndb/src/ndbapi/NdbApiSignal.cpp
index 6f5e1e50d2c..a1d34896968 100644
--- a/ndb/src/ndbapi/NdbApiSignal.cpp
+++ b/ndb/src/ndbapi/NdbApiSignal.cpp
@@ -168,7 +168,7 @@ NdbApiSignal::setSignal(int aNdbSignalType)
theTrace = TestOrd::TraceAPI;
theReceiversBlockNumber = DBTC;
theVerId_signalNumber = GSN_TC_COMMITREQ;
- theLength = 5;
+ theLength = 3;
}
break;
@@ -177,7 +177,7 @@ NdbApiSignal::setSignal(int aNdbSignalType)
theTrace = TestOrd::TraceAPI;
theReceiversBlockNumber = DBTC;
theVerId_signalNumber = GSN_SCAN_TABREQ;
- theLength = 9; // ScanTabReq::SignalLength;
+ theLength = ScanTabReq::StaticLength;
}
break;
@@ -186,7 +186,7 @@ NdbApiSignal::setSignal(int aNdbSignalType)
theTrace = TestOrd::TraceAPI;
theReceiversBlockNumber = DBTC;
theVerId_signalNumber = GSN_SCAN_NEXTREQ;
- theLength = 4;
+ theLength = ScanNextReq::SignalLength;
}
break;
diff --git a/ndb/src/ndbapi/NdbApiSignal.hpp b/ndb/src/ndbapi/NdbApiSignal.hpp
index 9d5bc0847be..52c3be2256c 100644
--- a/ndb/src/ndbapi/NdbApiSignal.hpp
+++ b/ndb/src/ndbapi/NdbApiSignal.hpp
@@ -71,6 +71,8 @@ public:
const Uint32 * getDataPtr() const;
Uint32 * getDataPtrSend();
+ NodeId get_sender_node();
+
/**
* Fragmentation
*/
@@ -104,6 +106,17 @@ private:
Uint32 *theRealData;
};
/**********************************************************************
+NodeId get_sender_node
+Remark: Get the node id of the sender
+***********************************************************************/
+inline
+NodeId
+NdbApiSignal::get_sender_node()
+{
+ return refToNode(theSendersBlockRef);
+}
+
+/**********************************************************************
void getLength
Remark: Get the length of the signal.
******************************************************************************/
diff --git a/ndb/src/ndbapi/NdbBlob.cpp b/ndb/src/ndbapi/NdbBlob.cpp
index 7939f54d846..feab95d8ca5 100644
--- a/ndb/src/ndbapi/NdbBlob.cpp
+++ b/ndb/src/ndbapi/NdbBlob.cpp
@@ -867,7 +867,7 @@ NdbBlob::readParts(char* buf, Uint32 part, Uint32 count)
while (n < count) {
NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
if (tOp == NULL ||
- tOp->readTuple() == -1 ||
+ tOp->committedRead() == -1 ||
setPartKeyValue(tOp, part + n) == -1 ||
tOp->getValue((Uint32)3, buf) == NULL) {
setErrorCode(tOp);
@@ -1440,11 +1440,11 @@ NdbOut&
operator<<(NdbOut& out, const NdbBlob& blob)
{
ndbout << dec << "o=" << blob.getOperationType();
- ndbout << dec << " s=" << blob.theState;
+ ndbout << dec << " s=" << (Uint32) blob.theState;
ndbout << dec << " n=" << blob.theNullFlag;;
ndbout << dec << " l=" << blob.theLength;
ndbout << dec << " p=" << blob.thePos;
- ndbout << dec << " u=" << blob.theHeadInlineUpdateFlag;
+ ndbout << dec << " u=" << (Uint32) blob.theHeadInlineUpdateFlag;
return out;
}
#endif
diff --git a/ndb/src/ndbapi/NdbConnection.cpp b/ndb/src/ndbapi/NdbConnection.cpp
index 9a2995a957e..1457792cf28 100644
--- a/ndb/src/ndbapi/NdbConnection.cpp
+++ b/ndb/src/ndbapi/NdbConnection.cpp
@@ -14,19 +14,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-
-/*****************************************************************************
-Name: NdbConnection.C
-Include:
-Link:
-Author: UABMNST Mona Natterkvist UAB/B/UL
-Date: 970829
-Version: 0.1
-Description: Interface between TIS and NDB
-Documentation:
-Adjust: 971022 UABMNST First version.
-*****************************************************************************/
+#include <ndb_global.h>
#include <NdbOut.hpp>
#include <NdbConnection.hpp>
#include <NdbOperation.hpp>
@@ -95,6 +83,11 @@ NdbConnection::NdbConnection( Ndb* aNdb ) :
theListState = NotInList;
theError.code = 0;
theId = theNdb->theNdbObjectIdMap->map(this);
+
+#define CHECK_SZ(mask, sz) assert((sizeof(mask)/sizeof(mask[0])) == sz)
+
+ CHECK_SZ(m_db_nodes, NdbNodeBitmask::Size);
+ CHECK_SZ(m_failed_db_nodes, NdbNodeBitmask::Size);
}//NdbConnection::NdbConnection()
/*****************************************************************************
@@ -104,7 +97,9 @@ Remark: Deletes the connection object.
*****************************************************************************/
NdbConnection::~NdbConnection()
{
+ DBUG_ENTER("NdbConnection::~NdbConnection");
theNdb->theNdbObjectIdMap->unmap(theId, this);
+ DBUG_VOID_RETURN;
}//NdbConnection::~NdbConnection()
/*****************************************************************************
@@ -155,27 +150,29 @@ NdbConnection::init()
}//NdbConnection::init()
/*****************************************************************************
-setOperationErrorCode(int anErrorCode);
+setOperationErrorCode(int error);
Remark: Sets an error code on the connection object from an
operation object.
*****************************************************************************/
void
-NdbConnection::setOperationErrorCode(int anErrorCode)
+NdbConnection::setOperationErrorCode(int error)
{
- if (theError.code == 0)
- theError.code = anErrorCode;
-}//NdbConnection::setOperationErrorCode()
+ DBUG_ENTER("NdbConnection::setOperationErrorCode");
+ setErrorCode(error);
+ DBUG_VOID_RETURN;
+}
/*****************************************************************************
-setOperationErrorCodeAbort(int anErrorCode);
+setOperationErrorCodeAbort(int error);
Remark: Sets an error code on the connection object from an
operation object.
*****************************************************************************/
void
-NdbConnection::setOperationErrorCodeAbort(int anErrorCode)
+NdbConnection::setOperationErrorCodeAbort(int error)
{
+ DBUG_ENTER("NdbConnection::setOperationErrorCodeAbort");
if (theTransactionIsStarted == false) {
theCommitStatus = Aborted;
} else if ((m_abortOption == AbortOnError) &&
@@ -183,9 +180,9 @@ NdbConnection::setOperationErrorCodeAbort(int anErrorCode)
(theCommitStatus != Aborted)) {
theCommitStatus = NeedAbort;
}//if
- if (theError.code == 0)
- theError.code = anErrorCode;
-}//NdbConnection::setOperationErrorCodeAbort()
+ setErrorCode(error);
+ DBUG_VOID_RETURN;
+}
/*****************************************************************************
setErrorCode(int anErrorCode);
@@ -193,14 +190,20 @@ setErrorCode(int anErrorCode);
Remark: Sets an error indication on the connection object.
*****************************************************************************/
void
-NdbConnection::setErrorCode(int anErrorCode)
+NdbConnection::setErrorCode(int error)
{
+ DBUG_ENTER("NdbConnection::setErrorCode");
+ DBUG_PRINT("enter", ("error: %d, theError.code: %d", error, theError.code));
+
if (theError.code == 0)
- theError.code = anErrorCode;
+ theError.code = error;
+
+ DBUG_VOID_RETURN;
}//NdbConnection::setErrorCode()
int
NdbConnection::restart(){
+ DBUG_ENTER("NdbConnection::restart");
if(theCompletionStatus == CompletedSuccess){
releaseCompletedOperations();
Uint64 tTransid = theNdb->theFirstTransId;
@@ -210,10 +213,13 @@ NdbConnection::restart(){
} else {
theNdb->theFirstTransId = tTransid + 1;
}
+ theCommitStatus = Started;
theCompletionStatus = NotCompleted;
- return 0;
+ theTransactionIsStarted = false;
+ DBUG_RETURN(0);
}
- return -1;
+ DBUG_PRINT("error",("theCompletionStatus != CompletedSuccess"));
+ DBUG_RETURN(-1);
}
/*****************************************************************************
@@ -268,8 +274,12 @@ NdbConnection::execute(ExecType aTypeOfExec,
AbortOption abortOption,
int forceSend)
{
+ DBUG_ENTER("NdbConnection::execute");
+ DBUG_PRINT("enter", ("aTypeOfExec: %d, abortOption: %d",
+ aTypeOfExec, abortOption));
+
if (! theBlobFlag)
- return executeNoBlobs(aTypeOfExec, abortOption, forceSend);
+ DBUG_RETURN(executeNoBlobs(aTypeOfExec, abortOption, forceSend));
/*
* execute prepared ops in batches, as requested by blobs
@@ -303,8 +313,8 @@ NdbConnection::execute(ExecType aTypeOfExec,
tPrepOp = tPrepOp->next();
}
// save rest of prepared ops if batch
- NdbOperation* tRestOp;
- NdbOperation* tLastOp;
+ NdbOperation* tRestOp= 0;
+ NdbOperation* tLastOp= 0;
if (tPrepOp != NULL) {
tRestOp = tPrepOp->next();
tPrepOp->next(NULL);
@@ -352,7 +362,7 @@ NdbConnection::execute(ExecType aTypeOfExec,
}
} while (theFirstOpInList != NULL || tExecType != aTypeOfExec);
- return ret;
+ DBUG_RETURN(ret);
}
int
@@ -360,6 +370,10 @@ NdbConnection::executeNoBlobs(ExecType aTypeOfExec,
AbortOption abortOption,
int forceSend)
{
+ DBUG_ENTER("NdbConnection::executeNoBlobs");
+ DBUG_PRINT("enter", ("aTypeOfExec: %d, abortOption: %d",
+ aTypeOfExec, abortOption));
+
//------------------------------------------------------------------------
// We will start by preparing all operations in the transaction defined
// since last execute or since beginning. If this works ok we will continue
@@ -382,7 +396,7 @@ NdbConnection::executeNoBlobs(ExecType aTypeOfExec,
*/
ndbout << "This timeout should never occur, execute(..)" << endl;
setOperationErrorCodeAbort(4012); // Error code for "Cluster Failure"
- return -1;
+ DBUG_RETURN(-1);
}//if
/*
@@ -406,13 +420,13 @@ NdbConnection::executeNoBlobs(ExecType aTypeOfExec,
}
#endif
if (theReturnStatus == ReturnFailure) {
- return -1;
+ DBUG_RETURN(-1);
}//if
break;
}
}
thePendingBlobOps = 0;
- return 0;
+ DBUG_RETURN(0);
}//NdbConnection::execute()
/*****************************************************************************
@@ -436,9 +450,15 @@ NdbConnection::executeAsynchPrepare( ExecType aTypeOfExec,
void* anyObject,
AbortOption abortOption)
{
+ DBUG_ENTER("NdbConnection::executeAsynchPrepare");
+ DBUG_PRINT("enter", ("aTypeOfExec: %d, aCallback: %x, anyObject: %x",
+ aTypeOfExec, aCallback, anyObject));
+
/**
* Reset error.code on execute
*/
+ if (theError.code != 0)
+ DBUG_PRINT("enter", ("Resetting error %d on execute", theError.code));
theError.code = 0;
NdbScanOperation* tcOp = m_theFirstScanOperation;
if (tcOp != 0){
@@ -447,7 +467,7 @@ NdbConnection::executeAsynchPrepare( ExecType aTypeOfExec,
int tReturnCode;
tReturnCode = tcOp->executeCursor(theDBnode);
if (tReturnCode == -1) {
- return;
+ DBUG_VOID_RETURN;
}//if
tcOp = (NdbScanOperation*)tcOp->next();
} // while
@@ -469,28 +489,12 @@ NdbConnection::executeAsynchPrepare( ExecType aTypeOfExec,
theCallbackFunction = aCallback;
theCallbackObject = anyObject;
m_abortOption = abortOption;
- // SendStatusType tSendStatus = theSendStatus;
-
-// if (tSendStatus != InitState) {
-/****************************************************************************
- * The application is obviously doing strange things. We should probably
- * report to the application the problem in some manner. Since we don't have
- * a good way of handling the problem we avoid discovering the problem.
- * Should be handled at some point in time.
- ****************************************************************************/
-// return;
-// }
m_waitForReply = true;
tNdb->thePreparedTransactionsArray[tnoOfPreparedTransactions] = this;
theTransArrayIndex = tnoOfPreparedTransactions;
theListState = InPreparedList;
tNdb->theNoOfPreparedTransactions = tnoOfPreparedTransactions + 1;
- if(tCommitStatus == Committed){
- tCommitStatus = Started;
- tTransactionIsStarted = false;
- }
-
if ((tCommitStatus != Started) ||
(aTypeOfExec == Rollback)) {
/*****************************************************************************
@@ -499,7 +503,7 @@ NdbConnection::executeAsynchPrepare( ExecType aTypeOfExec,
* same action.
****************************************************************************/
if (aTypeOfExec == Rollback) {
- if (theTransactionIsStarted == false) {
+ if (theTransactionIsStarted == false || theSimpleState) {
theCommitStatus = Aborted;
theSendStatus = sendCompleted;
} else {
@@ -508,7 +512,11 @@ NdbConnection::executeAsynchPrepare( ExecType aTypeOfExec,
} else {
theSendStatus = sendABORTfail;
}//if
- return;
+ if (theCommitStatus == Aborted){
+ DBUG_PRINT("exit", ("theCommitStatus: Aborted"));
+ setErrorCode(4350);
+ }
+ DBUG_VOID_RETURN;
}//if
if (tTransactionIsStarted == true) {
if (tLastOp != NULL) {
@@ -520,13 +528,13 @@ NdbConnection::executeAsynchPrepare( ExecType aTypeOfExec,
tLastOp->theCommitIndicator = 1;
}//if
} else {
- if (aTypeOfExec == Commit) {
+ if (aTypeOfExec == Commit && !theSimpleState) {
/**********************************************************************
* A Transaction have been started and no more operations exist.
* We will use the commit method.
*********************************************************************/
theSendStatus = sendCOMMITstate;
- return;
+ DBUG_VOID_RETURN;
} else {
/**********************************************************************
* We need to put it into the array of completed transactions to
@@ -538,7 +546,7 @@ NdbConnection::executeAsynchPrepare( ExecType aTypeOfExec,
* put it into the completed array.
**********************************************************************/
theSendStatus = sendCompleted;
- return; // No Commit with no operations is OK
+ DBUG_VOID_RETURN; // No Commit with no operations is OK
}//if
}//if
} else if (tTransactionIsStarted == false) {
@@ -566,7 +574,7 @@ NdbConnection::executeAsynchPrepare( ExecType aTypeOfExec,
* will put it into the completed array.
***********************************************************************/
theSendStatus = sendCompleted;
- return;
+ DBUG_VOID_RETURN;
}//if
}
@@ -579,7 +587,7 @@ NdbConnection::executeAsynchPrepare( ExecType aTypeOfExec,
tReturnCode = tOp->prepareSend(theTCConPtr, theTransactionId);
if (tReturnCode == -1) {
theSendStatus = sendABORTfail;
- return;
+ DBUG_VOID_RETURN;
}//if
/*************************************************************************
@@ -602,7 +610,9 @@ NdbConnection::executeAsynchPrepare( ExecType aTypeOfExec,
theNoOfOpSent = 0;
theNoOfOpCompleted = 0;
theSendStatus = sendOperations;
- return;
+ NdbNodeBitmask::clear(m_db_nodes);
+ NdbNodeBitmask::clear(m_failed_db_nodes);
+ DBUG_VOID_RETURN;
}//NdbConnection::executeAsynchPrepare()
void NdbConnection::close()
@@ -671,6 +681,8 @@ Remark: Send all operations belonging to this connection.
int
NdbConnection::doSend()
{
+ DBUG_ENTER("NdbConnection::doSend");
+
/*
This method assumes that at least one operation have been defined. This
is ensured by the caller of this routine (=execute).
@@ -693,7 +705,7 @@ NdbConnection::doSend()
theSendStatus = sendTC_OP;
theTransactionIsStarted = true;
tNdb->insert_sent_list(this);
- return 0;
+ DBUG_RETURN(0);
}//case
case sendABORT:
case sendABORTfail:{
@@ -705,20 +717,21 @@ NdbConnection::doSend()
theReturnStatus = ReturnFailure;
}//if
if (sendROLLBACK() == 0) {
- return 0;
+ DBUG_RETURN(0);
}//if
break;
}//case
case sendCOMMITstate:
if (sendCOMMIT() == 0) {
- return 0;
+ DBUG_RETURN(0);
}//if
break;
case sendCompleted:
theNdb->insert_completed_list(this);
- return 0;
+ DBUG_RETURN(0);
default:
- ndbout << "Inconsistent theSendStatus = " << theSendStatus << endl;
+ ndbout << "Inconsistent theSendStatus = "
+ << (Uint32) theSendStatus << endl;
abort();
break;
}//switch
@@ -726,7 +739,7 @@ NdbConnection::doSend()
theReleaseOnClose = true;
theTransactionIsStarted = false;
theCommitStatus = Aborted;
- return -1;
+ DBUG_RETURN(-1);
}//NdbConnection::doSend()
/**************************************************************************
@@ -949,7 +962,7 @@ Remark: Get an operation from NdbOperation object idlelist and
object, synchronous.
*****************************************************************************/
NdbOperation*
-NdbConnection::getNdbOperation(NdbTableImpl * tab, NdbOperation* aNextOp)
+NdbConnection::getNdbOperation(const NdbTableImpl * tab, NdbOperation* aNextOp)
{
NdbOperation* tOp;
@@ -995,7 +1008,7 @@ NdbConnection::getNdbOperation(NdbTableImpl * tab, NdbOperation* aNextOp)
return NULL;
}//NdbConnection::getNdbOperation()
-NdbOperation* NdbConnection::getNdbOperation(NdbDictionary::Table * table)
+NdbOperation* NdbConnection::getNdbOperation(const NdbDictionary::Table * table)
{
if (table)
return getNdbOperation(& NdbTableImpl::getImpl(*table));
@@ -1054,8 +1067,8 @@ NdbConnection::getNdbIndexScanOperation(const char* anIndexName,
}
NdbIndexScanOperation*
-NdbConnection::getNdbIndexScanOperation(NdbIndexImpl* index,
- NdbTableImpl* table)
+NdbConnection::getNdbIndexScanOperation(const NdbIndexImpl* index,
+ const NdbTableImpl* table)
{
if (theCommitStatus == Started){
const NdbTableImpl * indexTable = index->getIndexTable();
@@ -1076,8 +1089,8 @@ NdbConnection::getNdbIndexScanOperation(NdbIndexImpl* index,
}//NdbConnection::getNdbIndexScanOperation()
NdbIndexScanOperation*
-NdbConnection::getNdbIndexScanOperation(NdbDictionary::Index * index,
- NdbDictionary::Table * table)
+NdbConnection::getNdbIndexScanOperation(const NdbDictionary::Index * index,
+ const NdbDictionary::Table * table)
{
if (index && table)
return getNdbIndexScanOperation(& NdbIndexImpl::getImpl(*index),
@@ -1097,7 +1110,7 @@ Remark: Get an operation from NdbScanOperation object idlelist and get t
getOperation will set the theTableId in the NdbOperation object, synchronous.
*****************************************************************************/
NdbIndexScanOperation*
-NdbConnection::getNdbScanOperation(NdbTableImpl * tab)
+NdbConnection::getNdbScanOperation(const NdbTableImpl * tab)
{
NdbIndexScanOperation* tOp;
@@ -1105,15 +1118,8 @@ NdbConnection::getNdbScanOperation(NdbTableImpl * tab)
if (tOp == NULL)
goto getNdbOp_error1;
- // Link scan operation into list of cursor operations
- if (m_theLastScanOperation == NULL)
- m_theFirstScanOperation = m_theLastScanOperation = tOp;
- else {
- m_theLastScanOperation->next(tOp);
- m_theLastScanOperation = tOp;
- }
- tOp->next(NULL);
if (tOp->init(tab, this) != -1) {
+ define_scan_op(tOp);
return tOp;
} else {
theNdb->releaseScanOperation(tOp);
@@ -1125,8 +1131,33 @@ getNdbOp_error1:
return NULL;
}//NdbConnection::getNdbScanOperation()
+void
+NdbConnection::remove_list(NdbOperation*& list, NdbOperation* op){
+ NdbOperation* tmp= list;
+ if(tmp == op)
+ list = op->next();
+ else {
+ while(tmp && tmp->next() != op) tmp = tmp->next();
+ if(tmp)
+ tmp->next(op->next());
+ }
+ op->next(NULL);
+}
+
+void
+NdbConnection::define_scan_op(NdbIndexScanOperation * tOp){
+ // Link scan operation into list of cursor operations
+ if (m_theLastScanOperation == NULL)
+ m_theFirstScanOperation = m_theLastScanOperation = tOp;
+ else {
+ m_theLastScanOperation->next(tOp);
+ m_theLastScanOperation = tOp;
+ }
+ tOp->next(NULL);
+}
+
NdbScanOperation*
-NdbConnection::getNdbScanOperation(NdbDictionary::Table * table)
+NdbConnection::getNdbScanOperation(const NdbDictionary::Table * table)
{
if (table)
return getNdbScanOperation(& NdbTableImpl::getImpl(*table));
@@ -1184,8 +1215,8 @@ Remark: Get an operation from NdbIndexOperation object idlelist and get
getOperation will set the theTableId in the NdbIndexOperation object, synchronous.
*****************************************************************************/
NdbIndexOperation*
-NdbConnection::getNdbIndexOperation(NdbIndexImpl * anIndex,
- NdbTableImpl * aTable,
+NdbConnection::getNdbIndexOperation(const NdbIndexImpl * anIndex,
+ const NdbTableImpl * aTable,
NdbOperation* aNextOp)
{
NdbIndexOperation* tOp;
@@ -1228,8 +1259,8 @@ NdbConnection::getNdbIndexOperation(NdbIndexImpl * anIndex,
}//NdbConnection::getNdbIndexOperation()
NdbIndexOperation*
-NdbConnection::getNdbIndexOperation(NdbDictionary::Index * index,
- NdbDictionary::Table * table)
+NdbConnection::getNdbIndexOperation(const NdbDictionary::Index * index,
+ const NdbDictionary::Table * table)
{
if (index && table)
return getNdbIndexOperation(& NdbIndexImpl::getImpl(*index),
@@ -1507,12 +1538,21 @@ from other transactions.
const Uint32* tPtr = (Uint32 *)&keyConf->operations[0];
Uint32 tNoComp = theNoOfOpCompleted;
for (Uint32 i = 0; i < tNoOfOperations ; i++) {
- tOp = theNdb->void2rec(theNdb->int2void(*tPtr));
- tPtr++;
- const Uint32 tAttrInfoLen = *tPtr;
- tPtr++;
+ tOp = theNdb->void2rec(theNdb->int2void(*tPtr++));
+ const Uint32 tAttrInfoLen = *tPtr++;
if (tOp && tOp->checkMagicNumber()) {
- tNoComp += tOp->execTCOPCONF(tAttrInfoLen);
+ Uint32 done = tOp->execTCOPCONF(tAttrInfoLen);
+ if(tAttrInfoLen > TcKeyConf::SimpleReadBit){
+ Uint32 node = tAttrInfoLen & (~TcKeyConf::SimpleReadBit);
+ NdbNodeBitmask::set(m_db_nodes, node);
+ if(NdbNodeBitmask::get(m_failed_db_nodes, node) && !done)
+ {
+ done = 1;
+ tOp->setErrorCode(4119);
+ theCompletionStatus = CompletedFailure;
+ }
+ }
+ tNoComp += done;
} else {
return -1;
}//if
@@ -1604,6 +1644,10 @@ NdbConnection::receiveTCKEY_FAILCONF(const TcKeyFailConf * failConf)
setOperationErrorCodeAbort(4115);
tOp = NULL;
break;
+ case NdbOperation::NotDefined:
+ case NdbOperation::NotDefined2:
+ assert(false);
+ break;
}//if
}//while
theReleaseOnClose = true;
@@ -1762,7 +1806,7 @@ Parameters: aErrorCode: The error code.
Remark: An operation was completed with failure.
*******************************************************************************/
int
-NdbConnection::OpCompleteFailure()
+NdbConnection::OpCompleteFailure(Uint8 abortOption)
{
Uint32 tNoComp = theNoOfOpCompleted;
Uint32 tNoSent = theNoOfOpSent;
@@ -1776,10 +1820,7 @@ NdbConnection::OpCompleteFailure()
//decide the success of the whole transaction since a simple
//operation is not really part of that transaction.
//------------------------------------------------------------------------
- if (theSimpleState == 1) {
- theCommitStatus = NdbConnection::Aborted;
- }//if
- if (m_abortOption == IgnoreError){
+ if (abortOption == IgnoreError){
/**
* There's always a TCKEYCONF when using IgnoreError
*/
@@ -1814,9 +1855,6 @@ NdbConnection::OpCompleteSuccess()
tNoComp++;
theNoOfOpCompleted = tNoComp;
if (tNoComp == tNoSent) { // Last operation completed
- if (theSimpleState == 1) {
- theCommitStatus = NdbConnection::Committed;
- }//if
return 0;
} else if (tNoComp < tNoSent) {
return -1; // Continue waiting for more signals
@@ -1891,14 +1929,14 @@ NdbConnection::printState()
CASE(Connected);
CASE(DisConnecting);
CASE(ConnectFailure);
- default: ndbout << theStatus;
+ default: ndbout << (Uint32) theStatus;
}
switch (theListState) {
CASE(NotInList);
CASE(InPreparedList);
CASE(InSendList);
CASE(InCompletedList);
- default: ndbout << theListState;
+ default: ndbout << (Uint32) theListState;
}
switch (theSendStatus) {
CASE(NotInit);
@@ -1911,7 +1949,7 @@ NdbConnection::printState()
CASE(sendTC_ROLLBACK);
CASE(sendTC_COMMIT);
CASE(sendTC_OP);
- default: ndbout << theSendStatus;
+ default: ndbout << (Uint32) theSendStatus;
}
switch (theCommitStatus) {
CASE(NotStarted);
@@ -1919,16 +1957,56 @@ NdbConnection::printState()
CASE(Committed);
CASE(Aborted);
CASE(NeedAbort);
- default: ndbout << theCommitStatus;
+ default: ndbout << (Uint32) theCommitStatus;
}
switch (theCompletionStatus) {
CASE(NotCompleted);
CASE(CompletedSuccess);
CASE(CompletedFailure);
CASE(DefinitionFailure);
- default: ndbout << theCompletionStatus;
+ default: ndbout << (Uint32) theCompletionStatus;
}
ndbout << endl;
}
#undef CASE
#endif
+
+int
+NdbConnection::report_node_failure(Uint32 id){
+ NdbNodeBitmask::set(m_failed_db_nodes, id);
+ if(!NdbNodeBitmask::get(m_db_nodes, id))
+ {
+ return 0;
+ }
+
+ /**
+ * Arrived
+ * TCKEYCONF TRANSIDAI
+ * 1) - -
+ * 2) - X
+ * 3) X -
+ * 4) X X
+ */
+ NdbOperation* tmp = theFirstExecOpInList;
+ const Uint32 len = TcKeyConf::SimpleReadBit | id;
+ Uint32 tNoComp = theNoOfOpCompleted;
+ Uint32 tNoSent = theNoOfOpSent;
+ while(tmp != 0)
+ {
+ if(tmp->theReceiver.m_expected_result_length == len &&
+ tmp->theReceiver.m_received_result_length == 0)
+ {
+ tNoComp++;
+ tmp->theError.code = 4119;
+ }
+ tmp = tmp->next();
+ }
+ theNoOfOpCompleted = tNoComp;
+ if(tNoComp == tNoSent)
+ {
+ theError.code = 4119;
+ theCompletionStatus = NdbConnection::CompletedFailure;
+ return 1;
+ }
+ return 0;
+}
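
report_node_failure() records the failed node in m_failed_db_nodes and only fails the transaction if m_db_nodes shows operations were actually routed to that node; both members are word-array bitmasks indexed by node id (the CHECK_SZ asserts in the constructor guard their sizes). A standalone sketch of that set/test bookkeeping with a plain fixed-size mask — NdbNodeBitmask is the real helper, the sizes here are illustrative:

    #include <cstdint>

    const unsigned MAX_NODES_SKETCH = 64;                // illustrative, not the real limit
    const unsigned MASK_WORDS = (MAX_NODES_SKETCH + 31) / 32;

    struct NodeMask {
      uint32_t w[MASK_WORDS] = {0};
      void set(unsigned id)       { w[id >> 5] |= (1u << (id & 31)); }
      bool get(unsigned id) const { return (w[id >> 5] >> (id & 31)) & 1u; }
    };

    struct TxnNodes {
      NodeMask touched;   // nodes this transaction sent operations to
      NodeMask failed;    // nodes reported down while the transaction was in flight

      // Returns true if the failure affects this transaction (caller then fails it).
      bool report_node_failure(unsigned id) {
        failed.set(id);
        return touched.get(id);   // untouched node: nothing to do
      }
    };
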
diff --git a/ndb/src/ndbapi/NdbConnectionScan.cpp b/ndb/src/ndbapi/NdbConnectionScan.cpp
index 0c4490015ff..3fe8993a42b 100644
--- a/ndb/src/ndbapi/NdbConnectionScan.cpp
+++ b/ndb/src/ndbapi/NdbConnectionScan.cpp
@@ -99,11 +99,12 @@ NdbConnection::receiveSCAN_TABCONF(NdbApiSignal* aSignal,
}
for(Uint32 i = 0; i<len; i += 3){
+ Uint32 opCount, totalLen;
Uint32 ptrI = * ops++;
Uint32 tcPtrI = * ops++;
Uint32 info = * ops++;
- Uint32 opCount = ScanTabConf::getRows(info);
- Uint32 totalLen = ScanTabConf::getLength(info);
+ opCount = ScanTabConf::getRows(info);
+ totalLen = ScanTabConf::getLength(info);
void * tPtr = theNdb->int2void(ptrI);
assert(tPtr); // For now
@@ -119,12 +120,21 @@ NdbConnection::receiveSCAN_TABCONF(NdbApiSignal* aSignal,
}
}
}
+ if (conf->requestInfo & ScanTabConf::EndOfData) {
+ if(theScanningOp->m_ordered)
+ theScanningOp->m_api_receivers_count = 0;
+ if(theScanningOp->m_api_receivers_count +
+ theScanningOp->m_conf_receivers_count +
+ theScanningOp->m_sent_receivers_count){
+ abort();
+ }
+ }
return 0;
} else {
#ifdef NDB_NO_DROPPED_SIGNAL
abort();
#endif
}
-
+
return -1;
}
diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp
index 4b30f41b51d..c8414ec16a3 100644
--- a/ndb/src/ndbapi/NdbDictionary.cpp
+++ b/ndb/src/ndbapi/NdbDictionary.cpp
@@ -30,7 +30,7 @@ NdbDictionary::Column::Column(const char * name)
NdbDictionary::Column::Column(const NdbDictionary::Column & org)
: m_impl(* new NdbColumnImpl(* this))
{
- m_impl.assign(org.m_impl);
+ m_impl = org.m_impl;
}
NdbDictionary::Column::Column(NdbColumnImpl& impl)
@@ -65,7 +65,7 @@ NdbDictionary::Column::getName() const {
void
NdbDictionary::Column::setType(Type t){
- m_impl.m_type = t;
+ m_impl.init(t);
}
NdbDictionary::Column::Type
@@ -103,6 +103,54 @@ NdbDictionary::Column::getLength() const{
return m_impl.m_length;
}
+void
+NdbDictionary::Column::setInlineSize(int size)
+{
+ m_impl.m_precision = size;
+}
+
+void
+NdbDictionary::Column::setCharset(CHARSET_INFO* cs)
+{
+ m_impl.m_cs = cs;
+}
+
+CHARSET_INFO*
+NdbDictionary::Column::getCharset() const
+{
+ return m_impl.m_cs;
+}
+
+int
+NdbDictionary::Column::getInlineSize() const
+{
+ return m_impl.m_precision;
+}
+
+void
+NdbDictionary::Column::setPartSize(int size)
+{
+ m_impl.m_scale = size;
+}
+
+int
+NdbDictionary::Column::getPartSize() const
+{
+ return m_impl.m_scale;
+}
+
+void
+NdbDictionary::Column::setStripeSize(int size)
+{
+ m_impl.m_length = size;
+}
+
+int
+NdbDictionary::Column::getStripeSize() const
+{
+ return m_impl.m_length;
+}
+
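
The new accessors expose the blob sizing knobs (inline size, part size, stripe size) and the column charset directly on NdbDictionary::Column, stored in the existing precision/scale/length fields. A hedged usage sketch, assuming the NDB API headers from this tree and that my_charset_latin1_bin is visible via my_sys.h as in NdbDictionaryImpl.cpp below:

    #include <NdbDictionary.hpp>
    #include <my_sys.h>                                  // CHARSET_INFO, my_charset_latin1_bin (assumption)

    void define_text_column(NdbDictionary::Table& tab)
    {
      NdbDictionary::Column col("comment");
      col.setType(NdbDictionary::Column::Text);          // setType() now routes through init(t)
      col.setInlineSize(256);                            // bytes kept in the main tuple
      col.setPartSize(2000);                             // bytes per blob part row
      col.setStripeSize(4);                              // parts striped over this many fragments
      col.setCharset(&my_charset_latin1_bin);            // same collation init(t) defaults to
      tab.addColumn(col);
    }
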
int
NdbDictionary::Column::getSize() const{
return m_impl.m_attrSize;
@@ -229,7 +277,8 @@ NdbDictionary::Table::Table(const char * name)
}
NdbDictionary::Table::Table(const NdbDictionary::Table & org)
- : m_impl(* new NdbTableImpl(* this))
+ : NdbDictionary::Object(),
+ m_impl(* new NdbTableImpl(* this))
{
m_impl.assign(org.m_impl);
}
@@ -272,11 +321,8 @@ NdbDictionary::Table::getTableId() const {
void
NdbDictionary::Table::addColumn(const Column & c){
- // JONAS check this out!!
- // Memory leak, the new Column will not be freed
- //NdbDictionary::Column * col = new Column(c);
NdbColumnImpl* col = new NdbColumnImpl;
- col->assign(NdbColumnImpl::getImpl(c));
+ (* col) = NdbColumnImpl::getImpl(c);
m_impl.m_columns.push_back(col);
if(c.getPrimaryKey()){
m_impl.m_noOfKeys++;
@@ -491,11 +537,8 @@ NdbDictionary::Index::getIndexColumn(int no) const {
void
NdbDictionary::Index::addColumn(const Column & c){
- // JONAS check this out!!
- // Memory leak, the new Column will not be freed
- //NdbDictionary::Column * col = new Column(c);
NdbColumnImpl* col = new NdbColumnImpl;
- col->assign(NdbColumnImpl::getImpl(c));
+ (* col) = NdbColumnImpl::getImpl(c);
m_impl.m_columns.push_back(col);
}
@@ -605,11 +648,8 @@ NdbDictionary::Event::setDurability(const EventDurability d)
void
NdbDictionary::Event::addColumn(const Column & c){
- // JONAS check this out!!
- // Memory leak, the new Column will not be freed
- //NdbDictionary::Column * col = new Column(c);
NdbColumnImpl* col = new NdbColumnImpl;
- col->assign(NdbColumnImpl::getImpl(c));
+ (* col) = NdbColumnImpl::getImpl(c);
m_impl.m_columns.push_back(col);
}
@@ -690,13 +730,23 @@ NdbDictionary::Dictionary::alterTable(const Table & t){
}
const NdbDictionary::Table *
-NdbDictionary::Dictionary::getTable(const char * name){
- NdbTableImpl * t = m_impl.getTable(name);
+NdbDictionary::Dictionary::getTable(const char * name, void **data){
+ NdbTableImpl * t = m_impl.getTable(name, data);
if(t)
return t->m_facade;
return 0;
}
+void NdbDictionary::Dictionary::set_local_table_data_size(unsigned sz)
+{
+ m_impl.m_local_table_data_size= sz;
+}
+
+const NdbDictionary::Table *
+NdbDictionary::Dictionary::getTable(const char * name){
+ return getTable(name, 0);
+}
+
void
NdbDictionary::Dictionary::invalidateTable(const char * name){
NdbTableImpl * t = m_impl.getTable(name);
@@ -806,7 +856,12 @@ NdbDictionary::Dictionary::listObjects(List& list, Object::Type type)
int
NdbDictionary::Dictionary::listIndexes(List& list, const char * tableName)
{
- return m_impl.listIndexes(list, tableName);
+ const NdbDictionary::Table* tab= getTable(tableName);
+ if(tab == 0)
+ {
+ return -1;
+ }
+ return m_impl.listIndexes(list, tab->getTableId());
}
const struct NdbError &
@@ -819,6 +874,8 @@ NdbDictionary::Dictionary::getNdbError() const {
NdbOut&
operator<<(NdbOut& out, const NdbDictionary::Column& col)
{
+ const CHARSET_INFO *cs = col.getCharset();
+ const char *csname = cs ? cs->name : "?";
out << col.getName() << " ";
switch (col.getType()) {
case NdbDictionary::Column::Tinyint:
@@ -861,10 +918,10 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col)
out << "Decimal(" << col.getScale() << "," << col.getPrecision() << ")";
break;
case NdbDictionary::Column::Char:
- out << "Char(" << col.getLength() << ")";
+ out << "Char(" << col.getLength() << ";" << csname << ")";
break;
case NdbDictionary::Column::Varchar:
- out << "Varchar(" << col.getLength() << ")";
+ out << "Varchar(" << col.getLength() << ";" << csname << ")";
break;
case NdbDictionary::Column::Binary:
out << "Binary(" << col.getLength() << ")";
@@ -884,7 +941,7 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col)
break;
case NdbDictionary::Column::Text:
out << "Text(" << col.getInlineSize() << "," << col.getPartSize()
- << ";" << col.getStripeSize() << ")";
+ << ";" << col.getStripeSize() << ";" << csname << ")";
break;
case NdbDictionary::Column::Undefined:
out << "Undefined";
@@ -901,3 +958,8 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col)
out << " NULL";
return out;
}
+
+const NdbDictionary::Column * NdbDictionary::Column::FRAGMENT = 0;
+const NdbDictionary::Column * NdbDictionary::Column::ROW_COUNT = 0;
+const NdbDictionary::Column * NdbDictionary::Column::COMMIT_COUNT = 0;
+
diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index cb5e3b3c821..cf51a30fe0b 100644
--- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -35,6 +35,8 @@
#include <NdbEventOperation.hpp>
#include "NdbEventOperationImpl.hpp"
#include "NdbBlob.hpp"
+#include <AttributeHeader.hpp>
+#include <my_sys.h>
#define DEBUG_PRINT 0
#define INCOMPATIBLE_VERSION -2
@@ -63,6 +65,7 @@ NdbColumnImpl::operator=(const NdbColumnImpl& col)
m_name = col.m_name;
m_type = col.m_type;
m_precision = col.m_precision;
+ m_cs = col.m_cs;
m_scale = col.m_scale;
m_length = col.m_length;
m_pk = col.m_pk;
@@ -86,10 +89,69 @@ NdbColumnImpl::operator=(const NdbColumnImpl& col)
}
void
-NdbColumnImpl::init()
+NdbColumnImpl::init(Type t)
{
+ // do not use default_charset_info as it may not be initialized yet
+ // use binary collation until NDB tests can handle charsets
+ CHARSET_INFO* default_cs = &my_charset_latin1_bin;
m_attrId = -1;
- m_type = NdbDictionary::Column::Unsigned;
+ m_type = t;
+ switch (m_type) {
+ case Tinyint:
+ case Tinyunsigned:
+ case Smallint:
+ case Smallunsigned:
+ case Mediumint:
+ case Mediumunsigned:
+ case Int:
+ case Unsigned:
+ case Bigint:
+ case Bigunsigned:
+ case Float:
+ case Double:
+ m_precision = 0;
+ m_scale = 0;
+ m_length = 1;
+ m_cs = NULL;
+ break;
+ case Decimal:
+ m_precision = 10;
+ m_scale = 0;
+ m_length = 1;
+ m_cs = NULL;
+ break;
+ case Char:
+ case Varchar:
+ m_precision = 0;
+ m_scale = 0;
+ m_length = 1;
+ m_cs = default_cs;
+ break;
+ case Binary:
+ case Varbinary:
+ case Datetime:
+ case Timespec:
+ m_precision = 0;
+ m_scale = 0;
+ m_length = 1;
+ m_cs = NULL;
+ break;
+ case Blob:
+ m_precision = 256;
+ m_scale = 8000;
+ m_length = 4;
+ m_cs = NULL;
+ break;
+ case Text:
+ m_precision = 256;
+ m_scale = 8000;
+ m_length = 4;
+ m_cs = default_cs;
+ break;
+ case Undefined:
+ assert(false);
+ break;
+ }
m_pk = false;
m_nullable = false;
m_tupleKey = false;
@@ -97,12 +159,10 @@ NdbColumnImpl::init()
m_distributionKey = false;
m_distributionGroup = false;
m_distributionGroupBits = 8;
- m_length = 1;
- m_scale = 5;
- m_precision = 5;
m_keyInfoPos = 0;
- m_attrSize = 4,
- m_arraySize = 1,
+ // next 2 are set at run time
+ m_attrSize = 0;
+ m_arraySize = 0;
m_autoIncrement = false;
m_autoIncrementInitialValue = 1;
m_blobTable = NULL;
@@ -145,84 +205,45 @@ NdbColumnImpl::equal(const NdbColumnImpl& col) const
return false;
}
}
- if(m_length != col.m_length){
+ if (m_precision != col.m_precision ||
+ m_scale != col.m_scale ||
+ m_length != col.m_length ||
+ m_cs != col.m_cs) {
return false;
}
-
- switch(m_type){
- case NdbDictionary::Column::Undefined:
- break;
- case NdbDictionary::Column::Tinyint:
- case NdbDictionary::Column::Tinyunsigned:
- case NdbDictionary::Column::Smallint:
- case NdbDictionary::Column::Smallunsigned:
- case NdbDictionary::Column::Mediumint:
- case NdbDictionary::Column::Mediumunsigned:
- case NdbDictionary::Column::Int:
- case NdbDictionary::Column::Unsigned:
- case NdbDictionary::Column::Float:
- break;
- case NdbDictionary::Column::Decimal:
- if(m_scale != col.m_scale ||
- m_precision != col.m_precision){
- return false;
- }
- break;
- case NdbDictionary::Column::Char:
- case NdbDictionary::Column::Varchar:
- case NdbDictionary::Column::Binary:
- case NdbDictionary::Column::Varbinary:
- if(m_length != col.m_length){
- return false;
- }
- break;
- case NdbDictionary::Column::Bigint:
- case NdbDictionary::Column::Bigunsigned:
- case NdbDictionary::Column::Double:
- case NdbDictionary::Column::Datetime:
- case NdbDictionary::Column::Timespec:
- break;
- case NdbDictionary::Column::Blob:
- case NdbDictionary::Column::Text:
- if (m_precision != col.m_precision ||
- m_scale != col.m_scale ||
- m_length != col.m_length) {
- return false;
- }
- break;
- }
if (m_autoIncrement != col.m_autoIncrement){
return false;
}
if(strcmp(m_defaultValue.c_str(), col.m_defaultValue.c_str()) != 0){
return false;
}
-
+
return true;
}
-void
-NdbColumnImpl::assign(const NdbColumnImpl& org)
-{
- m_attrId = org.m_attrId;
- m_name.assign(org.m_name);
- m_type = org.m_type;
- m_precision = org.m_precision;
- m_scale = org.m_scale;
- m_length = org.m_length;
- m_pk = org.m_pk;
- m_tupleKey = org.m_tupleKey;
- m_distributionKey = org.m_distributionKey;
- m_distributionGroup = org.m_distributionGroup;
- m_distributionGroupBits = org.m_distributionGroupBits;
- m_nullable = org.m_nullable;
- m_indexOnly = org.m_indexOnly;
- m_autoIncrement = org.m_autoIncrement;
- m_autoIncrementInitialValue = org.m_autoIncrementInitialValue;
- m_defaultValue.assign(org.m_defaultValue);
- m_keyInfoPos = org.m_keyInfoPos;
- m_attrSize = org.m_attrSize;
- m_arraySize = org.m_arraySize;
+NdbDictionary::Column *
+NdbColumnImpl::create_psuedo(const char * name){
+ NdbDictionary::Column * col = new NdbDictionary::Column();
+ col->setName(name);
+ if(!strcmp(name, "NDB$FRAGMENT")){
+ col->setType(NdbDictionary::Column::Unsigned);
+ col->m_impl.m_attrId = AttributeHeader::FRAGMENT;
+ col->m_impl.m_attrSize = 4;
+ col->m_impl.m_arraySize = 1;
+ } else if(!strcmp(name, "NDB$ROW_COUNT")){
+ col->setType(NdbDictionary::Column::Bigunsigned);
+ col->m_impl.m_attrId = AttributeHeader::ROW_COUNT;
+ col->m_impl.m_attrSize = 8;
+ col->m_impl.m_arraySize = 1;
+ } else if(!strcmp(name, "NDB$COMMIT_COUNT")){
+ col->setType(NdbDictionary::Column::Bigunsigned);
+ col->m_impl.m_attrId = AttributeHeader::COMMIT_COUNT;
+ col->m_impl.m_attrSize = 8;
+ col->m_impl.m_arraySize = 1;
+ } else {
+ abort();
+ }
+ return col;
}
/**
@@ -332,7 +353,7 @@ NdbTableImpl::assign(const NdbTableImpl& org)
for(unsigned i = 0; i<org.m_columns.size(); i++){
NdbColumnImpl * col = new NdbColumnImpl();
const NdbColumnImpl * iorg = org.m_columns[i];
- col->assign(* iorg);
+ (* col) = (* iorg);
m_columns.push_back(col);
}
@@ -385,7 +406,7 @@ void
NdbTableImpl::buildColumnHash(){
const Uint32 size = m_columns.size();
- size_t i;
+ int i;
for(i = 31; i >= 0; i--){
if(((1 << i) & size) != 0){
m_columnHashMask = (1 << (i + 1)) - 1;
@@ -395,7 +416,7 @@ NdbTableImpl::buildColumnHash(){
Vector<Uint32> hashValues;
Vector<Vector<Uint32> > chains; chains.fill(size, hashValues);
- for(i = 0; i<size; i++){
+ for(i = 0; i< (int) size; i++){
Uint32 hv = Hash(m_columns[i]->getName()) & 0xFFFE;
Uint32 bucket = hv & m_columnHashMask;
bucket = (bucket < size ? bucket : bucket - size);
@@ -409,7 +430,7 @@ NdbTableImpl::buildColumnHash(){
m_columnHash.fill((unsigned)size-1, tmp); // Default no chaining
Uint32 pos = 0; // In overflow vector
- for(i = 0; i<size; i++){
+ for(i = 0; i< (int) size; i++){
Uint32 sz = chains[i].size();
if(sz == 1){
Uint32 col = chains[i][0];
@@ -581,6 +602,7 @@ NdbDictionaryImpl::NdbDictionaryImpl(Ndb &ndb)
m_ndb(ndb)
{
m_globalHash = 0;
+ m_local_table_data_size= 0;
}
NdbDictionaryImpl::NdbDictionaryImpl(Ndb &ndb,
@@ -591,29 +613,72 @@ NdbDictionaryImpl::NdbDictionaryImpl(Ndb &ndb,
m_ndb(ndb)
{
m_globalHash = 0;
+ m_local_table_data_size= 0;
}
+static int f_dictionary_count = 0;
+
NdbDictionaryImpl::~NdbDictionaryImpl()
{
- NdbElement_t<NdbTableImpl> * curr = m_localHash.m_tableHash.getNext(0);
- while(curr != 0){
+ NdbElement_t<Ndb_local_table_info> * curr = m_localHash.m_tableHash.getNext(0);
+ if(m_globalHash){
+ while(curr != 0){
+ m_globalHash->lock();
+ m_globalHash->release(curr->theData->m_table_impl);
+ Ndb_local_table_info::destroy(curr->theData);
+ m_globalHash->unlock();
+
+ curr = m_localHash.m_tableHash.getNext(curr);
+ }
+
m_globalHash->lock();
- m_globalHash->release(curr->theData);
+ if(--f_dictionary_count == 0){
+ delete NdbDictionary::Column::FRAGMENT;
+ delete NdbDictionary::Column::ROW_COUNT;
+ delete NdbDictionary::Column::COMMIT_COUNT;
+ NdbDictionary::Column::FRAGMENT= 0;
+ NdbDictionary::Column::ROW_COUNT= 0;
+ NdbDictionary::Column::COMMIT_COUNT= 0;
+ }
m_globalHash->unlock();
-
- curr = m_localHash.m_tableHash.getNext(curr);
+ } else {
+ assert(curr == 0);
}
}
-void
-initDict(NdbDictionary::Dictionary & d)
+Ndb_local_table_info *
+NdbDictionaryImpl::fetchGlobalTableImpl(const char * internalTableName)
{
- TransporterFacade * tf = TransporterFacade::instance();
- NdbDictionaryImpl & impl = NdbDictionaryImpl::getImpl(d);
+ NdbTableImpl *impl;
+
+ m_globalHash->lock();
+ impl = m_globalHash->get(internalTableName);
+ m_globalHash->unlock();
+
+ if (impl == 0){
+ impl = m_receiver.getTable(internalTableName,
+ m_ndb.usingFullyQualifiedNames());
+ m_globalHash->lock();
+ m_globalHash->put(internalTableName, impl);
+ m_globalHash->unlock();
+
+ if(impl == 0){
+ return 0;
+ }
+ }
+
+ Ndb_local_table_info *info=
+ Ndb_local_table_info::create(impl, m_local_table_data_size);
+
+ m_localHash.put(internalTableName, info);
+
+ m_ndb.theFirstTupleId[impl->getTableId()] = ~0;
+ m_ndb.theLastTupleId[impl->getTableId()] = ~0;
- impl.m_receiver.setTransporter(tf);
+ return info;
}
+#if 0
bool
NdbDictionaryImpl::setTransporter(class TransporterFacade * tf)
{
@@ -624,13 +689,27 @@ NdbDictionaryImpl::setTransporter(class TransporterFacade * tf)
return false;
}
+#endif
bool
NdbDictionaryImpl::setTransporter(class Ndb* ndb,
class TransporterFacade * tf)
{
m_globalHash = &tf->m_globalDictCache;
- return m_receiver.setTransporter(ndb, tf);
+ if(m_receiver.setTransporter(ndb, tf)){
+ m_globalHash->lock();
+ if(f_dictionary_count++ == 0){
+ NdbDictionary::Column::FRAGMENT=
+ NdbColumnImpl::create_psuedo("NDB$FRAGMENT");
+ NdbDictionary::Column::ROW_COUNT=
+ NdbColumnImpl::create_psuedo("NDB$ROW_COUNT");
+ NdbDictionary::Column::COMMIT_COUNT=
+ NdbColumnImpl::create_psuedo("NDB$COMMIT_COUNT");
+ }
+ m_globalHash->unlock();
+ return true;
+ }
+ return false;
}
NdbTableImpl *
@@ -643,6 +722,7 @@ NdbDictionaryImpl::getIndexTable(NdbIndexImpl * index,
return getTable(m_ndb.externalizeTableName(internalName));
}
+#if 0
bool
NdbDictInterface::setTransporter(class TransporterFacade * tf)
{
@@ -666,11 +746,11 @@ NdbDictInterface::setTransporter(class TransporterFacade * tf)
return true;
}
+#endif
bool
NdbDictInterface::setTransporter(class Ndb* ndb, class TransporterFacade * tf)
{
- m_blockNumber = -1;
m_reference = ndb->getReference();
m_transporter = tf;
m_waiter.m_mutex = tf->theMutexPtr;
@@ -680,10 +760,6 @@ NdbDictInterface::setTransporter(class Ndb* ndb, class TransporterFacade * tf)
NdbDictInterface::~NdbDictInterface()
{
- if (m_transporter != NULL){
- if (m_blockNumber != -1)
- m_transporter->close(m_blockNumber);
- }
}
void
@@ -770,7 +846,7 @@ NdbDictInterface::execSignal(void* dictImpl,
}
void
-NdbDictInterface::execNodeStatus(void* dictImpl, NodeId aNode,
+NdbDictInterface::execNodeStatus(void* dictImpl, Uint32 aNode,
bool alive, bool nfCompleted)
{
NdbDictInterface * tmp = (NdbDictInterface*)dictImpl;
@@ -795,6 +871,8 @@ NdbDictInterface::dictSignal(NdbApiSignal* signal,
const int noerrcodes,
const int temporaryMask)
{
+ DBUG_ENTER("NdbDictInterface::dictSignal");
+ DBUG_PRINT("enter", ("useMasterNodeId: %d", useMasterNodeId));
for(Uint32 i = 0; i<RETRIES; i++){
//if (useMasterNodeId == 0)
m_buffer.clear();
@@ -814,7 +892,7 @@ NdbDictInterface::dictSignal(NdbApiSignal* signal,
if(aNodeId == 0){
m_error.code = 4009;
m_transporter->unlock_mutex();
- return -1;
+ DBUG_RETURN(-1);
}
{
int r;
@@ -850,7 +928,7 @@ NdbDictInterface::dictSignal(NdbApiSignal* signal,
if(m_waiter.m_state == NO_WAIT && m_error.code == 0){
// Normal return
- return 0;
+ DBUG_RETURN(0);
}
/**
@@ -873,9 +951,9 @@ NdbDictInterface::dictSignal(NdbApiSignal* signal,
continue;
}
- return -1;
+ DBUG_RETURN(-1);
}
- return -1;
+ DBUG_RETURN(-1);
}
/*****************************************************************
@@ -1074,6 +1152,7 @@ indexTypeMapping[] = {
{ -1, -1 }
};
+// TODO: remove, api-kernel type codes must match now
static const
ApiKernelMapping
columnTypeMapping[] = {
@@ -1180,9 +1259,23 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
return 703;
}
col->m_extType = attrDesc.AttributeExtType;
- col->m_precision = attrDesc.AttributeExtPrecision;
+ col->m_precision = (attrDesc.AttributeExtPrecision & 0xFFFF);
col->m_scale = attrDesc.AttributeExtScale;
col->m_length = attrDesc.AttributeExtLength;
+ // charset in upper half of precision
+ unsigned cs_number = (attrDesc.AttributeExtPrecision >> 16);
+ // charset is defined exactly for char types
+ if (col->getCharType() != (cs_number != 0)) {
+ delete impl;
+ return 703;
+ }
+ if (col->getCharType()) {
+ col->m_cs = get_charset(cs_number, MYF(0));
+ if (col->m_cs == NULL) {
+ delete impl;
+ return 743;
+ }
+ }
// translate to old kernel types and sizes
if (! attrDesc.translateExtType()) {
@@ -1223,6 +1316,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
impl->m_columns[attrDesc.AttributeId] = col;
it.next();
}
+
impl->m_noOfKeys = keyCount;
impl->m_keyLenInWords = keyInfoPos;
impl->m_sizeOfKeysInWords = keyInfoPos;
@@ -1242,12 +1336,13 @@ NdbDictionaryImpl::createTable(NdbTableImpl &t)
if (t.m_noOfBlobs == 0)
return 0;
// update table def from DICT
- NdbTableImpl * tp = getTable(t.m_externalName.c_str());
- if (tp == NULL) {
+ Ndb_local_table_info *info=
+ get_local_table_info(t.m_internalName.c_str(),false);
+ if (info == NULL) {
m_error.code = 709;
return -1;
}
- if (createBlobTables(* tp) != 0) {
+ if (createBlobTables(*(info->m_table_impl)) != 0) {
int save_code = m_error.code;
(void)dropTable(t);
m_error.code = save_code;
@@ -1268,8 +1363,12 @@ NdbDictionaryImpl::createBlobTables(NdbTableImpl &t)
if (createTable(bt) != 0)
return -1;
// Save BLOB table handle
- NdbTableImpl * cachedBlobTable = getTable(bt.m_externalName.c_str());
- c.m_blobTable = cachedBlobTable;
+ Ndb_local_table_info *info=
+ get_local_table_info(bt.m_internalName.c_str(),false);
+ if (info == 0) {
+ return -1;
+ }
+ c.m_blobTable = info->m_table_impl;
}
return 0;
@@ -1278,14 +1377,22 @@ NdbDictionaryImpl::createBlobTables(NdbTableImpl &t)
int
NdbDictionaryImpl::addBlobTables(NdbTableImpl &t)
{
- for (unsigned i = 0; i < t.m_columns.size(); i++) {
+ unsigned n= t.m_noOfBlobs;
+ // optimized for blob column being the last one
+  // and not looking for more than one if not necessary
+ for (unsigned i = t.m_columns.size(); i > 0 && n > 0;) {
+ i--;
NdbColumnImpl & c = *t.m_columns[i];
if (! c.getBlobType() || c.getPartSize() == 0)
continue;
+ n--;
char btname[NdbBlob::BlobTableNameSize];
NdbBlob::getBlobTableName(btname, &t, &c);
// Save BLOB table handle
- NdbTableImpl * cachedBlobTable = getTable(btname);;
+ NdbTableImpl * cachedBlobTable = getTable(btname);
+ if (cachedBlobTable == 0) {
+ return -1;
+ }
c.m_blobTable = cachedBlobTable;
}
@@ -1318,16 +1425,15 @@ int NdbDictionaryImpl::alterTable(NdbTableImpl &impl)
// Remove cached information and let it be refreshed at next access
if (m_localHash.get(originalInternalName) != NULL) {
m_localHash.drop(originalInternalName);
+ m_globalHash->lock();
NdbTableImpl * cachedImpl = m_globalHash->get(originalInternalName);
// If in local cache it must be in global
if (!cachedImpl)
abort();
- m_globalHash->lock();
m_globalHash->drop(cachedImpl);
m_globalHash->unlock();
}
}
-
return ret;
}
@@ -1363,12 +1469,12 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
impl.m_internalName.assign(internalName);
UtilBufferWriter w(m_buffer);
DictTabInfo::Table tmpTab; tmpTab.init();
- snprintf(tmpTab.TableName,
+ BaseString::snprintf(tmpTab.TableName,
sizeof(tmpTab.TableName),
internalName);
bool haveAutoIncrement = false;
- Uint64 autoIncrementValue;
+ Uint64 autoIncrementValue = 0;
for(i = 0; i<sz; i++){
const NdbColumnImpl * col = impl.m_columns[i];
if(col == 0)
@@ -1419,7 +1525,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
continue;
DictTabInfo::Attribute tmpAttr; tmpAttr.init();
- snprintf(tmpAttr.AttributeName, sizeof(tmpAttr.AttributeName),
+ BaseString::snprintf(tmpAttr.AttributeName, sizeof(tmpAttr.AttributeName),
col->m_name.c_str());
tmpAttr.AttributeId = i;
tmpAttr.AttributeKeyFlag = col->m_pk || col->m_tupleKey;
@@ -1432,15 +1538,29 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
getKernelConstant(col->m_type,
columnTypeMapping,
DictTabInfo::ExtUndefined);
- tmpAttr.AttributeExtPrecision = col->m_precision;
+ tmpAttr.AttributeExtPrecision = ((unsigned)col->m_precision & 0xFFFF);
tmpAttr.AttributeExtScale = col->m_scale;
tmpAttr.AttributeExtLength = col->m_length;
+ // charset is defined exactly for char types
+ if (col->getCharType() != (col->m_cs != NULL)) {
+ m_error.code = 703;
+ return -1;
+ }
+ // primary key type check
+ if (col->m_pk && ! NdbSqlUtil::usable_in_pk(col->m_type, col->m_cs)) {
+ m_error.code = 743;
+ return -1;
+ }
+ // charset in upper half of precision
+ if (col->getCharType()) {
+ tmpAttr.AttributeExtPrecision |= (col->m_cs->number << 16);
+ }
// DICT will ignore and recompute this
(void)tmpAttr.translateExtType();
tmpAttr.AttributeAutoIncrement = col->m_autoIncrement;
- snprintf(tmpAttr.AttributeDefaultValue,
+ BaseString::snprintf(tmpAttr.AttributeDefaultValue,
sizeof(tmpAttr.AttributeDefaultValue),
col->m_defaultValue.c_str());
s = SimpleProperties::pack(w,
@@ -1483,10 +1603,13 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
: createTable(&tSignal, ptr);
if (!alter && haveAutoIncrement) {
- // if (!ndb.setAutoIncrementValue(impl.m_internalName.c_str(), autoIncrementValue)) {
- if (!ndb.setAutoIncrementValue(impl.m_externalName.c_str(), autoIncrementValue)) {
- m_error.code = 4336;
- ndb.theError = m_error;
+ if (!ndb.setAutoIncrementValue(impl.m_externalName.c_str(),
+ autoIncrementValue)) {
+ if (ndb.theError.code == 0) {
+ m_error.code = 4336;
+ ndb.theError = m_error;
+ } else
+ m_error= ndb.theError;
ret = -1; // errorcode set in initialize_autoincrement
}
}
@@ -1501,7 +1624,6 @@ NdbDictInterface::createTable(NdbApiSignal* signal, LinearSectionPtr ptr[3])
SimplePropertiesLinearReader r(ptr[0].p, ptr[0].sz);
r.printAll(ndbout);
#endif
-
const int noErrCodes = 2;
int errCodes[noErrCodes] =
{CreateTableRef::Busy,
@@ -1519,7 +1641,10 @@ void
NdbDictInterface::execCREATE_TABLE_CONF(NdbApiSignal * signal,
LinearSectionPtr ptr[3])
{
- //CreateTableConf* const conf = CAST_PTR(CreateTableConf, signal->getDataPtr());
+ const CreateTableConf* const conf=
+ CAST_CONSTPTR(CreateTableConf, signal->getDataPtr());
+ Uint32 tableId= conf->tableId;
+ Uint32 tableVersion= conf->tableVersion;
m_waiter.signal(NO_WAIT);
}
@@ -1528,7 +1653,8 @@ void
NdbDictInterface::execCREATE_TABLE_REF(NdbApiSignal * signal,
LinearSectionPtr ptr[3])
{
- const CreateTableRef* const ref = CAST_CONSTPTR(CreateTableRef, signal->getDataPtr());
+ const CreateTableRef* const ref=
+ CAST_CONSTPTR(CreateTableRef, signal->getDataPtr());
m_error.code = ref->errorCode;
m_masterNodeId = ref->masterNodeId;
m_waiter.signal(NO_WAIT);
@@ -1542,7 +1668,6 @@ NdbDictInterface::alterTable(NdbApiSignal* signal, LinearSectionPtr ptr[3])
SimplePropertiesLinearReader r(ptr[0].p, ptr[0].sz);
r.printAll(ndbout);
#endif
-
const int noErrCodes = 2;
int errCodes[noErrCodes] =
{AlterTableRef::NotMaster,
@@ -1585,6 +1710,8 @@ NdbDictInterface::execALTER_TABLE_REF(NdbApiSignal * signal,
int
NdbDictionaryImpl::dropTable(const char * name)
{
+ DBUG_ENTER("NdbDictionaryImpl::dropTable");
+ DBUG_PRINT("enter",("name: %s", name));
NdbTableImpl * tab = getTable(name);
if(tab == 0){
return -1;
@@ -1595,20 +1722,22 @@ NdbDictionaryImpl::dropTable(const char * name)
if (ret == INCOMPATIBLE_VERSION) {
const char * internalTableName = m_ndb.internalizeTableName(name);
+ DBUG_PRINT("info",("INCOMPATIBLE_VERSION internal_name: %s", internalTableName));
m_localHash.drop(internalTableName);
m_globalHash->lock();
m_globalHash->drop(tab);
m_globalHash->unlock();
- return dropTable(name);
+ DBUG_RETURN(dropTable(name));
}
- return ret;
+ DBUG_RETURN(ret);
}
int
NdbDictionaryImpl::dropTable(NdbTableImpl & impl)
{
+ int res;
const char * name = impl.getName();
if(impl.m_status == NdbDictionary::Object::New){
return dropTable(name);
@@ -1620,28 +1749,34 @@ NdbDictionaryImpl::dropTable(NdbTableImpl & impl)
}
List list;
- if (listIndexes(list, name) == -1)
+ if ((res = listIndexes(list, impl.m_tableId)) == -1){
return -1;
+ }
for (unsigned i = 0; i < list.count; i++) {
const List::Element& element = list.elements[i];
- if (dropIndex(element.name, name) == -1)
+ if ((res = dropIndex(element.name, name)) == -1)
+ {
return -1;
+ }
}
-
+
if (impl.m_noOfBlobs != 0) {
- if (dropBlobTables(impl) != 0)
+ if (dropBlobTables(impl) != 0){
return -1;
+ }
}
-
+
int ret = m_receiver.dropTable(impl);
- if(ret == 0){
+ if(ret == 0 || m_error.code == 709){
const char * internalTableName = impl.m_internalName.c_str();
-
+
m_localHash.drop(internalTableName);
m_globalHash->lock();
m_globalHash->drop(&impl);
m_globalHash->unlock();
+
+ return 0;
}
return ret;
@@ -1650,6 +1785,7 @@ NdbDictionaryImpl::dropTable(NdbTableImpl & impl)
int
NdbDictionaryImpl::dropBlobTables(NdbTableImpl & t)
{
+ DBUG_ENTER("NdbDictionaryImpl::dropBlobTables");
for (unsigned i = 0; i < t.m_columns.size(); i++) {
NdbColumnImpl & c = *t.m_columns[i];
if (! c.getBlobType() || c.getPartSize() == 0)
@@ -1657,11 +1793,14 @@ NdbDictionaryImpl::dropBlobTables(NdbTableImpl & t)
char btname[NdbBlob::BlobTableNameSize];
NdbBlob::getBlobTableName(btname, &t, &c);
if (dropTable(btname) != 0) {
- if (m_error.code != 709)
- return -1;
+ if (m_error.code != 709){
+ DBUG_PRINT("exit",("error %u - exiting",m_error.code));
+ DBUG_RETURN(-1);
+ }
+ DBUG_PRINT("info",("error %u - continuing",m_error.code));
}
}
- return 0;
+ DBUG_RETURN(0);
}
int
@@ -1751,11 +1890,13 @@ NdbIndexImpl*
NdbDictionaryImpl::getIndexImpl(const char * externalName,
const char * internalName)
{
- NdbTableImpl* tab = getTableImpl(internalName);
- if(tab == 0){
+ Ndb_local_table_info * info = get_local_table_info(internalName,
+ false);
+ if(info == 0){
m_error.code = 4243;
return 0;
}
+ NdbTableImpl * tab = info->m_table_impl;
if(tab->m_indexType == NdbDictionary::Index::Undefined){
// Not an index
@@ -1772,14 +1913,31 @@ NdbDictionaryImpl::getIndexImpl(const char * externalName,
/**
* Create index impl
*/
- NdbIndexImpl* idx = new NdbIndexImpl();
+ NdbIndexImpl* idx;
+ if(NdbDictInterface::create_index_obj_from_table(&idx, tab, prim) == 0){
+ idx->m_table = tab;
+ idx->m_externalName.assign(externalName);
+ idx->m_internalName.assign(internalName);
+ // TODO Assign idx to tab->m_index
+    // Don't do it right now since assign can't assign a table with index
+ // tab->m_index = idx;
+ return idx;
+ }
+ return 0;
+}
+
+int
+NdbDictInterface::create_index_obj_from_table(NdbIndexImpl** dst,
+ const NdbTableImpl* tab,
+ const NdbTableImpl* prim){
+ NdbIndexImpl *idx = new NdbIndexImpl();
idx->m_version = tab->m_version;
idx->m_status = tab->m_status;
idx->m_indexId = tab->m_tableId;
- idx->m_internalName.assign(internalName);
- idx->m_externalName.assign(externalName);
+ idx->m_externalName.assign(tab->getName());
idx->m_tableName.assign(prim->m_externalName);
idx->m_type = tab->m_indexType;
+ idx->m_logging = tab->m_logging;
// skip last attribute (NDB$PK or NDB$TNODE)
for(unsigned i = 0; i+1<tab->m_columns.size(); i++){
NdbColumnImpl* col = new NdbColumnImpl;
@@ -1795,12 +1953,9 @@ NdbDictionaryImpl::getIndexImpl(const char * externalName,
idx->m_key_ids[key_id] = i;
col->m_keyInfoPos = key_id;
}
-
- idx->m_table = tab;
- // TODO Assign idx to tab->m_index
- // Don't do it right now since assign can't asign a table with index
- // tab->m_index = idx;
- return idx;
+
+ * dst = idx;
+ return 0;
}
/*****************************************************************
@@ -1879,6 +2034,14 @@ NdbDictInterface::createIndex(Ndb & ndb,
m_error.code = 4245;
return -1;
}
+ // index key type check
+ if (it == DictTabInfo::UniqueHashIndex &&
+ ! NdbSqlUtil::usable_in_hash_index(col->m_type, col->m_cs) ||
+ it == DictTabInfo::OrderedIndex &&
+ ! NdbSqlUtil::usable_in_ordered_index(col->m_type, col->m_cs)) {
+ m_error.code = 743;
+ return -1;
+ }
attributeList.id[i] = col->m_attrId;
}
if (it == DictTabInfo::UniqueHashIndex) {
@@ -2004,7 +2167,6 @@ NdbDictionaryImpl::dropIndex(NdbIndexImpl & impl, const char * tableName)
m_globalHash->drop(impl.m_table);
m_globalHash->unlock();
}
-
return ret;
}
@@ -2688,14 +2850,11 @@ NdbDictionaryImpl::listObjects(List& list, NdbDictionary::Object::Type type)
}
int
-NdbDictionaryImpl::listIndexes(List& list, const char * tableName)
+NdbDictionaryImpl::listIndexes(List& list, Uint32 indexId)
{
ListTablesReq req;
- NdbTableImpl* impl = getTable(tableName);
- if (impl == 0)
- return -1;
req.requestData = 0;
- req.setTableId(impl->m_tableId);
+ req.setTableId(indexId);
req.setListNames(true);
req.setListIndexes(true);
return m_receiver.listObjects(list, req.requestData, m_ndb.usingFullyQualifiedNames());
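The NdbDictionaryImpl changes above restructure table lookup into a two-level cache: the per-dictionary local hash is consulted first, and only on a miss does fetchGlobalTableImpl() take the shared global cache's lock and, failing that, fetch the definition from DICT. A minimal standalone sketch of that lookup order, with std::unordered_map and std::mutex standing in for NdbLinHash/GlobalDictCache and their locking:

#include <mutex>
#include <string>
#include <unordered_map>

struct TableImpl { int tableId; };

static std::unordered_map<std::string, TableImpl*> local_hash;   // per NdbDictionaryImpl
static std::unordered_map<std::string, TableImpl*> global_hash;  // shared across Ndb objects
static std::mutex global_mutex;

// Stands in for NdbDictInterface::getTable(); a miss returns nullptr.
static TableImpl* fetch_from_dict(const std::string&) { return nullptr; }

TableImpl* get_local_table_info(const std::string& internal_name)
{
  // 1. cheap per-connection lookup, no locking needed
  auto it = local_hash.find(internal_name);
  if (it != local_hash.end())
    return it->second;

  // 2. shared cache under the global lock, then a DICT round trip on a miss
  TableImpl* impl;
  {
    std::lock_guard<std::mutex> g(global_mutex);
    auto git = global_hash.find(internal_name);
    impl = (git != global_hash.end()) ? git->second : nullptr;
  }
  if (impl == nullptr) {
    impl = fetch_from_dict(internal_name);
    std::lock_guard<std::mutex> g(global_mutex);
    global_hash[internal_name] = impl;        // the patch caches even a failed fetch
    if (impl == nullptr)
      return nullptr;
  }
  local_hash[internal_name] = impl;           // promote into the local cache
  return impl;
}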
diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp
index 9a890f02575..12f0946ab67 100644
--- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp
+++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp
@@ -52,7 +52,7 @@ public:
NdbColumnImpl(NdbDictionary::Column &); // This is not a copy constructor
~NdbColumnImpl();
NdbColumnImpl& operator=(const NdbColumnImpl&);
- void init();
+ void init(Type t = Unsigned);
int m_attrId;
BaseString m_name;
@@ -60,6 +60,7 @@ public:
int m_precision;
int m_scale;
int m_length;
+ CHARSET_INFO * m_cs; // not const in MySQL
bool m_pk;
bool m_tupleKey;
@@ -82,17 +83,19 @@ public:
Uint32 m_keyInfoPos;
Uint32 m_extType;          // used by restore (kernel type in version v2x)
bool getInterpretableType() const ;
+ bool getCharType() const;
bool getBlobType() const;
/**
* Equality/assign
*/
bool equal(const NdbColumnImpl&) const;
- void assign(const NdbColumnImpl&);
static NdbColumnImpl & getImpl(NdbDictionary::Column & t);
static const NdbColumnImpl & getImpl(const NdbDictionary::Column & t);
NdbDictionary::Column * m_facade;
+
+ static NdbDictionary::Column * create_psuedo(const char *);
};
class NdbTableImpl : public NdbDictionary::Table, public NdbDictObjectImpl {
@@ -240,7 +243,6 @@ public:
NdbDictInterface(NdbError& err) : m_error(err) {
m_reference = 0;
m_masterNodeId = 0;
- m_blockNumber = -1;
m_transporter= NULL;
}
~NdbDictInterface();
@@ -304,11 +306,14 @@ public:
const Uint32 * data, Uint32 len,
bool fullyQualifiedNames);
+ static int create_index_obj_from_table(NdbIndexImpl ** dst,
+ const NdbTableImpl*,
+ const NdbTableImpl*);
+
NdbError & m_error;
private:
Uint32 m_reference;
Uint32 m_masterNodeId;
- int m_blockNumber;
NdbWaiter m_waiter;
class TransporterFacade * m_transporter;
@@ -318,7 +323,7 @@ private:
class NdbApiSignal* signal,
class LinearSectionPtr ptr[3]);
- static void execNodeStatus(void* dictImpl, NodeId,
+ static void execNodeStatus(void* dictImpl, Uint32,
bool alive, bool nfCompleted);
void execGET_TABINFO_REF(NdbApiSignal *, LinearSectionPtr ptr[3]);
@@ -385,10 +390,11 @@ public:
int stopSubscribeEvent(NdbEventImpl &);
int listObjects(List& list, NdbDictionary::Object::Type type);
- int listIndexes(List& list, const char * tableName);
+ int listIndexes(List& list, Uint32 indexId);
- NdbTableImpl * getTable(const char * tableName);
- NdbTableImpl * getTableImpl(const char * internalName);
+ NdbTableImpl * getTable(const char * tableName, void **data= 0);
+ Ndb_local_table_info * get_local_table_info(const char * internalName,
+ bool do_add_blob_tables);
NdbIndexImpl * getIndex(const char * indexName,
const char * tableName);
NdbIndexImpl * getIndexImpl(const char * name, const char * internalName);
@@ -397,6 +403,7 @@ public:
const NdbError & getNdbError() const;
NdbError m_error;
+ Uint32 m_local_table_data_size;
LocalDictCache m_localHash;
GlobalDictCache * m_globalHash;
@@ -407,6 +414,8 @@ public:
NdbDictInterface m_receiver;
Ndb & m_ndb;
+private:
+ Ndb_local_table_info * fetchGlobalTableImpl(const char * internalName);
};
inline
@@ -442,6 +451,14 @@ NdbColumnImpl::getInterpretableType() const {
inline
bool
+NdbColumnImpl::getCharType() const {
+ return (m_type == NdbDictionary::Column::Char ||
+ m_type == NdbDictionary::Column::Varchar ||
+ m_type == NdbDictionary::Column::Text);
+}
+
+inline
+bool
NdbColumnImpl::getBlobType() const {
return (m_type == NdbDictionary::Column::Blob ||
m_type == NdbDictionary::Column::Text);
@@ -595,45 +612,37 @@ NdbDictionaryImpl::getImpl(const NdbDictionary::Dictionary & t){
inline
NdbTableImpl *
-NdbDictionaryImpl::getTable(const char * tableName)
+NdbDictionaryImpl::getTable(const char * tableName, void **data)
{
- const char * internalTableName = m_ndb.internalizeTableName(tableName);
-
- return getTableImpl(internalTableName);
+ Ndb_local_table_info *info=
+ get_local_table_info(m_ndb.internalizeTableName(tableName), true);
+ if (info == 0) {
+ return 0;
+ }
+ if (data) {
+ *data= info->m_local_data;
+ }
+ return info->m_table_impl;
}
inline
-NdbTableImpl *
-NdbDictionaryImpl::getTableImpl(const char * internalTableName)
+Ndb_local_table_info *
+NdbDictionaryImpl::get_local_table_info(const char * internalTableName,
+ bool do_add_blob_tables)
{
- NdbTableImpl *ret = m_localHash.get(internalTableName);
-
- if (ret != 0) {
- return ret; // autoincrement already initialized
- }
-
- m_globalHash->lock();
- ret = m_globalHash->get(internalTableName);
- m_globalHash->unlock();
-
- if (ret == 0){
- ret = m_receiver.getTable(internalTableName, m_ndb.usingFullyQualifiedNames());
- m_globalHash->lock();
- m_globalHash->put(internalTableName, ret);
- m_globalHash->unlock();
-
- if(ret == 0){
+ Ndb_local_table_info *info= m_localHash.get(internalTableName);
+ if (info == 0) {
+ info= fetchGlobalTableImpl(internalTableName);
+ if (info == 0) {
return 0;
}
}
- m_localHash.put(internalTableName, ret);
-
- m_ndb.theFirstTupleId[ret->getTableId()] = ~0;
- m_ndb.theLastTupleId[ret->getTableId()] = ~0;
-
- addBlobTables(*ret);
-
- return ret;
+ if (do_add_blob_tables &&
+ info->m_table_impl->m_noOfBlobs &&
+ addBlobTables(*(info->m_table_impl))) {
+ return 0;
+ }
+ return info; // autoincrement already initialized
}
inline
@@ -648,12 +657,14 @@ NdbDictionaryImpl::getIndex(const char * indexName,
if (t != 0)
internalIndexName = m_ndb.internalizeIndexName(t, indexName);
} else {
- internalIndexName = m_ndb.internalizeTableName(indexName); // Index is also a table
+ internalIndexName =
+ m_ndb.internalizeTableName(indexName); // Index is also a table
}
if (internalIndexName) {
- NdbTableImpl * tab = getTableImpl(internalIndexName);
-
- if (tab) {
+ Ndb_local_table_info * info = get_local_table_info(internalIndexName,
+ false);
+ if (info) {
+ NdbTableImpl * tab = info->m_table_impl;
if (tab->m_index == 0)
tab->m_index = getIndexImpl(indexName, internalIndexName);
if (tab->m_index != 0)
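With m_cs added to NdbColumnImpl, the character-set number is carried to DICT inside the existing AttributeExtPrecision word: the precision proper stays in the low 16 bits and the charset number occupies the high 16 bits, exactly as the createOrAlterTable() and parseTableInfo() hunks earlier in this patch encode and decode it. A small sketch of that packing (plain C++; the helper names are not part of the patch):

#include <cassert>
#include <cstdint>

// Encode: precision in the low half-word, charset number in the high half-word.
inline uint32_t pack_ext_precision(unsigned precision, unsigned cs_number)
{
  assert(precision <= 0xFFFF && cs_number <= 0xFFFF);
  return (precision & 0xFFFF) | (cs_number << 16);
}

// Decode on the receiving side, mirroring parseTableInfo():
// cs_number == 0 means "no charset", i.e. not a character column.
inline void unpack_ext_precision(uint32_t ext, unsigned& precision, unsigned& cs_number)
{
  precision = ext & 0xFFFF;
  cs_number = ext >> 16;
}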
diff --git a/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/ndb/src/ndbapi/NdbEventOperationImpl.cpp
index f5e683b1c29..b3fac64d1c4 100644
--- a/ndb/src/ndbapi/NdbEventOperationImpl.cpp
+++ b/ndb/src/ndbapi/NdbEventOperationImpl.cpp
@@ -573,12 +573,8 @@ int NdbEventOperationImpl::wait(void *p, int aMillisecondNumber)
*
*/
+extern NdbMutex * ndb_global_event_buffer_mutex;
static NdbGlobalEventBuffer *ndbGlobalEventBuffer=NULL;
-#ifdef NDB_WIN32
-static NdbMutex & ndbGlobalEventBufferMutex = * NdbMutex_Create();
-#else
-static NdbMutex ndbGlobalEventBufferMutex = NDB_MUTEX_INITIALIZER;
-#endif
/*
* Class NdbGlobalEventBufferHandle
@@ -607,18 +603,18 @@ NdbGlobalEventBufferHandle::NdbGlobalEventBufferHandle
exit(-1);
}
- NdbMutex_Lock(&ndbGlobalEventBufferMutex);
+ NdbMutex_Lock(ndb_global_event_buffer_mutex);
if (ndbGlobalEventBuffer == NULL) {
if (ndbGlobalEventBuffer == NULL) {
ndbGlobalEventBuffer = new NdbGlobalEventBuffer();
if (!ndbGlobalEventBuffer) {
- NdbMutex_Unlock(&ndbGlobalEventBufferMutex);
+ NdbMutex_Unlock(ndb_global_event_buffer_mutex);
ndbout_c("NdbGlobalEventBufferHandle:: failed to allocate ndbGlobalEventBuffer");
exit(-1);
}
}
}
- NdbMutex_Unlock(&ndbGlobalEventBufferMutex);
+ NdbMutex_Unlock(ndb_global_event_buffer_mutex);
GUARD(real_init(this,MAX_NUMBER_ACTIVE_EVENTS));
}
@@ -631,12 +627,12 @@ NdbGlobalEventBufferHandle::~NdbGlobalEventBufferHandle()
ndbGlobalEventBuffer->real_remove(this);
ndbGlobalEventBuffer->unlock();
- NdbMutex_Lock(&ndbGlobalEventBufferMutex);
+ NdbMutex_Lock(ndb_global_event_buffer_mutex);
if (ndbGlobalEventBuffer->m_handlers.size() == 0) {
delete ndbGlobalEventBuffer;
ndbGlobalEventBuffer = NULL;
}
- NdbMutex_Unlock(&ndbGlobalEventBufferMutex);
+ NdbMutex_Unlock(ndb_global_event_buffer_mutex);
}
void
@@ -770,13 +766,13 @@ void
NdbGlobalEventBuffer::lock()
{
if (!m_group_lock_flag)
- NdbMutex_Lock(&ndbGlobalEventBufferMutex);
+ NdbMutex_Lock(ndb_global_event_buffer_mutex);
}
void
NdbGlobalEventBuffer::unlock()
{
if (!m_group_lock_flag)
- NdbMutex_Unlock(&ndbGlobalEventBufferMutex);
+ NdbMutex_Unlock(ndb_global_event_buffer_mutex);
}
void
NdbGlobalEventBuffer::add_drop_lock()
@@ -1232,7 +1228,8 @@ NdbGlobalEventBuffer::real_wait(NdbGlobalEventBufferHandle *h,
n += hasData(h->m_bufferIds[i]);
if (n) return n;
- int r = NdbCondition_WaitTimeout(h->p_cond, &ndbGlobalEventBufferMutex, aMillisecondNumber);
+ int r = NdbCondition_WaitTimeout(h->p_cond, ndb_global_event_buffer_mutex,
+ aMillisecondNumber);
if (r > 0)
return -1;
diff --git a/ndb/src/ndbapi/NdbIndexOperation.cpp b/ndb/src/ndbapi/NdbIndexOperation.cpp
index 7bea3b9f3d2..9abde639914 100644
--- a/ndb/src/ndbapi/NdbIndexOperation.cpp
+++ b/ndb/src/ndbapi/NdbIndexOperation.cpp
@@ -54,8 +54,8 @@ NdbIndexOperation::~NdbIndexOperation()
* Remark: Initiates operation record after allocation.
*****************************************************************************/
int
-NdbIndexOperation::indxInit(NdbIndexImpl * anIndex,
- NdbTableImpl * aTable,
+NdbIndexOperation::indxInit(const NdbIndexImpl * anIndex,
+ const NdbTableImpl * aTable,
NdbConnection* myConnection)
{
NdbOperation::init(aTable, myConnection);
@@ -85,6 +85,23 @@ NdbIndexOperation::indxInit(NdbIndexImpl * anIndex,
return 0;
}
+int NdbIndexOperation::readTuple(NdbOperation::LockMode lm)
+{
+ switch(lm) {
+ case LM_Read:
+ return readTuple();
+ break;
+ case LM_Exclusive:
+ return readTupleExclusive();
+ break;
+ case LM_CommittedRead:
+ return readTuple();
+ break;
+ default:
+ return -1;
+ };
+}
+
int NdbIndexOperation::readTuple()
{
// First check that index is unique
@@ -103,21 +120,21 @@ int NdbIndexOperation::simpleRead()
{
// First check that index is unique
- return NdbOperation::simpleRead();
+ return NdbOperation::readTuple();
}
int NdbIndexOperation::dirtyRead()
{
// First check that index is unique
- return NdbOperation::dirtyRead();
+ return NdbOperation::readTuple();
}
int NdbIndexOperation::committedRead()
{
// First check that index is unique
- return NdbOperation::committedRead();
+ return NdbOperation::readTuple();
}
int NdbIndexOperation::updateTuple()
@@ -164,6 +181,7 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
Uint32 tData;
Uint32 tKeyInfoPosition;
const char* aValue = aValuePassed;
+ Uint32 xfrmData[1024];
Uint32 tempData[1024];
if ((theStatus == OperationDefined) &&
@@ -224,6 +242,39 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
m_theIndexDefined[i][2] = true;
Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize;
+ {
+ /*************************************************************************
+ * Check if the pointer of the value passed is aligned on a 4 byte
+ * boundary. If so only assign the pointer to the internal variable
+ * aValue. If it is not aligned then we start by copying the value to
+ * tempData and use this as aValue instead.
+ *************************************************************************/
+ const int attributeSize = sizeInBytes;
+ const int slack = sizeInBytes & 3;
+ if ((((UintPtr)aValue & 3) != 0) || (slack != 0)){
+ memcpy(&tempData[0], aValue, attributeSize);
+ aValue = (char*)&tempData[0];
+ if(slack != 0) {
+ char * tmp = (char*)&tempData[0];
+ memset(&tmp[attributeSize], 0, (4 - slack));
+ }//if
+ }//if
+ }
+ const char* aValueToWrite = aValue;
+
+ CHARSET_INFO* cs = tAttrInfo->m_cs;
+ if (cs != 0) {
+ // current limitation: strxfrm does not increase length
+ assert(cs->strxfrm_multiply == 1);
+ unsigned n =
+ (*cs->coll->strnxfrm)(cs,
+ (uchar*)xfrmData, sizeof(xfrmData),
+ (const uchar*)aValue, sizeInBytes);
+ while (n < sizeInBytes)
+ ((uchar*)xfrmData)[n++] = 0x20;
+ aValue = (char*)xfrmData;
+ }
+
Uint32 bitsInLastWord = 8 * (sizeInBytes & 3) ;
Uint32 totalSizeInWords = (sizeInBytes + 3)/4;// Inc. bits in last word
Uint32 sizeInWords = sizeInBytes / 4; // Exc. bits in last word
@@ -261,25 +312,8 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
m_theIndexLen = m_theIndexLen + tAttrLenInWords;
}//if
#endif
-
- /*************************************************************************
- * Check if the pointer of the value passed is aligned on a 4 byte
- * boundary. If so only assign the pointer to the internal variable
- * aValue. If it is not aligned then we start by copying the value to
- * tempData and use this as aValue instead.
- *************************************************************************/
- const int attributeSize = sizeInBytes;
- const int slack = sizeInBytes & 3;
int tDistrKey = tAttrInfo->m_distributionKey;
int tDistrGroup = tAttrInfo->m_distributionGroup;
- if ((((UintPtr)aValue & 3) != 0) || (slack != 0)){
- memcpy(&tempData[0], aValue, attributeSize);
- aValue = (char*)&tempData[0];
- if(slack != 0) {
- char * tmp = (char*)&tempData[0];
- memset(&tmp[attributeSize], 0, (4 - slack));
- }//if
- }//if
OperationType tOpType = theOperationType;
if ((tDistrKey != 1) && (tDistrGroup != 1)) {
;
@@ -314,13 +348,20 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
if ((tOpType == InsertRequest) ||
(tOpType == WriteRequest)) {
if (!tAttrInfo->m_indexOnly){
+ // invalid data can crash kernel
+ if (cs != NULL &&
+ (*cs->cset->well_formed_len)(cs,
+ aValueToWrite,
+ aValueToWrite + sizeInBytes,
+ sizeInBytes) != sizeInBytes)
+ goto equal_error4;
Uint32 ahValue;
Uint32 sz = totalSizeInWords;
AttributeHeader::init(&ahValue, tAttrId, sz);
insertATTRINFO( ahValue );
- insertATTRINFOloop((Uint32*)aValue, sizeInWords);
+ insertATTRINFOloop((Uint32*)aValueToWrite, sizeInWords);
if (bitsInLastWord != 0) {
- tData = *(Uint32*)(aValue + (sizeInWords << 2));
+ tData = *(Uint32*)(aValueToWrite + (sizeInWords << 2));
tData = convertEndian(tData);
tData = tData & ((1 << bitsInLastWord) - 1);
tData = convertEndian(tData);
@@ -411,7 +452,10 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
equal_error3:
setErrorCodeAbort(4209);
-
+ return -1;
+
+ equal_error4:
+ setErrorCodeAbort(744);
return -1;
}
@@ -505,7 +549,7 @@ NdbIndexOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransactionId)
//-------------------------------------------------------------
Uint8 tReadInd = (theOperationType == ReadRequest);
Uint8 tSimpleState = tReadInd & tSimpleAlt;
- theNdbCon->theSimpleState = tSimpleState;
+ //theNdbCon->theSimpleState = tSimpleState;
tcIndxReq->transId1 = tTransId1;
tcIndxReq->transId2 = tTransId2;
@@ -692,23 +736,10 @@ NdbIndexOperation::receiveTCINDXREF( NdbApiSignal* aSignal)
theStatus = Finished;
theNdbCon->theReturnStatus = NdbConnection::ReturnFailure;
- //--------------------------------------------------------------------------//
- // If the transaction this operation belongs to consists only of simple reads
- // we set the error code on the transaction object.
- // If the transaction consists of other types of operations we set
- // the error code only on the operation since the simple read is not really
- // part of this transaction and we can not decide the status of the whole
- // transaction based on this operation.
- //--------------------------------------------------------------------------//
Uint32 errorCode = tcIndxRef->errorCode;
- if (theNdbCon->theSimpleState == 0) {
- theError.code = errorCode;
- theNdbCon->setOperationErrorCodeAbort(errorCode);
- return theNdbCon->OpCompleteFailure();
- } else {
- theError.code = errorCode;
- return theNdbCon->OpCompleteSuccess();
- }
+ theError.code = errorCode;
+ theNdbCon->setOperationErrorCodeAbort(errorCode);
+ return theNdbCon->OpCompleteFailure(theNdbCon->m_abortOption);
}//NdbIndexOperation::receiveTCINDXREF()
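For character-set aware key columns the hunks above first validate the raw bytes with well_formed_len (error 744 otherwise), then normalize the key with the collation's strnxfrm and pad it with 0x20 up to the column length, so equal strings hash and compare identically in the kernel while the unmodified bytes (aValueToWrite) are what actually gets stored. A standalone sketch of the normalize-and-pad step, with a toy transform in place of the CHARSET_INFO hook:

#include <algorithm>
#include <cctype>
#include <vector>

// Stand-in for (*cs->coll->strnxfrm)(): here a trivial case-insensitive
// "collation". Returns the number of bytes written, like the real hook.
size_t toy_strnxfrm(unsigned char* dst, size_t dstlen,
                    const unsigned char* src, size_t srclen)
{
  size_t n = std::min(dstlen, srclen);
  for (size_t i = 0; i < n; i++)
    dst[i] = (unsigned char)std::toupper(src[i]);
  return n;
}

// Normalize a key value and pad with 0x20 up to the column length,
// mirroring the xfrmData handling in equal_impl().
std::vector<unsigned char> normalize_key(const char* value, size_t size_in_bytes)
{
  std::vector<unsigned char> xfrm(size_in_bytes);
  size_t n = toy_strnxfrm(xfrm.data(), xfrm.size(),
                          (const unsigned char*)value, size_in_bytes);
  while (n < size_in_bytes)
    xfrm[n++] = 0x20;            // pad with spaces, exactly as the patch does
  return xfrm;
}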
diff --git a/ndb/src/ndbapi/NdbLinHash.hpp b/ndb/src/ndbapi/NdbLinHash.hpp
index 5d0d52a31d8..f245a261a04 100644
--- a/ndb/src/ndbapi/NdbLinHash.hpp
+++ b/ndb/src/ndbapi/NdbLinHash.hpp
@@ -59,7 +59,7 @@ public:
void releaseHashTable(void);
int insertKey(const char * str, Uint32 len, Uint32 lkey1, C* data);
- int deleteKey(const char * str, Uint32 len);
+ C *deleteKey(const char * str, Uint32 len);
C* getData(const char *, Uint32);
Uint32* getKey(const char *, Uint32);
@@ -277,7 +277,7 @@ NdbLinHash<C>::getData( const char* str, Uint32 len ){
template <class C>
inline
-int
+C *
NdbLinHash<C>::deleteKey ( const char* str, Uint32 len){
const Uint32 hash = Hash(str, len);
int dir, seg;
@@ -287,20 +287,19 @@ NdbLinHash<C>::deleteKey ( const char* str, Uint32 len){
NdbElement_t<C> **chainp = &directory[dir]->elements[seg];
for(NdbElement_t<C> * chain = *chainp; chain != 0; chain = chain->next){
if(chain->len == len && !memcmp(chain->str, str, len)){
+ C *data= chain->theData;
if (oldChain == 0) {
- delete chain;
- * chainp = 0;
- return 1;
+ * chainp = chain->next;
} else {
oldChain->next = chain->next;
- delete chain;
- return 1;
}
+ delete chain;
+ return data;
} else {
oldChain = chain;
}
}
- return -1; /* Element doesn't exist */
+ return 0; /* Element doesn't exist */
}
template <class C>
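NdbLinHash::deleteKey() now unlinks the element and hands back the stored payload pointer instead of a success flag, so the dictionary teardown above can release the Ndb_local_table_info it still owns. A compact standalone illustration of that ownership-transfer contract:

#include <cassert>
#include <string>
#include <unordered_map>

// Toy stand-in for NdbLinHash<C>::deleteKey(): unlink and return the payload,
// returning nullptr when the key is absent (the old code returned -1/1).
template <class C>
C* delete_key(std::unordered_map<std::string, C*>& hash, const std::string& key)
{
  auto it = hash.find(key);
  if (it == hash.end())
    return nullptr;              // element doesn't exist
  C* data = it->second;
  hash.erase(it);                // unlink only; do not delete the payload
  return data;
}

int main()
{
  std::unordered_map<std::string, int*> h;
  h["t1"] = new int(42);
  int* p = delete_key(h, "t1");  // caller now owns p and must free it
  assert(p && *p == 42);
  delete p;
  assert(delete_key(h, "t1") == nullptr);
}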
diff --git a/ndb/src/ndbapi/NdbOperation.cpp b/ndb/src/ndbapi/NdbOperation.cpp
index 18a7d1d1c80..b0b95d0ff43 100644
--- a/ndb/src/ndbapi/NdbOperation.cpp
+++ b/ndb/src/ndbapi/NdbOperation.cpp
@@ -78,7 +78,6 @@ NdbOperation::NdbOperation(Ndb* aNdb) :
m_tcReqGSN(GSN_TCKEYREQ),
m_keyInfoGSN(GSN_KEYINFO),
m_attrInfoGSN(GSN_ATTRINFO),
- theBoundATTRINFO(NULL),
theBlobList(NULL)
{
theReceiver.init(NdbReceiver::NDB_OPERATION, this);
@@ -131,7 +130,7 @@ NdbOperation::setErrorCodeAbort(int anErrorCode)
*****************************************************************************/
int
-NdbOperation::init(NdbTableImpl* tab, NdbConnection* myConnection){
+NdbOperation::init(const NdbTableImpl* tab, NdbConnection* myConnection){
NdbApiSignal* tSignal;
theStatus = Init;
theError.code = 0;
@@ -167,7 +166,6 @@ NdbOperation::init(NdbTableImpl* tab, NdbConnection* myConnection){
theScanInfo = 0;
theTotalNrOfKeyWordInSignal = 8;
theMagicNumber = 0xABCDEF01;
- theBoundATTRINFO = NULL;
theBlobList = NULL;
tSignal = theNdb->getSignal();
@@ -263,14 +261,6 @@ NdbOperation::release()
tSubroutine = tSubroutine->theNext;
theNdb->releaseNdbSubroutine(tSaveSubroutine);
}
- tSignal = theBoundATTRINFO;
- while (tSignal != NULL)
- {
- tSaveSignal = tSignal;
- tSignal = tSignal->next();
- theNdb->releaseSignal(tSaveSignal);
- }
- theBoundATTRINFO = NULL;
}
tBlob = theBlobList;
while (tBlob != NULL)
@@ -295,6 +285,12 @@ NdbOperation::getValue(Uint32 anAttrId, char* aValue)
return getValue_impl(m_currentTable->getColumn(anAttrId), aValue);
}
+NdbRecAttr*
+NdbOperation::getValue(const NdbDictionary::Column* col, char* aValue)
+{
+ return getValue_impl(&NdbColumnImpl::getImpl(*col), aValue);
+}
+
int
NdbOperation::equal(const char* anAttrName,
const char* aValuePassed,
diff --git a/ndb/src/ndbapi/NdbOperationDefine.cpp b/ndb/src/ndbapi/NdbOperationDefine.cpp
index 08ed6e84271..35abb15b00d 100644
--- a/ndb/src/ndbapi/NdbOperationDefine.cpp
+++ b/ndb/src/ndbapi/NdbOperationDefine.cpp
@@ -55,6 +55,7 @@ NdbOperation::insertTuple()
theOperationType = InsertRequest;
tNdbCon->theSimpleState = 0;
theErrorLine = tErrorLine++;
+ theLockMode = LM_Exclusive;
return 0;
} else {
setErrorCode(4200);
@@ -74,6 +75,7 @@ NdbOperation::updateTuple()
tNdbCon->theSimpleState = 0;
theOperationType = UpdateRequest;
theErrorLine = tErrorLine++;
+ theLockMode = LM_Exclusive;
return 0;
} else {
setErrorCode(4200);
@@ -93,6 +95,7 @@ NdbOperation::writeTuple()
tNdbCon->theSimpleState = 0;
theOperationType = WriteRequest;
theErrorLine = tErrorLine++;
+ theLockMode = LM_Exclusive;
return 0;
} else {
setErrorCode(4200);
@@ -103,6 +106,26 @@ NdbOperation::writeTuple()
* int readTuple();
*****************************************************************************/
int
+NdbOperation::readTuple(NdbOperation::LockMode lm)
+{
+ switch(lm) {
+ case LM_Read:
+ return readTuple();
+ break;
+ case LM_Exclusive:
+ return readTupleExclusive();
+ break;
+ case LM_CommittedRead:
+ return committedRead();
+ break;
+ default:
+ return -1;
+ };
+}
+/******************************************************************************
+ * int readTuple();
+ *****************************************************************************/
+int
NdbOperation::readTuple()
{
NdbConnection* tNdbCon = theNdbCon;
@@ -112,6 +135,7 @@ NdbOperation::readTuple()
tNdbCon->theSimpleState = 0;
theOperationType = ReadRequest;
theErrorLine = tErrorLine++;
+ theLockMode = LM_Read;
return 0;
} else {
setErrorCode(4200);
@@ -132,6 +156,7 @@ NdbOperation::deleteTuple()
tNdbCon->theSimpleState = 0;
theOperationType = DeleteRequest;
theErrorLine = tErrorLine++;
+ theLockMode = LM_Exclusive;
return 0;
} else {
setErrorCode(4200);
@@ -152,6 +177,7 @@ NdbOperation::readTupleExclusive()
tNdbCon->theSimpleState = 0;
theOperationType = ReadExclusive;
theErrorLine = tErrorLine++;
+ theLockMode = LM_Exclusive;
return 0;
} else {
setErrorCode(4200);
@@ -165,17 +191,24 @@ NdbOperation::readTupleExclusive()
int
NdbOperation::simpleRead()
{
+ /**
+ * Currently/still disabled
+ */
+ return readTuple();
+#if 0
int tErrorLine = theErrorLine;
if (theStatus == Init) {
theStatus = OperationDefined;
theOperationType = ReadRequest;
theSimpleIndicator = 1;
theErrorLine = tErrorLine++;
+ theLockMode = LM_Read;
return 0;
} else {
setErrorCode(4200);
return -1;
}//if
+#endif
}//NdbOperation::simpleRead()
/*****************************************************************************
@@ -200,6 +233,7 @@ NdbOperation::committedRead()
theSimpleIndicator = 1;
theDirtyIndicator = 1;
theErrorLine = tErrorLine++;
+ theLockMode = LM_CommittedRead;
return 0;
} else {
setErrorCode(4200);
@@ -222,6 +256,7 @@ NdbOperation::dirtyUpdate()
theSimpleIndicator = 1;
theDirtyIndicator = 1;
theErrorLine = tErrorLine++;
+ theLockMode = LM_CommittedRead;
return 0;
} else {
setErrorCode(4200);
@@ -244,6 +279,7 @@ NdbOperation::dirtyWrite()
theSimpleIndicator = 1;
theDirtyIndicator = 1;
theErrorLine = tErrorLine++;
+ theLockMode = LM_CommittedRead;
return 0;
} else {
setErrorCode(4200);
@@ -264,7 +300,7 @@ NdbOperation::interpretedUpdateTuple()
tNdbCon->theSimpleState = 0;
theOperationType = UpdateRequest;
theAI_LenInCurrAI = 25;
-
+ theLockMode = LM_Exclusive;
theErrorLine = tErrorLine++;
initInterpreter();
return 0;
@@ -289,7 +325,7 @@ NdbOperation::interpretedDeleteTuple()
theErrorLine = tErrorLine++;
theAI_LenInCurrAI = 25;
-
+ theLockMode = LM_Exclusive;
initInterpreter();
return 0;
} else {
@@ -316,16 +352,12 @@ NdbOperation::getValue_impl(const NdbColumnImpl* tAttrInfo, char* aValue)
if ((tAttrInfo != NULL) &&
(!tAttrInfo->m_indexOnly) &&
(theStatus != Init)){
- if (theStatus == SetBound) {
- ((NdbIndexScanOperation*)this)->saveBoundATTRINFO();
- theStatus = GetValue;
- }
if (theStatus != GetValue) {
if (theInterpretIndicator == 1) {
if (theStatus == FinalGetValue) {
; // Simply continue with getValue
} else if (theStatus == ExecInterpretedValue) {
- if (insertATTRINFO(Interpreter::EXIT_OK_LAST) == -1)
+ if (insertATTRINFO(Interpreter::EXIT_OK) == -1)
return NULL;
theInterpretedSize = theTotalCurrAI_Len -
(theInitialReadSize + 5);
@@ -415,7 +447,7 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
// We insert an exit from interpretation since we are now starting
// to set values in the tuple by setValue.
//--------------------------------------------------------------------
- if (insertATTRINFO(Interpreter::EXIT_OK_LAST) == -1){
+ if (insertATTRINFO(Interpreter::EXIT_OK) == -1){
return -1;
}
theInterpretedSize = theTotalCurrAI_Len -
@@ -492,6 +524,17 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
// Insert Attribute Id into ATTRINFO part.
const Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize;
+
+ CHARSET_INFO* cs = tAttrInfo->m_cs;
+ // invalid data can crash kernel
+ if (cs != NULL &&
+ (*cs->cset->well_formed_len)(cs,
+ aValue,
+ aValue + sizeInBytes,
+ sizeInBytes) != sizeInBytes) {
+ setErrorCodeAbort(744);
+ return -1;
+ }
#if 0
tAttrSize = tAttrInfo->theAttrSize;
tArraySize = tAttrInfo->theArraySize;
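The new readTuple(LockMode) overload added above is a plain dispatcher onto the existing entry points, and each define-method now records its effective lock mode in theLockMode. A hedged usage sketch of the overload (the surrounding transaction and operation setup is an assumption; only the enum values and the mapping come from this patch):

// One of the following, on an already allocated but not yet defined operation:
//   op->readTuple(NdbOperation::LM_Read);           // shared read lock
//   op->readTuple(NdbOperation::LM_Exclusive);      // same as readTupleExclusive()
//   op->readTuple(NdbOperation::LM_CommittedRead);  // same as committedRead()
// Any other value makes readTuple(LockMode) return -1, so callers should treat
// a non-zero return as "unsupported lock mode".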
diff --git a/ndb/src/ndbapi/NdbOperationExec.cpp b/ndb/src/ndbapi/NdbOperationExec.cpp
index 7ee76bf2f3e..f1338ae01e4 100644
--- a/ndb/src/ndbapi/NdbOperationExec.cpp
+++ b/ndb/src/ndbapi/NdbOperationExec.cpp
@@ -161,28 +161,17 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId)
tTransId1 = (Uint32) aTransId;
tTransId2 = (Uint32) (aTransId >> 32);
-//-------------------------------------------------------------
-// Simple is simple if simple or both start and commit is set.
-//-------------------------------------------------------------
-// Temporarily disable simple stuff
- Uint8 tSimpleIndicator = 0;
-// Uint8 tSimpleIndicator = theSimpleIndicator;
+ Uint8 tSimpleIndicator = theSimpleIndicator;
Uint8 tCommitIndicator = theCommitIndicator;
Uint8 tStartIndicator = theStartIndicator;
-// if ((theNdbCon->theLastOpInList == this) && (theCommitIndicator == 0))
-// abort();
-// Temporarily disable simple stuff
- Uint8 tSimpleAlt = 0;
-// Uint8 tSimpleAlt = tStartIndicator & tCommitIndicator;
- tSimpleIndicator = tSimpleIndicator | tSimpleAlt;
+ Uint8 tInterpretIndicator = theInterpretIndicator;
//-------------------------------------------------------------
// Simple state is set if start and commit is set and it is
// a read request. Otherwise it is set to zero.
//-------------------------------------------------------------
Uint8 tReadInd = (theOperationType == ReadRequest);
- Uint8 tSimpleState = tReadInd & tSimpleAlt;
- theNdbCon->theSimpleState = tSimpleState;
+ Uint8 tSimpleState = tReadInd & tSimpleIndicator;
tcKeyReq->transId1 = tTransId1;
tcKeyReq->transId2 = tTransId2;
@@ -197,7 +186,6 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId)
tcKeyReq->setSimpleFlag(tReqInfo, tSimpleIndicator);
tcKeyReq->setCommitFlag(tReqInfo, tCommitIndicator);
tcKeyReq->setStartFlag(tReqInfo, tStartIndicator);
- const Uint8 tInterpretIndicator = theInterpretIndicator;
tcKeyReq->setInterpretedFlag(tReqInfo, tInterpretIndicator);
Uint8 tDirtyIndicator = theDirtyIndicator;
@@ -208,6 +196,9 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId)
tcKeyReq->setDirtyFlag(tReqInfo, tDirtyIndicator);
tcKeyReq->setOperationType(tReqInfo, tOperationType);
tcKeyReq->setKeyLength(tReqInfo, tTupKeyLen);
+
+ // A simple read is always ignore error
+ abortOption = tSimpleIndicator ? IgnoreError : abortOption;
tcKeyReq->setAbortOption(tReqInfo, abortOption);
Uint8 tDistrKeyIndicator = theDistrKeyIndicator;
@@ -354,7 +345,7 @@ NdbOperation::prepareSendInterpreted()
Uint32 tTotalCurrAI_Len = theTotalCurrAI_Len;
Uint32 tInitReadSize = theInitialReadSize;
if (theStatus == ExecInterpretedValue) {
- if (insertATTRINFO(Interpreter::EXIT_OK_LAST) != -1) {
+ if (insertATTRINFO(Interpreter::EXIT_OK) != -1) {
//-------------------------------------------------------------------------
// Since we read the total length before inserting the last entry in the
// signals we need to add one to the total length.
@@ -550,27 +541,29 @@ NdbOperation::receiveTCKEYREF( NdbApiSignal* aSignal)
return -1;
}//if
+ AbortOption ao = (AbortOption)theNdbCon->m_abortOption;
+ theReceiver.m_received_result_length = ~0;
+
theStatus = Finished;
-
theNdbCon->theReturnStatus = NdbConnection::ReturnFailure;
- //-------------------------------------------------------------------------//
- // If the transaction this operation belongs to consists only of simple reads
- // we set the error code on the transaction object.
- // If the transaction consists of other types of operations we set
- // the error code only on the operation since the simple read is not really
- // part of this transaction and we can not decide the status of the whole
- // transaction based on this operation.
- //-------------------------------------------------------------------------//
- if (theNdbCon->theSimpleState == 0) {
- theError.code = aSignal->readData(4);
- theNdbCon->setOperationErrorCodeAbort(aSignal->readData(4));
- return theNdbCon->OpCompleteFailure();
- } else {
- theError.code = aSignal->readData(4);
- return theNdbCon->OpCompleteSuccess();
+
+ theError.code = aSignal->readData(4);
+ theNdbCon->setOperationErrorCodeAbort(aSignal->readData(4));
+
+ if(theOperationType != ReadRequest || !theSimpleIndicator) // not simple read
+ return theNdbCon->OpCompleteFailure(ao);
+
+ /**
+ * If TCKEYCONF has arrived
+ * op has completed (maybe trans has completed)
+ */
+ if(theReceiver.m_expected_result_length)
+ {
+ return theNdbCon->OpCompleteFailure(AbortOnError);
}
-}//NdbOperation::receiveTCKEYREF()
+ return -1;
+}
void
diff --git a/ndb/src/ndbapi/NdbOperationInt.cpp b/ndb/src/ndbapi/NdbOperationInt.cpp
index 3a7e0dda85e..ee7b8132cd1 100644
--- a/ndb/src/ndbapi/NdbOperationInt.cpp
+++ b/ndb/src/ndbapi/NdbOperationInt.cpp
@@ -216,10 +216,6 @@ int
NdbOperation::initial_interpreterCheck()
{
if ((theInterpretIndicator == 1)) {
- if (theStatus == SetBound) {
- ((NdbIndexScanOperation*)this)->saveBoundATTRINFO();
- theStatus = GetValue;
- }
if (theStatus == ExecInterpretedValue) {
return 0; // Simply continue with interpretation
} else if (theStatus == GetValue) {
@@ -732,6 +728,9 @@ int
NdbOperation::read_attr(const NdbColumnImpl* anAttrObject, Uint32 RegDest)
{
INT_DEBUG(("read_attr %d %u", anAttrObject->m_attrId, RegDest));
+ if (initial_interpreterCheck() == -1)
+ return -1;
+
int tAttrId = read_attrCheck(anAttrObject);
if (tAttrId == -1)
goto read_attr_error1;
@@ -888,6 +887,18 @@ NdbOperation::interpret_exit_ok()
return 0;
}
+int
+NdbOperation::interpret_exit_last_row()
+{
+ INT_DEBUG(("interpret_exit_last_row"));
+ if (initial_interpreterCheck() == -1)
+ return -1;
+ if (insertATTRINFO(Interpreter::EXIT_OK_LAST) == -1)
+ return -1;
+ theErrorLine++;
+ return 0;
+}
+
/************************************************************************************************
int NdbOperation::interpret_exit_nok(Uint32 ErrorCode)
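interpret_exit_last_row() is the only genuinely new interpreter call here: it emits Interpreter::EXIT_OK_LAST where interpret_exit_ok() emits EXIT_OK, presumably so an interpreted scan program can accept the current row and then stop scanning the rest of that fragment. A hedged usage sketch (the scan setup around it is an assumption):

//   NdbScanOperation* scan = ...;        // assumed setup of an interpreted scan
//   scan->interpret_exit_ok();           // accept this row, keep scanning
//   scan->interpret_exit_last_row();     // accept this row, end the fragment scan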
diff --git a/ndb/src/ndbapi/NdbOperationSearch.cpp b/ndb/src/ndbapi/NdbOperationSearch.cpp
index 19cb133dbf7..69b4e803acd 100644
--- a/ndb/src/ndbapi/NdbOperationSearch.cpp
+++ b/ndb/src/ndbapi/NdbOperationSearch.cpp
@@ -60,6 +60,7 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
Uint32 tData;
Uint32 tKeyInfoPosition;
const char* aValue = aValuePassed;
+ Uint32 xfrmData[1024];
Uint32 tempData[1024];
if ((theStatus == OperationDefined) &&
@@ -117,6 +118,40 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
theTupleKeyDefined[i][2] = true;
Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize;
+ {
+ /***************************************************************************
+ * Check if the pointer of the value passed is aligned on a 4 byte
+ * boundary. If so only assign the pointer to the internal variable
+ * aValue. If it is not aligned then we start by copying the value to
+ * tempData and use this as aValue instead.
+ *****************************************************************************/
+ const int attributeSize = sizeInBytes;
+ const int slack = sizeInBytes & 3;
+
+ if ((((UintPtr)aValue & 3) != 0) || (slack != 0)){
+ memcpy(&tempData[0], aValue, attributeSize);
+ aValue = (char*)&tempData[0];
+ if(slack != 0) {
+ char * tmp = (char*)&tempData[0];
+ memset(&tmp[attributeSize], 0, (4 - slack));
+ }//if
+ }//if
+ }
+ const char* aValueToWrite = aValue;
+
+ CHARSET_INFO* cs = tAttrInfo->m_cs;
+ if (cs != 0) {
+ // current limitation: strxfrm does not increase length
+ assert(cs->strxfrm_multiply == 1);
+ unsigned n =
+ (*cs->coll->strnxfrm)(cs,
+ (uchar*)xfrmData, sizeof(xfrmData),
+ (const uchar*)aValue, sizeInBytes);
+ while (n < sizeInBytes)
+ ((uchar*)xfrmData)[n++] = 0x20;
+ aValue = (char*)xfrmData;
+ }
+
Uint32 bitsInLastWord = 8 * (sizeInBytes & 3) ;
Uint32 totalSizeInWords = (sizeInBytes + 3)/4; // Inc. bits in last word
Uint32 sizeInWords = sizeInBytes / 4; // Exc. bits in last word
@@ -154,24 +189,9 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
theTupKeyLen = theTupKeyLen + tAttrLenInWords;
}//if
#endif
- /***************************************************************************
- * Check if the pointer of the value passed is aligned on a 4 byte
- * boundary. If so only assign the pointer to the internal variable
- * aValue. If it is not aligned then we start by copying the value to
- * tempData and use this as aValue instead.
- *****************************************************************************/
- const int attributeSize = sizeInBytes;
- const int slack = sizeInBytes & 3;
+
int tDistrKey = tAttrInfo->m_distributionKey;
int tDistrGroup = tAttrInfo->m_distributionGroup;
- if ((((UintPtr)aValue & 3) != 0) || (slack != 0)){
- memcpy(&tempData[0], aValue, attributeSize);
- aValue = (char*)&tempData[0];
- if(slack != 0) {
- char * tmp = (char*)&tempData[0];
- memset(&tmp[attributeSize], 0, (4 - slack));
- }//if
- }//if
OperationType tOpType = theOperationType;
if ((tDistrKey != 1) && (tDistrGroup != 1)) {
;
@@ -206,13 +226,20 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
if ((tOpType == InsertRequest) ||
(tOpType == WriteRequest)) {
if (!tAttrInfo->m_indexOnly){
+ // invalid data can crash kernel
+ if (cs != NULL &&
+ (*cs->cset->well_formed_len)(cs,
+ aValueToWrite,
+ aValueToWrite + sizeInBytes,
+ sizeInBytes) != sizeInBytes)
+ goto equal_error4;
Uint32 ahValue;
const Uint32 sz = totalSizeInWords;
AttributeHeader::init(&ahValue, tAttrId, sz);
insertATTRINFO( ahValue );
- insertATTRINFOloop((Uint32*)aValue, sizeInWords);
+ insertATTRINFOloop((Uint32*)aValueToWrite, sizeInWords);
if (bitsInLastWord != 0) {
- tData = *(Uint32*)(aValue + (sizeInWords << 2));
+ tData = *(Uint32*)(aValueToWrite + (sizeInWords << 2));
tData = convertEndian(tData);
tData = tData & ((1 << bitsInLastWord) - 1);
tData = convertEndian(tData);
@@ -311,6 +338,10 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
equal_error3:
setErrorCodeAbort(4209);
return -1;
+
+ equal_error4:
+ setErrorCodeAbort(744);
+ return -1;
}
/******************************************************************************
@@ -516,7 +547,8 @@ NdbOperation::getKeyFromTCREQ(Uint32* data, unsigned size)
assert(m_accessTable->m_sizeOfKeysInWords == size);
unsigned pos = 0;
while (pos < 8 && pos < size) {
- data[pos++] = theKEYINFOptr[pos];
+ data[pos] = theKEYINFOptr[pos];
+ pos++;
}
NdbApiSignal* tSignal = theFirstKEYINFO;
unsigned n = 0;
diff --git a/ndb/src/ndbapi/NdbRecAttr.cpp b/ndb/src/ndbapi/NdbRecAttr.cpp
index 2e753f13006..bcd91292fcd 100644
--- a/ndb/src/ndbapi/NdbRecAttr.cpp
+++ b/ndb/src/ndbapi/NdbRecAttr.cpp
@@ -230,7 +230,7 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
}
break;
default: /* no print functions for the rest, just print type */
- out << r.getType();
+ out << (int) r.getType();
j = r.arraySize();
if (j > 1)
out << " " << j << " times";
diff --git a/ndb/src/ndbapi/NdbReceiver.cpp b/ndb/src/ndbapi/NdbReceiver.cpp
index bdb5e6c7e78..14f8d4b8440 100644
--- a/ndb/src/ndbapi/NdbReceiver.cpp
+++ b/ndb/src/ndbapi/NdbReceiver.cpp
@@ -14,12 +14,15 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#include <ndb_global.h>
#include "NdbImpl.hpp"
#include <NdbReceiver.hpp>
#include "NdbDictionaryImpl.hpp"
#include <NdbRecAttr.hpp>
#include <AttributeHeader.hpp>
#include <NdbConnection.hpp>
+#include <TransporterFacade.hpp>
+#include <signaldata/TcKeyConf.hpp>
NdbReceiver::NdbReceiver(Ndb *aNdb) :
theMagicNumber(0),
@@ -35,10 +38,12 @@ NdbReceiver::NdbReceiver(Ndb *aNdb) :
NdbReceiver::~NdbReceiver()
{
+ DBUG_ENTER("NdbReceiver::~NdbReceiver");
if (m_id != NdbObjectIdMap::InvalidId) {
m_ndb->theNdbObjectIdMap->unmap(m_id, this);
}
delete[] m_rows;
+ DBUG_VOID_RETURN;
}
void
@@ -87,7 +92,52 @@ NdbReceiver::getValue(const NdbColumnImpl* tAttrInfo, char * user_dst_ptr){
return 0;
}
-#define KEY_ATTR_ID (~0)
+#define KEY_ATTR_ID (~(Uint32)0)
+
+void
+NdbReceiver::calculate_batch_size(Uint32 key_size,
+ Uint32 parallelism,
+ Uint32& batch_size,
+ Uint32& batch_byte_size,
+ Uint32& first_batch_size)
+{
+ TransporterFacade *tp= TransporterFacade::instance();
+ Uint32 max_scan_batch_size= tp->get_scan_batch_size();
+ Uint32 max_batch_byte_size= tp->get_batch_byte_size();
+ Uint32 max_batch_size= tp->get_batch_size();
+ Uint32 tot_size= (key_size ? (key_size + 32) : 0); //key + signal overhead
+ NdbRecAttr *rec_attr= theFirstRecAttr;
+ while (rec_attr != NULL) {
+ Uint32 attr_size= rec_attr->attrSize() * rec_attr->arraySize();
+ attr_size= ((attr_size + 7) >> 2) << 2; //Even to word + overhead
+ tot_size+= attr_size;
+ rec_attr= rec_attr->next();
+ }
+ tot_size+= 32; //include signal overhead
+
+ /**
+   * Now we calculate the batch size by trying to get up to SCAN_BATCH_SIZE
+ * bytes sent for each batch from each node. We do however ensure that
+ * no more than MAX_SCAN_BATCH_SIZE is sent from all nodes in total per
+ * batch.
+ */
+ batch_byte_size= max_batch_byte_size;
+ if (batch_byte_size * parallelism > max_scan_batch_size) {
+ batch_byte_size= max_scan_batch_size / parallelism;
+ }
+ batch_size= batch_byte_size / tot_size;
+ if (batch_size == 0) {
+ batch_size= 1;
+ } else {
+ if (batch_size > max_batch_size) {
+ batch_size= max_batch_size;
+ } else if (batch_size > MAX_PARALLEL_OP_PER_SCAN) {
+ batch_size= MAX_PARALLEL_OP_PER_SCAN;
+ }
+ }
+ first_batch_size= batch_size;
+ return;
+}
void
NdbReceiver::do_get_value(NdbReceiver * org, Uint32 rows, Uint32 key_size){
@@ -139,7 +189,7 @@ NdbReceiver::do_get_value(NdbReceiver * org, Uint32 rows, Uint32 key_size){
}
prepareSend();
- return ; //0;
+ return;
}
void
@@ -200,10 +250,11 @@ NdbReceiver::execTRANSID_AI(const Uint32* aDataPtr, Uint32 aLength)
/**
* Update m_received_result_length
*/
+ Uint32 exp = m_expected_result_length;
Uint32 tmp = m_received_result_length + aLength;
m_received_result_length = tmp;
- return (tmp == m_expected_result_length ? 1 : 0);
+ return (tmp == exp || (exp > TcKeyConf::SimpleReadBit) ? 1 : 0);
}
int
@@ -223,3 +274,11 @@ NdbReceiver::execKEYINFO20(Uint32 info, const Uint32* aDataPtr, Uint32 aLength)
return (tmp == m_expected_result_length ? 1 : 0);
}
+
+void
+NdbReceiver::setErrorCode(int code)
+{
+ theMagicNumber = 0;
+ NdbOperation* op = (NdbOperation*)getOwner();
+ op->setErrorCode(code);
+}
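calculate_batch_size() above sizes scan batches from the per-row footprint: each selected attribute is rounded up to a word multiple plus overhead, 32 bytes of signal overhead are added per row (plus key_size + 32 when keys are fetched), and the per-node byte budget is capped so that batch_byte_size * parallelism never exceeds the total scan budget. A small numeric sketch under assumed configuration values (the real limits come from TransporterFacade / cluster configuration, and the real code additionally caps at MAX_PARALLEL_OP_PER_SCAN):

#include <cstdint>
#include <cstdio>

int main()
{
  const uint32_t max_scan_batch_size = 262144;  // assumption
  const uint32_t max_batch_byte_size = 32768;   // assumption
  const uint32_t max_batch_size      = 64;      // assumption
  const uint32_t parallelism         = 16;

  // Per-row footprint: two columns of 8 and 37 bytes, no keys fetched.
  uint32_t tot_size = 0;
  for (uint32_t attr : {8u, 37u})
    tot_size += (((attr + 7) >> 2) << 2);       // word-align + per-attribute overhead
  tot_size += 32;                               // signal overhead per row -> 88 bytes

  uint32_t batch_byte_size = max_batch_byte_size;
  if (batch_byte_size * parallelism > max_scan_batch_size)
    batch_byte_size = max_scan_batch_size / parallelism;   // 16384 here

  uint32_t batch_size = batch_byte_size / tot_size;        // 16384 / 88 = 186
  if (batch_size == 0)
    batch_size = 1;
  else if (batch_size > max_batch_size)
    batch_size = max_batch_size;                            // capped to 64 rows

  std::printf("rows per batch: %u, bytes per batch: %u\n", batch_size, batch_byte_size);
}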
diff --git a/ndb/src/ndbapi/NdbScanFilter.cpp b/ndb/src/ndbapi/NdbScanFilter.cpp
index 3813ab139de..38b1c70c047 100644
--- a/ndb/src/ndbapi/NdbScanFilter.cpp
+++ b/ndb/src/ndbapi/NdbScanFilter.cpp
@@ -779,7 +779,9 @@ main(void){
template class Vector<NdbScanFilterImpl::State>;
#if __SUNPRO_CC != 0x560
+#ifndef _FORTEC_
template int NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition, Uint32 attrId, Uint32);
template int NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition, Uint32 attrId, Uint64);
#endif
+#endif
diff --git a/ndb/src/ndbapi/NdbScanOperation.cpp b/ndb/src/ndbapi/NdbScanOperation.cpp
index 0aa40f968bb..fd63ce96f25 100644
--- a/ndb/src/ndbapi/NdbScanOperation.cpp
+++ b/ndb/src/ndbapi/NdbScanOperation.cpp
@@ -32,6 +32,7 @@
#include <signaldata/ScanTab.hpp>
#include <signaldata/KeyInfo.hpp>
+#include <signaldata/AttrInfo.hpp>
#include <signaldata/TcKeyReq.hpp>
NdbScanOperation::NdbScanOperation(Ndb* aNdb) :
@@ -47,11 +48,13 @@ NdbScanOperation::NdbScanOperation(Ndb* aNdb) :
m_sent_receivers = 0;
m_receivers = 0;
m_array = new Uint32[1]; // skip if on delete in fix_receivers
+ theSCAN_TABREQ = 0;
}
NdbScanOperation::~NdbScanOperation()
{
for(Uint32 i = 0; i<m_allocated_receivers; i++){
+ m_receivers[i]->release();
theNdb->releaseNdbScanRec(m_receivers[i]);
}
delete[] m_array;
@@ -95,7 +98,7 @@ NdbScanOperation::setErrorCodeAbort(int aErrorCode){
* Remark: Initiates operation record after allocation.
*****************************************************************************/
int
-NdbScanOperation::init(NdbTableImpl* tab, NdbConnection* myConnection)
+NdbScanOperation::init(const NdbTableImpl* tab, NdbConnection* myConnection)
{
m_transConnection = myConnection;
//NdbConnection* aScanConnection = theNdb->startTransaction(myConnection);
@@ -114,10 +117,8 @@ NdbScanOperation::init(NdbTableImpl* tab, NdbConnection* myConnection)
theStatus = GetValue;
theOperationType = OpenScanRequest;
-
- theTotalBoundAI_Len = 0;
- theBoundATTRINFO = NULL;
-
+ theNdbCon->theMagicNumber = 0xFE11DF;
+
return 0;
}
@@ -129,17 +130,9 @@ NdbResultSet* NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
Uint32 fragCount = m_currentTable->m_fragmentCount;
- if (batch + parallel == 0) {
- batch = 16;
- parallel= fragCount;
- } else {
- if (batch == 0 && parallel > 0) { // Backward
- batch = (parallel >= 16 ? 16 : parallel);
- parallel = (parallel + 15) / 16;
- }
- if (parallel > fragCount || parallel == 0)
+ if (parallel > fragCount || parallel == 0) {
parallel = fragCount;
- }
+ }
// It is only possible to call openScan if
// 1. this transaction doesn't already contain another scan operation
@@ -151,6 +144,7 @@ NdbResultSet* NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
}
theNdbCon->theScanningOp = this;
+ theLockMode = lm;
bool lockExcl, lockHoldMode, readCommitted;
switch(lm){
@@ -174,7 +168,7 @@ NdbResultSet* NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
return 0;
}
- m_keyInfo = lockExcl;
+ m_keyInfo = lockExcl ? 1 : 0;
bool range = false;
if (m_accessTable->m_indexType == NdbDictionary::Index::OrderedIndex ||
@@ -187,20 +181,19 @@ NdbResultSet* NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
}
assert (m_currentTable != m_accessTable);
// Modify operation state
- theStatus = SetBound;
+ theStatus = GetValue;
theOperationType = OpenRangeScanRequest;
range = true;
}
theParallelism = parallel;
- theBatchSize = batch;
if(fix_receivers(parallel) == -1){
setErrorCodeAbort(4000);
return 0;
}
- theSCAN_TABREQ = theNdb->getSignal();
+ theSCAN_TABREQ = (!theSCAN_TABREQ ? theNdb->getSignal() : theSCAN_TABREQ);
if (theSCAN_TABREQ == NULL) {
setErrorCodeAbort(4000);
return 0;
@@ -215,7 +208,7 @@ NdbResultSet* NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
Uint32 reqInfo = 0;
ScanTabReq::setParallelism(reqInfo, parallel);
- ScanTabReq::setScanBatch(reqInfo, batch);
+ ScanTabReq::setScanBatch(reqInfo, 0);
ScanTabReq::setLockMode(reqInfo, lockExcl);
ScanTabReq::setHoldLockFlag(reqInfo, lockHoldMode);
ScanTabReq::setReadCommittedFlag(reqInfo, readCommitted);
@@ -226,8 +219,17 @@ NdbResultSet* NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
req->transId1 = (Uint32) transId;
req->transId2 = (Uint32) (transId >> 32);
- getFirstATTRINFOScan();
+ NdbApiSignal* tSignal =
+ theFirstKEYINFO;
+
+ theFirstKEYINFO = (tSignal ? tSignal : tSignal = theNdb->getSignal());
+ theLastKEYINFO = tSignal;
+
+ tSignal->setSignal(GSN_KEYINFO);
+ theKEYINFOptr = ((KeyInfo*)tSignal->getDataPtrSend())->keyData;
+ theTotalNrOfKeyWordInSignal= 0;
+ getFirstATTRINFOScan();
return getResultSet();
}
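The readTuples() changes above reuse theSCAN_TABREQ if it was already allocated and pre-allocate the first KEYINFO signal, so a restarted scan does not fetch new signals each time. A hedged, generic sketch of that allocate-once, reuse-on-restart pattern (the pool type and names are invented for illustration):

struct Signal { /* payload omitted */ };

struct SignalPool {                  // invented stand-in for the Ndb signal pool
  Signal* get()            { return new Signal(); }
  void release(Signal* s)  { delete s; }
};

struct ScanRequestHolder {
  Signal* req = nullptr;

  // Reuse the previously allocated request on restart, otherwise fetch a
  // fresh one (which may be nullptr if the pool is exhausted).
  Signal* acquire(SignalPool& pool) {
    if (req == nullptr)
      req = pool.get();
    return req;
  }

  // Release exactly once, when the operation itself is released.
  void release(SignalPool& pool) {
    if (req != nullptr) {
      pool.release(req);
      req = nullptr;
    }
  }
};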
@@ -263,18 +265,7 @@ NdbScanOperation::fix_receivers(Uint32 parallel){
m_allocated_receivers = parallel;
}
- for(Uint32 i = 0; i<parallel; i++){
- m_receivers[i]->m_list_index = i;
- m_prepared_receivers[i] = m_receivers[i]->getId();
- m_sent_receivers[i] = m_receivers[i];
- m_conf_receivers[i] = 0;
- m_api_receivers[i] = 0;
- }
-
- m_api_receivers_count = 0;
- m_current_api_receiver = 0;
- m_sent_receivers_count = parallel;
- m_conf_receivers_count = 0;
+ reset_receivers(parallel, 0);
return 0;
}
@@ -362,6 +353,7 @@ NdbScanOperation::getFirstATTRINFOScan()
* After setBound() are done, move the accumulated ATTRINFO signals to
* a separate list. Then continue with normal scan.
*/
+#if 0
int
NdbIndexScanOperation::saveBoundATTRINFO()
{
@@ -395,8 +387,8 @@ NdbIndexScanOperation::saveBoundATTRINFO()
Uint32 cnt = m_accessTable->getNoOfColumns() - 1;
m_sort_columns = cnt - i;
for(; i<cnt; i++){
- NdbColumnImpl* key = m_accessTable->m_index->m_columns[i];
- NdbColumnImpl* col = m_currentTable->getColumn(key->m_keyInfoPos);
+ const NdbColumnImpl* key = m_accessTable->m_index->m_columns[i];
+ const NdbColumnImpl* col = m_currentTable->getColumn(key->m_keyInfoPos);
NdbRecAttr* tmp = NdbScanOperation::getValue_impl(col, (char*)-1);
UintPtr newVal = UintPtr(tmp);
theTupleKeyDefined[i][0] = FAKE_PTR;
@@ -408,6 +400,7 @@ NdbIndexScanOperation::saveBoundATTRINFO()
}
return res;
}
+#endif
#define WAITFOR_SCAN_TIMEOUT 120000
@@ -416,14 +409,22 @@ NdbScanOperation::executeCursor(int nodeId){
NdbConnection * tCon = theNdbCon;
TransporterFacade* tp = TransporterFacade::instance();
Guard guard(tp->theMutexPtr);
+
+ Uint32 magic = tCon->theMagicNumber;
Uint32 seq = tCon->theNodeSequence;
+
if (tp->get_node_alive(nodeId) &&
(tp->getNodeSequence(nodeId) == seq)) {
-
- if(prepareSendScan(tCon->theTCConPtr, tCon->theTransactionId) == -1)
- return -1;
+ /**
+ * Only call prepareSendScan the first time (in case of restarts)
+ * - check with theMagicNumber
+ */
tCon->theMagicNumber = 0x37412619;
+ if(magic != 0x37412619 &&
+ prepareSendScan(tCon->theTCConPtr, tCon->theTransactionId) == -1)
+ return -1;
+
if (doSendScan(nodeId) == -1)
return -1;
@@ -435,7 +436,6 @@ NdbScanOperation::executeCursor(int nodeId){
TRACE_DEBUG("The node is hard dead when attempting to start a scan");
setErrorCode(4029);
tCon->theReleaseOnClose = true;
- abort();
} else {
TRACE_DEBUG("The node is stopping when attempting to start a scan");
setErrorCode(4030);
@@ -529,20 +529,9 @@ int NdbScanOperation::nextResult(bool fetchAllowed)
/**
* No completed & no sent -> EndOfData
*/
- if(send_next_scan(0, true) == 0){ // Close scan
- theNdb->theWaiter.m_node = nodeId;
- theNdb->theWaiter.m_state = WAIT_SCAN;
- int return_code = theNdb->receiveResponse(WAITFOR_SCAN_TIMEOUT);
- if (return_code == 0 && seq == tp->getNodeSequence(nodeId)) {
- theError.code = -1; // make sure user gets error if he tries again
- if(DEBUG_NEXT_RESULT) ndbout_c("return 1");
- return 1;
- }
- retVal = -1; //return_code;
- } else {
- retVal = -3;
- }
- idx = last;
+ theError.code = -1; // make sure user gets error if he tries again
+ if(DEBUG_NEXT_RESULT) ndbout_c("return 1");
+ return 1;
}
if(retVal == 0)
@@ -577,6 +566,8 @@ int NdbScanOperation::nextResult(bool fetchAllowed)
setErrorCode(4028); // Node fail
break;
case -3: // send_next_scan -> return fail (set error-code self)
+ if(theError.code == 0)
+ setErrorCode(4028); // seq changed = Node fail
break;
}
@@ -653,7 +644,7 @@ NdbScanOperation::doSend(int ProcessorId)
void NdbScanOperation::closeScan()
{
- if(m_transConnection) do {
+ if(m_transConnection){
if(DEBUG_NEXT_RESULT)
ndbout_c("closeScan() theError.code = %d "
"m_api_receivers_count = %d "
@@ -666,55 +657,8 @@ void NdbScanOperation::closeScan()
TransporterFacade* tp = TransporterFacade::instance();
Guard guard(tp->theMutexPtr);
-
- Uint32 seq = theNdbCon->theNodeSequence;
- Uint32 nodeId = theNdbCon->theDBnode;
-
- if(seq != tp->getNodeSequence(nodeId)){
- theNdbCon->theReleaseOnClose = true;
- break;
- }
-
- while(theError.code == 0 && m_sent_receivers_count){
- theNdb->theWaiter.m_node = nodeId;
- theNdb->theWaiter.m_state = WAIT_SCAN;
- int return_code = theNdb->receiveResponse(WAITFOR_SCAN_TIMEOUT);
- switch(return_code){
- case 0:
- break;
- case -1:
- setErrorCode(4008);
- case -2:
- m_api_receivers_count = 0;
- m_conf_receivers_count = 0;
- m_sent_receivers_count = 0;
- theNdbCon->theReleaseOnClose = true;
- }
- }
-
- if(m_api_receivers_count+m_conf_receivers_count){
- // Send close scan
- send_next_scan(0, true); // Close scan
- }
+ close_impl(tp);
- /**
- * wait for close scan conf
- */
- while(m_sent_receivers_count+m_api_receivers_count+m_conf_receivers_count){
- theNdb->theWaiter.m_node = nodeId;
- theNdb->theWaiter.m_state = WAIT_SCAN;
- int return_code = theNdb->receiveResponse(WAITFOR_SCAN_TIMEOUT);
- switch(return_code){
- case 0:
- break;
- case -1:
- setErrorCode(4008);
- case -2:
- m_api_receivers_count = 0;
- m_conf_receivers_count = 0;
- m_sent_receivers_count = 0;
- }
- }
} while(0);
theNdbCon->theScanningOp = 0;
@@ -739,6 +683,12 @@ void NdbScanOperation::release()
for(Uint32 i = 0; i<m_allocated_receivers; i++){
m_receivers[i]->release();
}
+ if(theSCAN_TABREQ)
+ {
+ theNdb->releaseSignal(theSCAN_TABREQ);
+ theSCAN_TABREQ = 0;
+ }
+ NdbOperation::release();
}
/***************************************************************************
@@ -762,11 +712,6 @@ int NdbScanOperation::prepareSendScan(Uint32 aTC_ConnectPtr,
return -1;
}
- if (theStatus == SetBound) {
- ((NdbIndexScanOperation*)this)->saveBoundATTRINFO();
- theStatus = GetValue;
- }
-
theErrorLine = 0;
// In prepareSendInterpreted we set the sizes (words 4-8) in the
@@ -778,26 +723,7 @@ int NdbScanOperation::prepareSendScan(Uint32 aTC_ConnectPtr,
((NdbIndexScanOperation*)this)->fix_get_values();
}
- const Uint32 transId1 = (Uint32) (aTransactionId & 0xFFFFFFFF);
- const Uint32 transId2 = (Uint32) (aTransactionId >> 32);
-
- if (theOperationType == OpenRangeScanRequest) {
- NdbApiSignal* tSignal = theBoundATTRINFO;
- do{
- tSignal->setData(aTC_ConnectPtr, 1);
- tSignal->setData(transId1, 2);
- tSignal->setData(transId2, 3);
- tSignal = tSignal->next();
- } while (tSignal != NULL);
- }
theCurrentATTRINFO->setLength(theAI_LenInCurrAI);
- NdbApiSignal* tSignal = theFirstATTRINFO;
- do{
- tSignal->setData(aTC_ConnectPtr, 1);
- tSignal->setData(transId1, 2);
- tSignal->setData(transId2, 3);
- tSignal = tSignal->next();
- } while (tSignal != NULL);
/**
* Prepare all receivers
@@ -805,20 +731,43 @@ int NdbScanOperation::prepareSendScan(Uint32 aTC_ConnectPtr,
theReceiver.prepareSend();
bool keyInfo = m_keyInfo;
Uint32 key_size = keyInfo ? m_currentTable->m_keyLenInWords : 0;
+ /**
+ * The number of records sent by each LQH is calculated and the kernel
+ * is informed of this number by updating the SCAN_TABREQ signal
+ */
+ Uint32 batch_size, batch_byte_size, first_batch_size;
+ theReceiver.calculate_batch_size(key_size,
+ theParallelism,
+ batch_size,
+ batch_byte_size,
+ first_batch_size);
+ ScanTabReq * req = CAST_PTR(ScanTabReq, theSCAN_TABREQ->getDataPtrSend());
+ ScanTabReq::setScanBatch(req->requestInfo, batch_size);
+ req->batch_byte_size= batch_byte_size;
+ req->first_batch_size= first_batch_size;
+
+ /**
+ * Set keyinfo flag
+ * (Always keyinfo when using blobs)
+ */
+ Uint32 reqInfo = req->requestInfo;
+ ScanTabReq::setKeyinfoFlag(reqInfo, keyInfo);
+ req->requestInfo = reqInfo;
+
for(Uint32 i = 0; i<theParallelism; i++){
- m_receivers[i]->do_get_value(&theReceiver, theBatchSize, key_size);
+ m_receivers[i]->do_get_value(&theReceiver, batch_size, key_size);
}
return 0;
}
-/******************************************************************************
+/*****************************************************************************
int doSend()
Return Value: Return >0 : send was successful, returns number of signals sent
Return -1: In all other cases.
Parameters: aProcessorId: Receiving processor node
Remark: Sends the ATTRINFO signal(s)
-******************************************************************************/
+*****************************************************************************/
int
NdbScanOperation::doSendScan(int aProcessorId)
{
@@ -838,47 +787,61 @@ NdbScanOperation::doSendScan(int aProcessorId)
setErrorCode(4001);
return -1;
}
+
+ Uint32 tupKeyLen = theTupKeyLen;
+ Uint32 len = theTotalNrOfKeyWordInSignal;
+ Uint32 aTC_ConnectPtr = theNdbCon->theTCConPtr;
+ Uint64 transId = theNdbCon->theTransactionId;
+
// Update the "attribute info length in words" in SCAN_TABREQ before
// sending it. This could not be done in openScan because
// we created the ATTRINFO signals after the SCAN_TABREQ signal.
ScanTabReq * const req = CAST_PTR(ScanTabReq, tSignal->getDataPtrSend());
- req->attrLen = theTotalCurrAI_Len;
- if (theOperationType == OpenRangeScanRequest)
- req->attrLen += theTotalBoundAI_Len;
+ req->attrLenKeyLen = (tupKeyLen << 16) | theTotalCurrAI_Len;
+
TransporterFacade *tp = TransporterFacade::instance();
- if(theParallelism > 16){
- LinearSectionPtr ptr[3];
- ptr[0].p = m_prepared_receivers;
- ptr[0].sz = theParallelism;
- if (tp->sendFragmentedSignal(tSignal, aProcessorId, ptr, 1) == -1) {
- setErrorCode(4002);
- return -1;
- }
- } else {
- tSignal->setLength(9+theParallelism);
- memcpy(tSignal->getDataPtrSend()+9, m_prepared_receivers, 4*theParallelism);
- if (tp->sendSignal(tSignal, aProcessorId) == -1) {
- setErrorCode(4002);
- return -1;
- }
- }
+ LinearSectionPtr ptr[3];
+ ptr[0].p = m_prepared_receivers;
+ ptr[0].sz = theParallelism;
+ if (tp->sendFragmentedSignal(tSignal, aProcessorId, ptr, 1) == -1) {
+ setErrorCode(4002);
+ return -1;
+ }
- if (theOperationType == OpenRangeScanRequest) {
+ if (tupKeyLen > 0){
// must have at least one signal since it contains attrLen for bounds
- assert(theBoundATTRINFO != NULL);
- tSignal = theBoundATTRINFO;
- while (tSignal != NULL) {
+ assert(theLastKEYINFO != NULL);
+ tSignal = theLastKEYINFO;
+ tSignal->setLength(KeyInfo::HeaderLength + theTotalNrOfKeyWordInSignal);
+
+ assert(theFirstKEYINFO != NULL);
+ tSignal = theFirstKEYINFO;
+
+ NdbApiSignal* last;
+ do {
+ KeyInfo * keyInfo = CAST_PTR(KeyInfo, tSignal->getDataPtrSend());
+ keyInfo->connectPtr = aTC_ConnectPtr;
+ keyInfo->transId[0] = Uint32(transId);
+ keyInfo->transId[1] = Uint32(transId >> 32);
+
if (tp->sendSignal(tSignal,aProcessorId) == -1){
- setErrorCode(4002);
- return -1;
+ setErrorCode(4002);
+ return -1;
}
+
tSignalCount++;
+ last = tSignal;
tSignal = tSignal->next();
- }
+ } while(last != theLastKEYINFO);
}
tSignal = theFirstATTRINFO;
while (tSignal != NULL) {
+ AttrInfo * attrInfo = CAST_PTR(AttrInfo, tSignal->getDataPtrSend());
+ attrInfo->connectPtr = aTC_ConnectPtr;
+ attrInfo->transId[0] = Uint32(transId);
+ attrInfo->transId[1] = Uint32(transId >> 32);
+
if (tp->sendSignal(tSignal,aProcessorId) == -1){
setErrorCode(4002);
return -1;
@@ -890,7 +853,7 @@ NdbScanOperation::doSendScan(int aProcessorId)
return tSignalCount;
}//NdbOperation::doSendScan()
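The KEYINFO loop in doSendScan() above walks a singly linked signal chain but terminates at the remembered tail (theLastKEYINFO) rather than at a null next(), since spare signals may still be linked after the tail. A small generic sketch of that traversal, stamping the connection pointer and transaction id into each signal (types invented for illustration):

#include <cstdint>

struct Sig {
  uint32_t connectPtr = 0;
  uint32_t transId[2] = {0, 0};
  Sig*     nxt        = nullptr;
  Sig*     next() const { return nxt; }
};

// Stamp every signal from head up to and including tail; anything linked
// after tail (for example pre-allocated spares) is deliberately left alone.
inline void stamp_chain(Sig* head, Sig* tail,
                        uint32_t connectPtr, uint64_t transId)
{
  Sig* cur = head;
  Sig* last;
  do {
    cur->connectPtr = connectPtr;
    cur->transId[0] = uint32_t(transId);
    cur->transId[1] = uint32_t(transId >> 32);
    last = cur;
    cur  = cur->next();
  } while (last != tail);
}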
-/******************************************************************************
+/*****************************************************************************
* NdbOperation* takeOverScanOp(NdbConnection* updateTrans);
*
* Parameters: The update transactions NdbConnection pointer.
@@ -909,7 +872,7 @@ NdbScanOperation::doSendScan(int aProcessorId)
* This means that the updating transactions can be placed
* in separate threads and thus increasing the parallelism during
* the scan process.
- *****************************************************************************/
+ ****************************************************************************/
int
NdbScanOperation::getKeyFromKEYINFO20(Uint32* data, unsigned size)
{
@@ -947,6 +910,7 @@ NdbScanOperation::takeOverScanOp(OperationType opType, NdbConnection* pTrans){
if (newOp == NULL){
return NULL;
}
+ pTrans->theSimpleState = 0;
const Uint32 len = (tRecAttr->attrSize() * tRecAttr->arraySize() + 3)/4-1;
@@ -959,8 +923,8 @@ NdbScanOperation::takeOverScanOp(OperationType opType, NdbConnection* pTrans){
}
const Uint32 * src = (Uint32*)tRecAttr->aRef();
- const Uint32 tScanInfo = src[len] & 0xFFFF;
- const Uint32 tTakeOverNode = src[len] >> 16;
+ const Uint32 tScanInfo = src[len] & 0x3FFFF;
+ const Uint32 tTakeOverNode = src[len] >> 20;
{
UintR scanInfo = 0;
TcKeyReq::setTakeOverScanFlag(scanInfo, 1);
@@ -1018,6 +982,7 @@ NdbScanOperation::takeOverScanOp(OperationType opType, NdbConnection* pTrans){
NdbBlob*
NdbScanOperation::getBlobHandle(const char* anAttrName)
{
+ m_keyInfo = 1;
return NdbOperation::getBlobHandle(m_transConnection,
m_currentTable->getColumn(anAttrName));
}
@@ -1025,6 +990,7 @@ NdbScanOperation::getBlobHandle(const char* anAttrName)
NdbBlob*
NdbScanOperation::getBlobHandle(Uint32 anAttrId)
{
+ m_keyInfo = 1;
return NdbOperation::getBlobHandle(m_transConnection,
m_currentTable->getColumn(anAttrId));
}
@@ -1038,13 +1004,15 @@ NdbIndexScanOperation::~NdbIndexScanOperation(){
}
int
-NdbIndexScanOperation::setBound(const char* anAttrName, int type, const void* aValue, Uint32 len)
+NdbIndexScanOperation::setBound(const char* anAttrName, int type,
+ const void* aValue, Uint32 len)
{
return setBound(m_accessTable->getColumn(anAttrName), type, aValue, len);
}
int
-NdbIndexScanOperation::setBound(Uint32 anAttrId, int type, const void* aValue, Uint32 len)
+NdbIndexScanOperation::setBound(Uint32 anAttrId, int type,
+ const void* aValue, Uint32 len)
{
return setBound(m_accessTable->getColumn(anAttrId), type, aValue, len);
}
@@ -1063,11 +1031,6 @@ NdbIndexScanOperation::getValue_impl(const NdbColumnImpl* attrInfo,
return NdbScanOperation::getValue_impl(attrInfo, aValue);
}
- if (theStatus == SetBound) {
- saveBoundATTRINFO();
- theStatus = GetValue;
- }
-
int id = attrInfo->m_attrId; // In "real" table
assert(m_accessTable->m_index);
int sz = (int)m_accessTable->m_index->m_key_ids.size();
@@ -1108,35 +1071,64 @@ NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo,
int type, const void* aValue, Uint32 len)
{
if (theOperationType == OpenRangeScanRequest &&
- theStatus == SetBound &&
(0 <= type && type <= 4) &&
len <= 8000) {
- // bound type
-
- insertATTRINFO(type);
- // attribute header
+ // insert bound type
+ Uint32 currLen = theTotalNrOfKeyWordInSignal;
+ Uint32 remaining = KeyInfo::DataLength - currLen;
Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize;
+
+ // normalize char bound
+ CHARSET_INFO* cs = tAttrInfo->m_cs;
+ Uint32 xfrmData[2000];
+ if (cs != NULL && aValue != NULL) {
+ // current limitation: strxfrm does not increase length
+ assert(cs->strxfrm_multiply == 1);
+ unsigned n =
+ (*cs->coll->strnxfrm)(cs,
+ (uchar*)xfrmData, sizeof(xfrmData),
+ (const uchar*)aValue, sizeInBytes);
+ while (n < sizeInBytes)
+ ((uchar*)xfrmData)[n++] = 0x20;
+ aValue = (char*)xfrmData;
+ }
if (len != sizeInBytes && (len != 0)) {
setErrorCodeAbort(4209);
return -1;
}
+ // insert attribute header
len = aValue != NULL ? sizeInBytes : 0;
Uint32 tIndexAttrId = tAttrInfo->m_attrId;
Uint32 sizeInWords = (len + 3) / 4;
AttributeHeader ah(tIndexAttrId, sizeInWords);
- insertATTRINFO(ah.m_value);
- if (len != 0) {
- // attribute data
- if ((UintPtr(aValue) & 0x3) == 0 && (len & 0x3) == 0)
- insertATTRINFOloop((const Uint32*)aValue, sizeInWords);
- else {
- Uint32 temp[2000];
- memcpy(temp, aValue, len);
+ const Uint32 ahValue = ah.m_value;
+
+ const bool aligned = (UintPtr(aValue) & 3) == 0;
+ const bool nobytes = (len & 0x3) == 0;
+ const Uint32 totalLen = 2 + sizeInWords;
+ Uint32 tupKeyLen = theTupKeyLen;
+ if(remaining > totalLen && aligned && nobytes){
+ Uint32 * dst = theKEYINFOptr + currLen;
+ * dst ++ = type;
+ * dst ++ = ahValue;
+ memcpy(dst, aValue, 4 * sizeInWords);
+ theTotalNrOfKeyWordInSignal = currLen + totalLen;
+ } else {
+ if(!aligned || !nobytes){
+ Uint32 tempData[2002];
+ tempData[0] = type;
+ tempData[1] = ahValue;
+ memcpy(tempData+2, aValue, len);
while ((len & 0x3) != 0)
- ((char*)temp)[len++] = 0;
- insertATTRINFOloop(temp, sizeInWords);
+ ((char*)&tempData[2])[len++] = 0;
+ insertBOUNDS(tempData, 2+sizeInWords);
+ } else {
+ Uint32 buf[2] = { type, ahValue };
+ insertBOUNDS(buf, 2);
+ insertBOUNDS((Uint32*)aValue, sizeInWords);
}
}
+ theTupKeyLen = tupKeyLen + totalLen;
/**
* Do sorted stuff
@@ -1159,6 +1151,46 @@ NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo,
}
}
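setBound() above normalizes character bounds with the column collation's strnxfrm and then pads with 0x20 up to the original length, so bounds compare the way the kernel compares stored keys. A generic sketch of that normalize-then-pad step; the transform is left as a caller-supplied function because the real call goes through the MySQL CHARSET_INFO API, and the assumption that the transform never grows the data matches the strxfrm_multiply == 1 limitation noted in the patch:

#include <cstddef>
#include <cstdint>
#include <vector>

// Stand-in for (*cs->coll->strnxfrm)(): writes at most dst_len bytes and
// returns the number of bytes produced (assumed <= src_len here).
using XfrmFn = size_t (*)(uint8_t* dst, size_t dst_len,
                          const uint8_t* src, size_t src_len);

inline std::vector<uint8_t>
normalize_bound(const uint8_t* value, size_t len, XfrmFn xfrm)
{
  std::vector<uint8_t> out(len);
  size_t n = xfrm(out.data(), out.size(), value, len);
  while (n < len)
    out[n++] = 0x20;   // pad with spaces so the bound keeps its full length
  return out;
}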
+int
+NdbIndexScanOperation::insertBOUNDS(Uint32 * data, Uint32 sz){
+ Uint32 len;
+ Uint32 remaining = KeyInfo::DataLength - theTotalNrOfKeyWordInSignal;
+ Uint32 * dst = theKEYINFOptr + theTotalNrOfKeyWordInSignal;
+ do {
+ len = (sz < remaining ? sz : remaining);
+ memcpy(dst, data, 4 * len);
+
+ if(sz >= remaining){
+ NdbApiSignal* tCurr = theLastKEYINFO;
+ tCurr->setLength(KeyInfo::MaxSignalLength);
+ NdbApiSignal* tSignal = tCurr->next();
+ if(tSignal)
+ ;
+ else if((tSignal = theNdb->getSignal()) != 0)
+ {
+ tCurr->next(tSignal);
+ tSignal->setSignal(GSN_KEYINFO);
+ } else {
+ goto error;
+ }
+ theLastKEYINFO = tSignal;
+ theKEYINFOptr = dst = ((KeyInfo*)tSignal->getDataPtrSend())->keyData;
+ remaining = KeyInfo::DataLength;
+ sz -= len;
+ data += len;
+ } else {
+ len = (KeyInfo::DataLength - remaining) + len;
+ break;
+ }
+ } while(true);
+ theTotalNrOfKeyWordInSignal = len;
+ return 0;
+
+error:
+ setErrorCodeAbort(4228); // XXX wrong code
+ return -1;
+}
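insertBOUNDS() above streams an arbitrary number of words into fixed-size KEYINFO payloads, chaining on a new signal whenever the current one fills up. A self-contained sketch of the same splitting logic against a plain vector of buffers; the 20-word capacity is invented here and merely stands in for KeyInfo::DataLength:

#include <cstddef>
#include <cstdint>
#include <vector>

static const size_t kDataLength = 20;   // stand-in for KeyInfo::DataLength

struct Payload { std::vector<uint32_t> words; };

// Append sz words to the last payload, spilling into new payloads whenever
// the fixed per-signal capacity is exhausted.
inline void append_words(std::vector<Payload>& sigs,
                         const uint32_t* data, size_t sz)
{
  if (sigs.empty())
    sigs.push_back(Payload{});
  while (sz > 0) {
    Payload& cur  = sigs.back();
    size_t   room = kDataLength - cur.words.size();
    if (room == 0) {                    // current signal is full: start a new one
      sigs.push_back(Payload{});
      continue;
    }
    size_t len = (sz < room ? sz : room);
    cur.words.insert(cur.words.end(), data, data + len);
    data += len;
    sz   -= len;
  }
}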
+
NdbResultSet*
NdbIndexScanOperation::readTuples(LockMode lm,
Uint32 batch,
@@ -1167,9 +1199,23 @@ NdbIndexScanOperation::readTuples(LockMode lm,
NdbResultSet * rs = NdbScanOperation::readTuples(lm, batch, 0);
if(rs && order_by){
m_ordered = 1;
- m_sort_columns = m_accessTable->getNoOfColumns() - 1; // -1 for NDB$NODE
+ Uint32 cnt = m_accessTable->getNoOfColumns() - 1;
+ m_sort_columns = cnt; // -1 for NDB$NODE
m_current_api_receiver = m_sent_receivers_count;
m_api_receivers_count = m_sent_receivers_count;
+
+ m_sort_columns = cnt;
+ for(Uint32 i = 0; i<cnt; i++){
+ const NdbColumnImpl* key = m_accessTable->m_index->m_columns[i];
+ const NdbColumnImpl* col = m_currentTable->getColumn(key->m_keyInfoPos);
+ NdbRecAttr* tmp = NdbScanOperation::getValue_impl(col, (char*)-1);
+ UintPtr newVal = UintPtr(tmp);
+ theTupleKeyDefined[i][0] = FAKE_PTR;
+ theTupleKeyDefined[i][1] = (newVal & 0xFFFFFFFF);
+#if (SIZEOF_CHARP == 8)
+ theTupleKeyDefined[i][2] = (newVal >> 32);
+#endif
+ }
}
return rs;
}
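The ordered readTuples() path above hides each NdbRecAttr pointer inside the theTupleKeyDefined word array, splitting it across two 32-bit words (plus a third on 64-bit builds, guarded by SIZEOF_CHARP). A minimal sketch of that encode/decode round trip with the array layout simplified to two words:

#include <cassert>
#include <cstdint>

struct RecAttr { int dummy; };

// Low word first; the high word only carries information on 64-bit builds.
inline void encode_ptr(RecAttr* p, uint32_t out[2])
{
  uintptr_t v = reinterpret_cast<uintptr_t>(p);
  out[0] = uint32_t(v & 0xFFFFFFFF);
  out[1] = uint32_t(sizeof(void*) == 8 ? uint64_t(v) >> 32 : 0);
}

inline RecAttr* decode_ptr(const uint32_t in[2])
{
  uint64_t v = in[0];
  if (sizeof(void*) == 8)
    v |= uint64_t(in[1]) << 32;
  return reinterpret_cast<RecAttr*>(uintptr_t(v));
}

int main()
{
  RecAttr r;
  uint32_t w[2];
  encode_ptr(&r, w);
  assert(decode_ptr(w) == &r);
  return 0;
}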
@@ -1183,8 +1229,8 @@ NdbIndexScanOperation::fix_get_values(){
Uint32 cnt = m_accessTable->getNoOfColumns() - 1;
assert(cnt < NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY);
- NdbIndexImpl * idx = m_accessTable->m_index;
- NdbTableImpl * tab = m_currentTable;
+ const NdbIndexImpl * idx = m_accessTable->m_index;
+ const NdbTableImpl * tab = m_currentTable;
for(Uint32 i = 0; i<cnt; i++){
Uint32 val = theTupleKeyDefined[i][0];
switch(val){
@@ -1221,13 +1267,13 @@ NdbIndexScanOperation::compare(Uint32 skip, Uint32 cols,
if((r1_null ^ (unsigned)r2->isNULL())){
return (r1_null ? -1 : 1);
}
- Uint32 type = NdbColumnImpl::getImpl(* r1->m_column).m_extType;
+ const NdbColumnImpl & col = NdbColumnImpl::getImpl(* r1->m_column);
Uint32 size = (r1->theAttrSize * r1->theArraySize + 3) / 4;
if(!r1_null){
- char r = NdbSqlUtil::cmp(type, d1, d2, size, size);
+ const NdbSqlUtil::Type& sqlType = NdbSqlUtil::getType(col.m_extType);
+ int r = (*sqlType.m_cmp)(col.m_cs, d1, d2, size, size);
if(r){
assert(r != NdbSqlUtil::CmpUnknown);
- assert(r != NdbSqlUtil::CmpError);
return r;
}
}
@@ -1348,19 +1394,9 @@ NdbIndexScanOperation::next_result_ordered(bool fetchAllowed){
return 0;
}
- TransporterFacade* tp = TransporterFacade::instance();
- Guard guard(tp->theMutexPtr);
- Uint32 seq = theNdbCon->theNodeSequence;
- Uint32 nodeId = theNdbCon->theDBnode;
- if(seq == tp->getNodeSequence(nodeId) &&
- send_next_scan(0, true) == 0 &&
- theError.code == 0){
- if(DEBUG_NEXT_RESULT) ndbout_c("return 1");
- return 1;
- }
- setErrorCode(theError.code);
- if(DEBUG_NEXT_RESULT) ndbout_c("return -1");
- return -1;
+ theError.code = -1;
+ if(DEBUG_NEXT_RESULT) ndbout_c("return 1");
+ return 1;
}
int
@@ -1400,10 +1436,7 @@ NdbIndexScanOperation::send_next_scan_ordered(Uint32 idx){
}
int
-NdbScanOperation::restart(){
- TransporterFacade* tp = TransporterFacade::instance();
- Guard guard(tp->theMutexPtr);
-
+NdbScanOperation::close_impl(TransporterFacade* tp){
Uint32 seq = theNdbCon->theNodeSequence;
Uint32 nodeId = theNdbCon->theDBnode;
@@ -1411,8 +1444,8 @@ NdbScanOperation::restart(){
theNdbCon->theReleaseOnClose = true;
return -1;
}
-
- while(m_sent_receivers_count){
+
+ while(theError.code == 0 && m_sent_receivers_count){
theNdb->theWaiter.m_node = nodeId;
theNdb->theWaiter.m_state = WAIT_SCAN;
int return_code = theNdb->receiveResponse(WAITFOR_SCAN_TIMEOUT);
@@ -1425,14 +1458,17 @@ NdbScanOperation::restart(){
m_api_receivers_count = 0;
m_conf_receivers_count = 0;
m_sent_receivers_count = 0;
+ theNdbCon->theReleaseOnClose = true;
return -1;
}
}
if(m_api_receivers_count+m_conf_receivers_count){
// Send close scan
- if(send_next_scan(0, true) == -1) // Close scan
+ if(send_next_scan(0, true) == -1){ // Close scan
+ theNdbCon->theReleaseOnClose = true;
return -1;
+ }
}
/**
@@ -1451,15 +1487,15 @@ NdbScanOperation::restart(){
m_api_receivers_count = 0;
m_conf_receivers_count = 0;
m_sent_receivers_count = 0;
+ theNdbCon->theReleaseOnClose = true;
return -1;
}
}
+ return 0;
+}
- /**
- * Reset receivers
- */
- const Uint32 parallell = theParallelism;
-
+void
+NdbScanOperation::reset_receivers(Uint32 parallell, Uint32 ordered){
for(Uint32 i = 0; i<parallell; i++){
m_receivers[i]->m_list_index = i;
m_prepared_receivers[i] = m_receivers[i]->getId();
@@ -1474,13 +1510,64 @@ NdbScanOperation::restart(){
m_sent_receivers_count = parallell;
m_conf_receivers_count = 0;
- if(m_ordered){
+ if(ordered){
m_current_api_receiver = parallell;
m_api_receivers_count = parallell;
}
+}
+
+int
+NdbScanOperation::restart()
+{
+ TransporterFacade* tp = TransporterFacade::instance();
+ Guard guard(tp->theMutexPtr);
+ Uint32 nodeId = theNdbCon->theDBnode;
+
+ {
+ int res;
+ if((res= close_impl(tp)))
+ {
+ return res;
+ }
+ }
+
+ /**
+ * Reset receivers
+ */
+ reset_receivers(theParallelism, m_ordered);
+
+ theError.code = 0;
if (doSendScan(nodeId) == -1)
return -1;
return 0;
}
+
+int
+NdbIndexScanOperation::reset_bounds(){
+ int res;
+
+ {
+ TransporterFacade* tp = TransporterFacade::instance();
+ Guard guard(tp->theMutexPtr);
+ res= close_impl(tp);
+ }
+
+ if(!res)
+ {
+ theError.code = 0;
+ reset_receivers(theParallelism, m_ordered);
+
+ theLastKEYINFO = theFirstKEYINFO;
+ theKEYINFOptr = ((KeyInfo*)theFirstKEYINFO->getDataPtrSend())->keyData;
+ theTupKeyLen = 0;
+ theTotalNrOfKeyWordInSignal = 0;
+ m_transConnection
+ ->remove_list((NdbOperation*&)m_transConnection->m_firstExecutedScanOp,
+ this);
+ m_transConnection->define_scan_op(this);
+ return 0;
+ }
+ return res;
+}
diff --git a/ndb/src/ndbapi/NdbScanReceiver.cpp b/ndb/src/ndbapi/NdbScanReceiver.cpp
deleted file mode 100644
index 6c8c16c3ecf..00000000000
--- a/ndb/src/ndbapi/NdbScanReceiver.cpp
+++ /dev/null
@@ -1,187 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include "NdbScanReceiver.hpp"
-#include <NdbRecAttr.hpp>
-
-#include <signaldata/ScanFrag.hpp>
-
-#include <NdbOut.hpp>
-
-
-/***************************************************************************
- * int receiveKEYINFO20( NdbApiSignal* aSignal)
- *
- * Remark: Handles the reception of the KEYINFO20 signal.
- * Save a copy of the signal in list
- *
- ***************************************************************************/
-int
-NdbScanReceiver::receiveKEYINFO20( NdbApiSignal* aSignal){
- const KeyInfo20 * const keyInfo = CAST_CONSTPTR(KeyInfo20, aSignal->getDataPtr());
- if (theStatus != Waiting){
- //ndbout << "Dropping KEYINFO20, theStatus="<<theStatus << endl;
- return -1;
- }
- if (aSignal->getLength() < 5){
- //ndbout << "Dropping KEYINFO20, length="<<aSignal->getLength() << endl;
- }
- Uint64 tCurrTransId = theNdbOp->theNdbCon->getTransactionId();
- Uint64 tRecTransId = (Uint64)keyInfo->transId1 + ((Uint64)keyInfo->transId2 << 32);
- if ((tRecTransId - tCurrTransId) != (Uint64)0){
- //ndbout << "Dropping KEYINFO20 wrong transid" << endl;
- return -1;
- }
-
- NdbApiSignal * tCopy = new NdbApiSignal(0);//getSignal();
- if (tCopy == NULL) {
- theNdbOp->setErrorCode(4000);
- return 2; // theWaitState = NO_WAIT
- }
- // Put copy last in list of KEYINFO20 signals
- tCopy->copyFrom(aSignal);
- tCopy->next(NULL);
- if (theFirstKEYINFO20_Recv == NULL)
- theFirstKEYINFO20_Recv = tCopy;
- else
- theLastKEYINFO20_Recv->next(tCopy);
- theLastKEYINFO20_Recv = tCopy;
-
- theTotalKI_Len = keyInfo->keyLen; // This is the total length of all signals
- theTotalRecKI_Len += aSignal->getLength() - 5;
- return theNdbOp->theNdbCon->checkNextScanResultComplete();
-}
-
-/***************************************************************************
- * int receiveTRANSID_AI_SCAN( NdbApiSignal* aSignal)
- *
- * Remark: Handles the reception of the TRANSID_AI_signal with
- * 22 signal data words.
- * Save a copy of the signal in list and check if all
- * signals belonging to this resultset is receieved.
- *
- ***************************************************************************/
-int
-NdbScanReceiver::receiveTRANSID_AI_SCAN( NdbApiSignal* aSignal)
-{
- const Uint32* aDataPtr = aSignal->getDataPtr();
- if (theStatus != Waiting){
- //ndbout << "Dropping TRANSID_AI, theStatus="<<theStatus << endl;
- return -1;
- }
- if (aSignal->getLength() < 3){
- //ndbout << "Dropping TRANSID_AI, length="<<aSignal->getLength() << endl;
- return -1;
- }
- if (theNdbOp == NULL){
- //ndbout << "Dropping TRANSID_AI, theNdbOp == NULL" << endl;
- return -1;
- }
- if (theNdbOp->theNdbCon == NULL){
- //ndbout << "Dropping TRANSID_AI, theNdbOp->theNdbCon == NULL" << endl;
- return -1;
- }
- Uint64 tCurrTransId = theNdbOp->theNdbCon->getTransactionId();
- Uint64 tRecTransId = (Uint64)aDataPtr[1] + ((Uint64)aDataPtr[2] << 32);
- if ((tRecTransId - tCurrTransId) != (Uint64)0){
- //ndbout << "Dropping TRANSID_AI wrong transid" << endl;
- return -1;
- }
-
- NdbApiSignal * tCopy = new NdbApiSignal(0);//getSignal();
- if (tCopy == NULL){
- theNdbOp->setErrorCode(4000);
- return 2; // theWaitState = NO_WAIT
- }
- tCopy->copyFrom(aSignal);
- tCopy->next(NULL);
- if (theFirstTRANSID_AI_Recv == NULL)
- theFirstTRANSID_AI_Recv = tCopy;
- else
- theLastTRANSID_AI_Recv->next(tCopy);
- theLastTRANSID_AI_Recv = tCopy;
- theTotalRecAI_Len += aSignal->getLength() - 3;
-
- return theNdbOp->theNdbCon->checkNextScanResultComplete();
-}
-
-/***************************************************************************
- * int executeSavedSignals()
- *
- * Remark: Execute all saved TRANSID_AI signals into the parent NdbOperation
- *
- *
- ***************************************************************************/
-int
-NdbScanReceiver::executeSavedSignals(){
-
- NdbApiSignal* tSignal = theFirstTRANSID_AI_Recv;
- while (tSignal != NULL) {
- const Uint32* tDataPtr = tSignal->getDataPtr();
-
- int tRet = theNdbOp->receiveREAD_AI((Uint32*)&tDataPtr[3],
- tSignal->getLength() - 3);
- if (tRet != -1){
- // -1 means that more signals are wanted ?
- // Make sure there are no more signals in the list
- assert(tSignal->next() == NULL);
- }
- tSignal = tSignal->next();
- }
- // receiveREAD_AI may not copy to application buffers
- NdbRecAttr* tRecAttr = theNdbOp->theFirstRecAttr;
- while (tRecAttr != NULL) {
- if (tRecAttr->copyoutRequired()) // copy to application buffer
- tRecAttr->copyout();
- tRecAttr = tRecAttr->next();
- }
- // Release TRANSID_AI signals for this receiver
- while(theFirstTRANSID_AI_Recv != NULL){
- NdbApiSignal* tmp = theFirstTRANSID_AI_Recv;
- theFirstTRANSID_AI_Recv = tmp->next();
- delete tmp;
- }
-
- // theNdbOp->theNdb->releaseSignalsInList(&theFirstTRANSID_AI_Recv);
- theFirstTRANSID_AI_Recv = NULL;
- theLastTRANSID_AI_Recv = NULL;
- theStatus = Executed;
-
- return 0;
-}
-
-
-void
-NdbScanReceiver::prepareNextScanResult(){
- if(theStatus == Executed){
-
- // theNdbOp->theNdb->releaseSignalsInList(&theFirstKEYINFO20_Recv);
- while(theFirstKEYINFO20_Recv != NULL){
- NdbApiSignal* tmp = theFirstKEYINFO20_Recv;
- theFirstKEYINFO20_Recv = tmp->next();
- delete tmp;
- }
- theFirstKEYINFO20_Recv = NULL;
- theLastKEYINFO20_Recv = NULL;
- theTotalRecAI_Len = 0;
- theTotalRecKI_Len = 0;
- if (theLockMode == true)
- theTotalKI_Len = 0xFFFFFFFF;
- else
- theTotalKI_Len = 0;
- theStatus = Waiting;
- }
-}
diff --git a/ndb/src/ndbapi/NdbScanReceiver.hpp b/ndb/src/ndbapi/NdbScanReceiver.hpp
deleted file mode 100644
index 72f9e48f02c..00000000000
--- a/ndb/src/ndbapi/NdbScanReceiver.hpp
+++ /dev/null
@@ -1,210 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef NdbScanReceiver_H
-#define NdbScanReceiver_H
-
-#include "Ndb.hpp"
-#include "NdbConnection.hpp"
-#include "NdbOperation.hpp"
-#include "NdbApiSignal.hpp"
-#include "NdbReceiver.hpp"
-#include <NdbOut.hpp>
-
-
-class NdbScanReceiver
-{
- enum ReceiverStatus { Init,
- Waiting,
- Completed,
- Executed,
- Released };
-
- friend class Ndb;
- friend class NdbOperation;
-public:
- NdbScanReceiver(Ndb *aNdb) :
- theReceiver(aNdb),
- theNdbOp(NULL),
- theFirstTRANSID_AI_Recv(NULL),
- theLastTRANSID_AI_Recv(NULL),
- theFirstKEYINFO20_Recv(NULL),
- theLastKEYINFO20_Recv(NULL),
- theTotalRecAI_Len(0),
- theTotalKI_Len(0xFFFFFFFF),
- theTotalRecKI_Len(0),
- theStatus(Init),
- theNextScanRec(NULL)
- {
- theReceiver.init(NdbReceiver::NDB_SCANRECEIVER, this);
- }
-
- int checkMagicNumber();
- int receiveTRANSID_AI_SCAN(NdbApiSignal*);
- int receiveKEYINFO20(NdbApiSignal*);
- int executeSavedSignals();
- void prepareNextScanResult();
-
- NdbScanReceiver* next();
- void next(NdbScanReceiver*);
-
- bool isCompleted(Uint32 aiLenToReceive);
- void setCompleted();
-
- void init(NdbOperation* aNdbOp, bool lockMode);
-
- Uint32 ptr2int() { return theReceiver.getId(); };
-private:
- NdbScanReceiver();
- void release();
-
- NdbReceiver theReceiver;
-
- NdbOperation* theNdbOp;
- NdbApiSignal* theFirstTRANSID_AI_Recv;
- NdbApiSignal* theLastTRANSID_AI_Recv;
- NdbApiSignal* theFirstKEYINFO20_Recv;
- NdbApiSignal* theLastKEYINFO20_Recv;
-
- Uint32 theTotalRecAI_Len;
- Uint32 theTotalKI_Len;
- Uint32 theTotalRecKI_Len;
- ReceiverStatus theStatus;
- Uint32 theMagicNumber;
- NdbScanReceiver* theNextScanRec;
- bool theLockMode;
-
-};
-
-inline
-void
-NdbScanReceiver::init(NdbOperation* aNdbOp, bool lockMode){
- assert(theStatus == Init || theStatus == Released);
- theNdbOp = aNdbOp;
- theMagicNumber = 0xA0B1C2D3;
- theTotalRecAI_Len = 0;
-
- /* If we are locking the records for take over
- * KI_len to receive is at least 1, since we don't know yet
- * how much KI we are expecting(this is written in the first KI signal)
- * set theTotalKI_Len to FFFFFFFF, this will make the ScanReciever wait for
- * at least the first KI, and when that is received we will know if
- * we are expecting another one
- */
- theLockMode = lockMode;
- if (theLockMode == true)
- theTotalKI_Len = 0xFFFFFFFF;
- else
- theTotalKI_Len = 0;
- theTotalRecKI_Len = 0;
-
- assert(theNextScanRec == NULL);
- theNextScanRec = NULL;
- assert(theFirstTRANSID_AI_Recv == NULL);
- theFirstTRANSID_AI_Recv = NULL;
- assert(theLastTRANSID_AI_Recv == NULL);
- theLastTRANSID_AI_Recv = NULL;
- assert(theFirstKEYINFO20_Recv == NULL);
- theFirstKEYINFO20_Recv = NULL;
- theLastKEYINFO20_Recv = NULL;
-
- theStatus = Waiting;
-};
-
-
-inline
-void
-NdbScanReceiver::release(){
- theStatus = Released;
- // theNdbOp->theNdb->releaseSignalsInList(&theFirstTRANSID_AI_Recv);
- while(theFirstTRANSID_AI_Recv != NULL){
- NdbApiSignal* tmp = theFirstTRANSID_AI_Recv;
- theFirstTRANSID_AI_Recv = tmp->next();
- delete tmp;
- }
- theFirstTRANSID_AI_Recv = NULL;
- theLastTRANSID_AI_Recv = NULL;
- // theNdbOp->theNdb->releaseSignalsInList(&theFirstKEYINFO20_Recv);
- while(theFirstKEYINFO20_Recv != NULL){
- NdbApiSignal* tmp = theFirstKEYINFO20_Recv;
- theFirstKEYINFO20_Recv = tmp->next();
- delete tmp;
- }
- theFirstKEYINFO20_Recv = NULL;
- theLastKEYINFO20_Recv = NULL;
- theNdbOp = NULL;
- theTotalRecAI_Len = 0;
- theTotalRecKI_Len = 0;
- theTotalKI_Len = 0xFFFFFFFF;
-};
-
-inline
-int
-NdbScanReceiver::checkMagicNumber()
-{
- if (theMagicNumber != 0xA0B1C2D3)
- return -1;
- return 0;
-}
-
-inline
-NdbScanReceiver*
-NdbScanReceiver::next(){
- return theNextScanRec;
-}
-
-inline
-void
-NdbScanReceiver::next(NdbScanReceiver* aScanRec){
- theNextScanRec = aScanRec;
-}
-
-inline
-bool
-NdbScanReceiver::isCompleted(Uint32 aiLenToReceive){
- assert(theStatus == Waiting || theStatus == Completed);
-#if 0
- ndbout << "NdbScanReceiver::isCompleted"<<endl
- << " theStatus = " << theStatus << endl
- << " theTotalRecAI_Len = " << theTotalRecAI_Len << endl
- << " aiLenToReceive = " << aiLenToReceive << endl
- << " theTotalRecKI_Len = "<< theTotalRecKI_Len << endl
- << " theTotalKI_Len = "<< theTotalKI_Len << endl;
-#endif
- // Have we already receive everything
- if(theStatus == Completed)
- return true;
-
- // Check that we have received AI
- if(theTotalRecAI_Len < aiLenToReceive)
- return false;
-
- // Check that we have recieved KI
- if (theTotalRecKI_Len < theTotalKI_Len)
- return false;
-
- // We should not have recieved more AI
- assert(theTotalRecAI_Len <= aiLenToReceive);
- return true;
-}
-
-inline
-void
-NdbScanReceiver::setCompleted(){
- theStatus = Completed;
-}
-
-#endif
diff --git a/ndb/src/ndbapi/Ndbif.cpp b/ndb/src/ndbapi/Ndbif.cpp
index 7ad37401b9a..c011c1a6a26 100644
--- a/ndb/src/ndbapi/Ndbif.cpp
+++ b/ndb/src/ndbapi/Ndbif.cpp
@@ -15,6 +15,8 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#include <ndb_global.h>
+
#include "NdbApiSignal.hpp"
#include "NdbImpl.hpp"
#include "NdbOperation.hpp"
@@ -53,6 +55,8 @@
int
Ndb::init(int aMaxNoOfTransactions)
{
+ DBUG_ENTER("Ndb::init");
+
int i;
int aNrOfCon;
int aNrOfOp;
@@ -67,7 +71,7 @@ Ndb::init(int aMaxNoOfTransactions)
theError.code = 4104;
break;
}
- return -1;
+ DBUG_RETURN(-1);
}//if
theInitState = StartingInit;
TransporterFacade * theFacade = TransporterFacade::instance();
@@ -75,37 +79,17 @@ Ndb::init(int aMaxNoOfTransactions)
const int tBlockNo = theFacade->open(this,
executeMessage,
- statusMessage);
-
-
+ statusMessage);
if ( tBlockNo == -1 ) {
theError.code = 4105;
theFacade->unlock_mutex();
- return -1; // no more free blocknumbers
+ DBUG_RETURN(-1); // no more free blocknumbers
}//if
theNdbBlockNumber = tBlockNo;
-
- theNode = theFacade->ownId();
- theMyRef = numberToRef(theNdbBlockNumber, theNode);
-
- for (i = 1; i < MAX_NDB_NODES; i++){
- if (theFacade->getIsDbNode(i)){
- theDBnodes[theNoOfDBnodes] = i;
- theNoOfDBnodes++;
- }
- }
- theFirstTransId = ((Uint64)theNdbBlockNumber << 52)+((Uint64)theNode << 40);
- theFirstTransId += theFacade->m_open_count;
theFacade->unlock_mutex();
-
- theDictionary = new NdbDictionaryImpl(*this);
- if (theDictionary == NULL) {
- theError.code = 4000;
- return -1;
- }
theDictionary->setTransporter(this, theFacade);
aNrOfCon = theNoOfDBnodes;
@@ -144,9 +128,6 @@ Ndb::init(int aMaxNoOfTransactions)
theSentTransactionsArray[i] = NULL;
theCompletedTransactionsArray[i] = NULL;
}//for
-
- startTransactionNodeSelectionData.init(theNoOfDBnodes, theDBnodes);
-
for (i = 0; i < 16; i++){
tSignal[i] = getSignal();
if(tSignal[i] == NULL) {
@@ -156,11 +137,8 @@ Ndb::init(int aMaxNoOfTransactions)
}
for (i = 0; i < 16; i++)
releaseSignal(tSignal[i]);
-
theInitState = Initialised;
-
- theCommitAckSignal = new NdbApiSignal(theMyRef);
- return 0;
+ DBUG_RETURN(0);
error_handler:
ndbout << "error_handler" << endl;
@@ -175,13 +153,14 @@ error_handler:
freeOperation();
delete theDictionary;
- TransporterFacade::instance()->close(theNdbBlockNumber);
- return -1;
+ TransporterFacade::instance()->close(theNdbBlockNumber, 0);
+ DBUG_RETURN(-1);
}
void
Ndb::releaseTransactionArrays()
{
+ DBUG_ENTER("Ndb::releaseTransactionArrays");
if (thePreparedTransactionsArray != NULL) {
delete [] thePreparedTransactionsArray;
}//if
@@ -191,6 +170,7 @@ Ndb::releaseTransactionArrays()
if (theCompletedTransactionsArray != NULL) {
delete [] theCompletedTransactionsArray;
}//if
+ DBUG_VOID_RETURN;
}//Ndb::releaseTransactionArrays()
void
@@ -202,13 +182,49 @@ Ndb::executeMessage(void* NdbObject,
tNdb->handleReceivedSignal(aSignal, ptr);
}
+void Ndb::connected(Uint32 ref)
+{
+ theMyRef= ref;
+ Uint32 tmpTheNode= refToNode(ref);
+ Uint64 tBlockNo= refToBlock(ref);
+ if (theNdbBlockNumber >= 0){
+ assert(theMyRef == numberToRef(theNdbBlockNumber, tmpTheNode));
+ }
+
+ TransporterFacade * theFacade = TransporterFacade::instance();
+ int i;
+ theNoOfDBnodes= 0;
+ for (i = 1; i < MAX_NDB_NODES; i++){
+ if (theFacade->getIsDbNode(i)){
+ theDBnodes[theNoOfDBnodes] = i;
+ theNoOfDBnodes++;
+ }
+ }
+ theFirstTransId = ((Uint64)tBlockNo << 52)+
+ ((Uint64)tmpTheNode << 40);
+ theFirstTransId += theFacade->m_max_trans_id;
+ // assert(0);
+ DBUG_PRINT("info",("connected with ref=%x, id=%d, no_db_nodes=%d, first_trans_id=%lx",
+ theMyRef,
+ tmpTheNode,
+ theNoOfDBnodes,
+ theFirstTransId));
+ startTransactionNodeSelectionData.init(theNoOfDBnodes, theDBnodes);
+ theCommitAckSignal = new NdbApiSignal(theMyRef);
+
+ theDictionary->m_receiver.m_reference= theMyRef;
+ theNode= tmpTheNode; // flag that Ndb object is initialized
+}
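Ndb::connected() above composes the first transaction id from the API block number and node id so that ids from different blocks and nodes cannot collide: the block number sits at bit 52, the node id at bit 40, and the low bits carry a per-connection counter (m_max_trans_id). A quick sketch of the composition; the 52/40 split comes from the patch, the sample values are invented:

#include <cstdint>
#include <cstdio>

inline uint64_t first_trans_id(uint32_t blockNo, uint32_t nodeId,
                               uint64_t counter)
{
  return (uint64_t(blockNo) << 52) + (uint64_t(nodeId) << 40) + counter;
}

int main()
{
  // Hypothetical values: block 0x8a, node 3, counter 2.
  std::printf("first transId = 0x%llx\n",
              (unsigned long long) first_trans_id(0x8a, 3, 2));
  return 0;
}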
+
void
-Ndb::statusMessage(void* NdbObject, NodeId a_node, bool alive, bool nfComplete)
+Ndb::statusMessage(void* NdbObject, Uint32 a_node, bool alive, bool nfComplete)
{
+ DBUG_ENTER("Ndb::statusMessage");
Ndb* tNdb = (Ndb*)NdbObject;
if (alive) {
if (nfComplete) {
- assert(0);
+ tNdb->connected(a_node);
+ DBUG_VOID_RETURN;
}//if
} else {
if (nfComplete) {
@@ -219,6 +235,7 @@ Ndb::statusMessage(void* NdbObject, NodeId a_node, bool alive, bool nfComplete)
}//if
NdbDictInterface::execNodeStatus(&tNdb->theDictionary->m_receiver,
a_node, alive, nfComplete);
+ DBUG_VOID_RETURN;
}
void
@@ -232,6 +249,7 @@ Ndb::report_node_failure(Uint32 node_id)
*/
the_release_ind[node_id] = 1;
theWaiter.nodeFail(node_id);
+ return;
}//Ndb::report_node_failure()
@@ -254,9 +272,10 @@ Ndb::abortTransactionsAfterNodeFailure(Uint16 aNodeId)
Uint32 tNoSentTransactions = theNoOfSentTransactions;
for (int i = tNoSentTransactions - 1; i >= 0; i--) {
NdbConnection* localCon = theSentTransactionsArray[i];
- if (localCon->getConnectedNodeId() == aNodeId ) {
+ if (localCon->getConnectedNodeId() == aNodeId) {
const NdbConnection::SendStatusType sendStatus = localCon->theSendStatus;
- if (sendStatus == NdbConnection::sendTC_OP || sendStatus == NdbConnection::sendTC_COMMIT) {
+ if (sendStatus == NdbConnection::sendTC_OP ||
+ sendStatus == NdbConnection::sendTC_COMMIT) {
/*
A transaction was interrupted in the prepare phase by a node
failure. Since the transaction was not found in the phase
@@ -276,7 +295,7 @@ Ndb::abortTransactionsAfterNodeFailure(Uint16 aNodeId)
printState("abortTransactionsAfterNodeFailure %x", this);
abort();
#endif
- }//
+ }
/*
All transactions arriving here have no connection to the kernel
intact since the node was failing and they were aborted. Thus we
@@ -285,7 +304,11 @@ Ndb::abortTransactionsAfterNodeFailure(Uint16 aNodeId)
localCon->theCommitStatus = NdbConnection::Aborted;
localCon->theReleaseOnClose = true;
completedTransaction(localCon);
- }//if
+ }
+ else if(localCon->report_node_failure(aNodeId))
+ {
+ completedTransaction(localCon);
+ }
}//for
return;
}//Ndb::abortTransactionsAfterNodeFailure()
@@ -375,7 +398,8 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
break;
case NdbReceiver::NDB_SCANRECEIVER:
tCon->theScanningOp->receiver_delivered(tRec);
- theWaiter.m_state = (tWaitState == WAIT_SCAN ? NO_WAIT : tWaitState);
+ theWaiter.m_state = (((WaitSignalType) tWaitState) == WAIT_SCAN ?
+ (Uint32) NO_WAIT : tWaitState);
break;
default:
goto InvalidSignal;
@@ -747,7 +771,8 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
switch(com){
case 1:
tCon->theScanningOp->receiver_delivered(tRec);
- theWaiter.m_state = (tWaitState == WAIT_SCAN ? NO_WAIT : tWaitState);
+ theWaiter.m_state = (((WaitSignalType) tWaitState) == WAIT_SCAN ?
+ (Uint32) NO_WAIT : tWaitState);
break;
case 0:
break;
@@ -871,8 +896,8 @@ Ndb::completedTransaction(NdbConnection* aCon)
return;
}//if
} else {
- ndbout << "theNoOfSentTransactions = " << theNoOfSentTransactions;
- ndbout << " theListState = " << aCon->theListState;
+ ndbout << "theNoOfSentTransactions = " << (int) theNoOfSentTransactions;
+ ndbout << " theListState = " << (int) aCon->theListState;
ndbout << " theTransArrayIndex = " << aCon->theTransArrayIndex;
ndbout << endl << flush;
#ifdef VM_TRACE
@@ -923,7 +948,7 @@ Ndb::pollCompleted(NdbConnection** aCopyArray)
aCopyArray[i] = theCompletedTransactionsArray[i];
if (aCopyArray[i]->theListState != NdbConnection::InCompletedList) {
ndbout << "pollCompleted error ";
- ndbout << aCopyArray[i]->theListState << endl;
+ ndbout << (int) aCopyArray[i]->theListState << endl;
abort();
}//if
theCompletedTransactionsArray[i] = NULL;
diff --git a/ndb/src/ndbapi/Ndbinit.cpp b/ndb/src/ndbapi/Ndbinit.cpp
index be168ddffbe..698bbcde4c6 100644
--- a/ndb/src/ndbapi/Ndbinit.cpp
+++ b/ndb/src/ndbapi/Ndbinit.cpp
@@ -19,8 +19,6 @@
#include "NdbApiSignal.hpp"
#include "NdbImpl.hpp"
-//#include "NdbSchemaOp.hpp"
-//#include "NdbSchemaCon.hpp"
#include "NdbOperation.hpp"
#include "NdbConnection.hpp"
#include "NdbRecAttr.hpp"
@@ -39,15 +37,11 @@ void NdbGlobalEventBuffer_drop(NdbGlobalEventBufferHandle *);
/**
* Static object for NDB
*/
-static int theNoOfNdbObjects = 0;
+// only needed for backwards compatibility, before ndb_cluster_connection
static char *ndbConnectString = 0;
-
-#if defined NDB_WIN32 || defined SCO
-static NdbMutex & createNdbMutex = * NdbMutex_Create();
-#else
-static NdbMutex createNdbMutex = NDB_MUTEX_INITIALIZER;
-#endif
+static int theNoOfNdbObjects = 0;
+static Ndb_cluster_connection *global_ndb_cluster_connection= 0;
/***************************************************************************
@@ -56,51 +50,88 @@ Ndb(const char* aDataBase);
Parameters: aDataBase : Name of the database.
Remark: Connect to the database.
***************************************************************************/
-Ndb::Ndb( const char* aDataBase , const char* aSchema) :
- theNdbObjectIdMap(0),
- thePreparedTransactionsArray(NULL),
- theSentTransactionsArray(NULL),
- theCompletedTransactionsArray(NULL),
- theNoOfPreparedTransactions(0),
- theNoOfSentTransactions(0),
- theNoOfCompletedTransactions(0),
- theNoOfAllocatedTransactions(0),
- theMaxNoOfTransactions(0),
- theMinNoOfEventsToWakeUp(0),
- prefixEnd(NULL),
- theImpl(NULL),
- theDictionary(NULL),
- theConIdleList(NULL),
- theOpIdleList(NULL),
- theScanOpIdleList(NULL),
- theIndexOpIdleList(NULL),
-// theSchemaConIdleList(NULL),
-// theSchemaConToNdbList(NULL),
- theTransactionList(NULL),
- theConnectionArray(NULL),
- theRecAttrIdleList(NULL),
- theSignalIdleList(NULL),
- theLabelList(NULL),
- theBranchList(NULL),
- theSubroutineList(NULL),
- theCallList(NULL),
- theScanList(NULL),
- theNdbBlobIdleList(NULL),
- theNoOfDBnodes(0),
- theDBnodes(NULL),
- the_release_ind(NULL),
- the_last_check_time(0),
- theFirstTransId(0),
- theRestartGCI(0),
- theNdbBlockNumber(-1),
- theInitState(NotConstructed)
+Ndb::Ndb( const char* aDataBase , const char* aSchema) {
+ DBUG_ENTER("Ndb::Ndb()");
+ DBUG_PRINT("enter",("(old)Ndb::Ndb this=0x%x", this));
+ if (theNoOfNdbObjects < 0)
+ abort(); // old and new Ndb constructors were mixed
+ theNoOfNdbObjects++;
+ if (global_ndb_cluster_connection == 0) {
+ global_ndb_cluster_connection= new Ndb_cluster_connection(ndbConnectString);
+ global_ndb_cluster_connection->connect();
+ }
+ setup(global_ndb_cluster_connection, aDataBase, aSchema);
+ DBUG_VOID_RETURN;
+}
+
+Ndb::Ndb( Ndb_cluster_connection *ndb_cluster_connection,
+ const char* aDataBase , const char* aSchema)
{
+ DBUG_ENTER("Ndb::Ndb()");
+ DBUG_PRINT("enter",("Ndb::Ndb this=0x%x", this));
+ if (global_ndb_cluster_connection != 0 &&
+ global_ndb_cluster_connection != ndb_cluster_connection)
+ abort(); // old and new Ndb constructors were mixed
+ theNoOfNdbObjects= -1;
+ setup(ndb_cluster_connection, aDataBase, aSchema);
+ DBUG_VOID_RETURN;
+}
+
+void Ndb::setup(Ndb_cluster_connection *ndb_cluster_connection,
+ const char* aDataBase , const char* aSchema)
+{
+ DBUG_ENTER("Ndb::setup");
+
+ theNdbObjectIdMap= 0;
+ m_ndb_cluster_connection= ndb_cluster_connection;
+ thePreparedTransactionsArray= NULL;
+ theSentTransactionsArray= NULL;
+ theCompletedTransactionsArray= NULL;
+ theNoOfPreparedTransactions= 0;
+ theNoOfSentTransactions= 0;
+ theNoOfCompletedTransactions= 0;
+ theNoOfAllocatedTransactions= 0;
+ theMaxNoOfTransactions= 0;
+ theMinNoOfEventsToWakeUp= 0;
+ prefixEnd= NULL;
+ theImpl= NULL;
+ theDictionary= NULL;
+ theConIdleList= NULL;
+ theOpIdleList= NULL;
+ theScanOpIdleList= NULL;
+ theIndexOpIdleList= NULL;
+ theTransactionList= NULL;
+ theConnectionArray= NULL;
+ theRecAttrIdleList= NULL;
+ theSignalIdleList= NULL;
+ theLabelList= NULL;
+ theBranchList= NULL;
+ theSubroutineList= NULL;
+ theCallList= NULL;
+ theScanList= NULL;
+ theNdbBlobIdleList= NULL;
+ theNoOfDBnodes= 0;
+ theDBnodes= NULL;
+ the_release_ind= NULL;
+ the_last_check_time= 0;
+ theFirstTransId= 0;
+ theRestartGCI= 0;
+ theNdbBlockNumber= -1;
+ theInitState= NotConstructed;
+
+ theNode= 0;
+ theFirstTransId= 0;
+ theMyRef= 0;
+
fullyQualifiedNames = true;
+#ifdef POORMANSPURIFY
cgetSignals =0;
cfreeSignals = 0;
cnewSignals = 0;
creleaseSignals = 0;
+#endif
+
theError.code = 0;
theNdbObjectIdMap = new NdbObjectIdMap(1024,1024);
@@ -122,34 +153,19 @@ Ndb::Ndb( const char* aDataBase , const char* aSchema) :
theLastTupleId[i] = 0;
}//for
- snprintf(theDataBase, sizeof(theDataBase), "%s",
+ BaseString::snprintf(theDataBase, sizeof(theDataBase), "%s",
aDataBase ? aDataBase : "");
- snprintf(theDataBaseSchema, sizeof(theDataBaseSchema), "%s",
+ BaseString::snprintf(theDataBaseSchema, sizeof(theDataBaseSchema), "%s",
aSchema ? aSchema : "");
- int len = snprintf(prefixName, sizeof(prefixName), "%s%c%s%c",
+ int len = BaseString::snprintf(prefixName, sizeof(prefixName), "%s%c%s%c",
theDataBase, table_name_separator,
theDataBaseSchema, table_name_separator);
- prefixEnd = prefixName + (len < sizeof(prefixName) ? len :
+ prefixEnd = prefixName + (len < (int) sizeof(prefixName) ? len :
sizeof(prefixName) - 1);
- NdbMutex_Lock(&createNdbMutex);
-
- TransporterFacade * m_facade = 0;
- if(theNoOfNdbObjects == 0){
- if ((m_facade = TransporterFacade::start_instance(ndbConnectString)) == 0)
- theInitState = InitConfigError;
- } else {
- m_facade = TransporterFacade::instance();
- }
-
- if(m_facade != 0){
- theWaiter.m_mutex = m_facade->theMutexPtr;
- }
-
- // For keeping track of how many Ndb objects that exists.
- theNoOfNdbObjects += 1;
-
+ theWaiter.m_mutex = TransporterFacade::instance()->theMutexPtr;
+
// Signal that the constructor has finished OK
if (theInitState == NotConstructed)
theInitState = NotInitialised;
@@ -166,7 +182,12 @@ Ndb::Ndb( const char* aDataBase , const char* aSchema) :
theGlobalEventBufferHandle = h;
}
- NdbMutex_Unlock(&createNdbMutex);
+ theDictionary = new NdbDictionaryImpl(*this);
+ if (theDictionary == NULL) {
+ ndbout_c("Ndb cailed to allocate dictionary");
+ exit(-1);
+ }
+ DBUG_VOID_RETURN;
}
@@ -187,6 +208,8 @@ void Ndb::setConnectString(const char * connectString)
*****************************************************************************/
Ndb::~Ndb()
{
+ DBUG_ENTER("Ndb::~Ndb()");
+ DBUG_PRINT("enter",("Ndb::~Ndb this=0x%x",this));
doDisconnect();
delete theDictionary;
@@ -195,26 +218,21 @@ Ndb::~Ndb()
NdbGlobalEventBuffer_drop(theGlobalEventBufferHandle);
if (TransporterFacade::instance() != NULL && theNdbBlockNumber > 0){
- TransporterFacade::instance()->close(theNdbBlockNumber);
+ TransporterFacade::instance()->close(theNdbBlockNumber, theFirstTransId);
}
-
- NdbMutex_Lock(&createNdbMutex);
-
- theNoOfNdbObjects -= 1;
- if(theNoOfNdbObjects == 0){
- TransporterFacade::stop_instance();
+
+ if (global_ndb_cluster_connection != 0) {
+ theNoOfNdbObjects--;
+ if(theNoOfNdbObjects == 0){
+ delete global_ndb_cluster_connection;
+ global_ndb_cluster_connection= 0;
+ }
}//if
- NdbMutex_Unlock(&createNdbMutex);
-
// if (theSchemaConToNdbList != NULL)
// closeSchemaTransaction(theSchemaConToNdbList);
while ( theConIdleList != NULL )
freeNdbCon();
- while ( theSignalIdleList != NULL )
- freeSignal();
- while (theRecAttrIdleList != NULL)
- freeRecAttr();
while (theOpIdleList != NULL)
freeOperation();
while (theScanOpIdleList != NULL)
@@ -233,6 +251,10 @@ Ndb::~Ndb()
freeNdbScanRec();
while (theNdbBlobIdleList != NULL)
freeNdbBlob();
+ while (theRecAttrIdleList != NULL)
+ freeRecAttr();
+ while ( theSignalIdleList != NULL )
+ freeSignal();
releaseTransactionArrays();
startTransactionNodeSelectionData.release();
@@ -271,6 +293,7 @@ Ndb::~Ndb()
assert(cnewSignals == cfreeSignals);
assert(cgetSignals == creleaseSignals);
#endif
+ DBUG_VOID_RETURN;
}
NdbWaiter::NdbWaiter(){
diff --git a/ndb/src/ndbapi/Ndblist.cpp b/ndb/src/ndbapi/Ndblist.cpp
index 1e1cb5e4b40..a5f2a4801d5 100644
--- a/ndb/src/ndbapi/Ndblist.cpp
+++ b/ndb/src/ndbapi/Ndblist.cpp
@@ -29,24 +29,31 @@
void
Ndb::checkFailedNode()
{
- for (NodeId i = 0; i < theNoOfDBnodes; i ++){
+ DBUG_ENTER("Ndb::checkFailedNode");
+ DBUG_PRINT("enter", ("theNoOfDBnodes: %d", theNoOfDBnodes));
+
+ DBUG_ASSERT(theNoOfDBnodes < MAX_NDB_NODES);
+ for (Uint32 i = 0; i < theNoOfDBnodes; i++){
const NodeId node_id = theDBnodes[i];
+ DBUG_PRINT("info", ("i: %d, node_id: %d", i, node_id));
- NdbConnection * tNdbCon = theConnectionArray[node_id];
+ DBUG_ASSERT(node_id < MAX_NDB_NODES);
if (the_release_ind[node_id] == 1){
/**
* Release all connections in idle list (for node)
*/
+ NdbConnection * tNdbCon = theConnectionArray[node_id];
theConnectionArray[node_id] = NULL;
while (tNdbCon != NULL) {
NdbConnection* tempNdbCon = tNdbCon;
tNdbCon = tNdbCon->next();
releaseNdbCon(tempNdbCon);
- }//while
+ }
the_release_ind[node_id] = 0;
- }//if
- }//for
+ }
+ }
+ DBUG_VOID_RETURN;
}
#if 0
@@ -425,11 +432,15 @@ Ndb::getSignal()
theSignalIdleList = tSignalNext;
} else {
tSignal = new NdbApiSignal(theMyRef);
+#ifdef POORMANSPURIFY
cnewSignals++;
+#endif
if (tSignal != NULL)
tSignal->next(NULL);
}
+#ifdef POORMANSPURIFY
cgetSignals++;
+#endif
return tSignal;
}
@@ -598,7 +609,9 @@ Ndb::releaseSignal(NdbApiSignal* aSignal)
}
#endif
#endif
+#ifdef POORMANSPURIFY
creleaseSignals++;
+#endif
aSignal->next(theSignalIdleList);
theSignalIdleList = aSignal;
}
@@ -642,8 +655,8 @@ Remark: Always release the first item in the free list
void
Ndb::freeScanOperation()
{
- NdbScanOperation* tOp = theScanOpIdleList;
- theScanOpIdleList = (NdbIndexScanOperation *) theScanOpIdleList->next();
+ NdbIndexScanOperation* tOp = theScanOpIdleList;
+ theScanOpIdleList = (NdbIndexScanOperation *)tOp->next();
delete tOp;
}
@@ -762,7 +775,9 @@ Ndb::freeSignal()
NdbApiSignal* tSignal = theSignalIdleList;
theSignalIdleList = tSignal->next();
delete tSignal;
+#ifdef POORMANSPURIFY
cfreeSignals++;
+#endif
}
void
@@ -783,6 +798,7 @@ Remark: Release and disconnect from DBTC a connection and seize it to th
void
Ndb::releaseConnectToNdb(NdbConnection* a_con)
{
+ DBUG_ENTER("Ndb::releaseConnectToNdb");
NdbApiSignal tSignal(theMyRef);
int tConPtr;
@@ -790,7 +806,7 @@ Ndb::releaseConnectToNdb(NdbConnection* a_con)
// manage to reach NDB or not.
if (a_con == NULL)
- return;
+ DBUG_VOID_RETURN;
Uint32 node_id = a_con->getConnectedNodeId();
Uint32 conn_seq = a_con->theNodeSequence;
@@ -821,6 +837,6 @@ Ndb::releaseConnectToNdb(NdbConnection* a_con)
abort();
}//if
releaseNdbCon(a_con);
- return;
+ DBUG_VOID_RETURN;
}
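
Editorial note: the dbug instrumentation added throughout this file follows the usual MySQL convention that once a function opens with DBUG_ENTER, every exit path must leave through DBUG_RETURN or DBUG_VOID_RETURN so the call-depth tracking stays balanced; that is why the early `return;` in releaseConnectToNdb becomes DBUG_VOID_RETURN. A minimal sketch of the pattern (the helper name is illustrative, not from the patch):

    void example_release(NdbConnection* con)   // hypothetical helper, for illustration only
    {
      DBUG_ENTER("example_release");
      if (con == NULL)
        DBUG_VOID_RETURN;                      // early exits must also unwind the dbug stack
      DBUG_PRINT("info", ("releasing connection"));
      /* ... release work ... */
      DBUG_VOID_RETURN;                        // never a bare `return` once DBUG_ENTER was used
    }
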
diff --git a/ndb/src/ndbapi/ObjectMap.hpp b/ndb/src/ndbapi/ObjectMap.hpp
index f67774bb413..12bede3aa3f 100644
--- a/ndb/src/ndbapi/ObjectMap.hpp
+++ b/ndb/src/ndbapi/ObjectMap.hpp
@@ -29,7 +29,7 @@
class NdbObjectIdMap //: NdbLockable
{
public:
- static const Uint32 InvalidId = ~0;
+ STATIC_CONST( InvalidId = ~0 );
NdbObjectIdMap(Uint32 initalSize = 128, Uint32 expandSize = 10);
~NdbObjectIdMap();
@@ -134,7 +134,10 @@ NdbObjectIdMap::expand(Uint32 incSize){
Uint32 newSize = m_size + incSize;
MapEntry * tmp = (MapEntry*)malloc(newSize * sizeof(MapEntry));
- memcpy(tmp, m_map, m_size * sizeof(MapEntry));
+ if (m_map) {
+ memcpy(tmp, m_map, m_size * sizeof(MapEntry));
+ free((void*)m_map);
+ }
m_map = tmp;
for(Uint32 i = m_size; i<newSize; i++){
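
Editorial note: the expand() change fixes two issues at once; the old code memcpy'd from m_map even on the first expansion, when it may still be NULL, and never freed the previous array, leaking it on every growth. A rough, hedged alternative with the same effect would be to let realloc cover both cases (assuming MapEntry stays trivially copyable; the error path below is illustrative, the real code does not report one):

    // Sketch only, not part of the patch.
    MapEntry* tmp = (MapEntry*) realloc(m_map, newSize * sizeof(MapEntry));
    if (tmp == NULL)
      return;                 // hypothetical bail-out
    m_map = tmp;              // realloc(NULL, n) behaves like malloc(n), so the first call is safe
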
diff --git a/ndb/src/ndbapi/TransporterFacade.cpp b/ndb/src/ndbapi/TransporterFacade.cpp
index 7ec9a6a55a3..bc24110ea14 100644
--- a/ndb/src/ndbapi/TransporterFacade.cpp
+++ b/ndb/src/ndbapi/TransporterFacade.cpp
@@ -15,6 +15,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <ndb_global.h>
+#include <my_pthread.h>
#include <ndb_limits.h>
#include "TransporterFacade.hpp"
#include "ClusterMgr.hpp"
@@ -37,6 +38,16 @@
//#define REPORT_TRANSPORTER
//#define API_TRACE;
+static int numberToIndex(int number)
+{
+ return number - MIN_API_BLOCK_NO;
+}
+
+static int indexToNumber(int index)
+{
+ return index + MIN_API_BLOCK_NO;
+}
+
#if defined DEBUG_TRANSPORTER
#define TRP_DEBUG(t) ndbout << __FILE__ << ":" << __LINE__ << ":" << t << endl;
#else
@@ -44,8 +55,6 @@
#endif
TransporterFacade* TransporterFacade::theFacadeInstance = NULL;
-ConfigRetriever *TransporterFacade::s_config_retriever= 0;
-
/*****************************************************************************
* Call back functions
@@ -321,12 +330,6 @@ copy(Uint32 * & insertPtr,
abort();
}
-extern "C"
-void
-atexit_stop_instance(){
- TransporterFacade::stop_instance();
-}
-
/**
* Note that this function need no locking since its
* only called from the constructor of Ndb (the NdbObject)
@@ -334,65 +337,15 @@ atexit_stop_instance(){
* Which is protected by a mutex
*/
-
-TransporterFacade*
-TransporterFacade::start_instance(const char * connectString){
-
- // TransporterFacade used from API get config from mgmt srvr
- s_config_retriever= new ConfigRetriever(NDB_VERSION, NODE_TYPE_API);
-
- s_config_retriever->setConnectString(connectString);
- const char* error = 0;
- do {
- if(s_config_retriever->init() == -1)
- break;
-
- if(s_config_retriever->do_connect() == -1)
- break;
-
- Uint32 nodeId = s_config_retriever->allocNodeId();
- for(Uint32 i = 0; nodeId == 0 && i<5; i++){
- NdbSleep_SecSleep(3);
- nodeId = s_config_retriever->allocNodeId();
- }
- if(nodeId == 0)
- break;
-
- ndb_mgm_configuration * props = s_config_retriever->getConfig();
- if(props == 0)
- break;
-
- TransporterFacade * tf = start_instance(nodeId, props);
-
- free(props);
- return tf;
- } while(0);
-
- ndbout << "Configuration error: ";
- const char* erString = s_config_retriever->getErrorString();
- if (erString == 0) {
- erString = "No error specified!";
- }
- ndbout << erString << endl;
- return 0;
-}
-
-TransporterFacade*
+int
TransporterFacade::start_instance(int nodeId,
const ndb_mgm_configuration* props)
{
- TransporterFacade* tf = new TransporterFacade();
- if (! tf->init(nodeId, props)) {
- delete tf;
- return NULL;
+ if (! theFacadeInstance->init(nodeId, props)) {
+ return -1;
}
/**
- * Install atexit handler
- */
- atexit(atexit_stop_instance);
-
- /**
* Install signal handler for SIGPIPE
*
* This due to the fact that a socket connection might have
@@ -402,19 +355,7 @@ TransporterFacade::start_instance(int nodeId,
signal(SIGPIPE, SIG_IGN);
#endif
- if(theFacadeInstance == NULL){
- theFacadeInstance = tf;
- }
-
- return tf;
-}
-
-void
-TransporterFacade::close_configuration(){
- if (s_config_retriever) {
- delete s_config_retriever;
- s_config_retriever= 0;
- }
+ return 0;
}
/**
@@ -425,23 +366,15 @@ TransporterFacade::close_configuration(){
*/
void
TransporterFacade::stop_instance(){
-
- close_configuration();
-
- if(theFacadeInstance == NULL){
- /**
- * We are called from atexit function
- */
- return;
- }
-
- theFacadeInstance->doStop();
-
- delete theFacadeInstance; theFacadeInstance = NULL;
+ DBUG_ENTER("TransporterFacade::stop_instance");
+ if(theFacadeInstance)
+ theFacadeInstance->doStop();
+ DBUG_VOID_RETURN;
}
void
TransporterFacade::doStop(){
+ DBUG_ENTER("TransporterFacade::doStop");
/**
* First stop the ClusterMgr because it needs to send one more signal
* and also uses theFacadeInstance to lock/unlock theMutexPtr
@@ -454,37 +387,39 @@ TransporterFacade::doStop(){
*/
void *status;
theStopReceive = 1;
- NdbThread_WaitFor(theReceiveThread, &status);
- NdbThread_WaitFor(theSendThread, &status);
- NdbThread_Destroy(&theReceiveThread);
- NdbThread_Destroy(&theSendThread);
+ if (theReceiveThread) {
+ NdbThread_WaitFor(theReceiveThread, &status);
+ NdbThread_Destroy(&theReceiveThread);
+ theReceiveThread= 0;
+ }
+ if (theSendThread) {
+ NdbThread_WaitFor(theSendThread, &status);
+ NdbThread_Destroy(&theSendThread);
+ theSendThread= 0;
+ }
+ DBUG_VOID_RETURN;
}
extern "C"
void*
runSendRequest_C(void * me)
{
+ my_thread_init();
((TransporterFacade*) me)->threadMainSend();
+ my_thread_end();
NdbThread_Exit(0);
return me;
}
void TransporterFacade::threadMainSend(void)
{
- SocketServer socket_server;
-
theTransporterRegistry->startSending();
- if (!theTransporterRegistry->start_service(socket_server)){
- ndbout_c("Unable to start theTransporterRegistry->start_service");
- exit(0);
- }
-
if (!theTransporterRegistry->start_clients()){
ndbout_c("Unable to start theTransporterRegistry->start_clients");
exit(0);
}
- socket_server.startServer();
+ m_socket_server.startServer();
while(!theStopReceive) {
NdbSleep_MilliSleep(10);
@@ -497,8 +432,8 @@ void TransporterFacade::threadMainSend(void)
}
theTransporterRegistry->stopSending();
- socket_server.stopServer();
- socket_server.stopSessions();
+ m_socket_server.stopServer();
+ m_socket_server.stopSessions();
theTransporterRegistry->stop_clients();
}
@@ -507,7 +442,9 @@ extern "C"
void*
runReceiveResponse_C(void * me)
{
+ my_thread_init();
((TransporterFacade*) me)->threadMainReceive();
+ my_thread_end();
NdbThread_Exit(0);
return me;
}
@@ -540,6 +477,8 @@ TransporterFacade::TransporterFacade() :
theSendThread(NULL),
theReceiveThread(NULL)
{
+ theOwnId = 0;
+
theMutexPtr = NdbMutex_Create();
sendPerformedLastInterval = 0;
@@ -548,7 +487,12 @@ TransporterFacade::TransporterFacade() :
theClusterMgr = NULL;
theArbitMgr = NULL;
theStartNodeId = 1;
- m_open_count = 0;
+ m_scan_batch_size= MAX_SCAN_BATCH_SIZE;
+ m_batch_byte_size= SCAN_BATCH_SIZE;
+ m_batch_size= DEF_BATCH_SIZE;
+ m_max_trans_id = 0;
+
+ theClusterMgr = new ClusterMgr(* this);
}
bool
@@ -567,7 +511,6 @@ TransporterFacade::init(Uint32 nodeId, const ndb_mgm_configuration* props)
ndb_mgm_configuration_iterator iter(* props, CFG_SECTION_NODE);
iter.first();
- theClusterMgr = new ClusterMgr(* this);
theClusterMgr->init(iter);
/**
@@ -592,11 +535,27 @@ TransporterFacade::init(Uint32 nodeId, const ndb_mgm_configuration* props)
iter.get(CFG_NODE_ARBIT_DELAY, &delay);
theArbitMgr->setDelay(delay);
}
-
+ Uint32 scan_batch_size= 0;
+ if (!iter.get(CFG_MAX_SCAN_BATCH_SIZE, &scan_batch_size)) {
+ m_scan_batch_size= scan_batch_size;
+ }
+ Uint32 batch_byte_size= 0;
+ if (!iter.get(CFG_BATCH_BYTE_SIZE, &batch_byte_size)) {
+ m_batch_byte_size= batch_byte_size;
+ }
+ Uint32 batch_size= 0;
+ if (!iter.get(CFG_BATCH_SIZE, &batch_size)) {
+ m_batch_size= batch_size;
+ }
#if 0
}
#endif
+ if (!theTransporterRegistry->start_service(m_socket_server)){
+ ndbout_c("Unable to start theTransporterRegistry->start_service");
+ return false;
+ }
+
theReceiveThread = NdbThread_Create(runReceiveResponse_C,
(void**)this,
32768,
@@ -608,7 +567,6 @@ TransporterFacade::init(Uint32 nodeId, const ndb_mgm_configuration* props)
32768,
"ndb_send",
NDB_THREAD_PRIO_LOW);
-
theClusterMgr->startThread();
#ifdef API_TRACE
@@ -620,6 +578,21 @@ TransporterFacade::init(Uint32 nodeId, const ndb_mgm_configuration* props)
void
+TransporterFacade::connected()
+{
+ DBUG_ENTER("TransporterFacade::connected");
+ Uint32 sz = m_threads.m_statusNext.size();
+ for (Uint32 i = 0; i < sz ; i ++) {
+ if (m_threads.getInUse(i)){
+ void * obj = m_threads.m_objectExecute[i].m_object;
+ NodeStatusFunction RegPC = m_threads.m_statusFunction[i];
+ (*RegPC) (obj, numberToRef(indexToNumber(i), theOwnId), true, true);
+ }
+ }
+ DBUG_VOID_RETURN;
+}
+
+void
TransporterFacade::ReportNodeDead(NodeId tNodeId)
{
/**
@@ -684,9 +657,11 @@ TransporterFacade::ReportNodeAlive(NodeId tNodeId)
}
int
-TransporterFacade::close(BlockNumber blockNumber)
+TransporterFacade::close(BlockNumber blockNumber, Uint64 trans_id)
{
NdbMutex_Lock(theMutexPtr);
+ Uint32 low_bits = (Uint32)trans_id;
+ m_max_trans_id = m_max_trans_id > low_bits ? m_max_trans_id : low_bits;
close_local(blockNumber);
NdbMutex_Unlock(theMutexPtr);
return 0;
@@ -703,8 +678,16 @@ TransporterFacade::open(void* objRef,
ExecuteFunction fun,
NodeStatusFunction statusFun)
{
- m_open_count++;
- return m_threads.open(objRef, fun, statusFun);
+ DBUG_ENTER("TransporterFacade::open");
+ int r= m_threads.open(objRef, fun, statusFun);
+ if (r < 0)
+ DBUG_RETURN(r);
+#if 1
+ if (theOwnId > 0) {
+ (*statusFun)(objRef, numberToRef(r, theOwnId), true, true);
+ }
+#endif
+ DBUG_RETURN(r);
}
TransporterFacade::~TransporterFacade(){
@@ -747,7 +730,7 @@ TransporterFacade::calculateSendLimit()
//-------------------------------------------------
void TransporterFacade::forceSend(Uint32 block_number) {
checkCounter--;
- m_threads.m_statusNext[block_number - MIN_API_BLOCK_NO] = ThreadData::ACTIVE;
+ m_threads.m_statusNext[numberToIndex(block_number)] = ThreadData::ACTIVE;
sendPerformedLastInterval = 1;
if (checkCounter < 0) {
calculateSendLimit();
@@ -760,7 +743,7 @@ void TransporterFacade::forceSend(Uint32 block_number) {
//-------------------------------------------------
void
TransporterFacade::checkForceSend(Uint32 block_number) {
- m_threads.m_statusNext[block_number - MIN_API_BLOCK_NO] = ThreadData::ACTIVE;
+ m_threads.m_statusNext[numberToIndex(block_number)] = ThreadData::ACTIVE;
//-------------------------------------------------
// This code is an adaptive algorithm to discover when
// the API should actually send its buffers. The reason
@@ -959,6 +942,8 @@ TransporterFacade::isConnected(NodeId aNodeId){
NodeId
TransporterFacade::get_an_alive_node()
{
+ DBUG_ENTER("TransporterFacade::get_an_alive_node");
+ DBUG_PRINT("enter", ("theStartNodeId: %d", theStartNodeId));
#ifdef VM_TRACE
const char* p = NdbEnv_GetEnv("NDB_ALIVE_NODE_ID", (char*)0, 0);
if (p != 0 && *p != 0)
@@ -967,17 +952,19 @@ TransporterFacade::get_an_alive_node()
NodeId i;
for (i = theStartNodeId; i < MAX_NDB_NODES; i++) {
if (get_node_alive(i)){
+ DBUG_PRINT("info", ("Node %d is alive", i));
theStartNodeId = ((i + 1) % MAX_NDB_NODES);
- return i;
+ DBUG_RETURN(i);
}
}
for (i = 1; i < theStartNodeId; i++) {
if (get_node_alive(i)){
+ DBUG_PRINT("info", ("Node %d is alive", i));
theStartNodeId = ((i + 1) % MAX_NDB_NODES);
- return i;
+ DBUG_RETURN(i);
}
}
- return (NodeId)0;
+ DBUG_RETURN((NodeId)0);
}
TransporterFacade::ThreadData::ThreadData(Uint32 size){
@@ -1001,11 +988,12 @@ TransporterFacade::ThreadData::expand(Uint32 size){
m_firstFree = m_statusNext.size() - size;
}
+
int
TransporterFacade::ThreadData::open(void* objRef,
- ExecuteFunction fun,
- NodeStatusFunction fun2){
-
+ ExecuteFunction fun,
+ NodeStatusFunction fun2)
+{
Uint32 nextFree = m_firstFree;
if(m_statusNext.size() >= MAX_NO_THREADS && nextFree == END_OF_LIST){
@@ -1025,12 +1013,12 @@ TransporterFacade::ThreadData::open(void* objRef,
m_objectExecute[nextFree] = oe;
m_statusFunction[nextFree] = fun2;
- return nextFree + MIN_API_BLOCK_NO;
+ return indexToNumber(nextFree);
}
int
TransporterFacade::ThreadData::close(int number){
- number -= MIN_API_BLOCK_NO;
+ number= numberToIndex(number);
assert(getInUse(number));
m_statusNext[number] = m_firstFree;
m_firstFree = number;
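
Editorial note: the open/close/forceSend bookkeeping in this file now funnels through the numberToIndex/indexToNumber helpers introduced at the top, so the MIN_API_BLOCK_NO offset is applied in exactly one place. The invariant they preserve is just the round trip (illustrative checks, not part of the patch; `some_block_no` is a placeholder):

    assert(numberToIndex(MIN_API_BLOCK_NO) == 0);                       // first API block maps to slot 0
    assert(indexToNumber(numberToIndex(some_block_no)) == some_block_no);
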
diff --git a/ndb/src/ndbapi/TransporterFacade.hpp b/ndb/src/ndbapi/TransporterFacade.hpp
index 14da4b11aa1..5f473975585 100644
--- a/ndb/src/ndbapi/TransporterFacade.hpp
+++ b/ndb/src/ndbapi/TransporterFacade.hpp
@@ -35,7 +35,7 @@ class Ndb;
class NdbApiSignal;
typedef void (* ExecuteFunction)(void *, NdbApiSignal *, LinearSectionPtr ptr[3]);
-typedef void (* NodeStatusFunction)(void *, NodeId, bool nodeAlive, bool nfComplete);
+typedef void (* NodeStatusFunction)(void *, Uint32, bool nodeAlive, bool nfComplete);
extern "C" {
void* runSendRequest_C(void*);
@@ -55,9 +55,7 @@ public:
bool init(Uint32, const ndb_mgm_configuration *);
static TransporterFacade* instance();
- static TransporterFacade* start_instance(int, const ndb_mgm_configuration*);
- static TransporterFacade* start_instance(const char *connectString);
- static void close_configuration();
+ int start_instance(int, const ndb_mgm_configuration*);
static void stop_instance();
/**
@@ -67,7 +65,7 @@ public:
int open(void* objRef, ExecuteFunction, NodeStatusFunction);
// Close this block number
- int close(BlockNumber blockNumber);
+ int close(BlockNumber blockNumber, Uint64 trans_id);
// Only sends to nodes which are alive
int sendSignal(NdbApiSignal * signal, NodeId nodeId);
@@ -93,6 +91,8 @@ public:
// My own processor id
NodeId ownId() const;
+ void connected();
+
void doConnect(int NodeId);
void reportConnected(int NodeId);
void doDisconnect(int NodeId);
@@ -113,6 +113,11 @@ public:
// Close this block number
int close_local(BlockNumber blockNumber);
+ // Scan batch configuration parameters
+ Uint32 get_scan_batch_size();
+ Uint32 get_batch_byte_size();
+ Uint32 get_batch_size();
+
private:
/**
* Send a signal unconditional of node status (used by ClusterMgr)
@@ -125,6 +130,7 @@ private:
friend class ExtSender; ///< @todo Hack to be able to sendSignalUnCond
friend class GrepSS;
friend class Ndb;
+ friend class Ndb_cluster_connection;
int sendSignalUnCond(NdbApiSignal *, NodeId nodeId);
@@ -132,6 +138,7 @@ private:
void doStop();
TransporterRegistry* theTransporterRegistry;
+ SocketServer m_socket_server;
int sendPerformedLastInterval;
int theOwnId;
@@ -146,6 +153,11 @@ private:
void calculateSendLimit();
+ // Scan batch configuration parameters
+ Uint32 m_scan_batch_size;
+ Uint32 m_batch_byte_size;
+ Uint32 m_batch_size;
+
// Declarations for the receive and send thread
int theStopReceive;
@@ -162,13 +174,13 @@ private:
* Block number handling
*/
public:
- static const unsigned MAX_NO_THREADS = 4711;
+ STATIC_CONST( MAX_NO_THREADS = 4711 );
private:
struct ThreadData {
- static const Uint32 ACTIVE = (1 << 16) | 1;
- static const Uint32 INACTIVE = (1 << 16);
- static const Uint32 END_OF_LIST = MAX_NO_THREADS + 1;
+ STATIC_CONST( ACTIVE = (1 << 16) | 1 );
+ STATIC_CONST( INACTIVE = (1 << 16) );
+ STATIC_CONST( END_OF_LIST = MAX_NO_THREADS + 1 );
ThreadData(Uint32 initialSize = 32);
@@ -210,9 +222,9 @@ private:
return (m_statusNext[index] & (1 << 16)) != 0;
}
} m_threads;
-
- Uint32 m_open_count;
-
+
+ Uint32 m_max_trans_id;
+
/**
* execute function
*/
@@ -224,7 +236,6 @@ public:
NdbMutex* theMutexPtr;
private:
static TransporterFacade* theFacadeInstance;
- static ConfigRetriever *s_config_retriever;
public:
GlobalDictCache m_globalDictCache;
@@ -328,4 +339,24 @@ TransporterFacade::getNodeSequence(NodeId n) const {
return theClusterMgr->getNodeInfo(n).m_info.m_connectCount;
}
+inline
+Uint32
+TransporterFacade::get_scan_batch_size() {
+ return m_scan_batch_size;
+}
+
+inline
+Uint32
+TransporterFacade::get_batch_byte_size() {
+ return m_batch_byte_size;
+}
+
+inline
+Uint32
+TransporterFacade::get_batch_size() {
+ return m_batch_size;
+}
+
+
+
#endif // TransporterFacade_H
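
Editorial note: the three inline getters added at the end of the header expose the scan/batch parameters read from the cluster configuration (CFG_MAX_SCAN_BATCH_SIZE, CFG_BATCH_BYTE_SIZE, CFG_BATCH_SIZE), falling back to the compile-time defaults set in the constructor. A hypothetical call site might look like this (the comments describe the presumed meaning of each parameter, which is not spelled out in this patch):

    // Sketch only; the real consumers live elsewhere in the NDB API.
    TransporterFacade* tp = TransporterFacade::instance();
    Uint32 max_scan_bytes = tp->get_scan_batch_size();   // presumed overall scan byte limit
    Uint32 batch_bytes    = tp->get_batch_byte_size();   // presumed per-batch byte limit
    Uint32 batch_rows     = tp->get_batch_size();        // presumed per-batch row limit
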
diff --git a/ndb/src/ndbapi/ndb_cluster_connection.cpp b/ndb/src/ndbapi/ndb_cluster_connection.cpp
new file mode 100644
index 00000000000..4c42fe1aeef
--- /dev/null
+++ b/ndb/src/ndbapi/ndb_cluster_connection.cpp
@@ -0,0 +1,208 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <ndb_global.h>
+#include <my_pthread.h>
+#include <my_sys.h>
+
+#include <ndb_cluster_connection.hpp>
+#include <TransporterFacade.hpp>
+#include <NdbOut.hpp>
+#include <NdbSleep.h>
+#include <NdbThread.h>
+#include <ndb_limits.h>
+#include <ConfigRetriever.hpp>
+#include <ndb_version.h>
+
+static int g_run_connect_thread= 0;
+
+#include <NdbMutex.h>
+NdbMutex *ndb_global_event_buffer_mutex= NULL;
+#ifdef VM_TRACE
+NdbMutex *ndb_print_state_mutex= NULL;
+#endif
+
+Ndb_cluster_connection::Ndb_cluster_connection(const char *connect_string)
+{
+ DBUG_ENTER("Ndb_cluster_connection");
+ DBUG_PRINT("enter",("Ndb_cluster_connection this=0x%x", this));
+ m_facade= TransporterFacade::theFacadeInstance= new TransporterFacade();
+ if (connect_string)
+ m_connect_string= my_strdup(connect_string,MYF(MY_WME));
+ else
+ m_connect_string= 0;
+ m_config_retriever= 0;
+ m_local_config= 0;
+ m_connect_thread= 0;
+ m_connect_callback= 0;
+
+ if (ndb_global_event_buffer_mutex == NULL)
+ {
+ ndb_global_event_buffer_mutex= NdbMutex_Create();
+ }
+#ifdef VM_TRACE
+ if (ndb_print_state_mutex == NULL)
+ {
+ ndb_print_state_mutex= NdbMutex_Create();
+ }
+#endif
+ DBUG_VOID_RETURN;
+}
+
+extern "C" pthread_handler_decl(run_ndb_cluster_connection_connect_thread, me)
+{
+ my_thread_init();
+ g_run_connect_thread= 1;
+ ((Ndb_cluster_connection*) me)->connect_thread();
+ my_thread_end();
+ NdbThread_Exit(0);
+ return me;
+}
+
+void Ndb_cluster_connection::connect_thread()
+{
+ DBUG_ENTER("Ndb_cluster_connection::connect_thread");
+ int r;
+ do {
+ NdbSleep_SecSleep(1);
+ if ((r = connect(1)) == 0)
+ break;
+ if (r == -1) {
+ printf("Ndb_cluster_connection::connect_thread error\n");
+ DBUG_ASSERT(false);
+ g_run_connect_thread= 0;
+ } else {
+ // Wait before making a new connect attempt
+ NdbSleep_SecSleep(1);
+ }
+ } while (g_run_connect_thread);
+ if (m_connect_callback)
+ (*m_connect_callback)();
+ DBUG_VOID_RETURN;
+}
+
+int Ndb_cluster_connection::start_connect_thread(int (*connect_callback)(void))
+{
+ int r;
+ DBUG_ENTER("Ndb_cluster_connection::start_connect_thread");
+ m_connect_callback= connect_callback;
+ if ((r = connect(1)) == 1)
+ {
+ DBUG_PRINT("info",("starting thread"));
+ m_connect_thread=
+ NdbThread_Create(run_ndb_cluster_connection_connect_thread,
+ (void**)this, 32768, "ndb_cluster_connection",
+ NDB_THREAD_PRIO_LOW);
+ }
+ else if (r < 0)
+ {
+ DBUG_RETURN(-1);
+ }
+ else if (m_connect_callback)
+ {
+ (*m_connect_callback)();
+ }
+ DBUG_RETURN(0);
+}
+
+int Ndb_cluster_connection::connect(int reconnect)
+{
+ DBUG_ENTER("Ndb_cluster_connection::connect");
+ const char* error = 0;
+ do {
+ if (m_config_retriever == 0)
+ {
+ if (m_local_config == 0) {
+ m_local_config= new LocalConfig();
+ if (!m_local_config->init(m_connect_string,0)) {
+ ndbout_c("Configuration error: Unable to retrieve local config");
+ m_local_config->printError();
+ m_local_config->printUsage();
+ DBUG_RETURN(-1);
+ }
+ }
+ m_config_retriever=
+ new ConfigRetriever(*m_local_config, NDB_VERSION, NODE_TYPE_API);
+ }
+ else
+ if (reconnect == 0)
+ DBUG_RETURN(0);
+ if (reconnect)
+ {
+ int r= m_config_retriever->do_connect(1);
+ if (r == 1)
+ DBUG_RETURN(1); // mgmt server not up yet
+ if (r == -1)
+ break;
+ }
+ else
+ if(m_config_retriever->do_connect() == -1)
+ break;
+
+ Uint32 nodeId = m_config_retriever->allocNodeId();
+ for(Uint32 i = 0; nodeId == 0 && i<5; i++){
+ NdbSleep_SecSleep(3);
+ nodeId = m_config_retriever->allocNodeId();
+ }
+ if(nodeId == 0)
+ break;
+ ndb_mgm_configuration * props = m_config_retriever->getConfig();
+ if(props == 0)
+ break;
+ m_facade->start_instance(nodeId, props);
+ ndb_mgm_destroy_configuration(props);
+ m_facade->connected();
+ DBUG_RETURN(0);
+ } while(0);
+
+ ndbout << "Configuration error: ";
+ const char* erString = m_config_retriever->getErrorString();
+ if (erString == 0) {
+ erString = "No error specified!";
+ }
+ ndbout << erString << endl;
+ DBUG_RETURN(-1);
+}
+
+Ndb_cluster_connection::~Ndb_cluster_connection()
+{
+ DBUG_ENTER("~Ndb_cluster_connection");
+ DBUG_PRINT("enter",("~Ndb_cluster_connection this=0x%x", this));
+ TransporterFacade::stop_instance();
+ if (m_connect_thread)
+ {
+ void *status;
+ g_run_connect_thread= 0;
+ NdbThread_WaitFor(m_connect_thread, &status);
+ NdbThread_Destroy(&m_connect_thread);
+ m_connect_thread= 0;
+ }
+ if (m_facade != 0)
+ {
+ delete m_facade;
+ if (m_facade != TransporterFacade::theFacadeInstance)
+ abort();
+ TransporterFacade::theFacadeInstance= 0;
+ }
+ my_free(m_connect_string,MYF(MY_ALLOW_ZERO_PTR));
+ if (m_config_retriever)
+ delete m_config_retriever;
+ if (m_local_config)
+ delete m_local_config;
+ DBUG_VOID_RETURN;
+}
+
+
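
Editorial note: taken together, the new class gives applications an explicit handle on the connection life cycle: construct with an optional connect string, call connect() (or start_connect_thread() for background retries), and destroy the object when the last user is done, which also stops the TransporterFacade. A minimal usage sketch based only on the interface visible in this file (the connect string and helper name are illustrative):

    int connect_to_cluster()                                  // hypothetical helper
    {
      Ndb_cluster_connection connection("mgmhost:1186");      // illustrative connect string
      if (connection.connect(0) != 0)                         // 0 = fail immediately, no reconnect loop
      {
        ndbout_c("Could not connect to the management server");
        return -1;
      }
      // ... create and use Ndb objects; the facade is stopped when `connection` is destroyed
      return 0;
    }
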
diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c
index 66a89326a66..20661b89517 100644
--- a/ndb/src/ndbapi/ndberror.c
+++ b/ndb/src/ndbapi/ndberror.c
@@ -16,7 +16,6 @@
#include <ndb_global.h>
-
#include <ndberror.h>
typedef struct ErrorBundle {
@@ -92,9 +91,10 @@ ErrorBundle ErrorCodes[] = {
{ 4031, NR, "Node failure caused abort of transaction" },
{ 4033, NR, "Send to NDB failed" },
{ 4115, NR,
- "Transaction was committed but all read information was not "
- "received due to node crash" },
-
+ "Transaction was committed but all read information was not "
+ "received due to node crash" },
+ { 4119, NR, "Simple/dirty read failed due to node failure" },
+
/**
* Node shutdown
*/
@@ -171,7 +171,7 @@ ErrorBundle ErrorCodes[] = {
{ 677, OL, "Index UNDO buffers overloaded" },
{ 891, OL, "Data UNDO buffers overloaded" },
{ 1221, OL, "REDO log buffers overloaded" },
- { 4006, AE, "Connect failure - out of connection objects" },
+ { 4006, OL, "Connect failure - out of connection objects" },
@@ -228,6 +228,7 @@ ErrorBundle ErrorCodes[] = {
{ 4347, IE, "Bad state at alter index" },
{ 4348, IE, "Inconsistency detected at alter index" },
{ 4349, IE, "Inconsistency detected at index usage" },
+ { 4350, IE, "Transaction already aborted" },
/**
* Application error
@@ -280,6 +281,9 @@ ErrorBundle ErrorCodes[] = {
{ 739, SE, "Unsupported primary key length" },
{ 740, SE, "Nullable primary key not supported" },
{ 741, SE, "Unsupported alter table" },
+ { 742, SE, "Unsupported attribute type in index" },
+ { 743, SE, "Unsupported character set in table or index" },
+ { 744, SE, "Character string is invalid for given character set" },
{ 241, SE, "Invalid schema object version" },
{ 283, SE, "Table is being dropped" },
{ 284, SE, "Table not defined in transaction coordinator" },
@@ -424,7 +428,8 @@ ErrorBundle ErrorCodes[] = {
{ 4266, AE, "Invalid blob seek position" },
{ 4267, IE, "Corrupted blob value" },
{ 4268, IE, "Error in blob head update forced rollback of transaction" },
- { 4268, IE, "Unknown blob error" }
+ { 4268, IE, "Unknown blob error" },
+ { 4269, IE, "No connection to ndb management server" }
};
static
@@ -486,6 +491,7 @@ static
const
int NbClassification = sizeof(StatusClassificationMapping)/sizeof(ErrorStatusClassification);
+#ifdef NOT_USED
/**
* Complete all fields of an NdbError given the error code
* and details
@@ -501,7 +507,7 @@ set(ndberror_struct * error, int code, const char * details, ...){
va_end(ap);
}
}
-
+#endif
void
ndberror_update(ndberror_struct * error){
@@ -587,8 +593,10 @@ int ndb_error_string(int err_no, char *str, unsigned int size)
error.code = err_no;
ndberror_update(&error);
- len = snprintf(str, size-1, "%s: %s: %s", error.message,
- ndberror_status_message(error.status), ndberror_classification_message(error.classification));
+ len =
+ snprintf(str, size-1, "%s: %s: %s", error.message,
+ ndberror_status_message(error.status),
+ ndberror_classification_message(error.classification));
str[size-1]= '\0';
return len;
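
Editorial note: the reflowed snprintf keeps ndb_error_string()'s output format unchanged, "message: status: classification", truncated to the caller's buffer and always NUL-terminated. Typical use, based on the signature shown in the hunk header (the error text in the comment is only an example):

    char msg[512];
    int len = ndb_error_string(4006, msg, sizeof(msg));
    /* msg now holds something like
       "Connect failure - out of connection objects: <status>: <classification>" */
    printf("%d bytes: %s\n", len, msg);
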