-rw-r--r--  BitKeeper/etc/logging_ok | 3
-rw-r--r--  acinclude.m4 | 50
-rw-r--r--  configure.in | 4
-rw-r--r--  mysql-test/mysql-test-run.sh | 22
-rw-r--r--  mysql-test/ndb/ndb_config_2_node.ini | 1
-rw-r--r--  mysql-test/ndb/ndbcluster.sh | 5
-rw-r--r--  mysql-test/r/ndb_alter_table.result | 19
-rw-r--r--  mysql-test/r/ndb_charset.result | 191
-rw-r--r--  mysql-test/r/ndb_index.result | 2
-rw-r--r--  mysql-test/t/ndb_alter_table.test | 31
-rw-r--r--  mysql-test/t/ndb_charset.test | 159
-rw-r--r--  mysql-test/t/ndb_index.test | 2
-rw-r--r--  ndb/config/type_ndbapitest.mk.am | 2
-rw-r--r--  ndb/config/type_ndbapitools.mk.am | 2
-rw-r--r--  ndb/examples/ndbapi_async_example/ndbapi_async.cpp | 11
-rw-r--r--  ndb/examples/ndbapi_example1/ndbapi_example1.cpp | 5
-rw-r--r--  ndb/examples/ndbapi_example2/ndbapi_example2.cpp | 1
-rw-r--r--  ndb/examples/ndbapi_example3/ndbapi_example3.cpp | 1
-rw-r--r--  ndb/examples/ndbapi_example4/ndbapi_example4.cpp | 5
-rw-r--r--  ndb/examples/ndbapi_example5/ndbapi_example5.cpp | 1
-rw-r--r--  ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp | 10
-rw-r--r--  ndb/examples/select_all/select_all.cpp | 1
-rw-r--r--  ndb/include/debugger/EventLogger.hpp | 136
-rw-r--r--  ndb/include/kernel/LogLevel.hpp | 130
-rw-r--r--  ndb/include/kernel/signaldata/CreateTable.hpp | 3
-rw-r--r--  ndb/include/kernel/signaldata/DictTabInfo.hpp | 4
-rw-r--r--  ndb/include/kernel/signaldata/EventReport.hpp | 11
-rw-r--r--  ndb/include/kernel/signaldata/EventSubscribeReq.hpp | 18
-rw-r--r--  ndb/include/kernel/signaldata/LqhFrag.hpp | 2
-rw-r--r--  ndb/include/kernel/signaldata/SetLogLevelOrd.hpp | 28
-rw-r--r--  ndb/include/kernel/signaldata/TupFrag.hpp | 10
-rw-r--r--  ndb/include/mgmapi/mgmapi.h | 76
-rw-r--r--  ndb/include/mgmapi/mgmapi_config_parameters.h | 50
-rw-r--r--  ndb/include/mgmcommon/ConfigRetriever.hpp | 32
-rw-r--r--  ndb/include/ndb_global.h | 4
-rw-r--r--  ndb/include/ndbapi/NdbDictionary.hpp | 26
-rw-r--r--  ndb/include/ndbapi/ndb_cluster_connection.hpp | 2
-rw-r--r--  ndb/include/transporter/TransporterDefinitions.hpp | 12
-rw-r--r--  ndb/include/transporter/TransporterRegistry.hpp | 11
-rw-r--r--  ndb/include/util/NdbSqlUtil.hpp | 18
-rw-r--r--  ndb/include/util/SocketServer.hpp | 2
-rw-r--r--  ndb/src/common/debugger/EventLogger.cpp | 1361
-rw-r--r--  ndb/src/common/debugger/Makefile.am | 2
-rw-r--r--  ndb/src/common/mgmcommon/ConfigRetriever.cpp | 92
-rw-r--r--  ndb/src/common/mgmcommon/IPCConfig.cpp | 123
-rw-r--r--  ndb/src/common/portlib/NdbTCP.cpp | 10
-rw-r--r--  ndb/src/common/transporter/Makefile.am | 2
-rw-r--r--  ndb/src/common/transporter/Packer.cpp | 21
-rw-r--r--  ndb/src/common/transporter/SCI_Transporter.cpp | 733
-rw-r--r--  ndb/src/common/transporter/SCI_Transporter.hpp | 29
-rw-r--r--  ndb/src/common/transporter/SHM_Buffer.hpp | 20
-rw-r--r--  ndb/src/common/transporter/SHM_Transporter.cpp | 52
-rw-r--r--  ndb/src/common/transporter/SHM_Transporter.hpp | 2
-rw-r--r--  ndb/src/common/transporter/TCP_Transporter.cpp | 93
-rw-r--r--  ndb/src/common/transporter/TCP_Transporter.hpp | 3
-rw-r--r--  ndb/src/common/transporter/TransporterRegistry.cpp | 147
-rw-r--r--  ndb/src/common/util/Makefile.am | 2
-rw-r--r--  ndb/src/common/util/NdbSqlUtil.cpp | 184
-rw-r--r--  ndb/src/common/util/SocketServer.cpp | 32
-rw-r--r--  ndb/src/common/util/ndb_init.c (renamed from ndb/src/common/debugger/LogLevel.cpp) | 31
-rw-r--r--  ndb/src/cw/cpcd/APIService.cpp | 12
-rw-r--r--  ndb/src/cw/cpcd/CPCD.cpp | 2
-rw-r--r--  ndb/src/cw/cpcd/CPCD.hpp | 2
-rw-r--r--  ndb/src/cw/cpcd/Makefile.am | 2
-rw-r--r--  ndb/src/cw/cpcd/main.cpp | 8
-rw-r--r--  ndb/src/kernel/Makefile.am | 2
-rw-r--r--  ndb/src/kernel/blocks/backup/Backup.cpp | 34
-rw-r--r--  ndb/src/kernel/blocks/backup/read.cpp | 1
-rw-r--r--  ndb/src/kernel/blocks/backup/restore/Makefile.am | 2
-rw-r--r--  ndb/src/kernel/blocks/backup/restore/main.cpp | 5
-rw-r--r--  ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp | 56
-rw-r--r--  ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 126
-rw-r--r--  ndb/src/kernel/blocks/dblqh/Dblqh.hpp | 2
-rw-r--r--  ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 2
-rw-r--r--  ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp | 61
-rw-r--r--  ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 47
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp | 6
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp | 15
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp | 41
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupGen.cpp | 4
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp | 103
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp | 80
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp | 85
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp | 29
-rw-r--r--  ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp | 9
-rw-r--r--  ndb/src/kernel/blocks/dbtux/Dbtux.hpp | 49
-rw-r--r--  ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp | 158
-rw-r--r--  ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp | 8
-rw-r--r--  ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp | 84
-rw-r--r--  ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp | 16
-rw-r--r--  ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp | 5
-rw-r--r--  ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp | 4
-rw-r--r--  ndb/src/kernel/blocks/dbtux/Times.txt | 18
-rw-r--r--  ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp | 8
-rw-r--r--  ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp | 4
-rw-r--r--  ndb/src/kernel/main.cpp | 18
-rw-r--r--  ndb/src/kernel/vm/Configuration.cpp | 25
-rw-r--r--  ndb/src/kernel/vm/Configuration.hpp | 9
-rw-r--r--  ndb/src/kernel/vm/FastScheduler.hpp | 2
-rw-r--r--  ndb/src/kernel/vm/MetaData.hpp | 3
-rw-r--r--  ndb/src/mgmapi/mgmapi.cpp | 95
-rw-r--r--  ndb/src/mgmclient/CommandInterpreter.cpp | 197
-rw-r--r--  ndb/src/mgmclient/Makefile.am | 2
-rw-r--r--  ndb/src/mgmclient/main.cpp | 1
-rw-r--r--  ndb/src/mgmsrv/CommandInterpreter.cpp | 26
-rw-r--r--  ndb/src/mgmsrv/ConfigInfo.cpp | 125
-rw-r--r--  ndb/src/mgmsrv/Makefile.am | 4
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvr.cpp | 927
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvr.hpp | 125
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvrConfig.cpp | 3
-rw-r--r--  ndb/src/mgmsrv/NodeLogLevel.cpp | 70
-rw-r--r--  ndb/src/mgmsrv/NodeLogLevel.hpp | 54
-rw-r--r--  ndb/src/mgmsrv/NodeLogLevelList.cpp | 182
-rw-r--r--  ndb/src/mgmsrv/NodeLogLevelList.hpp | 93
-rw-r--r--  ndb/src/mgmsrv/Services.cpp | 236
-rw-r--r--  ndb/src/mgmsrv/Services.hpp | 27
-rw-r--r--  ndb/src/mgmsrv/main.cpp | 64
-rw-r--r--  ndb/src/mgmsrv/mkconfig/mkconfig.cpp | 1
-rw-r--r--  ndb/src/ndbapi/Ndb.cpp | 10
-rw-r--r--  ndb/src/ndbapi/NdbDictionary.cpp | 65
-rw-r--r--  ndb/src/ndbapi/NdbDictionaryImpl.cpp | 198
-rw-r--r--  ndb/src/ndbapi/NdbDictionaryImpl.hpp | 14
-rw-r--r--  ndb/src/ndbapi/NdbIndexOperation.cpp | 32
-rw-r--r--  ndb/src/ndbapi/NdbLinHash.hpp | 11
-rw-r--r--  ndb/src/ndbapi/NdbOperationDefine.cpp | 11
-rw-r--r--  ndb/src/ndbapi/NdbOperationSearch.cpp | 31
-rw-r--r--  ndb/src/ndbapi/NdbScanOperation.cpp | 45
-rw-r--r--  ndb/src/ndbapi/NdbScanReceiver.cpp | 187
-rw-r--r--  ndb/src/ndbapi/NdbScanReceiver.hpp | 210
-rw-r--r--  ndb/src/ndbapi/Ndbinit.cpp | 17
-rw-r--r--  ndb/src/ndbapi/TransporterFacade.hpp | 1
-rw-r--r--  ndb/src/ndbapi/ndb_cluster_connection.cpp | 32
-rw-r--r--  ndb/src/ndbapi/ndberror.c | 3
-rw-r--r--  ndb/test/include/NDBT_Table.hpp | 4
-rw-r--r--  ndb/test/ndbapi/acid.cpp | 1
-rw-r--r--  ndb/test/ndbapi/acid2.cpp | 1
-rw-r--r--  ndb/test/ndbapi/bank/Bank.cpp | 105
-rw-r--r--  ndb/test/ndbapi/bank/Bank.hpp | 5
-rw-r--r--  ndb/test/ndbapi/bank/BankLoad.cpp | 4
-rw-r--r--  ndb/test/ndbapi/bank/bankCreator.cpp | 1
-rw-r--r--  ndb/test/ndbapi/bank/bankMakeGL.cpp | 1
-rw-r--r--  ndb/test/ndbapi/bank/bankSumAccounts.cpp | 1
-rw-r--r--  ndb/test/ndbapi/bank/bankTimer.cpp | 1
-rw-r--r--  ndb/test/ndbapi/bank/bankTransactionMaker.cpp | 1
-rw-r--r--  ndb/test/ndbapi/bank/bankValidateAllGLs.cpp | 1
-rw-r--r--  ndb/test/ndbapi/bank/testBank.cpp | 1
-rw-r--r--  ndb/test/ndbapi/benchronja.cpp | 1
-rw-r--r--  ndb/test/ndbapi/bulk_copy.cpp | 1
-rw-r--r--  ndb/test/ndbapi/cdrserver.cpp | 1
-rw-r--r--  ndb/test/ndbapi/celloDb.cpp | 1
-rw-r--r--  ndb/test/ndbapi/create_all_tabs.cpp | 1
-rw-r--r--  ndb/test/ndbapi/create_tab.cpp | 1
-rw-r--r--  ndb/test/ndbapi/drop_all_tabs.cpp | 1
-rw-r--r--  ndb/test/ndbapi/flexAsynch.cpp | 1
-rw-r--r--  ndb/test/ndbapi/flexBench.cpp | 1
-rw-r--r--  ndb/test/ndbapi/flexHammer.cpp | 1
-rw-r--r--  ndb/test/ndbapi/flexScan.cpp | 1
-rw-r--r--  ndb/test/ndbapi/flexTT.cpp | 1
-rw-r--r--  ndb/test/ndbapi/flexTimedAsynch.cpp | 1
-rw-r--r--  ndb/test/ndbapi/flex_bench_mysql.cpp | 1
-rw-r--r--  ndb/test/ndbapi/index.cpp | 17
-rw-r--r--  ndb/test/ndbapi/index2.cpp | 5
-rw-r--r--  ndb/test/ndbapi/initronja.cpp | 1
-rw-r--r--  ndb/test/ndbapi/interpreterInTup.cpp | 1
-rw-r--r--  ndb/test/ndbapi/mainAsyncGenerator.cpp | 1
-rw-r--r--  ndb/test/ndbapi/msa.cpp | 1
-rw-r--r--  ndb/test/ndbapi/restarter.cpp | 1
-rw-r--r--  ndb/test/ndbapi/restarter2.cpp | 1
-rw-r--r--  ndb/test/ndbapi/restarts.cpp | 1
-rw-r--r--  ndb/test/ndbapi/size.cpp | 1
-rw-r--r--  ndb/test/ndbapi/slow_select.cpp | 1
-rw-r--r--  ndb/test/ndbapi/testBackup.cpp | 16
-rw-r--r--  ndb/test/ndbapi/testBasic.cpp | 1
-rw-r--r--  ndb/test/ndbapi/testBasicAsynch.cpp | 1
-rw-r--r--  ndb/test/ndbapi/testBlobs.cpp | 1
-rw-r--r--  ndb/test/ndbapi/testDataBuffers.cpp | 1
-rw-r--r--  ndb/test/ndbapi/testDeadlock.cpp | 1
-rw-r--r--  ndb/test/ndbapi/testDict.cpp | 3
-rw-r--r--  ndb/test/ndbapi/testGrep.cpp | 1
-rw-r--r--  ndb/test/ndbapi/testGrepVerify.cpp | 1
-rw-r--r--  ndb/test/ndbapi/testIndex.cpp | 1
-rw-r--r--  ndb/test/ndbapi/testInterpreter.cpp | 1
-rw-r--r--  ndb/test/ndbapi/testMgm.cpp | 1
-rw-r--r--  ndb/test/ndbapi/testNdbApi.cpp | 1
-rw-r--r--  ndb/test/ndbapi/testNodeRestart.cpp | 1
-rw-r--r--  ndb/test/ndbapi/testOIBasic.cpp | 26
-rw-r--r--  ndb/test/ndbapi/testOperations.cpp | 1
-rw-r--r--  ndb/test/ndbapi/testOrderedIndex.cpp | 1
-rw-r--r--  ndb/test/ndbapi/testReadPerf.cpp | 1
-rw-r--r--  ndb/test/ndbapi/testRestartGci.cpp | 1
-rw-r--r--  ndb/test/ndbapi/testScan.cpp | 1
-rw-r--r--  ndb/test/ndbapi/testScanInterpreter.cpp | 1
-rw-r--r--  ndb/test/ndbapi/testScanPerf.cpp | 1
-rw-r--r--  ndb/test/ndbapi/testSystemRestart.cpp | 63
-rw-r--r--  ndb/test/ndbapi/testTimeout.cpp | 1
-rw-r--r--  ndb/test/ndbapi/testTransactions.cpp | 1
-rw-r--r--  ndb/test/ndbapi/test_event.cpp | 1
-rw-r--r--  ndb/test/run-test/Makefile.am | 2
-rwxr-xr-x  ndb/test/run-test/atrt-mysql-test-run | 17
-rw-r--r--  ndb/test/run-test/daily-basic-tests.txt | 15
-rw-r--r--  ndb/test/run-test/daily-devel-tests.txt | 40
-rw-r--r--  ndb/test/run-test/main.cpp | 1
-rw-r--r--  ndb/test/src/HugoTransactions.cpp | 11
-rw-r--r--  ndb/test/src/NDBT_Test.cpp | 2
-rw-r--r--  ndb/test/src/NdbBackup.cpp | 37
-rw-r--r--  ndb/test/tools/copy_tab.cpp | 1
-rw-r--r--  ndb/test/tools/cpcc.cpp | 1
-rw-r--r--  ndb/test/tools/create_index.cpp | 1
-rw-r--r--  ndb/test/tools/hugoCalculator.cpp | 1
-rw-r--r--  ndb/test/tools/hugoFill.cpp | 1
-rw-r--r--  ndb/test/tools/hugoLoad.cpp | 1
-rw-r--r--  ndb/test/tools/hugoLockRecords.cpp | 1
-rw-r--r--  ndb/test/tools/hugoPkDelete.cpp | 1
-rw-r--r--  ndb/test/tools/hugoPkRead.cpp | 1
-rw-r--r--  ndb/test/tools/hugoPkReadRecord.cpp | 1
-rw-r--r--  ndb/test/tools/hugoPkUpdate.cpp | 1
-rw-r--r--  ndb/test/tools/hugoScanRead.cpp | 1
-rw-r--r--  ndb/test/tools/hugoScanUpdate.cpp | 1
-rw-r--r--  ndb/test/tools/restart.cpp | 1
-rw-r--r--  ndb/test/tools/transproxy.cpp | 1
-rw-r--r--  ndb/test/tools/verify_index.cpp | 1
-rw-r--r--  ndb/tools/delete_all.cpp | 1
-rw-r--r--  ndb/tools/desc.cpp | 1
-rw-r--r--  ndb/tools/drop_index.cpp | 1
-rw-r--r--  ndb/tools/drop_tab.cpp | 1
-rw-r--r--  ndb/tools/listTables.cpp | 3
-rw-r--r--  ndb/tools/ndbsql.cpp | 1
-rw-r--r--  ndb/tools/select_all.cpp | 3
-rw-r--r--  ndb/tools/select_count.cpp | 1
-rw-r--r--  ndb/tools/waiter.cpp | 1
-rw-r--r--  sql/Makefile.am | 2
-rw-r--r--  sql/ha_ndbcluster.cc | 165
-rw-r--r--  sql/ha_ndbcluster.h | 2
233 files changed, 4643 insertions, 4270 deletions
diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok
index da85ed6391f..77c82618446 100644
--- a/BitKeeper/etc/logging_ok
+++ b/BitKeeper/etc/logging_ok
@@ -102,6 +102,7 @@ miguel@hegel.txg.br
miguel@light.
miguel@light.local
miguel@sartre.local
+mikael@mc04.(none)
mikron@c-fb0ae253.1238-1-64736c10.cust.bredbandsbolaget.se
mikron@mikael-ronstr-ms-dator.local
mleich@mysql.com
@@ -132,6 +133,7 @@ mwagner@here.mwagner.org
mwagner@work.mysql.com
mydev@mysql.com
mysql@home.(none)
+mysql@mc04.(none)
mysqldev@build.mysql2.com
mysqldev@melody.local
mysqldev@mysql.com
@@ -161,6 +163,7 @@ ram@ram.(none)
ranger@regul.home.lan
rburnett@build.mysql.com
root@home.(none)
+root@mc04.(none)
root@x3.internalnet
salle@banica.(none)
salle@geopard.(none)
diff --git a/acinclude.m4 b/acinclude.m4
index 28344919dae..c7d7ba0e9c9 100644
--- a/acinclude.m4
+++ b/acinclude.m4
@@ -1548,16 +1548,43 @@ dnl Sets HAVE_NDBCLUSTER_DB if --with-ndbcluster is used
dnl ---------------------------------------------------------------------------
AC_DEFUN([MYSQL_CHECK_NDB_OPTIONS], [
+ AC_ARG_WITH([ndb-sci],
+ AC_HELP_STRING([--with-ndb-sci=DIR],
+ [Provide MySQL with a custom location of
+ sci library. Given DIR, sci library is
+ assumed to be in $DIR/lib and header files
+ in $DIR/include.]),
+ [mysql_sci_dir=${withval}],
+ [mysql_sci_dir=""])
+
+ case "$mysql_sci_dir" in
+ "no" )
+ have_ndb_sci=no
+ AC_MSG_RESULT([-- not including sci transporter])
+ ;;
+ * )
+ if test -f "$mysql_sci_dir/lib/libsisci.a" -a \
+ -f "$mysql_sci_dir/include/sisci_api.h"; then
+ NDB_SCI_INCLUDES="-I$mysql_sci_dir/include"
+ NDB_SCI_LIBS="-L$mysql_sci_dir/lib -lsisci"
+ AC_MSG_RESULT([-- including sci transporter])
+ AC_DEFINE([NDB_SCI_TRANSPORTER], [1],
+ [Including Ndb Cluster DB sci transporter])
+ AC_SUBST(NDB_SCI_INCLUDES)
+ AC_SUBST(NDB_SCI_LIBS)
+ have_ndb_sci="yes"
+ AC_MSG_RESULT([found sci transporter in $mysql_sci_dir/{include, lib}])
+ else
+ AC_MSG_RESULT([could not find sci transporter in $mysql_sci_dir/{include, lib}])
+ fi
+ ;;
+ esac
+
AC_ARG_WITH([ndb-shm],
[
--with-ndb-shm Include the NDB Cluster shared memory transporter],
[ndb_shm="$withval"],
[ndb_shm=no])
- AC_ARG_WITH([ndb-sci],
- [
- --with-ndb-sci Include the NDB Cluster sci transporter],
- [ndb_sci="$withval"],
- [ndb_sci=no])
AC_ARG_WITH([ndb-test],
[
--with-ndb-test Include the NDB Cluster ndbapi test programs],
@@ -1590,19 +1617,6 @@ AC_DEFUN([MYSQL_CHECK_NDB_OPTIONS], [
;;
esac
- have_ndb_sci=no
- case "$ndb_sci" in
- yes )
- AC_MSG_RESULT([-- including sci transporter])
- AC_DEFINE([NDB_SCI_TRANSPORTER], [1],
- [Including Ndb Cluster DB sci transporter])
- have_ndb_sci="yes"
- ;;
- * )
- AC_MSG_RESULT([-- not including sci transporter])
- ;;
- esac
-
have_ndb_test=no
case "$ndb_test" in
yes )
diff --git a/configure.in b/configure.in
index be365ec8584..39151205533 100644
--- a/configure.in
+++ b/configure.in
@@ -3024,11 +3024,11 @@ AC_SUBST([ndb_port_base])
ndb_transporter_opt_objs=""
if test X"$have_ndb_shm" = Xyes
then
- ndb_transporter_opt_objs="$(ndb_transporter_opt_objs) SHM_Transporter.lo SHM_Transporter.unix.lo"
+ ndb_transporter_opt_objs="$ndb_transporter_opt_objs SHM_Transporter.lo SHM_Transporter.unix.lo"
fi
if test X"$have_ndb_sci" = Xyes
then
- ndb_transporter_opt_objs="$(ndb_transporter_opt_objs) SCI_Transporter.lo"
+ ndb_transporter_opt_objs="$ndb_transporter_opt_objs SCI_Transporter.lo"
fi
AC_SUBST([ndb_transporter_opt_objs])
diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh
index 64e903a115b..9b891d44a0b 100644
--- a/mysql-test/mysql-test-run.sh
+++ b/mysql-test/mysql-test-run.sh
@@ -299,6 +299,11 @@ while test $# -gt 0; do
--record)
RECORD=1;
EXTRA_MYSQL_TEST_OPT="$EXTRA_MYSQL_TEST_OPT $1" ;;
+ --small-bench)
+ DO_SMALL_BENCH=1
+ DO_BENCH=1
+ NO_SLAVE=1
+ ;;
--bench)
DO_BENCH=1
NO_SLAVE=1
@@ -1546,7 +1551,13 @@ then
if [ -z "$USE_RUNNING_NDBCLUSTER" ]
then
echo "Starting ndbcluster"
- ./ndb/ndbcluster --port-base=$NDBCLUSTER_PORT --small --diskless --initial --data-dir=$MYSQL_TEST_DIR/var || exit 1
+ if [ "$DO_BENCH" = 1 ]
+ then
+ NDBCLUSTER_OPTS=""
+ else
+ NDBCLUSTER_OPTS="--small"
+ fi
+ ./ndb/ndbcluster --port-base=$NDBCLUSTER_PORT $NDBCLUSTER_OPTS --diskless --initial --data-dir=$MYSQL_TEST_DIR/var || exit 1
USE_NDBCLUSTER="$USE_NDBCLUSTER --ndb-connectstring=\"host=localhost:$NDBCLUSTER_PORT\""
else
USE_NDBCLUSTER="$USE_NDBCLUSTER --ndb-connectstring=\"$USE_RUNNING_NDBCLUSTER\""
@@ -1580,9 +1591,14 @@ if [ "$DO_BENCH" = 1 ]
then
start_master
+ if [ "$DO_SMALL_BENCH" = 1 ]
+ then
+ EXTRA_BENCH_ARGS="--small-test --small-tables"
+ fi
+
if [ ! -z "$USE_NDBCLUSTER" ]
then
- EXTRA_BENCH_ARGS="--create-options=TYPE=ndb"
+ EXTRA_BENCH_ARGS="--create-options=TYPE=ndb $EXTRA_BENCH_ARGS"
fi
BENCHDIR=$BASEDIR/sql-bench/
@@ -1590,7 +1606,7 @@ then
cd $BENCHDIR
if [ -z "$1" ]
then
- ./run-all-tests --socket=$MASTER_MYSOCK --user=root $EXTRA_BENCH_ARGS
+ ./run-all-tests --socket=$MASTER_MYSOCK --user=root $EXTRA_BENCH_ARGS --log
else
if [ -x "./$1" ]
then
diff --git a/mysql-test/ndb/ndb_config_2_node.ini b/mysql-test/ndb/ndb_config_2_node.ini
index cc0f940efe3..8c89d2aa2cc 100644
--- a/mysql-test/ndb/ndb_config_2_node.ini
+++ b/mysql-test/ndb/ndb_config_2_node.ini
@@ -6,6 +6,7 @@ IndexMemory= CHOOSE_IndexMemory
Diskless= CHOOSE_Diskless
TimeBetweenWatchDogCheck= 30000
DataDir= CHOOSE_FILESYSTEM
+MaxNoOfOrderedIndexes= CHOOSE_MaxNoOfOrderedIndexes
[ndbd]
HostName= CHOOSE_HOSTNAME_1
diff --git a/mysql-test/ndb/ndbcluster.sh b/mysql-test/ndb/ndbcluster.sh
index f143242371f..7485e42923e 100644
--- a/mysql-test/ndb/ndbcluster.sh
+++ b/mysql-test/ndb/ndbcluster.sh
@@ -44,7 +44,8 @@ initial_ndb=
status_ndb=
ndb_diskless=0
-ndb_con_op=100000
+ndb_no_ord=512
+ndb_con_op=105000
ndb_dmem=80M
ndb_imem=24M
@@ -65,6 +66,7 @@ while test $# -gt 0; do
status_ndb=1
;;
--small)
+ ndb_no_ord=128
ndb_con_op=10000
ndb_dmem=40M
ndb_imem=12M
@@ -128,6 +130,7 @@ port_transporter=`expr $ndb_mgmd_port + 2`
if [ $initial_ndb ] ; then
sed \
+ -e s,"CHOOSE_MaxNoOfOrderedIndexes","$ndb_no_ord",g \
-e s,"CHOOSE_MaxNoOfConcurrentOperations","$ndb_con_op",g \
-e s,"CHOOSE_DataMemory","$ndb_dmem",g \
-e s,"CHOOSE_IndexMemory","$ndb_imem",g \
diff --git a/mysql-test/r/ndb_alter_table.result b/mysql-test/r/ndb_alter_table.result
index 8143e34ecc2..a36536b878d 100644
--- a/mysql-test/r/ndb_alter_table.result
+++ b/mysql-test/r/ndb_alter_table.result
@@ -73,3 +73,22 @@ col6 col1 col3 fourth col4 col4_5 col5 col7 col8
1 101 3 4 5 PENDING 0000-00-00 00:00:00
2 102 4 3 5 99 PENDING EXTRA 2004-01-01 00:00:00
drop table t1;
+CREATE TABLE t1 (
+a INT NOT NULL,
+b INT NOT NULL
+) ENGINE=ndbcluster;
+INSERT INTO t1 VALUES (9410,9412);
+ALTER TABLE t1 ADD COLUMN c int not null;
+select * from t1;
+a b c
+9410 9412 0
+select * from t1;
+a b c
+9410 9412 0
+alter table t1 drop c;
+select * from t1;
+a b
+9410 9412
+drop table t1;
+select * from t1;
+ERROR 42S02: Table 'test.t1' doesn't exist
diff --git a/mysql-test/r/ndb_charset.result b/mysql-test/r/ndb_charset.result
new file mode 100644
index 00000000000..93429a1fcb0
--- /dev/null
+++ b/mysql-test/r/ndb_charset.result
@@ -0,0 +1,191 @@
+drop table if exists t1;
+create table t1 (
+a char(3) character set latin1 collate latin1_bin primary key
+) engine=ndb;
+insert into t1 values('aAa');
+insert into t1 values('aaa');
+insert into t1 values('AAA');
+select * from t1 order by a;
+a
+AAA
+aAa
+aaa
+select * from t1 where a = 'aAa';
+a
+aAa
+select * from t1 where a = 'aaa';
+a
+aaa
+select * from t1 where a = 'AaA';
+a
+select * from t1 where a = 'AAA';
+a
+AAA
+drop table t1;
+create table t1 (
+a char(3) character set latin1 collate latin1_swedish_ci primary key
+) engine=ndb;
+insert into t1 values('aAa');
+insert into t1 values('aaa');
+ERROR 23000: Duplicate entry 'aaa' for key 1
+insert into t1 values('AAA');
+ERROR 23000: Duplicate entry 'AAA' for key 1
+select * from t1 order by a;
+a
+aAa
+select * from t1 where a = 'aAa';
+a
+aAa
+select * from t1 where a = 'aaa';
+a
+aAa
+select * from t1 where a = 'AaA';
+a
+aAa
+select * from t1 where a = 'AAA';
+a
+aAa
+drop table t1;
+create table t1 (
+p int primary key,
+a char(3) character set latin1 collate latin1_bin not null,
+unique key(a)
+) engine=ndb;
+insert into t1 values(1, 'aAa');
+insert into t1 values(2, 'aaa');
+insert into t1 values(3, 'AAA');
+select * from t1 order by p;
+p a
+1 aAa
+2 aaa
+3 AAA
+select * from t1 where a = 'aAa';
+p a
+1 aAa
+select * from t1 where a = 'aaa';
+p a
+2 aaa
+select * from t1 where a = 'AaA';
+p a
+select * from t1 where a = 'AAA';
+p a
+3 AAA
+drop table t1;
+create table t1 (
+p int primary key,
+a char(3) character set latin1 collate latin1_swedish_ci not null,
+unique key(a)
+) engine=ndb;
+insert into t1 values(1, 'aAa');
+insert into t1 values(2, 'aaa');
+ERROR 23000: Can't write, because of unique constraint, to table 't1'
+insert into t1 values(3, 'AAA');
+ERROR 23000: Can't write, because of unique constraint, to table 't1'
+select * from t1 order by p;
+p a
+1 aAa
+select * from t1 where a = 'aAa';
+p a
+1 aAa
+select * from t1 where a = 'aaa';
+p a
+1 aAa
+select * from t1 where a = 'AaA';
+p a
+1 aAa
+select * from t1 where a = 'AAA';
+p a
+1 aAa
+drop table t1;
+create table t1 (
+p int primary key,
+a char(3) character set latin1 collate latin1_bin not null,
+index(a)
+) engine=ndb;
+insert into t1 values(1, 'aAa');
+insert into t1 values(2, 'aaa');
+insert into t1 values(3, 'AAA');
+insert into t1 values(4, 'aAa');
+insert into t1 values(5, 'aaa');
+insert into t1 values(6, 'AAA');
+select * from t1 order by p;
+p a
+1 aAa
+2 aaa
+3 AAA
+4 aAa
+5 aaa
+6 AAA
+explain select * from t1 where a = 'zZz' order by p;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref a a 3 const 10 Using where; Using filesort
+select * from t1 where a = 'aAa' order by p;
+p a
+1 aAa
+4 aAa
+select * from t1 where a = 'aaa' order by p;
+p a
+2 aaa
+5 aaa
+select * from t1 where a = 'AaA' order by p;
+p a
+select * from t1 where a = 'AAA' order by p;
+p a
+3 AAA
+6 AAA
+drop table t1;
+create table t1 (
+p int primary key,
+a char(3) character set latin1 collate latin1_swedish_ci not null,
+index(a)
+) engine=ndb;
+insert into t1 values(1, 'aAa');
+insert into t1 values(2, 'aaa');
+insert into t1 values(3, 'AAA');
+insert into t1 values(4, 'aAa');
+insert into t1 values(5, 'aaa');
+insert into t1 values(6, 'AAA');
+select * from t1 order by p;
+p a
+1 aAa
+2 aaa
+3 AAA
+4 aAa
+5 aaa
+6 AAA
+explain select * from t1 where a = 'zZz' order by p;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref a a 3 const 10 Using where; Using filesort
+select * from t1 where a = 'aAa' order by p;
+p a
+1 aAa
+2 aaa
+3 AAA
+4 aAa
+5 aaa
+6 AAA
+select * from t1 where a = 'aaa' order by p;
+p a
+1 aAa
+2 aaa
+3 AAA
+4 aAa
+5 aaa
+6 AAA
+select * from t1 where a = 'AaA' order by p;
+p a
+1 aAa
+2 aaa
+3 AAA
+4 aAa
+5 aaa
+6 AAA
+select * from t1 where a = 'AAA' order by p;
+p a
+1 aAa
+2 aaa
+3 AAA
+4 aAa
+5 aaa
+6 AAA
+drop table t1;
diff --git a/mysql-test/r/ndb_index.result b/mysql-test/r/ndb_index.result
index dd92c237ace..5702552b0b5 100644
--- a/mysql-test/r/ndb_index.result
+++ b/mysql-test/r/ndb_index.result
@@ -4,7 +4,7 @@ PORT varchar(16) NOT NULL,
ACCESSNODE varchar(16) NOT NULL,
POP varchar(48) NOT NULL,
ACCESSTYPE int unsigned NOT NULL,
-CUSTOMER_ID varchar(20) NOT NULL,
+CUSTOMER_ID varchar(20) collate latin1_bin NOT NULL,
PROVIDER varchar(16),
TEXPIRE int unsigned,
NUM_IP int unsigned,
diff --git a/mysql-test/t/ndb_alter_table.test b/mysql-test/t/ndb_alter_table.test
index 3cdddfa8dce..96270d94dcb 100644
--- a/mysql-test/t/ndb_alter_table.test
+++ b/mysql-test/t/ndb_alter_table.test
@@ -49,6 +49,37 @@ show table status;
select * from t1 order by col1;
drop table t1;
+
+#
+# Check that invalidating dictionary cache works
+#
+
+CREATE TABLE t1 (
+ a INT NOT NULL,
+ b INT NOT NULL
+) ENGINE=ndbcluster;
+
+INSERT INTO t1 VALUES (9410,9412);
+
+connect (con1,localhost,,,test);
+connect (con2,localhost,,,test);
+
+connection con1;
+ALTER TABLE t1 ADD COLUMN c int not null;
+select * from t1;
+
+connection con2;
+select * from t1;
+alter table t1 drop c;
+
+connection con1;
+select * from t1;
+drop table t1;
+
+connection con2;
+--error 1146
+select * from t1;
+
#--disable_warnings
#DROP TABLE IF EXISTS t2;
#--enable_warnings
diff --git a/mysql-test/t/ndb_charset.test b/mysql-test/t/ndb_charset.test
new file mode 100644
index 00000000000..b9f28ed0faf
--- /dev/null
+++ b/mysql-test/t/ndb_charset.test
@@ -0,0 +1,159 @@
+--source include/have_ndb.inc
+
+--disable_warnings
+drop table if exists t1;
+--enable_warnings
+
+#
+# Minimal NDB charset test.
+#
+
+# pk - binary
+
+create table t1 (
+ a char(3) character set latin1 collate latin1_bin primary key
+) engine=ndb;
+# ok
+insert into t1 values('aAa');
+insert into t1 values('aaa');
+insert into t1 values('AAA');
+# 3
+select * from t1 order by a;
+# 1
+select * from t1 where a = 'aAa';
+# 1
+select * from t1 where a = 'aaa';
+# 0
+select * from t1 where a = 'AaA';
+# 1
+select * from t1 where a = 'AAA';
+drop table t1;
+
+# pk - case insensitive
+
+create table t1 (
+ a char(3) character set latin1 collate latin1_swedish_ci primary key
+) engine=ndb;
+# ok
+insert into t1 values('aAa');
+# fail
+--error 1062
+insert into t1 values('aaa');
+--error 1062
+insert into t1 values('AAA');
+# 1
+select * from t1 order by a;
+# 1
+select * from t1 where a = 'aAa';
+# 1
+select * from t1 where a = 'aaa';
+# 1
+select * from t1 where a = 'AaA';
+# 1
+select * from t1 where a = 'AAA';
+drop table t1;
+
+# unique hash index - binary
+
+create table t1 (
+ p int primary key,
+ a char(3) character set latin1 collate latin1_bin not null,
+ unique key(a)
+) engine=ndb;
+# ok
+insert into t1 values(1, 'aAa');
+insert into t1 values(2, 'aaa');
+insert into t1 values(3, 'AAA');
+# 3
+select * from t1 order by p;
+# 1
+select * from t1 where a = 'aAa';
+# 1
+select * from t1 where a = 'aaa';
+# 0
+select * from t1 where a = 'AaA';
+# 1
+select * from t1 where a = 'AAA';
+drop table t1;
+
+# unique hash index - case insensitive
+
+create table t1 (
+ p int primary key,
+ a char(3) character set latin1 collate latin1_swedish_ci not null,
+ unique key(a)
+) engine=ndb;
+# ok
+insert into t1 values(1, 'aAa');
+# fail
+--error 1169
+insert into t1 values(2, 'aaa');
+--error 1169
+insert into t1 values(3, 'AAA');
+# 1
+select * from t1 order by p;
+# 1
+select * from t1 where a = 'aAa';
+# 1
+select * from t1 where a = 'aaa';
+# 1
+select * from t1 where a = 'AaA';
+# 1
+select * from t1 where a = 'AAA';
+drop table t1;
+
+# ordered index - binary
+
+create table t1 (
+ p int primary key,
+ a char(3) character set latin1 collate latin1_bin not null,
+ index(a)
+) engine=ndb;
+# ok
+insert into t1 values(1, 'aAa');
+insert into t1 values(2, 'aaa');
+insert into t1 values(3, 'AAA');
+insert into t1 values(4, 'aAa');
+insert into t1 values(5, 'aaa');
+insert into t1 values(6, 'AAA');
+# 6
+select * from t1 order by p;
+# plan
+explain select * from t1 where a = 'zZz' order by p;
+# 2
+select * from t1 where a = 'aAa' order by p;
+# 2
+select * from t1 where a = 'aaa' order by p;
+# 0
+select * from t1 where a = 'AaA' order by p;
+# 2
+select * from t1 where a = 'AAA' order by p;
+drop table t1;
+
+# ordered index - case insensitive
+
+create table t1 (
+ p int primary key,
+ a char(3) character set latin1 collate latin1_swedish_ci not null,
+ index(a)
+) engine=ndb;
+# ok
+insert into t1 values(1, 'aAa');
+insert into t1 values(2, 'aaa');
+insert into t1 values(3, 'AAA');
+insert into t1 values(4, 'aAa');
+insert into t1 values(5, 'aaa');
+insert into t1 values(6, 'AAA');
+# 6
+select * from t1 order by p;
+# plan
+explain select * from t1 where a = 'zZz' order by p;
+# 6
+select * from t1 where a = 'aAa' order by p;
+# 6
+select * from t1 where a = 'aaa' order by p;
+# 6
+select * from t1 where a = 'AaA' order by p;
+# 6
+select * from t1 where a = 'AAA' order by p;
+drop table t1;
diff --git a/mysql-test/t/ndb_index.test b/mysql-test/t/ndb_index.test
index d3977dc3ea4..e65b24a9b20 100644
--- a/mysql-test/t/ndb_index.test
+++ b/mysql-test/t/ndb_index.test
@@ -9,7 +9,7 @@ CREATE TABLE t1 (
ACCESSNODE varchar(16) NOT NULL,
POP varchar(48) NOT NULL,
ACCESSTYPE int unsigned NOT NULL,
- CUSTOMER_ID varchar(20) NOT NULL,
+ CUSTOMER_ID varchar(20) collate latin1_bin NOT NULL,
PROVIDER varchar(16),
TEXPIRE int unsigned,
NUM_IP int unsigned,
diff --git a/ndb/config/type_ndbapitest.mk.am b/ndb/config/type_ndbapitest.mk.am
index 8ac39aec8cf..f1fd8286337 100644
--- a/ndb/config/type_ndbapitest.mk.am
+++ b/ndb/config/type_ndbapitest.mk.am
@@ -3,7 +3,7 @@ LDADD += $(top_builddir)/ndb/test/src/libNDBT.a \
$(top_builddir)/ndb/src/libndbclient.la \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/mysys/libmysys.a \
- $(top_builddir)/strings/libmystrings.a
+ $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
INCLUDES += -I$(srcdir) -I$(top_srcdir)/include \
-I$(top_srcdir)/ndb/include \
diff --git a/ndb/config/type_ndbapitools.mk.am b/ndb/config/type_ndbapitools.mk.am
index 3b5d40874b2..ed6d8699e05 100644
--- a/ndb/config/type_ndbapitools.mk.am
+++ b/ndb/config/type_ndbapitools.mk.am
@@ -3,7 +3,7 @@ LDADD += \
$(top_builddir)/ndb/src/libndbclient.la \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/mysys/libmysys.a \
- $(top_builddir)/strings/libmystrings.a
+ $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
INCLUDES += -I$(srcdir) -I$(top_srcdir)/include \
-I$(top_srcdir)/ndb/include \
diff --git a/ndb/examples/ndbapi_async_example/ndbapi_async.cpp b/ndb/examples/ndbapi_async_example/ndbapi_async.cpp
index 7abebcc832d..76ce1a8efe3 100644
--- a/ndb/examples/ndbapi_async_example/ndbapi_async.cpp
+++ b/ndb/examples/ndbapi_async_example/ndbapi_async.cpp
@@ -46,9 +46,9 @@
*
* NdbDictionary::Column
* setName()
- * setPrimaryKey()
* setType()
* setLength()
+ * setPrimaryKey()
* setNullable()
*
* NdbDictionary::Table
@@ -234,9 +234,9 @@ int create_table(Ndb * myNdb)
* Column REG_NO
*/
myColumn.setName("REG_NO");
- myColumn.setPrimaryKey(true);
myColumn.setType(NdbDictionary::Column::Unsigned);
myColumn.setLength(1);
+ myColumn.setPrimaryKey(true);
myColumn.setNullable(false);
myTable.addColumn(myColumn);
@@ -244,9 +244,9 @@ int create_table(Ndb * myNdb)
* Column BRAND
*/
myColumn.setName("BRAND");
- myColumn.setPrimaryKey(false);
myColumn.setType(NdbDictionary::Column::Char);
myColumn.setLength(20);
+ myColumn.setPrimaryKey(false);
myColumn.setNullable(false);
myTable.addColumn(myColumn);
@@ -254,9 +254,9 @@ int create_table(Ndb * myNdb)
* Column COLOR
*/
myColumn.setName("COLOR");
- myColumn.setPrimaryKey(false);
myColumn.setType(NdbDictionary::Column::Char);
myColumn.setLength(20);
+ myColumn.setPrimaryKey(false);
myColumn.setNullable(false);
myTable.addColumn(myColumn);
@@ -454,6 +454,7 @@ int populate(Ndb * myNdb, int data, async_callback_t * cbData)
int main()
{
+ ndb_init();
Ndb* myNdb = new Ndb( "TEST_DB" ); // Object representing the database
/*******************************************
@@ -493,5 +494,3 @@ int main()
std::cout << "Number of temporary errors: " << tempErrors << std::endl;
delete myNdb;
}
-
-
diff --git a/ndb/examples/ndbapi_example1/ndbapi_example1.cpp b/ndb/examples/ndbapi_example1/ndbapi_example1.cpp
index 879d86de824..03a84aa249b 100644
--- a/ndb/examples/ndbapi_example1/ndbapi_example1.cpp
+++ b/ndb/examples/ndbapi_example1/ndbapi_example1.cpp
@@ -44,6 +44,7 @@
int main()
{
+ ndb_init();
Ndb* myNdb = new Ndb( "TEST_DB_1" ); // Object representing the database
NdbDictionary::Table myTable;
NdbDictionary::Column myColumn;
@@ -78,16 +79,16 @@ int main()
myTable.setName("MYTABLENAME");
myColumn.setName("ATTR1");
- myColumn.setPrimaryKey(true);
myColumn.setType(NdbDictionary::Column::Unsigned);
myColumn.setLength(1);
+ myColumn.setPrimaryKey(true);
myColumn.setNullable(false);
myTable.addColumn(myColumn);
myColumn.setName("ATTR2");
- myColumn.setPrimaryKey(false);
myColumn.setType(NdbDictionary::Column::Unsigned);
myColumn.setLength(1);
+ myColumn.setPrimaryKey(false);
myColumn.setNullable(false);
myTable.addColumn(myColumn);
diff --git a/ndb/examples/ndbapi_example2/ndbapi_example2.cpp b/ndb/examples/ndbapi_example2/ndbapi_example2.cpp
index 1c61721c037..95a7bae66b8 100644
--- a/ndb/examples/ndbapi_example2/ndbapi_example2.cpp
+++ b/ndb/examples/ndbapi_example2/ndbapi_example2.cpp
@@ -39,6 +39,7 @@ static void callback(int result, NdbConnection* NdbObject, void* aObject);
int main()
{
+ ndb_init();
Ndb* myNdb = new Ndb( "TEST_DB_2" ); // Object representing the database
NdbConnection* myNdbConnection[2]; // For transactions
diff --git a/ndb/examples/ndbapi_example3/ndbapi_example3.cpp b/ndb/examples/ndbapi_example3/ndbapi_example3.cpp
index 36d2cf1608c..91d9ff122ba 100644
--- a/ndb/examples/ndbapi_example3/ndbapi_example3.cpp
+++ b/ndb/examples/ndbapi_example3/ndbapi_example3.cpp
@@ -176,6 +176,7 @@ int executeInsertTransaction(int transactionId, Ndb* myNdb) {
int main()
{
+ ndb_init();
Ndb* myNdb = new Ndb( "TEST_DB_1" ); // Object representing the database
/*******************************************
diff --git a/ndb/examples/ndbapi_example4/ndbapi_example4.cpp b/ndb/examples/ndbapi_example4/ndbapi_example4.cpp
index 520172b9b0c..fcb770d49e9 100644
--- a/ndb/examples/ndbapi_example4/ndbapi_example4.cpp
+++ b/ndb/examples/ndbapi_example4/ndbapi_example4.cpp
@@ -44,6 +44,7 @@
int main()
{
+ ndb_init();
Ndb* myNdb = new Ndb( "TEST_DB_1" ); // Object representing the database
NdbDictionary::Table myTable;
NdbDictionary::Column myColumn;
@@ -79,16 +80,16 @@ int main()
myTable.setName("MYTABLENAME");
myColumn.setName("ATTR1");
- myColumn.setPrimaryKey(true);
myColumn.setType(NdbDictionary::Column::Unsigned);
myColumn.setLength(1);
+ myColumn.setPrimaryKey(true);
myColumn.setNullable(false);
myTable.addColumn(myColumn);
myColumn.setName("ATTR2");
- myColumn.setPrimaryKey(false);
myColumn.setType(NdbDictionary::Column::Unsigned);
myColumn.setLength(1);
+ myColumn.setPrimaryKey(false);
myColumn.setNullable(false);
myTable.addColumn(myColumn);
diff --git a/ndb/examples/ndbapi_example5/ndbapi_example5.cpp b/ndb/examples/ndbapi_example5/ndbapi_example5.cpp
index a9d3099883c..77f74e7bb63 100644
--- a/ndb/examples/ndbapi_example5/ndbapi_example5.cpp
+++ b/ndb/examples/ndbapi_example5/ndbapi_example5.cpp
@@ -65,6 +65,7 @@ int myCreateEvent(Ndb* myNdb,
int main()
{
+ ndb_init();
Ndb* myNdb = myCreateNdb();
NdbDictionary::Dictionary *myDict;
diff --git a/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp b/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp
index 7c3a66326c6..22641bc5b57 100644
--- a/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp
+++ b/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp
@@ -47,9 +47,9 @@
*
* NdbDictionary::Column
* setName()
- * setPrimaryKey()
* setType()
* setLength()
+ * setPrimaryKey()
* setNullable()
*
* NdbDictionary::Table
@@ -165,24 +165,24 @@ int create_table(Ndb * myNdb)
myTable.setName("GARAGE");
myColumn.setName("REG_NO");
- myColumn.setPrimaryKey(true);
myColumn.setType(NdbDictionary::Column::Unsigned);
myColumn.setLength(1);
+ myColumn.setPrimaryKey(true);
myColumn.setNullable(false);
myTable.addColumn(myColumn);
myColumn.setName("BRAND");
- myColumn.setPrimaryKey(false);
myColumn.setType(NdbDictionary::Column::Char);
myColumn.setLength(20);
+ myColumn.setPrimaryKey(false);
myColumn.setNullable(false);
myTable.addColumn(myColumn);
myColumn.setName("COLOR");
- myColumn.setPrimaryKey(false);
myColumn.setType(NdbDictionary::Column::Char);
myColumn.setLength(20);
+ myColumn.setPrimaryKey(false);
myColumn.setNullable(false);
myTable.addColumn(myColumn);
@@ -761,6 +761,7 @@ int scan_print(Ndb * myNdb, int parallelism,
int main()
{
+ ndb_init();
Ndb* myNdb = new Ndb( "TEST_DB" ); // Object representing the database
@@ -813,4 +814,3 @@ int main()
delete myNdb;
}
-
diff --git a/ndb/examples/select_all/select_all.cpp b/ndb/examples/select_all/select_all.cpp
index 3cdbdc47e62..bd25fb60128 100644
--- a/ndb/examples/select_all/select_all.cpp
+++ b/ndb/examples/select_all/select_all.cpp
@@ -112,6 +112,7 @@ const char* ResultSetContainer::getAttrName(int i) const {return m_names[i];}
int main(int argc, const char** argv)
{
+ ndb_init();
Ndb* myNdb = new Ndb("ndbapi_example4"); // Object representing the database
NdbConnection* myNdbConnection; // For transactions
NdbOperation* myNdbOperation; // For operations
diff --git a/ndb/include/debugger/EventLogger.hpp b/ndb/include/debugger/EventLogger.hpp
index 6cd6a83e68d..686989089ae 100644
--- a/ndb/include/debugger/EventLogger.hpp
+++ b/ndb/include/debugger/EventLogger.hpp
@@ -24,6 +24,32 @@
#include <kernel/LogLevel.hpp>
#include <signaldata/EventReport.hpp>
+class EventLoggerBase {
+public:
+ virtual ~EventLoggerBase();
+
+ /**
+ * LogLevel settings
+ */
+ LogLevel m_logLevel;
+
+ /**
+ * This matrix defines which event should be printed when
+ *
+ * threshold - is in range [0-15]
+ * severity - DEBUG to ALERT (Type of log message)
+ */
+ struct EventRepLogLevelMatrix {
+ EventReport::EventType eventType;
+ LogLevel::EventCategory eventCategory;
+ Uint32 threshold;
+ Logger::LoggerLevel severity;
+ };
+
+ static const EventRepLogLevelMatrix matrix[];
+ static const Uint32 matrixSize;
+};
+
/**
* The EventLogger is primarily used for logging NDB events
* in the Management Server. It inherits all logging functionality of Logger.
@@ -58,7 +84,7 @@
* @see Logger
* @version #@ $Id: EventLogger.hpp,v 1.3 2003/09/01 10:15:52 innpeno Exp $
*/
-class EventLogger : public Logger
+class EventLogger : public EventLoggerBase, public Logger
{
public:
/**
@@ -70,7 +96,7 @@ public:
/**
* Destructor.
*/
- ~EventLogger();
+ virtual ~EventLogger();
/**
* Opens/creates the eventlog with the specified filename.
@@ -95,47 +121,13 @@ public:
/**
* Logs the NDB event.
*
- * @param nodeId the node id of event origin.
- * @param eventType the type of event.
- * @param theData the event data.
- * @deprecated use log(int eventType, const Uint32* theData, NodeId nodeId)
- */
- void log(NodeId nodeId, int eventType, const Uint32* theData);
-
- /**
- * Logs the NDB event.
- *
* @param eventType the type of event.
* @param theData the event data.
* @param nodeId the node id of event origin.
*/
- void log(int eventType, const Uint32* theData, NodeId nodeId = 0);
-
- /**
- * Returns the current log levels.
- * Enable, disable log levels to filter the events that are sent to the
- * eventlog.
- *
- * @return the log level.
- */
- LogLevel& getLoglevel();
+ virtual void log(int, const Uint32*, NodeId = 0,const class LogLevel * = 0);
/**
- * Returns the log level that is used to filter an event. The event will not
- * be logged unless its event category's log level is <= levelFilter.
- *
- * @return the log level filter that is used for all event categories.
- */
- int getFilterLevel() const;
- /**
- * Sets log level filter. The event will be logged if
- * the event category's log level is <= 'filterLevel'.
- *
- * @param level the log level to filter.
- */
- void setFilterLevel(int filterLevel);
-
- /**
* Returns the event text for the specified event report type.
*
* @param type the event type.
@@ -143,72 +135,25 @@ public:
* @param nodeId a node id.
* @return the event report text.
*/
- static const char* getText(int type,
+ static const char* getText(char * dst, size_t dst_len,
+ int type,
const Uint32* theData, NodeId nodeId = 0);
-
- /**
- * Find a category matching the string
- *
- * @param str string to match.
- * @param cat the event category.
- * @param exactMatch only do exact matching.
- *
- * @return TRUE if match is found, then cat is modified
- * FALSE if match is not found
- */
- static bool matchEventCategory(const char * str,
- LogLevel::EventCategory * cat,
- bool exactMatch = false);
/**
- * Returns category name or NULL if not found.
+ * Returns the log level that is used to filter an event. The event will not
+ * be logged unless its event category's log level is <= levelFilter.
*
- * @param cat the event category.
- * @return category name.
- */
- static const char * getEventCategoryName(LogLevel::EventCategory cat);
-
- /**
- * Specifies allowed event categories/log levels.
+ * @return the log level filter that is used for all event categories.
*/
- struct EventCategoryName {
- LogLevel::EventCategory category;
- const char * name;
- };
-
- static const EventCategoryName eventCategoryNames[];
- static const Uint32 noOfEventCategoryNames;
-
- /**
- * This matrix defines which event should be printed when
- *
- * threshold - is in range [0-15]
- * severity - DEBUG to ALERT (Type of log message)
- */
- struct EventRepLogLevelMatrix {
- EventReport::EventType eventType;
- LogLevel::EventCategory eventCategory;
- Uint32 threshold;
- Logger::LoggerLevel severity;
- };
-
- static const EventRepLogLevelMatrix matrix[];
+ int getFilterLevel() const;
/**
- * Default log levels for management nodes.
+ * Sets log level filter. The event will be logged if
+ * the event category's log level is <= 'filterLevel'.
*
- * threshold - is in range [0-15]
+ * @param level the log level to filter.
*/
- struct EventLogMatrix {
- LogLevel::EventCategory eventCategory;
- Uint32 threshold;
- };
-
- static const EventLogMatrix defEventLogMatrix[];
-
-
- static const Uint32 matrixSize;
- static const Uint32 defEventLogMatrixSize;
+ void setFilterLevel(int filterLevel);
private:
/** Prohibit */
@@ -216,11 +161,10 @@ private:
EventLogger operator = (const EventLogger&);
bool operator == (const EventLogger&);
- LogLevel m_logLevel;
Uint32 m_filterLevel;
STATIC_CONST(MAX_TEXT_LENGTH = 256);
- static char m_text[MAX_TEXT_LENGTH];
+ char m_text[MAX_TEXT_LENGTH];
};
diff --git a/ndb/include/kernel/LogLevel.hpp b/ndb/include/kernel/LogLevel.hpp
index 10cd0d43bee..52c2f70cda8 100644
--- a/ndb/include/kernel/LogLevel.hpp
+++ b/ndb/include/kernel/LogLevel.hpp
@@ -45,81 +45,30 @@ public:
* Copy operator
*/
LogLevel & operator= (const LogLevel &);
-
- static const Uint32 MIN_LOGLEVEL_ID = CFG_LOGLEVEL_STARTUP;
-
- enum EventCategory {
- /**
- * Events during all kind of startups
- */
- llStartUp = CFG_LOGLEVEL_STARTUP - MIN_LOGLEVEL_ID,
-
- /**
- * Events during shutdown
- */
- llShutdown = CFG_LOGLEVEL_SHUTDOWN - MIN_LOGLEVEL_ID,
-
- /**
- * Transaction statistics
- * Job level
- * TCP/IP speed
- */
- llStatistic = CFG_LOGLEVEL_STATISTICS - MIN_LOGLEVEL_ID,
-
- /**
- * Checkpoints
- */
- llCheckpoint = CFG_LOGLEVEL_CHECKPOINT - MIN_LOGLEVEL_ID,
-
- /**
- * Events during node restart
- */
- llNodeRestart = CFG_LOGLEVEL_NODERESTART - MIN_LOGLEVEL_ID,
-
- /**
- * Events related to connection / communication
- */
- llConnection = CFG_LOGLEVEL_CONNECTION - MIN_LOGLEVEL_ID,
-
- /**
- * Assorted event w.r.t unexpected happenings
- */
- llError = CFG_LOGLEVEL_ERROR - MIN_LOGLEVEL_ID,
-
- /**
- * Assorted event w.r.t warning
- */
- llWarning = CFG_LOGLEVEL_WARNING - MIN_LOGLEVEL_ID,
-
- /**
- * Assorted event w.r.t information
- */
- llInfo = CFG_LOGLEVEL_INFO - MIN_LOGLEVEL_ID,
-
- /**
- * Events related to global replication
- */
- llGrep = CFG_LOGLEVEL_GREP - MIN_LOGLEVEL_ID
- };
- struct LogLevelCategoryName {
- const char* name;
+ enum EventCategory {
+ llStartUp = CFG_LOGLEVEL_STARTUP - CFG_MIN_LOGLEVEL,
+ llShutdown = CFG_LOGLEVEL_SHUTDOWN - CFG_MIN_LOGLEVEL,
+ llStatistic = CFG_LOGLEVEL_STATISTICS - CFG_MIN_LOGLEVEL,
+ llCheckpoint = CFG_LOGLEVEL_CHECKPOINT - CFG_MIN_LOGLEVEL,
+ llNodeRestart = CFG_LOGLEVEL_NODERESTART - CFG_MIN_LOGLEVEL,
+ llConnection = CFG_LOGLEVEL_CONNECTION - CFG_MIN_LOGLEVEL,
+ llInfo = CFG_LOGLEVEL_INFO - CFG_MIN_LOGLEVEL,
+ llWarning = CFG_LOGLEVEL_WARNING - CFG_MIN_LOGLEVEL,
+ llError = CFG_LOGLEVEL_ERROR - CFG_MIN_LOGLEVEL,
+ llGrep = CFG_LOGLEVEL_GREP - CFG_MIN_LOGLEVEL,
+ llDebug = CFG_LOGLEVEL_DEBUG - CFG_MIN_LOGLEVEL
+ ,llBackup = CFG_LOGLEVEL_BACKUP - CFG_MIN_LOGLEVEL
};
/**
- * Log/event level category names. Remember to update the names whenever
- * a new category is added.
- */
- static const LogLevelCategoryName LOGLEVEL_CATEGORY_NAME[];
-
- /**
* No of categories
*/
-#define _LOGLEVEL_CATEGORIES 10
+#define _LOGLEVEL_CATEGORIES (CFG_MAX_LOGLEVEL - CFG_MIN_LOGLEVEL + 1);
static const Uint32 LOGLEVEL_CATEGORIES = _LOGLEVEL_CATEGORIES;
-
+
void clear();
-
+
/**
* Note level is valid as 0-15
*/
@@ -130,26 +79,33 @@ public:
*/
Uint32 getLogLevel(EventCategory ec) const;
+ /**
+ * Set this= max(this, ll) per category
+ */
+ LogLevel& set_max(const LogLevel& ll);
+
+ bool operator==(const LogLevel& l) const {
+ return memcmp(this, &l, sizeof(* this)) == 0;
+ }
+
+ LogLevel& operator=(const class EventSubscribeReq & req);
+
private:
/**
* The actual data
*/
- Uint32 logLevelData[LOGLEVEL_CATEGORIES];
-
- LogLevel(const LogLevel &);
+ Uint8 logLevelData[LOGLEVEL_CATEGORIES];
};
inline
LogLevel::LogLevel(){
- clear();
+ clear();
}
inline
LogLevel &
LogLevel::operator= (const LogLevel & org){
- for(Uint32 i = 0; i<LOGLEVEL_CATEGORIES; i++){
- logLevelData[i] = org.logLevelData[i];
- }
+ memcpy(logLevelData, org.logLevelData, sizeof(logLevelData));
return * this;
}
@@ -165,7 +121,7 @@ inline
void
LogLevel::setLogLevel(EventCategory ec, Uint32 level){
assert(ec >= 0 && (Uint32) ec < LOGLEVEL_CATEGORIES);
- logLevelData[ec] = level;
+ logLevelData[ec] = (Uint8)level;
}
inline
@@ -173,8 +129,30 @@ Uint32
LogLevel::getLogLevel(EventCategory ec) const{
assert(ec >= 0 && (Uint32) ec < LOGLEVEL_CATEGORIES);
- return logLevelData[ec];
+ return (Uint32)logLevelData[ec];
+}
+
+inline
+LogLevel &
+LogLevel::set_max(const LogLevel & org){
+ for(Uint32 i = 0; i<LOGLEVEL_CATEGORIES; i++){
+ if(logLevelData[i] < org.logLevelData[i])
+ logLevelData[i] = org.logLevelData[i];
+ }
+ return * this;
}
+#include <signaldata/EventSubscribeReq.hpp>
+
+inline
+LogLevel&
+LogLevel::operator=(const EventSubscribeReq& req)
+{
+ clear();
+ for(size_t i = 0; i<req.noOfEntries; i++){
+ logLevelData[(req.theData[i] >> 16)] = req.theData[i] & 0xFFFF;
+ }
+ return * this;
+}
#endif
diff --git a/ndb/include/kernel/signaldata/CreateTable.hpp b/ndb/include/kernel/signaldata/CreateTable.hpp
index 424367f28d5..67e510d2ed0 100644
--- a/ndb/include/kernel/signaldata/CreateTable.hpp
+++ b/ndb/include/kernel/signaldata/CreateTable.hpp
@@ -89,7 +89,8 @@ public:
ArraySizeTooBig = 737,
RecordTooBig = 738,
InvalidPrimaryKeySize = 739,
- NullablePrimaryKey = 740
+ NullablePrimaryKey = 740,
+ InvalidCharset = 743
};
private:
diff --git a/ndb/include/kernel/signaldata/DictTabInfo.hpp b/ndb/include/kernel/signaldata/DictTabInfo.hpp
index dec7145c897..a9a50f19fbc 100644
--- a/ndb/include/kernel/signaldata/DictTabInfo.hpp
+++ b/ndb/include/kernel/signaldata/DictTabInfo.hpp
@@ -438,8 +438,8 @@ public:
case DictTabInfo::ExtText:
AttributeType = DictTabInfo::StringType;
AttributeSize = DictTabInfo::an8Bit;
- // head + inline part [ attr precision ]
- AttributeArraySize = (NDB_BLOB_HEAD_SIZE << 2) + AttributeExtPrecision;
+ // head + inline part [ attr precision lower half ]
+ AttributeArraySize = (NDB_BLOB_HEAD_SIZE << 2) + (AttributeExtPrecision & 0xFFFF);
return true;
};
return false;
diff --git a/ndb/include/kernel/signaldata/EventReport.hpp b/ndb/include/kernel/signaldata/EventReport.hpp
index b6106bb0ca4..1ad6e1bf7ac 100644
--- a/ndb/include/kernel/signaldata/EventReport.hpp
+++ b/ndb/include/kernel/signaldata/EventReport.hpp
@@ -135,12 +135,17 @@ public:
//GREP
GrepSubscriptionInfo = 52,
- GrepSubscriptionAlert = 53
- };
+ GrepSubscriptionAlert = 53,
+ //BACKUP
+ BackupStarted = 54,
+ BackupFailedToStart = 55,
+ BackupCompleted = 56,
+ BackupAborted = 57
+ };
+
void setEventType(EventType type);
EventType getEventType() const;
-private:
UintR eventType; // DATA 0
};
diff --git a/ndb/include/kernel/signaldata/EventSubscribeReq.hpp b/ndb/include/kernel/signaldata/EventSubscribeReq.hpp
index fd2821ea31d..84a1717b1de 100644
--- a/ndb/include/kernel/signaldata/EventSubscribeReq.hpp
+++ b/ndb/include/kernel/signaldata/EventSubscribeReq.hpp
@@ -27,7 +27,7 @@
* RECIVER: SimBlockCMCtrBlck
*/
-class EventSubscribeReq {
+struct EventSubscribeReq {
/**
* Receiver(s)
*/
@@ -38,9 +38,8 @@ class EventSubscribeReq {
*/
friend class MgmtSrvr;
-public:
- STATIC_CONST( SignalLength = 22 );
-private:
+ STATIC_CONST( SignalLength = 2 + LogLevel::LOGLEVEL_CATEGORIES );
+
/**
* Note: If you use the same blockRef as you have used earlier,
* you update your ongoing subscription
@@ -53,8 +52,15 @@ private:
*/
Uint32 noOfEntries;
- Uint32 theCategories[10];
- Uint32 theLevels[10];
+ Uint32 theData[LogLevel::LOGLEVEL_CATEGORIES];
+
+ EventSubscribeReq& operator= (const LogLevel& ll){
+ noOfEntries = LogLevel::LOGLEVEL_CATEGORIES;
+ for(size_t i = 0; i<noOfEntries; i++){
+ theData[i] = (i << 16) | ll.getLogLevel((LogLevel::EventCategory)i);
+ }
+ return * this;
+ }
};
#endif
diff --git a/ndb/include/kernel/signaldata/LqhFrag.hpp b/ndb/include/kernel/signaldata/LqhFrag.hpp
index 116e9c01ca0..13dfafcc653 100644
--- a/ndb/include/kernel/signaldata/LqhFrag.hpp
+++ b/ndb/include/kernel/signaldata/LqhFrag.hpp
@@ -130,7 +130,7 @@ private:
Uint32 keyLength;
Uint32 nextLCP;
Uint32 noOfKeyAttr;
- Uint32 noOfNewAttr;
+ Uint32 noOfNewAttr; // noOfCharsets in upper half
Uint32 checksumIndicator;
Uint32 noOfAttributeGroups;
Uint32 GCPIndicator;
diff --git a/ndb/include/kernel/signaldata/SetLogLevelOrd.hpp b/ndb/include/kernel/signaldata/SetLogLevelOrd.hpp
index c3be808cc41..2923029f8f6 100644
--- a/ndb/include/kernel/signaldata/SetLogLevelOrd.hpp
+++ b/ndb/include/kernel/signaldata/SetLogLevelOrd.hpp
@@ -18,6 +18,7 @@
#define SET_LOGLEVEL_ORD_HPP
#include <LogLevel.hpp>
+#include "EventSubscribeReq.hpp"
#include "SignalData.hpp"
/**
@@ -39,11 +40,10 @@ class SetLogLevelOrd {
friend class NodeLogLevel;
private:
- STATIC_CONST( SignalLength = 25 );
-
+ STATIC_CONST( SignalLength = 1 + LogLevel::LOGLEVEL_CATEGORIES );
+
Uint32 noOfEntries;
- Uint32 theCategories[12];
- Uint32 theLevels[12];
+ Uint32 theData[LogLevel::LOGLEVEL_CATEGORIES];
void clear();
@@ -51,6 +51,22 @@ private:
* Note level is valid as 0-15
*/
void setLogLevel(LogLevel::EventCategory ec, int level = 7);
+
+ SetLogLevelOrd& operator= (const LogLevel& ll){
+ noOfEntries = LogLevel::LOGLEVEL_CATEGORIES;
+ for(size_t i = 0; i<noOfEntries; i++){
+ theData[i] = (i << 16) | ll.getLogLevel((LogLevel::EventCategory)i);
+ }
+ return * this;
+ }
+
+ SetLogLevelOrd& operator= (const EventSubscribeReq& ll){
+ noOfEntries = ll.noOfEntries;
+ for(size_t i = 0; i<noOfEntries; i++){
+ theData[i] = ll.theData[i];
+ }
+ return * this;
+ }
};
inline
@@ -62,9 +78,7 @@ SetLogLevelOrd::clear(){
inline
void
SetLogLevelOrd::setLogLevel(LogLevel::EventCategory ec, int level){
- assert(noOfEntries < 12);
- theCategories[noOfEntries] = ec;
- theLevels[noOfEntries] = level;
+ theData[noOfEntries] = (ec << 16) | level;
noOfEntries++;
}
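Note on the SetLogLevelOrd/EventSubscribeReq change above: the old parallel theCategories[]/theLevels[] arrays are replaced by one packed word per entry, category in the upper 16 bits and level (0-15) in the lower 16. A minimal standalone C++ sketch of that packing, for illustration only (not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t category = 3;                         // e.g. LogLevel::llCheckpoint
      uint32_t level    = 7;                         // valid range is 0-15
      uint32_t entry    = (category << 16) | level;  // pack, as in operator= above
      assert((entry >> 16)    == category);          // unpack category
      assert((entry & 0xFFFF) == level);             // unpack level
      return 0;
    }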
diff --git a/ndb/include/kernel/signaldata/TupFrag.hpp b/ndb/include/kernel/signaldata/TupFrag.hpp
index c0ce22651aa..c1e861c5dff 100644
--- a/ndb/include/kernel/signaldata/TupFrag.hpp
+++ b/ndb/include/kernel/signaldata/TupFrag.hpp
@@ -119,12 +119,13 @@ class TupAddAttrReq {
friend class Dblqh;
friend class Dbtux;
public:
- STATIC_CONST( SignalLength = 4 );
+ STATIC_CONST( SignalLength = 5 );
private:
Uint32 tupConnectPtr;
Uint32 notused1;
Uint32 attrId;
Uint32 attrDescriptor;
+ Uint32 extTypeInfo;
};
class TupAddAttrConf {
@@ -141,6 +142,10 @@ class TupAddAttrRef {
friend class Dbtup;
public:
STATIC_CONST( SignalLength = 2 );
+ enum ErrorCode {
+ NoError = 0,
+ InvalidCharset = 743
+ };
private:
Uint32 userPtr;
Uint32 errorCode;
@@ -178,7 +183,8 @@ public:
STATIC_CONST( SignalLength = 2 );
enum ErrorCode {
NoError = 0,
- InvalidAttributeType = 831,
+ InvalidAttributeType = 742,
+ InvalidCharset = 743,
InvalidNodeSize = 832
};
private:
diff --git a/ndb/include/mgmapi/mgmapi.h b/ndb/include/mgmapi/mgmapi.h
index 929f4b0833b..44307c3e73c 100644
--- a/ndb/include/mgmapi/mgmapi.h
+++ b/ndb/include/mgmapi/mgmapi.h
@@ -56,24 +56,6 @@ extern "C" {
#endif
/**
- * Format of statistical information from the NDB Cluster.
- * STATISTIC_LINE is sent on the statistical port from the Management server,
- * each line is timestamped with STATISTIC_DATE.
- */
-#define STATISTIC_LINE "date=%s epochsecs=%d nodeid=%u trans=%u commit=%u " \
- "read=%u insert=%u attrinfo=%u cops=%u abort=%u"
- /**
- * Format of statistical information from the NDB Cluster.
- * STATISTIC_LINE is sent on the statistical port from the Management server,
- * each line is timestamped with STATISTIC_DATE.
- */
-#define STATISTIC_DATE "%d-%.2d-%.2d/%.2d:%.2d:%.2d"
- /**
- * Format of statistical information from the NDB Cluster.
- */
-#define OP_STATISTIC_LINE "date=%s epochsecs=%d nodeid=%d operations=%u"
-
- /**
* The NdbMgmHandle.
*/
typedef struct ndb_mgm_handle * NdbMgmHandle;
@@ -272,19 +254,35 @@ extern "C" {
* Log categories
*/
enum ndb_mgm_event_category {
- NDB_MGM_EVENT_CATEGORY_STARTUP, ///< Events during all kinds
- ///< of startups
- NDB_MGM_EVENT_CATEGORY_SHUTDOWN, ///< Events during shutdown
- NDB_MGM_EVENT_CATEGORY_STATISTIC, ///< Transaction statistics
- ///< (Job level, TCP/IP speed)
- NDB_MGM_EVENT_CATEGORY_CHECKPOINT, ///< Checkpoints
- NDB_MGM_EVENT_CATEGORY_NODE_RESTART, ///< Events during node restart
- NDB_MGM_EVENT_CATEGORY_CONNECTION, ///< Events related to connection
- ///< and communication
- NDB_MGM_EVENT_CATEGORY_ERROR ///< Assorted event w.r.t.
- ///< unexpected happenings
+ NDB_MGM_ILLEGAL_EVENT_CATEGORY = -1, ///< Invalid
+ /**
+ * Events during all kinds of startups
+ */
+ NDB_MGM_EVENT_CATEGORY_STARTUP = CFG_LOGLEVEL_STARTUP,
+
+ /**
+ * Events during shutdown
+ */
+ NDB_MGM_EVENT_CATEGORY_SHUTDOWN = CFG_LOGLEVEL_SHUTDOWN,
+
+ /**
+ * Transaction statistics (Job level, TCP/IP speed)
+ */
+ NDB_MGM_EVENT_CATEGORY_STATISTIC = CFG_LOGLEVEL_STATISTICS,
+ NDB_MGM_EVENT_CATEGORY_CHECKPOINT = CFG_LOGLEVEL_CHECKPOINT,
+ NDB_MGM_EVENT_CATEGORY_NODE_RESTART = CFG_LOGLEVEL_NODERESTART,
+ NDB_MGM_EVENT_CATEGORY_CONNECTION = CFG_LOGLEVEL_CONNECTION,
+ NDB_MGM_EVENT_CATEGORY_DEBUG = CFG_LOGLEVEL_DEBUG,
+ NDB_MGM_EVENT_CATEGORY_INFO = CFG_LOGLEVEL_INFO,
+ NDB_MGM_EVENT_CATEGORY_WARNING = CFG_LOGLEVEL_WARNING,
+ NDB_MGM_EVENT_CATEGORY_ERROR = CFG_LOGLEVEL_ERROR,
+ NDB_MGM_EVENT_CATEGORY_GREP = CFG_LOGLEVEL_GREP,
+ NDB_MGM_EVENT_CATEGORY_BACKUP = CFG_LOGLEVEL_BACKUP,
+
+ NDB_MGM_MIN_EVENT_CATEGORY = CFG_MIN_LOGLEVEL,
+ NDB_MGM_MAX_EVENT_CATEGORY = CFG_MAX_LOGLEVEL
};
-
+
/***************************************************************************/
/**
* @name Functions: Error Handling
@@ -420,6 +418,9 @@ extern "C" {
*/
const char * ndb_mgm_get_node_status_string(enum ndb_mgm_node_status status);
+ ndb_mgm_event_category ndb_mgm_match_event_category(const char *);
+ const char * ndb_mgm_get_event_category_string(enum ndb_mgm_event_category);
+
/** @} *********************************************************************/
/**
* @name Functions: State of cluster
@@ -580,8 +581,7 @@ extern "C" {
*/
int ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle,
int nodeId,
- /*enum ndb_mgm_event_category category*/
- char * category,
+ enum ndb_mgm_event_category category,
int level,
struct ndb_mgm_reply* reply);
@@ -597,8 +597,7 @@ extern "C" {
*/
int ndb_mgm_set_loglevel_node(NdbMgmHandle handle,
int nodeId,
- /*enum ndb_mgm_event_category category*/
- char * category,
+ enum ndb_mgm_event_category category,
int level,
struct ndb_mgm_reply* reply);
@@ -670,6 +669,15 @@ extern "C" {
struct ndb_mgm_reply* reply);
/**
+ * Listen event
+ *
+ * @param filter pairs of { level, category } that will be
+ * pushed to fd, level=0 ends lists
+ * @return fd which events will be pushed to
+ */
+ int ndb_mgm_listen_event(NdbMgmHandle handle, int filter[]);
+
+ /**
* Get configuration
* @param handle NDB management handle.
* @param version Version of configuration, 0 means latest
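Usage sketch for the new ndb_mgm_listen_event() declared in the mgmapi.h hunk above. It is based only on the prototype and its comment (the filter is pairs of { level, category } terminated by level=0); the include path and the idea that the returned fd is then read by the caller are assumptions, not taken from the patch:

    #include <mgmapi.h>

    /* Illustration only: subscribe to cluster log events on an already
     * connected management handle and return the fd events are pushed to. */
    static int subscribe_events(NdbMgmHandle h) {
      int filter[] = {
        15, NDB_MGM_EVENT_CATEGORY_BACKUP,   /* backup events up to level 15   */
        7,  NDB_MGM_EVENT_CATEGORY_STARTUP,  /* startup events up to level 7   */
        0                                    /* level = 0 terminates the list  */
      };
      return ndb_mgm_listen_event(h, filter);
    }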
diff --git a/ndb/include/mgmapi/mgmapi_config_parameters.h b/ndb/include/mgmapi/mgmapi_config_parameters.h
index 4a4863298dd..6a0cd376355 100644
--- a/ndb/include/mgmapi/mgmapi_config_parameters.h
+++ b/ndb/include/mgmapi/mgmapi_config_parameters.h
@@ -1,7 +1,6 @@
#ifndef MGMAPI_CONFIG_PARAMTERS_H
#define MGMAPI_CONFIG_PARAMTERS_H
-
#define CFG_SYS_NAME 3
#define CFG_SYS_PRIMARY_MGM_NODE 1
#define CFG_SYS_CONFIG_GENERATION 2
@@ -64,16 +63,6 @@
#define CFG_DB_BACKUP_LOG_BUFFER_MEM 135
#define CFG_DB_BACKUP_WRITE_SIZE 136
-#define CFG_LOGLEVEL_STARTUP 137
-#define CFG_LOGLEVEL_SHUTDOWN 138
-#define CFG_LOGLEVEL_STATISTICS 139
-#define CFG_LOGLEVEL_CHECKPOINT 140
-#define CFG_LOGLEVEL_NODERESTART 141
-#define CFG_LOGLEVEL_CONNECTION 142
-#define CFG_LOGLEVEL_INFO 143
-#define CFG_LOGLEVEL_WARNING 144
-#define CFG_LOGLEVEL_ERROR 145
-#define CFG_LOGLEVEL_GREP 146
#define CFG_LOG_DESTINATION 147
#define CFG_DB_DISCLESS 148
@@ -95,6 +84,21 @@
#define CFG_NODE_ARBIT_RANK 200
#define CFG_NODE_ARBIT_DELAY 201
+#define CFG_MIN_LOGLEVEL 250
+#define CFG_LOGLEVEL_STARTUP 250
+#define CFG_LOGLEVEL_SHUTDOWN 251
+#define CFG_LOGLEVEL_STATISTICS 252
+#define CFG_LOGLEVEL_CHECKPOINT 253
+#define CFG_LOGLEVEL_NODERESTART 254
+#define CFG_LOGLEVEL_CONNECTION 255
+#define CFG_LOGLEVEL_INFO 256
+#define CFG_LOGLEVEL_WARNING 257
+#define CFG_LOGLEVEL_ERROR 258
+#define CFG_LOGLEVEL_GREP 259
+#define CFG_LOGLEVEL_DEBUG 260
+#define CFG_LOGLEVEL_BACKUP 261
+#define CFG_MAX_LOGLEVEL 261
+
#define CFG_MGM_PORT 300
#define CFG_CONNECTION_NODE_1 400
@@ -104,9 +108,9 @@
#define CFG_CONNECTION_NODE_1_SYSTEM 404
#define CFG_CONNECTION_NODE_2_SYSTEM 405
#define CFG_CONNECTION_SERVER_PORT 406
+#define CFG_CONNECTION_HOSTNAME_1 407
+#define CFG_CONNECTION_HOSTNAME_2 408
-#define CFG_TCP_HOSTNAME_1 450
-#define CFG_TCP_HOSTNAME_2 451
#define CFG_TCP_SERVER 452
#define CFG_TCP_SEND_BUFFER_SIZE 454
#define CFG_TCP_RECEIVE_BUFFER_SIZE 455
@@ -117,19 +121,13 @@
#define CFG_SHM_KEY 502
#define CFG_SHM_BUFFER_MEM 503
-#define CFG_SCI_ID_0 550
-#define CFG_SCI_ID_1 551
-#define CFG_SCI_SEND_LIMIT 552
-#define CFG_SCI_BUFFER_MEM 553
-#define CFG_SCI_NODE1_ADAPTERS 554
-#define CFG_SCI_NODE1_ADAPTER0 555
-#define CFG_SCI_NODE1_ADAPTER1 556
-#define CFG_SCI_NODE2_ADAPTERS 554
-#define CFG_SCI_NODE2_ADAPTER0 555
-#define CFG_SCI_NODE2_ADAPTER1 556
-
-#define CFG_OSE_HOSTNAME_1 600
-#define CFG_OSE_HOSTNAME_2 601
+#define CFG_SCI_HOST1_ID_0 550
+#define CFG_SCI_HOST1_ID_1 551
+#define CFG_SCI_HOST2_ID_0 552
+#define CFG_SCI_HOST2_ID_1 553
+#define CFG_SCI_SEND_LIMIT 554
+#define CFG_SCI_BUFFER_MEM 555
+
#define CFG_OSE_PRIO_A_SIZE 602
#define CFG_OSE_PRIO_B_SIZE 603
#define CFG_OSE_RECEIVE_ARRAY_SIZE 604
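
Because CFG_MIN_LOGLEVEL and CFG_MAX_LOGLEVEL now bracket a contiguous id range, per-category handling can be written as a loop instead of one case per CFG_LOGLEVEL_* constant; a small sketch (the `levels` array and the default threshold are hypothetical):

    Uint32 levels[CFG_MAX_LOGLEVEL - CFG_MIN_LOGLEVEL + 1];
    for (Uint32 id = CFG_MIN_LOGLEVEL; id <= CFG_MAX_LOGLEVEL; id++)
      levels[id - CFG_MIN_LOGLEVEL] = 7;   /* illustrative default threshold */
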
diff --git a/ndb/include/mgmcommon/ConfigRetriever.hpp b/ndb/include/mgmcommon/ConfigRetriever.hpp
index f9f8904b65c..6c32255e921 100644
--- a/ndb/include/mgmcommon/ConfigRetriever.hpp
+++ b/ndb/include/mgmcommon/ConfigRetriever.hpp
@@ -28,19 +28,13 @@
*/
class ConfigRetriever {
public:
- ConfigRetriever(Uint32 version, Uint32 nodeType);
+ ConfigRetriever(LocalConfig &local_config, Uint32 version, Uint32 nodeType);
~ConfigRetriever();
- /**
- * Read local config
- * @return Own node id, -1 means fail
- */
- int init();
-
int do_connect(int exit_on_connect_failure= false);
/**
- * Get configuration for current (nodeId given in local config file) node.
+ * Get configuration for current node.
*
* Configuration is fetched from one MGM server configured in local config
* file. The method loops over all the configured MGM servers and tries
@@ -55,16 +49,6 @@ public:
const char * getErrorString();
/**
- * Sets connectstring which can be used instead of local config file
- */
- void setConnectString(const char * connectString);
-
- /**
- * Sets name of local config file (usually not needed)
- */
- void setLocalConfigFileName(const char * connectString);
-
- /**
* @return Node id of this node (as stated in local config or connectString)
*/
Uint32 allocNodeId();
@@ -83,6 +67,9 @@ public:
* Verify config
*/
bool verifyConfig(const struct ndb_mgm_configuration *, Uint32 nodeid);
+
+ Uint32 get_mgmd_port() const {return m_mgmd_port;};
+ const char *get_mgmd_host() const {return m_mgmd_host;};
private:
BaseString errorString;
enum ErrorType {
@@ -93,12 +80,11 @@ private:
void setError(ErrorType, const char * errorMsg);
- BaseString _localConfigFileName;
- struct LocalConfig _localConfig;
+ struct LocalConfig& _localConfig;
Uint32 _ownNodeId;
-
- BaseString m_connectString;
-
+ Uint32 m_mgmd_port;
+ const char *m_mgmd_host;
+
Uint32 m_version;
Uint32 m_node_type;
NdbMgmHandle m_handle;
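
A sketch of the new calling convention: the caller now owns the LocalConfig and passes it in. The version and node-type values, and the assumption that do_connect() returns 0 on success, are placeholders rather than facts from this header:

    LocalConfig local_config;            /* populated elsewhere, e.g. from a
                                            connectstring or local config file */
    Uint32 node_type = 0;                /* placeholder node type */
    ConfigRetriever cr(local_config, NDB_VERSION /* placeholder */, node_type);
    if (cr.do_connect() == 0) {          /* assuming 0 means success */
      Uint32 nodeid = cr.allocNodeId();
      ndbout_c("connected to %s:%u", cr.get_mgmd_host(), cr.get_mgmd_port());
    } else {
      ndbout_c("%s", cr.getErrorString());
    }
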
diff --git a/ndb/include/ndb_global.h b/ndb/include/ndb_global.h
index 0ae781ba5c2..3ce37a2edee 100644
--- a/ndb/include/ndb_global.h
+++ b/ndb/include/ndb_global.h
@@ -76,6 +76,10 @@ extern "C" {
#include <assert.h>
+/* call in main() - does not return on error */
+extern int ndb_init(void);
+extern void ndb_end(int);
+
#ifndef HAVE_STRDUP
extern char * strdup(const char *s);
#endif
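
A minimal sketch of the intended usage per the comment above:

    int main(int argc, char** argv)
    {
      ndb_init();           /* does not return on error */
      /* ... NDB API work ... */
      ndb_end(0);
      return 0;
    }
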
diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp
index 5c470c1d25f..51a6895648f 100644
--- a/ndb/include/ndbapi/NdbDictionary.hpp
+++ b/ndb/include/ndbapi/NdbDictionary.hpp
@@ -32,6 +32,8 @@
#include <ndb_types.h>
class Ndb;
+struct charset_info_st;
+typedef struct charset_info_st CHARSET_INFO;
/**
* @class NdbDictionary
@@ -257,6 +259,10 @@ public:
/**
* Set type of column
* @param type Type of column
+ *
+ * @note setType resets <em>all</em> column attributes
+ * to (type dependent) defaults and should be the first
+ * method to call. Default type is Unsigned.
*/
void setType(Type type);
@@ -302,27 +308,35 @@ public:
int getLength() const;
/**
+ * For Char or Varchar or Text, set or get MySQL CHARSET_INFO. This
+ * specifies both character set and collation. See get_charset()
+ * etc in MySQL. (The cs is not "const" in MySQL).
+ */
+ void setCharset(CHARSET_INFO* cs);
+ CHARSET_INFO* getCharset() const;
+
+ /**
* For blob, set or get "inline size" i.e. number of initial bytes
* to store in table's blob attribute. This part is normally in
* main memory and can be indexed and interpreted.
*/
- void setInlineSize(int size) { setPrecision(size); }
- int getInlineSize() const { return getPrecision(); }
+ void setInlineSize(int size);
+ int getInlineSize() const;
/**
* For blob, set or get "part size" i.e. number of bytes to store in
* each tuple of the "blob table". Can be set to zero to omit parts
* and to allow only inline bytes ("tinyblob").
*/
- void setPartSize(int size) { setScale(size); }
- int getPartSize() const { return getScale(); }
+ void setPartSize(int size);
+ int getPartSize() const;
/**
* For blob, set or get "stripe size" i.e. number of consecutive
* <em>parts</em> to store in each node group.
*/
- void setStripeSize(int size) { setLength(size); }
- int getStripeSize() const { return getLength(); }
+ void setStripeSize(int size);
+ int getStripeSize() const;
/**
* Get size of element
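
A short sketch of defining a character-typed column with the new calls; the collation name and the get_charset_by_name() lookup are illustrative (any MySQL charset lookup that yields a CHARSET_INFO* will do):

    NdbDictionary::Column col("name");
    col.setType(NdbDictionary::Column::Varchar);  /* first: resets other attributes */
    col.setLength(64);
    CHARSET_INFO* cs = get_charset_by_name("latin1_swedish_ci", MYF(0));
    col.setCharset(cs);                           /* character set + collation */
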
diff --git a/ndb/include/ndbapi/ndb_cluster_connection.hpp b/ndb/include/ndbapi/ndb_cluster_connection.hpp
index 59d5a038844..f8e6f25ce73 100644
--- a/ndb/include/ndbapi/ndb_cluster_connection.hpp
+++ b/ndb/include/ndbapi/ndb_cluster_connection.hpp
@@ -19,6 +19,7 @@
#define CLUSTER_CONNECTION_HPP
class TransporterFacade;
+class LocalConfig;
class ConfigRetriever;
class NdbThread;
@@ -37,6 +38,7 @@ private:
void connect_thread();
char *m_connect_string;
TransporterFacade *m_facade;
+ LocalConfig *m_local_config;
ConfigRetriever *m_config_retriever;
NdbThread *m_connect_thread;
int (*m_connect_callback)(void);
diff --git a/ndb/include/transporter/TransporterDefinitions.hpp b/ndb/include/transporter/TransporterDefinitions.hpp
index 445e8b889d2..a8da8068552 100644
--- a/ndb/include/transporter/TransporterDefinitions.hpp
+++ b/ndb/include/transporter/TransporterDefinitions.hpp
@@ -59,8 +59,6 @@ struct TCP_TransporterConfiguration {
NodeId localNodeId;
Uint32 sendBufferSize; // Size of SendBuffer of priority B
Uint32 maxReceiveSize; // Maximum no of bytes to receive
- Uint32 byteOrder;
- bool compression;
bool checksum;
bool signalId;
};
@@ -72,10 +70,8 @@ struct SHM_TransporterConfiguration {
Uint32 port;
NodeId remoteNodeId;
NodeId localNodeId;
- bool compression;
bool checksum;
bool signalId;
- int byteOrder;
Uint32 shmKey;
Uint32 shmSize;
@@ -89,10 +85,8 @@ struct OSE_TransporterConfiguration {
const char *localHostName;
NodeId remoteNodeId;
NodeId localNodeId;
- bool compression;
bool checksum;
bool signalId;
- int byteOrder;
Uint32 prioASignalSize;
Uint32 prioBSignalSize;
@@ -103,20 +97,20 @@ struct OSE_TransporterConfiguration {
* SCI Transporter Configuration
*/
struct SCI_TransporterConfiguration {
+ const char *remoteHostName;
+ const char *localHostName;
+ Uint32 port;
Uint32 sendLimit; // Packet size
Uint32 bufferSize; // Buffer size
Uint32 nLocalAdapters; // 1 or 2, the number of adapters on local host
- Uint32 nRemoteAdapters;
Uint32 remoteSciNodeId0; // SCInodeId for adapter 1
Uint32 remoteSciNodeId1; // SCInodeId for adapter 2
NodeId localNodeId; // Local node Id
NodeId remoteNodeId; // Remote node Id
- Uint32 byteOrder;
- bool compression;
bool checksum;
bool signalId;
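
Sketch of filling in the reworked SCI configuration, which is now addressed by hostname and port like TCP; all values are illustrative:

    SCI_TransporterConfiguration conf;
    conf.localHostName    = "ndb-host-1";
    conf.remoteHostName   = "ndb-host-2";
    conf.port             = 2202;
    conf.localNodeId      = 1;
    conf.remoteNodeId     = 2;
    conf.nLocalAdapters   = 1;
    conf.remoteSciNodeId0 = 8;           /* SCI node id for adapter 1 */
    conf.remoteSciNodeId1 = 0;
    conf.sendLimit        = 2048;        /* packet size */
    conf.bufferSize       = 1024 * 1024;
    conf.checksum         = true;
    conf.signalId         = true;
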
diff --git a/ndb/include/transporter/TransporterRegistry.hpp b/ndb/include/transporter/TransporterRegistry.hpp
index 3c6c307406c..ac6291f9e57 100644
--- a/ndb/include/transporter/TransporterRegistry.hpp
+++ b/ndb/include/transporter/TransporterRegistry.hpp
@@ -218,15 +218,18 @@ public:
void printState();
#endif
- unsigned short m_service_port;
-
+ class Transporter_interface {
+ public:
+ unsigned short m_service_port;
+ const char *m_interface;
+ };
+ Vector<Transporter_interface> m_transporter_interface;
+ void add_transporter_interface(const char *interface, unsigned short port);
protected:
private:
void * callbackObj;
- TransporterService *m_transporter_service;
- char *m_interface_name;
struct NdbThread *m_start_clients_thread;
bool m_run_start_clients_thread;
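
Sketch of the new per-transporter interface bookkeeping (`registry` is a hypothetical TransporterRegistry*, and the address/port are illustrative):

    registry->add_transporter_interface("192.168.0.10", 2202);
    for (unsigned i = 0; i < registry->m_transporter_interface.size(); i++) {
      const TransporterRegistry::Transporter_interface& ti =
        registry->m_transporter_interface[i];
      /* start a listening service on ti.m_interface : ti.m_service_port */
    }
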
diff --git a/ndb/include/util/NdbSqlUtil.hpp b/ndb/include/util/NdbSqlUtil.hpp
index 1d3e96d5c7e..3062d1e4e1b 100644
--- a/ndb/include/util/NdbSqlUtil.hpp
+++ b/ndb/include/util/NdbSqlUtil.hpp
@@ -40,11 +40,14 @@ public:
* Compare kernel attribute values. Returns -1, 0, +1 for less,
* equal, greater, respectively. Parameters are pointers to values,
* full attribute size in words, and size of available data in words.
+ * A pointer to type-specific extra info is also passed; char types
+ * receive their CHARSET_INFO in it.
+ *
* If available size is less than full size, CmpUnknown may be
* returned. If a value cannot be parsed, it compares like NULL i.e.
* less than any valid value.
*/
- typedef int Cmp(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size);
+ typedef int Cmp(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size);
enum CmpResult {
CmpLess = -1,
@@ -55,6 +58,7 @@ public:
/**
* Kernel data types. Must match m_typeList in NdbSqlUtil.cpp.
+ * Now also must match types in NdbDictionary.
*/
struct Type {
enum Enum {
@@ -90,6 +94,18 @@ public:
*/
static const Type& getType(Uint32 typeId);
+ /**
+ * Get type by id but replace char type by corresponding binary type.
+ */
+ static const Type& getTypeBinary(Uint32 typeId);
+
+ /**
+ * Check character set.
+ */
+ static bool usable_in_pk(Uint32 typeId, const void* cs);
+ static bool usable_in_hash_index(Uint32 typeId, const void* cs);
+ static bool usable_in_ordered_index(Uint32 typeId, const void* cs);
+
private:
/**
* List of all types. Must match Type::Enum.
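
Sketch of a comparator matching the new Cmp signature, plus a charset admissibility check; `typeId` and `cs` stand in for a column's type id and CHARSET_INFO:

    static int cmpUint32(const void* info, const Uint32* p1, const Uint32* p2,
                         Uint32 full, Uint32 size)
    {
      /* info and full are unused for this fixed-size type */
      if (size < 1)
        return NdbSqlUtil::CmpUnknown;         /* not enough data available */
      return *p1 < *p2 ? -1 : (*p1 > *p2 ? +1 : 0);
    }

    bool ok = NdbSqlUtil::usable_in_ordered_index(typeId, cs);
    /* ok == false: this charset cannot be used in an ordered index */
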
diff --git a/ndb/include/util/SocketServer.hpp b/ndb/include/util/SocketServer.hpp
index 334fa575e47..3860b9ca84b 100644
--- a/ndb/include/util/SocketServer.hpp
+++ b/ndb/include/util/SocketServer.hpp
@@ -76,7 +76,7 @@ public:
* then close the socket
* Returns true if succeeding in binding
*/
- bool tryBind(unsigned short port, const char * intface = 0) const;
+ static bool tryBind(unsigned short port, const char * intface = 0);
/**
* Setup socket
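
With tryBind() now static, port availability can be checked without constructing a SocketServer; the port and interface values are illustrative:

    if (! SocketServer::tryBind(2202, "192.168.0.10")) {
      /* port is already in use on that interface */
    }
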
diff --git a/ndb/src/common/debugger/EventLogger.cpp b/ndb/src/common/debugger/EventLogger.cpp
index 69874ab7ecc..03445622e6a 100644
--- a/ndb/src/common/debugger/EventLogger.cpp
+++ b/ndb/src/common/debugger/EventLogger.cpp
@@ -28,6 +28,10 @@
//
// PUBLIC
//
+EventLoggerBase::~EventLoggerBase()
+{
+
+}
/**
* This matrix defines which event should be printed when
@@ -35,122 +39,89 @@
* threshold - is in range [0-15]
* severity - DEBUG to ALERT (Type of log message)
*/
-const EventLogger::EventRepLogLevelMatrix EventLogger::matrix[] = {
+const EventLoggerBase::EventRepLogLevelMatrix EventLoggerBase::matrix[] = {
// CONNECTION
- { EventReport::Connected, LogLevel::llConnection, 8, LL_INFO },
- { EventReport::Disconnected, LogLevel::llConnection, 8, LL_ALERT },
- { EventReport::CommunicationClosed, LogLevel::llConnection, 8, LL_INFO },
- { EventReport::CommunicationOpened, LogLevel::llConnection, 8, LL_INFO },
- { EventReport::ConnectedApiVersion, LogLevel::llConnection, 8, LL_INFO },
+ { EventReport::Connected, LogLevel::llConnection, 8, Logger::LL_INFO },
+ { EventReport::Disconnected, LogLevel::llConnection, 8, Logger::LL_ALERT },
+ { EventReport::CommunicationClosed, LogLevel::llConnection, 8, Logger::LL_INFO },
+ { EventReport::CommunicationOpened, LogLevel::llConnection, 8, Logger::LL_INFO },
+ { EventReport::ConnectedApiVersion, LogLevel::llConnection, 8, Logger::LL_INFO },
// CHECKPOINT
- { EventReport::GlobalCheckpointStarted, LogLevel::llCheckpoint, 9, LL_INFO },
- { EventReport::GlobalCheckpointCompleted,LogLevel::llCheckpoint,10, LL_INFO },
- { EventReport::LocalCheckpointStarted, LogLevel::llCheckpoint, 7, LL_INFO },
- { EventReport::LocalCheckpointCompleted,LogLevel::llCheckpoint, 8, LL_INFO },
- { EventReport::LCPStoppedInCalcKeepGci, LogLevel::llCheckpoint, 0, LL_ALERT },
- { EventReport::LCPFragmentCompleted, LogLevel::llCheckpoint, 11, LL_INFO },
- { EventReport::UndoLogBlocked, LogLevel::llCheckpoint, 7, LL_INFO },
+ { EventReport::GlobalCheckpointStarted, LogLevel::llCheckpoint, 9, Logger::LL_INFO },
+ { EventReport::GlobalCheckpointCompleted,LogLevel::llCheckpoint,10, Logger::LL_INFO },
+ { EventReport::LocalCheckpointStarted, LogLevel::llCheckpoint, 7, Logger::LL_INFO },
+ { EventReport::LocalCheckpointCompleted,LogLevel::llCheckpoint, 8, Logger::LL_INFO },
+ { EventReport::LCPStoppedInCalcKeepGci, LogLevel::llCheckpoint, 0, Logger::LL_ALERT },
+ { EventReport::LCPFragmentCompleted, LogLevel::llCheckpoint, 11, Logger::LL_INFO },
+ { EventReport::UndoLogBlocked, LogLevel::llCheckpoint, 7, Logger::LL_INFO },
// STARTUP
- { EventReport::NDBStartStarted, LogLevel::llStartUp, 1, LL_INFO },
- { EventReport::NDBStartCompleted, LogLevel::llStartUp, 1, LL_INFO },
- { EventReport::STTORRYRecieved, LogLevel::llStartUp,15, LL_INFO },
- { EventReport::StartPhaseCompleted, LogLevel::llStartUp, 4, LL_INFO },
- { EventReport::CM_REGCONF, LogLevel::llStartUp, 3, LL_INFO },
- { EventReport::CM_REGREF, LogLevel::llStartUp, 8, LL_INFO },
- { EventReport::FIND_NEIGHBOURS, LogLevel::llStartUp, 8, LL_INFO },
- { EventReport::NDBStopStarted, LogLevel::llStartUp, 1, LL_INFO },
- { EventReport::NDBStopAborted, LogLevel::llStartUp, 1, LL_INFO },
- { EventReport::StartREDOLog, LogLevel::llStartUp, 10, LL_INFO },
- { EventReport::StartLog, LogLevel::llStartUp, 10, LL_INFO },
- { EventReport::UNDORecordsExecuted, LogLevel::llStartUp, 15, LL_INFO },
+ { EventReport::NDBStartStarted, LogLevel::llStartUp, 1, Logger::LL_INFO },
+ { EventReport::NDBStartCompleted, LogLevel::llStartUp, 1, Logger::LL_INFO },
+ { EventReport::STTORRYRecieved, LogLevel::llStartUp,15, Logger::LL_INFO },
+ { EventReport::StartPhaseCompleted, LogLevel::llStartUp, 4, Logger::LL_INFO },
+ { EventReport::CM_REGCONF, LogLevel::llStartUp, 3, Logger::LL_INFO },
+ { EventReport::CM_REGREF, LogLevel::llStartUp, 8, Logger::LL_INFO },
+ { EventReport::FIND_NEIGHBOURS, LogLevel::llStartUp, 8, Logger::LL_INFO },
+ { EventReport::NDBStopStarted, LogLevel::llStartUp, 1, Logger::LL_INFO },
+ { EventReport::NDBStopAborted, LogLevel::llStartUp, 1, Logger::LL_INFO },
+ { EventReport::StartREDOLog, LogLevel::llStartUp, 10, Logger::LL_INFO },
+ { EventReport::StartLog, LogLevel::llStartUp, 10, Logger::LL_INFO },
+ { EventReport::UNDORecordsExecuted, LogLevel::llStartUp, 15, Logger::LL_INFO },
// NODERESTART
- { EventReport::NR_CopyDict, LogLevel::llNodeRestart, 8, LL_INFO },
- { EventReport::NR_CopyDistr, LogLevel::llNodeRestart, 8, LL_INFO },
- { EventReport::NR_CopyFragsStarted, LogLevel::llNodeRestart, 8, LL_INFO },
- { EventReport::NR_CopyFragDone, LogLevel::llNodeRestart, 10, LL_INFO },
- { EventReport::NR_CopyFragsCompleted, LogLevel::llNodeRestart, 8, LL_INFO },
+ { EventReport::NR_CopyDict, LogLevel::llNodeRestart, 8, Logger::LL_INFO },
+ { EventReport::NR_CopyDistr, LogLevel::llNodeRestart, 8, Logger::LL_INFO },
+ { EventReport::NR_CopyFragsStarted, LogLevel::llNodeRestart, 8, Logger::LL_INFO },
+ { EventReport::NR_CopyFragDone, LogLevel::llNodeRestart, 10, Logger::LL_INFO },
+ { EventReport::NR_CopyFragsCompleted, LogLevel::llNodeRestart, 8, Logger::LL_INFO },
- { EventReport::NodeFailCompleted, LogLevel::llNodeRestart, 8, LL_ALERT},
- { EventReport::NODE_FAILREP, LogLevel::llNodeRestart, 8, LL_ALERT},
- { EventReport::ArbitState, LogLevel::llNodeRestart, 6, LL_INFO },
- { EventReport::ArbitResult, LogLevel::llNodeRestart, 2, LL_ALERT},
- { EventReport::GCP_TakeoverStarted, LogLevel::llNodeRestart, 7, LL_INFO },
- { EventReport::GCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, LL_INFO },
- { EventReport::LCP_TakeoverStarted, LogLevel::llNodeRestart, 7, LL_INFO },
- { EventReport::LCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, LL_INFO },
+ { EventReport::NodeFailCompleted, LogLevel::llNodeRestart, 8, Logger::LL_ALERT},
+ { EventReport::NODE_FAILREP, LogLevel::llNodeRestart, 8, Logger::LL_ALERT},
+ { EventReport::ArbitState, LogLevel::llNodeRestart, 6, Logger::LL_INFO },
+ { EventReport::ArbitResult, LogLevel::llNodeRestart, 2, Logger::LL_ALERT},
+ { EventReport::GCP_TakeoverStarted, LogLevel::llNodeRestart, 7, Logger::LL_INFO },
+ { EventReport::GCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, Logger::LL_INFO },
+ { EventReport::LCP_TakeoverStarted, LogLevel::llNodeRestart, 7, Logger::LL_INFO },
+ { EventReport::LCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, Logger::LL_INFO },
// STATISTIC
- { EventReport::TransReportCounters, LogLevel::llStatistic, 8, LL_INFO },
- { EventReport::OperationReportCounters, LogLevel::llStatistic, 8, LL_INFO },
- { EventReport::TableCreated, LogLevel::llStatistic, 7, LL_INFO },
- { EventReport::JobStatistic, LogLevel::llStatistic, 9, LL_INFO },
- { EventReport::SendBytesStatistic, LogLevel::llStatistic, 9, LL_INFO },
- { EventReport::ReceiveBytesStatistic, LogLevel::llStatistic, 9, LL_INFO },
- { EventReport::MemoryUsage, LogLevel::llStatistic, 5, LL_INFO },
+ { EventReport::TransReportCounters, LogLevel::llStatistic, 8, Logger::LL_INFO },
+ { EventReport::OperationReportCounters, LogLevel::llStatistic, 8, Logger::LL_INFO },
+ { EventReport::TableCreated, LogLevel::llStatistic, 7, Logger::LL_INFO },
+ { EventReport::JobStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO },
+ { EventReport::SendBytesStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO },
+ { EventReport::ReceiveBytesStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO },
+ { EventReport::MemoryUsage, LogLevel::llStatistic, 5, Logger::LL_INFO },
// ERROR
- { EventReport::TransporterError, LogLevel::llError, 2, LL_ERROR },
- { EventReport::TransporterWarning, LogLevel::llError, 8, LL_WARNING },
- { EventReport::MissedHeartbeat, LogLevel::llError, 8, LL_WARNING },
- { EventReport::DeadDueToHeartbeat, LogLevel::llError, 8, LL_ALERT },
- { EventReport::WarningEvent, LogLevel::llError, 2, LL_WARNING },
+ { EventReport::TransporterError, LogLevel::llError, 2, Logger::LL_ERROR },
+ { EventReport::TransporterWarning, LogLevel::llError, 8, Logger::LL_WARNING },
+ { EventReport::MissedHeartbeat, LogLevel::llError, 8, Logger::LL_WARNING },
+ { EventReport::DeadDueToHeartbeat, LogLevel::llError, 8, Logger::LL_ALERT },
+ { EventReport::WarningEvent, LogLevel::llError, 2, Logger::LL_WARNING },
// INFO
- { EventReport::SentHeartbeat, LogLevel::llInfo, 12, LL_INFO },
- { EventReport::CreateLogBytes, LogLevel::llInfo, 11, LL_INFO },
- { EventReport::InfoEvent, LogLevel::llInfo, 2, LL_INFO },
+ { EventReport::SentHeartbeat, LogLevel::llInfo, 12, Logger::LL_INFO },
+ { EventReport::CreateLogBytes, LogLevel::llInfo, 11, Logger::LL_INFO },
+ { EventReport::InfoEvent, LogLevel::llInfo, 2, Logger::LL_INFO },
//Global replication
- { EventReport::GrepSubscriptionInfo, LogLevel::llGrep, 7, LL_INFO},
- { EventReport::GrepSubscriptionAlert, LogLevel::llGrep, 7, LL_ALERT}
-};
-
-const Uint32 EventLogger::matrixSize = sizeof(EventLogger::matrix)/
- sizeof(EventRepLogLevelMatrix);
+ { EventReport::GrepSubscriptionInfo, LogLevel::llGrep, 7, Logger::LL_INFO},
+ { EventReport::GrepSubscriptionAlert, LogLevel::llGrep, 7, Logger::LL_ALERT},
-/**
- * Default log levels for management nodes.
- *
- * threshold - is in range [0-15]
- */
-const EventLogger::EventLogMatrix EventLogger::defEventLogMatrix[] = {
- { LogLevel::llStartUp, 7 },
- { LogLevel::llShutdown, 7 },
- { LogLevel::llStatistic, 7 },
- { LogLevel::llCheckpoint, 7 },
- { LogLevel::llNodeRestart, 7 },
- { LogLevel::llConnection, 7 },
- { LogLevel::llError, 15 },
- { LogLevel::llInfo, 7 },
- { LogLevel::llGrep, 7 }
+ // Backup
+ { EventReport::BackupStarted, LogLevel::llBackup, 7, Logger::LL_INFO },
+ { EventReport::BackupCompleted, LogLevel::llBackup, 7, Logger::LL_INFO },
+ { EventReport::BackupFailedToStart, LogLevel::llBackup, 7, Logger::LL_ALERT},
+ { EventReport::BackupAborted, LogLevel::llBackup, 7, Logger::LL_ALERT }
};
-const Uint32
-EventLogger::defEventLogMatrixSize = sizeof(EventLogger::defEventLogMatrix)/
- sizeof(EventLogMatrix);
-/**
- * Specifies allowed event categories/log levels that can be set from
- * the Management API/interactive shell.
- */
-const EventLogger::EventCategoryName EventLogger::eventCategoryNames[] = {
- { LogLevel::llStartUp, "STARTUP" },
- { LogLevel::llStatistic, "STATISTICS" },
- { LogLevel::llCheckpoint, "CHECKPOINT" },
- { LogLevel::llNodeRestart, "NODERESTART" },
- { LogLevel::llConnection, "CONNECTION" },
- { LogLevel::llInfo, "INFO" },
- { LogLevel::llGrep, "GREP" }
-};
-
-const Uint32
-EventLogger::noOfEventCategoryNames = sizeof(EventLogger::eventCategoryNames)/
- sizeof(EventLogger::EventCategoryName);
-
-char EventLogger::m_text[MAX_TEXT_LENGTH];
+const Uint32 EventLoggerBase::matrixSize = sizeof(EventLoggerBase::matrix)/
+ sizeof(EventRepLogLevelMatrix);
const char*
-EventLogger::getText(int type,
+EventLogger::getText(char * m_text, size_t m_text_len,
+ int type,
const Uint32* theData, NodeId nodeId)
{
// TODO: Change the switch implementation...
@@ -164,13 +135,13 @@ EventLogger::getText(int type,
EventReport::EventType eventType = (EventReport::EventType)type;
switch (eventType){
case EventReport::Connected:
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sNode %u Connected",
theNodeId,
theData[1]);
break;
case EventReport::ConnectedApiVersion:
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sNode %u: API version %d.%d.%d",
theNodeId,
theData[1],
@@ -179,7 +150,7 @@ EventLogger::getText(int type,
getBuild(theData[2]));
break;
case EventReport::Disconnected:
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sNode %u Disconnected",
theNodeId,
theData[1]);
@@ -188,7 +159,7 @@ EventLogger::getText(int type,
//-----------------------------------------------------------------------
// REPORT communication to node closed.
//-----------------------------------------------------------------------
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sCommunication to Node %u closed",
theNodeId,
theData[1]);
@@ -197,7 +168,7 @@ EventLogger::getText(int type,
//-----------------------------------------------------------------------
// REPORT communication to node opened.
//-----------------------------------------------------------------------
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sCommunication to Node %u opened",
theNodeId,
theData[1]);
@@ -206,7 +177,7 @@ EventLogger::getText(int type,
//-----------------------------------------------------------------------
// Start of NDB has been initiated.
//-----------------------------------------------------------------------
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sStart initiated (version %d.%d.%d)",
theNodeId ,
getMajor(theData[1]),
@@ -214,13 +185,13 @@ EventLogger::getText(int type,
getBuild(theData[1]));
break;
case EventReport::NDBStopStarted:
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%s%s shutdown initiated",
theNodeId,
(theData[1] == 1 ? "Cluster" : "Node"));
break;
case EventReport::NDBStopAborted:
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sNode shutdown aborted",
theNodeId);
break;
@@ -228,7 +199,7 @@ EventLogger::getText(int type,
//-----------------------------------------------------------------------
// Start of NDB has been completed.
//-----------------------------------------------------------------------
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sStarted (version %d.%d.%d)",
theNodeId ,
getMajor(theData[1]),
@@ -240,7 +211,7 @@ EventLogger::getText(int type,
//-----------------------------------------------------------------------
// STTORRY received after restart finished.
//-----------------------------------------------------------------------
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sSTTORRY received after restart finished",
theNodeId);
break;
@@ -266,7 +237,7 @@ EventLogger::getText(int type,
type = "";
break;
default:{
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sStart phase %u completed (unknown = %d)",
theNodeId,
theData[1],
@@ -274,7 +245,7 @@ EventLogger::getText(int type,
return m_text;
}
}
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sStart phase %u completed %s",
theNodeId,
theData[1],
@@ -283,7 +254,7 @@ EventLogger::getText(int type,
break;
}
case EventReport::CM_REGCONF:
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sCM_REGCONF president = %u, own Node = %u, our dynamic id = %u"
,
theNodeId,
@@ -315,7 +286,7 @@ EventLogger::getText(int type,
break;
}//switch
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sCM_REGREF from Node %u to our Node %u. Cause = %s",
theNodeId,
theData[2],
@@ -328,7 +299,7 @@ EventLogger::getText(int type,
// REPORT Node Restart copied a fragment.
//-----------------------------------------------------------------------
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sWe are Node %u with dynamic ID %u, our left neighbour "
"is Node %u, our right is Node %u",
theNodeId,
@@ -344,13 +315,13 @@ EventLogger::getText(int type,
if (theData[1] == 0)
{
if (theData[3] != 0) {
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sNode %u completed failure of Node %u",
theNodeId,
theData[3],
theData[2]);
} else {
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sAll nodes completed failure of Node %u",
theNodeId,
theData[2]);
@@ -367,7 +338,7 @@ EventLogger::getText(int type,
line = "DBLQH";
}
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sNode failure of %u %s completed",
theNodeId,
theData[2],
@@ -376,7 +347,7 @@ EventLogger::getText(int type,
break;
case EventReport::NODE_FAILREP:
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sNode %u has failed. The Node state at failure "
"was %u",
theNodeId,
@@ -395,41 +366,41 @@ EventLogger::getText(int type,
const unsigned state = sd->code >> 16;
switch (code) {
case ArbitCode::ThreadStart:
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sPresident restarts arbitration thread [state=%u]",
theNodeId, state);
break;
case ArbitCode::PrepPart2:
sd->ticket.getText(ticketText, sizeof(ticketText));
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sPrepare arbitrator node %u [ticket=%s]",
theNodeId, sd->node, ticketText);
break;
case ArbitCode::PrepAtrun:
sd->ticket.getText(ticketText, sizeof(ticketText));
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sReceive arbitrator node %u [ticket=%s]",
theNodeId, sd->node, ticketText);
break;
case ArbitCode::ApiStart:
sd->ticket.getText(ticketText, sizeof(ticketText));
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sStarted arbitrator node %u [ticket=%s]",
theNodeId, sd->node, ticketText);
break;
case ArbitCode::ApiFail:
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sLost arbitrator node %u - process failure [state=%u]",
theNodeId, sd->node, state);
break;
case ArbitCode::ApiExit:
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sLost arbitrator node %u - process exit [state=%u]",
theNodeId, sd->node, state);
break;
default:
ArbitCode::getErrText(code, errText, sizeof(errText));
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sLost arbitrator node %u - %s [state=%u]",
theNodeId, sd->node, errText, state);
break;
@@ -446,48 +417,48 @@ EventLogger::getText(int type,
const unsigned state = sd->code >> 16;
switch (code) {
case ArbitCode::LoseNodes:
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sArbitration check lost - less than 1/2 nodes left",
theNodeId);
break;
case ArbitCode::WinGroups:
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sArbitration check won - node group majority",
theNodeId);
break;
case ArbitCode::LoseGroups:
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sArbitration check lost - missing node group",
theNodeId);
break;
case ArbitCode::Partitioning:
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sNetwork partitioning - arbitration required",
theNodeId);
break;
case ArbitCode::WinChoose:
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sArbitration won - positive reply from node %u",
theNodeId, sd->node);
break;
case ArbitCode::LoseChoose:
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sArbitration lost - negative reply from node %u",
theNodeId, sd->node);
break;
case ArbitCode::LoseNorun:
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sNetwork partitioning - no arbitrator available",
theNodeId);
break;
case ArbitCode::LoseNocfg:
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sNetwork partitioning - no arbitrator configured",
theNodeId);
break;
default:
ArbitCode::getErrText(code, errText, sizeof(errText));
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sArbitration failure - %s [state=%u]",
theNodeId, errText, state);
break;
@@ -500,7 +471,7 @@ EventLogger::getText(int type,
// node is the master of this global checkpoint.
//-----------------------------------------------------------------------
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sGlobal checkpoint %u started",
theNodeId,
theData[1]);
@@ -510,7 +481,7 @@ EventLogger::getText(int type,
// This event reports that a global checkpoint has been completed on this
// node and the node is the master of this global checkpoint.
//-----------------------------------------------------------------------
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sGlobal checkpoint %u completed",
theNodeId,
theData[1]);
@@ -521,7 +492,7 @@ EventLogger::getText(int type,
// node is the master of this local checkpoint.
//-----------------------------------------------------------------------
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sLocal checkpoint %u started. "
"Keep GCI = %u oldest restorable GCI = %u",
theNodeId,
@@ -535,7 +506,7 @@ EventLogger::getText(int type,
// node and the node is the master of this local checkpoint.
//-----------------------------------------------------------------------
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sLocal checkpoint %u completed",
theNodeId,
theData[1]);
@@ -544,14 +515,14 @@ EventLogger::getText(int type,
//-----------------------------------------------------------------------
// This event reports that a table has been created.
//-----------------------------------------------------------------------
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sTable with ID = %u created",
theNodeId,
theData[1]);
break;
case EventReport::LCPStoppedInCalcKeepGci:
if (theData[1] == 0)
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sLocal Checkpoint stopped in CALCULATED_KEEP_GCI",
theNodeId);
break;
@@ -560,7 +531,7 @@ EventLogger::getText(int type,
// REPORT Node Restart completed copy of dictionary information.
//-----------------------------------------------------------------------
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sNode restart completed copy of dictionary information",
theNodeId);
break;
@@ -569,7 +540,7 @@ EventLogger::getText(int type,
// REPORT Node Restart completed copy of distribution information.
//-----------------------------------------------------------------------
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sNode restart completed copy of distribution information",
theNodeId);
break;
@@ -578,7 +549,7 @@ EventLogger::getText(int type,
// REPORT Node Restart is starting to copy the fragments.
//-----------------------------------------------------------------------
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sNode restart starting to copy the fragments "
"to Node %u",
theNodeId,
@@ -589,7 +560,7 @@ EventLogger::getText(int type,
// REPORT Node Restart copied a fragment.
//-----------------------------------------------------------------------
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sTable ID = %u, fragment ID = %u have been copied "
"to Node %u",
theNodeId,
@@ -599,7 +570,7 @@ EventLogger::getText(int type,
break;
case EventReport::NR_CopyFragsCompleted:
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sNode restart completed copying the fragments "
"to Node %u",
theNodeId,
@@ -607,7 +578,7 @@ EventLogger::getText(int type,
break;
case EventReport::LCPFragmentCompleted:
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sTable ID = %u, fragment ID = %u has completed LCP "
"on Node %u",
theNodeId,
@@ -620,7 +591,7 @@ EventLogger::getText(int type,
// Report information about transaction activity once per 10 seconds.
// -------------------------------------------------------------------
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sTrans. Count = %u, Commit Count = %u, "
"Read Count = %u, Simple Read Count = %u,\n"
"Write Count = %u, AttrInfo Count = %u, "
@@ -639,7 +610,7 @@ EventLogger::getText(int type,
theData[10]);
break;
case EventReport::OperationReportCounters:
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%sOperations=%u",
theNodeId,
theData[1]);
@@ -649,7 +620,7 @@ EventLogger::getText(int type,
// REPORT Undo Logging blocked due to buffer near to overflow.
//-----------------------------------------------------------------------
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sACC Blocked %u and TUP Blocked %u times last second",
theNodeId,
theData[1],
@@ -658,7 +629,7 @@ EventLogger::getText(int type,
case EventReport::TransporterError:
case EventReport::TransporterWarning:
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sTransporter to node %d reported error 0x%x",
theNodeId,
theData[1],
@@ -669,7 +640,7 @@ EventLogger::getText(int type,
// REPORT Undo Logging blocked due to buffer near to overflow.
//-----------------------------------------------------------------------
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sNode %d missed heartbeat %d",
theNodeId,
theData[1],
@@ -680,21 +651,21 @@ EventLogger::getText(int type,
// REPORT Undo Logging blocked due to buffer near to overflow.
//-----------------------------------------------------------------------
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sNode %d declared dead due to missed heartbeat",
theNodeId,
theData[1]);
break;
case EventReport::JobStatistic:
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sMean loop Counter in doJob last 8192 times = %u",
theNodeId,
theData[1]);
break;
case EventReport::SendBytesStatistic:
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sMean send size to Node = %d last 4096 sends = %u bytes",
theNodeId,
theData[1],
@@ -702,7 +673,7 @@ EventLogger::getText(int type,
break;
case EventReport::ReceiveBytesStatistic:
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sMean receive size to Node = %d last 4096 sends = %u bytes",
theNodeId,
theData[1],
@@ -710,14 +681,14 @@ EventLogger::getText(int type,
break;
case EventReport::SentHeartbeat:
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sNode Sent Heartbeat to node = %d",
theNodeId,
theData[1]);
break;
case EventReport::CreateLogBytes:
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sLog part %u, log file %u, MB %u",
theNodeId,
theData[1],
@@ -726,7 +697,7 @@ EventLogger::getText(int type,
break;
case EventReport::StartLog:
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sLog part %u, start MB %u, stop MB %u, last GCI, log exec %u",
theNodeId,
theData[1],
@@ -736,7 +707,7 @@ EventLogger::getText(int type,
break;
case EventReport::StartREDOLog:
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sNode: %d StartLog: [GCI Keep: %d LastCompleted: %d NewestRestorable: %d]",
theNodeId,
theData[1],
@@ -753,7 +724,7 @@ EventLogger::getText(int type,
}
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%s UNDO %s %d [%d %d %d %d %d %d %d %d %d]",
theNodeId,
line,
@@ -771,36 +742,36 @@ EventLogger::getText(int type,
break;
case EventReport::InfoEvent:
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%s%s",
theNodeId,
(char *)&theData[1]);
break;
case EventReport::WarningEvent:
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%s%s",
theNodeId,
(char *)&theData[1]);
break;
case EventReport::GCP_TakeoverStarted:
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sGCP Take over started", theNodeId);
break;
case EventReport::GCP_TakeoverCompleted:
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sGCP Take over completed", theNodeId);
break;
case EventReport::LCP_TakeoverStarted:
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sLCP Take over started", theNodeId);
break;
case EventReport::LCP_TakeoverCompleted:
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sLCP Take over completed (state = %d)",
theNodeId, theData[1]);
break;
@@ -812,7 +783,7 @@ EventLogger::getText(int type,
const int block = theData[5];
const int percent = (used*100)/total;
- ::snprintf(m_text, sizeof(m_text),
+ ::snprintf(m_text, m_text_len,
"%s%s usage %s %d%s(%d %dK pages of total %d)",
theNodeId,
(block==DBACC ? "Index" : (block == DBTUP ?"Data":"<unknown>")),
@@ -822,478 +793,508 @@ EventLogger::getText(int type,
);
break;
}
-
-
case EventReport::GrepSubscriptionInfo :
- {
- GrepEvent::Subscription event = (GrepEvent::Subscription)theData[1];
- switch(event) {
- case GrepEvent::GrepSS_CreateSubIdConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Created subscription id"
- " (subId=%d,SubKey=%d)"
- " Return code: %d.",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepPS_CreateSubIdConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: Created subscription id"
- " (subId=%d,SubKey=%d)"
- " Return code: %d.",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepSS_SubCreateConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- const int nodegrp = theData[5];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Created subscription using"
- " (subId=%d,SubKey=%d)"
- " in primary system. Primary system has %d nodegroup(s)."
- " Return code: %d",
- subId,
- subKey,
- nodegrp,
- err);
- break;
- }
- case GrepEvent::GrepPS_SubCreateConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: All participants have created "
- "subscriptions"
- " using (subId=%d,SubKey=%d)."
- " Return code: %d",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepSS_SubStartMetaConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Logging started on meta data changes."
- " using (subId=%d,SubKey=%d)"
- " Return code: %d",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepPS_SubStartMetaConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: All participants have started "
- "logging meta data"
- " changes on the subscription subId=%d,SubKey=%d) "
- "(N.I yet)."
- " Return code: %d",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepSS_SubStartDataConf: {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Logging started on table data changes "
- " using (subId=%d,SubKey=%d)"
- " Return code: %d",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepPS_SubStartDataConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: All participants have started logging "
- "table data changes on the subscription "
- "subId=%d,SubKey=%d)."
- " Return code: %d",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepPS_SubSyncMetaConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: All participants have started "
- " synchronization on meta data (META SCAN) using "
- "(subId=%d,SubKey=%d)."
- " Return code: %d",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepSS_SubSyncMetaConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Synchronization started (META SCAN) on "
- " meta data using (subId=%d,SubKey=%d)"
- " Return code: %d",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepPS_SubSyncDataConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: All participants have started "
- "synchronization "
- " on table data (DATA SCAN) using (subId=%d,SubKey=%d)."
- " Return code: %d",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepSS_SubSyncDataConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- const int gci = theData[5];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Synchronization started (DATA SCAN) on "
- "table data using (subId=%d,SubKey=%d). GCI = %d"
- " Return code: %d",
- subId,
- subKey,
- gci,
- err);
- break;
- }
- case GrepEvent::GrepPS_SubRemoveConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: All participants have removed "
- "subscription (subId=%d,SubKey=%d). I have cleaned "
- "up resources I've used."
- " Return code: %d",
- subId,
- subKey,
- err);
- break;
- }
- case GrepEvent::GrepSS_SubRemoveConf:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Removed subscription "
- "(subId=%d,SubKey=%d)"
- " Return code: %d",
- subId,
- subKey,
- err);
- break;
- }
+ {
+ GrepEvent::Subscription event = (GrepEvent::Subscription)theData[1];
+ switch(event) {
+ case GrepEvent::GrepSS_CreateSubIdConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Created subscription id"
+ " (subId=%d,SubKey=%d)"
+ " Return code: %d.",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepPS_CreateSubIdConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: Created subscription id"
+ " (subId=%d,SubKey=%d)"
+ " Return code: %d.",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepSS_SubCreateConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ const int nodegrp = theData[5];
+ ::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Created subscription using"
+ " (subId=%d,SubKey=%d)"
+ " in primary system. Primary system has %d nodegroup(s)."
+ " Return code: %d",
+ subId,
+ subKey,
+ nodegrp,
+ err);
+ break;
+ }
+ case GrepEvent::GrepPS_SubCreateConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: All participants have created "
+ "subscriptions"
+ " using (subId=%d,SubKey=%d)."
+ " Return code: %d",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepSS_SubStartMetaConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Logging started on meta data changes."
+ " using (subId=%d,SubKey=%d)"
+ " Return code: %d",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepPS_SubStartMetaConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: All participants have started "
+ "logging meta data"
+ " changes on the subscription subId=%d,SubKey=%d) "
+ "(N.I yet)."
+ " Return code: %d",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepSS_SubStartDataConf: {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Logging started on table data changes "
+ " using (subId=%d,SubKey=%d)"
+ " Return code: %d",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepPS_SubStartDataConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: All participants have started logging "
+ "table data changes on the subscription "
+ "subId=%d,SubKey=%d)."
+ " Return code: %d",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepPS_SubSyncMetaConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: All participants have started "
+ " synchronization on meta data (META SCAN) using "
+ "(subId=%d,SubKey=%d)."
+ " Return code: %d",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepSS_SubSyncMetaConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Synchronization started (META SCAN) on "
+ " meta data using (subId=%d,SubKey=%d)"
+ " Return code: %d",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepPS_SubSyncDataConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: All participants have started "
+ "synchronization "
+ " on table data (DATA SCAN) using (subId=%d,SubKey=%d)."
+ " Return code: %d",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepSS_SubSyncDataConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ const int gci = theData[5];
+ ::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Synchronization started (DATA SCAN) on "
+ "table data using (subId=%d,SubKey=%d). GCI = %d"
+ " Return code: %d",
+ subId,
+ subKey,
+ gci,
+ err);
+ break;
+ }
+ case GrepEvent::GrepPS_SubRemoveConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: All participants have removed "
+ "subscription (subId=%d,SubKey=%d). I have cleaned "
+ "up resources I've used."
+ " Return code: %d",
+ subId,
+ subKey,
+ err);
+ break;
+ }
+ case GrepEvent::GrepSS_SubRemoveConf:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Removed subscription "
+ "(subId=%d,SubKey=%d)"
+ " Return code: %d",
+ subId,
+ subKey,
+ err);
+ break;
+ }
default:
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sUnknown GrepSubscriptonInfo event: %d",
theNodeId,
theData[1]);
- }
- break;
}
-
+ break;
+ }
+
case EventReport::GrepSubscriptionAlert :
+ {
+ GrepEvent::Subscription event = (GrepEvent::Subscription)theData[1];
+ switch(event)
+ {
+ case GrepEvent::GrepSS_CreateSubIdRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::SSCoord:Error code: %d Error message: %s"
+ " (subId=%d,SubKey=%d)",
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err),
+ subId,
+ subKey);
+ break;
+ }
+ case GrepEvent::GrepSS_SubCreateRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: FAILED to Created subscription using"
+ " (subId=%d,SubKey=%d)in primary system."
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::GrepSS_SubStartMetaRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Logging failed to start on meta "
+ "data changes."
+ " using (subId=%d,SubKey=%d)"
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::GrepSS_SubStartDataRef:
{
- GrepEvent::Subscription event = (GrepEvent::Subscription)theData[1];
- switch(event)
- {
- case GrepEvent::GrepSS_CreateSubIdRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord:Error code: %d Error message: %s"
- " (subId=%d,SubKey=%d)",
- err,
- GrepError::getErrorDesc((GrepError::Code)err),
- subId,
- subKey);
- break;
- }
- case GrepEvent::GrepSS_SubCreateRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: FAILED to Created subscription using"
- " (subId=%d,SubKey=%d)in primary system."
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::GrepSS_SubStartMetaRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Logging failed to start on meta "
- "data changes."
- " using (subId=%d,SubKey=%d)"
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::GrepSS_SubStartDataRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Logging FAILED to start on table data "
- " changes using (subId=%d,SubKey=%d)"
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::GrepSS_SubSyncMetaRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Synchronization FAILED (META SCAN) on "
- " meta data using (subId=%d,SubKey=%d)"
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::GrepSS_SubSyncDataRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- const int gci = theData[5];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Synchronization FAILED (DATA SCAN) on "
- "table data using (subId=%d,SubKey=%d). GCI = %d"
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- gci,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::GrepSS_SubRemoveRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::SSCoord: Failed to remove subscription "
- "(subId=%d,SubKey=%d). "
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- err,
- GrepError::getErrorDesc((GrepError::Code)err)
- );
- break;
- }
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Logging FAILED to start on table data "
+ " changes using (subId=%d,SubKey=%d)"
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::GrepSS_SubSyncMetaRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Synchronization FAILED (META SCAN) on "
+ " meta data using (subId=%d,SubKey=%d)"
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::GrepSS_SubSyncDataRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ const int gci = theData[5];
+ ::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Synchronization FAILED (DATA SCAN) on "
+ "table data using (subId=%d,SubKey=%d). GCI = %d"
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ gci,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::GrepSS_SubRemoveRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::SSCoord: Failed to remove subscription "
+ "(subId=%d,SubKey=%d). "
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err)
+ );
+ break;
+ }
- case GrepEvent::GrepPS_CreateSubIdRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: Error code: %d Error Message: %s"
- " (subId=%d,SubKey=%d)",
- err,
- GrepError::getErrorDesc((GrepError::Code)err),
- subId,
- subKey);
- break;
- }
- case GrepEvent::GrepPS_SubCreateRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: FAILED to Created subscription using"
- " (subId=%d,SubKey=%d)in primary system."
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::GrepPS_SubStartMetaRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: Logging failed to start on meta "
- "data changes."
- " using (subId=%d,SubKey=%d)"
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::GrepPS_SubStartDataRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: Logging FAILED to start on table data "
- " changes using (subId=%d,SubKey=%d)"
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::GrepPS_SubSyncMetaRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: Synchronization FAILED (META SCAN) on "
- " meta data using (subId=%d,SubKey=%d)"
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::GrepPS_SubSyncDataRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- const int gci = theData[5];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: Synchronization FAILED (DATA SCAN) on "
- "table data using (subId=%d,SubKey=%d). GCI = %d. "
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- gci,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::GrepPS_SubRemoveRef:
- {
- const int subId = theData[2];
- const int subKey = theData[3];
- const int err = theData[4];
- ::snprintf(m_text, sizeof(m_text),
- "Grep::PSCoord: Failed to remove subscription "
- "(subId=%d,SubKey=%d)."
- " Error code: %d Error Message: %s",
- subId,
- subKey,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
- case GrepEvent::Rep_Disconnect:
- {
- const int err = theData[4];
- const int nodeId = theData[5];
- ::snprintf(m_text, sizeof(m_text),
- "Rep: Node %d."
- " Error code: %d Error Message: %s",
- nodeId,
- err,
- GrepError::getErrorDesc((GrepError::Code)err));
- break;
- }
+ case GrepEvent::GrepPS_CreateSubIdRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: Error code: %d Error Message: %s"
+ " (subId=%d,SubKey=%d)",
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err),
+ subId,
+ subKey);
+ break;
+ }
+ case GrepEvent::GrepPS_SubCreateRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: FAILED to Created subscription using"
+ " (subId=%d,SubKey=%d)in primary system."
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::GrepPS_SubStartMetaRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+		 "Grep::PSCoord: Logging failed to start on meta "
+		 "data changes"
+		 " using (subId=%d,SubKey=%d)."
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::GrepPS_SubStartDataRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: Logging FAILED to start on table data "
+		 "changes using (subId=%d,SubKey=%d)."
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::GrepPS_SubSyncMetaRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: Synchronization FAILED (META SCAN) on "
+		 "meta data using (subId=%d,SubKey=%d)."
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::GrepPS_SubSyncDataRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ const int gci = theData[5];
+ ::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: Synchronization FAILED (DATA SCAN) on "
+ "table data using (subId=%d,SubKey=%d). GCI = %d. "
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ gci,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::GrepPS_SubRemoveRef:
+ {
+ const int subId = theData[2];
+ const int subKey = theData[3];
+ const int err = theData[4];
+ ::snprintf(m_text, m_text_len,
+ "Grep::PSCoord: Failed to remove subscription "
+ "(subId=%d,SubKey=%d)."
+ " Error code: %d Error Message: %s",
+ subId,
+ subKey,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
+ case GrepEvent::Rep_Disconnect:
+ {
+ const int err = theData[4];
+ const int nodeId = theData[5];
+ ::snprintf(m_text, m_text_len,
+ "Rep: Node %d."
+ " Error code: %d Error Message: %s",
+ nodeId,
+ err,
+ GrepError::getErrorDesc((GrepError::Code)err));
+ break;
+ }
- default:
- ::snprintf(m_text,
- sizeof(m_text),
- "%sUnknown GrepSubscriptionAlert event: %d",
- theNodeId,
- theData[1]);
- break;
- }
- break;
+ default:
+ ::snprintf(m_text,
+ m_text_len,
+ "%sUnknown GrepSubscriptionAlert event: %d",
+ theNodeId,
+ theData[1]);
+ break;
}
-
+ break;
+ }
+
+ case EventReport::BackupStarted:
+ ::snprintf(m_text,
+ m_text_len,
+ "%sBackup %d started from node %d",
+ theNodeId, theData[2], refToNode(theData[1]));
+ break;
+ case EventReport::BackupFailedToStart:
+ ::snprintf(m_text,
+ m_text_len,
+ "%sBackup request from %d failed to start. Error: %d",
+ theNodeId, refToNode(theData[1]), theData[2]);
+ break;
+ case EventReport::BackupCompleted:
+ ::snprintf(m_text,
+ m_text_len,
+ "%sBackup %d started from node %d completed\n"
+ " StartGCP: %d StopGCP: %d\n"
+ " #Records: %d #LogRecords: %d\n"
+ " Data: %d bytes Log: %d bytes",
+ theNodeId, theData[2], refToNode(theData[1]),
+ theData[3], theData[4], theData[6], theData[8],
+ theData[5], theData[7]);
+ break;
+ case EventReport::BackupAborted:
+ ::snprintf(m_text,
+ m_text_len,
+ "%sBackup %d started from %d has been aborted. Error: %d",
+ theNodeId,
+ theData[2],
+ refToNode(theData[1]),
+ theData[3]);
+ break;
default:
::snprintf(m_text,
- sizeof(m_text),
+ m_text_len,
"%sUnknown event: %d",
theNodeId,
theData[0]);
@@ -1302,54 +1303,10 @@ EventLogger::getText(int type,
return m_text;
}
-bool
-EventLogger::matchEventCategory(const char * str,
- LogLevel::EventCategory * cat,
- bool exactMatch){
- unsigned i;
- if(cat == 0 || str == 0)
- return false;
-
- char * tmp = strdup(str);
- for(i = 0; i<strlen(tmp); i++)
- tmp[i] = toupper(tmp[i]);
-
- for(i = 0; i<noOfEventCategoryNames; i++){
- if(strcmp(tmp, eventCategoryNames[i].name) == 0){
- * cat = eventCategoryNames[i].category;
- free(tmp);
- return true;
- }
- }
- free(tmp);
- return false;
-}
-
-const char *
-EventLogger::getEventCategoryName(LogLevel::EventCategory cat){
-
- for(unsigned i = 0; i<noOfEventCategoryNames; i++){
- if(cat == eventCategoryNames[i].category){
- return eventCategoryNames[i].name;
- }
- }
- return 0;
-}
-
-
-EventLogger::EventLogger() : Logger(), m_logLevel(), m_filterLevel(15)
+EventLogger::EventLogger() : m_filterLevel(15)
{
setCategory("EventLogger");
- m_logLevel.setLogLevel(LogLevel::llStartUp, m_filterLevel);
- m_logLevel.setLogLevel(LogLevel::llShutdown, m_filterLevel);
- // m_logLevel.setLogLevel(LogLevel::llStatistic, m_filterLevel);
- // m_logLevel.setLogLevel(LogLevel::llCheckpoint, m_filterLevel);
- m_logLevel.setLogLevel(LogLevel::llNodeRestart, m_filterLevel);
- m_logLevel.setLogLevel(LogLevel::llConnection, m_filterLevel);
- m_logLevel.setLogLevel(LogLevel::llError, m_filterLevel);
- m_logLevel.setLogLevel(LogLevel::llInfo, m_filterLevel);
- enable(Logger::LL_INFO, Logger::LL_ALERT); // Log INFO to ALERT
-
+  enable(Logger::LL_INFO, Logger::LL_ALERT);
}
EventLogger::~EventLogger()
@@ -1370,66 +1327,74 @@ EventLogger::close()
removeAllHandlers();
}
-void
-EventLogger::log(NodeId nodeId, int eventType, const Uint32* theData)
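+// Debug helper: stream every category's level of a LogLevel (used in trace output).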
+static NdbOut&
+operator<<(NdbOut& out, const LogLevel & ll)
{
- log(eventType, theData, nodeId);
+ out << "[LogLevel: ";
+ for(size_t i = 0; i<LogLevel::LOGLEVEL_CATEGORIES; i++)
+ out << ll.getLogLevel((LogLevel::EventCategory)i) << " ";
+ out << "]";
+ return out;
}
void
-EventLogger::log(int eventType, const Uint32* theData, NodeId nodeId)
+EventLogger::log(int eventType, const Uint32* theData, NodeId nodeId,
+ const LogLevel* ll)
{
Uint32 threshold = 0;
- Logger::LoggerLevel severity = LL_WARNING;
+ Logger::LoggerLevel severity = Logger::LL_WARNING;
+ LogLevel::EventCategory cat;
- for(unsigned i = 0; i<EventLogger::matrixSize; i++){
- if(EventLogger::matrix[i].eventType == eventType){
- const LogLevel::EventCategory cat = EventLogger::matrix[i].eventCategory;
- threshold = m_logLevel.getLogLevel(cat);
- severity = EventLogger::matrix[i].severity;
+ for(unsigned i = 0; i<EventLoggerBase::matrixSize; i++){
+ if(EventLoggerBase::matrix[i].eventType == eventType){
+ cat = EventLoggerBase::matrix[i].eventCategory;
+ threshold = EventLoggerBase::matrix[i].threshold;
+ severity = EventLoggerBase::matrix[i].severity;
break;
}
}
- if (threshold <= m_filterLevel){
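+  // Prefer the caller-supplied LogLevel if one is given; otherwise use this logger's own level for the category.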
+ Uint32 set = ll?ll->getLogLevel(cat) : m_logLevel.getLogLevel(cat);
+ if (threshold <= set){
switch (severity){
- case LL_ALERT:
- alert(EventLogger::getText(eventType, theData, nodeId));
+ case Logger::LL_ALERT:
+ alert(EventLogger::getText(m_text, sizeof(m_text),
+ eventType, theData, nodeId));
break;
- case LL_CRITICAL:
- critical(EventLogger::getText(eventType, theData, nodeId));
+ case Logger::LL_CRITICAL:
+ critical(EventLogger::getText(m_text, sizeof(m_text),
+ eventType, theData, nodeId));
break;
- case LL_WARNING:
- warning(EventLogger::getText(eventType, theData, nodeId));
+ case Logger::LL_WARNING:
+ warning(EventLogger::getText(m_text, sizeof(m_text),
+ eventType, theData, nodeId));
break;
- case LL_ERROR:
- error(EventLogger::getText(eventType, theData, nodeId));
+ case Logger::LL_ERROR:
+ error(EventLogger::getText(m_text, sizeof(m_text),
+ eventType, theData, nodeId));
break;
- case LL_INFO:
- info(EventLogger::getText(eventType, theData, nodeId));
+ case Logger::LL_INFO:
+ info(EventLogger::getText(m_text, sizeof(m_text),
+ eventType, theData, nodeId));
break;
- case LL_DEBUG:
- debug(EventLogger::getText(eventType, theData, nodeId));
+ case Logger::LL_DEBUG:
+ debug(EventLogger::getText(m_text, sizeof(m_text),
+ eventType, theData, nodeId));
break;
default:
- info(EventLogger::getText(eventType, theData, nodeId));
+ info(EventLogger::getText(m_text, sizeof(m_text),
+ eventType, theData, nodeId));
break;
}
} // if (..
}
-LogLevel&
-EventLogger::getLoglevel()
-{
- return m_logLevel;
-}
-
int
EventLogger::getFilterLevel() const
{
diff --git a/ndb/src/common/debugger/Makefile.am b/ndb/src/common/debugger/Makefile.am
index 0278d0d2ba0..d0fb30717cd 100644
--- a/ndb/src/common/debugger/Makefile.am
+++ b/ndb/src/common/debugger/Makefile.am
@@ -2,7 +2,7 @@ SUBDIRS = signaldata
noinst_LTLIBRARIES = libtrace.la
-libtrace_la_SOURCES = SignalLoggerManager.cpp DebuggerNames.cpp BlockNames.cpp LogLevel.cpp EventLogger.cpp GrepError.cpp
+libtrace_la_SOURCES = SignalLoggerManager.cpp DebuggerNames.cpp BlockNames.cpp EventLogger.cpp GrepError.cpp
include $(top_srcdir)/ndb/config/common.mk.am
include $(top_srcdir)/ndb/config/type_kernel.mk.am
diff --git a/ndb/src/common/mgmcommon/ConfigRetriever.cpp b/ndb/src/common/mgmcommon/ConfigRetriever.cpp
index 109c999852b..b4f2d0b9897 100644
--- a/ndb/src/common/mgmcommon/ConfigRetriever.cpp
+++ b/ndb/src/common/mgmcommon/ConfigRetriever.cpp
@@ -18,6 +18,7 @@
#include <ndb_version.h>
#include <ConfigRetriever.hpp>
+#include <SocketServer.hpp>
#include "LocalConfig.hpp"
#include <NdbSleep.h>
@@ -44,11 +45,14 @@
//****************************************************************************
//****************************************************************************
-ConfigRetriever::ConfigRetriever(Uint32 version, Uint32 node_type) {
-
+ConfigRetriever::ConfigRetriever(LocalConfig &local_config,
+ Uint32 version, Uint32 node_type)
+ : _localConfig(local_config)
+{
m_handle= 0;
m_version = version;
m_node_type = node_type;
+ _ownNodeId = _localConfig._ownNodeId;
}
ConfigRetriever::~ConfigRetriever(){
@@ -63,23 +67,12 @@ ConfigRetriever::~ConfigRetriever(){
//****************************************************************************
//****************************************************************************
-int
-ConfigRetriever::init() {
- if (!_localConfig.init(m_connectString.c_str(),
- _localConfigFileName.c_str())){
-
- setError(CR_ERROR, "error in retrieving contact info for mgmtsrvr");
- _localConfig.printError();
- _localConfig.printUsage();
- return -1;
- }
-
- return _ownNodeId = _localConfig._ownNodeId;
-}
-
int
ConfigRetriever::do_connect(int exit_on_connect_failure){
+ m_mgmd_port= 0;
+ m_mgmd_host= 0;
+
if(!m_handle)
m_handle= ndb_mgm_create_handle();
@@ -96,10 +89,18 @@ ConfigRetriever::do_connect(int exit_on_connect_failure){
BaseString tmp;
for (unsigned int i = 0; i<_localConfig.ids.size(); i++){
MgmtSrvrId * m = &_localConfig.ids[i];
+ DBUG_PRINT("info",("trying %s:%d",
+ m->name.c_str(),
+ m->port));
switch(m->type){
case MgmId_TCP:
tmp.assfmt("%s:%d", m->name.c_str(), m->port);
if (ndb_mgm_connect(m_handle, tmp.c_str()) == 0) {
+ m_mgmd_port= m->port;
+ m_mgmd_host= m->name.c_str();
+ DBUG_PRINT("info",("connected to ndb_mgmd at %s:%d",
+ m_mgmd_host,
+ m_mgmd_port));
return 0;
}
setError(CR_RETRY, ndb_mgm_get_latest_error_desc(m_handle));
@@ -107,9 +108,10 @@ ConfigRetriever::do_connect(int exit_on_connect_failure){
break;
}
}
- if (exit_on_connect_failure)
- return 1;
if(latestErrorType == CR_RETRY){
+ DBUG_PRINT("info",("CR_RETRY"));
+ if (exit_on_connect_failure)
+ return 1;
REPORT_WARNING("Failed to retrieve cluster configuration");
ndbout << "(Cause of failure: " << getErrorString() << ")" << endl;
ndbout << "Attempt " << retry << " of " << retry_max << ". "
@@ -124,6 +126,8 @@ ConfigRetriever::do_connect(int exit_on_connect_failure){
ndb_mgm_destroy_handle(&m_handle);
m_handle= 0;
+ m_mgmd_port= 0;
+ m_mgmd_host= 0;
return -1;
}
@@ -229,16 +233,6 @@ ConfigRetriever::getErrorString(){
return errorString.c_str();
}
-void
-ConfigRetriever::setLocalConfigFileName(const char * localConfigFileName) {
- _localConfigFileName.assign(localConfigFileName ? localConfigFileName : "");
-}
-
-void
-ConfigRetriever::setConnectString(const char * connectString) {
- m_connectString.assign(connectString ? connectString : "");
-}
-
bool
ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf, Uint32 nodeid){
@@ -272,43 +266,15 @@ ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf, Uint32
NdbConfig_SetPath(datadir);
}
- char localhost[MAXHOSTNAMELEN];
- if(NdbHost_GetHostName(localhost) != 0){
- snprintf(buf, 255, "Unable to get own hostname");
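+  // Verify that the configured hostname maps to a local interface by attempting to bind to it.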
+ if (hostname && hostname[0] != 0 &&
+ !SocketServer::tryBind(0,hostname)) {
+    snprintf(buf, 255, "Config hostname(%s) doesn't match a local interface,"
+ " tried to bind, error = %d - %s",
+ hostname, errno, strerror(errno));
setError(CR_ERROR, buf);
return false;
}
- do {
- if(strlen(hostname) == 0)
- break;
-
- if(strcasecmp(hostname, localhost) == 0)
- break;
-
- if(strcasecmp(hostname, "localhost") == 0)
- break;
-
- struct in_addr local, config;
- bool b1 = false, b2 = false, b3 = false;
- b1 = Ndb_getInAddr(&local, localhost) == 0;
- b2 = Ndb_getInAddr(&config, hostname) == 0;
- b3 = memcmp(&local, &config, sizeof(local)) == 0;
-
- if(b1 && b2 && b3)
- break;
-
- b1 = Ndb_getInAddr(&local, "localhost") == 0;
- b3 = memcmp(&local, &config, sizeof(local)) == 0;
- if(b1 && b2 && b3)
- break;
-
- snprintf(buf, 255, "Local hostname(%s) and config hostname(%s) dont match",
- localhost, hostname);
- setError(CR_ERROR, buf);
- return false;
- } while(false);
-
unsigned int _type;
if(ndb_mgm_get_int_parameter(it, CFG_TYPE_OF_SECTION, &_type)){
snprintf(buf, 255, "Unable to get type of node(%d) from config",
@@ -344,7 +310,7 @@ ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf, Uint32
const char * name;
struct in_addr addr;
BaseString tmp;
- if(!iter.get(CFG_TCP_HOSTNAME_1, &name) && strlen(name)){
+ if(!iter.get(CFG_CONNECTION_HOSTNAME_1, &name) && strlen(name)){
if(Ndb_getInAddr(&addr, name) != 0){
tmp.assfmt("Unable to lookup/illegal hostname %s, "
"connection from node %d to node %d",
@@ -354,7 +320,7 @@ ConfigRetriever::verifyConfig(const struct ndb_mgm_configuration * conf, Uint32
}
}
- if(!iter.get(CFG_TCP_HOSTNAME_2, &name) && strlen(name)){
+ if(!iter.get(CFG_CONNECTION_HOSTNAME_2, &name) && strlen(name)){
if(Ndb_getInAddr(&addr, name) != 0){
tmp.assfmt("Unable to lookup/illegal hostname %s, "
"connection from node %d to node %d",
diff --git a/ndb/src/common/mgmcommon/IPCConfig.cpp b/ndb/src/common/mgmcommon/IPCConfig.cpp
index a76c541f3f6..780504d2c62 100644
--- a/ndb/src/common/mgmcommon/IPCConfig.cpp
+++ b/ndb/src/common/mgmcommon/IPCConfig.cpp
@@ -133,7 +133,6 @@ IPCConfig::configureTransporters(TransporterRegistry * theTransporterRegistry){
Uint32 compression;
Uint32 checksum;
if(!tmp->get("SendSignalId", &sendSignalId)) continue;
- if(!tmp->get("Compression", &compression)) continue;
if(!tmp->get("Checksum", &checksum)) continue;
const char * type;
@@ -143,8 +142,6 @@ IPCConfig::configureTransporters(TransporterRegistry * theTransporterRegistry){
SHM_TransporterConfiguration conf;
conf.localNodeId = the_ownId;
conf.remoteNodeId = (nodeId1 != the_ownId ? nodeId1 : nodeId2);
- conf.byteOrder = 0;
- conf.compression = compression;
conf.checksum = checksum;
conf.signalId = sendSignalId;
@@ -164,8 +161,6 @@ IPCConfig::configureTransporters(TransporterRegistry * theTransporterRegistry){
SCI_TransporterConfiguration conf;
conf.localNodeId = the_ownId;
conf.remoteNodeId = (nodeId1 != the_ownId ? nodeId1 : nodeId2);
- conf.byteOrder = 0;
- conf.compression = compression;
conf.checksum = checksum;
conf.signalId = sendSignalId;
@@ -174,18 +169,16 @@ IPCConfig::configureTransporters(TransporterRegistry * theTransporterRegistry){
if(the_ownId == nodeId1){
if(!tmp->get("Node1_NoOfAdapters", &conf.nLocalAdapters)) continue;
- if(!tmp->get("Node2_NoOfAdapters", &conf.nRemoteAdapters)) continue;
if(!tmp->get("Node2_Adapter", 0, &conf.remoteSciNodeId0)) continue;
- if(conf.nRemoteAdapters > 1){
+ if(conf.nLocalAdapters > 1){
if(!tmp->get("Node2_Adapter", 1, &conf.remoteSciNodeId1)) continue;
}
} else {
if(!tmp->get("Node2_NoOfAdapters", &conf.nLocalAdapters)) continue;
- if(!tmp->get("Node1_NoOfAdapters", &conf.nRemoteAdapters)) continue;
if(!tmp->get("Node1_Adapter", 0, &conf.remoteSciNodeId0)) continue;
- if(conf.nRemoteAdapters > 1){
+ if(conf.nLocalAdapters > 1){
if(!tmp->get("Node1_Adapter", 1, &conf.remoteSciNodeId1)) continue;
}
}
@@ -243,8 +236,6 @@ IPCConfig::configureTransporters(TransporterRegistry * theTransporterRegistry){
conf.localHostName = ownHostName;
conf.remoteNodeId = remoteNodeId;
conf.localNodeId = ownNodeId;
- conf.byteOrder = 0;
- conf.compression = compression;
conf.checksum = checksum;
conf.signalId = sendSignalId;
@@ -270,8 +261,6 @@ IPCConfig::configureTransporters(TransporterRegistry * theTransporterRegistry){
conf.localHostName = ownHostName;
conf.remoteNodeId = remoteNodeId;
conf.localNodeId = ownNodeId;
- conf.byteOrder = 0;
- conf.compression = compression;
conf.checksum = checksum;
conf.signalId = sendSignalId;
@@ -344,19 +333,29 @@ Uint32
IPCConfig::configureTransporters(Uint32 nodeId,
const class ndb_mgm_configuration & config,
class TransporterRegistry & tr){
+ DBUG_ENTER("IPCConfig::configureTransporters");
- Uint32 noOfTransportersCreated= 0, server_port= 0;
+ Uint32 noOfTransportersCreated= 0;
ndb_mgm_configuration_iterator iter(config, CFG_SECTION_CONNECTION);
for(iter.first(); iter.valid(); iter.next()){
Uint32 nodeId1, nodeId2, remoteNodeId;
+ const char * remoteHostName= 0, * localHostName= 0;
if(iter.get(CFG_CONNECTION_NODE_1, &nodeId1)) continue;
if(iter.get(CFG_CONNECTION_NODE_2, &nodeId2)) continue;
if(nodeId1 != nodeId && nodeId2 != nodeId) continue;
remoteNodeId = (nodeId == nodeId1 ? nodeId2 : nodeId1);
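+    // Work out which configured hostname is local and which is remote for this connection.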
+ {
+ const char * host1= 0, * host2= 0;
+ iter.get(CFG_CONNECTION_HOSTNAME_1, &host1);
+ iter.get(CFG_CONNECTION_HOSTNAME_2, &host2);
+ localHostName = (nodeId == nodeId1 ? host1 : host2);
+ remoteHostName = (nodeId == nodeId1 ? host2 : host1);
+ }
+
Uint32 sendSignalId = 1;
Uint32 checksum = 1;
if(iter.get(CFG_CONNECTION_SEND_SIGNAL_ID, &sendSignalId)) continue;
@@ -365,71 +364,77 @@ IPCConfig::configureTransporters(Uint32 nodeId,
Uint32 type = ~0;
if(iter.get(CFG_TYPE_OF_SECTION, &type)) continue;
- Uint32 tmp_server_port= 0;
- if(iter.get(CFG_CONNECTION_SERVER_PORT, &tmp_server_port)) break;
+ Uint32 server_port= 0;
+ if(iter.get(CFG_CONNECTION_SERVER_PORT, &server_port)) break;
if (nodeId <= nodeId1 && nodeId <= nodeId2) {
- if (server_port && server_port != tmp_server_port) {
- ndbout << "internal error in config setup of server ports line= " << __LINE__ << endl;
- exit(-1);
- }
- server_port= tmp_server_port;
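+      // The node with the lowest id registers the interface/port it will accept connections on.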
+ tr.add_transporter_interface(localHostName, server_port);
}
-
+ DBUG_PRINT("info", ("Transporter between this node %d and node %d using port %d, signalId %d, checksum %d",
+ nodeId, remoteNodeId, server_port, sendSignalId, checksum));
switch(type){
case CONNECTION_TYPE_SHM:{
SHM_TransporterConfiguration conf;
conf.localNodeId = nodeId;
conf.remoteNodeId = remoteNodeId;
- conf.byteOrder = 0;
- conf.compression = 0;
conf.checksum = checksum;
conf.signalId = sendSignalId;
if(iter.get(CFG_SHM_KEY, &conf.shmKey)) break;
if(iter.get(CFG_SHM_BUFFER_MEM, &conf.shmSize)) break;
- conf.port= tmp_server_port;
+ conf.port= server_port;
if(!tr.createTransporter(&conf)){
+ DBUG_PRINT("error", ("Failed to create SHM Transporter from %d to %d",
+ conf.localNodeId, conf.remoteNodeId));
ndbout << "Failed to create SHM Transporter from: "
<< conf.localNodeId << " to: " << conf.remoteNodeId << endl;
} else {
noOfTransportersCreated++;
}
+ DBUG_PRINT("info", ("Created SHM Transporter using shmkey %d, buf size = %d",
+ conf.shmKey, conf.shmSize));
break;
}
case CONNECTION_TYPE_SCI:{
SCI_TransporterConfiguration conf;
conf.localNodeId = nodeId;
conf.remoteNodeId = remoteNodeId;
- conf.byteOrder = 0;
- conf.compression = 0;
conf.checksum = checksum;
conf.signalId = sendSignalId;
+ conf.port= server_port;
+ conf.localHostName = localHostName;
+ conf.remoteHostName = remoteHostName;
+
if(iter.get(CFG_SCI_SEND_LIMIT, &conf.sendLimit)) break;
if(iter.get(CFG_SCI_BUFFER_MEM, &conf.bufferSize)) break;
-
- if(nodeId == nodeId1){
- if(iter.get(CFG_SCI_NODE1_ADAPTERS, &conf.nLocalAdapters)) break;
- if(iter.get(CFG_SCI_NODE2_ADAPTERS, &conf.nRemoteAdapters)) break;
- if(iter.get(CFG_SCI_NODE2_ADAPTER0, &conf.remoteSciNodeId0)) break;
- if(conf.nRemoteAdapters > 1){
- if(iter.get(CFG_SCI_NODE2_ADAPTER1, &conf.remoteSciNodeId1)) break;
- }
+ if (nodeId == nodeId1) {
+ if(iter.get(CFG_SCI_HOST2_ID_0, &conf.remoteSciNodeId0)) break;
+ if(iter.get(CFG_SCI_HOST2_ID_1, &conf.remoteSciNodeId1)) break;
} else {
- if(iter.get(CFG_SCI_NODE2_ADAPTERS, &conf.nLocalAdapters)) break;
- if(iter.get(CFG_SCI_NODE1_ADAPTERS, &conf.nRemoteAdapters)) break;
- if(iter.get(CFG_SCI_NODE1_ADAPTER0, &conf.remoteSciNodeId0)) break;
- if(conf.nRemoteAdapters > 1){
- if(iter.get(CFG_SCI_NODE1_ADAPTER1, &conf.remoteSciNodeId1)) break;
- }
+ if(iter.get(CFG_SCI_HOST1_ID_0, &conf.remoteSciNodeId0)) break;
+ if(iter.get(CFG_SCI_HOST1_ID_1, &conf.remoteSciNodeId1)) break;
}
-
- if(!tr.createTransporter(&conf)){
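+      // A second remote SCI node id of 0 means only one adapter is configured.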
+ if (conf.remoteSciNodeId1 == 0) {
+ conf.nLocalAdapters = 1;
+ } else {
+ conf.nLocalAdapters = 2;
+ }
+ if(!tr.createTransporter(&conf)){
+ DBUG_PRINT("error", ("Failed to create SCI Transporter from %d to %d",
+ conf.localNodeId, conf.remoteNodeId));
ndbout << "Failed to create SCI Transporter from: "
<< conf.localNodeId << " to: " << conf.remoteNodeId << endl;
} else {
+ DBUG_PRINT("info", ("Created SCI Transporter: Adapters = %d, remote SCI node id %d",
+ conf.nLocalAdapters, conf.remoteSciNodeId0));
+ DBUG_PRINT("info", ("Host 1 = %s, Host 2 = %s, sendLimit = %d, buf size = %d",
+ conf.localHostName, conf.remoteHostName, conf.sendLimit, conf.bufferSize));
+ if (conf.nLocalAdapters > 1) {
+ DBUG_PRINT("info", ("Fault-tolerant with 2 Remote Adapters, second remote SCI node id = %d",
+ conf.remoteSciNodeId1));
+ }
noOfTransportersCreated++;
continue;
}
@@ -437,14 +442,10 @@ IPCConfig::configureTransporters(Uint32 nodeId,
case CONNECTION_TYPE_TCP:{
TCP_TransporterConfiguration conf;
- const char * host1, * host2;
- if(iter.get(CFG_TCP_HOSTNAME_1, &host1)) break;
- if(iter.get(CFG_TCP_HOSTNAME_2, &host2)) break;
-
if(iter.get(CFG_TCP_SEND_BUFFER_SIZE, &conf.sendBufferSize)) break;
if(iter.get(CFG_TCP_RECEIVE_BUFFER_SIZE, &conf.maxReceiveSize)) break;
- conf.port= tmp_server_port;
+ conf.port= server_port;
const char * proxy;
if (!iter.get(CFG_TCP_PROXY, &proxy)) {
if (strlen(proxy) > 0 && nodeId2 == nodeId) {
@@ -455,10 +456,8 @@ IPCConfig::configureTransporters(Uint32 nodeId,
conf.localNodeId = nodeId;
conf.remoteNodeId = remoteNodeId;
- conf.localHostName = (nodeId == nodeId1 ? host1 : host2);
- conf.remoteHostName = (nodeId == nodeId1 ? host2 : host1);
- conf.byteOrder = 0;
- conf.compression = 0;
+ conf.localHostName = localHostName;
+ conf.remoteHostName = remoteHostName;
conf.checksum = checksum;
conf.signalId = sendSignalId;
@@ -468,23 +467,20 @@ IPCConfig::configureTransporters(Uint32 nodeId,
} else {
noOfTransportersCreated++;
}
+ DBUG_PRINT("info", ("Created TCP Transporter: sendBufferSize = %d, maxReceiveSize = %d",
+ conf.sendBufferSize, conf.maxReceiveSize));
+ break;
case CONNECTION_TYPE_OSE:{
OSE_TransporterConfiguration conf;
-
- const char * host1, * host2;
- if(iter.get(CFG_OSE_HOSTNAME_1, &host1)) break;
- if(iter.get(CFG_OSE_HOSTNAME_2, &host2)) break;
-
+
if(iter.get(CFG_OSE_PRIO_A_SIZE, &conf.prioASignalSize)) break;
if(iter.get(CFG_OSE_PRIO_B_SIZE, &conf.prioBSignalSize)) break;
if(iter.get(CFG_OSE_RECEIVE_ARRAY_SIZE, &conf.receiveBufferSize)) break;
conf.localNodeId = nodeId;
conf.remoteNodeId = remoteNodeId;
- conf.localHostName = (nodeId == nodeId1 ? host1 : host2);
- conf.remoteHostName = (nodeId == nodeId1 ? host2 : host1);
- conf.byteOrder = 0;
- conf.compression = 0;
+ conf.localHostName = localHostName;
+ conf.remoteHostName = remoteHostName;
conf.checksum = checksum;
conf.signalId = sendSignalId;
@@ -502,9 +498,6 @@ IPCConfig::configureTransporters(Uint32 nodeId,
}
}
}
-
- tr.m_service_port= server_port;
-
- return noOfTransportersCreated;
+ DBUG_RETURN(noOfTransportersCreated);
}
diff --git a/ndb/src/common/portlib/NdbTCP.cpp b/ndb/src/common/portlib/NdbTCP.cpp
index 8448d64222f..a711a586203 100644
--- a/ndb/src/common/portlib/NdbTCP.cpp
+++ b/ndb/src/common/portlib/NdbTCP.cpp
@@ -15,6 +15,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#include <ndb_global.h>
#include <NdbMutex.h>
#include <NdbTCP.h>
@@ -27,13 +28,14 @@ static NdbMutex LOCK_gethostbyname = NDB_MUTEX_INITIALIZER;
extern "C"
int
Ndb_getInAddr(struct in_addr * dst, const char *address) {
+ DBUG_ENTER("Ndb_getInAddr");
struct hostent * hostPtr;
NdbMutex_Lock(&LOCK_gethostbyname);
hostPtr = gethostbyname(address);
if (hostPtr != NULL) {
dst->s_addr = ((struct in_addr *) *hostPtr->h_addr_list)->s_addr;
NdbMutex_Unlock(&LOCK_gethostbyname);
- return 0;
+ DBUG_RETURN(0);
}
NdbMutex_Unlock(&LOCK_gethostbyname);
@@ -47,9 +49,11 @@ Ndb_getInAddr(struct in_addr * dst, const char *address) {
#endif
)
{
- return 0;
+ DBUG_RETURN(0);
}
- return -1;
+ DBUG_PRINT("error",("inet_addr(%s) - %d - %s",
+ address, errno, strerror(errno)));
+ DBUG_RETURN(-1);
}
#if 0
diff --git a/ndb/src/common/transporter/Makefile.am b/ndb/src/common/transporter/Makefile.am
index 218b261606d..9d91a210d46 100644
--- a/ndb/src/common/transporter/Makefile.am
+++ b/ndb/src/common/transporter/Makefile.am
@@ -13,7 +13,7 @@ EXTRA_libtransporter_la_SOURCES = SHM_Transporter.cpp SHM_Transporter.unix.cpp S
libtransporter_la_LIBADD = @ndb_transporter_opt_objs@
libtransporter_la_DEPENDENCIES = @ndb_transporter_opt_objs@
-INCLUDES_LOC = -I$(top_srcdir)/ndb/include/kernel -I$(top_srcdir)/ndb/include/transporter
+INCLUDES_LOC = -I$(top_srcdir)/ndb/include/kernel -I$(top_srcdir)/ndb/include/transporter @NDB_SCI_INCLUDES@
include $(top_srcdir)/ndb/config/common.mk.am
include $(top_srcdir)/ndb/config/type_util.mk.am
diff --git a/ndb/src/common/transporter/Packer.cpp b/ndb/src/common/transporter/Packer.cpp
index 645517a4b1a..9eba335330d 100644
--- a/ndb/src/common/transporter/Packer.cpp
+++ b/ndb/src/common/transporter/Packer.cpp
@@ -21,6 +21,7 @@
#include <TransporterCallback.hpp>
#include <RefConvert.hpp>
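+// Cap on the number of signals unpacked in one call; the unpack loops below stop after this many.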
+#define MAX_RECEIVED_SIGNALS 1024
Uint32
TransporterRegistry::unpack(Uint32 * readPtr,
Uint32 sizeOfData,
@@ -30,12 +31,15 @@ TransporterRegistry::unpack(Uint32 * readPtr,
LinearSectionPtr ptr[3];
Uint32 usedData = 0;
-
+ Uint32 loop_count = 0;
+
if(state == NoHalt || state == HaltOutput){
- while(sizeOfData >= 4 + sizeof(Protocol6)){
+ while ((sizeOfData >= 4 + sizeof(Protocol6)) &&
+ (loop_count < MAX_RECEIVED_SIGNALS)) {
Uint32 word1 = readPtr[0];
Uint32 word2 = readPtr[1];
Uint32 word3 = readPtr[2];
+ loop_count++;
#if 0
if(Protocol6::getByteOrder(word1) != MY_OWN_BYTE_ORDER){
@@ -112,10 +116,12 @@ TransporterRegistry::unpack(Uint32 * readPtr,
} else {
/** state = HaltIO || state == HaltInput */
- while(sizeOfData >= 4 + sizeof(Protocol6)){
+ while ((sizeOfData >= 4 + sizeof(Protocol6)) &&
+ (loop_count < MAX_RECEIVED_SIGNALS)) {
Uint32 word1 = readPtr[0];
Uint32 word2 = readPtr[1];
Uint32 word3 = readPtr[2];
+ loop_count++;
#if 0
if(Protocol6::getByteOrder(word1) != MY_OWN_BYTE_ORDER){
@@ -208,12 +214,13 @@ TransporterRegistry::unpack(Uint32 * readPtr,
IOState state) {
static SignalHeader signalHeader;
static LinearSectionPtr ptr[3];
+ Uint32 loop_count = 0;
if(state == NoHalt || state == HaltOutput){
- while(readPtr < eodPtr){
+ while ((readPtr < eodPtr) && (loop_count < MAX_RECEIVED_SIGNALS)) {
Uint32 word1 = readPtr[0];
Uint32 word2 = readPtr[1];
Uint32 word3 = readPtr[2];
-
+ loop_count++;
#if 0
if(Protocol6::getByteOrder(word1) != MY_OWN_BYTE_ORDER){
//Do funky stuff
@@ -280,11 +287,11 @@ TransporterRegistry::unpack(Uint32 * readPtr,
} else {
/** state = HaltIO || state == HaltInput */
- while(readPtr < eodPtr){
+ while ((readPtr < eodPtr) && (loop_count < MAX_RECEIVED_SIGNALS)) {
Uint32 word1 = readPtr[0];
Uint32 word2 = readPtr[1];
Uint32 word3 = readPtr[2];
-
+ loop_count++;
#if 0
if(Protocol6::getByteOrder(word1) != MY_OWN_BYTE_ORDER){
//Do funky stuff
diff --git a/ndb/src/common/transporter/SCI_Transporter.cpp b/ndb/src/common/transporter/SCI_Transporter.cpp
index c52c8a9d8c0..73fbb064599 100644
--- a/ndb/src/common/transporter/SCI_Transporter.cpp
+++ b/ndb/src/common/transporter/SCI_Transporter.cpp
@@ -24,23 +24,30 @@
#include "TransporterInternalDefinitions.hpp"
#include <TransporterCallback.hpp>
-
+
+#include <InputStream.hpp>
+#include <OutputStream.hpp>
+
#define FLAGS 0
-
-SCI_Transporter::SCI_Transporter(Uint32 packetSize,
+#define DEBUG_TRANSPORTER
+SCI_Transporter::SCI_Transporter(TransporterRegistry &t_reg,
+ const char *lHostName,
+ const char *rHostName,
+ int r_port,
+ Uint32 packetSize,
Uint32 bufferSize,
Uint32 nAdapters,
Uint16 remoteSciNodeId0,
Uint16 remoteSciNodeId1,
NodeId _localNodeId,
NodeId _remoteNodeId,
- int byte_order,
- bool compr,
bool chksm,
bool signalId,
Uint32 reportFreq) :
- Transporter(_localNodeId, _remoteNodeId, byte_order, compr, chksm, signalId)
-{
+ Transporter(t_reg, lHostName, rHostName, r_port, _localNodeId,
+ _remoteNodeId, 0, false, chksm, signalId)
+{
+ DBUG_ENTER("SCI_Transporter::SCI_Transporter");
m_PacketSize = (packetSize + 3)/4 ;
m_BufferSize = bufferSize;
m_sendBuffer.m_buffer = NULL;
@@ -56,10 +63,6 @@ SCI_Transporter::SCI_Transporter(Uint32 packetSize,
m_initLocal=false;
- m_remoteNodes= new Uint16[m_numberOfRemoteNodes];
- if(m_remoteNodes == NULL) {
- //DO WHAT??
- }
m_swapCounter=0;
m_failCounter=0;
m_remoteNodes[0]=remoteSciNodeId0;
@@ -94,20 +97,19 @@ SCI_Transporter::SCI_Transporter(Uint32 packetSize,
i4096=0;
i4097=0;
#endif
-
+ DBUG_VOID_RETURN;
}
void SCI_Transporter::disconnectImpl()
{
+ DBUG_ENTER("SCI_Transporter::disconnectImpl");
sci_error_t err;
if(m_mapped){
setDisconnect();
-#ifdef DEBUG_TRANSPORTER
- ndbout << "DisconnectImpl " << getConnectionStatus() << endl;
- ndbout << "remote node " << remoteNodeId << endl;
-#endif
+ DBUG_PRINT("info", ("connect status = %d, remote node = %d",
+ (int)getConnectionStatus(), remoteNodeId));
disconnectRemote();
disconnectLocal();
}
@@ -124,65 +126,56 @@ void SCI_Transporter::disconnectImpl()
SCIClose(sciAdapters[i].scidesc, FLAGS, &err);
if(err != SCI_ERR_OK) {
- reportError(callbackObj, localNodeId, TE_SCI_UNABLE_TO_CLOSE_CHANNEL);
-#ifdef DEBUG_TRANSPORTER
- fprintf(stderr,
- "\nCannot close channel to the driver. Error code 0x%x",
- err);
-#endif
- }
+ report_error(TE_SCI_UNABLE_TO_CLOSE_CHANNEL);
+ DBUG_PRINT("error", ("Cannot close channel to the driver. Error code 0x%x",
+ err));
+ }
}
}
m_sciinit=false;
#ifdef DEBUG_TRANSPORTER
- ndbout << "total: " << i1024+ i10242048 + i2048+i2049 << endl;
+ ndbout << "total: " << i1024+ i10242048 + i2048+i2049 << endl;
ndbout << "<1024: " << i1024 << endl;
ndbout << "1024-2047: " << i10242048 << endl;
ndbout << "==2048: " << i2048 << endl;
ndbout << "2049-4096: " << i20484096 << endl;
ndbout << "==4096: " << i4096 << endl;
ndbout << ">4096: " << i4097 << endl;
-
#endif
-
+ DBUG_VOID_RETURN;
}
bool SCI_Transporter::initTransporter() {
- if(m_BufferSize < (2*MAX_MESSAGE_SIZE)){
- m_BufferSize = 2 * MAX_MESSAGE_SIZE;
+ DBUG_ENTER("SCI_Transporter::initTransporter");
+ if(m_BufferSize < (2*MAX_MESSAGE_SIZE + 4096)){
+ m_BufferSize = 2 * MAX_MESSAGE_SIZE + 4096;
}
- // Allocate buffers for sending
- Uint32 sz = 0;
- if(m_BufferSize < (m_PacketSize * 4)){
- sz = m_BufferSize + MAX_MESSAGE_SIZE;
- } else {
- /**
- * 3 packages
- */
- sz = (m_PacketSize * 4) * 3 + MAX_MESSAGE_SIZE;
- }
+  // Allocate buffers for sending: the send buffer size plus MAX_MESSAGE_SIZE extra
+  // bytes to avoid having to send twice when a large message comes around. Send
+  // buffer size is measured in words.
+  Uint32 sz = 4 * m_PacketSize + MAX_MESSAGE_SIZE;
- m_sendBuffer.m_bufferSize = 4 * ((sz + 3) / 4);
- m_sendBuffer.m_buffer = new Uint32[m_sendBuffer.m_bufferSize / 4];
+ m_sendBuffer.m_sendBufferSize = 4 * ((sz + 3) / 4);
+ m_sendBuffer.m_buffer = new Uint32[m_sendBuffer.m_sendBufferSize / 4];
m_sendBuffer.m_dataSize = 0;
-
+
+ DBUG_PRINT("info", ("Created SCI Send Buffer with buffer size %d and packet size %d",
+ m_sendBuffer.m_sendBufferSize, m_PacketSize * 4));
if(!getLinkStatus(m_ActiveAdapterId) ||
- !getLinkStatus(m_StandbyAdapterId)) {
-#ifdef DEBUG_TRANSPORTER
- ndbout << "The link is not fully operational. " << endl;
- ndbout << "Check the cables and the switches" << endl;
-#endif
+ (m_adapters > 1 &&
+ !getLinkStatus(m_StandbyAdapterId))) {
+ DBUG_PRINT("error", ("The link is not fully operational. Check the cables and the switches"));
//reportDisconnect(remoteNodeId, 0);
//doDisconnect();
//NDB should terminate
- reportError(callbackObj, localNodeId, TE_SCI_LINK_ERROR);
- return false;
+ report_error(TE_SCI_LINK_ERROR);
+ DBUG_RETURN(false);
}
- return true;
+ DBUG_RETURN(true);
} // initTransporter()
@@ -218,10 +211,8 @@ bool SCI_Transporter::getLinkStatus(Uint32 adapterNo)
SCIQuery(SCI_Q_ADAPTER,(void*)(&queryAdapter),(Uint32)NULL,&error);
if(error != SCI_ERR_OK) {
-#ifdef DEBUG_TRANSPORTER
- ndbout << "error querying adapter " << endl;
-#endif
- return false;
+ DBUG_PRINT("error", ("error %d querying adapter", error));
+ return false;
}
if(linkstatus<=0)
return false;
@@ -231,6 +222,7 @@ bool SCI_Transporter::getLinkStatus(Uint32 adapterNo)
sci_error_t SCI_Transporter::initLocalSegment() {
+ DBUG_ENTER("SCI_Transporter::initLocalSegment");
Uint32 segmentSize = m_BufferSize;
Uint32 offset = 0;
sci_error_t err;
@@ -238,16 +230,12 @@ sci_error_t SCI_Transporter::initLocalSegment() {
for(Uint32 i=0; i<m_adapters ; i++) {
SCIOpen(&(sciAdapters[i].scidesc), FLAGS, &err);
sciAdapters[i].localSciNodeId=getLocalNodeId(i);
-#ifdef DEBUG_TRANSPORTER
- ndbout_c("SCInode iD %d adapter %d\n",
- sciAdapters[i].localSciNodeId, i);
-#endif
+ DBUG_PRINT("info", ("SCInode iD %d adapter %d\n",
+ sciAdapters[i].localSciNodeId, i));
if(err != SCI_ERR_OK) {
-#ifdef DEBUG_TRANSPORTER
- ndbout_c("\nCannot open an SCI virtual device. Error code 0x%x",
- err);
-#endif
- return err;
+ DBUG_PRINT("error", ("Cannot open an SCI virtual device. Error code 0x%x",
+ err));
+ DBUG_RETURN(err);
}
}
}
@@ -264,12 +252,11 @@ sci_error_t SCI_Transporter::initLocalSegment() {
&err);
if(err != SCI_ERR_OK) {
- return err;
+ DBUG_PRINT("error", ("Error creating segment, err = 0x%x", err));
+ DBUG_RETURN(err);
} else {
-#ifdef DEBUG_TRANSPORTER
- ndbout << "created segment id : "
- << hostSegmentId(localNodeId, remoteNodeId) << endl;
-#endif
+ DBUG_PRINT("info", ("created segment id : %d",
+ hostSegmentId(localNodeId, remoteNodeId)));
}
/** Prepare the segment*/
@@ -280,11 +267,9 @@ sci_error_t SCI_Transporter::initLocalSegment() {
&err);
if(err != SCI_ERR_OK) {
-#ifdef DEBUG_TRANSPORTER
- ndbout_c("Local Segment is not accessible by an SCI adapter.");
- ndbout_c("Error code 0x%x\n", err);
-#endif
- return err;
+ DBUG_PRINT("error", ("Local Segment is not accessible by an SCI adapter. Error code 0x%x\n",
+ err));
+ DBUG_RETURN(err);
}
}
@@ -301,14 +286,10 @@ sci_error_t SCI_Transporter::initLocalSegment() {
if(err != SCI_ERR_OK) {
-
-#ifdef DEBUG_TRANSPORTER
- fprintf(stderr, "\nCannot map area of size %d. Error code 0x%x",
- segmentSize,err);
- ndbout << "initLocalSegment does a disConnect" << endl;
-#endif
+ DBUG_PRINT("error", ("Cannot map area of size %d. Error code 0x%x",
+ segmentSize,err));
doDisconnect();
- return err;
+ DBUG_RETURN(err);
}
@@ -320,18 +301,16 @@ sci_error_t SCI_Transporter::initLocalSegment() {
&err);
if(err != SCI_ERR_OK) {
-#ifdef DEBUG_TRANSPORTER
- ndbout_c("\nLocal Segment is not available for remote connections.");
- ndbout_c("Error code 0x%x\n", err);
-#endif
- return err;
+ DBUG_PRINT("error", ("Local Segment is not available for remote connections. Error code 0x%x\n",
+ err));
+ DBUG_RETURN(err);
}
}
setupLocalSegment();
- return err;
+ DBUG_RETURN(err);
} // initLocalSegment()
@@ -345,7 +324,7 @@ bool SCI_Transporter::doSend() {
Uint32 retry=0;
const char * const sendPtr = (char*)m_sendBuffer.m_buffer;
- const Uint32 sizeToSend = m_sendBuffer.m_dataSize;
+ const Uint32 sizeToSend = 4 * m_sendBuffer.m_dataSize; //Convert to number of bytes
if (sizeToSend > 0){
#ifdef DEBUG_TRANSPORTER
@@ -363,15 +342,19 @@ bool SCI_Transporter::doSend() {
i4097++;
#endif
if(startSequence(m_ActiveAdapterId)!=SCI_ERR_OK) {
-#ifdef DEBUG_TRANSPORTER
- ndbout << "Start sequence failed" << endl;
-#endif
- reportError(callbackObj, remoteNodeId, TE_SCI_UNABLE_TO_START_SEQUENCE);
+ DBUG_PRINT("error", ("Start sequence failed"));
+ report_error(TE_SCI_UNABLE_TO_START_SEQUENCE);
return false;
}
- tryagain:
+ tryagain:
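+  // Bound the number of retransfer attempts; give up and report an error after a few tries.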
+ retry++;
+ if (retry > 3) {
+ DBUG_PRINT("error", ("SCI Transfer failed"));
+ report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
+ return false;
+ }
Uint32 * insertPtr = (Uint32 *)
(m_TargetSegm[m_ActiveAdapterId].writer)->getWritePtr(sizeToSend);
@@ -390,44 +373,37 @@ bool SCI_Transporter::doSend() {
&err);
+ if (err != SCI_ERR_OK) {
if(err == SCI_ERR_OUT_OF_RANGE) {
-#ifdef DEBUG_TRANSPORTER
- ndbout << "Data transfer : out of range error \n" << endl;
-#endif
+ DBUG_PRINT("error", ("Data transfer : out of range error"));
goto tryagain;
}
if(err == SCI_ERR_SIZE_ALIGNMENT) {
-#ifdef DEBUG_TRANSPORTER
- ndbout << "Data transfer : aligne\n" << endl;
-#endif
+ DBUG_PRINT("error", ("Data transfer : alignment error"));
+ DBUG_PRINT("info", ("sendPtr 0x%x, sizeToSend = %d", sendPtr, sizeToSend));
goto tryagain;
}
if(err == SCI_ERR_OFFSET_ALIGNMENT) {
-#ifdef DEBUG_TRANSPORTER
- ndbout << "Data transfer : offset alignment\n" << endl;
-#endif
+ DBUG_PRINT("error", ("Data transfer : offset alignment"));
goto tryagain;
- }
+ }
if(err == SCI_ERR_TRANSFER_FAILED) {
//(m_TargetSegm[m_StandbyAdapterId].writer)->heavyLock();
if(getLinkStatus(m_ActiveAdapterId)) {
- retry++;
- if(retry>3) {
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
- return false;
- }
goto tryagain;
}
+ if (m_adapters == 1) {
+ DBUG_PRINT("error", ("SCI Transfer failed"));
+ report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
+ return false;
+ }
m_failCounter++;
Uint32 temp=m_ActiveAdapterId;
switch(m_swapCounter) {
case 0:
/**swap from active (0) to standby (1)*/
if(getLinkStatus(m_StandbyAdapterId)) {
-#ifdef DEBUG_TRANSPORTER
- ndbout << "Swapping from 0 to 1 " << endl;
-#endif
+	    DBUG_PRINT("info", ("Swapping from adapter 0 to 1"));
failoverShmWriter();
SCIStoreBarrier(m_TargetSegm[m_StandbyAdapterId].sequence,0);
m_ActiveAdapterId=m_StandbyAdapterId;
@@ -436,26 +412,21 @@ bool SCI_Transporter::doSend() {
FLAGS,
&err);
if(err!=SCI_ERR_OK) {
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNABLE_TO_REMOVE_SEQUENCE);
+ report_error(TE_SCI_UNABLE_TO_REMOVE_SEQUENCE);
+ DBUG_PRINT("error", ("Unable to remove sequence"));
return false;
}
if(startSequence(m_ActiveAdapterId)!=SCI_ERR_OK) {
-#ifdef DEBUG_TRANSPORTER
- ndbout << "Start sequence failed" << endl;
-#endif
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNABLE_TO_START_SEQUENCE);
+ DBUG_PRINT("error", ("Start sequence failed"));
+ report_error(TE_SCI_UNABLE_TO_START_SEQUENCE);
return false;
}
m_swapCounter++;
-#ifdef DEBUG_TRANSPORTER
- ndbout << "failover complete.." << endl;
-#endif
+ DBUG_PRINT("info", ("failover complete"));
goto tryagain;
} else {
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
+ report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
+ DBUG_PRINT("error", ("SCI Transfer failed"));
return false;
}
return false;
@@ -468,20 +439,15 @@ bool SCI_Transporter::doSend() {
failoverShmWriter();
m_ActiveAdapterId=m_StandbyAdapterId;
m_StandbyAdapterId=temp;
-#ifdef DEBUG_TRANSPORTER
- ndbout << "Swapping from 1 to 0 " << endl;
-#endif
+ DBUG_PRINT("info", ("Swapping from 1 to 0"));
if(createSequence(m_ActiveAdapterId)!=SCI_ERR_OK) {
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNABLE_TO_CREATE_SEQUENCE);
+ DBUG_PRINT("error", ("Unable to create sequence"));
+ report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE);
return false;
}
if(startSequence(m_ActiveAdapterId)!=SCI_ERR_OK) {
-#ifdef DEBUG_TRANSPORTER
- ndbout << "startSequence failed... disconnecting" << endl;
-#endif
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNABLE_TO_START_SEQUENCE);
+ DBUG_PRINT("error", ("startSequence failed... disconnecting"));
+ report_error(TE_SCI_UNABLE_TO_START_SEQUENCE);
return false;
}
@@ -489,37 +455,36 @@ bool SCI_Transporter::doSend() {
, FLAGS,
&err);
if(err!=SCI_ERR_OK) {
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNABLE_TO_REMOVE_SEQUENCE);
+ DBUG_PRINT("error", ("Unable to remove sequence"));
+ report_error(TE_SCI_UNABLE_TO_REMOVE_SEQUENCE);
return false;
}
if(createSequence(m_StandbyAdapterId)!=SCI_ERR_OK) {
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNABLE_TO_CREATE_SEQUENCE);
+ DBUG_PRINT("error", ("Unable to create sequence on standby"));
+ report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE);
return false;
}
m_swapCounter=0;
-#ifdef DEBUG_TRANSPORTER
- ndbout << "failover complete.." << endl;
-#endif
+ DBUG_PRINT("info", ("failover complete.."));
goto tryagain;
} else {
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
+ DBUG_PRINT("error", ("Unrecoverable data transfer error"));
+ report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
return false;
}
break;
default:
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
+ DBUG_PRINT("error", ("Unrecoverable data transfer error"));
+ report_error(TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR);
return false;
break;
}
+ }
} else {
SHM_Writer * writer = (m_TargetSegm[m_ActiveAdapterId].writer);
writer->updateWritePtr(sizeToSend);
@@ -535,13 +500,10 @@ bool SCI_Transporter::doSend() {
/**
* If we end up here, the SCI segment is full.
*/
-#ifdef DEBUG_TRANSPORTER
- ndbout << "the segment is full for some reason" << endl;
-#endif
+ DBUG_PRINT("error", ("the segment is full for some reason"));
return false;
} //if
}
-
return true;
} // doSend()
@@ -557,11 +519,8 @@ void SCI_Transporter::failoverShmWriter() {
void SCI_Transporter::setupLocalSegment()
{
-
+ DBUG_ENTER("SCI_Transporter::setupLocalSegment");
Uint32 sharedSize = 0;
- sharedSize += 16; //SHM_Reader::getSharedSize();
- sharedSize += 16; //SHM_Writer::getSharedSize();
- sharedSize += 32; //SHM_Writer::getSharedSize();
sharedSize =4096; //start of the buffer is page aligend
Uint32 sizeOfBuffer = m_BufferSize;
@@ -570,27 +529,15 @@ void SCI_Transporter::setupLocalSegment()
Uint32 * localReadIndex =
(Uint32*)m_SourceSegm[m_ActiveAdapterId].mappedMemory;
- Uint32 * localWriteIndex =
- (Uint32*)(localReadIndex+ 1);
-
- Uint32 * localEndOfDataIndex = (Uint32*)
- (localReadIndex + 2);
-
+ Uint32 * localWriteIndex = (Uint32*)(localReadIndex+ 1);
m_localStatusFlag = (Uint32*)(localReadIndex + 3);
- Uint32 * sharedLockIndex = (Uint32*)
- (localReadIndex + 4);
-
- Uint32 * sharedHeavyLock = (Uint32*)
- (localReadIndex + 5);
-
char * localStartOfBuf = (char*)
((char*)m_SourceSegm[m_ActiveAdapterId].mappedMemory+sharedSize);
-
- * localReadIndex = * localWriteIndex = 0;
- * localEndOfDataIndex = sizeOfBuffer -1;
-
+ * localReadIndex = 0;
+ * localWriteIndex = 0;
+
const Uint32 slack = MAX_MESSAGE_SIZE;
reader = new SHM_Reader(localStartOfBuf,
@@ -599,178 +546,240 @@ void SCI_Transporter::setupLocalSegment()
localReadIndex,
localWriteIndex);
- * localReadIndex = 0;
- * localWriteIndex = 0;
-
reader->clear();
+ DBUG_VOID_RETURN;
} //setupLocalSegment
void SCI_Transporter::setupRemoteSegment()
{
+ DBUG_ENTER("SCI_Transporter::setupRemoteSegment");
Uint32 sharedSize = 0;
- sharedSize += 16; //SHM_Reader::getSharedSize();
- sharedSize += 16; //SHM_Writer::getSharedSize();
- sharedSize += 32;
- sharedSize =4096; //start of the buffer is page aligend
+ sharedSize =4096; //start of the buffer is page aligned
Uint32 sizeOfBuffer = m_BufferSize;
+ const Uint32 slack = MAX_MESSAGE_SIZE;
sizeOfBuffer -= sharedSize;
- Uint32 * segPtr = (Uint32*) m_TargetSegm[m_StandbyAdapterId].mappedMemory ;
-
- Uint32 * remoteReadIndex2 = (Uint32*)segPtr;
- Uint32 * remoteWriteIndex2 = (Uint32*) (segPtr + 1);
- Uint32 * remoteEndOfDataIndex2 = (Uint32*) (segPtr + 2);
- Uint32 * sharedLockIndex2 = (Uint32*) (segPtr + 3);
- m_remoteStatusFlag2 = (Uint32*)(segPtr + 4);
- Uint32 * sharedHeavyLock2 = (Uint32*) (segPtr + 5);
-
-
- char * remoteStartOfBuf2 = ( char*)((char *)segPtr+sharedSize);
-
- segPtr = (Uint32*) m_TargetSegm[m_ActiveAdapterId].mappedMemory ;
+
+ Uint32 *segPtr = (Uint32*) m_TargetSegm[m_ActiveAdapterId].mappedMemory ;
Uint32 * remoteReadIndex = (Uint32*)segPtr;
- Uint32 * remoteWriteIndex = (Uint32*) (segPtr + 1);
- Uint32 * remoteEndOfDataIndex = (Uint32*) (segPtr + 2);
- Uint32 * sharedLockIndex = (Uint32*) (segPtr + 3);
- m_remoteStatusFlag = (Uint32*)(segPtr + 4);
- Uint32 * sharedHeavyLock = (Uint32*) (segPtr + 5);
+ Uint32 * remoteWriteIndex = (Uint32*)(segPtr + 1);
+ m_remoteStatusFlag = (Uint32*)(segPtr + 3);
char * remoteStartOfBuf = ( char*)((char*)segPtr+(sharedSize));
- * remoteReadIndex = * remoteWriteIndex = 0;
- * remoteReadIndex2 = * remoteWriteIndex2 = 0;
- * remoteEndOfDataIndex = sizeOfBuffer - 1;
- * remoteEndOfDataIndex2 = sizeOfBuffer - 1;
-
- /**
- * setup two writers. writer2 is used to mirror the changes of
- * writer on the standby
- * segment, so that in the case of a failover, we can switch
- * to the stdby seg. quickly.*
- */
- const Uint32 slack = MAX_MESSAGE_SIZE;
-
writer = new SHM_Writer(remoteStartOfBuf,
sizeOfBuffer,
slack,
remoteReadIndex,
remoteWriteIndex);
- writer2 = new SHM_Writer(remoteStartOfBuf2,
- sizeOfBuffer,
- slack,
- remoteReadIndex2,
- remoteWriteIndex2);
-
- * remoteReadIndex = 0;
- * remoteWriteIndex = 0;
-
writer->clear();
- writer2->clear();
m_TargetSegm[0].writer=writer;
- m_TargetSegm[1].writer=writer2;
m_sendBuffer.m_forceSendLimit = writer->getBufferSize();
if(createSequence(m_ActiveAdapterId)!=SCI_ERR_OK) {
- reportThreadError(remoteNodeId, TE_SCI_UNABLE_TO_CREATE_SEQUENCE);
+ report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE);
+ DBUG_PRINT("error", ("Unable to create sequence on active"));
doDisconnect();
}
- if(createSequence(m_StandbyAdapterId)!=SCI_ERR_OK) {
- reportThreadError(remoteNodeId, TE_SCI_UNABLE_TO_CREATE_SEQUENCE);
- doDisconnect();
- }
-
-
+ if (m_adapters > 1) {
+ segPtr = (Uint32*) m_TargetSegm[m_StandbyAdapterId].mappedMemory ;
+
+ Uint32 * remoteReadIndex2 = (Uint32*)segPtr;
+ Uint32 * remoteWriteIndex2 = (Uint32*) (segPtr + 1);
+ m_remoteStatusFlag2 = (Uint32*)(segPtr + 3);
+
+ char * remoteStartOfBuf2 = ( char*)((char *)segPtr+sharedSize);
+
+ /**
+ * setup a writer. writer2 is used to mirror the changes of
+ * writer on the standby
+ * segment, so that in the case of a failover, we can switch
+ * to the stdby seg. quickly.*
+ */
+ writer2 = new SHM_Writer(remoteStartOfBuf2,
+ sizeOfBuffer,
+ slack,
+ remoteReadIndex2,
+ remoteWriteIndex2);
+
+ * remoteReadIndex = 0;
+ * remoteWriteIndex = 0;
+ writer2->clear();
+ m_TargetSegm[1].writer=writer2;
+ if(createSequence(m_StandbyAdapterId)!=SCI_ERR_OK) {
+ report_error(TE_SCI_UNABLE_TO_CREATE_SEQUENCE);
+ DBUG_PRINT("error", ("Unable to create sequence on standby"));
+ doDisconnect();
+ }
+ }
+ DBUG_VOID_RETURN;
} //setupRemoteSegment
-
-
-bool SCI_Transporter::connectImpl(Uint32 timeout) {
-
- sci_error_t err;
- Uint32 offset = 0;
-
+
+bool
+SCI_Transporter::init_local()
+{
+ DBUG_ENTER("SCI_Transporter::init_local");
if(!m_initLocal) {
if(initLocalSegment()!=SCI_ERR_OK){
- NdbSleep_MilliSleep(timeout);
+ NdbSleep_MilliSleep(10);
//NDB SHOULD TERMINATE AND COMPUTER REBOOTED!
- reportThreadError(localNodeId, TE_SCI_CANNOT_INIT_LOCALSEGMENT);
- return false;
+ report_error(TE_SCI_CANNOT_INIT_LOCALSEGMENT);
+ DBUG_RETURN(false);
}
- m_initLocal=true;
+ m_initLocal=true;
}
-
- if(!m_mapped ) {
-
- for(Uint32 i=0; i < m_adapters ; i++) {
- m_TargetSegm[i].rhm[i].remoteHandle=0;
- SCIConnectSegment(sciAdapters[i].scidesc,
- &(m_TargetSegm[i].rhm[i].remoteHandle),
- m_remoteNodes[i],
- remoteSegmentId(localNodeId, remoteNodeId),
- i,
- 0,
- 0,
- 0,
- 0,
- &err);
-
- if(err != SCI_ERR_OK) {
- NdbSleep_MilliSleep(timeout);
- return false;
- }
-
- }
-
-
+ DBUG_RETURN(true);
+}
+
+bool
+SCI_Transporter::init_remote()
+{
+ DBUG_ENTER("SCI_Transporter::init_remote");
+ sci_error_t err;
+ Uint32 offset = 0;
+ if(!m_mapped ) {
+ DBUG_PRINT("info", ("Map remote segments"));
+ for(Uint32 i=0; i < m_adapters ; i++) {
+ m_TargetSegm[i].rhm[i].remoteHandle=0;
+ SCIConnectSegment(sciAdapters[i].scidesc,
+ &(m_TargetSegm[i].rhm[i].remoteHandle),
+ m_remoteNodes[i],
+ remoteSegmentId(localNodeId, remoteNodeId),
+ i,
+ 0,
+ 0,
+ 0,
+ 0,
+ &err);
+
+ if(err != SCI_ERR_OK) {
+ NdbSleep_MilliSleep(10);
+ DBUG_PRINT("error", ("Error connecting segment, err 0x%x", err));
+ DBUG_RETURN(false);
+ }
+
+ }
// Map the remote memory segment into program space
- for(Uint32 i=0; i < m_adapters ; i++) {
- m_TargetSegm[i].mappedMemory =
- SCIMapRemoteSegment((m_TargetSegm[i].rhm[i].remoteHandle),
- &(m_TargetSegm[i].rhm[i].map),
- offset,
- m_BufferSize,
- NULL,
- FLAGS,
- &err);
-
-
- if(err!= SCI_ERR_OK) {
-#ifdef DEBUG_TRANSPORTER
- ndbout_c("\nCannot map a segment to the remote node %d.");
- ndbout_c("Error code 0x%x",m_RemoteSciNodeId, err);
-#endif
- //NDB SHOULD TERMINATE AND COMPUTER REBOOTED!
- reportThreadError(remoteNodeId, TE_SCI_CANNOT_MAP_REMOTESEGMENT);
- return false;
- }
-
-
- }
- m_mapped=true;
- setupRemoteSegment();
- setConnected();
-#ifdef DEBUG_TRANSPORTER
- ndbout << "connected and mapped to segment : " << endl;
- ndbout << "remoteNode: " << m_remoteNodes[0] << endl;
- ndbout << "remoteNode: " << m_remotenodes[1] << endl;
- ndbout << "remoteSegId: "
- << remoteSegmentId(localNodeId, remoteNodeId)
- << endl;
-#endif
- return true;
- }
- else {
- return getConnectionStatus();
- }
-} // connectImpl()
-
+ for(Uint32 i=0; i < m_adapters ; i++) {
+ m_TargetSegm[i].mappedMemory =
+ SCIMapRemoteSegment((m_TargetSegm[i].rhm[i].remoteHandle),
+ &(m_TargetSegm[i].rhm[i].map),
+ offset,
+ m_BufferSize,
+ NULL,
+ FLAGS,
+ &err);
+
+ if(err!= SCI_ERR_OK) {
+ DBUG_PRINT("error", ("Cannot map a segment to the remote node %d. Error code 0x%x",m_RemoteSciNodeId, err));
+ //NDB SHOULD TERMINATE AND COMPUTER REBOOTED!
+ report_error(TE_SCI_CANNOT_MAP_REMOTESEGMENT);
+ DBUG_RETURN(false);
+ }
+ }
+ m_mapped=true;
+ setupRemoteSegment();
+ setConnected();
+ DBUG_PRINT("info", ("connected and mapped to segment, remoteNode: %d",
+ remoteNodeId));
+ DBUG_PRINT("info", ("remoteSegId: %d",
+ remoteSegmentId(localNodeId, remoteNodeId)));
+ DBUG_RETURN(true);
+ } else {
+ DBUG_RETURN(getConnectionStatus());
+ }
+}
+
+bool
+SCI_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd)
+{
+ SocketInputStream s_input(sockfd);
+ SocketOutputStream s_output(sockfd);
+ char buf[256];
+ DBUG_ENTER("SCI_Transporter::connect_client_impl");
+ // Wait for server to create and attach
+ if (s_input.gets(buf, 256) == 0) {
+ DBUG_PRINT("error", ("No initial response from server in SCI"));
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_RETURN(false);
+ }
+
+ if (!init_local()) {
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_RETURN(false);
+ }
+
+ // Send ok to server
+ s_output.println("sci client 1 ok");
+
+ if (!init_remote()) {
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_RETURN(false);
+ }
+ // Wait for ok from server
+ if (s_input.gets(buf, 256) == 0) {
+ DBUG_PRINT("error", ("No second response from server in SCI"));
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_RETURN(false);
+ }
+ // Send ok to server
+ s_output.println("sci client 2 ok");
+
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_PRINT("info", ("Successfully connected client to node %d",
+ remoteNodeId));
+ DBUG_RETURN(true);
+}
+
+bool
+SCI_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd)
+{
+ SocketOutputStream s_output(sockfd);
+ SocketInputStream s_input(sockfd);
+ char buf[256];
+ DBUG_ENTER("SCI_Transporter::connect_server_impl");
+
+ if (!init_local()) {
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_RETURN(false);
+ }
+ // Send ok to client
+ s_output.println("sci server 1 ok");
+
+ // Wait for ok from client
+ if (s_input.gets(buf, 256) == 0) {
+ DBUG_PRINT("error", ("No response from client in SCI"));
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_RETURN(false);
+ }
+
+ if (!init_remote()) {
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_RETURN(false);
+ }
+ // Send ok to client
+ s_output.println("sci server 2 ok");
+ // Wait for ok from client
+ if (s_input.gets(buf, 256) == 0) {
+ DBUG_PRINT("error", ("No second response from client in SCI"));
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_RETURN(false);
+ }
+
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_PRINT("info", ("Successfully connected server to node %d",
+ remoteNodeId));
+ DBUG_RETURN(true);
+}
+
sci_error_t SCI_Transporter::createSequence(Uint32 adapterid) {
sci_error_t err;
SCICreateMapSequence((m_TargetSegm[adapterid].rhm[adapterid].map),
@@ -795,13 +804,14 @@ sci_error_t SCI_Transporter::startSequence(Uint32 adapterid) {
// If there still is an error then data cannot be safely send
- return err;
+ return err;
} // startSequence()
bool SCI_Transporter::disconnectLocal()
-{
+{
+ DBUG_ENTER("SCI_Transporter::disconnectLocal");
sci_error_t err;
m_ActiveAdapterId=0;
@@ -809,31 +819,28 @@ bool SCI_Transporter::disconnectLocal()
*/
SCIUnmapSegment(m_SourceSegm[0].lhm[0].map,0,&err);
- if(err!=SCI_ERR_OK) {
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNABLE_TO_UNMAP_SEGMENT);
- return false;
- }
+ if(err!=SCI_ERR_OK) {
+ report_error(TE_SCI_UNABLE_TO_UNMAP_SEGMENT);
+ DBUG_PRINT("error", ("Unable to unmap segment"));
+ DBUG_RETURN(false);
+ }
SCIRemoveSegment((m_SourceSegm[m_ActiveAdapterId].localHandle),
FLAGS,
&err);
if(err!=SCI_ERR_OK) {
- reportError(callbackObj, remoteNodeId, TE_SCI_UNABLE_TO_REMOVE_SEGMENT);
- return false;
+ report_error(TE_SCI_UNABLE_TO_REMOVE_SEGMENT);
+ DBUG_PRINT("error", ("Unable to remove segment"));
+ DBUG_RETURN(false);
}
-
- if(err == SCI_ERR_OK) {
-#ifdef DEBUG_TRANSPORTER
- printf("Local memory segment is unmapped and removed\n" );
-#endif
- }
- return true;
+ DBUG_PRINT("info", ("Local memory segment is unmapped and removed"));
+ DBUG_RETURN(true);
} // disconnectLocal()
bool SCI_Transporter::disconnectRemote() {
+ DBUG_ENTER("SCI_Transporter::disconnectRemote");
sci_error_t err;
for(Uint32 i=0; i<m_adapters; i++) {
/**
@@ -841,35 +848,32 @@ bool SCI_Transporter::disconnectRemote() {
*/
SCIUnmapSegment(m_TargetSegm[i].rhm[i].map,0,&err);
if(err!=SCI_ERR_OK) {
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT);
- return false;
- }
+ report_error(TE_SCI_UNABLE_TO_UNMAP_SEGMENT);
+ DBUG_PRINT("error", ("Unable to unmap segment"));
+ DBUG_RETURN(false);
+ }
SCIDisconnectSegment(m_TargetSegm[i].rhm[i].remoteHandle,
FLAGS,
&err);
if(err!=SCI_ERR_OK) {
- reportError(callbackObj,
- remoteNodeId, TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT);
- return false;
+ report_error(TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT);
+ DBUG_PRINT("error", ("Unable to disconnect segment"));
+ DBUG_RETURN(false);
}
-#ifdef DEBUG_TRANSPORTER
- ndbout_c("Remote memory segment is unmapped and disconnected\n" );
-#endif
+ DBUG_PRINT("info", ("Remote memory segment is unmapped and disconnected"));
}
- return true;
+ DBUG_RETURN(true);
} // disconnectRemote()
SCI_Transporter::~SCI_Transporter() {
+ DBUG_ENTER("SCI_Transporter::~SCI_Transporter");
// Close channel to the driver
-#ifdef DEBUG_TRANSPORTER
- ndbout << "~SCITransporter does a disConnect" << endl;
-#endif
doDisconnect();
if(m_sendBuffer.m_buffer != NULL)
delete[] m_sendBuffer.m_buffer;
+ DBUG_VOID_RETURN;
} // ~SCI_Transporter()
@@ -878,7 +882,7 @@ SCI_Transporter::~SCI_Transporter() {
void SCI_Transporter::closeSCI() {
// Termination of SCI
sci_error_t err;
- printf("\nClosing SCI Transporter...\n");
+ DBUG_ENTER("SCI_Transporter::closeSCI");
// Disconnect and remove remote segment
disconnectRemote();
@@ -890,26 +894,41 @@ void SCI_Transporter::closeSCI() {
// Closes an SCI virtual device
SCIClose(activeSCIDescriptor, FLAGS, &err);
- if(err != SCI_ERR_OK)
- fprintf(stderr,
- "\nCannot close SCI channel to the driver. Error code 0x%x",
- err);
+ if(err != SCI_ERR_OK) {
+ DBUG_PRINT("error", ("Cannot close SCI channel to the driver. Error code 0x%x",
+ err));
+ }
SCITerminate();
+ DBUG_VOID_RETURN;
} // closeSCI()
Uint32 *
-SCI_Transporter::getWritePtr(Uint32 lenBytes, Uint32 prio){
+SCI_Transporter::getWritePtr(Uint32 lenBytes, Uint32 prio)
+{
- if(m_sendBuffer.full()){
- /**-------------------------------------------------
- * Buffer was completely full. We have severe problems.
- * -------------------------------------------------
- */
- if(!doSend()){
+ Uint32 sci_buffer_remaining = m_sendBuffer.m_forceSendLimit;
+ Uint32 send_buf_size = m_sendBuffer.m_sendBufferSize;
+ Uint32 curr_data_size = m_sendBuffer.m_dataSize << 2;
+ Uint32 new_curr_data_size = curr_data_size + lenBytes;
+ if ((curr_data_size >= send_buf_size) ||
+ (curr_data_size >= sci_buffer_remaining)) {
+ /**
+ * The new message will not fit in the send buffer. We need to
+ * send the send buffer before filling it up with the new
+     * signal data. If the current data size would spill over the buffer
+     * edge we also send, to ensure correct operation.
+ */
+ if (!doSend()) {
+ /**
+       * We were not successful in sending; report 0, meaning buffer full,
+       * and let upper levels handle retries and other recovery matters.
+ */
return 0;
}
}
-
+ /**
+   * The new signal fits; simply append its data to the buffer.
+ */
Uint32 sz = m_sendBuffer.m_dataSize;
return &m_sendBuffer.m_buffer[sz];
}
@@ -918,10 +937,11 @@ void
SCI_Transporter::updateWritePtr(Uint32 lenBytes, Uint32 prio){
Uint32 sz = m_sendBuffer.m_dataSize;
- sz += (lenBytes / 4);
+ Uint32 packet_size = m_PacketSize;
+ sz += ((lenBytes + 3) >> 2);
m_sendBuffer.m_dataSize = sz;
- if(sz > m_PacketSize) {
+ if(sz > packet_size) {
/**-------------------------------------------------
* Buffer is full and we are ready to send. We will
* not wait since the signal is already in the buffer.
@@ -944,7 +964,8 @@ bool
SCI_Transporter::getConnectionStatus() {
if(*m_localStatusFlag == SCICONNECTED &&
(*m_remoteStatusFlag == SCICONNECTED ||
- *m_remoteStatusFlag2 == SCICONNECTED))
+ ((m_adapters > 1) &&
+ *m_remoteStatusFlag2 == SCICONNECTED)))
return true;
else
return false;
@@ -954,7 +975,9 @@ SCI_Transporter::getConnectionStatus() {
void
SCI_Transporter::setConnected() {
*m_remoteStatusFlag = SCICONNECTED;
- *m_remoteStatusFlag2 = SCICONNECTED;
+ if (m_adapters > 1) {
+ *m_remoteStatusFlag2 = SCICONNECTED;
+ }
*m_localStatusFlag = SCICONNECTED;
}
@@ -963,8 +986,10 @@ void
SCI_Transporter::setDisconnect() {
if(getLinkStatus(m_ActiveAdapterId))
*m_remoteStatusFlag = SCIDISCONNECT;
- if(getLinkStatus(m_StandbyAdapterId))
- *m_remoteStatusFlag2 = SCIDISCONNECT;
+ if (m_adapters > 1) {
+ if(getLinkStatus(m_StandbyAdapterId))
+ *m_remoteStatusFlag2 = SCIDISCONNECT;
+ }
}
@@ -981,20 +1006,20 @@ static bool init = false;
bool
SCI_Transporter::initSCI() {
+ DBUG_ENTER("SCI_Transporter::initSCI");
if(!init){
sci_error_t error;
// Initialize SISCI library
SCIInitialize(0, &error);
if(error != SCI_ERR_OK) {
-#ifdef DEBUG_TRANSPORTER
- ndbout_c("\nCannot initialize SISCI library.");
- ndbout_c("\nInconsistency between SISCI library and SISCI driver.Error code 0x%x", error);
-#endif
- return false;
+ DBUG_PRINT("error", ("Cannot initialize SISCI library."));
+ DBUG_PRINT("error", ("Inconsistency between SISCI library and SISCI driver. Error code 0x%x",
+ error));
+ DBUG_RETURN(false);
}
init = true;
}
- return true;
+ DBUG_RETURN(true);
}
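
The SCI_Transporter changes above replace the old DEBUG_TRANSPORTER printf/ndbout_c tracing and reportError() calls with the mysys dbug macros plus the Transporter base class report_error(). As a reading aid, here is a minimal sketch of the DBUG convention the patch follows; the function and its argument are hypothetical, and the macros are assumed to be available through the headers the transporter sources already include.

/* Hedged sketch of the DBUG_ENTER/DBUG_PRINT/DBUG_RETURN pattern (illustrative only). */
static bool example_unmap(void *segment)
{
  DBUG_ENTER("example_unmap");                  /* must be the first statement */
  if (segment == NULL)
  {
    DBUG_PRINT("error", ("Unable to unmap segment"));
    DBUG_RETURN(false);                         /* use DBUG_RETURN, not plain return */
  }
  DBUG_PRINT("info", ("Local memory segment is unmapped"));
  DBUG_RETURN(true);                            /* void functions use DBUG_VOID_RETURN */
}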
diff --git a/ndb/src/common/transporter/SCI_Transporter.hpp b/ndb/src/common/transporter/SCI_Transporter.hpp
index 03496c2ce21..e2f2dfcaf99 100644
--- a/ndb/src/common/transporter/SCI_Transporter.hpp
+++ b/ndb/src/common/transporter/SCI_Transporter.hpp
@@ -26,7 +26,7 @@
#include <ndb_types.h>
- /**
+/**
* The SCI Transporter
*
* The design goal of the SCI transporter is to deliver high performance
@@ -135,15 +135,17 @@ public:
bool getConnectionStatus();
private:
- SCI_Transporter(Uint32 packetSize,
+ SCI_Transporter(TransporterRegistry &t_reg,
+ const char *local_host,
+ const char *remote_host,
+ int port,
+ Uint32 packetSize,
Uint32 bufferSize,
Uint32 nAdapters,
Uint16 remoteSciNodeId0,
Uint16 remoteSciNodeId1,
NodeId localNodeID,
NodeId remoteNodeID,
- int byteorder,
- bool compression,
bool checksum,
bool signalId,
Uint32 reportFreq = 4096);
@@ -160,7 +162,8 @@ private:
/**
* For statistics on transfered packets
*/
-#ifdef DEBUG_TRANSPORTER
+//#ifdef DEBUG_TRANSPORTER
+#if 1
Uint32 i1024;
Uint32 i2048;
Uint32 i2049;
@@ -177,10 +180,8 @@ private:
struct {
Uint32 * m_buffer; // The buffer
Uint32 m_dataSize; // No of words in buffer
- Uint32 m_bufferSize; // Buffer size
+ Uint32 m_sendBufferSize; // Buffer size
Uint32 m_forceSendLimit; // Send when buffer is this full
-
- bool full() const { return (m_dataSize * 4) > m_forceSendLimit ;}
} m_sendBuffer;
SHM_Reader * reader;
@@ -196,7 +197,7 @@ private:
Uint32 m_adapters;
Uint32 m_numberOfRemoteNodes;
- Uint16* m_remoteNodes;
+ Uint16 m_remoteNodes[2];
typedef struct SciAdapter {
sci_desc_t scidesc;
@@ -296,12 +297,11 @@ private:
*/
bool sendIsPossible(struct timeval * timeout);
-
void getReceivePtr(Uint32 ** ptr, Uint32 ** eod){
reader->getReadPtr(* ptr, * eod);
}
- void updateReceivePtr(Uint32 * ptr){
+ void updateReceivePtr(Uint32 *ptr){
reader->updateReadPtr(ptr);
}
@@ -341,7 +341,9 @@ private:
*/
void failoverShmWriter();
-
+ bool init_local();
+ bool init_remote();
+
protected:
/** Perform a connection between segment
@@ -350,7 +352,8 @@ protected:
* retrying.
   * @return Returns true on success, otherwise false
*/
- bool connectImpl(Uint32 timeOutMillis);
+ bool connect_server_impl(NDB_SOCKET_TYPE sockfd);
+ bool connect_client_impl(NDB_SOCKET_TYPE sockfd);
/**
* We will disconnect if:
diff --git a/ndb/src/common/transporter/SHM_Buffer.hpp b/ndb/src/common/transporter/SHM_Buffer.hpp
index 32e59dd57a2..f49b4fe73cb 100644
--- a/ndb/src/common/transporter/SHM_Buffer.hpp
+++ b/ndb/src/common/transporter/SHM_Buffer.hpp
@@ -52,7 +52,7 @@ public:
}
void clear() {
- m_readIndex = * m_sharedReadIndex;
+ m_readIndex = 0;
}
/**
@@ -71,7 +71,7 @@ public:
/**
* Update read ptr
*/
- inline void updateReadPtr(Uint32 * readPtr);
+ inline void updateReadPtr(Uint32 *ptr);
private:
char * const m_startOfBuffer;
@@ -98,8 +98,8 @@ SHM_Reader::empty() const{
*/
inline
void
-SHM_Reader::getReadPtr(Uint32 * & ptr, Uint32 * & eod){
-
+SHM_Reader::getReadPtr(Uint32 * & ptr, Uint32 * & eod)
+{
Uint32 tReadIndex = m_readIndex;
Uint32 tWriteIndex = * m_sharedWriteIndex;
@@ -117,14 +117,14 @@ SHM_Reader::getReadPtr(Uint32 * & ptr, Uint32 * & eod){
*/
inline
void
-SHM_Reader::updateReadPtr(Uint32 * ptr){
-
- Uint32 tReadIndex = ((char *)ptr) - m_startOfBuffer;
+SHM_Reader::updateReadPtr(Uint32 *ptr)
+{
+ Uint32 tReadIndex = ((char*)ptr) - m_startOfBuffer;
assert(tReadIndex < m_totalBufferSize);
if(tReadIndex >= m_bufferSize){
- tReadIndex = 0; //-= m_bufferSize;
+ tReadIndex = 0;
}
m_readIndex = tReadIndex;
@@ -149,7 +149,7 @@ public:
}
void clear() {
- m_writeIndex = * m_sharedWriteIndex;
+ m_writeIndex = 0;
}
inline char * getWritePtr(Uint32 sz);
@@ -206,7 +206,7 @@ SHM_Writer::updateWritePtr(Uint32 sz){
assert(tWriteIndex < m_totalBufferSize);
if(tWriteIndex >= m_bufferSize){
- tWriteIndex = 0; //-= m_bufferSize;
+ tWriteIndex = 0;
}
m_writeIndex = tWriteIndex;
diff --git a/ndb/src/common/transporter/SHM_Transporter.cpp b/ndb/src/common/transporter/SHM_Transporter.cpp
index aa6b650afa8..ab161d8c18c 100644
--- a/ndb/src/common/transporter/SHM_Transporter.cpp
+++ b/ndb/src/common/transporter/SHM_Transporter.cpp
@@ -32,13 +32,12 @@ SHM_Transporter::SHM_Transporter(TransporterRegistry &t_reg,
int r_port,
NodeId lNodeId,
NodeId rNodeId,
- bool compression,
bool checksum,
bool signalId,
key_t _shmKey,
Uint32 _shmSize) :
Transporter(t_reg, lHostName, rHostName, r_port, lNodeId, rNodeId,
- 0, compression, checksum, signalId),
+ 0, false, checksum, signalId),
shmKey(_shmKey),
shmSize(_shmSize)
{
@@ -48,7 +47,7 @@ SHM_Transporter::SHM_Transporter(TransporterRegistry &t_reg,
shmBuf = 0;
reader = 0;
writer = 0;
-
+
setupBuffersDone=false;
#ifdef DEBUG_TRANSPORTER
printf("shm key (%d - %d) = %d\n", lNodeId, rNodeId, shmKey);
@@ -92,8 +91,6 @@ SHM_Transporter::setupBuffers(){
clientStatusFlag = base2 + 4;
char * startOfBuf2 = ((char *)base2)+sharedSize;
- * sharedReadIndex2 = * sharedWriteIndex2 = 0;
-
if(isServer){
* serverStatusFlag = 0;
reader = new SHM_Reader(startOfBuf1,
@@ -109,10 +106,10 @@ SHM_Transporter::setupBuffers(){
sharedWriteIndex2);
* sharedReadIndex1 = 0;
- * sharedWriteIndex2 = 0;
+ * sharedWriteIndex1 = 0;
* sharedReadIndex2 = 0;
- * sharedWriteIndex1 = 0;
+ * sharedWriteIndex2 = 0;
reader->clear();
writer->clear();
@@ -224,6 +221,7 @@ SHM_Transporter::prepareSend(const SignalHeader * const signalHeader,
bool
SHM_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd)
{
+ DBUG_ENTER("SHM_Transporter::connect_server_impl");
SocketOutputStream s_output(sockfd);
SocketInputStream s_input(sockfd);
char buf[256];
@@ -233,7 +231,7 @@ SHM_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd)
if (!ndb_shm_create()) {
report_error(TE_SHM_UNABLE_TO_CREATE_SEGMENT);
NDB_CLOSE_SOCKET(sockfd);
- return false;
+ DBUG_RETURN(false);
}
_shmSegCreated = true;
}
@@ -243,7 +241,7 @@ SHM_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd)
if (!ndb_shm_attach()) {
report_error(TE_SHM_UNABLE_TO_ATTACH_SEGMENT);
NDB_CLOSE_SOCKET(sockfd);
- return false;
+ DBUG_RETURN(false);
}
_attached = true;
}
@@ -254,7 +252,7 @@ SHM_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd)
// Wait for ok from client
if (s_input.gets(buf, 256) == 0) {
NDB_CLOSE_SOCKET(sockfd);
- return false;
+ DBUG_RETURN(false);
}
int r= connect_common(sockfd);
@@ -265,17 +263,20 @@ SHM_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd)
// Wait for ok from client
if (s_input.gets(buf, 256) == 0) {
NDB_CLOSE_SOCKET(sockfd);
- return false;
+ DBUG_RETURN(false);
}
+ DBUG_PRINT("info", ("Successfully connected server to node %d",
+ remoteNodeId));
}
NDB_CLOSE_SOCKET(sockfd);
- return r;
+ DBUG_RETURN(r);
}
bool
SHM_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd)
{
+ DBUG_ENTER("SHM_Transporter::connect_client_impl");
SocketInputStream s_input(sockfd);
SocketOutputStream s_output(sockfd);
char buf[256];
@@ -283,14 +284,18 @@ SHM_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd)
// Wait for server to create and attach
if (s_input.gets(buf, 256) == 0) {
NDB_CLOSE_SOCKET(sockfd);
- return false;
+ DBUG_PRINT("error", ("Server id %d did not attach",
+ remoteNodeId));
+ DBUG_RETURN(false);
}
// Create
if(!_shmSegCreated){
if (!ndb_shm_get()) {
NDB_CLOSE_SOCKET(sockfd);
- return false;
+ DBUG_PRINT("error", ("Failed create of shm seg to node %d",
+ remoteNodeId));
+ DBUG_RETURN(false);
}
_shmSegCreated = true;
}
@@ -300,7 +305,9 @@ SHM_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd)
if (!ndb_shm_attach()) {
report_error(TE_SHM_UNABLE_TO_ATTACH_SEGMENT);
NDB_CLOSE_SOCKET(sockfd);
- return false;
+ DBUG_PRINT("error", ("Failed attach of shm seg to node %d",
+ remoteNodeId));
+ DBUG_RETURN(false);
}
_attached = true;
}
@@ -314,21 +321,28 @@ SHM_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd)
// Wait for ok from server
if (s_input.gets(buf, 256) == 0) {
NDB_CLOSE_SOCKET(sockfd);
- return false;
+ DBUG_PRINT("error", ("No ok from server node %d",
+ remoteNodeId));
+ DBUG_RETURN(false);
}
// Send ok to server
s_output.println("shm client 2 ok");
+ DBUG_PRINT("info", ("Successfully connected client to node %d",
+ remoteNodeId));
}
NDB_CLOSE_SOCKET(sockfd);
- return r;
+ DBUG_RETURN(r);
}
bool
SHM_Transporter::connect_common(NDB_SOCKET_TYPE sockfd)
{
- if (!checkConnected())
+ if (!checkConnected()) {
+ DBUG_PRINT("error", ("Already connected to node %d",
+ remoteNodeId));
return false;
+ }
if(!setupBuffersDone) {
setupBuffers();
@@ -341,5 +355,7 @@ SHM_Transporter::connect_common(NDB_SOCKET_TYPE sockfd)
return true;
}
+ DBUG_PRINT("error", ("Failed to set up buffers to node %d",
+ remoteNodeId));
return false;
}
diff --git a/ndb/src/common/transporter/SHM_Transporter.hpp b/ndb/src/common/transporter/SHM_Transporter.hpp
index be54d0daa2a..27692209ffe 100644
--- a/ndb/src/common/transporter/SHM_Transporter.hpp
+++ b/ndb/src/common/transporter/SHM_Transporter.hpp
@@ -38,7 +38,6 @@ public:
int r_port,
NodeId lNodeId,
NodeId rNodeId,
- bool compression,
bool checksum,
bool signalId,
key_t shmKey,
@@ -127,6 +126,7 @@ protected:
private:
bool _shmSegCreated;
bool _attached;
+ bool m_connected;
key_t shmKey;
volatile Uint32 * serverStatusFlag;
diff --git a/ndb/src/common/transporter/TCP_Transporter.cpp b/ndb/src/common/transporter/TCP_Transporter.cpp
index 8833b51e236..7cfdc224b34 100644
--- a/ndb/src/common/transporter/TCP_Transporter.cpp
+++ b/ndb/src/common/transporter/TCP_Transporter.cpp
@@ -70,11 +70,10 @@ TCP_Transporter::TCP_Transporter(TransporterRegistry &t_reg,
int r_port,
NodeId lNodeId,
NodeId rNodeId,
- int byte_order,
- bool compr, bool chksm, bool signalId,
+ bool chksm, bool signalId,
Uint32 _reportFreq) :
Transporter(t_reg, lHostName, rHostName, r_port, lNodeId, rNodeId,
- byte_order, compr, chksm, signalId),
+ 0, false, chksm, signalId),
m_sendBuffer(sendBufSize)
{
maxReceiveSize = maxRecvSize;
@@ -106,12 +105,14 @@ TCP_Transporter::~TCP_Transporter() {
bool TCP_Transporter::connect_server_impl(NDB_SOCKET_TYPE sockfd)
{
- return connect_common(sockfd);
+ DBUG_ENTER("TCP_Transpporter::connect_server_impl");
+ DBUG_RETURN(connect_common(sockfd));
}
bool TCP_Transporter::connect_client_impl(NDB_SOCKET_TYPE sockfd)
{
- return connect_common(sockfd);
+ DBUG_ENTER("TCP_Transpporter::connect_client_impl");
+ DBUG_RETURN(connect_common(sockfd));
}
bool TCP_Transporter::connect_common(NDB_SOCKET_TYPE sockfd)
@@ -119,6 +120,8 @@ bool TCP_Transporter::connect_common(NDB_SOCKET_TYPE sockfd)
theSocket = sockfd;
setSocketOptions();
setSocketNonBlocking(theSocket);
+ DBUG_PRINT("info", ("Successfully set-up TCP transporter to node %d",
+ remoteNodeId));
return true;
}
@@ -359,50 +362,56 @@ TCP_Transporter::doReceive() {
// Select-function must return the socket for read
// before this method is called
// It reads the external TCP/IP interface once
-
- const int nBytesRead = recv(theSocket,
- receiveBuffer.insertPtr, maxReceiveSize, 0);
-
- if (nBytesRead > 0) {
- receiveBuffer.sizeOfData += nBytesRead;
- receiveBuffer.insertPtr += nBytesRead;
+ int size = receiveBuffer.sizeOfBuffer - receiveBuffer.sizeOfData;
+ if(size > 0){
+ const int nBytesRead = recv(theSocket,
+ receiveBuffer.insertPtr,
+ size < maxReceiveSize ? size : maxReceiveSize,
+ 0);
- if(receiveBuffer.sizeOfData > receiveBuffer.sizeOfBuffer){
+ if (nBytesRead > 0) {
+ receiveBuffer.sizeOfData += nBytesRead;
+ receiveBuffer.insertPtr += nBytesRead;
+
+ if(receiveBuffer.sizeOfData > receiveBuffer.sizeOfBuffer){
#ifdef DEBUG_TRANSPORTER
- ndbout_c("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)",
- receiveBuffer.sizeOfData, receiveBuffer.sizeOfBuffer);
- ndbout_c("nBytesRead = %d", nBytesRead);
+ ndbout_c("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)",
+ receiveBuffer.sizeOfData, receiveBuffer.sizeOfBuffer);
+ ndbout_c("nBytesRead = %d", nBytesRead);
#endif
- ndbout_c("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)",
- receiveBuffer.sizeOfData, receiveBuffer.sizeOfBuffer);
- report_error(TE_INVALID_MESSAGE_LENGTH);
- return 0;
- }
-
- receiveCount ++;
- receiveSize += nBytesRead;
-
- if(receiveCount == reportFreq){
- reportReceiveLen(get_callback_obj(), remoteNodeId, receiveCount, receiveSize);
- receiveCount = 0;
- receiveSize = 0;
+ ndbout_c("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)",
+ receiveBuffer.sizeOfData, receiveBuffer.sizeOfBuffer);
+ report_error(TE_INVALID_MESSAGE_LENGTH);
+ return 0;
+ }
+
+ receiveCount ++;
+ receiveSize += nBytesRead;
+
+ if(receiveCount == reportFreq){
+ reportReceiveLen(get_callback_obj(), remoteNodeId, receiveCount, receiveSize);
+ receiveCount = 0;
+ receiveSize = 0;
+ }
+ return nBytesRead;
+ } else {
+#if defined DEBUG_TRANSPORTER
+ ndbout_c("Receive Failure(disconnect==%d) to node = %d nBytesSent = %d "
+ "errno = %d strerror = %s",
+ DISCONNECT_ERRNO(InetErrno, nBytesRead),
+ remoteNodeId, nBytesRead, InetErrno,
+ (char*)ndbstrerror(InetErrno));
+#endif
+ if(DISCONNECT_ERRNO(InetErrno, nBytesRead)){
+ // The remote node has closed down
+ doDisconnect();
+ report_disconnect(InetErrno);
+ }
}
return nBytesRead;
} else {
-#if defined DEBUG_TRANSPORTER
- ndbout_c("Receive Failure(disconnect==%d) to node = %d nBytesSent = %d "
- "errno = %d strerror = %s",
- DISCONNECT_ERRNO(InetErrno, nBytesRead),
- remoteNodeId, nBytesRead, InetErrno,
- (char*)ndbstrerror(InetErrno));
-#endif
- if(DISCONNECT_ERRNO(InetErrno, nBytesRead)){
- // The remote node has closed down
- doDisconnect();
- report_disconnect(InetErrno);
- }
+ return 0;
}
- return nBytesRead;
}
void
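
The doReceive() rework above no longer calls recv() with a fixed maxReceiveSize; it first computes the free space left in receiveBuffer and reads at most min(free space, maxReceiveSize), returning 0 when the buffer is full. A standalone sketch of that clamp, with illustrative names rather than the transporter's actual members:

/* Illustrative sketch of the bounded read added above (POSIX sockets assumed). */
#include <algorithm>
#include <sys/socket.h>

static int bounded_recv(int fd, char *insert_ptr,
                        int size_of_buffer, int size_of_data,
                        int max_receive_size)
{
  int space = size_of_buffer - size_of_data;    /* room left in the receive buffer */
  if (space <= 0)
    return 0;                                   /* buffer full; caller retries later */
  int want = std::min(space, max_receive_size); /* never read past the buffer end */
  return (int)recv(fd, insert_ptr, (size_t)want, 0);
}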
diff --git a/ndb/src/common/transporter/TCP_Transporter.hpp b/ndb/src/common/transporter/TCP_Transporter.hpp
index 958cfde03a1..48046310bf8 100644
--- a/ndb/src/common/transporter/TCP_Transporter.hpp
+++ b/ndb/src/common/transporter/TCP_Transporter.hpp
@@ -52,8 +52,7 @@ private:
int r_port,
NodeId lHostId,
NodeId rHostId,
- int byteorder,
- bool compression, bool checksum, bool signalId,
+ bool checksum, bool signalId,
Uint32 reportFreq = 4096);
// Disconnect, delete send buffers and receive buffer
diff --git a/ndb/src/common/transporter/TransporterRegistry.cpp b/ndb/src/common/transporter/TransporterRegistry.cpp
index fe1d4b04786..cacbbed00f1 100644
--- a/ndb/src/common/transporter/TransporterRegistry.cpp
+++ b/ndb/src/common/transporter/TransporterRegistry.cpp
@@ -15,6 +15,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <ndb_global.h>
+#include <my_pthread.h>
#include <TransporterRegistry.hpp>
#include "TransporterInternalDefinitions.hpp"
@@ -48,9 +49,10 @@
SocketServer::Session * TransporterService::newSession(NDB_SOCKET_TYPE sockfd)
{
+ DBUG_ENTER("SocketServer::Session * TransporterService::newSession");
if (m_auth && !m_auth->server_authenticate(sockfd)){
NDB_CLOSE_SOCKET(sockfd);
- return 0;
+ DBUG_RETURN(0);
}
{
@@ -60,27 +62,32 @@ SocketServer::Session * TransporterService::newSession(NDB_SOCKET_TYPE sockfd)
char buf[256];
if (s_input.gets(buf, 256) == 0) {
NDB_CLOSE_SOCKET(sockfd);
- return 0;
+ DBUG_PRINT("error", ("Could not get node id from client"));
+ DBUG_RETURN(0);
}
if (sscanf(buf, "%d", &nodeId) != 1) {
NDB_CLOSE_SOCKET(sockfd);
- return 0;
+ DBUG_PRINT("error", ("Error in node id from client"));
+ DBUG_RETURN(0);
}
//check that nodeid is valid and that there is an allocated transporter
- if ( nodeId < 0 || nodeId >= (int) m_transporter_registry->maxTransporters) {
- NDB_CLOSE_SOCKET(sockfd);
- return 0;
+ if ( nodeId < 0 || nodeId >= (int)m_transporter_registry->maxTransporters) {
+ NDB_CLOSE_SOCKET(sockfd);
+ DBUG_PRINT("error", ("Node id out of range from client"));
+ DBUG_RETURN(0);
}
if (m_transporter_registry->theTransporters[nodeId] == 0) {
NDB_CLOSE_SOCKET(sockfd);
- return 0;
+ DBUG_PRINT("error", ("No transporter for this node id from client"));
+ DBUG_RETURN(0);
}
//check that the transporter should be connected
if (m_transporter_registry->performStates[nodeId] != TransporterRegistry::CONNECTING) {
NDB_CLOSE_SOCKET(sockfd);
- return 0;
+ DBUG_PRINT("error", ("Transporter in wrong state for this node id from client"));
+ DBUG_RETURN(0);
}
Transporter *t= m_transporter_registry->theTransporters[nodeId];
@@ -93,14 +100,13 @@ SocketServer::Session * TransporterService::newSession(NDB_SOCKET_TYPE sockfd)
t->connect_server(sockfd);
}
- return 0;
+ DBUG_RETURN(0);
}
TransporterRegistry::TransporterRegistry(void * callback,
unsigned _maxTransporters,
unsigned sizeOfLongSignalMemory) {
- m_transporter_service= 0;
nodeIdSpecified = false;
maxTransporters = _maxTransporters;
sendCounter = 1;
@@ -209,8 +215,6 @@ TransporterRegistry::createTransporter(TCP_TransporterConfiguration *config) {
config->port,
localNodeId,
config->remoteNodeId,
- config->byteOrder,
- config->compression,
config->checksum,
config->signalId);
if (t == NULL)
@@ -264,8 +268,6 @@ TransporterRegistry::createTransporter(OSE_TransporterConfiguration *conf) {
conf->localHostName,
conf->remoteNodeId,
conf->remoteHostName,
- conf->byteOrder,
- conf->compression,
conf->checksum,
conf->signalId);
if (t == NULL)
@@ -306,15 +308,17 @@ TransporterRegistry::createTransporter(SCI_TransporterConfiguration *config) {
if(theTransporters[config->remoteNodeId] != NULL)
return false;
- SCI_Transporter * t = new SCI_Transporter(config->sendLimit,
+ SCI_Transporter * t = new SCI_Transporter(*this,
+ config->localHostName,
+ config->remoteHostName,
+ config->port,
+ config->sendLimit,
config->bufferSize,
config->nLocalAdapters,
config->remoteSciNodeId0,
config->remoteSciNodeId1,
localNodeId,
config->remoteNodeId,
- config->byteOrder,
- config->compression,
config->checksum,
config->signalId);
@@ -357,7 +361,6 @@ TransporterRegistry::createTransporter(SHM_TransporterConfiguration *config) {
config->port,
localNodeId,
config->remoteNodeId,
- config->compression,
config->checksum,
config->signalId,
config->shmKey,
@@ -855,8 +858,8 @@ TransporterRegistry::performReceive(){
if(t->isConnected() && t->checkConnected()){
Uint32 * readPtr, * eodPtr;
t->getReceivePtr(&readPtr, &eodPtr);
- readPtr = unpack(readPtr, eodPtr, nodeId, ioStates[nodeId]);
- t->updateReceivePtr(readPtr);
+ Uint32 *newPtr = unpack(readPtr, eodPtr, nodeId, ioStates[nodeId]);
+ t->updateReceivePtr(newPtr);
}
}
}
@@ -870,8 +873,8 @@ TransporterRegistry::performReceive(){
if(t->isConnected() && t->checkConnected()){
Uint32 * readPtr, * eodPtr;
t->getReceivePtr(&readPtr, &eodPtr);
- readPtr = unpack(readPtr, eodPtr, nodeId, ioStates[nodeId]);
- t->updateReceivePtr(readPtr);
+ Uint32 *newPtr = unpack(readPtr, eodPtr, nodeId, ioStates[nodeId]);
+ t->updateReceivePtr(newPtr);
}
}
}
@@ -1023,7 +1026,9 @@ TransporterRegistry::setIOState(NodeId nodeId, IOState state) {
static void *
run_start_clients_C(void * me)
{
+ my_thread_init();
((TransporterRegistry*) me)->start_clients_thread();
+ my_thread_end();
NdbThread_Exit(0);
return me;
}
@@ -1106,6 +1111,7 @@ TransporterRegistry::update_connections()
void
TransporterRegistry::start_clients_thread()
{
+ DBUG_ENTER("TransporterRegistry::start_clients_thread");
while (m_run_start_clients_thread) {
NdbSleep_MilliSleep(100);
for (int i= 0, n= 0; n < nTransporters && m_run_start_clients_thread; i++){
@@ -1129,6 +1135,7 @@ TransporterRegistry::start_clients_thread()
}
}
}
+ DBUG_VOID_RETURN;
}
bool
@@ -1159,55 +1166,67 @@ TransporterRegistry::stop_clients()
return true;
}
-bool
-TransporterRegistry::start_service(SocketServer& socket_server)
+void
+TransporterRegistry::add_transporter_interface(const char *interface, unsigned short port)
{
-#if 0
- for (int i= 0, n= 0; n < nTransporters; i++){
- Transporter * t = theTransporters[i];
- if (!t)
+ DBUG_ENTER("TransporterRegistry::add_transporter_interface");
+ DBUG_PRINT("enter",("interface=%s, port= %d", interface, port));
+ if (interface && strlen(interface) == 0)
+ interface= 0;
+
+ for (unsigned i= 0; i < m_transporter_interface.size(); i++)
+ {
+ Transporter_interface &tmp= m_transporter_interface[i];
+ if (port != tmp.m_service_port)
continue;
- n++;
- if (t->isServer) {
- t->m_service = new TransporterService(new SocketAuthSimple("ndbd passwd"));
- if(!socket_server.setup(t->m_service, t->m_r_port, 0))
- {
- ndbout_c("Unable to setup transporter service port: %d!\n"
- "Please check if the port is already used,\n"
- "(perhaps a mgmt server is already running)",
- m_service_port);
- delete t->m_service;
- return false;
- }
+ if (interface != 0 && tmp.m_interface != 0 &&
+ strcmp(interface, tmp.m_interface) == 0)
+ {
+ DBUG_VOID_RETURN; // found match, no need to insert
+ }
+ if (interface == 0 && tmp.m_interface == 0)
+ {
+ DBUG_VOID_RETURN; // found match, no need to insert
}
}
-#endif
-
- if (m_service_port != 0) {
+ Transporter_interface t;
+ t.m_service_port= port;
+ t.m_interface= interface;
+ m_transporter_interface.push_back(t);
+ DBUG_PRINT("exit",("interface and port added"));
+ DBUG_VOID_RETURN;
+}
- m_transporter_service = new TransporterService(new SocketAuthSimple("ndbd", "ndbd passwd"));
+bool
+TransporterRegistry::start_service(SocketServer& socket_server)
+{
+ if (m_transporter_interface.size() > 0 && nodeIdSpecified != true)
+ {
+ ndbout_c("TransporterRegistry::startReceiving: localNodeId not specified");
+ return false;
+ }
- if (nodeIdSpecified != true) {
- ndbout_c("TransporterRegistry::startReceiving: localNodeId not specified");
+ for (unsigned i= 0; i < m_transporter_interface.size(); i++)
+ {
+ Transporter_interface &t= m_transporter_interface[i];
+ if (t.m_service_port == 0)
+ {
+ continue;
+ }
+ TransporterService *transporter_service =
+ new TransporterService(new SocketAuthSimple("ndbd", "ndbd passwd"));
+ if(!socket_server.setup(transporter_service,
+ t.m_service_port, t.m_interface))
+ {
+ ndbout_c("Unable to setup transporter service port: %s:%d!\n"
+ "Please check if the port is already used,\n"
+ "(perhaps the node is already running)",
+ t.m_interface ? t.m_interface : "*", t.m_service_port);
+ delete transporter_service;
return false;
}
-
- //m_interface_name = "ndbd";
- m_interface_name = 0;
-
- if(!socket_server.setup(m_transporter_service, m_service_port, m_interface_name))
- {
- ndbout_c("Unable to setup transporter service port: %d!\n"
- "Please check if the port is already used,\n"
- "(perhaps a mgmt server is already running)",
- m_service_port);
- delete m_transporter_service;
- return false;
- }
- m_transporter_service->setTransporterRegistry(this);
- } else
- m_transporter_service= 0;
-
+ transporter_service->setTransporterRegistry(this);
+ }
return true;
}
@@ -1281,3 +1300,5 @@ NdbOut & operator <<(NdbOut & out, SignalHeader & sh){
out << "trace: " << (int)sh.theTrace << endl;
return out;
}
+
+template class Vector<TransporterRegistry::Transporter_interface>;
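
The new add_transporter_interface()/start_service() pair above collects distinct (interface, port) pairs and opens one TransporterService per entry, replacing the single m_service_port listener that is removed. A rough sketch of the duplicate check, using std::vector in place of the NDB Vector template purely for illustration:

/* Illustrative duplicate check; the struct and container are assumptions, not NDB types. */
#include <cstring>
#include <vector>

struct Iface { const char *name; unsigned short port; };

static bool has_interface(const std::vector<Iface> &list,
                          const char *name, unsigned short port)
{
  for (size_t i = 0; i < list.size(); i++)
  {
    if (list[i].port != port)
      continue;                                  /* different port, keep looking */
    if (name == 0 && list[i].name == 0)
      return true;                               /* both bind to all interfaces */
    if (name != 0 && list[i].name != 0 && strcmp(name, list[i].name) == 0)
      return true;                               /* same named interface */
  }
  return false;
}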
diff --git a/ndb/src/common/util/Makefile.am b/ndb/src/common/util/Makefile.am
index 678added01e..efb249dd330 100644
--- a/ndb/src/common/util/Makefile.am
+++ b/ndb/src/common/util/Makefile.am
@@ -9,7 +9,7 @@ libgeneral_la_SOURCES = \
NdbSqlUtil.cpp new.cpp \
uucode.c random.c getarg.c version.c \
strdup.c strlcat.c strlcpy.c \
- ConfigValues.cpp
+ ConfigValues.cpp ndb_init.c
include $(top_srcdir)/ndb/config/common.mk.am
include $(top_srcdir)/ndb/config/type_util.mk.am
diff --git a/ndb/src/common/util/NdbSqlUtil.cpp b/ndb/src/common/util/NdbSqlUtil.cpp
index 84a6f6e6c21..6e4e5919e43 100644
--- a/ndb/src/common/util/NdbSqlUtil.cpp
+++ b/ndb/src/common/util/NdbSqlUtil.cpp
@@ -176,10 +176,29 @@ NdbSqlUtil::getType(Uint32 typeId)
return m_typeList[Type::Undefined];
}
+const NdbSqlUtil::Type&
+NdbSqlUtil::getTypeBinary(Uint32 typeId)
+{
+ switch (typeId) {
+ case Type::Char:
+ typeId = Type::Binary;
+ break;
+ case Type::Varchar:
+ typeId = Type::Varbinary;
+ break;
+ case Type::Text:
+ typeId = Type::Blob;
+ break;
+ default:
+ break;
+ }
+ return getType(typeId);
+}
+
// compare
int
-NdbSqlUtil::cmpTinyint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpTinyint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
union { Uint32 p[1]; Int8 v; } u1, u2;
@@ -193,7 +212,7 @@ NdbSqlUtil::cmpTinyint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 s
}
int
-NdbSqlUtil::cmpTinyunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpTinyunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
union { Uint32 p[1]; Uint8 v; } u1, u2;
@@ -207,7 +226,7 @@ NdbSqlUtil::cmpTinyunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uin
}
int
-NdbSqlUtil::cmpSmallint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpSmallint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
union { Uint32 p[1]; Int16 v; } u1, u2;
@@ -221,7 +240,7 @@ NdbSqlUtil::cmpSmallint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32
}
int
-NdbSqlUtil::cmpSmallunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpSmallunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
union { Uint32 p[1]; Uint16 v; } u1, u2;
@@ -235,7 +254,7 @@ NdbSqlUtil::cmpSmallunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Ui
}
int
-NdbSqlUtil::cmpMediumint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpMediumint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
union { const Uint32* p; const unsigned char* v; } u1, u2;
@@ -251,7 +270,7 @@ NdbSqlUtil::cmpMediumint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32
}
int
-NdbSqlUtil::cmpMediumunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpMediumunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
union { const Uint32* p; const unsigned char* v; } u1, u2;
@@ -267,7 +286,7 @@ NdbSqlUtil::cmpMediumunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, U
}
int
-NdbSqlUtil::cmpInt(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpInt(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
union { Uint32 p[1]; Int32 v; } u1, u2;
@@ -281,7 +300,7 @@ NdbSqlUtil::cmpInt(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
}
int
-NdbSqlUtil::cmpUnsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpUnsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
union { Uint32 p[1]; Uint32 v; } u1, u2;
@@ -295,7 +314,7 @@ NdbSqlUtil::cmpUnsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32
}
int
-NdbSqlUtil::cmpBigint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpBigint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
if (size >= 2) {
@@ -314,7 +333,7 @@ NdbSqlUtil::cmpBigint(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 si
}
int
-NdbSqlUtil::cmpBigunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpBigunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
if (size >= 2) {
@@ -333,7 +352,7 @@ NdbSqlUtil::cmpBigunsigned(const Uint32* p1, const Uint32* p2, Uint32 full, Uint
}
int
-NdbSqlUtil::cmpFloat(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpFloat(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
union { Uint32 p[1]; float v; } u1, u2;
@@ -348,7 +367,7 @@ NdbSqlUtil::cmpFloat(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 siz
}
int
-NdbSqlUtil::cmpDouble(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpDouble(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
if (size >= 2) {
@@ -368,7 +387,7 @@ NdbSqlUtil::cmpDouble(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 si
}
int
-NdbSqlUtil::cmpDecimal(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpDecimal(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
// not used by MySQL or NDB
@@ -377,27 +396,34 @@ NdbSqlUtil::cmpDecimal(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 s
}
int
-NdbSqlUtil::cmpChar(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpChar(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- assert(full >= size && size > 0);
+ // collation does not work on prefix for some charsets
+ assert(full == size && size > 0);
/*
- * Char is blank-padded to length and null-padded to word size. There
- * is no terminator so we compare the full values.
+ * Char is blank-padded to length and null-padded to word size.
*/
- union { const Uint32* p; const char* v; } u1, u2;
+ union { const Uint32* p; const uchar* v; } u1, u2;
u1.p = p1;
u2.p = p2;
- int k = memcmp(u1.v, u2.v, size << 2);
- return k < 0 ? -1 : k > 0 ? +1 : full == size ? 0 : CmpUnknown;
+ // not const in MySQL
+ CHARSET_INFO* cs = (CHARSET_INFO*)(info);
+ // length in bytes including null padding to Uint32
+ uint l1 = (full << 2);
+ int k = (*cs->coll->strnncollsp)(cs, u1.v, l1, u2.v, l1);
+ return k < 0 ? -1 : k > 0 ? +1 : 0;
}
int
-NdbSqlUtil::cmpVarchar(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpVarchar(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
/*
* Varchar is not allowed to contain a null byte and the value is
* null-padded. Therefore comparison does not need to use the length.
+ *
+ * Not used before MySQL 5.0. Format is likely to change. Handle
+ * only binary collation for now.
*/
union { const Uint32* p; const char* v; } u1, u2;
u1.p = p1;
@@ -408,7 +434,7 @@ NdbSqlUtil::cmpVarchar(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 s
}
int
-NdbSqlUtil::cmpBinary(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpBinary(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
/*
@@ -422,12 +448,14 @@ NdbSqlUtil::cmpBinary(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 si
}
int
-NdbSqlUtil::cmpVarbinary(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpVarbinary(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
/*
* Binary data of variable length padded with nulls. The comparison
* does not need to use the length.
+ *
+ * Not used before MySQL 5.0. Format is likely to change.
*/
union { const Uint32* p; const unsigned char* v; } u1, u2;
u1.p = p1;
@@ -438,11 +466,13 @@ NdbSqlUtil::cmpVarbinary(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32
}
int
-NdbSqlUtil::cmpDatetime(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpDatetime(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
/*
* Datetime is CC YY MM DD hh mm ss \0
+ *
+ * Not used via MySQL.
*/
union { const Uint32* p; const unsigned char* v; } u1, u2;
u1.p = p1;
@@ -459,11 +489,13 @@ NdbSqlUtil::cmpDatetime(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32
}
int
-NdbSqlUtil::cmpTimespec(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpTimespec(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
/*
* Timespec is CC YY MM DD hh mm ss \0 NN NN NN NN
+ *
+ * Not used via MySQL.
*/
union { const Uint32* p; const unsigned char* v; } u1, u2;
u1.p = p1;
@@ -490,12 +522,11 @@ NdbSqlUtil::cmpTimespec(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32
}
int
-NdbSqlUtil::cmpBlob(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpBlob(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
assert(full >= size && size > 0);
/*
- * Blob comparison is on the inline bytes. Except for larger header
- * the format is like Varbinary.
+ * Blob comparison is on the inline bytes (null padded).
*/
const unsigned head = NDB_BLOB_HEAD_SIZE;
// skip blob head
@@ -510,25 +541,107 @@ NdbSqlUtil::cmpBlob(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size
}
int
-NdbSqlUtil::cmpText(const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpText(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
{
- assert(full >= size && size > 0);
+ // collation does not work on prefix for some charsets
+ assert(full == size && size > 0);
/*
- * Text comparison is on the inline bytes. Except for larger header
- * the format is like Varchar.
+ * Text comparison is on the inline bytes (blank padded). Currently
+ * not supported for multi-byte charsets.
*/
const unsigned head = NDB_BLOB_HEAD_SIZE;
// skip blob head
if (size >= head + 1) {
- union { const Uint32* p; const char* v; } u1, u2;
+ union { const Uint32* p; const uchar* v; } u1, u2;
u1.p = p1 + head;
u2.p = p2 + head;
- int k = memcmp(u1.v, u2.v, (size - head) << 2);
- return k < 0 ? -1 : k > 0 ? +1 : full == size ? 0 : CmpUnknown;
+ // not const in MySQL
+ CHARSET_INFO* cs = (CHARSET_INFO*)(info);
+ // length in bytes including null padding to Uint32
+ uint l1 = (full << 2);
+ int k = (*cs->coll->strnncollsp)(cs, u1.v, l1, u2.v, l1);
+ return k < 0 ? -1 : k > 0 ? +1 : 0;
}
return CmpUnknown;
}
+// check charset
+
+bool
+NdbSqlUtil::usable_in_pk(Uint32 typeId, const void* info)
+{
+ const Type& type = getType(typeId);
+ switch (type.m_typeId) {
+ case Type::Undefined:
+ break;
+ case Type::Char:
+ {
+ const CHARSET_INFO *cs = (const CHARSET_INFO*)info;
+ return
+ cs != 0 &&
+ cs->cset != 0 &&
+ cs->coll != 0 &&
+ cs->coll->strnxfrm != 0 &&
+ cs->strxfrm_multiply == 1; // current limitation
+ }
+ break;
+ case Type::Varchar:
+ return true; // Varchar not used via MySQL
+ case Type::Blob:
+ case Type::Text:
+ break;
+ default:
+ return true;
+ }
+ return false;
+}
+
+bool
+NdbSqlUtil::usable_in_hash_index(Uint32 typeId, const void* info)
+{
+ return usable_in_pk(typeId, info);
+}
+
+bool
+NdbSqlUtil::usable_in_ordered_index(Uint32 typeId, const void* info)
+{
+ const Type& type = getType(typeId);
+ switch (type.m_typeId) {
+ case Type::Undefined:
+ break;
+ case Type::Char:
+ {
+ const CHARSET_INFO *cs = (const CHARSET_INFO*)info;
+ return
+ cs != 0 &&
+ cs->cset != 0 &&
+ cs->coll != 0 &&
+ cs->coll->strnxfrm != 0 &&
+ cs->coll->strnncollsp != 0 &&
+ cs->strxfrm_multiply == 1; // current limitation
+ }
+ break;
+ case Type::Varchar:
+ return true; // Varchar not used via MySQL
+ case Type::Text:
+ {
+ const CHARSET_INFO *cs = (const CHARSET_INFO*)info;
+ return
+ cs != 0 &&
+ cs->mbmaxlen == 1 && // extra limitation
+ cs->cset != 0 &&
+ cs->coll != 0 &&
+ cs->coll->strnxfrm != 0 &&
+ cs->coll->strnncollsp != 0 &&
+ cs->strxfrm_multiply == 1; // current limitation
+ }
+ break;
+ default:
+ return true;
+ }
+ return false;
+}
+
#ifdef NDB_SQL_UTIL_TEST
#include <NdbTick.h>
@@ -556,6 +669,7 @@ const Testcase testcase[] = {
int
main(int argc, char** argv)
{
+ ndb_init(); // for charsets
unsigned count = argc > 1 ? atoi(argv[1]) : 1000000;
ndbout_c("count = %u", count);
assert(count != 0);
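
The cmpChar/cmpText changes above replace memcmp with the column's collation, calling cs->coll->strnncollsp() on the full blank-padded value (prefix comparison is disallowed by the tightened asserts). A minimal sketch of that call shape, assuming a CHARSET_INFO obtained via get_charset() as in the Dbdict hunk further down; the wrapper itself is hypothetical:

/* Hedged sketch: compare two blank-padded strings of equal byte length under a collation. */
#include <my_global.h>
#include <m_ctype.h>    /* CHARSET_INFO */

static int collation_cmp(CHARSET_INFO *cs,
                         const uchar *a, const uchar *b, uint len)
{
  int k = (*cs->coll->strnncollsp)(cs, a, len, b, len);
  return k < 0 ? -1 : k > 0 ? +1 : 0;   /* normalize to -1 / 0 / +1 */
}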
diff --git a/ndb/src/common/util/SocketServer.cpp b/ndb/src/common/util/SocketServer.cpp
index 0cc06a54496..c3cffa1399b 100644
--- a/ndb/src/common/util/SocketServer.cpp
+++ b/ndb/src/common/util/SocketServer.cpp
@@ -16,6 +16,7 @@
#include <ndb_global.h>
+#include <my_pthread.h>
#include <SocketServer.hpp>
@@ -46,7 +47,7 @@ SocketServer::~SocketServer() {
}
bool
-SocketServer::tryBind(unsigned short port, const char * intface) const {
+SocketServer::tryBind(unsigned short port, const char * intface) {
struct sockaddr_in servaddr;
memset(&servaddr, 0, sizeof(servaddr));
servaddr.sin_family = AF_INET;
@@ -83,7 +84,8 @@ bool
SocketServer::setup(SocketServer::Service * service,
unsigned short port,
const char * intface){
-
+ DBUG_ENTER("SocketServer::setup");
+ DBUG_PRINT("enter",("interface=%s, port=%d", intface, port));
struct sockaddr_in servaddr;
memset(&servaddr, 0, sizeof(servaddr));
servaddr.sin_family = AF_INET;
@@ -92,36 +94,44 @@ SocketServer::setup(SocketServer::Service * service,
if(intface != 0){
if(Ndb_getInAddr(&servaddr.sin_addr, intface))
- return false;
+ DBUG_RETURN(false);
}
const NDB_SOCKET_TYPE sock = socket(AF_INET, SOCK_STREAM, 0);
if (sock == NDB_INVALID_SOCKET) {
- return false;
+ DBUG_PRINT("error",("socket() - %d - %s",
+ errno, strerror(errno)));
+ DBUG_RETURN(false);
}
const int on = 1;
if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
(const char*)&on, sizeof(on)) == -1) {
+ DBUG_PRINT("error",("getsockopt() - %d - %s",
+ errno, strerror(errno)));
NDB_CLOSE_SOCKET(sock);
- return false;
+ DBUG_RETURN(false);
}
if (bind(sock, (struct sockaddr*) &servaddr, sizeof(servaddr)) == -1) {
+ DBUG_PRINT("error",("bind() - %d - %s",
+ errno, strerror(errno)));
NDB_CLOSE_SOCKET(sock);
- return false;
+ DBUG_RETURN(false);
}
if (listen(sock, m_maxSessions) == -1){
+ DBUG_PRINT("error",("listen() - %d - %s",
+ errno, strerror(errno)));
NDB_CLOSE_SOCKET(sock);
- return false;
+ DBUG_RETURN(false);
}
ServiceInstance i;
i.m_socket = sock;
i.m_service = service;
m_services.push_back(i);
- return true;
+ DBUG_RETURN(true);
}
void
@@ -177,8 +187,9 @@ void*
socketServerThread_C(void* _ss){
SocketServer * ss = (SocketServer *)_ss;
+ my_thread_init();
ss->doRun();
-
+ my_thread_end();
NdbThread_Exit(0);
return 0;
}
@@ -287,8 +298,10 @@ void*
sessionThread_C(void* _sc){
SocketServer::Session * si = (SocketServer::Session *)_sc;
+ my_thread_init();
if(!transfer(si->m_socket)){
si->m_stopped = true;
+ my_thread_end();
NdbThread_Exit(0);
return 0;
}
@@ -301,6 +314,7 @@ sessionThread_C(void* _sc){
}
si->m_stopped = true;
+ my_thread_end();
NdbThread_Exit(0);
return 0;
}
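
The thread entry points above (socketServerThread_C and sessionThread_C here, run_start_clients_C in TransporterRegistry) are now bracketed with my_thread_init()/my_thread_end() so each thread gets its own mysys/dbug state before any DBUG_ or my_* call runs. The general shape, with a placeholder name and body:

/* Sketch of the per-thread mysys bracketing; my_thread_init/my_thread_end
   are declared via my_pthread.h, which the patch includes above. */
static void *example_thread_C(void *arg)
{
  my_thread_init();     /* allocate per-thread mysys/dbug state */
  /* ... thread work that may use DBUG_PRINT or my_* functions ... */
  my_thread_end();      /* release it before the thread exits */
  return arg;           /* the real code calls NdbThread_Exit(0) here */
}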
diff --git a/ndb/src/common/debugger/LogLevel.cpp b/ndb/src/common/util/ndb_init.c
index f9e2f318432..f3aa734d7f9 100644
--- a/ndb/src/common/debugger/LogLevel.cpp
+++ b/ndb/src/common/util/ndb_init.c
@@ -14,17 +14,22 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#include <LogLevel.hpp>
+#include <ndb_global.h>
+#include <my_sys.h>
-const LogLevel::LogLevelCategoryName LogLevel::LOGLEVEL_CATEGORY_NAME[] = {
- { "LogLevelStartup" },
- { "LogLevelShutdown" },
- { "LogLevelStatistic" },
- { "LogLevelCheckpoint" },
- { "LogLevelNodeRestart" },
- { "LogLevelConnection" },
- { "LogLevelError" },
- { "LogLevelWarning" },
- { "LogLevelInfo" },
- { "LogLevelGrep" }
-};
+int
+ndb_init()
+{
+ if (my_init()) {
+ const char* err = "my_init() failed - exit\n";
+ write(2, err, strlen(err));
+ exit(1);
+ }
+ return 0;
+}
+
+void
+ndb_end(int flags)
+{
+ my_end(flags);
+}
diff --git a/ndb/src/cw/cpcd/APIService.cpp b/ndb/src/cw/cpcd/APIService.cpp
index 46b043c7004..de0e40cebfc 100644
--- a/ndb/src/cw/cpcd/APIService.cpp
+++ b/ndb/src/cw/cpcd/APIService.cpp
@@ -47,7 +47,7 @@
ParserRow<CPCDAPISession>::IgnoreMinMax, \
0, 0, \
fun, \
- desc }
+ desc, 0 }
#define CPCD_ARG(name, type, opt, desc) \
{ name, \
@@ -58,7 +58,7 @@
ParserRow<CPCDAPISession>::IgnoreMinMax, \
0, 0, \
0, \
- desc }
+ desc, 0 }
#define CPCD_ARG2(name, type, opt, min, max, desc) \
{ name, \
@@ -69,7 +69,7 @@
ParserRow<CPCDAPISession>::IgnoreMinMax, \
min, max, \
0, \
- desc }
+ desc, 0 }
#define CPCD_END() \
{ 0, \
@@ -80,7 +80,7 @@
ParserRow<CPCDAPISession>::IgnoreMinMax, \
0, 0, \
0, \
- 0 }
+ 0, 0 }
#define CPCD_CMD_ALIAS(name, realName, fun) \
{ name, \
@@ -91,7 +91,7 @@
ParserRow<CPCDAPISession>::IgnoreMinMax, \
0, 0, \
0, \
- 0 }
+ 0, 0 }
#define CPCD_ARG_ALIAS(name, realName, fun) \
{ name, \
@@ -102,7 +102,7 @@
ParserRow<CPCDAPISession>::IgnoreMinMax, \
0, 0, \
0, \
- 0 }
+ 0, 0 }
const
ParserRow<CPCDAPISession> commands[] =
diff --git a/ndb/src/cw/cpcd/CPCD.cpp b/ndb/src/cw/cpcd/CPCD.cpp
index 44db10422b9..bc9f350755f 100644
--- a/ndb/src/cw/cpcd/CPCD.cpp
+++ b/ndb/src/cw/cpcd/CPCD.cpp
@@ -378,7 +378,7 @@ CPCD::getProcessList() {
}
void
-CPCD::RequestStatus::err(enum RequestStatusCode status, char *msg) {
+CPCD::RequestStatus::err(enum RequestStatusCode status, const char *msg) {
m_status = status;
snprintf(m_errorstring, sizeof(m_errorstring), "%s", msg);
}
diff --git a/ndb/src/cw/cpcd/CPCD.hpp b/ndb/src/cw/cpcd/CPCD.hpp
index 4a7cab23bab..a5c0bef1dac 100644
--- a/ndb/src/cw/cpcd/CPCD.hpp
+++ b/ndb/src/cw/cpcd/CPCD.hpp
@@ -91,7 +91,7 @@ public:
RequestStatus() { m_status = OK; m_errorstring[0] = '\0'; };
/** @brief Sets an errorcode and a printable message */
- void err(enum RequestStatusCode, char *);
+ void err(enum RequestStatusCode, const char *);
/** @brief Returns the error message */
char *getErrMsg() { return m_errorstring; };
diff --git a/ndb/src/cw/cpcd/Makefile.am b/ndb/src/cw/cpcd/Makefile.am
index e276d1a766d..6af44a359fc 100644
--- a/ndb/src/cw/cpcd/Makefile.am
+++ b/ndb/src/cw/cpcd/Makefile.am
@@ -7,7 +7,7 @@ LDADD_LOC = \
$(top_builddir)/ndb/src/libndbclient.la \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/mysys/libmysys.a \
- $(top_builddir)/strings/libmystrings.a
+ $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
include $(top_srcdir)/ndb/config/common.mk.am
include $(top_srcdir)/ndb/config/type_util.mk.am
diff --git a/ndb/src/cw/cpcd/main.cpp b/ndb/src/cw/cpcd/main.cpp
index 913c31de1f7..207b81bfa89 100644
--- a/ndb/src/cw/cpcd/main.cpp
+++ b/ndb/src/cw/cpcd/main.cpp
@@ -28,12 +28,12 @@
#include "common.hpp"
-static char *work_dir = CPCD_DEFAULT_WORK_DIR;
+static const char *work_dir = CPCD_DEFAULT_WORK_DIR;
static int port = CPCD_DEFAULT_TCP_PORT;
static int use_syslog = 0;
-static char *logfile = NULL;
-static char *config_file = CPCD_DEFAULT_CONFIG_FILE;
-static char *user = 0;
+static const char *logfile = NULL;
+static const char *config_file = CPCD_DEFAULT_CONFIG_FILE;
+static const char *user = 0;
static struct getargs args[] = {
{ "work-dir", 'w', arg_string, &work_dir,
diff --git a/ndb/src/kernel/Makefile.am b/ndb/src/kernel/Makefile.am
index a6be3244b41..493ab4f9982 100644
--- a/ndb/src/kernel/Makefile.am
+++ b/ndb/src/kernel/Makefile.am
@@ -55,7 +55,7 @@ LDADD += \
$(top_builddir)/ndb/src/common/util/libgeneral.la \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/mysys/libmysys.a \
- $(top_builddir)/strings/libmystrings.a
+ $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
# Don't update the files from bitkeeper
%::SCCS/s.%
diff --git a/ndb/src/kernel/blocks/backup/Backup.cpp b/ndb/src/kernel/blocks/backup/Backup.cpp
index b3e9ff735ac..08a8bf83e20 100644
--- a/ndb/src/kernel/blocks/backup/Backup.cpp
+++ b/ndb/src/kernel/blocks/backup/Backup.cpp
@@ -40,6 +40,7 @@
#include <signaldata/BackupImpl.hpp>
#include <signaldata/BackupSignalData.hpp>
#include <signaldata/BackupContinueB.hpp>
+#include <signaldata/EventReport.hpp>
#include <signaldata/UtilSequence.hpp>
@@ -944,6 +945,13 @@ Backup::sendBackupRef(BlockReference senderRef, Signal *signal,
ref->errorCode = errorCode;
ref->masterRef = numberToRef(BACKUP, getMasterNodeId());
sendSignal(senderRef, GSN_BACKUP_REF, signal, BackupRef::SignalLength, JBB);
+
+ if(errorCode != BackupRef::IAmNotMaster){
+ signal->theData[0] = EventReport::BackupFailedToStart;
+ signal->theData[1] = senderRef;
+ signal->theData[2] = errorCode;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
+ }
}
void
@@ -1226,7 +1234,13 @@ Backup::defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
conf->nodes = ptr.p->nodes;
sendSignal(ptr.p->clientRef, GSN_BACKUP_CONF, signal,
BackupConf::SignalLength, JBB);
-
+
+ signal->theData[0] = EventReport::BackupStarted;
+ signal->theData[1] = ptr.p->clientRef;
+ signal->theData[2] = ptr.p->backupId;
+ ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+3);
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3+NdbNodeBitmask::Size, JBB);
+
ptr.p->masterData.state.setState(DEFINED);
/**
* Prepare Trig
@@ -2069,6 +2083,18 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
rep->nodes = ptr.p->nodes;
sendSignal(ptr.p->clientRef, GSN_BACKUP_COMPLETE_REP, signal,
BackupCompleteRep::SignalLength, JBB);
+
+ signal->theData[0] = EventReport::BackupCompleted;
+ signal->theData[1] = ptr.p->clientRef;
+ signal->theData[2] = ptr.p->backupId;
+ signal->theData[3] = ptr.p->startGCP;
+ signal->theData[4] = ptr.p->stopGCP;
+ signal->theData[5] = ptr.p->noOfBytes;
+ signal->theData[6] = ptr.p->noOfRecords;
+ signal->theData[7] = ptr.p->noOfLogBytes;
+ signal->theData[8] = ptr.p->noOfLogRecords;
+ ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+9);
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 9+NdbNodeBitmask::Size, JBB);
}
/*****************************************************************************
@@ -2259,6 +2285,12 @@ Backup::masterSendAbortBackup(Signal* signal, BackupRecordPtr ptr)
rep->reason = ptr.p->errorCode;
sendSignal(ptr.p->clientRef, GSN_BACKUP_ABORT_REP, signal,
BackupAbortRep::SignalLength, JBB);
+
+ signal->theData[0] = EventReport::BackupAborted;
+ signal->theData[1] = ptr.p->clientRef;
+ signal->theData[2] = ptr.p->backupId;
+ signal->theData[3] = ptr.p->errorCode;
+ sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB);
}//if
// ptr.p->masterData.state.setState(INITIAL);
diff --git a/ndb/src/kernel/blocks/backup/read.cpp b/ndb/src/kernel/blocks/backup/read.cpp
index 921c352ea13..89cc08ee9de 100644
--- a/ndb/src/kernel/blocks/backup/read.cpp
+++ b/ndb/src/kernel/blocks/backup/read.cpp
@@ -48,6 +48,7 @@ static Uint32 logEntryNo;
int
main(int argc, const char * argv[]){
+ ndb_init();
if(argc <= 1){
printf("Usage: %s <filename>", argv[0]);
exit(1);
diff --git a/ndb/src/kernel/blocks/backup/restore/Makefile.am b/ndb/src/kernel/blocks/backup/restore/Makefile.am
index eef5bc5a203..16550f13546 100644
--- a/ndb/src/kernel/blocks/backup/restore/Makefile.am
+++ b/ndb/src/kernel/blocks/backup/restore/Makefile.am
@@ -7,7 +7,7 @@ LDADD_LOC = \
$(top_builddir)/ndb/src/libndbclient.la \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/mysys/libmysys.a \
- $(top_builddir)/strings/libmystrings.a
+ $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
include $(top_srcdir)/ndb/config/common.mk.am
diff --git a/ndb/src/kernel/blocks/backup/restore/main.cpp b/ndb/src/kernel/blocks/backup/restore/main.cpp
index a330aa51373..5708415c61e 100644
--- a/ndb/src/kernel/blocks/backup/restore/main.cpp
+++ b/ndb/src/kernel/blocks/backup/restore/main.cpp
@@ -206,6 +206,7 @@ free_data_callback()
int
main(int argc, const char** argv)
{
+ ndb_init();
if (!readArguments(argc, argv))
{
return -1;
@@ -331,7 +332,7 @@ main(int argc, const char** argv)
for (i= 0; i < g_consumers.size(); i++)
g_consumers[i]->endOfTuples();
-
+
RestoreLogIterator logIter(metaData);
if (!logIter.readHeader())
{
@@ -357,7 +358,7 @@ main(int argc, const char** argv)
}
}
clearConsumers();
- return 1;
+ return 0;
} // main
template class Vector<BackupConsumer*>;
diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
index e2085eb612c..234d832655c 100644
--- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
+++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
@@ -97,7 +97,7 @@ Cmvmi::Cmvmi(const Configuration & conf) :
const ndb_mgm_configuration_iterator * db = theConfig.getOwnConfigIterator();
for(unsigned j = 0; j<LogLevel::LOGLEVEL_CATEGORIES; j++){
Uint32 logLevel;
- if(!ndb_mgm_get_int_parameter(db, LogLevel::MIN_LOGLEVEL_ID+j, &logLevel)){
+ if(!ndb_mgm_get_int_parameter(db, CFG_MIN_LOGLEVEL+j, &logLevel)){
clogLevel.setLogLevel((LogLevel::EventCategory)j,
logLevel);
}
@@ -169,9 +169,9 @@ void Cmvmi::execSET_LOGLEVELORD(Signal* signal)
jamEntry();
for(unsigned int i = 0; i<llOrd->noOfEntries; i++){
- category = (LogLevel::EventCategory)llOrd->theCategories[i];
- level = llOrd->theLevels[i];
-
+ category = (LogLevel::EventCategory)(llOrd->theData[i] >> 16);
+ level = llOrd->theData[i] & 0xFFFF;
+
clogLevel.setLogLevel(category, level);
}
}//execSET_LOGLEVELORD()
@@ -196,10 +196,10 @@ void Cmvmi::execEVENT_REP(Signal* signal)
Uint32 threshold = 16;
LogLevel::EventCategory eventCategory = (LogLevel::EventCategory)0;
- for(unsigned int i = 0; i< EventLogger::matrixSize; i++){
- if(EventLogger::matrix[i].eventType == eventType){
- eventCategory = EventLogger::matrix[i].eventCategory;
- threshold = EventLogger::matrix[i].threshold;
+ for(unsigned int i = 0; i< EventLoggerBase::matrixSize; i++){
+ if(EventLoggerBase::matrix[i].eventType == eventType){
+ eventCategory = EventLoggerBase::matrix[i].eventCategory;
+ threshold = EventLoggerBase::matrix[i].threshold;
break;
}
}
@@ -250,17 +250,7 @@ Cmvmi::execEVENT_SUBSCRIBE_REQ(Signal * signal){
sendSignal(subReq->blockRef, GSN_EVENT_SUBSCRIBE_REF, signal, 1, JBB);
return;
}
- /**
- * If it's a new subscription, clear the loglevel
- *
- * Clear only if noOfEntries is 0, this is needed beacuse we set
- * the default loglevels for the MGMT nodes during the inital connect phase.
- * See reportConnected().
- */
- if (subReq->noOfEntries == 0){
- ptr.p->logLevel.clear();
- }
-
+ ptr.p->logLevel.clear();
ptr.p->blockRef = subReq->blockRef;
}
@@ -276,10 +266,9 @@ Cmvmi::execEVENT_SUBSCRIBE_REQ(Signal * signal){
LogLevel::EventCategory category;
Uint32 level = 0;
for(Uint32 i = 0; i<subReq->noOfEntries; i++){
- category = (LogLevel::EventCategory)subReq->theCategories[i];
- level = subReq->theLevels[i];
- ptr.p->logLevel.setLogLevel(category,
- level);
+ category = (LogLevel::EventCategory)(subReq->theData[i] >> 16);
+ level = subReq->theData[i] & 0xFFFF;
+ ptr.p->logLevel.setLogLevel(category, level);
}
}
@@ -384,11 +373,6 @@ void Cmvmi::execCLOSE_COMREQ(Signal* signal)
globalTransporterRegistry.setIOState(i, HaltIO);
globalTransporterRegistry.do_disconnect(i);
-
- /**
- * Cancel possible event subscription
- */
- cancelSubscription(i);
}
}
if (failNo != 0) {
@@ -494,6 +478,8 @@ void Cmvmi::execDISCONNECT_REP(Signal *signal)
globalTransporterRegistry.do_connect(hostId);
}
+ cancelSubscription(hostId);
+
signal->theData[0] = EventReport::Disconnected;
signal->theData[1] = hostId;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
@@ -539,20 +525,6 @@ void Cmvmi::execCONNECT_REP(Signal *signal){
if(type == NodeInfo::MGM){
jam();
globalTransporterRegistry.setIOState(hostId, NoHalt);
-
- EventSubscribeReq* dst = (EventSubscribeReq *)&signal->theData[0];
-
- for (Uint32 i = 0; i < EventLogger::defEventLogMatrixSize; i++) {
- dst->theCategories[i] = EventLogger::defEventLogMatrix[i].eventCategory;
- dst->theLevels[i] = EventLogger::defEventLogMatrix[i].threshold;
- }
-
- dst->noOfEntries = EventLogger::defEventLogMatrixSize;
- /* The BlockNumber is hardcoded as 1 in MgmtSrvr */
- dst->blockRef = numberToRef(MIN_API_BLOCK_NO, hostId);
-
- execEVENT_SUBSCRIBE_REQ(signal);
-
}
//------------------------------------------
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index 7126842459e..d1a8128ea7f 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -15,6 +15,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <ndb_global.h>
+#include <my_sys.h>
#define DBDICT_C
#include "Dbdict.hpp"
@@ -2866,8 +2867,6 @@ Dbdict::execALTER_TABLE_REQ(Signal* signal)
if(parseRecord.errorCode != 0){
jam();
c_opCreateTable.release(alterTabPtr);
- parseRecord.tablePtr.p->tabState = TableRecord::NOT_DEFINED;
- releaseTableObject(parseRecord.tablePtr.i, false);
alterTableRef(signal, req,
(AlterTableRef::ErrorCode) parseRecord.errorCode,
aParseRecord);
@@ -3052,8 +3051,6 @@ Dbdict::execALTER_TAB_REQ(Signal * signal)
if(parseRecord.errorCode != 0){
jam();
c_opCreateTable.release(alterTabPtr);
- parseRecord.tablePtr.p->tabState = TableRecord::NOT_DEFINED;
- releaseTableObject(parseRecord.tablePtr.i, false);
alterTabRef(signal, req,
(AlterTableRef::ErrorCode) parseRecord.errorCode,
aParseRecord);
@@ -3438,7 +3435,6 @@ Dbdict::execALTER_TAB_CONF(Signal * signal){
// Release resources
TableRecordPtr tabPtr;
c_tableRecordPool.getPtr(tabPtr, regAlterTabPtr->m_tablePtrI);
- tabPtr.p->tabState = TableRecord::NOT_DEFINED;
releaseTableObject(tabPtr.i, false);
c_opCreateTable.release(alterTabPtr);
c_blockState = BS_IDLE;
@@ -3479,12 +3475,19 @@ int Dbdict::handleAlterTab(AlterTabReq * req,
jam();
// Table rename
// Remove from hashtable
+#ifdef VM_TRACE
+ TableRecordPtr tmp;
+ ndbrequire(c_tableRecordHash.find(tmp, *origTablePtr.p));
+#endif
c_tableRecordHash.remove(origTablePtr);
strcpy(regAlterTabPtr->previousTableName, origTablePtr.p->tableName);
strcpy(origTablePtr.p->tableName, newTablePtr.p->tableName);
// Set new schema version
origTablePtr.p->tableVersion = newTablePtr.p->tableVersion;
// Put it back
+#ifdef VM_TRACE
+ ndbrequire(!c_tableRecordHash.find(tmp, *origTablePtr.p));
+#endif
c_tableRecordHash.add(origTablePtr);
return 0;
@@ -3505,12 +3508,19 @@ void Dbdict::revertAlterTable(Signal * signal,
TableRecordPtr tablePtr;
c_tableRecordPool.getPtr(tablePtr, tableId);
// Remove from hashtable
+#ifdef VM_TRACE
+ TableRecordPtr tmp;
+ ndbrequire(c_tableRecordHash.find(tmp, * tablePtr.p));
+#endif
c_tableRecordHash.remove(tablePtr);
// Restore name
strcpy(tablePtr.p->tableName, regAlterTabPtr->previousTableName);
// Revert schema version
tablePtr.p->tableVersion = tablePtr.p->tableVersion - 1;
// Put it back
+#ifdef VM_TRACE
+ ndbrequire(!c_tableRecordHash.find(tmp, * tablePtr.p));
+#endif
c_tableRecordHash.add(tablePtr);
return;
@@ -3572,7 +3582,6 @@ Dbdict::alterTab_writeTableConf(Signal* signal,
jam();
// Release resources
c_tableRecordPool.getPtr(tabPtr, regAlterTabPtr->m_tablePtrI);
- tabPtr.p->tabState = TableRecord::NOT_DEFINED;
releaseTableObject(tabPtr.i, false);
c_opCreateTable.release(alterTabPtr);
c_blockState = BS_IDLE;
@@ -4100,6 +4109,8 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {
req->noOfKeyAttr = tabPtr.p->noOfPrimkey;
req->noOfNewAttr = 0;
+ // noOfCharsets passed to TUP in upper half
+ req->noOfNewAttr |= (tabPtr.p->noOfCharsets << 16);
req->checksumIndicator = 1;
req->noOfAttributeGroups = 1;
req->GCPIndicator = 0;
@@ -4161,6 +4172,8 @@ Dbdict::sendLQHADDATTRREQ(Signal* signal,
entry.attrId = attrPtr.p->attributeId;
entry.attrDescriptor = attrPtr.p->attributeDescriptor;
entry.extTypeInfo = attrPtr.p->extType;
+ // charset number passed to TUP, TUX in upper half
+ entry.extTypeInfo |= (attrPtr.p->extPrecision & ~0xFFFF);
if (tabPtr.p->isIndex()) {
Uint32 primaryAttrId;
if (attrPtr.p->nextAttrInTable != RNIL) {
@@ -4456,7 +4469,6 @@ Dbdict::createTab_dropComplete(Signal* signal,
TableRecordPtr tabPtr;
c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
- tabPtr.p->tabState = TableRecord::NOT_DEFINED;
releaseTableObject(tabPtr.i);
PageRecordPtr pagePtr;
@@ -4540,6 +4552,15 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
parseP->errorLine = __LINE__;
return;
}
+
+ if(parseP->requestType == DictTabInfo::AlterTableFromAPI)
+ {
+ ndbrequire(!checkExist);
+ }
+ if(!checkExist)
+ {
+ ndbrequire(parseP->requestType == DictTabInfo::AlterTableFromAPI);
+ }
/* ---------------------------------------------------------------- */
// Verify that table name is an allowed table name.
@@ -4554,14 +4575,15 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
TableRecordPtr tablePtr;
c_tableRecordHash.find(tablePtr, keyRecord);
-
- if (checkExist)
+
+ if (checkExist){
jam();
/* ---------------------------------------------------------------- */
// Check if table already existed.
/* ---------------------------------------------------------------- */
tabRequire(tablePtr.i == RNIL, CreateTableRef::TableAlreadyExist);
-
+ }
+
switch (parseP->requestType) {
case DictTabInfo::CreateTableFromAPI: {
jam();
@@ -4634,12 +4656,13 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
strcpy(tablePtr.p->tableName, keyRecord.tableName);
if (parseP->requestType != DictTabInfo::AlterTableFromAPI) {
jam();
- c_tableRecordHash.add(tablePtr);
- }
-
#ifdef VM_TRACE
- ndbout_c("Dbdict: name=%s,id=%u", tablePtr.p->tableName, tablePtr.i);
+ ndbout_c("Dbdict: name=%s,id=%u", tablePtr.p->tableName, tablePtr.i);
+ TableRecordPtr tmp;
+ ndbrequire(!c_tableRecordHash.find(tmp, * tablePtr.p));
#endif
+ c_tableRecordHash.add(tablePtr);
+ }
//tablePtr.p->noOfPrimkey = tableDesc.NoOfKeyAttr;
//tablePtr.p->noOfNullAttr = tableDesc.NoOfNullable;
@@ -4678,11 +4701,12 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
handleTabInfo(it, parseP);
- if(parseP->errorCode != 0){
+ if(parseP->errorCode != 0)
+ {
/**
* Release table
*/
- releaseTableObject(tablePtr.i);
+ releaseTableObject(tablePtr.i, checkExist);
}
}//handleTabInfoInit()
@@ -4697,6 +4721,8 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it,
Uint32 keyLength = 0;
Uint32 attrCount = tablePtr.p->noOfAttributes;
Uint32 nullCount = 0;
+ Uint32 noOfCharsets = 0;
+ Uint16 charsets[128];
Uint32 recordLength = 0;
AttributeRecordPtr attrPtr;
c_attributeRecordHash.removeAll();
@@ -4751,6 +4777,31 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it,
attrPtr.p->extPrecision = attrDesc.AttributeExtPrecision;
attrPtr.p->extScale = attrDesc.AttributeExtScale;
attrPtr.p->extLength = attrDesc.AttributeExtLength;
+ // charset in upper half of precision
+ unsigned csNumber = (attrPtr.p->extPrecision >> 16);
+ if (csNumber != 0) {
+ CHARSET_INFO* cs = get_charset(csNumber, MYF(0));
+ if (cs == NULL) {
+ parseP->errorCode = CreateTableRef::InvalidCharset;
+ parseP->errorLine = __LINE__;
+ return;
+ }
+ unsigned i = 0;
+ while (i < noOfCharsets) {
+ if (charsets[i] == csNumber)
+ break;
+ i++;
+ }
+ if (i == noOfCharsets) {
+ noOfCharsets++;
+ if (noOfCharsets > sizeof(charsets)/sizeof(charsets[0])) {
+ parseP->errorCode = CreateTableRef::InvalidFormat;
+ parseP->errorLine = __LINE__;
+ return;
+ }
+ charsets[i] = csNumber;
+ }
+ }
/**
* Ignore incoming old-style type and recompute it.
@@ -4814,6 +4865,7 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it,
tablePtr.p->noOfPrimkey = keyCount;
tablePtr.p->noOfNullAttr = nullCount;
+ tablePtr.p->noOfCharsets = noOfCharsets;
tablePtr.p->tupKeyLength = keyLength;
tabRequire(recordLength<= MAX_TUPLE_SIZE_IN_WORDS,
@@ -5465,7 +5517,14 @@ void Dbdict::releaseTableObject(Uint32 tableId, bool removeFromHash)
AttributeRecordPtr attrPtr;
c_tableRecordPool.getPtr(tablePtr, tableId);
if (removeFromHash)
+ {
+#ifdef VM_TRACE
+ TableRecordPtr tmp;
+ ndbrequire(c_tableRecordHash.find(tmp, * tablePtr.p));
+#endif
c_tableRecordHash.remove(tablePtr);
+ }
+ tablePtr.p->tabState = TableRecord::NOT_DEFINED;
Uint32 nextAttrRecord = tablePtr.p->firstAttribute;
while (nextAttrRecord != RNIL) {
@@ -6317,6 +6376,8 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
w.add(DictTabInfo::AttributeStoredInd, (Uint32)DictTabInfo::Stored);
// ext type overrides
w.add(DictTabInfo::AttributeExtType, aRec->extType);
+ w.add(DictTabInfo::AttributeExtPrecision, aRec->extPrecision);
+ w.add(DictTabInfo::AttributeExtScale, aRec->extScale);
w.add(DictTabInfo::AttributeExtLength, aRec->extLength);
w.add(DictTabInfo::AttributeEnd, (Uint32)true);
}
@@ -6510,6 +6571,8 @@ Dbdict::execDROP_INDX_REQ(Signal* signal)
jamEntry();
DropIndxReq* const req = (DropIndxReq*)signal->getDataPtrSend();
OpDropIndexPtr opPtr;
+
+ int err = DropIndxRef::BadRequestType;
const Uint32 senderRef = signal->senderBlockRef();
const DropIndxReq::RequestType requestType = req->getRequestType();
if (requestType == DropIndxReq::RT_USER) {
@@ -6524,6 +6587,34 @@ Dbdict::execDROP_INDX_REQ(Signal* signal)
return;
}
// forward initial request plus operation key to all
+ Uint32 indexId= req->getIndexId();
+ Uint32 indexVersion= req->getIndexVersion();
+ TableRecordPtr tmp;
+ int res = getMetaTablePtr(tmp, indexId, indexVersion);
+ switch(res){
+ case MetaData::InvalidArgument:
+ err = DropIndxRef::IndexNotFound;
+ goto error;
+ case MetaData::TableNotFound:
+ case MetaData::InvalidTableVersion:
+ err = DropIndxRef::InvalidIndexVersion;
+ goto error;
+ }
+
+ if (! tmp.p->isIndex()) {
+ jam();
+ err = DropIndxRef::NotAnIndex;
+ goto error;
+ }
+
+ if (tmp.p->indexState == TableRecord::IS_DROPPING){
+ jam();
+ err = DropIndxRef::IndexNotFound;
+ goto error;
+ }
+
+ tmp.p->indexState = TableRecord::IS_DROPPING;
+
req->setOpKey(++c_opRecordSequence);
NodeReceiverGroup rg(DBDICT, c_aliveNodes);
sendSignal(rg, GSN_DROP_INDX_REQ,
@@ -6573,12 +6664,13 @@ Dbdict::execDROP_INDX_REQ(Signal* signal)
return;
}
}
+error:
jam();
// return to sender
OpDropIndex opBad;
opPtr.p = &opBad;
opPtr.p->save(req);
- opPtr.p->m_errorCode = DropIndxRef::BadRequestType;
+ opPtr.p->m_errorCode = (DropIndxRef::ErrorCode)err;
opPtr.p->m_errorLine = __LINE__;
dropIndex_sendReply(signal, opPtr, true);
}
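
The DICT-side changes above carry the new charset information in the upper 16 bits of existing words: noOfCharsets rides in the top half of noOfNewAttr in ADD_FRAGREQ, and the per-attribute charset number rides in the top half of extPrecision/extTypeInfo. A minimal stand-alone C++ sketch of that packing convention (helper names are illustrative, not NDB API):

#include <cstdint>
#include <cassert>

typedef uint32_t Uint32;

// Illustrative helpers only: the low half keeps the pre-existing field,
// the high half carries the value added by this patch.
inline Uint32 packUpperHalf(Uint32 low, Uint32 high) {
  assert(low <= 0xFFFF && high <= 0xFFFF);
  return low | (high << 16);
}
inline Uint32 lowerHalf(Uint32 w) { return w & 0xFFFF; }
inline Uint32 upperHalf(Uint32 w) { return w >> 16; }

int main() {
  // e.g. noOfNewAttr = 0 with noOfCharsets = 2, as sent in ADD_FRAGREQ
  Uint32 word = packUpperHalf(0, 2);
  assert(lowerHalf(word) == 0 && upperHalf(word) == 2);
  return 0;
}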
diff --git a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
index 5ddaa67a7d6..a94af7b59c8 100644
--- a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
+++ b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
@@ -455,7 +455,7 @@ public:
Uint16 totalAttrReceived;
Uint16 fragCopyCreation;
Uint16 noOfKeyAttr;
- Uint16 noOfNewAttr;
+ Uint32 noOfNewAttr; // noOfCharsets in upper half
Uint16 noOfAttributeGroups;
Uint16 lh3DistrBits;
Uint16 tableType;
diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index 3b05a133bbb..8342870d69c 100644
--- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -1444,6 +1444,7 @@ Dblqh::sendAddAttrReq(Signal* signal)
tupreq->notused1 = 0;
tupreq->attrId = attrId;
tupreq->attrDescriptor = entry.attrDescriptor;
+ tupreq->extTypeInfo = entry.extTypeInfo;
sendSignal(fragptr.p->tupBlockref, GSN_TUP_ADD_ATTRREQ,
signal, TupAddAttrReq::SignalLength, JBB);
return;
@@ -7699,6 +7700,7 @@ void Dblqh::accScanConfScanLab(Signal* signal)
ndbrequire(sz == boundAiLength);
EXECUTE_DIRECT(DBTUX, GSN_TUX_BOUND_INFO,
signal, TuxBoundInfo::SignalLength + boundAiLength);
+ jamEntry();
if (req->errorCode != 0) {
jam();
/*
diff --git a/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp b/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp
index 0f3881e9024..2c62adab3e5 100644
--- a/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp
+++ b/ndb/src/kernel/blocks/dbtup/AttributeOffset.hpp
@@ -22,26 +22,59 @@ class AttributeOffset {
private:
static void setOffset(Uint32 & desc, Uint32 offset);
+ static void setCharsetPos(Uint32 & desc, Uint32 offset);
static void setNullFlagPos(Uint32 & desc, Uint32 offset);
static Uint32 getOffset(const Uint32 &);
+ static bool getCharsetFlag(const Uint32 &);
+ static Uint32 getCharsetPos(const Uint32 &);
static Uint32 getNullFlagPos(const Uint32 &);
static Uint32 getNullFlagOffset(const Uint32 &);
static Uint32 getNullFlagBitOffset(const Uint32 &);
static bool isNULL(const Uint32 &, const Uint32 &);
};
-#define AO_ATTRIBUTE_OFFSET_MASK (0xffff)
-#define AO_NULL_FLAG_POS_MASK (0x7ff)
-#define AO_NULL_FLAG_POS_SHIFT (21)
-#define AO_NULL_FLAG_WORD_MASK (31)
-#define AO_NULL_FLAG_OFFSET_SHIFT (5)
+/**
+ * Allow for 4096 attributes, all nullable, and for 128 different
+ * character sets.
+ *
+ * a = Attribute offset - 11 bits 0-10 ( addr word in 8 kb )
+ * c = Has charset flag 1 bit 11-11
+ * s = Charset pointer position - 7 bits 12-18 ( in table descriptor )
+ * f = Null flag offset in word - 5 bits 20-24 ( address 32 bits )
+ * w = Null word offset - 7 bits 25-31 ( f+w addr 4096 attrs )
+ *
+ *           1111111111222222222233
+ * 01234567890123456789012345678901
+ * aaaaaaaaaaacsssssss fffffwwwwwww
+ */
+
+#define AO_ATTRIBUTE_OFFSET_SHIFT 0
+#define AO_ATTRIBUTE_OFFSET_MASK 0x7ff
+
+#define AO_CHARSET_FLAG_SHIFT 11
+#define AO_CHARSET_POS_SHIFT 12
+#define AO_CHARSET_POS_MASK 127
+
+#define AO_NULL_FLAG_POS_MASK 0xfff // f+w
+#define AO_NULL_FLAG_POS_SHIFT 20
+
+#define AO_NULL_FLAG_WORD_MASK 31 // f
+#define AO_NULL_FLAG_OFFSET_SHIFT 5
inline
void
AttributeOffset::setOffset(Uint32 & desc, Uint32 offset){
ASSERT_MAX(offset, AO_ATTRIBUTE_OFFSET_MASK, "AttributeOffset::setOffset");
- desc |= offset;
+ desc |= (offset << AO_ATTRIBUTE_OFFSET_SHIFT);
+}
+
+inline
+void
+AttributeOffset::setCharsetPos(Uint32 & desc, Uint32 offset) {
+ ASSERT_MAX(offset, AO_CHARSET_POS_MASK, "AttributeOffset::setCharsetPos");
+ desc |= (1 << AO_CHARSET_FLAG_SHIFT);
+ desc |= (offset << AO_CHARSET_POS_SHIFT);
}
inline
@@ -55,7 +88,21 @@ inline
Uint32
AttributeOffset::getOffset(const Uint32 & desc)
{
- return desc & AO_ATTRIBUTE_OFFSET_MASK;
+ return (desc >> AO_ATTRIBUTE_OFFSET_SHIFT) & AO_ATTRIBUTE_OFFSET_MASK;
+}
+
+inline
+bool
+AttributeOffset::getCharsetFlag(const Uint32 & desc)
+{
+ return (desc >> AO_CHARSET_FLAG_SHIFT) & 1;
+}
+
+inline
+Uint32
+AttributeOffset::getCharsetPos(const Uint32 & desc)
+{
+ return (desc >> AO_CHARSET_POS_SHIFT) & AO_CHARSET_POS_MASK;
}
inline
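
To make the new descriptor layout concrete, here is a small self-contained sketch that packs and unpacks one descriptor word using the shift/mask constants above; it only loosely mirrors setOffset/setCharsetPos/setNullFlagPos and is not the block's actual code:

#include <cstdint>
#include <cstdio>

typedef uint32_t Uint32;

int main() {
  // pack: offset 5, charset position 3, null flag position 33 (word 1, bit 1)
  Uint32 desc = 0;
  desc |= (5u << 0);            // AO_ATTRIBUTE_OFFSET_SHIFT / MASK 0x7ff
  desc |= (1u << 11);           // AO_CHARSET_FLAG_SHIFT
  desc |= (3u << 12);           // AO_CHARSET_POS_SHIFT / MASK 127
  desc |= (33u << 20);          // AO_NULL_FLAG_POS_SHIFT / MASK 0xfff

  // unpack, using the same masks as the getters above
  Uint32 offset   = (desc >> 0) & 0x7ff;
  Uint32 csFlag   = (desc >> 11) & 1;
  Uint32 csPos    = (desc >> 12) & 127;
  Uint32 nullPos  = (desc >> 20) & 0xfff;
  Uint32 nullWord = nullPos >> 5;       // AO_NULL_FLAG_OFFSET_SHIFT
  Uint32 nullBit  = nullPos & 31;       // AO_NULL_FLAG_WORD_MASK
  printf("offset=%u csFlag=%u csPos=%u nullWord=%u nullBit=%u\n",
         offset, csFlag, csPos, nullWord, nullBit);
  return 0;
}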
diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
index cb7e35ea73e..0e8dd5fbbe8 100644
--- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
+++ b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -502,6 +502,7 @@ struct Fragoperrec {
Uint32 attributeCount;
Uint32 freeNullBit;
Uint32 noOfNewAttrCount;
+ Uint32 charsetIndex;
BlockReference lqhBlockrefFrag;
};
typedef Ptr<Fragoperrec> FragoperrecPtr;
@@ -514,6 +515,7 @@ struct Fragrecord {
Uint32 emptyPrimPage;
Uint32 firstusedOprec;
+ Uint32 lastusedOprec;
Uint32 thFreeFirst;
Uint32 thFreeCopyFirst;
@@ -785,6 +787,7 @@ struct Tablerec {
ReadFunction* readFunctionArray;
UpdateFunction* updateFunctionArray;
+ CHARSET_INFO** charsetArray;
Uint32 readKeyArray;
Uint32 tabDescriptor;
@@ -796,6 +799,7 @@ struct Tablerec {
Uint16 tupheadsize;
Uint16 noOfAttr;
Uint16 noOfKeyAttr;
+ Uint16 noOfCharsets;
Uint16 noOfNewAttr;
Uint16 noOfNullAttr;
Uint16 noOfAttributeGroups;
@@ -1001,17 +1005,20 @@ public:
void tuxGetNode(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32*& node);
/*
- * TUX reads primary table attributes for index keys. Input is
- * attribute ids in AttributeHeader format. Output is pointers to
- * attribute data within tuple or 0 for NULL value.
+ * TUX reads primary table attributes for index keys. Tuple is
+ * specified by location of original tuple and version number. Input
+ * is attribute ids in AttributeHeader format. Output is attribute
+ * data with headers. Uses readAttributes with xfrm option set.
+ * Returns number of words or negative (-terrorCode) on error.
*/
- void tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, Uint32 numAttrs, const Uint32* attrIds, const Uint32** attrData);
+ int tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, const Uint32* attrIds, Uint32 numAttrs, Uint32* dataOut);
/*
* TUX reads primary key without headers into an array of words. Used
- * for md5 summing and when returning keyinfo.
+ * for md5 summing and when returning keyinfo. Returns number of
+ * words or negative (-terrorCode) on error.
*/
- void tuxReadKeys(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* pkSize, Uint32* pkData);
+ int tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* dataOut);
/*
* TUX checks if tuple is visible to scan.
@@ -1365,10 +1372,11 @@ private:
//------------------------------------------------------------------
int readAttributes(Page* const pagePtr,
Uint32 TupHeadOffset,
- Uint32* inBuffer,
+ const Uint32* inBuffer,
Uint32 inBufLen,
Uint32* outBuffer,
- Uint32 TmaxRead);
+ Uint32 TmaxRead,
+ bool xfrmFlag);
//------------------------------------------------------------------
//------------------------------------------------------------------
@@ -1614,6 +1622,20 @@ private:
Uint32 attrDescriptor,
Uint32 attrDes2);
+// *****************************************************************
+// Read char routines optionally (tXfrmFlag) apply strxfrm
+// *****************************************************************
+
+ bool readCharNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
+ bool readCharNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2);
+
//------------------------------------------------------------------
//------------------------------------------------------------------
bool nullFlagCheck(Uint32 attrDes2);
@@ -1632,7 +1654,7 @@ private:
//------------------------------------------------------------------
//------------------------------------------------------------------
- void initOpConnection(Operationrec* const regOperPtr);
+ void initOpConnection(Operationrec* regOperPtr, Fragrecord*);
//------------------------------------------------------------------
//------------------------------------------------------------------
@@ -1909,7 +1931,8 @@ private:
void updatePackedList(Signal* signal, Uint16 ahostIndex);
void setUpDescriptorReferences(Uint32 descriptorReference,
- Tablerec* const regTabPtr);
+ Tablerec* const regTabPtr,
+ const Uint32* offset);
void setUpKeyArray(Tablerec* const regTabPtr);
bool addfragtotab(Tablerec* const regTabPtr, Uint32 fragId, Uint32 fragIndex);
void deleteFragTab(Tablerec* const regTabPtr, Uint32 fragId);
@@ -2098,7 +2121,8 @@ private:
//-----------------------------------------------------------------------------
// Public methods
- Uint32 allocTabDescr(Uint32 noOfAttributes, Uint32 noOfKeyAttr, Uint32 noOfAttributeGroups);
+ Uint32 getTabDescrOffsets(const Tablerec* regTabPtr, Uint32* offset);
+ Uint32 allocTabDescr(const Tablerec* regTabPtr, Uint32* offset);
void freeTabDescr(Uint32 retRef, Uint32 retNo);
Uint32 getTabDescrWord(Uint32 index);
void setTabDescrWord(Uint32 index, Uint32 word);
@@ -2217,6 +2241,7 @@ private:
Uint32 tMaxRead;
Uint32 tOutBufIndex;
Uint32* tTupleHeader;
+ bool tXfrmFlag;
// updateAttributes module
Uint32 tInBufIndex;
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp b/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
index 1ffc5f06754..e9043a8b52d 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupAbort.cpp
@@ -77,7 +77,7 @@ void Dbtup::execTUP_ABORTREQ(Signal* signal)
if (regOperPtr.p->optype == ZREAD) {
ljam();
freeAllAttrBuffers(regOperPtr.p);
- initOpConnection(regOperPtr.p);
+ initOpConnection(regOperPtr.p, 0);
return;
}//if
@@ -134,7 +134,7 @@ void Dbtup::execTUP_ABORTREQ(Signal* signal)
ndbrequire(regOperPtr.p->tupleState == ALREADY_ABORTED);
commitUpdate(signal, regOperPtr.p, regFragPtr.p, regTabPtr.p);
}//if
- initOpConnection(regOperPtr.p);
+ initOpConnection(regOperPtr.p, regFragPtr.p);
}//execTUP_ABORTREQ()
void Dbtup::setTupleStateOnPreviousOps(Uint32 prevOpIndex)
@@ -459,7 +459,7 @@ void Dbtup::tupkeyErrorLab(Signal* signal)
freeAllAttrBuffers(regOperPtr);
abortUpdate(signal, regOperPtr, fragptr.p, tabptr.p);
removeActiveOpList(regOperPtr);
- initOpConnection(regOperPtr);
+ initOpConnection(regOperPtr, fragptr.p);
regOperPtr->transstate = IDLE;
regOperPtr->tupleState = NO_OTHER_OP;
TupKeyRef * const tupKeyRef = (TupKeyRef *)signal->getDataPtrSend();
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp b/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
index fa3667b221e..cbd56c3281f 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
@@ -224,7 +224,8 @@ void Dbtup::removeActiveOpList(Operationrec* const regOperPtr)
/* ---------------------------------------------------------------- */
/* INITIALIZATION OF ONE CONNECTION RECORD TO PREPARE FOR NEXT OP. */
/* ---------------------------------------------------------------- */
-void Dbtup::initOpConnection(Operationrec* const regOperPtr)
+void Dbtup::initOpConnection(Operationrec* regOperPtr,
+ Fragrecord * fragPtrP)
{
Uint32 RinFragList = regOperPtr->inFragList;
regOperPtr->transstate = IDLE;
@@ -244,22 +245,18 @@ void Dbtup::initOpConnection(Operationrec* const regOperPtr)
regOperPtr->inFragList = ZFALSE;
if (tropPrevLinkPtr.i == RNIL) {
ljam();
- FragrecordPtr regFragPtr;
- regFragPtr.i = regOperPtr->fragmentPtr;
- ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord);
- regFragPtr.p->firstusedOprec = tropNextLinkPtr.i;
+ fragPtrP->firstusedOprec = tropNextLinkPtr.i;
} else {
ljam();
ptrCheckGuard(tropPrevLinkPtr, cnoOfOprec, operationrec);
tropPrevLinkPtr.p->nextOprecInList = tropNextLinkPtr.i;
}//if
if (tropNextLinkPtr.i == RNIL) {
- ;
+ fragPtrP->lastusedOprec = tropPrevLinkPtr.i;
} else {
- ljam();
ptrCheckGuard(tropNextLinkPtr, cnoOfOprec, operationrec);
tropNextLinkPtr.p->prevOprecInList = tropPrevLinkPtr.i;
- }//if
+ }
regOperPtr->prevOprecInList = RNIL;
regOperPtr->nextOprecInList = RNIL;
}//if
@@ -336,7 +333,7 @@ void Dbtup::execTUP_COMMITREQ(Signal* signal)
commitUpdate(signal, regOperPtr.p, regFragPtr.p, regTabPtr.p);
removeActiveOpList(regOperPtr.p);
}//if
- initOpConnection(regOperPtr.p);
+ initOpConnection(regOperPtr.p, regFragPtr.p);
}//execTUP_COMMITREQ()
void
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
index 0a47778f7c1..0061ebe812d 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
@@ -319,24 +319,20 @@ void Dbtup::linkOpIntoFragList(OperationrecPtr regOperPtr,
Fragrecord* const regFragPtr)
{
OperationrecPtr sopTmpOperPtr;
-/* ----------------------------------------------------------------- */
-/* LINK THE OPERATION INTO A DOUBLY LINKED LIST ON THE FRAGMENT*/
-/* PUT IT FIRST IN THIS LIST SINCE IT DOESN'T MATTER WHERE IT */
-/* IS PUT. */
-/* ----------------------------------------------------------------- */
+ Uint32 tail = regFragPtr->lastusedOprec;
ndbrequire(regOperPtr.p->inFragList == ZFALSE);
regOperPtr.p->inFragList = ZTRUE;
- regOperPtr.p->prevOprecInList = RNIL;
- sopTmpOperPtr.i = regFragPtr->firstusedOprec;
- regFragPtr->firstusedOprec = regOperPtr.i;
- regOperPtr.p->nextOprecInList = sopTmpOperPtr.i;
- if (sopTmpOperPtr.i == RNIL) {
- return;
+ regOperPtr.p->prevOprecInList = tail;
+ regOperPtr.p->nextOprecInList = RNIL;
+ sopTmpOperPtr.i = tail;
+ if (tail == RNIL) {
+ regFragPtr->firstusedOprec = regOperPtr.i;
} else {
jam();
ptrCheckGuard(sopTmpOperPtr, cnoOfOprec, operationrec);
- sopTmpOperPtr.p->prevOprecInList = regOperPtr.i;
+ sopTmpOperPtr.p->nextOprecInList = regOperPtr.i;
}//if
+ regFragPtr->lastusedOprec = regOperPtr.i;
}//Dbtup::linkOpIntoFragList()
/*
@@ -903,7 +899,8 @@ int Dbtup::handleReadReq(Signal* signal,
&cinBuffer[0],
regOperPtr->attrinbufLen,
dst,
- dstLen);
+ dstLen,
+ false);
if (TnoOfDataRead != (Uint32)-1) {
/* ------------------------------------------------------------------------- */
// We have read all data into coutBuffer. Now send it to the API.
@@ -1274,7 +1271,8 @@ int Dbtup::interpreterStartLab(Signal* signal,
&cinBuffer[5],
RinitReadLen,
&dst[0],
- dstLen);
+ dstLen,
+ false);
if (TnoDataRW != (Uint32)-1) {
RattroutCounter = TnoDataRW;
RinstructionCounter += RinitReadLen;
@@ -1347,7 +1345,8 @@ int Dbtup::interpreterStartLab(Signal* signal,
&cinBuffer[RinstructionCounter],
RfinalRLen,
&dst[RattroutCounter],
- (dstLen - RattroutCounter));
+ (dstLen - RattroutCounter),
+ false);
if (TnoDataRW != (Uint32)-1) {
RattroutCounter += TnoDataRW;
} else {
@@ -1487,7 +1486,8 @@ int Dbtup::interpreterNextLab(Signal* signal,
&theAttrinfo,
(Uint32)1,
&TregMemBuffer[theRegister],
- (Uint32)3);
+ (Uint32)3,
+ false);
if (TnoDataRW == 2) {
/* ------------------------------------------------------------- */
// Two words read means that we get the instruction plus one 32
@@ -1833,7 +1833,8 @@ int Dbtup::interpreterNextLab(Signal* signal,
Int32 TnoDataR = readAttributes(pagePtr,
TupHeadOffset,
&attrId, 1,
- tmpArea, tmpAreaSz);
+ tmpArea, tmpAreaSz,
+ false);
if (TnoDataR == -1) {
jam();
@@ -1929,7 +1930,8 @@ int Dbtup::interpreterNextLab(Signal* signal,
Int32 TnoDataR = readAttributes(pagePtr,
TupHeadOffset,
&attrId, 1,
- tmpArea, tmpAreaSz);
+ tmpArea, tmpAreaSz,
+ false);
if (TnoDataR == -1) {
jam();
@@ -1957,7 +1959,8 @@ int Dbtup::interpreterNextLab(Signal* signal,
Int32 TnoDataR = readAttributes(pagePtr,
TupHeadOffset,
&attrId, 1,
- tmpArea, tmpAreaSz);
+ tmpArea, tmpAreaSz,
+ false);
if (TnoDataR == -1) {
jam();
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
index 1e57f127fbc..d33adcd08e1 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
@@ -963,6 +963,7 @@ void Dbtup::initializeFragrecord()
regFragPtr.p->nextfreefrag = regFragPtr.i + 1;
regFragPtr.p->checkpointVersion = RNIL;
regFragPtr.p->firstusedOprec = RNIL;
+ regFragPtr.p->lastusedOprec = RNIL;
regFragPtr.p->fragStatus = IDLE;
}//for
regFragPtr.i = cnoOfFragrec - 1;
@@ -1067,6 +1068,7 @@ Dbtup::initTab(Tablerec* const regTabPtr)
}//for
regTabPtr->readFunctionArray = NULL;
regTabPtr->updateFunctionArray = NULL;
+ regTabPtr->charsetArray = NULL;
regTabPtr->tabDescriptor = RNIL;
regTabPtr->attributeGroupDescriptor = RNIL;
@@ -1163,7 +1165,7 @@ void Dbtup::execTUPSEIZEREQ(Signal* signal)
return;
}//if
regOperPtr.p->optype = ZREAD;
- initOpConnection(regOperPtr.p);
+ initOpConnection(regOperPtr.p, 0);
regOperPtr.p->userpointer = userPtr;
regOperPtr.p->userblockref = userRef;
signal->theData[0] = regOperPtr.p->userpointer;
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp b/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
index ec2c63c736e..2dd707ebafc 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
@@ -112,10 +112,11 @@ Dbtup::tuxGetNode(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32*& no
node = &pagePtr.p->pageWord[pageOffset] + attrDataOffset;
}
-void
-Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, Uint32 numAttrs, const Uint32* attrIds, const Uint32** attrData)
+int
+Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tupVersion, const Uint32* attrIds, Uint32 numAttrs, Uint32* dataOut)
{
ljamEntry();
+ // use own variables instead of globals
FragrecordPtr fragPtr;
fragPtr.i = fragPtrI;
ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
@@ -134,6 +135,7 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tu
while (true) {
ptrCheckGuard(opPtr, cnoOfOprec, operationrec);
if (opPtr.p->realPageIdC != RNIL) {
+ // update page and offset
pagePtr.i = opPtr.p->realPageIdC;
pageOffset = opPtr.p->pageOffsetC;
ptrCheckGuard(pagePtr, cnoOfPage, page);
@@ -147,33 +149,34 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tu
ndbrequire(++loopGuard < (1 << ZTUP_VERSION_BITS));
}
}
- const Uint32 tabDescriptor = tablePtr.p->tabDescriptor;
- const Uint32* tupleHeader = &pagePtr.p->pageWord[pageOffset];
- for (Uint32 i = 0; i < numAttrs; i++) {
- AttributeHeader ah(attrIds[i]);
- const Uint32 attrId = ah.getAttributeId();
- const Uint32 index = tabDescriptor + (attrId << ZAD_LOG_SIZE);
- const Uint32 desc1 = tableDescriptor[index].tabDescr;
- const Uint32 desc2 = tableDescriptor[index + 1].tabDescr;
- if (AttributeDescriptor::getNullable(desc1)) {
- Uint32 offset = AttributeOffset::getNullFlagOffset(desc2);
- ndbrequire(offset < tablePtr.p->tupNullWords);
- offset += tablePtr.p->tupNullIndex;
- ndbrequire(offset < tablePtr.p->tupheadsize);
- if (AttributeOffset::isNULL(tupleHeader[offset], desc2)) {
- ljam();
- attrData[i] = 0;
- continue;
- }
- }
- attrData[i] = tupleHeader + AttributeOffset::getOffset(desc2);
+ // read key attributes from found tuple version
+ // save globals
+ TablerecPtr tabptr_old = tabptr;
+ FragrecordPtr fragptr_old = fragptr;
+ OperationrecPtr operPtr_old = operPtr;
+ // new globals
+ tabptr = tablePtr;
+ fragptr = fragPtr;
+ operPtr.i = RNIL;
+ operPtr.p = NULL;
+ // do it
+ int ret = readAttributes(pagePtr.p, pageOffset, attrIds, numAttrs, dataOut, ZNIL, true);
+ // restore globals
+ tabptr = tabptr_old;
+ fragptr = fragptr_old;
+ operPtr = operPtr_old;
+ // done
+ if (ret == (Uint32)-1) {
+ ret = terrorCode ? (-(int)terrorCode) : -1;
}
+ return ret;
}
-void
-Dbtup::tuxReadKeys(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* pkSize, Uint32* pkData)
+int
+Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* dataOut)
{
ljamEntry();
+ // use own variables instead of globals
FragrecordPtr fragPtr;
fragPtr.i = fragPtrI;
ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
@@ -184,25 +187,45 @@ Dbtup::tuxReadKeys(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* pk
pagePtr.i = pageId;
ptrCheckGuard(pagePtr, cnoOfPage, page);
const Uint32 tabDescriptor = tablePtr.p->tabDescriptor;
- const Uint32 numAttrs = tablePtr.p->noOfKeyAttr;
const Uint32* attrIds = &tableDescriptor[tablePtr.p->readKeyArray].tabDescr;
- const Uint32* tupleHeader = &pagePtr.p->pageWord[pageOffset];
- Uint32 size = 0;
- for (Uint32 i = 0; i < numAttrs; i++) {
- AttributeHeader ah(attrIds[i]);
- const Uint32 attrId = ah.getAttributeId();
- const Uint32 index = tabDescriptor + (attrId << ZAD_LOG_SIZE);
- const Uint32 desc1 = tableDescriptor[index].tabDescr;
- const Uint32 desc2 = tableDescriptor[index + 1].tabDescr;
- ndbrequire(! AttributeDescriptor::getNullable(desc1));
- const Uint32 attrSize = AttributeDescriptor::getSizeInWords(desc1);
- const Uint32* attrData = tupleHeader + AttributeOffset::getOffset(desc2);
- for (Uint32 j = 0; j < attrSize; j++) {
- pkData[size + j] = attrData[j];
+ const Uint32 numAttrs = tablePtr.p->noOfKeyAttr;
+ // read pk attributes from original tuple
+ // save globals
+ TablerecPtr tabptr_old = tabptr;
+ FragrecordPtr fragptr_old = fragptr;
+ OperationrecPtr operPtr_old = operPtr;
+ // new globals
+ tabptr = tablePtr;
+ fragptr = fragPtr;
+ operPtr.i = RNIL;
+ operPtr.p = NULL;
+ // do it
+ int ret = readAttributes(pagePtr.p, pageOffset, attrIds, numAttrs, dataOut, ZNIL, true);
+ // restore globals
+ tabptr = tabptr_old;
+ fragptr = fragptr_old;
+ operPtr = operPtr_old;
+ // done
+ if (ret != (Uint32)-1) {
+ // remove headers
+ Uint32 n = 0;
+ Uint32 i = 0;
+ while (n < numAttrs) {
+ const AttributeHeader ah(dataOut[i]);
+ Uint32 size = ah.getDataSize();
+ ndbrequire(size != 0);
+ for (Uint32 j = 0; j < size; j++) {
+ dataOut[i + j - n] = dataOut[i + j + 1];
+ }
+ n += 1;
+ i += 1 + size;
}
- size += attrSize;
+ ndbrequire(i == ret);
+ ret -= numAttrs;
+ } else {
+ ret = terrorCode ? (-(int)terrorCode) : -1;
}
- *pkSize = size;
+ return ret;
}
bool
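
The header-stripping loop in tuxReadPk above compacts an (AttributeHeader, data...) sequence in place by sliding each value back over the headers already consumed. A stand-alone sketch of the same compaction, with a made-up header encoding (data size in the low byte) instead of the real AttributeHeader:

#include <cstdint>
#include <cassert>

typedef uint32_t Uint32;

// Compact (header, data...) pairs in place, dropping one header word per
// attribute; returns the number of remaining data words.
static int stripHeaders(Uint32* data, Uint32 numAttrs, Uint32 totalWords) {
  Uint32 n = 0, i = 0;
  while (n < numAttrs) {
    Uint32 size = data[i] & 0xFF;          // hypothetical header decode
    for (Uint32 j = 0; j < size; j++)
      data[i + j - n] = data[i + j + 1];   // slide value back over headers
    n += 1;
    i += 1 + size;
  }
  assert(i == totalWords);
  return (int)(totalWords - numAttrs);
}

int main() {
  // two attributes of sizes 1 and 2
  Uint32 buf[] = {1, 0xAAAA, 2, 0xBBBB, 0xCCCC};
  int words = stripHeaders(buf, 2, 5);
  assert(words == 3 && buf[0] == 0xAAAA && buf[1] == 0xBBBB && buf[2] == 0xCCCC);
  return 0;
}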
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
index 09889a51fa3..efea312b865 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
@@ -20,12 +20,14 @@
#include <RefConvert.hpp>
#include <ndb_limits.h>
#include <pc.hpp>
+#include <signaldata/TupFrag.hpp>
#include <signaldata/FsConf.hpp>
#include <signaldata/FsRemoveReq.hpp>
#include <signaldata/DropTab.hpp>
#include <signaldata/AlterTab.hpp>
#include <AttributeDescriptor.hpp>
#include "AttributeOffset.hpp"
+#include <my_sys.h>
#define ljam() { jamLine(20000 + __LINE__); }
#define ljamEntry() { jamEntryLine(20000 + __LINE__); }
@@ -52,7 +54,10 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
/* Uint32 schemaVersion = signal->theData[8];*/
Uint32 noOfKeyAttr = signal->theData[9];
- Uint32 noOfNewAttr = signal->theData[10];
+ Uint32 noOfNewAttr = (signal->theData[10] & 0xFFFF);
+ /* DICT sends number of character sets in upper half */
+ Uint32 noOfCharsets = (signal->theData[10] >> 16);
+
Uint32 checksumIndicator = signal->theData[11];
Uint32 noOfAttributeGroups = signal->theData[12];
Uint32 globalCheckpointIdIndicator = signal->theData[13];
@@ -75,6 +80,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
fragOperPtr.p->attributeCount = noOfAttributes;
fragOperPtr.p->freeNullBit = noOfNullAttr;
fragOperPtr.p->noOfNewAttrCount = noOfNewAttr;
+ fragOperPtr.p->charsetIndex = 0;
ndbrequire(reqinfo == ZADDFRAG);
@@ -156,6 +162,7 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
regTabPtr.p->tupheadsize = regTabPtr.p->tupGCPIndex;
regTabPtr.p->noOfKeyAttr = noOfKeyAttr;
+ regTabPtr.p->noOfCharsets = noOfCharsets;
regTabPtr.p->noOfAttr = noOfAttributes;
regTabPtr.p->noOfNewAttr = noOfNewAttr;
regTabPtr.p->noOfNullAttr = noOfNullAttr;
@@ -163,13 +170,14 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
regTabPtr.p->notNullAttributeMask.clear();
- Uint32 tableDescriptorRef = allocTabDescr(noOfAttributes, noOfKeyAttr, noOfAttributeGroups);
+ Uint32 offset[10];
+ Uint32 tableDescriptorRef = allocTabDescr(regTabPtr.p, offset);
if (tableDescriptorRef == RNIL) {
ljam();
fragrefuse4Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId);
return;
}//if
- setUpDescriptorReferences(tableDescriptorRef, regTabPtr.p);
+ setUpDescriptorReferences(tableDescriptorRef, regTabPtr.p, offset);
} else {
ljam();
fragOperPtr.p->definingFragment = false;
@@ -251,6 +259,9 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec);
Uint32 attrId = signal->theData[2];
Uint32 attrDescriptor = signal->theData[3];
+ // DICT sends extended type (ignored) and charset number
+ Uint32 extType = (signal->theData[4] & 0xFF);
+ Uint32 csNumber = (signal->theData[4] >> 16);
regTabPtr.i = fragOperPtr.p->tableidFrag;
ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);
@@ -304,6 +315,29 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
} else {
ndbrequire(false);
}//if
+ if (csNumber != 0) {
+ CHARSET_INFO* cs = get_charset(csNumber, MYF(0));
+ if (cs == NULL) {
+ ljam();
+ terrorCode = TupAddAttrRef::InvalidCharset;
+ addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
+ return;
+ }
+ Uint32 i = 0;
+ while (i < fragOperPtr.p->charsetIndex) {
+ ljam();
+ if (regTabPtr.p->charsetArray[i] == cs)
+ break;
+ i++;
+ }
+ if (i == fragOperPtr.p->charsetIndex) {
+ ljam();
+ fragOperPtr.p->charsetIndex++;
+ }
+ ndbrequire(i < regTabPtr.p->noOfCharsets);
+ regTabPtr.p->charsetArray[i] = cs;
+ AttributeOffset::setCharsetPos(attrDes2, i);
+ }
setTabDescrWord(firstTabDesIndex + 1, attrDes2);
if (regTabPtr.p->tupheadsize > MAX_TUPLE_SIZE_IN_WORDS) {
@@ -340,20 +374,28 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
return;
}//Dbtup::execTUP_ADD_ATTRREQ()
+/*
+ * Descriptor has these parts:
+ *
+ * 0 readFunctionArray ( one for each attribute )
+ * 1 updateFunctionArray ( ditto )
+ * 2 charsetArray ( pointers to distinct CHARSET_INFO )
+ * 3 readKeyArray ( attribute ids of keys )
+ * 4 attributeGroupDescriptor ( currently size 1 but unused )
+ * 5 tabDescriptor ( attribute descriptors, each ZAD_SIZE )
+ */
+
void Dbtup::setUpDescriptorReferences(Uint32 descriptorReference,
- Tablerec* const regTabPtr)
+ Tablerec* const regTabPtr,
+ const Uint32* offset)
{
- Uint32 noOfAttributes = regTabPtr->noOfAttr;
- descriptorReference += ZTD_SIZE;
- ReadFunction * tmp = (ReadFunction*)&tableDescriptor[descriptorReference].tabDescr;
- regTabPtr->readFunctionArray = tmp;
- regTabPtr->updateFunctionArray = (UpdateFunction*)(tmp + noOfAttributes);
-
- TableDescriptor * start = &tableDescriptor[descriptorReference];
- TableDescriptor * end = (TableDescriptor*)(tmp + 2 * noOfAttributes);
- regTabPtr->readKeyArray = descriptorReference + (end - start);
- regTabPtr->attributeGroupDescriptor = regTabPtr->readKeyArray + regTabPtr->noOfKeyAttr;
- regTabPtr->tabDescriptor = regTabPtr->attributeGroupDescriptor + regTabPtr->noOfAttributeGroups;
+ Uint32* desc = &tableDescriptor[descriptorReference].tabDescr;
+ regTabPtr->readFunctionArray = (ReadFunction*)(desc + offset[0]);
+ regTabPtr->updateFunctionArray = (UpdateFunction*)(desc + offset[1]);
+ regTabPtr->charsetArray = (CHARSET_INFO**)(desc + offset[2]);
+ regTabPtr->readKeyArray = descriptorReference + offset[3];
+ regTabPtr->attributeGroupDescriptor = descriptorReference + offset[4];
+ regTabPtr->tabDescriptor = descriptorReference + offset[5];
}//Dbtup::setUpDescriptorReferences()
Uint32
@@ -491,14 +533,18 @@ void Dbtup::releaseTabDescr(Tablerec* const regTabPtr)
Uint32 descriptor = regTabPtr->readKeyArray;
if (descriptor != RNIL) {
ljam();
+ Uint32 offset[10];
+ getTabDescrOffsets(regTabPtr, offset);
+
regTabPtr->tabDescriptor = RNIL;
regTabPtr->readKeyArray = RNIL;
regTabPtr->readFunctionArray = NULL;
regTabPtr->updateFunctionArray = NULL;
+ regTabPtr->charsetArray = NULL;
regTabPtr->attributeGroupDescriptor= RNIL;
- Uint32 sizeFunctionArrays = 2 * (regTabPtr->noOfAttr * sizeOfReadFunction());
- descriptor -= (sizeFunctionArrays + ZTD_SIZE);
+ // move to start of descriptor
+ descriptor -= offset[3];
Uint32 retNo = getTabDescrWord(descriptor + ZTD_DATASIZE);
ndbrequire(getTabDescrWord(descriptor + ZTD_HEADER) == ZTD_TYPE_NORMAL);
ndbrequire(retNo == getTabDescrWord((descriptor + retNo) - ZTD_TR_SIZE));
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
index cc47ef7e78f..a4e7cb47249 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
@@ -35,6 +35,7 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr)
for (Uint32 i = 0; i < regTabPtr->noOfAttr; i++) {
Uint32 attrDescriptorStart = startDescriptor + (i << ZAD_LOG_SIZE);
Uint32 attrDescriptor = tableDescriptor[attrDescriptorStart].tabDescr;
+ Uint32 attrOffset = tableDescriptor[attrDescriptorStart + 1].tabDescr;
if (!AttributeDescriptor::getDynamic(attrDescriptor)) {
if ((AttributeDescriptor::getArrayType(attrDescriptor) == ZNON_ARRAY) ||
(AttributeDescriptor::getArrayType(attrDescriptor) == ZFIXED_ARRAY)) {
@@ -54,6 +55,11 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr)
} else {
ndbrequire(false);
}//if
+ // replace read function of char attribute
+ if (AttributeOffset::getCharsetFlag(attrOffset)) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readCharNotNULL;
+ }
} else {
if (AttributeDescriptor::getSizeInWords(attrDescriptor) == 1) {
ljam();
@@ -72,6 +78,11 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr)
regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHZeroWordNULLable;
regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
}//if
+ // replace read function of char attribute
+ if (AttributeOffset::getCharsetFlag(attrOffset)) {
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readCharNULLable;
+ }
}//if
} else if (AttributeDescriptor::getArrayType(attrDescriptor) == ZVAR_ARRAY) {
if (!AttributeDescriptor::getNullable(attrDescriptor)) {
@@ -146,10 +157,11 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr)
/* ---------------------------------------------------------------- */
int Dbtup::readAttributes(Page* const pagePtr,
Uint32 tupHeadOffset,
- Uint32* inBuffer,
+ const Uint32* inBuffer,
Uint32 inBufLen,
Uint32* outBuffer,
- Uint32 maxRead)
+ Uint32 maxRead,
+ bool xfrmFlag)
{
Tablerec* const regTabPtr = tabptr.p;
Uint32 numAttributes = regTabPtr->noOfAttr;
@@ -162,6 +174,7 @@ int Dbtup::readAttributes(Page* const pagePtr,
tCheckOffset = regTabPtr->tupheadsize;
tMaxRead = maxRead;
tTupleHeader = &pagePtr->pageWord[tupHeadOffset];
+ tXfrmFlag = xfrmFlag;
ndbrequire(tupHeadOffset + tCheckOffset <= ZWORDS_ON_PAGE);
while (inBufIndex < inBufLen) {
@@ -542,6 +555,74 @@ Dbtup::readDynSmallVarSize(Uint32* outBuffer,
return false;
}//Dbtup::readDynSmallVarSize()
+
+bool
+Dbtup::readCharNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Uint32 indexBuf = tOutBufIndex;
+ Uint32 readOffset = AttributeOffset::getOffset(attrDes2);
+ Uint32 attrNoOfWords = AttributeDescriptor::getSizeInWords(attrDescriptor);
+ Uint32 newIndexBuf = indexBuf + attrNoOfWords;
+ Uint32 maxRead = tMaxRead;
+
+ ndbrequire((readOffset + attrNoOfWords - 1) < tCheckOffset);
+ if (newIndexBuf <= maxRead) {
+ ljam();
+ ahOut->setDataSize(attrNoOfWords);
+ if (! tXfrmFlag) {
+ MEMCOPY_NO_WORDS(&outBuffer[indexBuf],
+ &tTupleHeader[readOffset],
+ attrNoOfWords);
+ } else {
+ ljam();
+ Tablerec* regTabPtr = tabptr.p;
+ Uint32 i = AttributeOffset::getCharsetPos(attrDes2);
+ ndbrequire(i < tabptr.p->noOfCharsets);
+ // not const in MySQL
+ CHARSET_INFO* cs = tabptr.p->charsetArray[i];
+ // XXX should strip Uint32 null padding
+ const unsigned nBytes = attrNoOfWords << 2;
+ unsigned n =
+ (*cs->coll->strnxfrm)(cs,
+ (uchar*)&outBuffer[indexBuf],
+ nBytes,
+ (const uchar*)&tTupleHeader[readOffset],
+ nBytes);
+ // pad with ascii spaces
+ while (n < nBytes)
+ ((uchar*)&outBuffer[indexBuf])[n++] = 0x20;
+ }
+ tOutBufIndex = newIndexBuf;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
+ return false;
+ }
+}
+
+bool
+Dbtup::readCharNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ if (!nullFlagCheck(attrDes2)) {
+ ljam();
+ return readCharNotNULL(outBuffer,
+ ahOut,
+ attrDescriptor,
+ attrDes2);
+ } else {
+ ljam();
+ ahOut->setNULL();
+ return true;
+ }
+}
+
/* ---------------------------------------------------------------------- */
/* THIS ROUTINE IS USED TO UPDATE A NUMBER OF ATTRIBUTES. IT IS */
/* USED BY THE INSERT ROUTINE, THE UPDATE ROUTINE AND IT CAN BE */
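
readCharNotNULL above normalizes char data through the collation's strnxfrm and space-pads the result, so the binary comparisons used by TUX order strings according to the collation and ignore trailing blanks. A rough stand-alone analogue using the C library's strxfrm in the "C" locale rather than MySQL's charset API:

#include <clocale>
#include <cstdio>
#include <cstring>

int main() {
  std::setlocale(LC_COLLATE, "C");
  const char* a = "abc";
  const char* b = "abc ";
  char xa[16] = {0}, xb[16] = {0};
  size_t na = std::strxfrm(xa, a, sizeof(xa));
  size_t nb = std::strxfrm(xb, b, sizeof(xb));
  // pad the transformed values with ASCII spaces, as readCharNotNULL does,
  // so values differing only in trailing blanks compare equal byte-wise
  while (na < sizeof(xa)) xa[na++] = 0x20;
  while (nb < sizeof(xb)) xb[nb++] = 0x20;
  printf("equal after xfrm+pad: %s\n",
         std::memcmp(xa, xb, sizeof(xa)) == 0 ? "yes" : "no");
  return 0;
}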
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp b/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp
index d31ab43f108..642ba270760 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupTabDesMan.cpp
@@ -31,12 +31,33 @@
/* memory attached to fragments (could be allocated per table */
/* instead. Performs its task by a buddy algorithm. */
/* **************************************************************** */
-Uint32 Dbtup::allocTabDescr(Uint32 noOfAttributes, Uint32 noOfKeyAttr, Uint32 noOfAttributeGroups)
+
+Uint32
+Dbtup::getTabDescrOffsets(const Tablerec* regTabPtr, Uint32* offset)
+{
+ // belongs to configure.in
+ unsigned sizeOfPointer = sizeof(CHARSET_INFO*);
+ ndbrequire((sizeOfPointer & 0x3) == 0);
+ sizeOfPointer = (sizeOfPointer >> 2);
+ // do in layout order and return offsets (see DbtupMeta.cpp)
+ Uint32 allocSize = 0;
+ // magically aligned to 8 bytes
+ offset[0] = allocSize += ZTD_SIZE;
+ offset[1] = allocSize += regTabPtr->noOfAttr * sizeOfReadFunction();
+ offset[2] = allocSize += regTabPtr->noOfAttr * sizeOfReadFunction();
+ offset[3] = allocSize += regTabPtr->noOfCharsets * sizeOfPointer;
+ offset[4] = allocSize += regTabPtr->noOfKeyAttr;
+ offset[5] = allocSize += regTabPtr->noOfAttributeGroups;
+ allocSize += regTabPtr->noOfAttr * ZAD_SIZE;
+ allocSize += ZTD_TRAILER_SIZE;
+ // return number of words
+ return allocSize;
+}
+
+Uint32 Dbtup::allocTabDescr(const Tablerec* regTabPtr, Uint32* offset)
{
Uint32 reference = RNIL;
- Uint32 allocSize = (ZTD_SIZE + ZTD_TRAILER_SIZE) + (noOfAttributes * ZAD_SIZE);
- allocSize += noOfAttributeGroups;
- allocSize += ((2 * noOfAttributes * sizeOfReadFunction()) + noOfKeyAttr);
+ Uint32 allocSize = getTabDescrOffsets(regTabPtr, offset);
/* ---------------------------------------------------------------- */
/* ALWAYS ALLOCATE A MULTIPLE OF 16 BYTES */
/* ---------------------------------------------------------------- */
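
For a concrete view of the values produced by getTabDescrOffsets above, the sketch below repeats the cumulative sums with hypothetical sizes (ZTD_SIZE = 2, sizeOfReadFunction() = 2 words, 4-byte CHARSET_INFO pointers, ZAD_SIZE = 2, trailer = 2) for a table with 3 attributes, 1 key attribute, 1 charset and 1 attribute group:

#include <cstdio>

int main() {
  unsigned noOfAttr = 3, noOfKeyAttr = 1, noOfCharsets = 1, noOfGroups = 1;
  unsigned ZTD_SIZE = 2, readFnWords = 2, csPtrWords = 1, ZAD_SIZE = 2,
           ZTD_TRAILER_SIZE = 2;
  unsigned offset[6], allocSize = 0;
  offset[0] = allocSize += ZTD_SIZE;                  // readFunctionArray
  offset[1] = allocSize += noOfAttr * readFnWords;    // updateFunctionArray
  offset[2] = allocSize += noOfAttr * readFnWords;    // charsetArray
  offset[3] = allocSize += noOfCharsets * csPtrWords; // readKeyArray
  offset[4] = allocSize += noOfKeyAttr;               // attributeGroupDescriptor
  offset[5] = allocSize += noOfGroups;                // tabDescriptor
  allocSize += noOfAttr * ZAD_SIZE + ZTD_TRAILER_SIZE;
  for (int i = 0; i < 6; i++)
    printf("offset[%d] = %u\n", i, offset[i]);
  printf("allocSize = %u words\n", allocSize);        // 25 in this example
  return 0;
}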
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp b/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
index a93ff4566e7..c0b49364ee6 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
@@ -751,7 +751,8 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
&tableDescriptor[regTabPtr->readKeyArray].tabDescr,
regTabPtr->noOfKeyAttr,
keyBuffer,
- ZATTR_BUFFER_SIZE);
+ ZATTR_BUFFER_SIZE,
+ true);
ndbrequire(noPrimKey != (Uint32)-1);
Uint32 numAttrsToRead;
@@ -792,7 +793,8 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
&readBuffer[0],
numAttrsToRead,
mainBuffer,
- ZATTR_BUFFER_SIZE);
+ ZATTR_BUFFER_SIZE,
+ true);
ndbrequire(noMainWords != (Uint32)-1);
} else {
ljam();
@@ -816,7 +818,8 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
&readBuffer[0],
numAttrsToRead,
copyBuffer,
- ZATTR_BUFFER_SIZE);
+ ZATTR_BUFFER_SIZE,
+ true);
ndbrequire(noCopyWords != (Uint32)-1);
if ((noMainWords == noCopyWords) &&
diff --git a/ndb/src/kernel/blocks/dbtux/Dbtux.hpp b/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
index 36ac20611bb..8dca52cec04 100644
--- a/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
+++ b/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
@@ -163,11 +163,6 @@ private:
static const unsigned AttributeHeaderSize = 1;
/*
- * Array of pointers to TUP table attributes. Always read-only.
- */
- typedef const Uint32** TableData;
-
- /*
* Logical tuple address, "local key". Identifies table tuples.
*/
typedef Uint32 TupAddr;
@@ -330,11 +325,15 @@ private:
/*
* Attribute metadata. Size must be multiple of word size.
+ *
+ * Prefix comparison of char data must use strxfrm and binary
+ * comparison. The charset is currently unused.
*/
struct DescAttr {
Uint32 m_attrDesc; // standard AttributeDescriptor
Uint16 m_primaryAttrId;
- Uint16 m_typeId;
+ unsigned m_typeId : 6;
+ unsigned m_charset : 10;
};
static const unsigned DescAttrSize = sizeof(DescAttr) >> 2;
@@ -553,9 +552,9 @@ private:
void execREAD_CONFIG_REQ(Signal* signal);
// utils
void setKeyAttrs(const Frag& frag);
- void readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, TableData keyData);
- void readTablePk(const Frag& frag, TreeEnt ent, unsigned& pkSize, Data pkData);
- void copyAttrs(const Frag& frag, TableData data1, Data data2, unsigned maxlen2 = MaxAttrDataSize);
+ void readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, Data keyData);
+ void readTablePk(const Frag& frag, TreeEnt ent, Data pkData, unsigned& pkSize);
+ void copyAttrs(const Frag& frag, ConstData data1, Data data2, unsigned maxlen2 = MaxAttrDataSize);
/*
* DbtuxMeta.cpp
@@ -622,17 +621,15 @@ private:
/*
* DbtuxSearch.cpp
*/
- void searchToAdd(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos);
- void searchToRemove(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos);
+ void searchToAdd(Signal* signal, Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos);
+ void searchToRemove(Signal* signal, Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos);
void searchToScan(Signal* signal, Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos);
/*
* DbtuxCmp.cpp
*/
- int cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, ConstData entryData, unsigned maxlen = MaxAttrDataSize);
- int cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, TableData entryKey);
+ int cmpSearchKey(const Frag& frag, unsigned& start, ConstData searchKey, ConstData entryData, unsigned maxlen = MaxAttrDataSize);
int cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, ConstData entryData, unsigned maxlen = MaxAttrDataSize);
- int cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, TableData entryKey);
/*
* DbtuxDebug.cpp
@@ -679,17 +676,27 @@ private:
Uint32 c_typeOfStart;
/*
- * Array of index key attribute ids in AttributeHeader format.
- * Includes fixed attribute sizes. This is global data set at
- * operation start and is not passed as a parameter.
+ * Global data set at operation start. Unpacked from index metadata.
+ * Not passed as parameter to methods. Invalid across timeslices.
+ *
+ * TODO inline all into index metadata
*/
+
+ // index key attr ids with sizes in AttributeHeader format
Data c_keyAttrs;
- // buffer for search key data as pointers to TUP storage
- TableData c_searchKey;
+ // pointers to index key comparison functions
+ NdbSqlUtil::Cmp** c_sqlCmp;
+
+ /*
+ * Other buffers used during the operation.
+ */
+
+ // buffer for search key data with headers
+ Data c_searchKey;
- // buffer for current entry key data as pointers to TUP storage
- TableData c_entryKey;
+ // buffer for current entry key data with headers
+ Data c_entryKey;
// buffer for scan bounds and keyinfo (primary key)
Data c_dataBuffer;
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp
index debb5252386..549720cc17c 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp
@@ -18,21 +18,24 @@
#include "Dbtux.hpp"
/*
- * Search key vs node prefix.
+ * Search key vs node prefix or entry
*
- * The comparison starts at given attribute position (in fact 0). The
- * position is updated by number of equal initial attributes found. The
- * prefix may be partial in which case CmpUnknown may be returned.
+ * The comparison starts at given attribute position. The position is
+ * updated by number of equal initial attributes found. The entry data
+ * may be partial in which case CmpUnknown may be returned.
*/
int
-Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, ConstData entryData, unsigned maxlen)
+Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, ConstData searchKey, ConstData entryData, unsigned maxlen)
{
const unsigned numAttrs = frag.m_numAttrs;
const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
// number of words of attribute data left
unsigned len2 = maxlen;
- // skip to right position in search key
- searchKey += start;
+ // skip to right position in search key only
+ for (unsigned i = 0; i < start; i++) {
+ jam();
+ searchKey += AttributeHeaderSize + searchKey.ah().getDataSize();
+ }
int ret = 0;
while (start < numAttrs) {
if (len2 <= AttributeHeaderSize) {
@@ -41,22 +44,21 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, Cons
break;
}
len2 -= AttributeHeaderSize;
- if (*searchKey != 0) {
+ if (! searchKey.ah().isNULL()) {
if (! entryData.ah().isNULL()) {
jam();
// current attribute
const DescAttr& descAttr = descEnt.m_descAttr[start];
- const NdbSqlUtil::Type& type = NdbSqlUtil::getType(descAttr.m_typeId);
- ndbassert(type.m_typeId != NdbSqlUtil::Type::Undefined);
// full data size
const unsigned size1 = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc);
ndbrequire(size1 != 0 && size1 == entryData.ah().getDataSize());
const unsigned size2 = min(size1, len2);
len2 -= size2;
// compare
- const Uint32* const p1 = *searchKey;
+ NdbSqlUtil::Cmp* const cmp = c_sqlCmp[start];
+ const Uint32* const p1 = &searchKey[AttributeHeaderSize];
const Uint32* const p2 = &entryData[AttributeHeaderSize];
- ret = (*type.m_cmp)(p1, p2, size1, size2);
+ ret = (*cmp)(0, p1, p2, size1, size2);
if (ret != 0) {
jam();
break;
@@ -75,7 +77,7 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, Cons
break;
}
}
- searchKey += 1;
+ searchKey += AttributeHeaderSize + searchKey.ah().getDataSize();
entryData += AttributeHeaderSize + entryData.ah().getDataSize();
start++;
}
@@ -83,60 +85,7 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, Cons
}
/*
- * Search key vs tree entry.
- *
- * Start position is updated as in previous routine.
- */
-int
-Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, TableData searchKey, TableData entryKey)
-{
- const unsigned numAttrs = frag.m_numAttrs;
- const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
- // skip to right position
- searchKey += start;
- entryKey += start;
- int ret = 0;
- while (start < numAttrs) {
- if (*searchKey != 0) {
- if (*entryKey != 0) {
- jam();
- // current attribute
- const DescAttr& descAttr = descEnt.m_descAttr[start];
- const NdbSqlUtil::Type& type = NdbSqlUtil::getType(descAttr.m_typeId);
- ndbassert(type.m_typeId != NdbSqlUtil::Type::Undefined);
- // full data size
- const unsigned size1 = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc);
- // compare
- const Uint32* const p1 = *searchKey;
- const Uint32* const p2 = *entryKey;
- ret = (*type.m_cmp)(p1, p2, size1, size1);
- if (ret != 0) {
- jam();
- break;
- }
- } else {
- jam();
- // not NULL > NULL
- ret = +1;
- break;
- }
- } else {
- if (*entryKey != 0) {
- jam();
- // NULL < not NULL
- ret = -1;
- break;
- }
- }
- searchKey += 1;
- entryKey += 1;
- start++;
- }
- return ret;
-}
-
-/*
- * Scan bound vs node prefix.
+ * Scan bound vs node prefix or entry.
*
* Compare lower or upper bound and index attribute data. The attribute
* data may be partial in which case CmpUnknown may be returned.
@@ -183,9 +132,8 @@ Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigne
jam();
// current attribute
const unsigned index = boundInfo.ah().getAttributeId();
+ ndbrequire(index < frag.m_numAttrs);
const DescAttr& descAttr = descEnt.m_descAttr[index];
- const NdbSqlUtil::Type& type = NdbSqlUtil::getType(descAttr.m_typeId);
- ndbassert(type.m_typeId != NdbSqlUtil::Type::Undefined);
ndbrequire(entryData.ah().getAttributeId() == descAttr.m_primaryAttrId);
// full data size
const unsigned size1 = boundInfo.ah().getDataSize();
@@ -193,9 +141,10 @@ Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigne
const unsigned size2 = min(size1, len2);
len2 -= size2;
// compare
+ NdbSqlUtil::Cmp* const cmp = c_sqlCmp[index];
const Uint32* const p1 = &boundInfo[AttributeHeaderSize];
const Uint32* const p2 = &entryData[AttributeHeaderSize];
- int ret = (*type.m_cmp)(p1, p2, size1, size2);
+ int ret = (*cmp)(0, p1, p2, size1, size2);
if (ret != 0) {
jam();
return ret;
@@ -244,72 +193,3 @@ Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigne
return +1;
}
}
-
-/*
- * Scan bound vs tree entry.
- */
-int
-Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, TableData entryKey)
-{
- const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
- // direction 0-lower 1-upper
- ndbrequire(dir <= 1);
- // initialize type to equality
- unsigned type = 4;
- while (boundCount != 0) {
- // get and skip bound type
- type = boundInfo[0];
- boundInfo += 1;
- if (! boundInfo.ah().isNULL()) {
- if (*entryKey != 0) {
- jam();
- // current attribute
- const unsigned index = boundInfo.ah().getAttributeId();
- const DescAttr& descAttr = descEnt.m_descAttr[index];
- const NdbSqlUtil::Type& type = NdbSqlUtil::getType(descAttr.m_typeId);
- ndbassert(type.m_typeId != NdbSqlUtil::Type::Undefined);
- // full data size
- const unsigned size1 = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc);
- // compare
- const Uint32* const p1 = &boundInfo[AttributeHeaderSize];
- const Uint32* const p2 = *entryKey;
- int ret = (*type.m_cmp)(p1, p2, size1, size1);
- if (ret != 0) {
- jam();
- return ret;
- }
- } else {
- jam();
- // not NULL > NULL
- return +1;
- }
- } else {
- jam();
- if (*entryKey != 0) {
- jam();
- // NULL < not NULL
- return -1;
- }
- }
- boundInfo += AttributeHeaderSize + boundInfo.ah().getDataSize();
- entryKey += 1;
- boundCount -= 1;
- }
- if (dir == 0) {
- // lower bound
- jam();
- if (type == 1) {
- jam();
- return +1;
- }
- return -1;
- } else {
- // upper bound
- jam();
- if (type == 3) {
- jam();
- return -1;
- }
- return +1;
- }
-}
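
With TableData gone, search keys and entry keys are now packed as (AttributeHeader, data...) sequences, so skipping to a start attribute means walking headers rather than stepping through a pointer array. A tiny sketch of that walk, again with a made-up header encoding (size in the low byte) in place of the real AttributeHeader:

#include <cstdint>
#include <cassert>

typedef uint32_t Uint32;

// Advance past `start` attributes in a packed (header, data...) buffer.
static const Uint32* skipTo(const Uint32* key, unsigned start) {
  for (unsigned i = 0; i < start; i++)
    key += 1 + (key[0] & 0xFF);   // one header word plus its data words
  return key;
}

int main() {
  // three attributes of sizes 1, 2, 1
  Uint32 key[] = {1, 10, 2, 20, 21, 1, 30};
  assert(skipTo(key, 0) == &key[0]);
  assert(skipTo(key, 1) == &key[2]);
  assert(skipTo(key, 2) == &key[5]);
  return 0;
}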
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
index 11f4f12b7f6..8d31d2c6a55 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
@@ -207,14 +207,10 @@ Dbtux::printNode(Signal* signal, Frag& frag, NdbOut& out, TupLoc loc, PrintPar&
}
// check ordering within node
for (unsigned j = 1; j < node.getOccup(); j++) {
- unsigned start = 0;
const TreeEnt ent1 = node.getEnt(j - 1);
const TreeEnt ent2 = node.getEnt(j);
- if (j == 1) {
- readKeyAttrs(frag, ent1, start, c_searchKey);
- } else {
- memcpy(c_searchKey, c_entryKey, frag.m_numAttrs << 2);
- }
+ unsigned start = 0;
+ readKeyAttrs(frag, ent1, start, c_searchKey);
readKeyAttrs(frag, ent2, start, c_entryKey);
int ret = cmpSearchKey(frag, start, c_searchKey, c_entryKey);
if (ret == 0)
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
index f6f1610c8c1..39cd8e25184 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
@@ -16,8 +16,6 @@
#define DBTUX_GEN_CPP
#include "Dbtux.hpp"
-#include <signaldata/TuxContinueB.hpp>
-#include <signaldata/TuxContinueB.hpp>
Dbtux::Dbtux(const Configuration& conf) :
SimulatedBlock(DBTUX, conf),
@@ -202,8 +200,9 @@ Dbtux::execREAD_CONFIG_REQ(Signal* signal)
}
// allocate buffers
c_keyAttrs = (Uint32*)allocRecord("c_keyAttrs", sizeof(Uint32), MaxIndexAttributes);
- c_searchKey = (TableData)allocRecord("c_searchKey", sizeof(Uint32*), MaxIndexAttributes);
- c_entryKey = (TableData)allocRecord("c_entryKey", sizeof(Uint32*), MaxIndexAttributes);
+ c_sqlCmp = (NdbSqlUtil::Cmp**)allocRecord("c_sqlCmp", sizeof(NdbSqlUtil::Cmp*), MaxIndexAttributes);
+ c_searchKey = (Uint32*)allocRecord("c_searchKey", sizeof(Uint32), MaxAttrDataSize);
+ c_entryKey = (Uint32*)allocRecord("c_entryKey", sizeof(Uint32), MaxAttrDataSize);
c_dataBuffer = (Uint32*)allocRecord("c_dataBuffer", sizeof(Uint64), (MaxAttrDataSize + 1) >> 1);
// ack
ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
@@ -218,7 +217,8 @@ Dbtux::execREAD_CONFIG_REQ(Signal* signal)
void
Dbtux::setKeyAttrs(const Frag& frag)
{
- Data keyAttrs = c_keyAttrs; // global
+ Data keyAttrs = c_keyAttrs; // global
+ NdbSqlUtil::Cmp** sqlCmp = c_sqlCmp; // global
const unsigned numAttrs = frag.m_numAttrs;
const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
for (unsigned i = 0; i < numAttrs; i++) {
@@ -227,75 +227,71 @@ Dbtux::setKeyAttrs(const Frag& frag)
// set attr id and fixed size
keyAttrs.ah() = AttributeHeader(descAttr.m_primaryAttrId, size);
keyAttrs += 1;
+ // set comparison method pointer
+ const NdbSqlUtil::Type& sqlType = NdbSqlUtil::getTypeBinary(descAttr.m_typeId);
+ ndbrequire(sqlType.m_cmp != 0);
+ *(sqlCmp++) = sqlType.m_cmp;
}
}
void
-Dbtux::readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, TableData keyData)
+Dbtux::readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, Data keyData)
{
ConstData keyAttrs = c_keyAttrs; // global
const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[ent.m_fragBit];
const TupLoc tupLoc = ent.m_tupLoc;
const Uint32 tupVersion = ent.m_tupVersion;
ndbrequire(start < frag.m_numAttrs);
- const unsigned numAttrs = frag.m_numAttrs - start;
- // start applies to both keys and output data
+ const Uint32 numAttrs = frag.m_numAttrs - start;
+ // skip to start position in keyAttrs only
keyAttrs += start;
- keyData += start;
- c_tup->tuxReadAttrs(tableFragPtrI, tupLoc.m_pageId, tupLoc.m_pageOffset, tupVersion, numAttrs, keyAttrs, keyData);
+ int ret = c_tup->tuxReadAttrs(tableFragPtrI, tupLoc.m_pageId, tupLoc.m_pageOffset, tupVersion, keyAttrs, numAttrs, keyData);
jamEntry();
+ // TODO handle error
+ ndbrequire(ret > 0);
}
void
-Dbtux::readTablePk(const Frag& frag, TreeEnt ent, unsigned& pkSize, Data pkData)
+Dbtux::readTablePk(const Frag& frag, TreeEnt ent, Data pkData, unsigned& pkSize)
{
const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[ent.m_fragBit];
const TupLoc tupLoc = ent.m_tupLoc;
- Uint32 size = 0;
- c_tup->tuxReadKeys(tableFragPtrI, tupLoc.m_pageId, tupLoc.m_pageOffset, &size, pkData);
- ndbrequire(size != 0);
- pkSize = size;
+ int ret = c_tup->tuxReadPk(tableFragPtrI, tupLoc.m_pageId, tupLoc.m_pageOffset, pkData);
+ jamEntry();
+ // TODO handle error
+ ndbrequire(ret > 0);
+ pkSize = ret;
}
/*
- * Input is pointers to table attributes. Output is array of attribute
- * data with headers. Copies whatever fits.
+ * Copy attribute data with headers. Input is all index key data.
+ * Copies whatever fits.
*/
void
-Dbtux::copyAttrs(const Frag& frag, TableData data1, Data data2, unsigned maxlen2)
+Dbtux::copyAttrs(const Frag& frag, ConstData data1, Data data2, unsigned maxlen2)
{
- ConstData keyAttrs = c_keyAttrs; // global
- const unsigned numAttrs = frag.m_numAttrs;
+ unsigned n = frag.m_numAttrs;
unsigned len2 = maxlen2;
- for (unsigned n = 0; n < numAttrs; n++) {
+ while (n != 0) {
jam();
- const unsigned attrId = keyAttrs.ah().getAttributeId();
- const unsigned dataSize = keyAttrs.ah().getDataSize();
- const Uint32* const p1 = *data1;
- if (p1 != 0) {
- if (len2 == 0)
- return;
- data2.ah() = AttributeHeader(attrId, dataSize);
- data2 += 1;
- len2 -= 1;
- unsigned n = dataSize;
- for (unsigned i = 0; i < dataSize; i++) {
- if (len2 == 0)
- return;
- *data2 = p1[i];
- data2 += 1;
- len2 -= 1;
- }
- } else {
+ const unsigned dataSize = data1.ah().getDataSize();
+ // copy header
+ if (len2 == 0)
+ return;
+ data2[0] = data1[0];
+ data1 += 1;
+ data2 += 1;
+ len2 -= 1;
+ // copy data
+ for (unsigned i = 0; i < dataSize; i++) {
if (len2 == 0)
return;
- data2.ah() = AttributeHeader(attrId, 0);
- data2.ah().setNULL();
- data2 += 1;
+ data2[i] = data1[i];
len2 -= 1;
}
- keyAttrs += 1;
- data1 += 1;
+ data1 += dataSize;
+ data2 += dataSize;
+ n -= 1;
}
#ifdef VM_TRACE
memset(data2, DataFillByte, len2 << 2);
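[editor note] The rewritten copyAttrs above no longer chases per-attribute pointers into TUP memory; it walks a flat "header word + data words" stream and truncates as soon as the destination buffer is full (the Times.txt note later in this patch, "TUX can no longer use pointers to TUP data", refers to this). A minimal stand-alone sketch of that packing loop follows; the names and the "low 16 bits hold the word count" header layout are assumptions for illustration, not the block's actual AttributeHeader encoding.

    #include <stdint.h>
    typedef uint32_t Uint32;

    // Sketch only: pack each attribute as one header word followed by its
    // data words, stopping the moment the output buffer is full.
    static unsigned
    copyAttrStream(const Uint32* src, Uint32* dst,
                   unsigned numAttrs, unsigned maxWords)
    {
      unsigned used = 0;
      while (numAttrs != 0) {
        const unsigned dataSize = src[0] & 0xFFFF;  // assumed size field
        if (used == maxWords)
          return used;                              // no room for header
        dst[used++] = *src++;                       // copy header word
        for (unsigned i = 0; i < dataSize; i++) {
          if (used == maxWords)
            return used;                            // truncated mid-attribute
          dst[used++] = *src++;
        }
        numAttrs--;
      }
      return used;                                  // words actually written
    }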
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
index 4bb3b940d91..3c0af3ca79d 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
@@ -178,19 +178,31 @@ Dbtux::execTUX_ADD_ATTRREQ(Signal* signal)
descAttr.m_attrDesc = req->attrDescriptor;
descAttr.m_primaryAttrId = req->primaryAttrId;
descAttr.m_typeId = req->extTypeInfo & 0xFF;
+ descAttr.m_charset = (req->extTypeInfo >> 16);
#ifdef VM_TRACE
if (debugFlags & DebugMeta) {
debugOut << "Add frag " << fragPtr.i << " attr " << attrId << " " << descAttr << endl;
}
#endif
- // check if type is valid and has a comparison method
- const NdbSqlUtil::Type& type = NdbSqlUtil::getType(descAttr.m_typeId);
+ // check that type is valid and has a binary comparison method
+ const NdbSqlUtil::Type& type = NdbSqlUtil::getTypeBinary(descAttr.m_typeId);
if (type.m_typeId == NdbSqlUtil::Type::Undefined ||
type.m_cmp == 0) {
jam();
errorCode = TuxAddAttrRef::InvalidAttributeType;
break;
}
+#ifdef dbtux_uses_charset
+ if (descAttr.m_charset != 0) {
+ CHARSET_INFO *cs = get_charset(descAttr.m_charset, MYF(0));
+ // here use the non-binary type
+ if (! NdbSqlUtil::usable_in_ordered_index(descAttr.m_typeId, cs)) {
+ jam();
+ errorCode = TuxAddAttrRef::InvalidCharset;
+ break;
+ }
+ }
+#endif
if (indexPtr.p->m_numAttrs == fragOpPtr.p->m_numAttrsRecvd) {
jam();
// initialize tree header
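[editor note] The new (currently #ifdef'd) dbtux_uses_charset block above only admits an attribute with a non-zero charset if MySQL can resolve the charset number and NdbSqlUtil accepts the non-binary type for an ordered index. A hedged sketch of that guard as a stand-alone predicate; the null check on the charset pointer and the includes are assumptions of the sketch, not part of the patch.

    #include <my_global.h>      // assumed: CHARSET_INFO, MYF
    #include <my_sys.h>         // assumed: get_charset()
    #include <NdbSqlUtil.hpp>

    // Sketch of the charset validation done in execTUX_ADD_ATTRREQ.
    static bool
    charset_ok_for_ordered_index(Uint32 typeId, Uint32 csNumber)
    {
      if (csNumber == 0)
        return true;            // no charset: binary comparison is enough
      CHARSET_INFO* cs = get_charset(csNumber, MYF(0));
      if (cs == 0)
        return false;           // unknown charset number
      return NdbSqlUtil::usable_in_ordered_index(typeId, cs);
    }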
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
index c4c33ff931f..5b161d3c4ce 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
@@ -112,6 +112,7 @@ Dbtux::execACC_SCANREQ(Signal* signal)
void
Dbtux::execTUX_BOUND_INFO(Signal* signal)
{
+ jamEntry();
struct BoundInfo {
unsigned offset;
unsigned size;
@@ -389,7 +390,7 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
jam();
const TreeEnt ent = scan.m_scanPos.m_ent;
// read tuple key
- readTablePk(frag, ent, pkSize, pkData);
+ readTablePk(frag, ent, pkData, pkSize);
// get read lock or exclusive lock
AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
lockReq->returnCode = RNIL;
@@ -480,7 +481,7 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
jam();
if (pkSize == 0) {
jam();
- readTablePk(frag, ent, pkSize, pkData);
+ readTablePk(frag, ent, pkData, pkSize);
}
}
// conf signal
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp
index 84048b308bc..bffbb8f5594 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp
@@ -25,7 +25,7 @@
* TODO optimize for initial equal attrs in node min/max
*/
void
-Dbtux::searchToAdd(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos)
+Dbtux::searchToAdd(Signal* signal, Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos)
{
const TreeHead& tree = frag.m_tree;
const unsigned numAttrs = frag.m_numAttrs;
@@ -144,7 +144,7 @@ Dbtux::searchToAdd(Signal* signal, Frag& frag, TableData searchKey, TreeEnt sear
* to it.
*/
void
-Dbtux::searchToRemove(Signal* signal, Frag& frag, TableData searchKey, TreeEnt searchEnt, TreePos& treePos)
+Dbtux::searchToRemove(Signal* signal, Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos)
{
const TreeHead& tree = frag.m_tree;
const unsigned numAttrs = frag.m_numAttrs;
diff --git a/ndb/src/kernel/blocks/dbtux/Times.txt b/ndb/src/kernel/blocks/dbtux/Times.txt
index 84819ddcf97..03473353a52 100644
--- a/ndb/src/kernel/blocks/dbtux/Times.txt
+++ b/ndb/src/kernel/blocks/dbtux/Times.txt
@@ -83,7 +83,7 @@ optim 13 mc02/a 39 ms 59 ms 50 pct
mc02/c 9 ms 12 ms 44 pct
mc02/d 246 ms 289 ms 17 pct
-[ case d: what happened to PK read performance? ]
+[ case d: bug in testOIBasic killed PK read performance ]
optim 14 mc02/a 41 ms 60 ms 44 pct
mc02/b 46 ms 81 ms 73 pct
@@ -91,5 +91,21 @@ optim 14 mc02/a 41 ms 60 ms 44 pct
mc02/d 242 ms 285 ms 17 pct
[ case b: do long keys suffer from many subroutine calls? ]
+[ case d: bug in testOIBasic killed PK read performance ]
+
+none mc02/a 35 ms 60 ms 71 pct
+ mc02/b 42 ms 75 ms 76 pct
+ mc02/c 5 ms 12 ms 106 pct
+ mc02/d 165 ms 238 ms 44 pct
+
+[ johan re-installed mc02 as fedora gcc-3.3.2 ]
+[ case c: table scan has improved... ]
+
+charsets mc02/a 35 ms 60 ms 71 pct
+ mc02/b 42 ms 84 ms 97 pct
+ mc02/c 5 ms 12 ms 109 pct
+ mc02/d 190 ms 236 ms 23 pct
+
+[ case b: TUX can no longer use pointers to TUP data ]
vim: set et:
diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
index ff4876b1506..568ed6c6566 100644
--- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
+++ b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
@@ -103,7 +103,7 @@ void Ndbcntr::execCONTINUEB(Signal* signal)
}
Uint64 now = NdbTick_CurrentMillisecond();
- if(c_start.m_startFailureTimeout > now){
+ if(now > c_start.m_startFailureTimeout){
ndbrequire(false);
}
@@ -446,13 +446,17 @@ void Ndbcntr::execREAD_NODESCONF(Signal* signal)
ndb_mgm_get_int_parameter(p, CFG_DB_START_PARTITION_TIMEOUT, &to_2);
ndb_mgm_get_int_parameter(p, CFG_DB_START_FAILURE_TIMEOUT, &to_3);
+ c_start.m_startTime = NdbTick_CurrentMillisecond();
c_start.m_startPartialTimeout = setTimeout(c_start.m_startTime, to_1);
c_start.m_startPartitionedTimeout = setTimeout(c_start.m_startTime, to_2);
c_start.m_startFailureTimeout = setTimeout(c_start.m_startTime, to_3);
-
+
UpgradeStartup::sendCmAppChg(* this, signal, 0); // ADD
sendCntrStartReq(signal);
+
+ signal->theData[0] = ZSTARTUP;
+ sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 1000, 1);
return;
}
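[editor note] The NdbcntrMain.cpp hunks above fix the inverted start-failure test (failure is when now > m_startFailureTimeout), record m_startTime before deriving the timeouts, and arm a delayed CONTINUEB self-signal so the check is actually revisited. A rough sketch of that watchdog shape outside the block framework; the names are invented and assert() stands in for ndbrequire and the one-second re-scheduling.

    #include <cstdint>
    #include <cassert>

    // Sketch only: record a start time, derive a failure deadline from a
    // configured timeout, and have a periodic tick crash the node once the
    // deadline is exceeded.
    struct StartFailureWatchdog {
      uint64_t startTime = 0;
      uint64_t failureDeadline = UINT64_MAX;

      void arm(uint64_t nowMs, uint64_t timeoutMs) {
        startTime = nowMs;
        failureDeadline = (timeoutMs == 0) ? UINT64_MAX : nowMs + timeoutMs;
      }

      void tick(uint64_t nowMs) const {
        // Equivalent of: if (now > m_startFailureTimeout) ndbrequire(false);
        assert(!(nowMs > failureDeadline));
      }
    };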
diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp b/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp
index 40e6aa2dcd7..2a65271a32a 100644
--- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp
+++ b/ndb/src/kernel/blocks/ndbcntr/NdbcntrSysTable.cpp
@@ -46,7 +46,7 @@ Ndbcntr::g_sysTable_SYSTAB_0 = {
static const Ndbcntr::SysColumn
column_NDBEVENTS_0[] = {
{ 0, "NAME",
- DictTabInfo::ExtChar, MAX_TAB_NAME_SIZE,
+ DictTabInfo::ExtBinary, MAX_TAB_NAME_SIZE,
true, false
},
{ 1, "EVENT_TYPE",
@@ -54,7 +54,7 @@ column_NDBEVENTS_0[] = {
false, false
},
{ 2, "TABLE_NAME",
- DictTabInfo::ExtChar, MAX_TAB_NAME_SIZE,
+ DictTabInfo::ExtBinary, MAX_TAB_NAME_SIZE,
false, false
},
{ 3, "ATTRIBUTE_MASK",
diff --git a/ndb/src/kernel/main.cpp b/ndb/src/kernel/main.cpp
index 491733975a8..9c25da79065 100644
--- a/ndb/src/kernel/main.cpp
+++ b/ndb/src/kernel/main.cpp
@@ -19,6 +19,7 @@
#include <ndb_version.h>
#include "Configuration.hpp"
+#include <LocalConfig.hpp>
#include <TransporterRegistry.hpp>
#include "vm/SimBlockList.hpp"
@@ -56,6 +57,7 @@ const char programName[] = "NDB Kernel";
NDB_MAIN(ndb_kernel){
+ ndb_init();
// Print to stdout/console
g_eventLogger.createConsoleHandler();
g_eventLogger.setCategory("NDB");
@@ -66,12 +68,19 @@ NDB_MAIN(ndb_kernel){
// Parse command line options
Configuration* theConfig = globalEmulatorData.theConfiguration;
if(!theConfig->init(argc, argv)){
- return 0;
+ return NRT_Default;
}
+ LocalConfig local_config;
+ if (!local_config.init(theConfig->getConnectString(),0)){
+ local_config.printError();
+ local_config.printUsage();
+ return NRT_Default;
+ }
+
{ // Do configuration
signal(SIGPIPE, SIG_IGN);
- theConfig->fetch_configuration();
+ theConfig->fetch_configuration(local_config);
}
chdir(NdbConfig_get_path(0));
@@ -134,7 +143,7 @@ NDB_MAIN(ndb_kernel){
exit(0);
}
g_eventLogger.info("Ndb has terminated (pid %d) restarting", child);
- theConfig->fetch_configuration();
+ theConfig->fetch_configuration(local_config);
}
g_eventLogger.info("Angel pid: %d ndb pid: %d", getppid(), getpid());
@@ -243,6 +252,9 @@ systemInfo(const Configuration & config, const LogLevel & logLevel){
if(logLevel.getLogLevel(LogLevel::llStartUp) > 0){
g_eventLogger.info("NDB Cluster -- DB node %d", globalData.ownId);
g_eventLogger.info("%s --", NDB_VERSION_STRING);
+ if (config.get_mgmd_host())
+ g_eventLogger.info("Configuration fetched at %s port %d",
+ config.get_mgmd_host(), config.get_mgmd_port());
#ifdef NDB_SOLARIS // ok
g_eventLogger.info("NDB is running on a machine with %d processor(s) at %d MHz",
processor, speed);
diff --git a/ndb/src/kernel/vm/Configuration.cpp b/ndb/src/kernel/vm/Configuration.cpp
index 3099c71b792..fd5d79b92e7 100644
--- a/ndb/src/kernel/vm/Configuration.cpp
+++ b/ndb/src/kernel/vm/Configuration.cpp
@@ -15,8 +15,8 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <ndb_global.h>
-#include <my_sys.h>
+#include <LocalConfig.hpp>
#include "Configuration.hpp"
#include <ErrorHandlingMacros.hpp>
#include "GlobalData.hpp"
@@ -105,7 +105,6 @@ Configuration::init(int argc, const char** argv){
}
// check for depricated flag '-i'
- my_init();
#ifndef DBUG_OFF
if (debug_option)
DBUG_PUSH(debug_option);
@@ -186,7 +185,7 @@ Configuration::closeConfiguration(){
}
void
-Configuration::fetch_configuration(){
+Configuration::fetch_configuration(LocalConfig &local_config){
/**
* Fetch configuration from management server
*/
@@ -194,21 +193,22 @@ Configuration::fetch_configuration(){
delete m_config_retriever;
}
- m_config_retriever= new ConfigRetriever(NDB_VERSION, NODE_TYPE_DB);
- m_config_retriever->setConnectString(_connectString ? _connectString : "");
- if(m_config_retriever->init() == -1 ||
- m_config_retriever->do_connect() == -1){
-
+ m_mgmd_port= 0;
+ m_mgmd_host= 0;
+ m_config_retriever= new ConfigRetriever(local_config, NDB_VERSION, NODE_TYPE_DB);
+ if(m_config_retriever->do_connect() == -1){
const char * s = m_config_retriever->getErrorString();
if(s == 0)
s = "No error given!";
-
/* Set stop on error to true otherwise NDB will
go into an restart loop...
*/
ERROR_SET(fatal, ERR_INVALID_CONFIG, "Could connect to ndb_mgmd", s);
}
+ m_mgmd_port= m_config_retriever->get_mgmd_port();
+ m_mgmd_host= m_config_retriever->get_mgmd_host();
+
ConfigRetriever &cr= *m_config_retriever;
if((globalData.ownId = cr.allocNodeId()) == 0){
@@ -418,6 +418,11 @@ Configuration::setRestartOnErrorInsert(int i){
m_restartOnErrorInsert = i;
}
+const char *
+Configuration::getConnectString() const {
+ return _connectString;
+}
+
char *
Configuration::getConnectStringCopy() const {
if(_connectString != 0)
@@ -506,7 +511,7 @@ Configuration::calcSizeAlt(ConfigValues * ownConfig){
for(unsigned j = 0; j<LogLevel::LOGLEVEL_CATEGORIES; j++){
Uint32 tmp;
- if(!ndb_mgm_get_int_parameter(&db, LogLevel::MIN_LOGLEVEL_ID+j, &tmp)){
+ if(!ndb_mgm_get_int_parameter(&db, CFG_MIN_LOGLEVEL+j, &tmp)){
m_logLevel->setLogLevel((LogLevel::EventCategory)j, tmp);
}
}
diff --git a/ndb/src/kernel/vm/Configuration.hpp b/ndb/src/kernel/vm/Configuration.hpp
index e84ff8d9193..2ea32ffea37 100644
--- a/ndb/src/kernel/vm/Configuration.hpp
+++ b/ndb/src/kernel/vm/Configuration.hpp
@@ -21,6 +21,7 @@
#include <ndb_types.h>
class ConfigRetriever;
+class LocalConfig;
class Configuration {
public:
@@ -32,7 +33,7 @@ public:
*/
bool init(int argc, const char** argv);
- void fetch_configuration();
+ void fetch_configuration(LocalConfig &local_config);
void setupConfiguration();
void closeConfiguration();
@@ -54,6 +55,7 @@ public:
const char * programName() const;
const char * fileSystemPath() const;
const char * backupFilePath() const;
+ const char * getConnectString() const;
char * getConnectStringCopy() const;
/**
@@ -65,6 +67,9 @@ public:
const ndb_mgm_configuration_iterator * getOwnConfigIterator() const;
+ Uint32 get_mgmd_port() const {return m_mgmd_port;};
+ const char *get_mgmd_host() const {return m_mgmd_host;};
+
class LogLevel * m_logLevel;
private:
friend class Cmvmi;
@@ -93,6 +98,8 @@ private:
char * _backupPath;
bool _initialStart;
char * _connectString;
+ Uint32 m_mgmd_port;
+ const char *m_mgmd_host;
bool _daemonMode;
void calcSizeAlt(class ConfigValues * );
diff --git a/ndb/src/kernel/vm/FastScheduler.hpp b/ndb/src/kernel/vm/FastScheduler.hpp
index 9749dab5d85..dc707e47eef 100644
--- a/ndb/src/kernel/vm/FastScheduler.hpp
+++ b/ndb/src/kernel/vm/FastScheduler.hpp
@@ -141,7 +141,7 @@ int
FastScheduler::checkDoJob()
{
/*
- * Joob buffer overload protetction
+   * Job buffer overload protection
* If the job buffer B is filled over a certain limit start
* to execute the signals in the job buffer's
*/
diff --git a/ndb/src/kernel/vm/MetaData.hpp b/ndb/src/kernel/vm/MetaData.hpp
index f6a941e8f9f..11e262664c1 100644
--- a/ndb/src/kernel/vm/MetaData.hpp
+++ b/ndb/src/kernel/vm/MetaData.hpp
@@ -107,6 +107,9 @@ public:
/* Number of primary key attributes (should be computed) */
Uint16 noOfPrimkey;
+ /* Number of distinct character sets (computed) */
+ Uint16 noOfCharsets;
+
/* Length of primary key in words (should be computed) */
/* For ordered index this is tree node size in words */
Uint16 tupKeyLength;
diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp
index 08b83a8d750..fccd5c7983b 100644
--- a/ndb/src/mgmapi/mgmapi.cpp
+++ b/ndb/src/mgmapi/mgmapi.cpp
@@ -954,13 +954,52 @@ struct ndb_mgm_event_categories
{
const char* name;
enum ndb_mgm_event_category category;
+} categories[] = {
+ { "STARTUP", NDB_MGM_EVENT_CATEGORY_STARTUP },
+ { "SHUTDOWN", NDB_MGM_EVENT_CATEGORY_SHUTDOWN },
+ { "STATISTICS", NDB_MGM_EVENT_CATEGORY_STATISTIC },
+ { "NODERESTART", NDB_MGM_EVENT_CATEGORY_NODE_RESTART },
+ { "CONNECTION", NDB_MGM_EVENT_CATEGORY_CONNECTION },
+ { "CHECKPOINT", NDB_MGM_EVENT_CATEGORY_CHECKPOINT },
+ { "DEBUG", NDB_MGM_EVENT_CATEGORY_DEBUG },
+ { "INFO", NDB_MGM_EVENT_CATEGORY_INFO },
+ { "ERROR", NDB_MGM_EVENT_CATEGORY_ERROR },
+ { "GREP", NDB_MGM_EVENT_CATEGORY_GREP },
+ { "BACKUP", NDB_MGM_EVENT_CATEGORY_BACKUP },
+ { 0, NDB_MGM_ILLEGAL_EVENT_CATEGORY }
};
extern "C"
+ndb_mgm_event_category
+ndb_mgm_match_event_category(const char * status)
+{
+ if(status == 0)
+ return NDB_MGM_ILLEGAL_EVENT_CATEGORY;
+
+ for(int i = 0; categories[i].name !=0 ; i++)
+ if(strcmp(status, categories[i].name) == 0)
+ return categories[i].category;
+
+ return NDB_MGM_ILLEGAL_EVENT_CATEGORY;
+}
+
+extern "C"
+const char *
+ndb_mgm_get_event_category_string(enum ndb_mgm_event_category status)
+{
+ int i;
+ for(i = 0; categories[i].name != 0; i++)
+ if(categories[i].category == status)
+ return categories[i].name;
+
+ return 0;
+}
+
+extern "C"
int
ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle, int nodeId,
- /*enum ndb_mgm_event_category*/
- char * category, int level,
+ enum ndb_mgm_event_category cat,
+ int level,
struct ndb_mgm_reply* /*reply*/)
{
SET_ERROR(handle, NDB_MGM_NO_ERROR,
@@ -975,14 +1014,14 @@ ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle, int nodeId,
Properties args;
args.put("node", nodeId);
- args.put("category", category);
+ args.put("category", cat);
args.put("level", level);
-
+
const Properties *reply;
reply = ndb_mgm_call(handle, clusterlog_reply,
"set cluster loglevel", &args);
CHECK_REPLY(reply, -1);
-
+
BaseString result;
reply->get("result", result);
if(strcmp(result.c_str(), "Ok") != 0) {
@@ -997,8 +1036,8 @@ ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle, int nodeId,
extern "C"
int
ndb_mgm_set_loglevel_node(NdbMgmHandle handle, int nodeId,
- /*enum ndb_mgm_event_category category*/
- char * category, int level,
+ enum ndb_mgm_event_category category,
+ int level,
struct ndb_mgm_reply* /*reply*/)
{
SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_set_loglevel_node");
@@ -1031,6 +1070,48 @@ ndb_mgm_set_loglevel_node(NdbMgmHandle handle, int nodeId,
}
extern "C"
+int
+ndb_mgm_listen_event(NdbMgmHandle handle, int filter[])
+{
+ SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_listen_event");
+ const ParserRow<ParserDummy> stat_reply[] = {
+ MGM_CMD("listen event", NULL, ""),
+ MGM_ARG("result", Int, Mandatory, "Error message"),
+ MGM_ARG("msg", String, Optional, "Error message"),
+ MGM_END()
+ };
+ CHECK_HANDLE(handle, -1);
+
+ SocketClient s(handle->hostname, handle->port);
+ const NDB_SOCKET_TYPE sockfd = s.connect();
+ if (sockfd < 0) {
+ setError(handle, NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET, __LINE__,
+	       "Unable to connect to management server");
+ return -1;
+ }
+
+ Properties args;
+ {
+ BaseString tmp;
+ for(int i = 0; filter[i] != 0; i += 2){
+ tmp.appfmt("%d=%d ", filter[i+1], filter[i]);
+ }
+ args.put("filter", tmp.c_str());
+ }
+
+ int tmp = handle->socket;
+ handle->socket = sockfd;
+
+ const Properties *reply;
+ reply = ndb_mgm_call(handle, stat_reply, "listen event", &args);
+
+ handle->socket = tmp;
+
+ CHECK_REPLY(reply, -1);
+ return sockfd;
+}
+
+extern "C"
int
ndb_mgm_get_stat_port(NdbMgmHandle handle, struct ndb_mgm_reply* /*reply*/)
{
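[editor note] ndb_mgm_listen_event, added above, takes a 0-terminated array of (level, category) pairs, ships them to the server as "category=level" filter tokens, and returns a connected socket from which cluster log text can be read. A usage sketch modelled on executeStartBackup further down in this patch; it reads raw bytes instead of the SocketInputStream used there, and assumes the new declaration is exported via mgmapi.h by this patch.

    #include <mgmapi.h>     // assumed to declare ndb_mgm_listen_event
    #include <unistd.h>
    #include <cstdio>

    // Sketch: subscribe to BACKUP events at level 15 and echo whatever the
    // management server sends until the peer closes the socket.
    void watch_backup_events(NdbMgmHandle handle)
    {
      int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 };  // level, category, end
      int fd = ndb_mgm_listen_event(handle, filter);
      if (fd < 0)
        return;                                  // could not connect / subscribe
      char buf[512];
      ssize_t n;
      while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
        buf[n] = '\0';
        fputs(buf, stdout);                      // one or more log lines
      }
      close(fd);
    }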
diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/ndb/src/mgmclient/CommandInterpreter.cpp
index 91d057f8c30..fbb74d7c151 100644
--- a/ndb/src/mgmclient/CommandInterpreter.cpp
+++ b/ndb/src/mgmclient/CommandInterpreter.cpp
@@ -611,9 +611,9 @@ CommandInterpreter::executeHelp(char* parameters)
<< endl;
ndbout << "<category> = ";
- for(Uint32 i = 0; i<EventLogger::noOfEventCategoryNames; i++){
- ndbout << EventLogger::eventCategoryNames[i].name;
- if (i < EventLogger::noOfEventCategoryNames - 1) {
+ for(int i = 0; i<CFG_MIN_LOGLEVEL; i++){
+ ndbout << ndb_mgm_get_event_category_string((ndb_mgm_event_category)i);
+ if (i < CFG_MIN_LOGLEVEL - 1) {
ndbout << " | ";
}
}
@@ -673,8 +673,10 @@ CommandInterpreter::executeShutdown(char* parameters)
if (mgm_id == 0)
mgm_id= state->node_states[i].node_id;
else {
- ndbout << "Unable to locate management server, shutdown manually with #STOP"
+ ndbout << "Unable to locate management server, "
+ << "shutdown manually with <id> STOP"
<< endl;
+ return;
}
}
}
@@ -721,11 +723,13 @@ const char *status_string(ndb_mgm_node_status status)
static void
print_nodes(ndb_mgm_cluster_state *state, ndb_mgm_configuration_iterator *it,
- const char *proc_name, int no_proc, ndb_mgm_node_type type, int master_id)
+ const char *proc_name, int no_proc, ndb_mgm_node_type type,
+ int master_id)
{
int i;
ndbout << "[" << proc_name
- << "(" << ndb_mgm_get_node_type_string(type) << ")]\t" << no_proc << " node(s)" << endl;
+ << "(" << ndb_mgm_get_node_type_string(type) << ")]\t"
+ << no_proc << " node(s)" << endl;
for(i=0; i < state->no_of_nodes; i++) {
struct ndb_mgm_node_state *node_state= &(state->node_states[i]);
if(node_state->node_type == type) {
@@ -733,7 +737,9 @@ print_nodes(ndb_mgm_cluster_state *state, ndb_mgm_configuration_iterator *it,
ndbout << "id=" << node_id;
if(node_state->version != 0) {
const char *hostname= node_state->connect_address;
- if (hostname == 0 || strlen(hostname) == 0 || strcmp(hostname,"0.0.0.0") == 0)
+ if (hostname == 0
+ || strlen(hostname) == 0
+ || strcmp(hostname,"0.0.0.0") == 0)
ndbout << " ";
else
ndbout << "\t@" << hostname;
@@ -761,7 +767,8 @@ print_nodes(ndb_mgm_cluster_state *state, ndb_mgm_configuration_iterator *it,
ndb_mgm_get_string_parameter(it, CFG_NODE_HOST, &config_hostname);
if (config_hostname == 0 || config_hostname[0] == 0)
config_hostname= "any host";
- ndbout << " (not connected, accepting connect from " << config_hostname << ")" << endl;
+ ndbout << " (not connected, accepting connect from "
+ << config_hostname << ")" << endl;
}
}
}
@@ -1240,55 +1247,40 @@ CommandInterpreter::executeLogLevel(int processId, const char* parameters,
{
connect();
(void) all;
- (void) parameters;
- SetLogLevelOrd logLevel; logLevel.clear();
- LogLevel::EventCategory cat;
- int level;
- if (emptyString(parameters) || (strcmp(parameters, "ALL") == 0)) {
- for(Uint32 i = 0; i<EventLogger::noOfEventCategoryNames; i++)
- logLevel.setLogLevel(EventLogger::eventCategoryNames[i].category, 7);
- } else {
-
- char * tmpString = strdup(parameters);
- char * tmpPtr = 0;
- char * item = strtok_r(tmpString, ", ", &tmpPtr);
- while(item != NULL){
- char categoryTxt[255];
- const int m = sscanf(item, "%[^=]=%d", categoryTxt, &level);
- if(m != 2){
- free(tmpString);
- ndbout << "Invalid loglevel specification category=level" << endl;
- return;
- }
+ BaseString tmp(parameters);
+ Vector<BaseString> spec;
+ tmp.split(spec, "=");
+ if(spec.size() != 2){
+ ndbout << "Invalid loglevel specification: " << parameters << endl;
+ return;
+ }
- if(!EventLogger::matchEventCategory(categoryTxt,
- &cat)){
- ndbout << "Invalid loglevel specification, unknown category: "
- << categoryTxt << endl;
- free(tmpString);
- return ;
- }
- if(level < 0 || level > 15){
- ndbout << "Invalid loglevel specification row, level 0-15" << endl;
- free(tmpString);
- return ;
- }
- logLevel.setLogLevel(cat, level);
-
- item = strtok_r(NULL, ", ", &tmpPtr);
+ spec[0].trim().ndb_toupper();
+ int category = ndb_mgm_match_event_category(spec[0].c_str());
+ if(category == NDB_MGM_ILLEGAL_EVENT_CATEGORY){
+ category = atoi(spec[0].c_str());
+ if(category < NDB_MGM_MIN_EVENT_CATEGORY ||
+ category > NDB_MGM_MAX_EVENT_CATEGORY){
+ ndbout << "Unknown category: \"" << spec[0].c_str() << "\"" << endl;
+ return;
}
- free(tmpString);
}
-
+
+ int level = atoi(spec[1].c_str());
+ if(level < 0 || level > 15){
+ ndbout << "Invalid level: " << spec[1].c_str() << endl;
+ return;
+ }
+
struct ndb_mgm_reply reply;
int result;
result = ndb_mgm_set_loglevel_node(m_mgmsrv,
- processId, // fast fix - pekka
- (char*)EventLogger::getEventCategoryName(cat),
+ processId,
+ (ndb_mgm_event_category)category,
level,
&reply);
-
+
if (result < 0) {
ndbout_c("Executing LOGLEVEL on node %d failed.", processId);
printError();
@@ -1296,7 +1288,7 @@ CommandInterpreter::executeLogLevel(int processId, const char* parameters,
ndbout << "Executing LOGLEVEL on node " << processId << " OK!"
<< endl;
}
-
+
}
//*****************************************************************************
@@ -1626,54 +1618,41 @@ CommandInterpreter::executeEventReporting(int processId,
bool all)
{
connect();
- SetLogLevelOrd logLevel; logLevel.clear();
- char categoryTxt[255];
- int level;
- LogLevel::EventCategory cat;
- if (emptyString(parameters) || (strcmp(parameters, "ALL") == 0)) {
- for(Uint32 i = 0; i<EventLogger::noOfEventCategoryNames; i++)
- logLevel.setLogLevel(EventLogger::eventCategoryNames[i].category, 7);
- } else {
- char * tmpString = strdup(parameters);
- char * tmpPtr = 0;
- char * item = strtok_r(tmpString, ", ", &tmpPtr);
- while(item != NULL){
- const int m = sscanf(item, "%[^=]=%d", categoryTxt, &level);
- if(m != 2){
- free(tmpString);
- ndbout << "Invalid loglevel specification category=level" << endl;
- return;
- }
-
- if(!EventLogger::matchEventCategory(categoryTxt,
- &cat)){
- ndbout << "Invalid loglevel specification, unknown category: "
- << categoryTxt << endl;
- free(tmpString);
- return ;
- }
- if(level < 0 || level > 15){
- ndbout << "Invalid loglevel specification row, level 0-15" << endl;
- free(tmpString);
- return ;
- }
- logLevel.setLogLevel(cat, level);
-
- item = strtok_r(NULL, ", ", &tmpPtr);
+ BaseString tmp(parameters);
+ Vector<BaseString> spec;
+ tmp.split(spec, "=");
+ if(spec.size() != 2){
+ ndbout << "Invalid loglevel specification: " << parameters << endl;
+ return;
+ }
+
+ spec[0].trim().ndb_toupper();
+ int category = ndb_mgm_match_event_category(spec[0].c_str());
+ if(category == NDB_MGM_ILLEGAL_EVENT_CATEGORY){
+ category = atoi(spec[0].c_str());
+ if(category < NDB_MGM_MIN_EVENT_CATEGORY ||
+ category > NDB_MGM_MAX_EVENT_CATEGORY){
+ ndbout << "Unknown category: \"" << spec[0].c_str() << "\"" << endl;
+ return;
}
- free(tmpString);
}
+
+ int level = atoi(spec[1].c_str());
+ if(level < 0 || level > 15){
+ ndbout << "Invalid level: " << spec[1].c_str() << endl;
+ return;
+ }
+
+
struct ndb_mgm_reply reply;
int result;
- result =
- ndb_mgm_set_loglevel_clusterlog(m_mgmsrv,
- processId, // fast fix - pekka
- (char*)
- EventLogger::getEventCategoryName(cat),
- level,
- &reply);
+ result = ndb_mgm_set_loglevel_clusterlog(m_mgmsrv,
+ processId, // fast fix - pekka
+ (ndb_mgm_event_category)category,
+ level,
+ &reply);
if (result != 0) {
ndbout_c("Executing CLUSTERLOG on node %d failed", processId);
@@ -1693,13 +1672,45 @@ CommandInterpreter::executeStartBackup(char* /*parameters*/)
connect();
struct ndb_mgm_reply reply;
unsigned int backupId;
+
+ int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 };
+ int fd = ndb_mgm_listen_event(m_mgmsrv, filter);
int result = ndb_mgm_start_backup(m_mgmsrv, &backupId, &reply);
if (result != 0) {
ndbout << "Start of backup failed" << endl;
printError();
- } else {
- ndbout << "Backup started. Backup id " << backupId << "." << endl;
+ close(fd);
+ return;
+ }
+
+ char *tmp;
+ char buf[1024];
+ {
+ SocketInputStream in(fd);
+ int count = 0;
+ do {
+ tmp = in.gets(buf, 1024);
+ if(tmp)
+ {
+ ndbout << tmp;
+ int id;
+ if(sscanf(tmp, "%*[^:]: Backup %d ", &id) == 1 && id == backupId){
+ count++;
+ }
+ }
+ } while(count < 2);
}
+
+ SocketInputStream in(fd, 10);
+ do {
+ tmp = in.gets(buf, 1024);
+ if(tmp && tmp[0] != 0)
+ {
+ ndbout << tmp;
+ }
+ } while(tmp && tmp[0] != 0);
+
+ close(fd);
}
void
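[editor note] Both LOGLEVEL and CLUSTERLOG now parse their argument the same way: split on "=", map the left side through ndb_mgm_match_event_category (with a numeric fallback), and range-check the level against 0-15. A compact sketch of that parse step; it omits the trim/uppercase and the numeric fallback the real commands perform.

    #include <mgmapi.h>     // assumed to carry the enum and the new matcher
    #include <cstdlib>
    #include <cstring>

    // Sketch of the "<category>=<level>" parsing used by LOGLEVEL / CLUSTERLOG.
    // Returns false for malformed input; 'cat' and 'level' are outputs.
    static bool
    parse_loglevel_spec(const char* spec, ndb_mgm_event_category& cat, int& level)
    {
      const char* eq = strchr(spec, '=');
      if (eq == 0 || eq == spec)
        return false;                            // need "category=level"
      char name[64];
      size_t len = (size_t)(eq - spec);
      if (len >= sizeof(name))
        return false;
      memcpy(name, spec, len);
      name[len] = '\0';
      cat = ndb_mgm_match_event_category(name);  // added earlier in this patch
      if (cat == NDB_MGM_ILLEGAL_EVENT_CATEGORY)
        return false;                            // (numeric fallback omitted)
      level = atoi(eq + 1);
      return level >= 0 && level <= 15;
    }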
diff --git a/ndb/src/mgmclient/Makefile.am b/ndb/src/mgmclient/Makefile.am
index 72ddc9d098b..e271c7bed53 100644
--- a/ndb/src/mgmclient/Makefile.am
+++ b/ndb/src/mgmclient/Makefile.am
@@ -16,7 +16,7 @@ LDADD_LOC = $(top_builddir)/ndb/src/libndbclient.la \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/mysys/libmysys.a \
$(top_builddir)/strings/libmystrings.a \
- @TERMCAP_LIB@
+ @TERMCAP_LIB@ @NDB_SCI_LIBS@
ndb_mgm_LDFLAGS = @ndb_bin_am_ldflags@
diff --git a/ndb/src/mgmclient/main.cpp b/ndb/src/mgmclient/main.cpp
index df6659df0b1..69f968677cd 100644
--- a/ndb/src/mgmclient/main.cpp
+++ b/ndb/src/mgmclient/main.cpp
@@ -44,6 +44,7 @@ handler(int sig){
}
int main(int argc, const char** argv){
+ ndb_init();
int optind = 0;
const char *_host = 0;
int _port = 0;
diff --git a/ndb/src/mgmsrv/CommandInterpreter.cpp b/ndb/src/mgmsrv/CommandInterpreter.cpp
index 316b6d5795e..2c2aeda21ed 100644
--- a/ndb/src/mgmsrv/CommandInterpreter.cpp
+++ b/ndb/src/mgmsrv/CommandInterpreter.cpp
@@ -52,7 +52,7 @@ static const char* helpTexts[] = {
"{<id>|ALL} CLUSTERLOG {<category>=<level>}+ Set log level for cluster log",
"QUIT Quit management server",
};
-static const int noOfHelpTexts = sizeof(helpTexts)/sizeof(const char*);
+static const unsigned noOfHelpTexts = sizeof(helpTexts)/sizeof(const char*);
static const char* helpTextShow =
"SHOW prints NDB Cluster information\n\n"
@@ -389,14 +389,14 @@ void CommandInterpreter::executeHelp(char* parameters) {
<< endl;
ndbout << "<category> = ";
- for(i = 0; i<EventLogger::noOfEventCategoryNames; i++){
- ndbout << EventLogger::eventCategoryNames[i].name;
- if (i < EventLogger::noOfEventCategoryNames - 1) {
+ for(i = 0; i<CFG_MIN_LOGLEVEL; i++){
+ ndbout << ndb_mgm_get_event_category_string((ndb_mgm_event_category)i);
+ if (i < CFG_MIN_LOGLEVEL - 1) {
ndbout << " | ";
}
}
ndbout << endl;
-
+
ndbout << "<level> = " << "0 - 15"
<< endl;
@@ -831,12 +831,13 @@ void CommandInterpreter::executeStatus(int processId,
//*****************************************************************************
void CommandInterpreter::executeLogLevel(int processId,
const char* parameters, bool all) {
+#if 0
(void)all; // Don't want compiler warning
SetLogLevelOrd logLevel; logLevel.clear();
if (emptyString(parameters) || (strcmp(parameters, "ALL") == 0)) {
- for(Uint32 i = 0; i<EventLogger::noOfEventCategoryNames; i++)
- logLevel.setLogLevel(EventLogger::eventCategoryNames[i].category, 7);
+ for(Uint32 i = 0; i<EventLoggerBase::noOfEventCategoryNames; i++)
+ logLevel.setLogLevel(EventLoggerBase::eventCategoryNames[i].category, 7);
} else {
char * tmpString = strdup(parameters);
@@ -852,7 +853,7 @@ void CommandInterpreter::executeLogLevel(int processId,
return;
}
LogLevel::EventCategory cat;
- if(!EventLogger::matchEventCategory(categoryTxt,
+ if(!EventLoggerBase::matchEventCategory(categoryTxt,
&cat)){
ndbout << "Invalid loglevel specification, unknown category: "
<< categoryTxt << endl;
@@ -875,6 +876,7 @@ void CommandInterpreter::executeLogLevel(int processId,
if (result != 0) {
ndbout << _mgmtSrvr.getErrorText(result) << endl;
}
+#endif
}
@@ -1080,12 +1082,13 @@ void CommandInterpreter::executeTestOff(int processId,
void CommandInterpreter::executeEventReporting(int processId,
const char* parameters,
bool all) {
+#if 0
(void)all; // Don't want compiler warning
SetLogLevelOrd logLevel; logLevel.clear();
if (emptyString(parameters) || (strcmp(parameters, "ALL") == 0)) {
- for(Uint32 i = 0; i<EventLogger::noOfEventCategoryNames; i++)
- logLevel.setLogLevel(EventLogger::eventCategoryNames[i].category, 7);
+ for(Uint32 i = 0; i<EventLoggerBase::noOfEventCategoryNames; i++)
+ logLevel.setLogLevel(EventLoggerBase::eventCategoryNames[i].category, 7);
} else {
char * tmpString = strdup(parameters);
@@ -1101,7 +1104,7 @@ void CommandInterpreter::executeEventReporting(int processId,
return;
}
LogLevel::EventCategory cat;
- if(!EventLogger::matchEventCategory(categoryTxt,
+ if(!EventLoggerBase::matchEventCategory(categoryTxt,
&cat)){
ndbout << "Invalid loglevel specification, unknown category: "
<< categoryTxt << endl;
@@ -1124,6 +1127,7 @@ void CommandInterpreter::executeEventReporting(int processId,
if (result != 0) {
ndbout << _mgmtSrvr.getErrorText(result) << endl;
}
+#endif
}
void
diff --git a/ndb/src/mgmsrv/ConfigInfo.cpp b/ndb/src/mgmsrv/ConfigInfo.cpp
index ea19bc76d0e..0b7c664dfd3 100644
--- a/ndb/src/mgmsrv/ConfigInfo.cpp
+++ b/ndb/src/mgmsrv/ConfigInfo.cpp
@@ -129,11 +129,14 @@ ConfigInfo::m_SectionRules[] = {
{ "TCP", fixHostname, "HostName1" },
{ "TCP", fixHostname, "HostName2" },
+ { "SCI", fixHostname, "HostName1" },
+ { "SCI", fixHostname, "HostName2" },
{ "OSE", fixHostname, "HostName1" },
{ "OSE", fixHostname, "HostName2" },
{ "TCP", fixPortNumber, 0 }, // has to come after fixHostName
{ "SHM", fixPortNumber, 0 }, // has to come after fixHostName
+ { "SCI", fixPortNumber, 0 }, // has to come after fixHostName
//{ "SHM", fixShmKey, 0 },
/**
@@ -163,6 +166,8 @@ ConfigInfo::m_SectionRules[] = {
{ "TCP", checkTCPConstraints, "HostName1" },
{ "TCP", checkTCPConstraints, "HostName2" },
+ { "SCI", checkTCPConstraints, "HostName1" },
+ { "SCI", checkTCPConstraints, "HostName2" },
{ "*", checkMandatory, 0 },
@@ -1529,7 +1534,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
},
{
- CFG_TCP_HOSTNAME_1,
+ CFG_CONNECTION_HOSTNAME_1,
"HostName1",
"TCP",
"Name/IP of computer on one side of the connection",
@@ -1540,7 +1545,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
0, 0 },
{
- CFG_TCP_HOSTNAME_2,
+ CFG_CONNECTION_HOSTNAME_2,
"HostName2",
"TCP",
"Name/IP of computer on one side of the connection",
@@ -1808,7 +1813,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
"Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
ConfigInfo::USED,
false,
- ConfigInfo::INT,
+ ConfigInfo::STRING,
MANDATORY,
"0",
STR_VALUE(MAX_INT_RNIL) },
@@ -1820,16 +1825,50 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
"Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
ConfigInfo::USED,
false,
- ConfigInfo::INT,
+ ConfigInfo::STRING,
MANDATORY,
"0",
STR_VALUE(MAX_INT_RNIL) },
{
- CFG_SCI_ID_0,
- "SciId0",
+ CFG_CONNECTION_HOSTNAME_1,
+ "HostName1",
+ "SCI",
+ "Name/IP of computer on one side of the connection",
+ ConfigInfo::INTERNAL,
+ false,
+ ConfigInfo::STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_HOSTNAME_2,
+ "HostName2",
+ "SCI",
+ "Name/IP of computer on one side of the connection",
+ ConfigInfo::INTERNAL,
+ false,
+ ConfigInfo::STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_SERVER_PORT,
+ "PortNumber",
+ "SCI",
+ "Port used for this transporter",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::INT,
+ MANDATORY,
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_SCI_HOST1_ID_0,
+ "Host1SciId0",
"SCI",
- "Local SCI-node id for adapter 0 (a computer can have two adapters)",
+ "SCI-node id for adapter 0 on Host1 (a computer can have two adapters)",
ConfigInfo::USED,
false,
ConfigInfo::INT,
@@ -1838,10 +1877,22 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
STR_VALUE(MAX_INT_RNIL) },
{
- CFG_SCI_ID_1,
- "SciId1",
+ CFG_SCI_HOST1_ID_1,
+ "Host1SciId1",
+ "SCI",
+ "SCI-node id for adapter 1 on Host1 (a computer can have two adapters)",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::INT,
+ "0",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_SCI_HOST2_ID_0,
+ "Host2SciId0",
"SCI",
- "Local SCI-node id for adapter 1 (a computer can have two adapters)",
+ "SCI-node id for adapter 0 on Host2 (a computer can have two adapters)",
ConfigInfo::USED,
false,
ConfigInfo::INT,
@@ -1850,6 +1901,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
STR_VALUE(MAX_INT_RNIL) },
{
+ CFG_SCI_HOST2_ID_1,
+ "Host2SciId1",
+ "SCI",
+ "SCI-node id for adapter 1 on Host2 (a computer can have two adapters)",
+ ConfigInfo::USED,
+ false,
+ ConfigInfo::INT,
+ "0",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
CFG_CONNECTION_SEND_SIGNAL_ID,
"SendSignalId",
"SCI",
@@ -1881,9 +1944,9 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
ConfigInfo::USED,
false,
ConfigInfo::INT,
- "2K",
- "512",
- STR_VALUE(MAX_INT_RNIL) },
+ "8K",
+ "128",
+ "32K" },
{
CFG_SCI_BUFFER_MEM,
@@ -1894,7 +1957,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
false,
ConfigInfo::INT,
"1M",
- "256K",
+ "64K",
STR_VALUE(MAX_INT_RNIL) },
{
@@ -1935,7 +1998,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
},
{
- CFG_OSE_HOSTNAME_1,
+ CFG_CONNECTION_HOSTNAME_1,
"HostName1",
"OSE",
"Name of computer on one side of the connection",
@@ -1946,7 +2009,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
0, 0 },
{
- CFG_OSE_HOSTNAME_2,
+ CFG_CONNECTION_HOSTNAME_2,
"HostName2",
"OSE",
"Name of computer on one side of the connection",
@@ -2902,26 +2965,38 @@ fixPortNumber(InitConfigFileParser::Context & ctx, const char * data){
DBUG_ENTER("fixPortNumber");
Uint32 id1= 0, id2= 0;
+ const char *hostName1;
+ const char *hostName2;
require(ctx.m_currentSection->get("NodeId1", &id1));
require(ctx.m_currentSection->get("NodeId2", &id2));
+ require(ctx.m_currentSection->get("HostName1", &hostName1));
+ require(ctx.m_currentSection->get("HostName2", &hostName2));
+ DBUG_PRINT("info",("NodeId1=%d HostName1=\"%s\"",id1,hostName1));
+ DBUG_PRINT("info",("NodeId2=%d HostName2=\"%s\"",id2,hostName2));
+
if (id1 > id2) {
Uint32 tmp= id1;
+ const char *tmp_name= hostName1;
+ hostName1= hostName2;
id1= id2;
+ hostName2= tmp_name;
id2= tmp;
}
const Properties * node;
require(ctx.m_config->get("Node", id1, &node));
- BaseString hostname;
- require(node->get("HostName", hostname));
+ BaseString hostname(hostName1);
+ // require(node->get("HostName", hostname));
if (hostname.c_str()[0] == 0) {
- ctx.reportError("Hostname required on nodeid %d since it will act as server.", id1);
+ ctx.reportError("Hostname required on nodeid %d since it will "
+ "act as server.", id1);
DBUG_RETURN(false);
}
Uint32 port= 0;
- if (!node->get("ServerPort", &port) && !ctx.m_userProperties.get("ServerPort_", id1, &port)) {
+ if (!node->get("ServerPort", &port) &&
+ !ctx.m_userProperties.get("ServerPort_", id1, &port)) {
Uint32 adder= 0;
{
BaseString server_port_adder(hostname);
@@ -2932,7 +3007,8 @@ fixPortNumber(InitConfigFileParser::Context & ctx, const char * data){
Uint32 base= 0;
if (!ctx.m_userProperties.get("ServerPortBase", &base)){
- if(!(ctx.m_userDefaults && ctx.m_userDefaults->get("PortNumber", &base)) &&
+ if(!(ctx.m_userDefaults &&
+ ctx.m_userDefaults->get("PortNumber", &base)) &&
!ctx.m_systemDefaults->get("PortNumber", &base)) {
base= strtoll(NDB_BASE_PORT,0,0)+2;
// ctx.reportError("Cannot retrieve base port number");
@@ -2945,12 +3021,15 @@ fixPortNumber(InitConfigFileParser::Context & ctx, const char * data){
}
if(ctx.m_currentSection->contains("PortNumber")) {
- ndbout << "PortNumber should no longer be specificied per connection, please remove from config. Will be changed to " << port << endl;
+      ndbout << "PortNumber should no longer be specified "
+ << "per connection, please remove from config. "
+ << "Will be changed to " << port << endl;
ctx.m_currentSection->put("PortNumber", port, true);
} else
ctx.m_currentSection->put("PortNumber", port);
- DBUG_PRINT("info", ("connection %d-%d port %d host %s", id1, id2, port, hostname.c_str()));
+ DBUG_PRINT("info", ("connection %d-%d port %d host %s",
+ id1, id2, port, hostname.c_str()));
DBUG_RETURN(true);
}
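[editor note] fixPortNumber now orders the two endpoints by node id, treats the lower id as the server side, and derives the connection's port from a base PortNumber (from the defaults, else NDB_BASE_PORT+2) plus a running per-server-host adder, so each server host hands out consecutive ports. A rough sketch of that allocation idea; the real code keeps its counters in ctx.m_userProperties rather than a std::map.

    #include <map>
    #include <string>

    // Rough sketch of the port-allocation scheme in fixPortNumber: one base
    // port plus a per-server-host counter, so consecutive connections served
    // by the same host get base, base+1, base+2, ...
    class PortAllocator {
    public:
      explicit PortAllocator(unsigned basePort) : m_base(basePort) {}

      unsigned allocate(const std::string& serverHost) {
        unsigned& adder = m_adderPerHost[serverHost];   // value-initialized to 0
        return m_base + adder++;
      }

    private:
      unsigned m_base;
      std::map<std::string, unsigned> m_adderPerHost;
    };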
diff --git a/ndb/src/mgmsrv/Makefile.am b/ndb/src/mgmsrv/Makefile.am
index 8fa9ec5f63e..3b57b027827 100644
--- a/ndb/src/mgmsrv/Makefile.am
+++ b/ndb/src/mgmsrv/Makefile.am
@@ -12,8 +12,6 @@ ndb_mgmd_SOURCES = \
main.cpp \
Services.cpp \
convertStrToInt.cpp \
- NodeLogLevel.cpp \
- NodeLogLevelList.cpp \
SignalQueue.cpp \
MgmtSrvrConfig.cpp \
ConfigInfo.cpp \
@@ -29,7 +27,7 @@ LDADD_LOC = $(top_builddir)/ndb/src/libndbclient.la \
$(top_builddir)/ndb/src/common/editline/libeditline.a \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/mysys/libmysys.a \
- $(top_builddir)/strings/libmystrings.a
+ $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
@TERMCAP_LIB@
DEFS_LOC = -DDEFAULT_MYSQL_HOME="\"$(MYSQLBASEdir)\"" \
diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp
index 8380f3fd86a..92a8025295f 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.cpp
@@ -15,7 +15,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <ndb_global.h>
-#include <pthread.h>
+#include <my_pthread.h>
#include "MgmtSrvr.hpp"
#include "MgmtErrorReporter.hpp"
@@ -45,7 +45,6 @@
#include <ndb_version.h>
#include <SocketServer.hpp>
-#include "NodeLogLevel.hpp"
#include <NdbConfig.h>
#include <NdbAutoPtr.hpp>
@@ -62,71 +61,16 @@
#endif
extern int global_flag_send_heartbeat_now;
-
-static
-void
-CmdBackupCallback(const MgmtSrvr::BackupEvent & event)
-{
- char str[255];
-
- ndbout << endl;
-
- bool ok = false;
- switch(event.Event){
- case MgmtSrvr::BackupEvent::BackupStarted:
- ok = true;
- snprintf(str, sizeof(str),
- "Backup %d started", event.Started.BackupId);
- break;
- case MgmtSrvr::BackupEvent::BackupFailedToStart:
- ok = true;
- snprintf(str, sizeof(str),
- "Backup failed to start (Error %d)",
- event.FailedToStart.ErrorCode);
- break;
- case MgmtSrvr::BackupEvent::BackupCompleted:
- ok = true;
- snprintf(str, sizeof(str),
- "Backup %d completed",
- event.Completed.BackupId);
- ndbout << str << endl;
-
- snprintf(str, sizeof(str),
- " StartGCP: %d StopGCP: %d",
- event.Completed.startGCP, event.Completed.stopGCP);
- ndbout << str << endl;
-
- snprintf(str, sizeof(str),
- " #Records: %d #LogRecords: %d",
- event.Completed.NoOfRecords, event.Completed.NoOfLogRecords);
- ndbout << str << endl;
-
- snprintf(str, sizeof(str),
- " Data: %d bytes Log: %d bytes",
- event.Completed.NoOfBytes, event.Completed.NoOfLogBytes);
- break;
- case MgmtSrvr::BackupEvent::BackupAborted:
- ok = true;
- snprintf(str, sizeof(str),
- "Backup %d has been aborted reason %d",
- event.Aborted.BackupId,
- event.Aborted.Reason);
- break;
- }
- if(!ok){
- snprintf(str, sizeof(str), "Unknown backup event: %d", event.Event);
- }
- ndbout << str << endl;
-}
-
+extern int g_no_nodeid_checks;
void *
MgmtSrvr::logLevelThread_C(void* m)
{
MgmtSrvr *mgm = (MgmtSrvr*)m;
-
+ my_thread_init();
mgm->logLevelThreadRun();
+ my_thread_end();
NdbThread_Exit(0);
/* NOTREACHED */
return 0;
@@ -136,9 +80,10 @@ void *
MgmtSrvr::signalRecvThread_C(void *m)
{
MgmtSrvr *mgm = (MgmtSrvr*)m;
-
+ my_thread_init();
mgm->signalRecvThreadRun();
+ my_thread_end();
NdbThread_Exit(0);
/* NOTREACHED */
return 0;
@@ -188,44 +133,65 @@ MgmtSrvr::signalRecvThreadRun()
EventLogger g_EventLogger;
+static NdbOut&
+operator<<(NdbOut& out, const LogLevel & ll)
+{
+ out << "[LogLevel: ";
+ for(size_t i = 0; i<LogLevel::LOGLEVEL_CATEGORIES; i++)
+ out << ll.getLogLevel((LogLevel::EventCategory)i) << " ";
+ out << "]";
+ return out;
+}
+
void
MgmtSrvr::logLevelThreadRun()
{
- NdbMutex* threadMutex = NdbMutex_Create();
-
while (!_isStopThread) {
- if (_startedNodeId != 0) {
- NdbMutex_Lock(threadMutex);
-
- // Local node
- NodeLogLevel* n = NULL;
- while ((n = _nodeLogLevelList->next()) != NULL) {
- if (n->getNodeId() == _startedNodeId) {
- setNodeLogLevel(_startedNodeId, n->getLogLevelOrd(), true);
- }
- }
- // Cluster log
- while ((n = _clusterLogLevelList->next()) != NULL) {
- if (n->getNodeId() == _startedNodeId) {
- setEventReportingLevel(_startedNodeId, n->getLogLevelOrd(), true);
- }
- }
- _startedNodeId = 0;
-
- NdbMutex_Unlock(threadMutex);
+ /**
+ * Handle started nodes
+ */
+ EventSubscribeReq req;
+ req = m_statisticsListner.m_clients[0].m_logLevel;
+ req.blockRef = _ownReference;
- } // if (_startedNodeId != 0) {
+ SetLogLevelOrd ord;
+
+ m_started_nodes.lock();
+ while(m_started_nodes.size() > 0){
+ Uint32 node = m_started_nodes[0];
+ m_started_nodes.erase(0, false);
+ m_started_nodes.unlock();
+ setEventReportingLevelImpl(node, req);
+
+ ord = m_nodeLogLevel[node];
+ setNodeLogLevelImpl(node, ord);
+
+ m_started_nodes.lock();
+ }
+ m_started_nodes.unlock();
+
+ m_log_level_requests.lock();
+ while(m_log_level_requests.size() > 0){
+ req = m_log_level_requests[0];
+ m_log_level_requests.erase(0, false);
+ m_log_level_requests.unlock();
+
+ LogLevel tmp;
+ tmp = req;
+
+ if(req.blockRef == 0){
+ req.blockRef = _ownReference;
+ setEventReportingLevelImpl(0, req);
+ } else {
+ ord = req;
+ setNodeLogLevelImpl(req.blockRef, ord);
+ }
+ m_log_level_requests.lock();
+ }
+ m_log_level_requests.unlock();
NdbSleep_MilliSleep(_logLevelThreadSleep);
- } // while (!_isStopThread)
-
- NdbMutex_Destroy(threadMutex);
-}
-
-void
-MgmtSrvr::setStatisticsListner(StatisticsListner* listner)
-{
- m_statisticsListner = listner;
+ }
}
void
@@ -272,7 +238,7 @@ class ErrorItem
{
public:
int _errorCode;
- const BaseString _errorText;
+ const char * _errorText;
};
bool
@@ -429,110 +395,37 @@ MgmtSrvr::getPort() const {
ndb_mgm_destroy_iterator(iter);
- /*****************
- * Set Stat Port *
- *****************/
-#if 0
- if (!mgmProps->get("PortNumberStats", &tmp)){
- ndbout << "Could not find PortNumberStats in the configuration file."
- << endl;
- return false;
- }
- glob.port_stats = tmp;
-#endif
-
-#if 0
- const char * host;
- if(ndb_mgm_get_string_parameter(iter, mgmProps->get("ExecuteOnComputer", host)){
- ndbout << "Failed to find \"ExecuteOnComputer\" for my node" << endl;
- ndbout << "Unable to verify own hostname" << endl;
- return false;
- }
-
- const char * hostname;
- {
- const Properties * p;
- char buf[255];
- snprintf(buf, sizeof(buf), "Computer_%s", host.c_str());
- if(!glob.cluster_config->get(buf, &p)){
- ndbout << "Failed to find computer " << host << " in config" << endl;
- ndbout << "Unable to verify own hostname" << endl;
- return false;
- }
- if(!p->get("HostName", &hostname)){
- ndbout << "Failed to find \"HostName\" for computer " << host
- << " in config" << endl;
- ndbout << "Unable to verify own hostname" << endl;
- return false;
- }
- if(NdbHost_GetHostName(buf) != 0){
- ndbout << "Unable to get own hostname" << endl;
- ndbout << "Unable to verify own hostname" << endl;
- return false;
- }
- }
-
- const char * ip_address;
- if(mgmProps->get("IpAddress", &ip_address)){
- glob.use_specific_ip = true;
- glob.interface_name = strdup(ip_address);
- return true;
- }
-
- glob.interface_name = strdup(hostname);
-#endif
-
return port;
}
-int
-MgmtSrvr::getStatPort() const {
-#if 0
- const Properties *mgmProps;
- if(!getConfig()->get("Node", _ownNodeId, &mgmProps))
- return -1;
-
- int tmp = -1;
- if(!mgmProps->get("PortNumberStats", (Uint32 *)&tmp))
- return -1;
-
- return tmp;
-#else
- return -1;
-#endif
-}
-
/* Constructor */
MgmtSrvr::MgmtSrvr(NodeId nodeId,
const BaseString &configFilename,
- const BaseString &ndb_config_filename,
+ LocalConfig &local_config,
Config * config):
_blockNumber(1), // Hard coded block number since it makes it easy to send
// signals to other management servers.
_ownReference(0),
+ m_allocated_resources(*this),
theSignalIdleList(NULL),
theWaitState(WAIT_SUBSCRIBE_CONF),
- theConfCount(0),
- m_allocated_resources(*this) {
-
+ m_statisticsListner(this),
+ m_local_config(local_config)
+{
+
DBUG_ENTER("MgmtSrvr::MgmtSrvr");
_config = NULL;
- _isStatPortActive = false;
- _isClusterLogStatActive = false;
_isStopThread = false;
_logLevelThread = NULL;
_logLevelThreadSleep = 500;
m_signalRecvThread = NULL;
- _startedNodeId = 0;
theFacade = 0;
m_newConfig = NULL;
m_configFilename = configFilename;
- setCallback(CmdBackupCallback);
- m_localNdbConfigFilename = ndb_config_filename;
m_nextConfigGenerationNumber = 0;
@@ -583,33 +476,64 @@ MgmtSrvr::MgmtSrvr(NodeId nodeId,
ndb_mgm_destroy_iterator(iter);
}
- m_statisticsListner = NULL;
-
- _nodeLogLevelList = new NodeLogLevelList();
- _clusterLogLevelList = new NodeLogLevelList();
-
_props = NULL;
-
_ownNodeId= 0;
NodeId tmp= nodeId;
BaseString error_string;
- if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM, 0, 0, error_string)){
+#if 0
+ char my_hostname[256];
+ struct sockaddr_in tmp_addr;
+ SOCKET_SIZE_TYPE addrlen= sizeof(tmp_addr);
+ if (!g_no_nodeid_checks) {
+ if (gethostname(my_hostname, sizeof(my_hostname))) {
+ ndbout << "error: gethostname() - " << strerror(errno) << endl;
+ exit(-1);
+ }
+ if (Ndb_getInAddr(&(((sockaddr_in*)&tmp_addr)->sin_addr),my_hostname)) {
+ ndbout << "error: Ndb_getInAddr(" << my_hostname << ") - "
+ << strerror(errno) << endl;
+ exit(-1);
+ }
+ }
+ if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM,
+ (struct sockaddr *)&tmp_addr,
+ &addrlen, error_string)){
ndbout << "Unable to obtain requested nodeid: "
<< error_string.c_str() << endl;
exit(-1);
}
+#else
+ if (!alloc_node_id(&tmp, NDB_MGM_NODE_TYPE_MGM,
+ 0, 0, error_string)){
+ ndbout << "Unable to obtain requested nodeid: "
+ << error_string.c_str() << endl;
+ exit(-1);
+ }
+#endif
_ownNodeId = tmp;
{
DBUG_PRINT("info", ("verifyConfig"));
- ConfigRetriever cr(NDB_VERSION, NDB_MGM_NODE_TYPE_MGM);
+ ConfigRetriever cr(m_local_config, NDB_VERSION, NDB_MGM_NODE_TYPE_MGM);
if (!cr.verifyConfig(config->m_configValues, _ownNodeId)) {
ndbout << cr.getErrorString() << endl;
exit(-1);
}
}
+ {
+ MgmStatService::StatListener se;
+ se.m_socket = -1;
+ for(size_t t = 0; t<LogLevel::LOGLEVEL_CATEGORIES; t++){
+ se.m_logLevel.setLogLevel((LogLevel::EventCategory)t, 7);
+ }
+ se.m_logLevel.setLogLevel(LogLevel::llError, 15);
+ se.m_logLevel.setLogLevel(LogLevel::llBackup, 15);
+ m_statisticsListner.m_clients.push_back(se);
+ m_statisticsListner.m_logLevel = se.m_logLevel;
+ }
+
DBUG_VOID_RETURN;
}
@@ -671,8 +595,6 @@ MgmtSrvr::start(BaseString &error_string)
// Set the initial confirmation count for subscribe requests confirm
// from NDB nodes in the cluster.
//
- theConfCount = getNodeCount(NDB_MGM_NODE_TYPE_NDB);
-
// Loglevel thread
_logLevelThread = NdbThread_Create(logLevelThread_C,
(void**)this,
@@ -713,9 +635,6 @@ MgmtSrvr::~MgmtSrvr()
if(_config != NULL)
delete _config;
- delete _nodeLogLevelList;
- delete _clusterLogLevelList;
-
// End set log level thread
void* res = 0;
_isStopThread = true;
@@ -736,6 +655,9 @@ MgmtSrvr::~MgmtSrvr()
int MgmtSrvr::okToSendTo(NodeId processId, bool unCond)
{
+ if(processId == 0)
+ return 0;
+
if (getNodeType(processId) != NDB_MGM_NODE_TYPE_NDB)
return WRONG_PROCESS_TYPE;
@@ -1020,36 +942,38 @@ int
MgmtSrvr::versionNode(int processId, bool abort,
VersionCallback callback, void * anyData)
{
+ int version;
+
if(m_versionRec.inUse)
return OPERATION_IN_PROGRESS;
m_versionRec.callback = callback;
m_versionRec.inUse = true ;
- ClusterMgr::Node node;
- int version;
- if (getNodeType(processId) == NDB_MGM_NODE_TYPE_MGM) {
- if(m_versionRec.callback != 0)
- m_versionRec.callback(processId, NDB_VERSION, this,0);
- }
- if (getNodeType(processId) == NDB_MGM_NODE_TYPE_NDB) {
- node = theFacade->theClusterMgr->getNodeInfo(processId);
- version = node.m_info.m_version;
- if(theFacade->theClusterMgr->getNodeInfo(processId).connected)
- if(m_versionRec.callback != 0)
- m_versionRec.callback(processId, version, this,0);
- else
- if(m_versionRec.callback != 0)
- m_versionRec.callback(processId, 0, this,0);
-
+ if (getOwnNodeId() == processId)
+ {
+ version= NDB_VERSION;
}
-
- if (getNodeType(processId) == NDB_MGM_NODE_TYPE_API) {
+ else if (getNodeType(processId) == NDB_MGM_NODE_TYPE_NDB)
+ {
+ ClusterMgr::Node node= theFacade->theClusterMgr->getNodeInfo(processId);
+ if(node.connected)
+ version= node.m_info.m_version;
+ else
+ version= 0;
+ }
+ else if (getNodeType(processId) == NDB_MGM_NODE_TYPE_API ||
+ getNodeType(processId) == NDB_MGM_NODE_TYPE_MGM)
+ {
return sendVersionReq(processId);
}
+ if(m_versionRec.callback != 0)
+ m_versionRec.callback(processId, version, this,0);
m_versionRec.inUse = false ;
- return 0;
+ m_versionRec.version[processId]= version;
+
+ return 0;
}
int
@@ -1460,17 +1384,14 @@ MgmtSrvr::status(int processId,
Uint32 * nodegroup,
Uint32 * connectCount)
{
- if (getNodeType(processId) == NDB_MGM_NODE_TYPE_API) {
+ if (getNodeType(processId) == NDB_MGM_NODE_TYPE_API ||
+ getNodeType(processId) == NDB_MGM_NODE_TYPE_MGM) {
if(versionNode(processId, false,0,0) ==0)
* version = m_versionRec.version[processId];
else
* version = 0;
}
- if (getNodeType(processId) == NDB_MGM_NODE_TYPE_MGM) {
- * version = NDB_VERSION;
- }
-
const ClusterMgr::Node node =
theFacade->theClusterMgr->getNodeInfo(processId);
@@ -1540,175 +1461,72 @@ MgmtSrvr::status(int processId,
return -1;
}
-
-
-//****************************************************************************
-//****************************************************************************
-int
-MgmtSrvr::startStatisticEventReporting(int level)
-{
- SetLogLevelOrd ll;
- NodeId nodeId = 0;
-
- ll.clear();
- ll.setLogLevel(LogLevel::llStatistic, level);
-
- if (level > 0) {
- _isStatPortActive = true;
- } else {
- _isStatPortActive = false;
-
- if (_isClusterLogStatActive) {
- return 0;
- }
- }
-
- while (getNextNodeId(&nodeId, NDB_MGM_NODE_TYPE_NDB)) {
- setEventReportingLevelImpl(nodeId, ll);
- }
-
- return 0;
-}
-
-int
-MgmtSrvr::setEventReportingLevel(int processId, const SetLogLevelOrd & ll,
- bool isResend)
-{
- for (Uint32 i = 0; i < ll.noOfEntries; i++) {
- if (ll.theCategories[i] == LogLevel::llStatistic) {
- if (ll.theLevels[i] > 0) {
- _isClusterLogStatActive = true;
- break;
- } else {
- _isClusterLogStatActive = false;
-
- if (_isStatPortActive) {
- return 0;
- }
- break;
- }
- } // if (ll.theCategories
- } // for (int i = 0
-
- return setEventReportingLevelImpl(processId, ll, isResend);
-}
int
MgmtSrvr::setEventReportingLevelImpl(int processId,
- const SetLogLevelOrd & ll,
- bool isResend)
+ const EventSubscribeReq& ll)
{
- Uint32 i;
- for(i = 0; i<ll.noOfEntries; i++){
- // Save log level for the cluster log
- if (!isResend) {
- NodeLogLevel* n = NULL;
- bool found = false;
- while ((n = _clusterLogLevelList->next()) != NULL) {
- if (n->getNodeId() == processId &&
- n->getCategory() == ll.theCategories[i]) {
-
- n->setLevel(ll.theLevels[i]);
- found = true;
- }
- }
- if (!found) {
- _clusterLogLevelList->add(new NodeLogLevel(processId, ll));
- }
- }
- }
-
+
int result = okToSendTo(processId, true);
if (result != 0) {
return result;
}
- NdbApiSignal* signal = getSignal();
- if (signal == NULL) {
- return COULD_NOT_ALLOCATE_MEMORY;
- }
+ NdbApiSignal signal(_ownReference);
EventSubscribeReq * dst =
- CAST_PTR(EventSubscribeReq, signal->getDataPtrSend());
- for(i = 0; i<ll.noOfEntries; i++){
- dst->theCategories[i] = ll.theCategories[i];
- dst->theLevels[i] = ll.theLevels[i];
- }
-
- dst->noOfEntries = ll.noOfEntries;
- dst->blockRef = _ownReference;
+ CAST_PTR(EventSubscribeReq, signal.getDataPtrSend());
- signal->set(TestOrd::TraceAPI, CMVMI, GSN_EVENT_SUBSCRIBE_REQ,
- EventSubscribeReq::SignalLength);
+ * dst = ll;
+
+ signal.set(TestOrd::TraceAPI, CMVMI, GSN_EVENT_SUBSCRIBE_REQ,
+ EventSubscribeReq::SignalLength);
+
+ theFacade->lock_mutex();
+ send(&signal, processId, NODE_TYPE_DB);
+ theFacade->unlock_mutex();
- result = sendSignal(processId, WAIT_SUBSCRIBE_CONF, signal, true);
- if (result == -1) {
- return SEND_OR_RECEIVE_FAILED;
- }
- else {
- // Increment the conf counter
- theConfCount++;
- }
-
return 0;
}
//****************************************************************************
//****************************************************************************
int
-MgmtSrvr::setNodeLogLevel(int processId, const SetLogLevelOrd & ll,
- bool isResend)
+MgmtSrvr::setNodeLogLevelImpl(int processId, const SetLogLevelOrd & ll)
{
- Uint32 i;
- for(i = 0; i<ll.noOfEntries; i++){
- // Save log level for the cluster log
- if (!isResend) {
- NodeLogLevel* n = NULL;
- bool found = false;
- while ((n = _clusterLogLevelList->next()) != NULL) {
- if (n->getNodeId() == processId &&
- n->getCategory() == ll.theCategories[i]) {
-
- n->setLevel(ll.theLevels[i]);
- found = true;
- }
- }
- if (!found) {
- _clusterLogLevelList->add(new NodeLogLevel(processId, ll));
- }
- }
- }
-
int result = okToSendTo(processId, true);
if (result != 0) {
return result;
}
- NdbApiSignal* signal = getSignal();
- if (signal == NULL) {
- return COULD_NOT_ALLOCATE_MEMORY;
- }
-
- SetLogLevelOrd * dst = CAST_PTR(SetLogLevelOrd, signal->getDataPtrSend());
-
- for(i = 0; i<ll.noOfEntries; i++){
- dst->theCategories[i] = ll.theCategories[i];
- dst->theLevels[i] = ll.theLevels[i];
- }
+ NdbApiSignal signal(_ownReference);
- dst->noOfEntries = ll.noOfEntries;
+ SetLogLevelOrd * dst = CAST_PTR(SetLogLevelOrd, signal.getDataPtrSend());
- signal->set(TestOrd::TraceAPI, CMVMI, GSN_SET_LOGLEVELORD,
- SetLogLevelOrd::SignalLength);
-
- result = sendSignal(processId, NO_WAIT, signal, true);
- if (result == -1) {
- return SEND_OR_RECEIVE_FAILED;
- }
+ * dst = ll;
+
+ signal.set(TestOrd::TraceAPI, CMVMI, GSN_SET_LOGLEVELORD,
+ SetLogLevelOrd::SignalLength);
+
+ theFacade->lock_mutex();
+ theFacade->sendSignalUnCond(&signal, processId);
+ theFacade->unlock_mutex();
return 0;
}
+int
+MgmtSrvr::send(NdbApiSignal* signal, Uint32 node, Uint32 node_type){
+ Uint32 max = (node == 0) ? MAX_NODES : node + 1;
+
+ for(; node < max; node++){
+ while(nodeTypes[node] != (int)node_type && node < max) node++;
+ if(nodeTypes[node] != (int)node_type)
+ break;
+ theFacade->sendSignalUnCond(signal, node);
+ }
+ return 0;
+}
//****************************************************************************
//****************************************************************************
@@ -2003,7 +1821,7 @@ const char* MgmtSrvr::getErrorText(int errorCode)
for (int i = 0; i < noOfErrorCodes; ++i) {
if (errorCode == errorTable[i]._errorCode) {
- return errorTable[i]._errorText.c_str();
+ return errorTable[i]._errorText;
}
}
@@ -2011,21 +1829,6 @@ const char* MgmtSrvr::getErrorText(int errorCode)
return text;
}
-/*****************************************************************************
- * Handle reception of various signals
- *****************************************************************************/
-
-int
-MgmtSrvr::handleSTATISTICS_CONF(NdbApiSignal* signal)
-{
- //ndbout << "MgmtSrvr::handleSTATISTICS_CONF" << endl;
-
- int x = signal->readData(1);
- //ndbout << "MgmtSrvr::handleSTATISTICS_CONF, x: " << x << endl;
- _statistics._test1 = x;
- return 0;
-}
-
void
MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal)
{
@@ -2049,51 +1852,7 @@ MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal)
}
break;
- case GSN_STATISTICS_CONF:
- if (theWaitState != WAIT_STATISTICS) {
- g_EventLogger.warning("MgmtSrvr::handleReceivedSignal, unexpected "
- "signal received, gsn %d, theWaitState = %d",
- gsn, theWaitState);
-
- return;
- }
- returnCode = handleSTATISTICS_CONF(signal);
- if (returnCode != -1) {
- theWaitState = NO_WAIT;
- }
- break;
-
-
- case GSN_SET_VAR_CONF:
- if (theWaitState != WAIT_SET_VAR) {
- g_EventLogger.warning("MgmtSrvr::handleReceivedSignal, unexpected "
- "signal received, gsn %d, theWaitState = %d",
- gsn, theWaitState);
- return;
- }
- theWaitState = NO_WAIT;
- _setVarReqResult = 0;
- break;
-
- case GSN_SET_VAR_REF:
- if (theWaitState != WAIT_SET_VAR) {
- g_EventLogger.warning("MgmtSrvr::handleReceivedSignal, unexpected "
- "signal received, gsn %d, theWaitState = %d",
- gsn, theWaitState);
- return;
- }
- theWaitState = NO_WAIT;
- _setVarReqResult = -1;
- break;
-
case GSN_EVENT_SUBSCRIBE_CONF:
- theConfCount--; // OK, we've received a conf message
- if (theConfCount < 0) {
- g_EventLogger.warning("MgmtSrvr::handleReceivedSignal, unexpected "
- "signal received, gsn %d, theWaitState = %d",
- gsn, theWaitState);
- theConfCount = 0;
- }
break;
case GSN_EVENT_REP:
@@ -2173,7 +1932,6 @@ MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal)
event.Completed.NoOfLogBytes = rep->noOfLogBytes;
event.Completed.NoOfRecords = rep->noOfRecords;
event.Completed.NoOfLogRecords = rep->noOfLogRecords;
-
event.Completed.stopGCP = rep->stopGCP;
event.Completed.startGCP = rep->startGCP;
event.Nodes = rep->nodes;
@@ -2276,20 +2034,19 @@ void
MgmtSrvr::handleStatus(NodeId nodeId, bool alive)
{
if (alive) {
- _startedNodeId = nodeId; // Used by logLevelThreadRun()
+ m_started_nodes.push_back(nodeId);
Uint32 theData[25];
theData[0] = EventReport::Connected;
theData[1] = nodeId;
+ eventReport(_ownNodeId, theData);
} else {
handleStopReply(nodeId, 0);
- theConfCount++; // Increment the event subscr conf count because
-
+
Uint32 theData[25];
theData[0] = EventReport::Disconnected;
theData[1] = nodeId;
-
+
eventReport(_ownNodeId, theData);
- g_EventLogger.info("Lost connection to node %d", nodeId);
}
}
@@ -2337,32 +2094,42 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId,
SOCKET_SIZE_TYPE *client_addr_len,
BaseString &error_string)
{
+ DBUG_ENTER("MgmtSrvr::alloc_node_id");
+ DBUG_PRINT("enter", ("nodeid=%d, type=%d, client_addr=%d",
+ *nodeId, type, client_addr));
+ if (g_no_nodeid_checks) {
+ if (*nodeId == 0) {
+ error_string.appfmt("no-nodeid-ckecks set in manegment server.\n"
+ "node id must be set explicitly in connectstring");
+ DBUG_RETURN(false);
+ }
+ DBUG_RETURN(true);
+ }
Guard g(&f_node_id_mutex);
-#if 0
- ndbout << "MgmtSrvr::getFreeNodeId type=" << type
- << " *nodeid=" << *nodeId << endl;
-#endif
-
+ int no_mgm= 0;
NodeBitmask connected_nodes(m_reserved_nodes);
- if (theFacade && theFacade->theClusterMgr) {
- for(Uint32 i = 0; i < MAX_NODES; i++)
- if (getNodeType(i) == NDB_MGM_NODE_TYPE_NDB) {
- const ClusterMgr::Node &node= theFacade->theClusterMgr->getNodeInfo(i);
- if (node.connected)
- connected_nodes.bitOR(node.m_state.m_connected_nodes);
+ for(Uint32 i = 0; i < MAX_NODES; i++)
+ {
+ if (getNodeType(i) == NDB_MGM_NODE_TYPE_NDB &&
+ theFacade && theFacade->theClusterMgr) {
+ const ClusterMgr::Node &node= theFacade->theClusterMgr->getNodeInfo(i);
+ if (node.connected) {
+ connected_nodes.bitOR(node.m_state.m_connected_nodes);
}
+ } else if (getNodeType(i) == NDB_MGM_NODE_TYPE_MGM)
+ no_mgm++;
}
-
bool found_matching_id= false;
bool found_matching_type= false;
bool found_free_node= false;
- const char *config_hostname = 0;
+ unsigned id_found= 0;
+ const char *config_hostname= 0;
struct in_addr config_addr= {0};
int r_config_addr= -1;
unsigned type_c= 0;
- ndb_mgm_configuration_iterator iter(*(ndb_mgm_configuration *)_config->m_configValues,
- CFG_SECTION_NODE);
+ ndb_mgm_configuration_iterator
+ iter(*(ndb_mgm_configuration *)_config->m_configValues, CFG_SECTION_NODE);
for(iter.first(); iter.valid(); iter.next()) {
unsigned tmp= 0;
if(iter.get(CFG_NODE_ID, &tmp)) abort();
@@ -2370,15 +2137,16 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId,
continue;
found_matching_id= true;
if(iter.get(CFG_TYPE_OF_SECTION, &type_c)) abort();
- if(type_c != type)
+ if(type_c != (unsigned)type)
continue;
found_matching_type= true;
if (connected_nodes.get(tmp))
continue;
found_free_node= true;
if(iter.get(CFG_NODE_HOST, &config_hostname)) abort();
-
- if (config_hostname && config_hostname[0] != 0 && client_addr) {
+ if (config_hostname && config_hostname[0] == 0)
+ config_hostname= 0;
+ else if (client_addr) {
// check hostname compatability
const void *tmp_in= &(((sockaddr_in*)client_addr)->sin_addr);
if((r_config_addr= Ndb_getInAddr(&config_addr, config_hostname)) != 0
@@ -2388,39 +2156,76 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId,
|| memcmp(&tmp_addr, tmp_in, sizeof(config_addr)) != 0) {
// not localhost
#if 0
- ndbout << "MgmtSrvr::getFreeNodeId compare failed for \"" << config_hostname
- << "\" id=" << tmp << endl;
+ ndbout << "MgmtSrvr::getFreeNodeId compare failed for \""
+ << config_hostname
+ << "\" id=" << tmp << endl;
#endif
continue;
}
// connecting through localhost
- // check if config_hostname match hostname
- char my_hostname[256];
- if (gethostname(my_hostname, sizeof(my_hostname)) != 0)
- continue;
- if(Ndb_getInAddr(&tmp_addr, my_hostname) != 0
- || memcmp(&tmp_addr, &config_addr, sizeof(config_addr)) != 0) {
- // no match
+ // check if config_hostname is local
+ if (!SocketServer::tryBind(0,config_hostname)) {
continue;
}
}
+ } else { // client_addr == 0
+ if (!SocketServer::tryBind(0,config_hostname)) {
+ continue;
+ }
}
- *nodeId= tmp;
- if (client_addr)
- m_connect_address[tmp]= ((struct sockaddr_in *)client_addr)->sin_addr;
- else
- Ndb_getInAddr(&(m_connect_address[tmp]), "localhost");
- m_reserved_nodes.set(tmp);
-#if 0
- ndbout << "MgmtSrvr::getFreeNodeId found type=" << type
- << " *nodeid=" << *nodeId << endl;
-#endif
- return true;
+ if (*nodeId != 0 ||
+ type != NDB_MGM_NODE_TYPE_MGM ||
+ no_mgm == 1) { // any match is ok
+ id_found= tmp;
+ break;
+ }
+ if (id_found) { // mgmt server may only have one match
+ error_string.appfmt("Ambiguous node id's %d and %d.\n"
+ "Suggest specifying node id in connectstring,\n"
+ "or specifying unique host names in config file.",
+ id_found, tmp);
+ DBUG_RETURN(false);
+ }
+ if (config_hostname == 0) {
+ error_string.appfmt("Ambiguity for node id %d.\n"
+ "Suggest specifying node id in connectstring,\n"
+ "or specifying unique host names in config file,\n"
+ "or specifying just one mgmt server in config file.",
+ tmp);
+ DBUG_RETURN(false);
+ }
+ id_found= tmp; // mgmt server matched, check for more matches
+ }
+
+ if (id_found)
+ {
+ *nodeId= id_found;
+ DBUG_PRINT("info", ("allocating node id %d",*nodeId));
+ {
+ int r= 0;
+ if (client_addr)
+ m_connect_address[id_found]=
+ ((struct sockaddr_in *)client_addr)->sin_addr;
+ else if (config_hostname)
+ r= Ndb_getInAddr(&(m_connect_address[id_found]), config_hostname);
+ else {
+ char name[256];
+ r= gethostname(name, sizeof(name));
+ if (r == 0) {
+ name[sizeof(name)-1]= 0;
+ r= Ndb_getInAddr(&(m_connect_address[id_found]), name);
+ }
+ }
+ if (r)
+ m_connect_address[id_found].s_addr= 0;
+ }
+ m_reserved_nodes.set(id_found);
+ DBUG_RETURN(true);
}
if (found_matching_type && !found_free_node) {
- // we have a temporary error which might be due to that we have got the latest
- // connect status from db-nodes. Force update.
+    // we have a temporary error which might be due to not yet having
+    // the latest connect status from db-nodes.  Force update.
global_flag_send_heartbeat_now= 1;
}
@@ -2429,7 +2234,8 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId,
const char *alias, *str;
alias= ndb_mgm_get_node_type_alias_string(type, &str);
type_string.assfmt("%s(%s)", alias, str);
- alias= ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type)type_c, &str);
+ alias= ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type)type_c,
+ &str);
type_c_string.assfmt("%s(%s)", alias, str);
}
@@ -2438,11 +2244,14 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId,
if (found_matching_type)
if (found_free_node)
error_string.appfmt("Connection done from wrong host ip %s.",
- inet_ntoa(((struct sockaddr_in *)(client_addr))->sin_addr));
+ inet_ntoa(((struct sockaddr_in *)
+ (client_addr))->sin_addr));
else
- error_string.appfmt("No free node id found for %s.", type_string.c_str());
+ error_string.appfmt("No free node id found for %s.",
+ type_string.c_str());
else
- error_string.appfmt("No %s node defined in config file.", type_string.c_str());
+ error_string.appfmt("No %s node defined in config file.",
+ type_string.c_str());
else
error_string.append("No nodes defined in config file.");
} else {
@@ -2451,19 +2260,23 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId,
if (found_free_node) {
// have to split these into two since inet_ntoa overwrites itself
error_string.appfmt("Connection with id %d done from wrong host ip %s,",
- *nodeId, inet_ntoa(((struct sockaddr_in *)(client_addr))->sin_addr));
+ *nodeId, inet_ntoa(((struct sockaddr_in *)
+ (client_addr))->sin_addr));
error_string.appfmt(" expected %s(%s).", config_hostname,
- r_config_addr ? "lookup failed" : inet_ntoa(config_addr));
+ r_config_addr ?
+ "lookup failed" : inet_ntoa(config_addr));
} else
- error_string.appfmt("Id %d already allocated by another node.", *nodeId);
+ error_string.appfmt("Id %d already allocated by another node.",
+ *nodeId);
else
error_string.appfmt("Id %d configured as %s, connect attempted as %s.",
- *nodeId, type_c_string.c_str(), type_string.c_str());
+ *nodeId, type_c_string.c_str(),
+ type_string.c_str());
else
- error_string.appfmt("No node defined with id=%d in config file.", *nodeId);
+ error_string.appfmt("No node defined with id=%d in config file.",
+ *nodeId);
}
-
- return false;
+ DBUG_RETURN(false);
}
bool
@@ -2483,91 +2296,23 @@ MgmtSrvr::getNextNodeId(NodeId * nodeId, enum ndb_mgm_node_type type) const
return true;
}
+#include "Services.hpp"
+
void
MgmtSrvr::eventReport(NodeId nodeId, const Uint32 * theData)
{
const EventReport * const eventReport = (EventReport *)&theData[0];
-
+
EventReport::EventType type = eventReport->getEventType();
-
- if (type == EventReport::TransReportCounters ||
- type == EventReport::OperationReportCounters) {
-
- if (_isClusterLogStatActive) {
- g_EventLogger.log(type, theData, nodeId);
- }
-
- if (_isStatPortActive) {
- char theTime[128];
- struct tm* tm_now;
- time_t now;
- now = time((time_t*)NULL);
-#ifdef NDB_WIN32
- tm_now = localtime(&now);
-#else
- tm_now = gmtime(&now);
-#endif
-
- snprintf(theTime, sizeof(theTime),
- STATISTIC_DATE,
- tm_now->tm_year + 1900,
- tm_now->tm_mon,
- tm_now->tm_mday,
- tm_now->tm_hour,
- tm_now->tm_min,
- tm_now->tm_sec);
-
- char str[255];
-
- if (type == EventReport::TransReportCounters) {
- snprintf(str, sizeof(str),
- STATISTIC_LINE,
- theTime,
- (int)now,
- nodeId,
- theData[1],
- theData[2],
- theData[3],
- // theData[4], simple reads
- theData[5],
- theData[6],
- theData[7],
- theData[8]);
- } else if (type == EventReport::OperationReportCounters) {
- snprintf(str, sizeof(str),
- OP_STATISTIC_LINE,
- theTime,
- (int)now,
- nodeId,
- theData[1]);
- }
-
- if(m_statisticsListner != 0){
- m_statisticsListner->println_statistics(str);
- }
- }
-
- return;
-
- } // if (type ==
-
// Log event
- g_EventLogger.log(type, theData, nodeId);
-
+ g_EventLogger.log(type, theData, nodeId,
+ &m_statisticsListner.m_clients[0].m_logLevel);
+ m_statisticsListner.log(type, theData, nodeId);
}
/***************************************************************************
* Backup
***************************************************************************/
-
-MgmtSrvr::BackupCallback
-MgmtSrvr::setCallback(BackupCallback aCall)
-{
- BackupCallback ret = m_backupCallback;
- m_backupCallback = aCall;
- return ret;
-}
-
int
MgmtSrvr::startBackup(Uint32& backupId, bool waitCompleted)
{
@@ -2674,102 +2419,18 @@ MgmtSrvr::abortBackup(Uint32 backupId)
void
MgmtSrvr::backupCallback(BackupEvent & event)
{
- char str[255];
-
- bool ok = false;
+ m_lastBackupEvent = event;
switch(event.Event){
- case BackupEvent::BackupStarted:
- ok = true;
- snprintf(str, sizeof(str),
- "Backup %d started", event.Started.BackupId);
- break;
case BackupEvent::BackupFailedToStart:
- ok = true;
- snprintf(str, sizeof(str),
- "Backup failed to start (Backup error %d)",
- event.FailedToStart.ErrorCode);
- break;
- case BackupEvent::BackupCompleted:
- ok = true;
- snprintf(str, sizeof(str),
- "Backup %d completed",
- event.Completed.BackupId);
- g_EventLogger.info(str);
-
- snprintf(str, sizeof(str),
- " StartGCP: %d StopGCP: %d",
- event.Completed.startGCP, event.Completed.stopGCP);
- g_EventLogger.info(str);
-
- snprintf(str, sizeof(str),
- " #Records: %d #LogRecords: %d",
- event.Completed.NoOfRecords, event.Completed.NoOfLogRecords);
- g_EventLogger.info(str);
-
- snprintf(str, sizeof(str),
- " Data: %d bytes Log: %d bytes",
- event.Completed.NoOfBytes, event.Completed.NoOfLogBytes);
- break;
case BackupEvent::BackupAborted:
- ok = true;
- snprintf(str, sizeof(str),
- "Backup %d has been aborted reason %d",
- event.Aborted.BackupId,
- event.Aborted.Reason);
- break;
- }
- if(!ok){
- snprintf(str, sizeof(str),
- "Unknown backup event: %d",
- event.Event);
-
- }
- g_EventLogger.info(str);
-
- switch (theWaitState){
- case WAIT_BACKUP_STARTED:
- switch(event.Event){
- case BackupEvent::BackupStarted:
- case BackupEvent::BackupFailedToStart:
- m_lastBackupEvent = event;
- theWaitState = NO_WAIT;
- break;
- default:
- snprintf(str, sizeof(str),
- "Received event %d in unexpected state WAIT_BACKUP_STARTED",
- event.Event);
- g_EventLogger.info(str);
- return;
- }
-
+ case BackupEvent::BackupCompleted:
+ theWaitState = NO_WAIT;
break;
- case WAIT_BACKUP_COMPLETED:
- switch(event.Event){
- case BackupEvent::BackupCompleted:
- case BackupEvent::BackupAborted:
- case BackupEvent::BackupFailedToStart:
- m_lastBackupEvent = event;
+ case BackupEvent::BackupStarted:
+ if(theWaitState == WAIT_BACKUP_STARTED)
theWaitState = NO_WAIT;
- break;
- default:
- snprintf(str, sizeof(str),
- "Received event %d in unexpected state WAIT_BACKUP_COMPLETED",
- event.Event);
- g_EventLogger.info(str);
- return;
- }
- break;
- default:
- snprintf(str, sizeof(str), "Received event %d in unexpected state = %d",
- event.Event, theWaitState);
- g_EventLogger.info(str);
- return;
-
- }
-
- if(m_backupCallback != 0){
- (* m_backupCallback)(event);
}
+ return;
}
@@ -2957,15 +2618,15 @@ MgmtSrvr::setDbParameter(int node, int param, const char * value,
switch(p_type){
case 0:
res = i2.set(param, val_32);
- ndbout_c("Updateing node %d param: %d to %d", node, param, val_32);
+ ndbout_c("Updating node %d param: %d to %d", node, param, val_32);
break;
case 1:
res = i2.set(param, val_64);
- ndbout_c("Updateing node %d param: %d to %Ld", node, param, val_32);
+ ndbout_c("Updating node %d param: %d to %Ld", node, param, val_32);
break;
case 2:
res = i2.set(param, val_char);
- ndbout_c("Updateing node %d param: %d to %s", node, param, val_char);
+ ndbout_c("Updating node %d param: %d to %s", node, param, val_char);
break;
default:
abort();
@@ -2981,3 +2642,7 @@ template class Vector<SigMatch>;
#if __SUNPRO_CC != 0x560
template bool SignalQueue::waitFor<SigMatch>(Vector<SigMatch>&, SigMatch*&, NdbApiSignal*&, unsigned);
#endif
+
+template class MutexVector<unsigned short>;
+template class MutexVector<MgmStatService::StatListener>;
+template class MutexVector<EventSubscribeReq>;
diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp
index d7f9f7a1af3..c6157db489a 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.hpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.hpp
@@ -28,8 +28,8 @@
#include <signaldata/ManagementServer.hpp>
#include "SignalQueue.hpp"
#include <ndb_version.h>
-
-#include "NodeLogLevelList.hpp"
+#include <EventLogger.hpp>
+#include <signaldata/EventSubscribeReq.hpp>
/**
* @desc Block number for Management server.
@@ -43,6 +43,29 @@ class Config;
class SetLogLevelOrd;
class SocketServer;
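+/**
+ * Pushes cluster log events to attached "listen event" API sessions,
+ * each client filtered by its own per-category log level threshold.
+ */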
+class MgmStatService : public EventLoggerBase
+{
+ friend class MgmtSrvr;
+public:
+ struct StatListener : public EventLoggerBase {
+ NDB_SOCKET_TYPE m_socket;
+ };
+
+private:
+ class MgmtSrvr * m_mgmsrv;
+ MutexVector<StatListener> m_clients;
+public:
+ MgmStatService(class MgmtSrvr * m) : m_clients(5) {
+ m_mgmsrv = m;
+ }
+
+ void add_listener(const StatListener&);
+
+ void log(int eventType, const Uint32* theData, NodeId nodeId);
+
+ void stopSessions();
+};
+
/**
* @class MgmtSrvr
* @brief Main class for the management server.
@@ -63,11 +86,6 @@ class SocketServer;
class MgmtSrvr {
public:
- class StatisticsListner {
- public:
- virtual void println_statistics(const BaseString &s) = 0;
- };
-
// some compilers need all of this
class Allocated_resources;
friend class Allocated_resources;
@@ -85,11 +103,6 @@ public:
};
/**
- * Set a reference to the socket server.
- */
- void setStatisticsListner(StatisticsListner* listner);
-
- /**
* Start/initate the event log.
*/
void startEventLog();
@@ -151,15 +164,6 @@ public:
STATIC_CONST( NO_CONTACT_WITH_DB_NODES = 5030 );
/**
- * This class holds all statistical variables fetched with
- * the getStatistics methods.
- */
- class Statistics { // TODO, Real statistic data to be added
- public:
- int _test1;
- };
-
- /**
* This enum specifies the different signal loggig modes possible to set
* with the setSignalLoggingMode method.
*/
@@ -169,7 +173,7 @@ public:
MgmtSrvr(NodeId nodeId, /* Local nodeid */
const BaseString &config_filename, /* Where to save config */
- const BaseString &ndb_config_filename, /* Ndb.cfg filename */
+ LocalConfig &local_config, /* Ndb.cfg filename */
Config * config);
NodeId getOwnNodeId() const {return _ownNodeId;};
@@ -206,7 +210,7 @@ public:
typedef void (* EnterSingleCallback)(int nodeId, void * anyData,
int errorCode);
typedef void (* ExitSingleCallback)(int nodeId, void * anyData,
- int errorCode);
+ int errorCode);
/**
* Lock configuration
@@ -313,13 +317,6 @@ public:
bool abort = false,
int * stopCount = 0, StopCallback = 0, void * anyData = 0);
- int setEventReportingLevel(int processId,
- const class SetLogLevelOrd & logLevel,
- bool isResend = false);
-
- int startStatisticEventReporting(int level = 5);
-
-
struct BackupEvent {
enum Event {
BackupStarted = 1,
@@ -356,8 +353,6 @@ public:
/**
* Backup functionallity
*/
- typedef void (* BackupCallback)(const BackupEvent& Event);
- BackupCallback setCallback(BackupCallback);
int startBackup(Uint32& backupId, bool waitCompleted = false);
int abortBackup(Uint32 backupId);
int performBackup(Uint32* backupId);
@@ -377,22 +372,8 @@ public:
// INVALID_LEVEL
//**************************************************************************
- /**
- * Sets the Node's log level, i.e., its local event reporting.
- *
- * @param processId the DB node id.
- * @param logLevel the log level.
- * @param isResend Flag to indicate for resending log levels
- * during node restart
-
- * @return 0 if successful or NO_CONTACT_WITH_PROCESS,
- * SEND_OR_RECEIVE_FAILED,
- * COULD_NOT_ALLOCATE_MEMORY
- */
- int setNodeLogLevel(int processId,
- const class SetLogLevelOrd & logLevel,
- bool isResend = false);
-
+ int setEventReportingLevelImpl(int processId, const EventSubscribeReq& ll);
+ int setNodeLogLevelImpl(int processId, const SetLogLevelOrd & ll);
/**
* Insert an error in a DB process.
@@ -509,11 +490,6 @@ public:
NodeId getPrimaryNode() const;
/**
- * Returns the statistics port number.
- * @return statistic port number.
- */
- int getStatPort() const;
- /**
* Returns the port number.
* @return port number.
*/
@@ -526,10 +502,7 @@ public:
private:
//**************************************************************************
- int setEventReportingLevelImpl(int processId,
- const class SetLogLevelOrd & logLevel,
- bool isResend = false);
-
+ int setEventReportingLevel(int processId, LogLevel::EventCategory, Uint32);
/**
* Check if it is possible to send a signal to a (DB) process
@@ -555,18 +528,14 @@ private:
NdbMutex *m_configMutex;
const Config * _config;
Config * m_newConfig;
+ LocalConfig &m_local_config;
BaseString m_configFilename;
- BaseString m_localNdbConfigFilename;
Uint32 m_nextConfigGenerationNumber;
NodeBitmask m_reserved_nodes;
Allocated_resources m_allocated_resources;
struct in_addr m_connect_address[MAX_NODES];
- int _setVarReqResult; // The result of the SET_VAR_REQ response
- Statistics _statistics; // handleSTATISTICS_CONF store the result here,
- // and getStatistics reads it.
-
//**************************************************************************
// Specific signal handling methods
//**************************************************************************
@@ -598,14 +567,6 @@ private:
// Returns: -
//**************************************************************************
- int handleSTATISTICS_CONF(NdbApiSignal* signal);
- //**************************************************************************
- // Description: Handle reception of signal STATISTICS_CONF
- // Parameters:
- // signal: The recieved signal
- // Returns: TODO, to be defined
- //**************************************************************************
-
void handle_MGM_LOCK_CONFIG_REQ(NdbApiSignal *signal);
void handle_MGM_UNLOCK_CONFIG_REQ(NdbApiSignal *signal);
@@ -631,7 +592,6 @@ private:
*/
enum WaitSignalType {
NO_WAIT, // We don't expect to receive any signal
- WAIT_STATISTICS, // Accept STATISTICS_CONF
WAIT_SET_VAR, // Accept SET_VAR_CONF and SET_VAR_REF
WAIT_SUBSCRIBE_CONF, // Accept event subscription confirmation
WAIT_STOP,
@@ -733,14 +693,6 @@ private:
class SignalQueue m_signalRecvQueue;
- enum ndb_mgm_node_type nodeTypes[MAX_NODES];
-
- int theConfCount; // The number of expected conf signals
-
- StatisticsListner * m_statisticsListner; // Used for sending statistics info
- bool _isStatPortActive;
- bool _isClusterLogStatActive;
-
struct StopRecord {
StopRecord(){ inUse = false; callback = 0; singleUserMode = false;}
bool inUse;
@@ -765,10 +717,16 @@ private:
void handleStopReply(NodeId nodeId, Uint32 errCode);
int translateStopRef(Uint32 errCode);
-
+
bool _isStopThread;
int _logLevelThreadSleep;
- int _startedNodeId;
+ MutexVector<NodeId> m_started_nodes;
+ MutexVector<EventSubscribeReq> m_log_level_requests;
+ LogLevel m_nodeLogLevel[MAX_NODES];
+ enum ndb_mgm_node_type nodeTypes[MAX_NODES];
+ friend class MgmApiSession;
+ friend class MgmStatService;
+ MgmStatService m_statisticsListner;
/**
* Handles the thread wich upon a 'Node is started' event will
@@ -782,15 +740,12 @@ private:
static void *signalRecvThread_C(void *);
void signalRecvThreadRun();
- NodeLogLevelList* _nodeLogLevelList;
- NodeLogLevelList* _clusterLogLevelList;
-
void backupCallback(BackupEvent &);
- BackupCallback m_backupCallback;
BackupEvent m_lastBackupEvent;
Config *_props;
+ int send(class NdbApiSignal* signal, Uint32 node, Uint32 node_type);
public:
/**
* This method does not exist
diff --git a/ndb/src/mgmsrv/MgmtSrvrConfig.cpp b/ndb/src/mgmsrv/MgmtSrvrConfig.cpp
index 44c2aadd1e2..1d51061e909 100644
--- a/ndb/src/mgmsrv/MgmtSrvrConfig.cpp
+++ b/ndb/src/mgmsrv/MgmtSrvrConfig.cpp
@@ -288,8 +288,7 @@ MgmtSrvr::readConfig() {
Config *
MgmtSrvr::fetchConfig() {
- ConfigRetriever cr(NDB_VERSION, NODE_TYPE_MGM);
- cr.setLocalConfigFileName(m_localNdbConfigFilename.c_str());
+ ConfigRetriever cr(m_local_config, NDB_VERSION, NODE_TYPE_MGM);
struct ndb_mgm_configuration * tmp = cr.getConfig();
if(tmp != 0){
Config * conf = new Config();
diff --git a/ndb/src/mgmsrv/NodeLogLevel.cpp b/ndb/src/mgmsrv/NodeLogLevel.cpp
deleted file mode 100644
index 5271cdb0f2b..00000000000
--- a/ndb/src/mgmsrv/NodeLogLevel.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include "NodeLogLevel.hpp"
-// TODO_RONM: Clearly getCategory and getLevel is not correctly coded. Must be taken care of.
-
-NodeLogLevel::NodeLogLevel(int nodeId, const SetLogLevelOrd& ll)
-{
- m_nodeId = nodeId;
- m_logLevel = ll;
-}
-
-NodeLogLevel::~NodeLogLevel()
-{
-}
-
-int
-NodeLogLevel::getNodeId() const
-{
- return m_nodeId;
-}
-
-Uint32
-NodeLogLevel::getCategory() const
-{
- for (Uint32 i = 0; i < m_logLevel.noOfEntries; i++)
- {
- return m_logLevel.theCategories[i];
- }
- return 0;
-}
-
-int
-NodeLogLevel::getLevel() const
-{
- for (Uint32 i = 0; i < m_logLevel.noOfEntries; i++)
- {
- return m_logLevel.theLevels[i];
- }
- return 0;
-}
-
-void
-NodeLogLevel::setLevel(int level)
-{
- for (Uint32 i = 0; i < m_logLevel.noOfEntries; i++)
- {
- m_logLevel.theLevels[i] = level;
- }
-
-}
-
-SetLogLevelOrd
-NodeLogLevel::getLogLevelOrd() const
-{
- return m_logLevel;
-}
diff --git a/ndb/src/mgmsrv/NodeLogLevel.hpp b/ndb/src/mgmsrv/NodeLogLevel.hpp
deleted file mode 100644
index 3ad758cde99..00000000000
--- a/ndb/src/mgmsrv/NodeLogLevel.hpp
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef NODELOGLEVEL_H
-#define NODELOGLEVEL_H
-
-#include <ndb_global.h>
-
-#include <signaldata/SetLogLevelOrd.hpp>
-
-/**
- * Holds a DB node's log level settings for both local and event log levels.
- * It only holds one log level setting even though SetLogLevelOrd can handle
- * multiple log levels at once, it is not used in that way in the managment
- * server.
- *
- * @version #@ $Id: NodeLogLevel.hpp,v 1.2 2003/07/05 17:40:22 elathal Exp $
- */
-class NodeLogLevel
-{
-public:
- NodeLogLevel(int nodeId, const SetLogLevelOrd& ll);
- ~NodeLogLevel();
-
- int getNodeId() const;
- Uint32 getCategory() const;
- int getLevel() const;
- void setLevel(int level);
- SetLogLevelOrd getLogLevelOrd() const;
-
-private:
- NodeLogLevel();
- NodeLogLevel(const NodeLogLevel&);
- bool operator == (const NodeLogLevel&);
- NodeLogLevel operator = (const NodeLogLevel&);
-
- int m_nodeId;
- SetLogLevelOrd m_logLevel;
-};
-
-#endif
diff --git a/ndb/src/mgmsrv/NodeLogLevelList.cpp b/ndb/src/mgmsrv/NodeLogLevelList.cpp
deleted file mode 100644
index 6c7c091c1a8..00000000000
--- a/ndb/src/mgmsrv/NodeLogLevelList.cpp
+++ /dev/null
@@ -1,182 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include <ndb_global.h>
-
-#include "NodeLogLevelList.hpp"
-#include "NodeLogLevel.hpp"
-
-//
-// PUBLIC
-//
-
-NodeLogLevelList::NodeLogLevelList() :
- m_size(0),
- m_pHeadNode(NULL),
- m_pTailNode(NULL),
- m_pCurrNode(NULL)
-{
-}
-
-NodeLogLevelList::~NodeLogLevelList()
-{
- removeAll();
-}
-
-void
-NodeLogLevelList::add(NodeLogLevel* pNewNode)
-{
- NodeLogLevelNode* pNode = new NodeLogLevelNode();
-
- if (m_pHeadNode == NULL)
- {
- m_pHeadNode = pNode;
- pNode->pPrev = NULL;
- }
- else
- {
- m_pTailNode->pNext = pNode;
- pNode->pPrev = m_pTailNode;
- }
- m_pTailNode = pNode;
- pNode->pNext = NULL;
- pNode->pHandler = pNewNode;
-
- m_size++;
-}
-
-bool
-NodeLogLevelList::remove(NodeLogLevel* pRemoveNode)
-{
- NodeLogLevelNode* pNode = m_pHeadNode;
- bool removed = false;
- do
- {
- if (pNode->pHandler == pRemoveNode)
- {
- removeNode(pNode);
- removed = true;
- break;
- }
- } while ( (pNode = next(pNode)) != NULL);
-
- return removed;
-}
-
-void
-NodeLogLevelList::removeAll()
-{
- while (m_pHeadNode != NULL)
- {
- removeNode(m_pHeadNode);
- }
-}
-
-NodeLogLevel*
-NodeLogLevelList::next()
-{
- NodeLogLevel* pHandler = NULL;
- if (m_pCurrNode == NULL)
- {
- m_pCurrNode = m_pHeadNode;
- if (m_pCurrNode != NULL)
- {
- pHandler = m_pCurrNode->pHandler;
- }
- }
- else
- {
- m_pCurrNode = next(m_pCurrNode); // Next node
- if (m_pCurrNode != NULL)
- {
- pHandler = m_pCurrNode->pHandler;
- }
- }
-
- return pHandler;
-}
-
-int
-NodeLogLevelList::size() const
-{
- return m_size;
-}
-
-//
-// PRIVATE
-//
-
-NodeLogLevelList::NodeLogLevelNode*
-NodeLogLevelList::next(NodeLogLevelNode* pNode)
-{
- NodeLogLevelNode* pCurr = pNode;
- if (pNode->pNext != NULL)
- {
- pCurr = pNode->pNext;
- }
- else
- {
- // Tail
- pCurr = NULL;
- }
- return pCurr;
-}
-
-NodeLogLevelList::NodeLogLevelNode*
-NodeLogLevelList::prev(NodeLogLevelNode* pNode)
-{
- NodeLogLevelNode* pCurr = pNode;
- if (pNode->pPrev != NULL) // head
- {
- pCurr = pNode->pPrev;
- }
- else
- {
- // Head
- pCurr = NULL;
- }
-
- return pCurr;
-}
-
-void
-NodeLogLevelList::removeNode(NodeLogLevelNode* pNode)
-{
- if (pNode->pPrev == NULL) // If head
- {
- m_pHeadNode = pNode->pNext;
- }
- else
- {
- pNode->pPrev->pNext = pNode->pNext;
- }
-
- if (pNode->pNext == NULL) // if tail
- {
- m_pTailNode = pNode->pPrev;
- }
- else
- {
- pNode->pNext->pPrev = pNode->pPrev;
- }
-
- pNode->pNext = NULL;
- pNode->pPrev = NULL;
- delete pNode->pHandler; // Delete log handler
- delete pNode;
-
- m_size--;
-}
diff --git a/ndb/src/mgmsrv/NodeLogLevelList.hpp b/ndb/src/mgmsrv/NodeLogLevelList.hpp
deleted file mode 100644
index 4a55ee211e2..00000000000
--- a/ndb/src/mgmsrv/NodeLogLevelList.hpp
+++ /dev/null
@@ -1,93 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef NODELOGLEVELLIST_H
-#define NODELOGLEVELLIST_H
-
-class NodeLogLevel;
-
-/**
- * Provides a simple linked list of NodeLogLevel.
- *
- * @see NodeLogLevel
- * @version #@ $Id: NodeLogLevelList.hpp,v 1.1 2002/08/09 12:53:50 eyualex Exp $
- */
-class NodeLogLevelList
-{
-public:
- /**
- * Default Constructor.
- */
- NodeLogLevelList();
-
- /**
- * Destructor.
- */
- ~NodeLogLevelList();
-
- /**
- * Adds a new node.
- *
- * @param pNewHandler a new NodeLogLevel.
- */
- void add(NodeLogLevel* pNewNode);
-
- /**
- * Removes a NodeLogLevel from the list and call its destructor.
- *
- * @param pRemoveHandler the NodeLogLevel to remove
- */
- bool remove(NodeLogLevel* pRemoveNode);
-
- /**
- * Removes all items.
- */
- void removeAll();
-
- /**
- * Returns the next node in the list.
- * returns a node or NULL.
- */
- NodeLogLevel* next();
-
- /**
- * Returns the size of the list.
- */
- int size() const;
-private:
- /** List node */
- struct NodeLogLevelNode
- {
- NodeLogLevelNode* pPrev;
- NodeLogLevelNode* pNext;
- NodeLogLevel* pHandler;
- };
-
- NodeLogLevelNode* next(NodeLogLevelNode* pNode);
- NodeLogLevelNode* prev(NodeLogLevelNode* pNode);
-
- void removeNode(NodeLogLevelNode* pNode);
-
- int m_size;
-
- NodeLogLevelNode* m_pHeadNode;
- NodeLogLevelNode* m_pTailNode;
- NodeLogLevelNode* m_pCurrNode;
-};
-
-#endif
-
-
diff --git a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp
index c529e277e0e..684c10dbd4d 100644
--- a/ndb/src/mgmsrv/Services.cpp
+++ b/ndb/src/mgmsrv/Services.cpp
@@ -23,6 +23,7 @@
#include <mgmapi.h>
#include <EventLogger.hpp>
#include <signaldata/SetLogLevelOrd.hpp>
+#include <LogLevel.hpp>
#include <BaseString.hpp>
#include <Base64.hpp>
@@ -133,7 +134,7 @@ ParserRow<MgmApiSession> commands[] = {
MGM_ARG("public key", String, Mandatory, "Public key"),
MGM_CMD("get version", &MgmApiSession::getVersion, ""),
-
+
MGM_CMD("get status", &MgmApiSession::getStatus, ""),
MGM_CMD("get info clusterlog", &MgmApiSession::getInfoClusterLog, ""),
@@ -236,7 +237,11 @@ ParserRow<MgmApiSession> commands[] = {
MGM_ARG("node", String, Mandatory, "Node"),
MGM_ARG("parameter", String, Mandatory, "Parameter"),
MGM_ARG("value", String, Mandatory, "Value"),
-
+
+ MGM_CMD("listen event", &MgmApiSession::listen_event, ""),
+ MGM_ARG("node", Int, Optional, "Node"),
+ MGM_ARG("filter", String, Mandatory, "Event category"),
+
MGM_END()
};
@@ -289,7 +294,8 @@ MgmApiSession::runSession() {
break;
}
}
- NDB_CLOSE_SOCKET(m_socket);
+ if(m_socket >= 0)
+ NDB_CLOSE_SOCKET(m_socket);
}
#ifdef MGM_GET_CONFIG_BACKWARDS_COMPAT
@@ -418,7 +424,8 @@ MgmApiSession::get_nodeid(Parser_t::Context &,
&addr, &addrlen, error_string)){
const char *alias;
const char *str;
- alias= ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type)nodetype, &str);
+ alias= ndb_mgm_get_node_type_alias_string((enum ndb_mgm_node_type)
+ nodetype, &str);
m_output->println(cmd);
m_output->println("result: %s", error_string.c_str());
m_output->println("");
@@ -554,7 +561,7 @@ MgmApiSession::getStatPort(Parser_t::Context &,
const class Properties &) {
m_output->println("get statport reply");
- m_output->println("tcpport: %d", m_mgmsrv.getStatPort());
+ m_output->println("tcpport: %d", 0);
m_output->println("");
}
@@ -756,13 +763,12 @@ MgmApiSession::bye(Parser<MgmApiSession>::Context &,
void
MgmApiSession::setClusterLogLevel(Parser<MgmApiSession>::Context &,
Properties const &args) {
- Uint32 node, level;
- BaseString categoryName, errorString;
+ Uint32 node, level, category;
+ BaseString errorString;
SetLogLevelOrd logLevel;
int result;
- logLevel.clear();
args.get("node", &node);
- args.get("category", categoryName);
+ args.get("category", &category);
args.get("level", &level);
/* XXX should use constants for this value */
@@ -771,25 +777,17 @@ MgmApiSession::setClusterLogLevel(Parser<MgmApiSession>::Context &,
goto error;
}
- categoryName.ndb_toupper();
-
- LogLevel::EventCategory category;
- if(!EventLogger::matchEventCategory(categoryName.c_str(), &category)) {
- errorString.assign("Unknown category");
- goto error;
- }
-
- logLevel.setLogLevel(category, level);
- result = m_mgmsrv.setEventReportingLevel(node, logLevel);
-
+ EventSubscribeReq req;
+ req.blockRef = 0;
+ req.noOfEntries = 1;
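+  // each entry packs the event category in the upper 16 bits and the level in the lower 16 bits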
+ req.theData[0] = (category << 16) | level;
+ m_mgmsrv.m_log_level_requests.push_back(req);
+
m_output->println("set cluster loglevel reply");
- if(result != 0)
- m_output->println("result: %s", m_mgmsrv.getErrorText(result));
- else
- m_output->println("result: Ok");
+ m_output->println("result: Ok");
m_output->println("");
return;
- error:
+error:
m_output->println("set cluster loglevel reply");
m_output->println("result: %s", errorString.c_str());
m_output->println("");
@@ -798,13 +796,13 @@ MgmApiSession::setClusterLogLevel(Parser<MgmApiSession>::Context &,
void
MgmApiSession::setLogLevel(Parser<MgmApiSession>::Context &,
Properties const &args) {
- Uint32 node = 0, level = 0;
- BaseString categoryName, errorString;
+ Uint32 node = 0, level = 0, category;
+ BaseString errorString;
SetLogLevelOrd logLevel;
int result;
logLevel.clear();
args.get("node", &node);
- args.get("category", categoryName);
+ args.get("category", &category);
args.get("level", &level);
/* XXX should use constants for this value */
@@ -813,23 +811,14 @@ MgmApiSession::setLogLevel(Parser<MgmApiSession>::Context &,
goto error;
}
- categoryName.ndb_toupper();
-
- LogLevel::EventCategory category;
- if(!EventLogger::matchEventCategory(categoryName.c_str(), &category)) {
- errorString.assign("Unknown category");
- goto error;
- }
-
- logLevel.setLogLevel(category, level);
-
- result = m_mgmsrv.setNodeLogLevel(node, logLevel);
-
+ EventSubscribeReq req;
+ req.blockRef = node;
+ req.noOfEntries = 1;
+ req.theData[0] = (category << 16) | level;
+ m_mgmsrv.m_log_level_requests.push_back(req);
+
m_output->println("set loglevel reply");
- if(result != 0)
- m_output->println("result: %s", m_mgmsrv.getErrorText(result));
- else
- m_output->println("result: Ok");
+ m_output->println("result: Ok");
m_output->println("");
return;
error:
@@ -1248,33 +1237,91 @@ MgmApiSession::configChange(Parser_t::Context &,
m_output->println("");
}
-void
-MgmStatService::println_statistics(const BaseString &line){
- MutexVector<NDB_SOCKET_TYPE> copy(m_sockets.size());
- m_sockets.lock();
+static NdbOut&
+operator<<(NdbOut& out, const LogLevel & ll)
+{
+ out << "[LogLevel: ";
+ for(size_t i = 0; i<LogLevel::LOGLEVEL_CATEGORIES; i++)
+ out << ll.getLogLevel((LogLevel::EventCategory)i) << " ";
+ out << "]";
+  return out;
+}
+
+void
+MgmStatService::log(int eventType, const Uint32* theData, NodeId nodeId){
+
+ Uint32 threshold = 0;
+ LogLevel::EventCategory cat;
+
+ for(unsigned i = 0; i<EventLogger::matrixSize; i++){
+ if(EventLogger::matrix[i].eventType == eventType){
+ cat = EventLogger::matrix[i].eventCategory;
+ threshold = EventLogger::matrix[i].threshold;
+ break;
+ }
+ }
+
+ char m_text[256];
+ EventLogger::getText(m_text, sizeof(m_text), eventType, theData, nodeId);
+
+ Vector<NDB_SOCKET_TYPE> copy;
+ m_clients.lock();
int i;
- for(i = m_sockets.size() - 1; i >= 0; i--){
- if(println_socket(m_sockets[i], MAX_WRITE_TIMEOUT, line.c_str()) == -1){
- copy.push_back(m_sockets[i]);
- m_sockets.erase(i, false);
+ for(i = m_clients.size() - 1; i >= 0; i--){
+ if(threshold <= m_clients[i].m_logLevel.getLogLevel(cat)){
+ if(m_clients[i].m_socket >= 0 &&
+ println_socket(m_clients[i].m_socket,
+ MAX_WRITE_TIMEOUT, m_text) == -1){
+ copy.push_back(m_clients[i].m_socket);
+ m_clients.erase(i, false);
+ }
}
}
- m_sockets.unlock();
+ m_clients.unlock();
- for(i = copy.size() - 1; i >= 0; i--){
+ for(i = 0; (unsigned)i<copy.size(); i++){
NDB_CLOSE_SOCKET(copy[i]);
- copy.erase(i);
}
- if(m_sockets.size() == 0 || false){
- m_mgmsrv->startStatisticEventReporting(0);
+
+ if(copy.size()){
+ LogLevel tmp; tmp.clear();
+ m_clients.lock();
+ for(i = 0; i < m_clients.size(); i++){
+ tmp.set_max(m_clients[i].m_logLevel);
+ }
+ m_clients.unlock();
+
+ if(!(tmp == m_logLevel)){
+ m_logLevel = tmp;
+ EventSubscribeReq req;
+ req = tmp;
+ req.blockRef = 0;
+ m_mgmsrv->m_log_level_requests.push_back(req);
+ }
+ }
+}
+
+void
+MgmStatService::add_listener(const StatListener& client){
+ m_clients.push_back(client);
+ LogLevel tmp = m_logLevel;
+ tmp.set_max(client.m_logLevel);
+
+ if(!(tmp == m_logLevel)){
+ m_logLevel = tmp;
+ EventSubscribeReq req;
+ req = tmp;
+ req.blockRef = 0;
+ m_mgmsrv->m_log_level_requests.push_back(req);
}
}
void
MgmStatService::stopSessions(){
- for(int i = m_sockets.size() - 1; i >= 0; i--){
- NDB_CLOSE_SOCKET(m_sockets[i]);
- m_sockets.erase(i);
+ for(int i = m_clients.size() - 1; i >= 0; i--){
+ if(m_clients[i].m_socket >= 0){
+ NDB_CLOSE_SOCKET(m_clients[i].m_socket);
+ m_clients.erase(i);
+ }
}
}
@@ -1298,6 +1345,75 @@ MgmApiSession::setParameter(Parser_t::Context &,
m_output->println("");
}
+void
+MgmApiSession::listen_event(Parser<MgmApiSession>::Context & ctx,
+ Properties const & args) {
+
+ BaseString node, param, value;
+ args.get("node", node);
+ args.get("filter", param);
+
+ int result = 0;
+ BaseString msg;
+
+ MgmStatService::StatListener le;
+ le.m_socket = m_socket;
+
+ Vector<BaseString> list;
+ param.trim();
+ param.split(list, " ,");
+ for(size_t i = 0; i<list.size(); i++){
+ Vector<BaseString> spec;
+ list[i].trim();
+ list[i].split(spec, "=:");
+ if(spec.size() != 2){
+ msg.appfmt("Invalid filter specification: >%s< >%s< %d",
+ param.c_str(), list[i].c_str(), spec.size());
+ result = -1;
+ goto done;
+ }
+
+ spec[0].trim().ndb_toupper();
+ int category = ndb_mgm_match_event_category(spec[0].c_str());
+ if(category == NDB_MGM_ILLEGAL_EVENT_CATEGORY){
+ category = atoi(spec[0].c_str());
+ if(category < NDB_MGM_MIN_EVENT_CATEGORY ||
+ category > NDB_MGM_MAX_EVENT_CATEGORY){
+ msg.appfmt("Unknown category: >%s<", spec[0].c_str());
+ result = -1;
+ goto done;
+ }
+ }
+
+ int level = atoi(spec[1].c_str());
+ if(level < 0 || level > 15){
+ msg.appfmt("Invalid level: >%s<", spec[1].c_str());
+ result = -1;
+ goto done;
+ }
+ category -= CFG_MIN_LOGLEVEL;
+ le.m_logLevel.setLogLevel((LogLevel::EventCategory)category, level);
+ }
+
+ if(list.size() == 0){
+ msg.appfmt("Empty filter specification");
+ result = -1;
+ goto done;
+ }
+
+ m_mgmsrv.m_statisticsListner.add_listener(le);
+
+ m_stop = true;
+ m_socket = -1;
+
+done:
+ m_output->println("listen event");
+ m_output->println("result: %d", result);
+ if(result != 0)
+ m_output->println("msg: %s", msg.c_str());
+ m_output->println("");
+}
+
template class MutexVector<int>;
template class Vector<ParserRow<MgmApiSession> const*>;
template class Vector<unsigned short>;
diff --git a/ndb/src/mgmsrv/Services.hpp b/ndb/src/mgmsrv/Services.hpp
index 9cf8b59be8f..e47820826b6 100644
--- a/ndb/src/mgmsrv/Services.hpp
+++ b/ndb/src/mgmsrv/Services.hpp
@@ -83,7 +83,8 @@ public:
void configChange(Parser_t::Context &ctx, const class Properties &args);
void setParameter(Parser_t::Context &ctx, const class Properties &args);
-
+ void listen_event(Parser_t::Context &ctx, const class Properties &args);
+
void repCommand(Parser_t::Context &ctx, const class Properties &args);
};
@@ -103,28 +104,4 @@ public:
}
};
-class MgmStatService : public SocketServer::Service,
- public MgmtSrvr::StatisticsListner
-{
- class MgmtSrvr * m_mgmsrv;
- MutexVector<NDB_SOCKET_TYPE> m_sockets;
-public:
- MgmStatService() : m_sockets(5) {
- m_mgmsrv = 0;
- }
-
- void setMgm(class MgmtSrvr * mgmsrv){
- m_mgmsrv = mgmsrv;
- }
-
- SocketServer::Session * newSession(NDB_SOCKET_TYPE socket){
- m_sockets.push_back(socket);
- m_mgmsrv->startStatisticEventReporting(5);
- return 0;
- }
-
- void stopSessions();
-
- void println_statistics(const BaseString &line);
-};
#endif
diff --git a/ndb/src/mgmsrv/main.cpp b/ndb/src/mgmsrv/main.cpp
index 323a836cdd4..1a2b95391a9 100644
--- a/ndb/src/mgmsrv/main.cpp
+++ b/ndb/src/mgmsrv/main.cpp
@@ -15,7 +15,6 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <ndb_global.h>
-#include <my_sys.h>
#include "MgmtSrvr.hpp"
#include "EventLogger.hpp"
@@ -70,7 +69,6 @@ struct MgmGlobals {
bool use_specific_ip;
char * interface_name;
int port;
- int port_stats;
/** The configuration of the cluster */
Config * cluster_config;
@@ -82,6 +80,7 @@ struct MgmGlobals {
SocketServer * socketServer;
};
+int g_no_nodeid_checks= 0;
static MgmGlobals glob;
@@ -118,7 +117,9 @@ struct getargs args[] = {
"Specify configuration file connect string (will default use Ndb.cfg if available)",
"filename" },
{ "interactive", 0, arg_flag, &glob.interactive,
- "Run interactive. Not supported but provided for testing purposes", "" },
+ "Run interactive. Not supported but provided for testing purposes", "" },
+ { "no-nodeid-checks", 0, arg_flag, &g_no_nodeid_checks,
+ "Do not provide any node id checks", "" },
{ "nodaemon", 0, arg_flag, &glob.non_interactive,
"Don't run as daemon, but don't read from stdin", "non-interactive" }
};
@@ -129,6 +130,7 @@ int num_args = sizeof(args) / sizeof(args[0]);
* MAIN
*/
NDB_MAIN(mgmsrv){
+ ndb_init();
/**
* OSE specific. Enable shared ownership of file system resources.
* This is needed in order to use the cluster log since the events
@@ -151,7 +153,6 @@ NDB_MAIN(mgmsrv){
glob.daemon= 0;
}
- my_init();
#ifndef DBUG_OFF
if (debug_option)
DBUG_PUSH(debug_option);
@@ -169,20 +170,22 @@ NDB_MAIN(mgmsrv){
MgmApiService * mapi = new MgmApiService();
- MgmStatService * mstat = new MgmStatService();
-
/****************************
* Read configuration files *
****************************/
- if (!readLocalConfig())
+ LocalConfig local_config;
+ if(!local_config.init(0,glob.local_config_filename)){
+ local_config.printError();
goto error_end;
+ }
+ glob.localNodeId = local_config._ownNodeId;
+
if (!readGlobalConfig())
goto error_end;
glob.mgmObject = new MgmtSrvr(glob.localNodeId,
BaseString(glob.config_filename),
- BaseString(glob.local_config_filename == 0 ?
- "" : glob.local_config_filename),
+ local_config,
glob.cluster_config);
chdir(NdbConfig_get_path(0));
@@ -230,13 +233,6 @@ NDB_MAIN(mgmsrv){
goto error_end;
}
- if(!glob.socketServer->setup(mstat, glob.port_stats, glob.interface_name)){
- ndbout_c("Unable to setup statistic port: %d!\nPlease check if the port"
- " is already used.", glob.port_stats);
- delete mstat;
- goto error_end;
- }
-
if(!glob.mgmObject->check_start()){
ndbout_c("Unable to check start management server.");
ndbout_c("Probably caused by illegal initial configuration file.");
@@ -267,10 +263,7 @@ NDB_MAIN(mgmsrv){
}
//glob.mgmObject->saveConfig();
-
- mstat->setMgm(glob.mgmObject);
mapi->setMgm(glob.mgmObject);
- glob.mgmObject->setStatisticsListner(mstat);
char msg[256];
snprintf(msg, sizeof(msg),
@@ -278,8 +271,8 @@ NDB_MAIN(mgmsrv){
ndbout_c(msg);
g_EventLogger.info(msg);
- snprintf(msg, 256, "Id: %d, Command port: %d, Statistics port: %d",
- glob.localNodeId, glob.port, glob.port_stats);
+ snprintf(msg, 256, "Id: %d, Command port: %d",
+ glob.localNodeId, glob.port);
ndbout_c(msg);
g_EventLogger.info(msg);
@@ -309,7 +302,6 @@ NDB_MAIN(mgmsrv){
MgmGlobals::MgmGlobals(){
// Default values
port = 0;
- port_stats = 0;
config_filename = NULL;
local_config_filename = NULL;
interface_name = 0;
@@ -333,37 +325,9 @@ MgmGlobals::~MgmGlobals(){
}
/**
- * @fn readLocalConfig
- * @param glob : Global variables
- * @return true if success, false otherwise.
- *
- * How to get LOCAL CONFIGURATION FILE:
- * 1. Use local config file name (-l)
- * 2. Use environment NDB_HOME + Ndb.cfg
- * If NDB_HOME is not set this results in reading from local dir
- */
-static bool
-readLocalConfig(){
- // Read local config file
- LocalConfig lc;
- if(!lc.init(glob.local_config_filename)){
- lc.printError();
- return false;
- }
-
- glob.localNodeId = lc._ownNodeId;
- return true;
-}
-
-
-/**
* @fn readGlobalConfig
* @param glob : Global variables
* @return true if success, false otherwise.
- *
- * How to get the GLOBAL CONFIGURATION:
- * 1. Use config file name (this is a text file)(-c)
- * 2. Use name from line 2 of local config file, ex: file:///c/ndb/Ndb_cfg.bin
*/
static bool
readGlobalConfig() {
diff --git a/ndb/src/mgmsrv/mkconfig/mkconfig.cpp b/ndb/src/mgmsrv/mkconfig/mkconfig.cpp
index 3b2046d7b49..28823aaa35e 100644
--- a/ndb/src/mgmsrv/mkconfig/mkconfig.cpp
+++ b/ndb/src/mgmsrv/mkconfig/mkconfig.cpp
@@ -32,6 +32,7 @@ void usage(const char * prg){
NDB_COMMAND(mkconfig,
"mkconfig", "mkconfig",
"Make a binary configuration from a config file", 16384){
+ ndb_init();
if(argc < 3){
usage(argv[0]);
return 0;
diff --git a/ndb/src/ndbapi/Ndb.cpp b/ndb/src/ndbapi/Ndb.cpp
index 7312eafb2f5..cb126a221a8 100644
--- a/ndb/src/ndbapi/Ndb.cpp
+++ b/ndb/src/ndbapi/Ndb.cpp
@@ -327,7 +327,11 @@ Ndb::startTransaction(Uint32 aPriority, const char * keyData, Uint32 keyLen)
} else {
nodeId = 0;
}//if
- DBUG_RETURN(startTransactionLocal(aPriority, nodeId));
+ {
+ NdbConnection *trans= startTransactionLocal(aPriority, nodeId);
+ DBUG_PRINT("exit",("start trans= 0x%x", trans));
+ DBUG_RETURN(trans);
+ }
} else {
DBUG_RETURN(NULL);
}//if
@@ -451,7 +455,7 @@ Ndb::startTransactionLocal(Uint32 aPriority, Uint32 nodeId)
abort();
}
#endif
- DBUG_PRINT("exit", ("transaction id: %d", tConnection->getTransactionId()));
+ DBUG_PRINT("exit", ("transid= %lld", tConnection->getTransactionId()));
DBUG_RETURN(tConnection);
}//Ndb::startTransactionLocal()
@@ -465,6 +469,8 @@ void
Ndb::closeTransaction(NdbConnection* aConnection)
{
DBUG_ENTER("Ndb::closeTransaction");
+ DBUG_PRINT("enter",("close trans= 0x%x, transid= %lld",
+ aConnection, aConnection->getTransactionId()));
NdbConnection* tCon;
NdbConnection* tPreviousCon;
diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp
index 1e28fbc2db5..c8414ec16a3 100644
--- a/ndb/src/ndbapi/NdbDictionary.cpp
+++ b/ndb/src/ndbapi/NdbDictionary.cpp
@@ -65,7 +65,7 @@ NdbDictionary::Column::getName() const {
void
NdbDictionary::Column::setType(Type t){
- m_impl.m_type = t;
+ m_impl.init(t);
}
NdbDictionary::Column::Type
@@ -103,6 +103,54 @@ NdbDictionary::Column::getLength() const{
return m_impl.m_length;
}
+void
+NdbDictionary::Column::setInlineSize(int size)
+{
+ m_impl.m_precision = size;
+}
+
+void
+NdbDictionary::Column::setCharset(CHARSET_INFO* cs)
+{
+ m_impl.m_cs = cs;
+}
+
+CHARSET_INFO*
+NdbDictionary::Column::getCharset() const
+{
+ return m_impl.m_cs;
+}
+
+int
+NdbDictionary::Column::getInlineSize() const
+{
+ return m_impl.m_precision;
+}
+
+void
+NdbDictionary::Column::setPartSize(int size)
+{
+ m_impl.m_scale = size;
+}
+
+int
+NdbDictionary::Column::getPartSize() const
+{
+ return m_impl.m_scale;
+}
+
+void
+NdbDictionary::Column::setStripeSize(int size)
+{
+ m_impl.m_length = size;
+}
+
+int
+NdbDictionary::Column::getStripeSize() const
+{
+ return m_impl.m_length;
+}
+
int
NdbDictionary::Column::getSize() const{
return m_impl.m_attrSize;
@@ -808,7 +856,12 @@ NdbDictionary::Dictionary::listObjects(List& list, Object::Type type)
int
NdbDictionary::Dictionary::listIndexes(List& list, const char * tableName)
{
- return m_impl.listIndexes(list, tableName);
+ const NdbDictionary::Table* tab= getTable(tableName);
+ if(tab == 0)
+ {
+ return -1;
+ }
+ return m_impl.listIndexes(list, tab->getTableId());
}
const struct NdbError &
@@ -821,6 +874,8 @@ NdbDictionary::Dictionary::getNdbError() const {
NdbOut&
operator<<(NdbOut& out, const NdbDictionary::Column& col)
{
+ const CHARSET_INFO *cs = col.getCharset();
+ const char *csname = cs ? cs->name : "?";
out << col.getName() << " ";
switch (col.getType()) {
case NdbDictionary::Column::Tinyint:
@@ -863,10 +918,10 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col)
out << "Decimal(" << col.getScale() << "," << col.getPrecision() << ")";
break;
case NdbDictionary::Column::Char:
- out << "Char(" << col.getLength() << ")";
+ out << "Char(" << col.getLength() << ";" << csname << ")";
break;
case NdbDictionary::Column::Varchar:
- out << "Varchar(" << col.getLength() << ")";
+ out << "Varchar(" << col.getLength() << ";" << csname << ")";
break;
case NdbDictionary::Column::Binary:
out << "Binary(" << col.getLength() << ")";
@@ -886,7 +941,7 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col)
break;
case NdbDictionary::Column::Text:
out << "Text(" << col.getInlineSize() << "," << col.getPartSize()
- << ";" << col.getStripeSize() << ")";
+ << ";" << col.getStripeSize() << ";" << csname << ")";
break;
case NdbDictionary::Column::Undefined:
out << "Undefined";
diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index 815ecf8ca6c..9abe52fb030 100644
--- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -36,6 +36,7 @@
#include "NdbEventOperationImpl.hpp"
#include "NdbBlob.hpp"
#include <AttributeHeader.hpp>
+#include <my_sys.h>
#define DEBUG_PRINT 0
#define INCOMPATIBLE_VERSION -2
@@ -64,6 +65,7 @@ NdbColumnImpl::operator=(const NdbColumnImpl& col)
m_name = col.m_name;
m_type = col.m_type;
m_precision = col.m_precision;
+ m_cs = col.m_cs;
m_scale = col.m_scale;
m_length = col.m_length;
m_pk = col.m_pk;
@@ -87,10 +89,66 @@ NdbColumnImpl::operator=(const NdbColumnImpl& col)
}
void
-NdbColumnImpl::init()
+NdbColumnImpl::init(Type t)
{
+ // do not use default_charset_info as it may not be initialized yet
+ // use binary collation until NDB tests can handle charsets
+ CHARSET_INFO* default_cs = &my_charset_latin1_bin;
m_attrId = -1;
- m_type = NdbDictionary::Column::Unsigned;
+ m_type = t;
+ switch (m_type) {
+ case Tinyint:
+ case Tinyunsigned:
+ case Smallint:
+ case Smallunsigned:
+ case Mediumint:
+ case Mediumunsigned:
+ case Int:
+ case Unsigned:
+ case Bigint:
+ case Bigunsigned:
+ case Float:
+ case Double:
+ m_precision = 0;
+ m_scale = 0;
+ m_length = 1;
+ m_cs = NULL;
+ break;
+ case Decimal:
+ m_precision = 10;
+ m_scale = 0;
+ m_length = 1;
+ m_cs = NULL;
+ break;
+ case Char:
+ case Varchar:
+ m_precision = 0;
+ m_scale = 0;
+ m_length = 1;
+ m_cs = default_cs;
+ break;
+ case Binary:
+ case Varbinary:
+ case Datetime:
+ case Timespec:
+ m_precision = 0;
+ m_scale = 0;
+ m_length = 1;
+ m_cs = NULL;
+ break;
+ case Blob:
+ m_precision = 256;
+ m_scale = 8000;
+ m_length = 4;
+ m_cs = NULL;
+ break;
+ case Text:
+ m_precision = 256;
+ m_scale = 8000;
+ m_length = 4;
+ m_cs = default_cs;
+ break;
+ }
m_pk = false;
m_nullable = false;
m_tupleKey = false;
@@ -98,12 +156,10 @@ NdbColumnImpl::init()
m_distributionKey = false;
m_distributionGroup = false;
m_distributionGroupBits = 8;
- m_length = 1;
- m_scale = 5;
- m_precision = 5;
m_keyInfoPos = 0;
- m_attrSize = 4,
- m_arraySize = 1,
+ // next 2 are set at run time
+ m_attrSize = 0;
+ m_arraySize = 0;
m_autoIncrement = false;
m_autoIncrementInitialValue = 1;
m_blobTable = NULL;
@@ -146,52 +202,12 @@ NdbColumnImpl::equal(const NdbColumnImpl& col) const
return false;
}
}
- if(m_length != col.m_length){
+ if (m_precision != col.m_precision ||
+ m_scale != col.m_scale ||
+ m_length != col.m_length ||
+ m_cs != col.m_cs) {
return false;
}
-
- switch(m_type){
- case NdbDictionary::Column::Undefined:
- break;
- case NdbDictionary::Column::Tinyint:
- case NdbDictionary::Column::Tinyunsigned:
- case NdbDictionary::Column::Smallint:
- case NdbDictionary::Column::Smallunsigned:
- case NdbDictionary::Column::Mediumint:
- case NdbDictionary::Column::Mediumunsigned:
- case NdbDictionary::Column::Int:
- case NdbDictionary::Column::Unsigned:
- case NdbDictionary::Column::Float:
- break;
- case NdbDictionary::Column::Decimal:
- if(m_scale != col.m_scale ||
- m_precision != col.m_precision){
- return false;
- }
- break;
- case NdbDictionary::Column::Char:
- case NdbDictionary::Column::Varchar:
- case NdbDictionary::Column::Binary:
- case NdbDictionary::Column::Varbinary:
- if(m_length != col.m_length){
- return false;
- }
- break;
- case NdbDictionary::Column::Bigint:
- case NdbDictionary::Column::Bigunsigned:
- case NdbDictionary::Column::Double:
- case NdbDictionary::Column::Datetime:
- case NdbDictionary::Column::Timespec:
- break;
- case NdbDictionary::Column::Blob:
- case NdbDictionary::Column::Text:
- if (m_precision != col.m_precision ||
- m_scale != col.m_scale ||
- m_length != col.m_length) {
- return false;
- }
- break;
- }
if (m_autoIncrement != col.m_autoIncrement){
return false;
}
@@ -209,14 +225,18 @@ NdbColumnImpl::create_psuedo(const char * name){
if(!strcmp(name, "NDB$FRAGMENT")){
col->setType(NdbDictionary::Column::Unsigned);
col->m_impl.m_attrId = AttributeHeader::FRAGMENT;
+ col->m_impl.m_attrSize = 4;
+ col->m_impl.m_arraySize = 1;
} else if(!strcmp(name, "NDB$ROW_COUNT")){
col->setType(NdbDictionary::Column::Bigunsigned);
col->m_impl.m_attrId = AttributeHeader::ROW_COUNT;
col->m_impl.m_attrSize = 8;
+ col->m_impl.m_arraySize = 1;
} else if(!strcmp(name, "NDB$COMMIT_COUNT")){
col->setType(NdbDictionary::Column::Bigunsigned);
col->m_impl.m_attrId = AttributeHeader::COMMIT_COUNT;
col->m_impl.m_attrSize = 8;
+ col->m_impl.m_arraySize = 1;
} else {
abort();
}
@@ -1127,6 +1147,7 @@ indexTypeMapping[] = {
{ -1, -1 }
};
+// TODO: remove, api-kernel type codes must match now
static const
ApiKernelMapping
columnTypeMapping[] = {
@@ -1233,9 +1254,23 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
return 703;
}
col->m_extType = attrDesc.AttributeExtType;
- col->m_precision = attrDesc.AttributeExtPrecision;
+ col->m_precision = (attrDesc.AttributeExtPrecision & 0xFFFF);
col->m_scale = attrDesc.AttributeExtScale;
col->m_length = attrDesc.AttributeExtLength;
+ // charset in upper half of precision
+ unsigned cs_number = (attrDesc.AttributeExtPrecision >> 16);
+ // charset is defined exactly for char types
+ if (col->getCharType() != (cs_number != 0)) {
+ delete impl;
+ return 703;
+ }
+ if (col->getCharType()) {
+ col->m_cs = get_charset(cs_number, MYF(0));
+ if (col->m_cs == NULL) {
+ delete impl;
+ return 743;
+ }
+ }
// translate to old kernel types and sizes
if (! attrDesc.translateExtType()) {
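
The two hunks here introduce the convention that AttributeExtPrecision carries two values in one 32-bit word: the numeric precision in the low 16 bits and the MySQL charset number in the high 16 bits, with 0 meaning no charset. A self-contained sketch of the decode step used above; the names are illustrative, not NDB API:

#include <cassert>
#include <cstdint>

// Decode side, matching the "& 0xFFFF" and ">> 16" in parseTableInfo above.
struct ExtPrecision { uint16_t precision; uint16_t csNumber; };

static ExtPrecision decodeExtPrecision(uint32_t attributeExtPrecision) {
  ExtPrecision p;
  p.precision = (uint16_t)(attributeExtPrecision & 0xFFFF); // low half
  p.csNumber  = (uint16_t)(attributeExtPrecision >> 16);    // high half, 0 = none
  return p;
}

int main() {
  ExtPrecision p = decodeExtPrecision((8u << 16) | 10u); // 8 = latin1_swedish_ci
  assert(p.precision == 10 && p.csNumber == 8);
  return 0;
}
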
@@ -1372,16 +1407,15 @@ int NdbDictionaryImpl::alterTable(NdbTableImpl &impl)
// Remove cached information and let it be refreshed at next access
if (m_localHash.get(originalInternalName) != NULL) {
m_localHash.drop(originalInternalName);
+ m_globalHash->lock();
NdbTableImpl * cachedImpl = m_globalHash->get(originalInternalName);
// If in local cache it must be in global
if (!cachedImpl)
abort();
- m_globalHash->lock();
m_globalHash->drop(cachedImpl);
m_globalHash->unlock();
}
}
-
return ret;
}
@@ -1486,9 +1520,23 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
getKernelConstant(col->m_type,
columnTypeMapping,
DictTabInfo::ExtUndefined);
- tmpAttr.AttributeExtPrecision = col->m_precision;
+ tmpAttr.AttributeExtPrecision = ((unsigned)col->m_precision & 0xFFFF);
tmpAttr.AttributeExtScale = col->m_scale;
tmpAttr.AttributeExtLength = col->m_length;
+ // charset is defined exactly for char types
+ if (col->getCharType() != (col->m_cs != NULL)) {
+ m_error.code = 703;
+ return -1;
+ }
+ // primary key type check
+ if (col->m_pk && ! NdbSqlUtil::usable_in_pk(col->m_type, col->m_cs)) {
+ m_error.code = 743;
+ return -1;
+ }
+ // charset in upper half of precision
+ if (col->getCharType()) {
+ tmpAttr.AttributeExtPrecision |= (col->m_cs->number << 16);
+ }
// DICT will ignore and recompute this
(void)tmpAttr.translateExtType();
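
The send side above mirrors that encoding: precision is masked to 16 bits, inconsistent charset settings are rejected, and the charset number is ORed into the upper half. A compressed sketch of that logic; the error codes come from ndberror.c further down, while ColSketch and its fields are hypothetical stand-ins, not NDB API types:

#include <cstdint>

struct ColSketch {
  bool     isCharType;   // Char, Varchar or Text
  bool     isPrimaryKey;
  uint16_t csNumber;     // 0 = no charset attached
  bool     usableInPk;   // stands in for NdbSqlUtil::usable_in_pk(type, cs)
};

// Returns 0 and fills extPrecision on success, or the NDB error code otherwise.
static int encodeExtPrecision(const ColSketch& c, uint16_t precision,
                              uint32_t& extPrecision) {
  if (c.isCharType != (c.csNumber != 0))
    return 703;                          // charset defined iff char type
  if (c.isPrimaryKey && !c.usableInPk)
    return 743;                          // unsupported charset in table or index
  extPrecision = (uint32_t)precision;
  if (c.isCharType)
    extPrecision |= ((uint32_t)c.csNumber << 16);  // charset in upper half
  return 0;
}
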
@@ -1665,6 +1713,7 @@ NdbDictionaryImpl::dropTable(const char * name)
int
NdbDictionaryImpl::dropTable(NdbTableImpl & impl)
{
+ int res;
const char * name = impl.getName();
if(impl.m_status == NdbDictionary::Object::New){
return dropTable(name);
@@ -1676,28 +1725,34 @@ NdbDictionaryImpl::dropTable(NdbTableImpl & impl)
}
List list;
- if (listIndexes(list, name) == -1)
+ if ((res = listIndexes(list, impl.m_tableId)) == -1){
return -1;
+ }
for (unsigned i = 0; i < list.count; i++) {
const List::Element& element = list.elements[i];
- if (dropIndex(element.name, name) == -1)
+ if ((res = dropIndex(element.name, name)) == -1)
+ {
return -1;
+ }
}
-
+
if (impl.m_noOfBlobs != 0) {
- if (dropBlobTables(impl) != 0)
+ if (dropBlobTables(impl) != 0){
return -1;
+ }
}
-
+
int ret = m_receiver.dropTable(impl);
- if(ret == 0){
+ if(ret == 0 || m_error.code == 709){
const char * internalTableName = impl.m_internalName.c_str();
-
+
m_localHash.drop(internalTableName);
m_globalHash->lock();
m_globalHash->drop(&impl);
m_globalHash->unlock();
+
+ return 0;
}
return ret;
@@ -1713,8 +1768,9 @@ NdbDictionaryImpl::dropBlobTables(NdbTableImpl & t)
char btname[NdbBlob::BlobTableNameSize];
NdbBlob::getBlobTableName(btname, &t, &c);
if (dropTable(btname) != 0) {
- if (m_error.code != 709)
+ if (m_error.code != 709){
return -1;
+ }
}
}
return 0;
@@ -1950,6 +2006,14 @@ NdbDictInterface::createIndex(Ndb & ndb,
m_error.code = 4245;
return -1;
}
+ // index key type check
+ if ((it == DictTabInfo::UniqueHashIndex &&
+ ! NdbSqlUtil::usable_in_hash_index(col->m_type, col->m_cs)) ||
+ (it == DictTabInfo::OrderedIndex &&
+ ! NdbSqlUtil::usable_in_ordered_index(col->m_type, col->m_cs))) {
+ m_error.code = 743;
+ return -1;
+ }
attributeList.id[i] = col->m_attrId;
}
if (it == DictTabInfo::UniqueHashIndex) {
@@ -2075,7 +2139,6 @@ NdbDictionaryImpl::dropIndex(NdbIndexImpl & impl, const char * tableName)
m_globalHash->drop(impl.m_table);
m_globalHash->unlock();
}
-
return ret;
}
@@ -2759,14 +2822,11 @@ NdbDictionaryImpl::listObjects(List& list, NdbDictionary::Object::Type type)
}
int
-NdbDictionaryImpl::listIndexes(List& list, const char * tableName)
+NdbDictionaryImpl::listIndexes(List& list, Uint32 indexId)
{
ListTablesReq req;
- NdbTableImpl* impl = getTable(tableName);
- if (impl == 0)
- return -1;
req.requestData = 0;
- req.setTableId(impl->m_tableId);
+ req.setTableId(indexId);
req.setListNames(true);
req.setListIndexes(true);
return m_receiver.listObjects(list, req.requestData, m_ndb.usingFullyQualifiedNames());
diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp
index da5e7e45c36..1fe92db94ed 100644
--- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp
+++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp
@@ -52,7 +52,7 @@ public:
NdbColumnImpl(NdbDictionary::Column &); // This is not a copy constructor
~NdbColumnImpl();
NdbColumnImpl& operator=(const NdbColumnImpl&);
- void init();
+ void init(Type t = Unsigned);
int m_attrId;
BaseString m_name;
@@ -60,6 +60,7 @@ public:
int m_precision;
int m_scale;
int m_length;
+ CHARSET_INFO * m_cs; // not const in MySQL
bool m_pk;
bool m_tupleKey;
@@ -82,6 +83,7 @@ public:
Uint32 m_keyInfoPos;
Uint32 m_extType; // used by restore (kernel type in version v2x)
bool getInterpretableType() const ;
+ bool getCharType() const;
bool getBlobType() const;
/**
@@ -388,7 +390,7 @@ public:
int stopSubscribeEvent(NdbEventImpl &);
int listObjects(List& list, NdbDictionary::Object::Type type);
- int listIndexes(List& list, const char * tableName);
+ int listIndexes(List& list, Uint32 indexId);
NdbTableImpl * getTable(const char * tableName, void **data= 0);
Ndb_local_table_info * get_local_table_info(const char * internalName);
@@ -448,6 +450,14 @@ NdbColumnImpl::getInterpretableType() const {
inline
bool
+NdbColumnImpl::getCharType() const {
+ return (m_type == NdbDictionary::Column::Char ||
+ m_type == NdbDictionary::Column::Varchar ||
+ m_type == NdbDictionary::Column::Text);
+}
+
+inline
+bool
NdbColumnImpl::getBlobType() const {
return (m_type == NdbDictionary::Column::Blob ||
m_type == NdbDictionary::Column::Text);
diff --git a/ndb/src/ndbapi/NdbIndexOperation.cpp b/ndb/src/ndbapi/NdbIndexOperation.cpp
index 0742f8d911c..bf4b07842f6 100644
--- a/ndb/src/ndbapi/NdbIndexOperation.cpp
+++ b/ndb/src/ndbapi/NdbIndexOperation.cpp
@@ -164,6 +164,7 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
Uint32 tData;
Uint32 tKeyInfoPosition;
const char* aValue = aValuePassed;
+ Uint32 xfrmData[1024];
Uint32 tempData[1024];
if ((theStatus == OperationDefined) &&
@@ -224,6 +225,21 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
m_theIndexDefined[i][2] = true;
Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize;
+ const char* aValueToWrite = aValue;
+
+ CHARSET_INFO* cs = tAttrInfo->m_cs;
+ if (cs != 0) {
+ // current limitation: strxfrm does not increase length
+ assert(cs->strxfrm_multiply == 1);
+ unsigned n =
+ (*cs->coll->strnxfrm)(cs,
+ (uchar*)xfrmData, sizeof(xfrmData),
+ (const uchar*)aValue, sizeInBytes);
+ while (n < sizeInBytes)
+ ((uchar*)xfrmData)[n++] = 0x20;
+ aValue = (char*)xfrmData;
+ }
+
Uint32 bitsInLastWord = 8 * (sizeInBytes & 3) ;
Uint32 totalSizeInWords = (sizeInBytes + 3)/4;// Inc. bits in last word
Uint32 sizeInWords = sizeInBytes / 4; // Exc. bits in last word
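
Char-type key values are normalized before they are compared or hashed: the collation's strnxfrm rewrites the bytes into their sort form and the result is space padded (0x20) back to the fixed column size, under the stated limitation that strxfrm_multiply == 1. A minimal sketch of that step in isolation, assuming MySQL's CHARSET_INFO interface exactly as the hunk uses it; normalizeCharKey and its parameters are illustrative:

#include <cassert>
#include <my_sys.h>   // assumed to pull in CHARSET_INFO, as in the NDB sources

static const char* normalizeCharKey(CHARSET_INFO* cs,
                                    const char* value, unsigned sizeInBytes,
                                    unsigned char* buf, unsigned bufSize)
{
  if (cs == 0)
    return value;                      // not a char column: use the bytes as-is
  assert(cs->strxfrm_multiply == 1);   // current limitation, as noted above
  assert(bufSize >= sizeInBytes);
  unsigned n = (*cs->coll->strnxfrm)(cs, buf, bufSize,
                                     (const unsigned char*)value, sizeInBytes);
  while (n < sizeInBytes)
    buf[n++] = 0x20;                   // pad with space up to the fixed length
  return (const char*)buf;
}
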
@@ -314,13 +330,20 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
if ((tOpType == InsertRequest) ||
(tOpType == WriteRequest)) {
if (!tAttrInfo->m_indexOnly){
+ // invalid data can crash kernel
+ if (cs != NULL &&
+ (*cs->cset->well_formed_len)(cs,
+ aValueToWrite,
+ aValueToWrite + sizeInBytes,
+ sizeInBytes) != sizeInBytes)
+ goto equal_error4;
Uint32 ahValue;
Uint32 sz = totalSizeInWords;
AttributeHeader::init(&ahValue, tAttrId, sz);
insertATTRINFO( ahValue );
- insertATTRINFOloop((Uint32*)aValue, sizeInWords);
+ insertATTRINFOloop((Uint32*)aValueToWrite, sizeInWords);
if (bitsInLastWord != 0) {
- tData = *(Uint32*)(aValue + (sizeInWords << 2));
+ tData = *(Uint32*)(aValueToWrite + (sizeInWords << 2));
tData = convertEndian(tData);
tData = tData & ((1 << bitsInLastWord) - 1);
tData = convertEndian(tData);
@@ -411,7 +434,10 @@ int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
equal_error3:
setErrorCodeAbort(4209);
-
+ return -1;
+
+ equal_error4:
+ setErrorCodeAbort(744);
return -1;
}
diff --git a/ndb/src/ndbapi/NdbLinHash.hpp b/ndb/src/ndbapi/NdbLinHash.hpp
index f786600607f..f245a261a04 100644
--- a/ndb/src/ndbapi/NdbLinHash.hpp
+++ b/ndb/src/ndbapi/NdbLinHash.hpp
@@ -287,17 +287,14 @@ NdbLinHash<C>::deleteKey ( const char* str, Uint32 len){
NdbElement_t<C> **chainp = &directory[dir]->elements[seg];
for(NdbElement_t<C> * chain = *chainp; chain != 0; chain = chain->next){
if(chain->len == len && !memcmp(chain->str, str, len)){
+ C *data= chain->theData;
if (oldChain == 0) {
- C *data= chain->theData;
- delete chain;
- * chainp = 0;
- return data;
+ * chainp = chain->next;
} else {
- C *data= chain->theData;
oldChain->next = chain->next;
- delete chain;
- return data;
}
+ delete chain;
+ return data;
} else {
oldChain = chain;
}
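
Worth noting about the deleteKey hunk above: besides hoisting the common delete/return, the head-of-bucket case changes from "* chainp = 0" to "* chainp = chain->next", so later elements in the same bucket are no longer lost when the first one is deleted. A generic sketch of the corrected unlink; the template and names are illustrative:

// Generic singly-linked unlink matching the corrected logic above.
template <class Node>
static Node* unlinkNode(Node** headp, Node* prev, Node* node) {
  if (prev == 0)
    *headp = node->next;      // removing the head keeps the rest of the chain
  else
    prev->next = node->next;  // removing a later element
  node->next = 0;
  return node;                // caller frees the node and keeps its payload
}
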
diff --git a/ndb/src/ndbapi/NdbOperationDefine.cpp b/ndb/src/ndbapi/NdbOperationDefine.cpp
index 6d995e06582..ad838ddd601 100644
--- a/ndb/src/ndbapi/NdbOperationDefine.cpp
+++ b/ndb/src/ndbapi/NdbOperationDefine.cpp
@@ -492,6 +492,17 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
// Insert Attribute Id into ATTRINFO part.
const Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize;
+
+ CHARSET_INFO* cs = tAttrInfo->m_cs;
+ // invalid data can crash kernel
+ if (cs != NULL &&
+ (*cs->cset->well_formed_len)(cs,
+ aValue,
+ aValue + sizeInBytes,
+ sizeInBytes) != sizeInBytes) {
+ setErrorCodeAbort(744);
+ return -1;
+ }
#if 0
tAttrSize = tAttrInfo->theAttrSize;
tArraySize = tAttrInfo->theArraySize;
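
Both setValue here and the equal_impl paths above now refuse byte strings that are not well formed in the column's character set, since, as the comment says, invalid data can crash the kernel; the failure surfaces as error 744 (added to ndberror.c below). The guard in isolation, assuming the same CHARSET_INFO interface; the helper name is illustrative:

#include <my_sys.h>   // assumed to pull in CHARSET_INFO, as in the NDB sources

// Returns true when the bytes are acceptable for the column's charset.
// cs == NULL means a non-character column, which is always accepted.
static bool charDataIsWellFormed(CHARSET_INFO* cs,
                                 const char* value, unsigned sizeInBytes)
{
  if (cs == 0)
    return true;
  unsigned ok = (*cs->cset->well_formed_len)(cs,
                                             value,
                                             value + sizeInBytes,
                                             sizeInBytes);
  return ok == sizeInBytes;   // anything shorter means malformed bytes
}
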
diff --git a/ndb/src/ndbapi/NdbOperationSearch.cpp b/ndb/src/ndbapi/NdbOperationSearch.cpp
index 19cb133dbf7..e5166fc4a82 100644
--- a/ndb/src/ndbapi/NdbOperationSearch.cpp
+++ b/ndb/src/ndbapi/NdbOperationSearch.cpp
@@ -60,6 +60,7 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
Uint32 tData;
Uint32 tKeyInfoPosition;
const char* aValue = aValuePassed;
+ Uint32 xfrmData[1024];
Uint32 tempData[1024];
if ((theStatus == OperationDefined) &&
@@ -117,6 +118,21 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
theTupleKeyDefined[i][2] = true;
Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize;
+ const char* aValueToWrite = aValue;
+
+ CHARSET_INFO* cs = tAttrInfo->m_cs;
+ if (cs != 0) {
+ // current limitation: strxfrm does not increase length
+ assert(cs->strxfrm_multiply == 1);
+ unsigned n =
+ (*cs->coll->strnxfrm)(cs,
+ (uchar*)xfrmData, sizeof(xfrmData),
+ (const uchar*)aValue, sizeInBytes);
+ while (n < sizeInBytes)
+ ((uchar*)xfrmData)[n++] = 0x20;
+ aValue = (char*)xfrmData;
+ }
+
Uint32 bitsInLastWord = 8 * (sizeInBytes & 3) ;
Uint32 totalSizeInWords = (sizeInBytes + 3)/4; // Inc. bits in last word
Uint32 sizeInWords = sizeInBytes / 4; // Exc. bits in last word
@@ -206,13 +222,20 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
if ((tOpType == InsertRequest) ||
(tOpType == WriteRequest)) {
if (!tAttrInfo->m_indexOnly){
+ // invalid data can crash kernel
+ if (cs != NULL &&
+ (*cs->cset->well_formed_len)(cs,
+ aValueToWrite,
+ aValueToWrite + sizeInBytes,
+ sizeInBytes) != sizeInBytes)
+ goto equal_error4;
Uint32 ahValue;
const Uint32 sz = totalSizeInWords;
AttributeHeader::init(&ahValue, tAttrId, sz);
insertATTRINFO( ahValue );
- insertATTRINFOloop((Uint32*)aValue, sizeInWords);
+ insertATTRINFOloop((Uint32*)aValueToWrite, sizeInWords);
if (bitsInLastWord != 0) {
- tData = *(Uint32*)(aValue + (sizeInWords << 2));
+ tData = *(Uint32*)(aValueToWrite + (sizeInWords << 2));
tData = convertEndian(tData);
tData = tData & ((1 << bitsInLastWord) - 1);
tData = convertEndian(tData);
@@ -311,6 +334,10 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
equal_error3:
setErrorCodeAbort(4209);
return -1;
+
+ equal_error4:
+ setErrorCodeAbort(744);
+ return -1;
}
/******************************************************************************
diff --git a/ndb/src/ndbapi/NdbScanOperation.cpp b/ndb/src/ndbapi/NdbScanOperation.cpp
index 86c174c4545..3ff2a32d418 100644
--- a/ndb/src/ndbapi/NdbScanOperation.cpp
+++ b/ndb/src/ndbapi/NdbScanOperation.cpp
@@ -47,11 +47,13 @@ NdbScanOperation::NdbScanOperation(Ndb* aNdb) :
m_sent_receivers = 0;
m_receivers = 0;
m_array = new Uint32[1]; // skip if on delete in fix_receivers
+ theSCAN_TABREQ = 0;
}
NdbScanOperation::~NdbScanOperation()
{
for(Uint32 i = 0; i<m_allocated_receivers; i++){
+ m_receivers[i]->release();
theNdb->releaseNdbScanRec(m_receivers[i]);
}
delete[] m_array;
@@ -191,7 +193,7 @@ NdbResultSet* NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
return 0;
}
- theSCAN_TABREQ = theNdb->getSignal();
+ theSCAN_TABREQ = (!theSCAN_TABREQ ? theNdb->getSignal() : theSCAN_TABREQ);
if (theSCAN_TABREQ == NULL) {
setErrorCodeAbort(4000);
return 0;
@@ -719,6 +721,12 @@ void NdbScanOperation::release()
for(Uint32 i = 0; i<m_allocated_receivers; i++){
m_receivers[i]->release();
}
+ if(theSCAN_TABREQ)
+ {
+ theNdb->releaseSignal(theSCAN_TABREQ);
+ theSCAN_TABREQ = 0;
+ }
+ NdbOperation::release();
}
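
The scan changes above give theSCAN_TABREQ a clear lifecycle: zeroed in the constructor, allocated at most once in readTuples (reused on re-execution), and handed back in release() along with the receivers, so repeated scans no longer leak the signal. A compilable sketch of that allocate-once / release-once pattern with hypothetical Signal and Pool types, not the NDB API:

struct Signal {};
struct Pool {
  Signal* get()          { return new Signal(); }
  void    put(Signal* s) { delete s; }
};

struct ScanOpSketch {
  Pool&   pool;
  Signal* req;

  ScanOpSketch(Pool& p) : pool(p), req(0) {}   // zeroed up front, as in the ctor
  Signal* prepare() {                          // readTuples(): reuse if present
    if (req == 0)
      req = pool.get();
    return req;
  }
  void release() {                             // release(): return it exactly once
    if (req != 0) {
      pool.put(req);
      req = 0;
    }
  }
};
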
/***************************************************************************
@@ -1096,30 +1104,43 @@ NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo,
theStatus == SetBound &&
(0 <= type && type <= 4) &&
len <= 8000) {
- // bound type
-
+ // insert bound type
insertATTRINFO(type);
- // attribute header
Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize;
+ // normalize char bound
+ CHARSET_INFO* cs = tAttrInfo->m_cs;
+ Uint32 xfrmData[2000];
+ if (cs != NULL && aValue != NULL) {
+ // current limitation: strxfrm does not increase length
+ assert(cs->strxfrm_multiply == 1);
+ unsigned n =
+ (*cs->coll->strnxfrm)(cs,
+ (uchar*)xfrmData, sizeof(xfrmData),
+ (const uchar*)aValue, sizeInBytes);
+ while (n < sizeInBytes)
+ ((uchar*)xfrmData)[n++] = 0x20;
+ aValue = (char*)xfrmData;
+ }
if (len != sizeInBytes && (len != 0)) {
setErrorCodeAbort(4209);
return -1;
}
+ // insert attribute header
len = aValue != NULL ? sizeInBytes : 0;
Uint32 tIndexAttrId = tAttrInfo->m_attrId;
Uint32 sizeInWords = (len + 3) / 4;
AttributeHeader ah(tIndexAttrId, sizeInWords);
insertATTRINFO(ah.m_value);
if (len != 0) {
- // attribute data
+ // insert attribute data
if ((UintPtr(aValue) & 0x3) == 0 && (len & 0x3) == 0)
insertATTRINFOloop((const Uint32*)aValue, sizeInWords);
else {
- Uint32 temp[2000];
- memcpy(temp, aValue, len);
+ Uint32 tempData[2000];
+ memcpy(tempData, aValue, len);
while ((len & 0x3) != 0)
- ((char*)temp)[len++] = 0;
- insertATTRINFOloop(temp, sizeInWords);
+ ((char*)tempData)[len++] = 0;
+ insertATTRINFOloop(tempData, sizeInWords);
}
}
@@ -1206,11 +1227,11 @@ NdbIndexScanOperation::compare(Uint32 skip, Uint32 cols,
if((r1_null ^ (unsigned)r2->isNULL())){
return (r1_null ? -1 : 1);
}
- Uint32 type = NdbColumnImpl::getImpl(* r1->m_column).m_extType;
+ const NdbColumnImpl & col = NdbColumnImpl::getImpl(* r1->m_column);
Uint32 size = (r1->theAttrSize * r1->theArraySize + 3) / 4;
if(!r1_null){
- const NdbSqlUtil::Type& t = NdbSqlUtil::getType(type);
- int r = (*t.m_cmp)(d1, d2, size, size);
+ const NdbSqlUtil::Type& sqlType = NdbSqlUtil::getType(col.m_extType);
+ int r = (*sqlType.m_cmp)(col.m_cs, d1, d2, size, size);
if(r){
assert(r != NdbSqlUtil::CmpUnknown);
return r;
diff --git a/ndb/src/ndbapi/NdbScanReceiver.cpp b/ndb/src/ndbapi/NdbScanReceiver.cpp
deleted file mode 100644
index 6c8c16c3ecf..00000000000
--- a/ndb/src/ndbapi/NdbScanReceiver.cpp
+++ /dev/null
@@ -1,187 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include "NdbScanReceiver.hpp"
-#include <NdbRecAttr.hpp>
-
-#include <signaldata/ScanFrag.hpp>
-
-#include <NdbOut.hpp>
-
-
-/***************************************************************************
- * int receiveKEYINFO20( NdbApiSignal* aSignal)
- *
- * Remark: Handles the reception of the KEYINFO20 signal.
- * Save a copy of the signal in list
- *
- ***************************************************************************/
-int
-NdbScanReceiver::receiveKEYINFO20( NdbApiSignal* aSignal){
- const KeyInfo20 * const keyInfo = CAST_CONSTPTR(KeyInfo20, aSignal->getDataPtr());
- if (theStatus != Waiting){
- //ndbout << "Dropping KEYINFO20, theStatus="<<theStatus << endl;
- return -1;
- }
- if (aSignal->getLength() < 5){
- //ndbout << "Dropping KEYINFO20, length="<<aSignal->getLength() << endl;
- }
- Uint64 tCurrTransId = theNdbOp->theNdbCon->getTransactionId();
- Uint64 tRecTransId = (Uint64)keyInfo->transId1 + ((Uint64)keyInfo->transId2 << 32);
- if ((tRecTransId - tCurrTransId) != (Uint64)0){
- //ndbout << "Dropping KEYINFO20 wrong transid" << endl;
- return -1;
- }
-
- NdbApiSignal * tCopy = new NdbApiSignal(0);//getSignal();
- if (tCopy == NULL) {
- theNdbOp->setErrorCode(4000);
- return 2; // theWaitState = NO_WAIT
- }
- // Put copy last in list of KEYINFO20 signals
- tCopy->copyFrom(aSignal);
- tCopy->next(NULL);
- if (theFirstKEYINFO20_Recv == NULL)
- theFirstKEYINFO20_Recv = tCopy;
- else
- theLastKEYINFO20_Recv->next(tCopy);
- theLastKEYINFO20_Recv = tCopy;
-
- theTotalKI_Len = keyInfo->keyLen; // This is the total length of all signals
- theTotalRecKI_Len += aSignal->getLength() - 5;
- return theNdbOp->theNdbCon->checkNextScanResultComplete();
-}
-
-/***************************************************************************
- * int receiveTRANSID_AI_SCAN( NdbApiSignal* aSignal)
- *
- * Remark: Handles the reception of the TRANSID_AI_signal with
- * 22 signal data words.
- * Save a copy of the signal in list and check if all
- * signals belonging to this resultset is receieved.
- *
- ***************************************************************************/
-int
-NdbScanReceiver::receiveTRANSID_AI_SCAN( NdbApiSignal* aSignal)
-{
- const Uint32* aDataPtr = aSignal->getDataPtr();
- if (theStatus != Waiting){
- //ndbout << "Dropping TRANSID_AI, theStatus="<<theStatus << endl;
- return -1;
- }
- if (aSignal->getLength() < 3){
- //ndbout << "Dropping TRANSID_AI, length="<<aSignal->getLength() << endl;
- return -1;
- }
- if (theNdbOp == NULL){
- //ndbout << "Dropping TRANSID_AI, theNdbOp == NULL" << endl;
- return -1;
- }
- if (theNdbOp->theNdbCon == NULL){
- //ndbout << "Dropping TRANSID_AI, theNdbOp->theNdbCon == NULL" << endl;
- return -1;
- }
- Uint64 tCurrTransId = theNdbOp->theNdbCon->getTransactionId();
- Uint64 tRecTransId = (Uint64)aDataPtr[1] + ((Uint64)aDataPtr[2] << 32);
- if ((tRecTransId - tCurrTransId) != (Uint64)0){
- //ndbout << "Dropping TRANSID_AI wrong transid" << endl;
- return -1;
- }
-
- NdbApiSignal * tCopy = new NdbApiSignal(0);//getSignal();
- if (tCopy == NULL){
- theNdbOp->setErrorCode(4000);
- return 2; // theWaitState = NO_WAIT
- }
- tCopy->copyFrom(aSignal);
- tCopy->next(NULL);
- if (theFirstTRANSID_AI_Recv == NULL)
- theFirstTRANSID_AI_Recv = tCopy;
- else
- theLastTRANSID_AI_Recv->next(tCopy);
- theLastTRANSID_AI_Recv = tCopy;
- theTotalRecAI_Len += aSignal->getLength() - 3;
-
- return theNdbOp->theNdbCon->checkNextScanResultComplete();
-}
-
-/***************************************************************************
- * int executeSavedSignals()
- *
- * Remark: Execute all saved TRANSID_AI signals into the parent NdbOperation
- *
- *
- ***************************************************************************/
-int
-NdbScanReceiver::executeSavedSignals(){
-
- NdbApiSignal* tSignal = theFirstTRANSID_AI_Recv;
- while (tSignal != NULL) {
- const Uint32* tDataPtr = tSignal->getDataPtr();
-
- int tRet = theNdbOp->receiveREAD_AI((Uint32*)&tDataPtr[3],
- tSignal->getLength() - 3);
- if (tRet != -1){
- // -1 means that more signals are wanted ?
- // Make sure there are no more signals in the list
- assert(tSignal->next() == NULL);
- }
- tSignal = tSignal->next();
- }
- // receiveREAD_AI may not copy to application buffers
- NdbRecAttr* tRecAttr = theNdbOp->theFirstRecAttr;
- while (tRecAttr != NULL) {
- if (tRecAttr->copyoutRequired()) // copy to application buffer
- tRecAttr->copyout();
- tRecAttr = tRecAttr->next();
- }
- // Release TRANSID_AI signals for this receiver
- while(theFirstTRANSID_AI_Recv != NULL){
- NdbApiSignal* tmp = theFirstTRANSID_AI_Recv;
- theFirstTRANSID_AI_Recv = tmp->next();
- delete tmp;
- }
-
- // theNdbOp->theNdb->releaseSignalsInList(&theFirstTRANSID_AI_Recv);
- theFirstTRANSID_AI_Recv = NULL;
- theLastTRANSID_AI_Recv = NULL;
- theStatus = Executed;
-
- return 0;
-}
-
-
-void
-NdbScanReceiver::prepareNextScanResult(){
- if(theStatus == Executed){
-
- // theNdbOp->theNdb->releaseSignalsInList(&theFirstKEYINFO20_Recv);
- while(theFirstKEYINFO20_Recv != NULL){
- NdbApiSignal* tmp = theFirstKEYINFO20_Recv;
- theFirstKEYINFO20_Recv = tmp->next();
- delete tmp;
- }
- theFirstKEYINFO20_Recv = NULL;
- theLastKEYINFO20_Recv = NULL;
- theTotalRecAI_Len = 0;
- theTotalRecKI_Len = 0;
- if (theLockMode == true)
- theTotalKI_Len = 0xFFFFFFFF;
- else
- theTotalKI_Len = 0;
- theStatus = Waiting;
- }
-}
diff --git a/ndb/src/ndbapi/NdbScanReceiver.hpp b/ndb/src/ndbapi/NdbScanReceiver.hpp
deleted file mode 100644
index 72f9e48f02c..00000000000
--- a/ndb/src/ndbapi/NdbScanReceiver.hpp
+++ /dev/null
@@ -1,210 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef NdbScanReceiver_H
-#define NdbScanReceiver_H
-
-#include "Ndb.hpp"
-#include "NdbConnection.hpp"
-#include "NdbOperation.hpp"
-#include "NdbApiSignal.hpp"
-#include "NdbReceiver.hpp"
-#include <NdbOut.hpp>
-
-
-class NdbScanReceiver
-{
- enum ReceiverStatus { Init,
- Waiting,
- Completed,
- Executed,
- Released };
-
- friend class Ndb;
- friend class NdbOperation;
-public:
- NdbScanReceiver(Ndb *aNdb) :
- theReceiver(aNdb),
- theNdbOp(NULL),
- theFirstTRANSID_AI_Recv(NULL),
- theLastTRANSID_AI_Recv(NULL),
- theFirstKEYINFO20_Recv(NULL),
- theLastKEYINFO20_Recv(NULL),
- theTotalRecAI_Len(0),
- theTotalKI_Len(0xFFFFFFFF),
- theTotalRecKI_Len(0),
- theStatus(Init),
- theNextScanRec(NULL)
- {
- theReceiver.init(NdbReceiver::NDB_SCANRECEIVER, this);
- }
-
- int checkMagicNumber();
- int receiveTRANSID_AI_SCAN(NdbApiSignal*);
- int receiveKEYINFO20(NdbApiSignal*);
- int executeSavedSignals();
- void prepareNextScanResult();
-
- NdbScanReceiver* next();
- void next(NdbScanReceiver*);
-
- bool isCompleted(Uint32 aiLenToReceive);
- void setCompleted();
-
- void init(NdbOperation* aNdbOp, bool lockMode);
-
- Uint32 ptr2int() { return theReceiver.getId(); };
-private:
- NdbScanReceiver();
- void release();
-
- NdbReceiver theReceiver;
-
- NdbOperation* theNdbOp;
- NdbApiSignal* theFirstTRANSID_AI_Recv;
- NdbApiSignal* theLastTRANSID_AI_Recv;
- NdbApiSignal* theFirstKEYINFO20_Recv;
- NdbApiSignal* theLastKEYINFO20_Recv;
-
- Uint32 theTotalRecAI_Len;
- Uint32 theTotalKI_Len;
- Uint32 theTotalRecKI_Len;
- ReceiverStatus theStatus;
- Uint32 theMagicNumber;
- NdbScanReceiver* theNextScanRec;
- bool theLockMode;
-
-};
-
-inline
-void
-NdbScanReceiver::init(NdbOperation* aNdbOp, bool lockMode){
- assert(theStatus == Init || theStatus == Released);
- theNdbOp = aNdbOp;
- theMagicNumber = 0xA0B1C2D3;
- theTotalRecAI_Len = 0;
-
- /* If we are locking the records for take over
- * KI_len to receive is at least 1, since we don't know yet
- * how much KI we are expecting(this is written in the first KI signal)
- * set theTotalKI_Len to FFFFFFFF, this will make the ScanReciever wait for
- * at least the first KI, and when that is received we will know if
- * we are expecting another one
- */
- theLockMode = lockMode;
- if (theLockMode == true)
- theTotalKI_Len = 0xFFFFFFFF;
- else
- theTotalKI_Len = 0;
- theTotalRecKI_Len = 0;
-
- assert(theNextScanRec == NULL);
- theNextScanRec = NULL;
- assert(theFirstTRANSID_AI_Recv == NULL);
- theFirstTRANSID_AI_Recv = NULL;
- assert(theLastTRANSID_AI_Recv == NULL);
- theLastTRANSID_AI_Recv = NULL;
- assert(theFirstKEYINFO20_Recv == NULL);
- theFirstKEYINFO20_Recv = NULL;
- theLastKEYINFO20_Recv = NULL;
-
- theStatus = Waiting;
-};
-
-
-inline
-void
-NdbScanReceiver::release(){
- theStatus = Released;
- // theNdbOp->theNdb->releaseSignalsInList(&theFirstTRANSID_AI_Recv);
- while(theFirstTRANSID_AI_Recv != NULL){
- NdbApiSignal* tmp = theFirstTRANSID_AI_Recv;
- theFirstTRANSID_AI_Recv = tmp->next();
- delete tmp;
- }
- theFirstTRANSID_AI_Recv = NULL;
- theLastTRANSID_AI_Recv = NULL;
- // theNdbOp->theNdb->releaseSignalsInList(&theFirstKEYINFO20_Recv);
- while(theFirstKEYINFO20_Recv != NULL){
- NdbApiSignal* tmp = theFirstKEYINFO20_Recv;
- theFirstKEYINFO20_Recv = tmp->next();
- delete tmp;
- }
- theFirstKEYINFO20_Recv = NULL;
- theLastKEYINFO20_Recv = NULL;
- theNdbOp = NULL;
- theTotalRecAI_Len = 0;
- theTotalRecKI_Len = 0;
- theTotalKI_Len = 0xFFFFFFFF;
-};
-
-inline
-int
-NdbScanReceiver::checkMagicNumber()
-{
- if (theMagicNumber != 0xA0B1C2D3)
- return -1;
- return 0;
-}
-
-inline
-NdbScanReceiver*
-NdbScanReceiver::next(){
- return theNextScanRec;
-}
-
-inline
-void
-NdbScanReceiver::next(NdbScanReceiver* aScanRec){
- theNextScanRec = aScanRec;
-}
-
-inline
-bool
-NdbScanReceiver::isCompleted(Uint32 aiLenToReceive){
- assert(theStatus == Waiting || theStatus == Completed);
-#if 0
- ndbout << "NdbScanReceiver::isCompleted"<<endl
- << " theStatus = " << theStatus << endl
- << " theTotalRecAI_Len = " << theTotalRecAI_Len << endl
- << " aiLenToReceive = " << aiLenToReceive << endl
- << " theTotalRecKI_Len = "<< theTotalRecKI_Len << endl
- << " theTotalKI_Len = "<< theTotalKI_Len << endl;
-#endif
- // Have we already receive everything
- if(theStatus == Completed)
- return true;
-
- // Check that we have received AI
- if(theTotalRecAI_Len < aiLenToReceive)
- return false;
-
- // Check that we have recieved KI
- if (theTotalRecKI_Len < theTotalKI_Len)
- return false;
-
- // We should not have recieved more AI
- assert(theTotalRecAI_Len <= aiLenToReceive);
- return true;
-}
-
-inline
-void
-NdbScanReceiver::setCompleted(){
- theStatus = Completed;
-}
-
-#endif
diff --git a/ndb/src/ndbapi/Ndbinit.cpp b/ndb/src/ndbapi/Ndbinit.cpp
index d4b4c92e78a..8589158ae6a 100644
--- a/ndb/src/ndbapi/Ndbinit.cpp
+++ b/ndb/src/ndbapi/Ndbinit.cpp
@@ -16,7 +16,6 @@
#include <ndb_global.h>
-#include <my_sys.h>
#include "NdbApiSignal.hpp"
#include "NdbImpl.hpp"
@@ -57,27 +56,32 @@ Parameters: aDataBase : Name of the database.
Remark: Connect to the database.
***************************************************************************/
Ndb::Ndb( const char* aDataBase , const char* aSchema) {
+ DBUG_ENTER("Ndb::Ndb()");
+ DBUG_PRINT("enter",("(old)Ndb::Ndb this=0x%x", this));
NdbMutex_Lock(&createNdbMutex);
if (theNoOfNdbObjects < 0)
abort(); // old and new Ndb constructor used mixed
theNoOfNdbObjects++;
if (global_ndb_cluster_connection == 0) {
- my_init();
global_ndb_cluster_connection= new Ndb_cluster_connection(ndbConnectString);
global_ndb_cluster_connection->connect();
}
NdbMutex_Unlock(&createNdbMutex);
setup(global_ndb_cluster_connection, aDataBase, aSchema);
+ DBUG_VOID_RETURN;
}
Ndb::Ndb( Ndb_cluster_connection *ndb_cluster_connection,
const char* aDataBase , const char* aSchema)
{
+ DBUG_ENTER("Ndb::Ndb()");
+ DBUG_PRINT("enter",("Ndb::Ndb this=0x%x", this));
if (global_ndb_cluster_connection != 0 &&
global_ndb_cluster_connection != ndb_cluster_connection)
abort(); // old and new Ndb constructor used mixed
theNoOfNdbObjects= -1;
setup(ndb_cluster_connection, aDataBase, aSchema);
+ DBUG_VOID_RETURN;
}
void Ndb::setup(Ndb_cluster_connection *ndb_cluster_connection,
@@ -215,6 +219,7 @@ void Ndb::setConnectString(const char * connectString)
Ndb::~Ndb()
{
DBUG_ENTER("Ndb::~Ndb()");
+ DBUG_PRINT("enter",("Ndb::~Ndb this=0x%x",this));
doDisconnect();
delete theDictionary;
@@ -242,10 +247,6 @@ Ndb::~Ndb()
// closeSchemaTransaction(theSchemaConToNdbList);
while ( theConIdleList != NULL )
freeNdbCon();
- while ( theSignalIdleList != NULL )
- freeSignal();
- while (theRecAttrIdleList != NULL)
- freeRecAttr();
while (theOpIdleList != NULL)
freeOperation();
while (theScanOpIdleList != NULL)
@@ -264,6 +265,10 @@ Ndb::~Ndb()
freeNdbScanRec();
while (theNdbBlobIdleList != NULL)
freeNdbBlob();
+ while (theRecAttrIdleList != NULL)
+ freeRecAttr();
+ while ( theSignalIdleList != NULL )
+ freeSignal();
releaseTransactionArrays();
startTransactionNodeSelectionData.release();
diff --git a/ndb/src/ndbapi/TransporterFacade.hpp b/ndb/src/ndbapi/TransporterFacade.hpp
index 76beaa708f1..8b6e38a0611 100644
--- a/ndb/src/ndbapi/TransporterFacade.hpp
+++ b/ndb/src/ndbapi/TransporterFacade.hpp
@@ -236,7 +236,6 @@ public:
NdbMutex* theMutexPtr;
private:
static TransporterFacade* theFacadeInstance;
- static ConfigRetriever *s_config_retriever;
public:
GlobalDictCache m_globalDictCache;
diff --git a/ndb/src/ndbapi/ndb_cluster_connection.cpp b/ndb/src/ndbapi/ndb_cluster_connection.cpp
index 27695cec187..5be4f0f9f91 100644
--- a/ndb/src/ndbapi/ndb_cluster_connection.cpp
+++ b/ndb/src/ndbapi/ndb_cluster_connection.cpp
@@ -16,6 +16,7 @@
#include <ndb_global.h>
#include <my_pthread.h>
+#include <my_sys.h>
#include <ndb_cluster_connection.hpp>
#include <TransporterFacade.hpp>
@@ -30,14 +31,18 @@ static int g_run_connect_thread= 0;
Ndb_cluster_connection::Ndb_cluster_connection(const char *connect_string)
{
+ DBUG_ENTER("Ndb_cluster_connection");
+ DBUG_PRINT("enter",("Ndb_cluster_connection this=0x%x", this));
m_facade= TransporterFacade::theFacadeInstance= new TransporterFacade();
if (connect_string)
- m_connect_string= strdup(connect_string);
+ m_connect_string= my_strdup(connect_string,MYF(MY_WME));
else
m_connect_string= 0;
m_config_retriever= 0;
+ m_local_config= 0;
m_connect_thread= 0;
m_connect_callback= 0;
+ DBUG_VOID_RETURN;
}
extern "C" pthread_handler_decl(run_ndb_cluster_connection_connect_thread, me)
@@ -55,6 +60,7 @@ void Ndb_cluster_connection::connect_thread()
DBUG_ENTER("Ndb_cluster_connection::connect_thread");
int r;
do {
+ NdbSleep_SecSleep(1);
if ((r = connect(1)) == 0)
break;
if (r == -1) {
@@ -75,6 +81,7 @@ int Ndb_cluster_connection::start_connect_thread(int (*connect_callback)(void))
m_connect_callback= connect_callback;
if ((r = connect(1)) == 1)
{
+ DBUG_PRINT("info",("starting thread"));
m_connect_thread= NdbThread_Create(run_ndb_cluster_connection_connect_thread,
(void**)this,
32768,
@@ -99,10 +106,16 @@ int Ndb_cluster_connection::connect(int reconnect)
do {
if (m_config_retriever == 0)
{
- m_config_retriever= new ConfigRetriever(NDB_VERSION, NODE_TYPE_API);
- m_config_retriever->setConnectString(m_connect_string);
- if(m_config_retriever->init() == -1)
- break;
+ if (m_local_config == 0) {
+ m_local_config= new LocalConfig();
+ if (!m_local_config->init(m_connect_string,0)) {
+ ndbout << "Configuration error: Unable to retrieve local config" << endl;
+ m_local_config->printError();
+ m_local_config->printUsage();
+ DBUG_RETURN(-1);
+ }
+ }
+ m_config_retriever= new ConfigRetriever(*m_local_config, NDB_VERSION, NODE_TYPE_API);
}
else
if (reconnect == 0)
@@ -118,6 +131,7 @@ int Ndb_cluster_connection::connect(int reconnect)
else
if(m_config_retriever->do_connect() == -1)
break;
+
Uint32 nodeId = m_config_retriever->allocNodeId();
for(Uint32 i = 0; nodeId == 0 && i<5; i++){
NdbSleep_SecSleep(3);
@@ -145,6 +159,8 @@ int Ndb_cluster_connection::connect(int reconnect)
Ndb_cluster_connection::~Ndb_cluster_connection()
{
+ DBUG_ENTER("~Ndb_cluster_connection");
+ DBUG_PRINT("enter",("~Ndb_cluster_connection this=0x%x", this));
TransporterFacade::stop_instance();
if (m_connect_thread)
{
@@ -161,10 +177,12 @@ Ndb_cluster_connection::~Ndb_cluster_connection()
abort();
TransporterFacade::theFacadeInstance= 0;
}
- if (m_connect_string)
- free(m_connect_string);
+ my_free(m_connect_string,MYF(MY_ALLOW_ZERO_PTR));
if (m_config_retriever)
delete m_config_retriever;
+ if (m_local_config)
+ delete m_local_config;
+ DBUG_VOID_RETURN;
}
diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c
index fdae5475d56..fdfd8a15fb0 100644
--- a/ndb/src/ndbapi/ndberror.c
+++ b/ndb/src/ndbapi/ndberror.c
@@ -281,6 +281,9 @@ ErrorBundle ErrorCodes[] = {
{ 739, SE, "Unsupported primary key length" },
{ 740, SE, "Nullable primary key not supported" },
{ 741, SE, "Unsupported alter table" },
+ { 742, SE, "Unsupported attribute type in index" },
+ { 743, SE, "Unsupported character set in table or index" },
+ { 744, SE, "Character string is invalid for given character set" },
{ 241, SE, "Invalid schema object version" },
{ 283, SE, "Table is being dropped" },
{ 284, SE, "Table not defined in transaction coordinator" },
diff --git a/ndb/test/include/NDBT_Table.hpp b/ndb/test/include/NDBT_Table.hpp
index 59db3ed1092..d2f99b85187 100644
--- a/ndb/test/include/NDBT_Table.hpp
+++ b/ndb/test/include/NDBT_Table.hpp
@@ -33,10 +33,10 @@ public:
{
assert(_name != 0);
+ setType(_type);
+ setLength(_length);
setNullable(_nullable);
setPrimaryKey(_pk);
- setLength(_length);
- setType(_type);
}
};
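
This reorder, repeated in the test programs below, puts setType() first; with NdbColumnImpl::init() now taking a type and filling in per-type defaults, length and key settings made before setType() could otherwise be reset. A minimal sketch of the resulting column-definition order, assuming the NdbDictionary interface used throughout this patch:

#include <NdbDictionary.hpp>   // assumed include path for the dictionary API

static void addCharKeyColumn(NdbDictionary::Table& table) {
  NdbDictionary::Column column;
  column.setName("NAME");
  column.setType(NdbDictionary::Column::Char);  // type first: sets the defaults
  column.setLength(12);                         // then length and key settings
  column.setPrimaryKey(true);
  column.setNullable(false);
  table.addColumn(column);
}
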
diff --git a/ndb/test/ndbapi/acid.cpp b/ndb/test/ndbapi/acid.cpp
index 157b3c7b3ef..3eb1625be26 100644
--- a/ndb/test/ndbapi/acid.cpp
+++ b/ndb/test/ndbapi/acid.cpp
@@ -434,6 +434,7 @@ extern "C" void* NdbThreadFuncRead(void* pArg)
NDB_COMMAND(acid, "acid", "acid", "acid", 65535)
{
+ ndb_init();
long nSeconds = 60;
int rc = NDBT_OK;
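
The same one-line change repeats in every test main() below: ndb_init() runs before anything else touches the NDB API. A minimal sketch of the pattern; the include is an assumption based on the headers these programs already use:

#include <ndb_global.h>   // assumed to declare ndb_init(), per the sources above

int main(int argc, const char** argv)
{
  ndb_init();             // must run before any other NDB API calls
  // ... set up Ndb objects and run the test as before ...
  return 0;
}
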
diff --git a/ndb/test/ndbapi/acid2.cpp b/ndb/test/ndbapi/acid2.cpp
index 434a0450daa..7bd7ec00ac5 100644
--- a/ndb/test/ndbapi/acid2.cpp
+++ b/ndb/test/ndbapi/acid2.cpp
@@ -610,6 +610,7 @@ extern "C" void* ThreadFunc(void*)
int main(int argc, char* argv[])
{
+ ndb_init();
Uint32 nSeconds = 1;
Uint32 nThread = 1;
diff --git a/ndb/test/ndbapi/bank/Bank.cpp b/ndb/test/ndbapi/bank/Bank.cpp
index 4581d1a9842..c6029259357 100644
--- a/ndb/test/ndbapi/bank/Bank.cpp
+++ b/ndb/test/ndbapi/bank/Bank.cpp
@@ -156,7 +156,14 @@ int Bank::performTransactionImpl1(int fromAccountId,
int check;
+ // Ok, all clear to do the transaction
+ Uint64 transId;
+ if (getNextTransactionId(transId) != NDBT_OK){
+ return NDBT_FAILED;
+ }
+
NdbConnection* pTrans = m_ndb.startTransaction();
+
if( pTrans == NULL ) {
const NdbError err = m_ndb.getNdbError();
if (err.status == NdbError::TemporaryError){
@@ -167,6 +174,13 @@ int Bank::performTransactionImpl1(int fromAccountId,
return NDBT_FAILED;
}
+ Uint64 currTime;
+ if (prepareGetCurrTimeOp(pTrans, currTime) != NDBT_OK){
+ ERR(pTrans->getNdbError());
+ m_ndb.closeTransaction(pTrans);
+ return NDBT_FAILED;
+ }
+
/**
* Check balance on from account
*/
@@ -205,29 +219,6 @@ int Bank::performTransactionImpl1(int fromAccountId,
return NDBT_FAILED;
}
- check = pTrans->execute(NoCommit);
- if( check == -1 ) {
- const NdbError err = pTrans->getNdbError();
- m_ndb.closeTransaction(pTrans);
- if (err.status == NdbError::TemporaryError){
- ERR(err);
- return NDBT_TEMPORARY;
- }
- ERR(err);
- return NDBT_FAILED;
- }
-
- Uint32 balanceFrom = balanceFromRec->u_32_value();
- // ndbout << "balanceFrom: " << balanceFrom << endl;
-
- if (((Int64)balanceFrom - amount) < 0){
- m_ndb.closeTransaction(pTrans);
- //ndbout << "Not enough funds" << endl;
- return NOT_ENOUGH_FUNDS;
- }
-
- Uint32 fromAccountType = fromAccountTypeRec->u_32_value();
-
/**
* Read balance on to account
*/
@@ -278,21 +269,22 @@ int Bank::performTransactionImpl1(int fromAccountId,
return NDBT_FAILED;
}
- Uint32 balanceTo = balanceToRec->u_32_value();
- // ndbout << "balanceTo: " << balanceTo << endl;
- Uint32 toAccountType = toAccountTypeRec->u_32_value();
- // Ok, all clear to do the transaction
- Uint64 transId;
- if (getNextTransactionId(transId) != NDBT_OK){
- return NDBT_FAILED;
- }
+ Uint32 balanceFrom = balanceFromRec->u_32_value();
+ // ndbout << "balanceFrom: " << balanceFrom << endl;
- Uint64 currTime;
- if (getCurrTime(currTime) != NDBT_OK){
- return NDBT_FAILED;
+ if (((Int64)balanceFrom - amount) < 0){
+ m_ndb.closeTransaction(pTrans);
+ //ndbout << "Not enough funds" << endl;
+ return NOT_ENOUGH_FUNDS;
}
+ Uint32 fromAccountType = fromAccountTypeRec->u_32_value();
+
+ Uint32 balanceTo = balanceToRec->u_32_value();
+ // ndbout << "balanceTo: " << balanceTo << endl;
+ Uint32 toAccountType = toAccountTypeRec->u_32_value();
+
/**
* Update balance on from account
*/
@@ -1988,47 +1980,50 @@ int Bank::readSystemValue(SystemValueId sysValId, Uint64 & value){
ERR(m_ndb.getNdbError());
return NDBT_FAILED;
}
-
- NdbOperation* pOp = pTrans->getNdbOperation("SYSTEM_VALUES");
- if (pOp == NULL) {
+
+ if (prepareReadSystemValueOp(pTrans, sysValId, value) != NDBT_OK) {
ERR(pTrans->getNdbError());
m_ndb.closeTransaction(pTrans);
return NDBT_FAILED;
}
-
- check = pOp->readTuple();
+
+ check = pTrans->execute(Commit);
if( check == -1 ) {
ERR(pTrans->getNdbError());
m_ndb.closeTransaction(pTrans);
return NDBT_FAILED;
}
- check = pOp->equal("SYSTEM_VALUES_ID", sysValId);
- if( check == -1 ) {
- ERR(pTrans->getNdbError());
- m_ndb.closeTransaction(pTrans);
+ m_ndb.closeTransaction(pTrans);
+ return NDBT_OK;
+
+}
+
+int Bank::prepareReadSystemValueOp(NdbConnection* pTrans, SystemValueId sysValId, Uint64 & value){
+
+ int check;
+
+ NdbOperation* pOp = pTrans->getNdbOperation("SYSTEM_VALUES");
+ if (pOp == NULL) {
return NDBT_FAILED;
}
- NdbRecAttr* valueRec = pOp->getValue("VALUE");
- if( valueRec ==NULL ) {
- ERR(pTrans->getNdbError());
- m_ndb.closeTransaction(pTrans);
+ check = pOp->readTuple();
+ if( check == -1 ) {
return NDBT_FAILED;
}
- check = pTrans->execute(Commit);
+ check = pOp->equal("SYSTEM_VALUES_ID", sysValId);
if( check == -1 ) {
- ERR(pTrans->getNdbError());
- m_ndb.closeTransaction(pTrans);
return NDBT_FAILED;
}
- value = valueRec->u_64_value();
+ NdbRecAttr* valueRec = pOp->getValue("VALUE", (char *)&value);
+ if( valueRec == NULL ) {
+ return NDBT_FAILED;
+ }
- m_ndb.closeTransaction(pTrans);
return NDBT_OK;
-
}
int Bank::writeSystemValue(SystemValueId sysValId, Uint64 value){
@@ -2307,6 +2302,10 @@ int Bank::getCurrTime(Uint64 &time){
return readSystemValue(CurrentTime, time);
}
+int Bank::prepareGetCurrTimeOp(NdbConnection *pTrans, Uint64 &time){
+ return prepareReadSystemValueOp(pTrans, CurrentTime, time);
+}
+
int Bank::performSumAccounts(int maxSleepBetweenSums, int yield){
if (init() != NDBT_OK)
diff --git a/ndb/test/ndbapi/bank/Bank.hpp b/ndb/test/ndbapi/bank/Bank.hpp
index e6816fd7111..34c5ff51cc2 100644
--- a/ndb/test/ndbapi/bank/Bank.hpp
+++ b/ndb/test/ndbapi/bank/Bank.hpp
@@ -29,7 +29,7 @@ public:
Bank();
- int createAndLoadBank(bool overWrite);
+ int createAndLoadBank(bool overWrite, int num_accounts=10);
int dropBank();
int performTransactions(int maxSleepBetweenTrans = 20, int yield=0);
@@ -118,6 +118,9 @@ private:
int incCurrTime(Uint64 &value);
int getCurrTime(Uint64 &time);
+ int prepareReadSystemValueOp(NdbConnection*, SystemValueId sysValId, Uint64 &time);
+ int prepareGetCurrTimeOp(NdbConnection*, Uint64 &time);
+
int createTables();
int createTable(const char* tabName);
diff --git a/ndb/test/ndbapi/bank/BankLoad.cpp b/ndb/test/ndbapi/bank/BankLoad.cpp
index bbaac27735b..39dc8097115 100644
--- a/ndb/test/ndbapi/bank/BankLoad.cpp
+++ b/ndb/test/ndbapi/bank/BankLoad.cpp
@@ -53,7 +53,7 @@ int Bank::getNumAccountTypes(){
return accountTypesSize;
}
-int Bank::createAndLoadBank(bool ovrWrt){
+int Bank::createAndLoadBank(bool ovrWrt, int num_accounts){
m_ndb.init();
if (m_ndb.waitUntilReady() != 0)
@@ -78,7 +78,7 @@ int Bank::createAndLoadBank(bool ovrWrt){
if (loadAccountType() != NDBT_OK)
return NDBT_FAILED;
- if (loadAccount(10) != NDBT_OK)
+ if (loadAccount(num_accounts) != NDBT_OK)
return NDBT_FAILED;
if (loadSystemValues() != NDBT_OK)
diff --git a/ndb/test/ndbapi/bank/bankCreator.cpp b/ndb/test/ndbapi/bank/bankCreator.cpp
index 5331ec6ba69..301d8bda6d2 100644
--- a/ndb/test/ndbapi/bank/bankCreator.cpp
+++ b/ndb/test/ndbapi/bank/bankCreator.cpp
@@ -27,6 +27,7 @@
int main(int argc, const char** argv){
+ ndb_init();
int _help = 0;
struct getargs args[] = {
diff --git a/ndb/test/ndbapi/bank/bankMakeGL.cpp b/ndb/test/ndbapi/bank/bankMakeGL.cpp
index 54bc559fbf9..9e2762ed8ae 100644
--- a/ndb/test/ndbapi/bank/bankMakeGL.cpp
+++ b/ndb/test/ndbapi/bank/bankMakeGL.cpp
@@ -27,6 +27,7 @@
int main(int argc, const char** argv){
+ ndb_init();
int _help = 0;
struct getargs args[] = {
diff --git a/ndb/test/ndbapi/bank/bankSumAccounts.cpp b/ndb/test/ndbapi/bank/bankSumAccounts.cpp
index c0a903f9034..b576161b27b 100644
--- a/ndb/test/ndbapi/bank/bankSumAccounts.cpp
+++ b/ndb/test/ndbapi/bank/bankSumAccounts.cpp
@@ -27,6 +27,7 @@
int main(int argc, const char** argv){
+ ndb_init();
int _help = 0;
struct getargs args[] = {
diff --git a/ndb/test/ndbapi/bank/bankTimer.cpp b/ndb/test/ndbapi/bank/bankTimer.cpp
index ba3165fccb4..874afd9c21e 100644
--- a/ndb/test/ndbapi/bank/bankTimer.cpp
+++ b/ndb/test/ndbapi/bank/bankTimer.cpp
@@ -28,6 +28,7 @@
int main(int argc, const char** argv){
+ ndb_init();
int _help = 0;
int _wait = 30;
diff --git a/ndb/test/ndbapi/bank/bankTransactionMaker.cpp b/ndb/test/ndbapi/bank/bankTransactionMaker.cpp
index fe9b53e0c8d..e5ff9aeb918 100644
--- a/ndb/test/ndbapi/bank/bankTransactionMaker.cpp
+++ b/ndb/test/ndbapi/bank/bankTransactionMaker.cpp
@@ -28,6 +28,7 @@
int main(int argc, const char** argv){
+ ndb_init();
int _help = 0;
int _wait = 20;
diff --git a/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp b/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp
index f9d974bb5f7..cf298ecc8e3 100644
--- a/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp
+++ b/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp
@@ -28,6 +28,7 @@
int main(int argc, const char** argv){
+ ndb_init();
int _help = 0;
struct getargs args[] = {
diff --git a/ndb/test/ndbapi/bank/testBank.cpp b/ndb/test/ndbapi/bank/testBank.cpp
index 77ac1172d7c..3ef2799cd3c 100644
--- a/ndb/test/ndbapi/bank/testBank.cpp
+++ b/ndb/test/ndbapi/bank/testBank.cpp
@@ -141,6 +141,7 @@ TESTCASE("Bank",
NDBT_TESTSUITE_END(testBank);
int main(int argc, const char** argv){
+ ndb_init();
// Tables should not be auto created
testBank.setCreateTable(false);
diff --git a/ndb/test/ndbapi/benchronja.cpp b/ndb/test/ndbapi/benchronja.cpp
index ce0aee35e8f..91b2a041186 100644
--- a/ndb/test/ndbapi/benchronja.cpp
+++ b/ndb/test/ndbapi/benchronja.cpp
@@ -66,6 +66,7 @@ static int ThreadReady[MAXTHREADS];
static int ThreadStart[MAXTHREADS];
NDB_COMMAND(benchronja, "benchronja", "benchronja", "benchronja", 65535){
+ ndb_init();
ThreadNdb tabThread[MAXTHREADS];
int i = 0 ;
diff --git a/ndb/test/ndbapi/bulk_copy.cpp b/ndb/test/ndbapi/bulk_copy.cpp
index 18881cae216..8821a92fb27 100644
--- a/ndb/test/ndbapi/bulk_copy.cpp
+++ b/ndb/test/ndbapi/bulk_copy.cpp
@@ -221,6 +221,7 @@ int insertFile(Ndb* pNdb,
int main(int argc, const char** argv){
+ ndb_init();
const char* _tabname = NULL;
int _help = 0;
diff --git a/ndb/test/ndbapi/cdrserver.cpp b/ndb/test/ndbapi/cdrserver.cpp
index 8354d28f53f..8d15061e94b 100644
--- a/ndb/test/ndbapi/cdrserver.cpp
+++ b/ndb/test/ndbapi/cdrserver.cpp
@@ -113,6 +113,7 @@ using namespace std;
int main(int argc, const char** argv)
{
+ ndb_init();
/******** NDB ***********/
/*
Ndb MyNdb( "TEST_DB" );
diff --git a/ndb/test/ndbapi/celloDb.cpp b/ndb/test/ndbapi/celloDb.cpp
index ec61e783585..2d6401c355a 100644
--- a/ndb/test/ndbapi/celloDb.cpp
+++ b/ndb/test/ndbapi/celloDb.cpp
@@ -73,6 +73,7 @@ static int failed = 0 ;
NDB_COMMAND(celloDb, "celloDb", "celloDb", "celloDb", 65535)
{
+ ndb_init();
int tTableId;
int i;
diff --git a/ndb/test/ndbapi/create_all_tabs.cpp b/ndb/test/ndbapi/create_all_tabs.cpp
index 55d04888144..97236b98b36 100644
--- a/ndb/test/ndbapi/create_all_tabs.cpp
+++ b/ndb/test/ndbapi/create_all_tabs.cpp
@@ -25,6 +25,7 @@
int main(int argc, const char** argv){
+ ndb_init();
int _temp = false;
int _help = 0;
diff --git a/ndb/test/ndbapi/create_tab.cpp b/ndb/test/ndbapi/create_tab.cpp
index c2e3b7f64ea..f3f18982ed0 100644
--- a/ndb/test/ndbapi/create_tab.cpp
+++ b/ndb/test/ndbapi/create_tab.cpp
@@ -25,6 +25,7 @@
int main(int argc, const char** argv){
+ ndb_init();
int _temp = false;
int _help = 0;
diff --git a/ndb/test/ndbapi/drop_all_tabs.cpp b/ndb/test/ndbapi/drop_all_tabs.cpp
index 59c57396acd..c024a81a5e6 100644
--- a/ndb/test/ndbapi/drop_all_tabs.cpp
+++ b/ndb/test/ndbapi/drop_all_tabs.cpp
@@ -23,6 +23,7 @@
#include <getarg.h>
int main(int argc, const char** argv){
+ ndb_init();
int _help = 0;
struct getargs args[] = {
diff --git a/ndb/test/ndbapi/flexAsynch.cpp b/ndb/test/ndbapi/flexAsynch.cpp
index 9192ec21b93..8c0ba46130c 100644
--- a/ndb/test/ndbapi/flexAsynch.cpp
+++ b/ndb/test/ndbapi/flexAsynch.cpp
@@ -145,6 +145,7 @@ tellThreads(StartType what)
NDB_COMMAND(flexAsynch, "flexAsynch", "flexAsynch", "flexAsynch", 65535)
{
+ ndb_init();
ThreadNdb* pThreadData;
int tLoops=0, i;
int returnValue = NDBT_OK;
diff --git a/ndb/test/ndbapi/flexBench.cpp b/ndb/test/ndbapi/flexBench.cpp
index 38c8f6e280f..b19944498f4 100644
--- a/ndb/test/ndbapi/flexBench.cpp
+++ b/ndb/test/ndbapi/flexBench.cpp
@@ -281,6 +281,7 @@ tellThreads(ThreadData* pt, StartType what)
NDB_COMMAND(flexBench, "flexBench", "flexBench", "flexbench", 65535)
{
+ ndb_init();
ThreadData* pThreadsData;
int tLoops = 0, i;
int returnValue = NDBT_OK;
diff --git a/ndb/test/ndbapi/flexHammer.cpp b/ndb/test/ndbapi/flexHammer.cpp
index c1c47923de9..80cc7c5a53f 100644
--- a/ndb/test/ndbapi/flexHammer.cpp
+++ b/ndb/test/ndbapi/flexHammer.cpp
@@ -178,6 +178,7 @@ tellThreads(ThreadNdb* threadArrayP, const StartType what)
NDB_COMMAND(flexHammer, "flexHammer", "flexHammer", "flexHammer", 65535)
//main(int argc, const char** argv)
{
+ ndb_init();
ThreadNdb* pThreads = NULL; // Pointer to thread data array
Ndb* pMyNdb = NULL; // Pointer to Ndb object
int tLoops = 0;
diff --git a/ndb/test/ndbapi/flexScan.cpp b/ndb/test/ndbapi/flexScan.cpp
index 5b5b4dde730..b09d71fb010 100644
--- a/ndb/test/ndbapi/flexScan.cpp
+++ b/ndb/test/ndbapi/flexScan.cpp
@@ -297,6 +297,7 @@ static int checkThreadResults(ThreadNdb *threadArrayP, char *action)
NDB_COMMAND(flexScan, "flexScan", "flexScan", "flexScan", 65535)
{
+ ndb_init();
ThreadNdb* pThreads = NULL;
Ndb* pMyNdb = NULL;
int tLoops = 0;
diff --git a/ndb/test/ndbapi/flexTT.cpp b/ndb/test/ndbapi/flexTT.cpp
index c0ff31d1677..162fc080218 100644
--- a/ndb/test/ndbapi/flexTT.cpp
+++ b/ndb/test/ndbapi/flexTT.cpp
@@ -171,6 +171,7 @@ tellThreads(StartType what)
NDB_COMMAND(flexTT, "flexTT", "flexTT", "flexTT", 65535)
{
+ ndb_init();
ThreadNdb* pThreadData;
int returnValue = NDBT_OK;
int i;
diff --git a/ndb/test/ndbapi/flexTimedAsynch.cpp b/ndb/test/ndbapi/flexTimedAsynch.cpp
index 761be53fdd3..27380cc79fd 100644
--- a/ndb/test/ndbapi/flexTimedAsynch.cpp
+++ b/ndb/test/ndbapi/flexTimedAsynch.cpp
@@ -174,6 +174,7 @@ void deleteAttributeSpace(){
NDB_COMMAND(flexTimedAsynch, "flexTimedAsynch", "flexTimedAsynch [-tpoilcas]", "flexTimedAsynch", 65535)
{
+ ndb_init();
ThreadNdb tabThread[MAXTHREADS];
int tLoops=0;
int returnValue;
diff --git a/ndb/test/ndbapi/flex_bench_mysql.cpp b/ndb/test/ndbapi/flex_bench_mysql.cpp
index 7cc883ab3e6..8e1fbcd9058 100644
--- a/ndb/test/ndbapi/flex_bench_mysql.cpp
+++ b/ndb/test/ndbapi/flex_bench_mysql.cpp
@@ -308,6 +308,7 @@ tellThreads(ThreadData* pt, StartType what)
NDB_COMMAND(flexBench, "flexBench", "flexBench", "flexbench", 65535)
{
+ ndb_init();
ThreadData* pThreadsData;
int tLoops = 0;
int returnValue = NDBT_OK;
diff --git a/ndb/test/ndbapi/index.cpp b/ndb/test/ndbapi/index.cpp
index 508186de529..c22da594164 100644
--- a/ndb/test/ndbapi/index.cpp
+++ b/ndb/test/ndbapi/index.cpp
@@ -81,63 +81,63 @@ static void createTable(Ndb &myNdb, bool storeInACC, bool twoKey, bool longKey)
int res;
column.setName("NAME");
- column.setPrimaryKey(true);
column.setType(NdbDictionary::Column::Char);
column.setLength((longKey)?
1024 // 1KB => long key
:12);
+ column.setPrimaryKey(true);
column.setNullable(false);
table.addColumn(column);
if (twoKey) {
column.setName("KEY2");
- column.setPrimaryKey(true);
column.setType(NdbDictionary::Column::Unsigned);
column.setLength(1);
+ column.setPrimaryKey(true);
column.setNullable(false);
table.addColumn(column);
}
column.setName("PNUM1");
- column.setPrimaryKey(false);
column.setType(NdbDictionary::Column::Unsigned);
column.setLength(1);
+ column.setPrimaryKey(false);
column.setNullable(false);
table.addColumn(column);
column.setName("PNUM2");
- column.setPrimaryKey(false);
column.setType(NdbDictionary::Column::Unsigned);
column.setLength(1);
+ column.setPrimaryKey(false);
column.setNullable(false);
table.addColumn(column);
column.setName("PNUM3");
- column.setPrimaryKey(false);
column.setType(NdbDictionary::Column::Unsigned);
column.setLength(1);
+ column.setPrimaryKey(false);
column.setNullable(false);
table.addColumn(column);
column.setName("PNUM4");
- column.setPrimaryKey(false);
column.setType(NdbDictionary::Column::Unsigned);
column.setLength(1);
+ column.setPrimaryKey(false);
column.setNullable(false);
table.addColumn(column);
column.setName("AGE");
- column.setPrimaryKey(false);
column.setType(NdbDictionary::Column::Unsigned);
column.setLength(1);
+ column.setPrimaryKey(false);
column.setNullable(false);
table.addColumn(column);
column.setName("STRING_AGE");
- column.setPrimaryKey(false);
column.setType(NdbDictionary::Column::Char);
column.setLength(1);
column.setLength(256);
+ column.setPrimaryKey(false);
column.setNullable(false);
table.addColumn(column);
@@ -771,6 +771,7 @@ static void dropIndex(Ndb &myNdb, unsigned int noOfIndexes)
NDB_COMMAND(indexTest, "indexTest", "indexTest", "indexTest", 65535)
{
+ ndb_init();
bool createTableOp, createIndexOp, dropIndexOp, insertOp, updateOp, deleteOp, readOp, readIndexOp, updateIndexOp, deleteIndexOp, twoKey, longKey;
unsigned int noOfTuples = 1;
unsigned int noOfOperations = 1;
diff --git a/ndb/test/ndbapi/index2.cpp b/ndb/test/ndbapi/index2.cpp
index e49113d2f1b..f739468d7df 100644
--- a/ndb/test/ndbapi/index2.cpp
+++ b/ndb/test/ndbapi/index2.cpp
@@ -81,16 +81,16 @@ static void createTable(Ndb &myNdb, bool storeInACC, bool twoKey, bool longKey)
int res;
column.setName("X");
- column.setPrimaryKey(true);
column.setType(NdbDictionary::Column::Unsigned);
column.setLength(1);
+ column.setPrimaryKey(true);
column.setNullable(false);
table.addColumn(column);
column.setName("Y");
- column.setPrimaryKey(false);
column.setType(NdbDictionary::Column::Unsigned);
column.setLength(1);
+ column.setPrimaryKey(false);
column.setNullable(false);
table.addColumn(column);
@@ -608,6 +608,7 @@ static void dropIndex(Ndb &myNdb, unsigned int noOfIndexes)
NDB_COMMAND(indexTest, "indexTest", "indexTest", "indexTest", 65535)
{
+ ndb_init();
bool createTableOp, createIndexOp, dropIndexOp, insertOp, updateOp, deleteOp, readOp, readIndexOp, updateIndexOp, deleteIndexOp, twoKey, longKey;
unsigned int noOfTuples = 1;
unsigned int noOfOperations = 1;
diff --git a/ndb/test/ndbapi/initronja.cpp b/ndb/test/ndbapi/initronja.cpp
index b3215104822..3ce274e4319 100644
--- a/ndb/test/ndbapi/initronja.cpp
+++ b/ndb/test/ndbapi/initronja.cpp
@@ -46,6 +46,7 @@ static char attrName[MAXATTR][MAXSTRLEN];
inline int InsertRecords(Ndb*, int) ;
NDB_COMMAND(initronja, "initronja", "initronja", "initronja", 65535){
+ ndb_init();
Ndb* pNdb = NULL ;
NdbSchemaCon *MySchemaTransaction = NULL ;
diff --git a/ndb/test/ndbapi/interpreterInTup.cpp b/ndb/test/ndbapi/interpreterInTup.cpp
index 47960cd5d12..20d84e6e96d 100644
--- a/ndb/test/ndbapi/interpreterInTup.cpp
+++ b/ndb/test/ndbapi/interpreterInTup.cpp
@@ -105,6 +105,7 @@ int bTestPassed = 0;
int main(int argc, const char** argv) {
+ ndb_init();
int operationType = 0;
int tupTest = 0;
diff --git a/ndb/test/ndbapi/mainAsyncGenerator.cpp b/ndb/test/ndbapi/mainAsyncGenerator.cpp
index f613c66d07b..16cb50e160f 100644
--- a/ndb/test/ndbapi/mainAsyncGenerator.cpp
+++ b/ndb/test/ndbapi/mainAsyncGenerator.cpp
@@ -282,6 +282,7 @@ threadRoutine(void *arg)
NDB_COMMAND(DbAsyncGenerator, "DbAsyncGenerator",
"DbAsyncGenerator", "DbAsyncGenerator", 65535)
{
+ ndb_init();
int i;
int j;
int k;
diff --git a/ndb/test/ndbapi/msa.cpp b/ndb/test/ndbapi/msa.cpp
index 7a734f9cb79..e39f7a8c64a 100644
--- a/ndb/test/ndbapi/msa.cpp
+++ b/ndb/test/ndbapi/msa.cpp
@@ -971,6 +971,7 @@ void ShowHelp(const char* szCmd)
int main(int argc, char* argv[])
{
+ ndb_init();
int iRes = -1;
g_nNumThreads = 0;
g_nMaxCallsPerSecond = 0;
diff --git a/ndb/test/ndbapi/restarter.cpp b/ndb/test/ndbapi/restarter.cpp
index 9a522f5dcac..d6831494b48 100644
--- a/ndb/test/ndbapi/restarter.cpp
+++ b/ndb/test/ndbapi/restarter.cpp
@@ -28,6 +28,7 @@
#include <NDBT.hpp>
int main(int argc, const char** argv){
+ ndb_init();
const char* _hostName = NULL;
int _loops = 10;
diff --git a/ndb/test/ndbapi/restarter2.cpp b/ndb/test/ndbapi/restarter2.cpp
index f2bcf6f8e7b..846748a7bba 100644
--- a/ndb/test/ndbapi/restarter2.cpp
+++ b/ndb/test/ndbapi/restarter2.cpp
@@ -26,6 +26,7 @@
#include <NDBT.hpp>
int main(int argc, const char** argv){
+ ndb_init();
const char* _hostName = NULL;
int _loops = 10;
diff --git a/ndb/test/ndbapi/restarts.cpp b/ndb/test/ndbapi/restarts.cpp
index 0ec2883d53c..184e754de4a 100644
--- a/ndb/test/ndbapi/restarts.cpp
+++ b/ndb/test/ndbapi/restarts.cpp
@@ -27,6 +27,7 @@
#include <NDBT.hpp>
int main(int argc, const char** argv){
+ ndb_init();
const char* _restartName = NULL;
int _loops = 1;
diff --git a/ndb/test/ndbapi/size.cpp b/ndb/test/ndbapi/size.cpp
index c506771ebde..ff178b11d68 100644
--- a/ndb/test/ndbapi/size.cpp
+++ b/ndb/test/ndbapi/size.cpp
@@ -19,6 +19,7 @@
int main(void)
{
+ ndb_init();
printf("cdrstruct=%d\n",sizeof(struct cdr_record));
printf("long int=%d\n",sizeof(long int));
printf("int=%d\n",sizeof(int));
diff --git a/ndb/test/ndbapi/slow_select.cpp b/ndb/test/ndbapi/slow_select.cpp
index a953e1539d0..625dbc34457 100644
--- a/ndb/test/ndbapi/slow_select.cpp
+++ b/ndb/test/ndbapi/slow_select.cpp
@@ -36,6 +36,7 @@ static void lookup();
int
main(void){
+ ndb_init();
Ndb g_ndb("test");
g_ndb.init(1024);
diff --git a/ndb/test/ndbapi/testBackup.cpp b/ndb/test/ndbapi/testBackup.cpp
index d328a7db292..77b9d0a4baa 100644
--- a/ndb/test/ndbapi/testBackup.cpp
+++ b/ndb/test/ndbapi/testBackup.cpp
@@ -149,6 +149,9 @@ int runRestartInitial(NDBT_Context* ctx, NDBT_Step* step){
if (restarter.restartAll(true) != 0)
return NDBT_FAILED;
+ if (restarter.waitClusterStarted() != 0)
+ return NDBT_FAILED;
+
return NDBT_OK;
}
@@ -215,7 +218,7 @@ int runDropTable(NDBT_Context* ctx, NDBT_Step* step){
int runCreateBank(NDBT_Context* ctx, NDBT_Step* step){
Bank bank;
int overWriteExisting = true;
- if (bank.createAndLoadBank(overWriteExisting) != NDBT_OK)
+ if (bank.createAndLoadBank(overWriteExisting, 10) != NDBT_OK)
return NDBT_FAILED;
return NDBT_OK;
}
@@ -413,7 +416,6 @@ TESTCASE("BackupOne",
INITIALIZER(runRestoreOne);
VERIFIER(runVerifyOne);
FINALIZER(runClearTable);
- FINALIZER(runDropTable);
}
TESTCASE("BackupBank",
"Test that backup and restore works during transaction load\n"
@@ -428,6 +430,15 @@ TESTCASE("BackupBank",
INITIALIZER(runCreateBank);
STEP(runBankTimer);
STEP(runBankTransactions);
+ STEP(runBankTransactions);
+ STEP(runBankTransactions);
+ STEP(runBankTransactions);
+ STEP(runBankTransactions);
+ STEP(runBankTransactions);
+ STEP(runBankTransactions);
+ STEP(runBankTransactions);
+ STEP(runBankTransactions);
+ STEP(runBankTransactions);
STEP(runBankGL);
// TODO STEP(runBankSum);
STEP(runBackupBank);
@@ -473,6 +484,7 @@ TESTCASE("FailSlave",
NDBT_TESTSUITE_END(testBackup);
int main(int argc, const char** argv){
+ ndb_init();
return testBackup.execute(argc, argv);
}
diff --git a/ndb/test/ndbapi/testBasic.cpp b/ndb/test/ndbapi/testBasic.cpp
index 26622f9b066..7d03016b87a 100644
--- a/ndb/test/ndbapi/testBasic.cpp
+++ b/ndb/test/ndbapi/testBasic.cpp
@@ -1278,6 +1278,7 @@ TESTCASE("MassiveTransaction",
NDBT_TESTSUITE_END(testBasic);
int main(int argc, const char** argv){
+ ndb_init();
return testBasic.execute(argc, argv);
}
diff --git a/ndb/test/ndbapi/testBasicAsynch.cpp b/ndb/test/ndbapi/testBasicAsynch.cpp
index a97920e53da..6daa22fdc6a 100644
--- a/ndb/test/ndbapi/testBasicAsynch.cpp
+++ b/ndb/test/ndbapi/testBasicAsynch.cpp
@@ -181,6 +181,7 @@ TESTCASE("PkDeleteAsynch",
NDBT_TESTSUITE_END(testBasicAsynch);
int main(int argc, const char** argv){
+ ndb_init();
return testBasicAsynch.execute(argc, argv);
}
diff --git a/ndb/test/ndbapi/testBlobs.cpp b/ndb/test/ndbapi/testBlobs.cpp
index 64881ca39ab..e18f4a8bd1a 100644
--- a/ndb/test/ndbapi/testBlobs.cpp
+++ b/ndb/test/ndbapi/testBlobs.cpp
@@ -1338,6 +1338,7 @@ static struct {
NDB_COMMAND(testOdbcDriver, "testBlobs", "testBlobs", "testBlobs", 65535)
{
+ ndb_init();
while (++argv, --argc > 0) {
const char* arg = argv[0];
if (strcmp(arg, "-batch") == 0) {
diff --git a/ndb/test/ndbapi/testDataBuffers.cpp b/ndb/test/ndbapi/testDataBuffers.cpp
index 2e29dbb0d7b..94658d5c6b9 100644
--- a/ndb/test/ndbapi/testDataBuffers.cpp
+++ b/ndb/test/ndbapi/testDataBuffers.cpp
@@ -545,6 +545,7 @@ testcase(int flag)
NDB_COMMAND(testDataBuffers, "testDataBuffers", "testDataBuffers", "testDataBuffers", 65535)
{
+ ndb_init();
while (++argv, --argc > 0) {
char const* p = argv[0];
if (*p++ != '-' || strlen(p) != 1)
diff --git a/ndb/test/ndbapi/testDeadlock.cpp b/ndb/test/ndbapi/testDeadlock.cpp
index f51b3cea1e5..a445823b8a8 100644
--- a/ndb/test/ndbapi/testDeadlock.cpp
+++ b/ndb/test/ndbapi/testDeadlock.cpp
@@ -491,6 +491,7 @@ wl1822_main(char scantx)
NDB_COMMAND(testOdbcDriver, "testDeadlock", "testDeadlock", "testDeadlock", 65535)
{
+ ndb_init();
while (++argv, --argc > 0) {
const char* arg = argv[0];
if (strcmp(arg, "-scan") == 0) {
diff --git a/ndb/test/ndbapi/testDict.cpp b/ndb/test/ndbapi/testDict.cpp
index a0c7bb1414b..7cba5ce4cc8 100644
--- a/ndb/test/ndbapi/testDict.cpp
+++ b/ndb/test/ndbapi/testDict.cpp
@@ -1128,9 +1128,9 @@ runCreateAutoincrementTable(NDBT_Context* ctx, NDBT_Step* step){
myTable.setName(tabname);
myColumn.setName("ATTR1");
- myColumn.setPrimaryKey(true);
myColumn.setType(NdbDictionary::Column::Unsigned);
myColumn.setLength(1);
+ myColumn.setPrimaryKey(true);
myColumn.setNullable(false);
myColumn.setAutoIncrement(true);
if (startvalue != ~0) // check that default value starts with 1
@@ -1576,6 +1576,7 @@ TESTCASE("DictionaryPerf",
NDBT_TESTSUITE_END(testDict);
int main(int argc, const char** argv){
+ ndb_init();
// Tables should not be auto created
testDict.setCreateTable(false);
myRandom48Init(NdbTick_CurrentMillisecond());
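
Note the reordering in runCreateAutoincrementTable: the column gets its type and length before it is flagged as primary key. The same reordering appears in testOIBasic.cpp below, presumably because attributes set before setType() can be reset by it. A sketch of the order the patch settles on, using the NdbDictionary API:

    NdbDictionary::Column col("ATTR1");
    col.setType(NdbDictionary::Column::Unsigned);  // type first
    col.setLength(1);                              // then length
    col.setPrimaryKey(true);                       // key/null/auto-increment flags afterwards
    col.setNullable(false);
    col.setAutoIncrement(true);
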
diff --git a/ndb/test/ndbapi/testGrep.cpp b/ndb/test/ndbapi/testGrep.cpp
index 0bf84cb4ec8..713aefbeafa 100644
--- a/ndb/test/ndbapi/testGrep.cpp
+++ b/ndb/test/ndbapi/testGrep.cpp
@@ -533,6 +533,7 @@ TESTCASE("FailSlave",
NDBT_TESTSUITE_END(testGrep);
int main(int argc, const char** argv){
+ ndb_init();
return testGrep.execute(argc, argv);
}
diff --git a/ndb/test/ndbapi/testGrepVerify.cpp b/ndb/test/ndbapi/testGrepVerify.cpp
index 05445c1ba1b..52dcda9a162 100644
--- a/ndb/test/ndbapi/testGrepVerify.cpp
+++ b/ndb/test/ndbapi/testGrepVerify.cpp
@@ -40,6 +40,7 @@
continue; }
int main(int argc, const char** argv){
+ ndb_init();
const char * connectString = NULL;
diff --git a/ndb/test/ndbapi/testIndex.cpp b/ndb/test/ndbapi/testIndex.cpp
index 6ebbfd8b680..bef3b310c96 100644
--- a/ndb/test/ndbapi/testIndex.cpp
+++ b/ndb/test/ndbapi/testIndex.cpp
@@ -1528,6 +1528,7 @@ TESTCASE("UniqueNull",
NDBT_TESTSUITE_END(testIndex);
int main(int argc, const char** argv){
+ ndb_init();
return testIndex.execute(argc, argv);
}
diff --git a/ndb/test/ndbapi/testInterpreter.cpp b/ndb/test/ndbapi/testInterpreter.cpp
index 9c584d6f581..0baba33d2b2 100644
--- a/ndb/test/ndbapi/testInterpreter.cpp
+++ b/ndb/test/ndbapi/testInterpreter.cpp
@@ -224,6 +224,7 @@ TESTCASE("NdbErrorOperation",
NDBT_TESTSUITE_END(testInterpreter);
int main(int argc, const char** argv){
+ ndb_init();
// TABLE("T1");
return testInterpreter.execute(argc, argv);
}
diff --git a/ndb/test/ndbapi/testMgm.cpp b/ndb/test/ndbapi/testMgm.cpp
index d5b9372cc9b..ef653d3f972 100644
--- a/ndb/test/ndbapi/testMgm.cpp
+++ b/ndb/test/ndbapi/testMgm.cpp
@@ -178,6 +178,7 @@ TESTCASE("SingleUserMode",
NDBT_TESTSUITE_END(testMgm);
int main(int argc, const char** argv){
+ ndb_init();
myRandom48Init(NdbTick_CurrentMillisecond());
return testMgm.execute(argc, argv);
}
diff --git a/ndb/test/ndbapi/testNdbApi.cpp b/ndb/test/ndbapi/testNdbApi.cpp
index 5b171d42578..47987629fe3 100644
--- a/ndb/test/ndbapi/testNdbApi.cpp
+++ b/ndb/test/ndbapi/testNdbApi.cpp
@@ -1006,6 +1006,7 @@ TESTCASE("NdbErrorOperation",
NDBT_TESTSUITE_END(testNdbApi);
int main(int argc, const char** argv){
+ ndb_init();
// TABLE("T1");
return testNdbApi.execute(argc, argv);
}
diff --git a/ndb/test/ndbapi/testNodeRestart.cpp b/ndb/test/ndbapi/testNodeRestart.cpp
index 89b38c78e71..6bfe59f8d3f 100644
--- a/ndb/test/ndbapi/testNodeRestart.cpp
+++ b/ndb/test/ndbapi/testNodeRestart.cpp
@@ -434,6 +434,7 @@ TESTCASE("StopOnError",
NDBT_TESTSUITE_END(testNodeRestart);
int main(int argc, const char** argv){
+ ndb_init();
#if 0
// It might be interesting to have longer defaults for num
// loops in this test
diff --git a/ndb/test/ndbapi/testOIBasic.cpp b/ndb/test/ndbapi/testOIBasic.cpp
index 29d03f0c33e..f9eb3514926 100644
--- a/ndb/test/ndbapi/testOIBasic.cpp
+++ b/ndb/test/ndbapi/testOIBasic.cpp
@@ -28,6 +28,7 @@
#include <NdbCondition.h>
#include <NdbThread.h>
#include <NdbTick.h>
+#include <my_sys.h>
// options
@@ -37,6 +38,8 @@ struct Opt {
const char* m_bound;
const char* m_case;
bool m_core;
+ const char* m_csname;
+ CHARSET_INFO* m_cs;
bool m_dups;
NdbDictionary::Object::FragmentType m_fragtype;
unsigned m_idxloop;
@@ -59,6 +62,8 @@ struct Opt {
m_bound("01234"),
m_case(0),
m_core(false),
+ m_csname("latin1_bin"),
+ m_cs(0),
m_dups(false),
m_fragtype(NdbDictionary::Object::FragUndefined),
m_idxloop(4),
@@ -94,6 +99,7 @@ printhelp()
<< " -bound xyz use only these bound types 0-4 [" << d.m_bound << "]" << endl
<< " -case abc only given test cases (letters a-z)" << endl
<< " -core core dump on error [" << d.m_core << "]" << endl
+ << " -csname S charset (collation) of non-pk char column [" << d.m_csname << "]" << endl
<< " -dups allow duplicate tuples from index scan [" << d.m_dups << "]" << endl
<< " -fragtype T fragment type single/small/medium/large" << endl
<< " -index xyz only given index numbers (digits 1-9)" << endl
@@ -979,10 +985,14 @@ createtable(Par par)
for (unsigned k = 0; k < tab.m_cols; k++) {
const Col& col = tab.m_col[k];
NdbDictionary::Column c(col.m_name);
- c.setPrimaryKey(col.m_pk);
c.setType(col.m_type);
c.setLength(col.m_length);
+ c.setPrimaryKey(col.m_pk);
c.setNullable(col.m_nullable);
+ if (c.getCharset()) { // test if char type
+ if (! col.m_pk)
+ c.setCharset(par.m_cs);
+ }
t.addColumn(c);
}
con.m_dic = con.m_ndb->getDictionary();
@@ -2236,9 +2246,8 @@ pkreadfast(Par par, unsigned count)
keyrow.calc(par, i);
CHK(keyrow.selrow(par) == 0);
NdbRecAttr* rec;
- CHK(con.getValue((Uint32)0, rec) == 0);
- CHK(con.executeScan() == 0);
// get 1st column
+ CHK(con.getValue((Uint32)0, rec) == 0);
CHK(con.execute(Commit) == 0);
con.closeTransaction();
}
@@ -3150,6 +3159,10 @@ runtest(Par par)
LL1("start");
if (par.m_seed != 0)
srandom(par.m_seed);
+ assert(par.m_csname != 0);
+ CHARSET_INFO* cs;
+ CHK((cs = get_charset_by_name(par.m_csname, MYF(0))) != 0 || (cs = get_charset_by_csname(par.m_csname, MY_CS_PRIMARY, MYF(0))) != 0);
+ par.m_cs = cs;
Con con;
CHK(con.connect() == 0);
par.m_con = &con;
@@ -3201,6 +3214,7 @@ runtest(Par par)
NDB_COMMAND(testOIBasic, "testOIBasic", "testOIBasic", "testOIBasic", 65535)
{
+ ndb_init();
while (++argv, --argc > 0) {
const char* arg = argv[0];
if (*arg != '-') {
@@ -3232,6 +3246,12 @@ NDB_COMMAND(testOIBasic, "testOIBasic", "testOIBasic", "testOIBasic", 65535)
g_opt.m_core = true;
continue;
}
+ if (strcmp(arg, "-csname") == 0) {
+ if (++argv, --argc > 0) {
+ g_opt.m_csname = strdup(argv[0]);
+ continue;
+ }
+ }
if (strcmp(arg, "-dups") == 0) {
g_opt.m_dups = true;
continue;
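
testOIBasic gains a -csname option: the collation (default latin1_bin) is resolved once in runtest(), first as a full collation name and then, failing that, as a character-set name with its primary collation, and it is applied only to non-primary-key character columns. A sketch of that resolution step, assuming the mysys charset functions pulled in via <my_sys.h>:

    #include <my_sys.h>

    // resolve "-csname" the way runtest() does: collation name first, charset name second
    static CHARSET_INFO* resolve_charset(const char* csname)
    {
      CHARSET_INFO* cs = get_charset_by_name(csname, MYF(0));        // e.g. "latin1_bin"
      if (cs == 0)
        cs = get_charset_by_csname(csname, MY_CS_PRIMARY, MYF(0));   // e.g. "latin1"
      return cs;                                                     // 0 if unknown
    }
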
diff --git a/ndb/test/ndbapi/testOperations.cpp b/ndb/test/ndbapi/testOperations.cpp
index bb58e69e898..ba41e1d1c40 100644
--- a/ndb/test/ndbapi/testOperations.cpp
+++ b/ndb/test/ndbapi/testOperations.cpp
@@ -230,6 +230,7 @@ runClearTable(NDBT_Context* ctx, NDBT_Step* step){
int
main(int argc, const char** argv){
+ ndb_init();
NDBT_TestSuite ts("testOperations");
for(Uint32 i = 0; i<sizeof(matrix)/sizeof(matrix[0]); i++){
diff --git a/ndb/test/ndbapi/testOrderedIndex.cpp b/ndb/test/ndbapi/testOrderedIndex.cpp
index 51cc53c9975..b3a75410646 100644
--- a/ndb/test/ndbapi/testOrderedIndex.cpp
+++ b/ndb/test/ndbapi/testOrderedIndex.cpp
@@ -218,6 +218,7 @@ NDBT_TESTSUITE_END(testOrderedIndex);
int
main(int argc, const char** argv)
{
+ ndb_init();
return testOrderedIndex.execute(argc, argv);
}
diff --git a/ndb/test/ndbapi/testReadPerf.cpp b/ndb/test/ndbapi/testReadPerf.cpp
index 412661c4ff8..7cf3755d66f 100644
--- a/ndb/test/ndbapi/testReadPerf.cpp
+++ b/ndb/test/ndbapi/testReadPerf.cpp
@@ -91,6 +91,7 @@ void print_result();
int
main(int argc, const char** argv){
+ ndb_init();
int verbose = 1;
int optind = 0;
diff --git a/ndb/test/ndbapi/testRestartGci.cpp b/ndb/test/ndbapi/testRestartGci.cpp
index 54d38654ff2..4cdfca29e6f 100644
--- a/ndb/test/ndbapi/testRestartGci.cpp
+++ b/ndb/test/ndbapi/testRestartGci.cpp
@@ -214,6 +214,7 @@ TESTCASE("InsertRestartGci",
NDBT_TESTSUITE_END(testRestartGci);
int main(int argc, const char** argv){
+ ndb_init();
return testRestartGci.execute(argc, argv);
}
diff --git a/ndb/test/ndbapi/testScan.cpp b/ndb/test/ndbapi/testScan.cpp
index 3d8b37df0ca..0cd30dfefde 100644
--- a/ndb/test/ndbapi/testScan.cpp
+++ b/ndb/test/ndbapi/testScan.cpp
@@ -1400,6 +1400,7 @@ TESTCASE("ScanRestart",
NDBT_TESTSUITE_END(testScan);
int main(int argc, const char** argv){
+ ndb_init();
myRandom48Init(NdbTick_CurrentMillisecond());
return testScan.execute(argc, argv);
}
diff --git a/ndb/test/ndbapi/testScanInterpreter.cpp b/ndb/test/ndbapi/testScanInterpreter.cpp
index 3b5baf954e0..3a5ef22b613 100644
--- a/ndb/test/ndbapi/testScanInterpreter.cpp
+++ b/ndb/test/ndbapi/testScanInterpreter.cpp
@@ -273,6 +273,7 @@ TESTCASE("ScanLessThanLoop",
NDBT_TESTSUITE_END(testScanInterpreter);
int main(int argc, const char** argv){
+ ndb_init();
return testScanInterpreter.execute(argc, argv);
}
diff --git a/ndb/test/ndbapi/testScanPerf.cpp b/ndb/test/ndbapi/testScanPerf.cpp
index 829ba5a0f15..8c1a41047ca 100644
--- a/ndb/test/ndbapi/testScanPerf.cpp
+++ b/ndb/test/ndbapi/testScanPerf.cpp
@@ -72,6 +72,7 @@ int drop_table();
int
main(int argc, const char** argv){
+ ndb_init();
int verbose = 1;
int optind = 0;
diff --git a/ndb/test/ndbapi/testSystemRestart.cpp b/ndb/test/ndbapi/testSystemRestart.cpp
index 61e086ff941..f8f2b84acc4 100644
--- a/ndb/test/ndbapi/testSystemRestart.cpp
+++ b/ndb/test/ndbapi/testSystemRestart.cpp
@@ -872,7 +872,7 @@ int runSystemRestart7(NDBT_Context* ctx, NDBT_Step* step){
const Uint32 nodeCount = restarter.getNumDbNodes();
if(nodeCount < 2){
- g_info << "SR8 - Needs atleast 2 nodes to test" << endl;
+ g_info << "SR7 - Needs atleast 2 nodes to test" << endl;
return NDBT_OK;
}
@@ -1001,7 +1001,52 @@ int runSystemRestart8(NDBT_Context* ctx, NDBT_Step* step){
i++;
}
- g_info << "runSystemRestart7 finished" << endl;
+ g_info << "runSystemRestart8 finished" << endl;
+
+ return result;
+}
+
+int runSystemRestart9(NDBT_Context* ctx, NDBT_Step* step){
+ Ndb* pNdb = GETNDB(step);
+ int result = NDBT_OK;
+ int timeout = 300;
+ Uint32 loops = ctx->getNumLoops();
+ int records = ctx->getNumRecords();
+ NdbRestarter restarter;
+ Uint32 i = 1;
+
+ Uint32 currentRestartNodeIndex = 1;
+ UtilTransactions utilTrans(*ctx->getTab());
+ HugoTransactions hugoTrans(*ctx->getTab());
+
+ int args[] = { DumpStateOrd::DihMaxTimeBetweenLCP };
+ int dump[] = { DumpStateOrd::DihStartLcpImmediately };
+
+ do {
+ CHECK(restarter.dumpStateAllNodes(args, 1) == 0);
+
+ HugoOperations ops(* ctx->getTab());
+ CHECK(ops.startTransaction(pNdb) == 0);
+ for(i = 0; i<10; i++){
+ CHECK(ops.pkInsertRecord(pNdb, i, 1, 1) == 0);
+ CHECK(ops.execute_NoCommit(pNdb) == 0);
+ }
+ for(i = 0; i<10; i++){
+ CHECK(ops.pkUpdateRecord(pNdb, i, 1) == 0);
+ CHECK(ops.execute_NoCommit(pNdb) == 0);
+ }
+ NdbSleep_SecSleep(10);
+ CHECK(restarter.dumpStateAllNodes(dump, 1) == 0);
+ NdbSleep_SecSleep(10);
+ CHECK(ops.execute_Commit(pNdb) == 0);
+
+ CHECK(restarter.restartAll() == 0);
+ CHECK(restarter.waitClusterStarted(timeout) == 0);
+ CHECK(pNdb->waitUntilReady(timeout) == 0);
+ ops.closeTransaction(pNdb);
+ } while(0);
+
+ g_info << "runSystemRestart9 finished" << endl;
return result;
}
@@ -1176,9 +1221,23 @@ TESTCASE("SR8",
STEP(runSystemRestart8);
FINALIZER(runClearTable);
}
+TESTCASE("SR9",
+ "Perform partition win system restart with other nodes delayed\n"
+ "* 1. Start transaction\n"
+ "* 2. insert (1,1)\n"
+ "* 3. update (1,2)\n"
+ "* 4. start lcp\n"
+ "* 5. commit\n"
+ "* 6. restart\n"){
+ INITIALIZER(runWaitStarted);
+ INITIALIZER(runClearTable);
+ STEP(runSystemRestart9);
+ FINALIZER(runClearTable);
+}
NDBT_TESTSUITE_END(testSystemRestart);
int main(int argc, const char** argv){
+ ndb_init();
return testSystemRestart.execute(argc, argv);
}
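
The new SR9 case stretches the LCP interval, builds up one open transaction (ten inserts followed by ten updates of the same rows), forces a local checkpoint in the middle, commits, and only then restarts the whole cluster. A sketch of the LCP-control part, assuming the DumpStateOrd codes and NdbRestarter used above:

    // keep LCPs away while a transaction is open, then force one mid-flight
    static int forceLcpMidTransaction(NdbRestarter& restarter)
    {
      int maxLcpGap[] = { DumpStateOrd::DihMaxTimeBetweenLCP };   // stretch the LCP interval
      int lcpNow[]    = { DumpStateOrd::DihStartLcpImmediately }; // trigger an LCP on demand

      if (restarter.dumpStateAllNodes(maxLcpGap, 1) != 0)
        return NDBT_FAILED;
      // ... caller performs the uncommitted inserts/updates here ...
      if (restarter.dumpStateAllNodes(lcpNow, 1) != 0)
        return NDBT_FAILED;
      return NDBT_OK;                                             // commit and restart follow
    }
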
diff --git a/ndb/test/ndbapi/testTimeout.cpp b/ndb/test/ndbapi/testTimeout.cpp
index 62e69125073..5cabb86541d 100644
--- a/ndb/test/ndbapi/testTimeout.cpp
+++ b/ndb/test/ndbapi/testTimeout.cpp
@@ -406,6 +406,7 @@ TESTCASE("BuddyTransNoTimeout5",
NDBT_TESTSUITE_END(testTimeout);
int main(int argc, const char** argv){
+ ndb_init();
myRandom48Init(NdbTick_CurrentMillisecond());
return testTimeout.execute(argc, argv);
}
diff --git a/ndb/test/ndbapi/testTransactions.cpp b/ndb/test/ndbapi/testTransactions.cpp
index 67a2df24390..2dca9e24fb4 100644
--- a/ndb/test/ndbapi/testTransactions.cpp
+++ b/ndb/test/ndbapi/testTransactions.cpp
@@ -364,6 +364,7 @@ runClearTable(NDBT_Context* ctx, NDBT_Step* step){
int
main(int argc, const char** argv){
+ ndb_init();
NDBT_TestSuite ts("testOperations");
for(Uint32 i = 0; i<sizeof(matrix)/sizeof(matrix[0]); i++){
diff --git a/ndb/test/ndbapi/test_event.cpp b/ndb/test/ndbapi/test_event.cpp
index 40fc1c6defa..cb2793e42b9 100644
--- a/ndb/test/ndbapi/test_event.cpp
+++ b/ndb/test/ndbapi/test_event.cpp
@@ -137,6 +137,7 @@ NDBT_TESTSUITE_END(test_event);
#endif
int main(int argc, const char** argv){
+ ndb_init();
return test_event.execute(argc, argv);
}
diff --git a/ndb/test/run-test/Makefile.am b/ndb/test/run-test/Makefile.am
index 03b53509f05..3bf2edde47a 100644
--- a/ndb/test/run-test/Makefile.am
+++ b/ndb/test/run-test/Makefile.am
@@ -16,7 +16,7 @@ LDADD_LOC = $(top_builddir)/ndb/src/mgmclient/CpcClient.o \
$(top_builddir)/ndb/src/libndbclient.la \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/mysys/libmysys.a \
- $(top_builddir)/strings/libmystrings.a
+ $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
wrappersdir=$(prefix)/bin
wrappers_SCRIPTS=atrt-testBackup atrt-mysql-test-run
diff --git a/ndb/test/run-test/atrt-mysql-test-run b/ndb/test/run-test/atrt-mysql-test-run
index 3a044e43288..7657140d0fa 100755
--- a/ndb/test/run-test/atrt-mysql-test-run
+++ b/ndb/test/run-test/atrt-mysql-test-run
@@ -1,5 +1,18 @@
#!/bin/sh
-set -e -x
+set -x
+p=`pwd`
cd $MYSQL_BASE_DIR/mysql-test
-./mysql-test-run --with-ndbcluster --ndbconnectstring=$NDB_CONNECTSTRING $*
+./mysql-test-run --with-ndbcluster --ndbconnectstring=$NDB_CONNECTSTRING $* | tee $p/output.txt
+
+f=`grep -c fail $p/output.txt`
+o=`grep -c pass $p/output.txt`
+
+if [ $o -gt 0 -a $f -eq 0 ]
+then
+ echo "NDBT_ProgramExit: OK"
+ exit 0
+fi
+
+echo "NDBT_ProgramExit: Failed"
+exit 1
diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt
index 631378cb636..8d7e8a06c72 100644
--- a/ndb/test/run-test/daily-basic-tests.txt
+++ b/ndb/test/run-test/daily-basic-tests.txt
@@ -1006,3 +1006,18 @@ max-time: 1500
cmd: testSystemRestart
args: -n SR2 T7
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_UNDO T1
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_UNDO T6
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_UNDO T7
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_UNDO T8
diff --git a/ndb/test/run-test/daily-devel-tests.txt b/ndb/test/run-test/daily-devel-tests.txt
index 35e0d2e9a46..f2abc961807 100644
--- a/ndb/test/run-test/daily-devel-tests.txt
+++ b/ndb/test/run-test/daily-devel-tests.txt
@@ -26,10 +26,10 @@ max-time: 600
cmd: atrt-testBackup
args: -n BackupOne T1 T6 T3 I3
-#max-time: 600
-#cmd: testBackup
-#args: -n BackupBank T6
-#
+max-time: 1000
+cmd: atrt-testBackup
+args: -n BackupBank T6
+
#
# MGMAPI AND MGSRV
#
@@ -43,22 +43,6 @@ args: -n SingleUserMode T1
#
max-time: 1500
cmd: testSystemRestart
-args: -n SR_UNDO T1
-
-max-time: 1500
-cmd: testSystemRestart
-args: -n SR_UNDO T6
-
-max-time: 1500
-cmd: testSystemRestart
-args: -n SR_UNDO T7
-
-max-time: 1500
-cmd: testSystemRestart
-args: -n SR_UNDO T8
-
-max-time: 1500
-cmd: testSystemRestart
args: -n SR3 T6
max-time: 1500
@@ -206,3 +190,19 @@ max-time: 2500
cmd: test_event
args: -n BasicEventOperation T1 T6
+#
+max-time: 1500
+cmd: testSystemRestart
+args: -l 1 -n SR6 T1
+
+max-time: 1500
+cmd: testSystemRestart
+args: -l 1 -n SR7 T1
+
+max-time: 1500
+cmd: testSystemRestart
+args: -l 1 -n SR8 T1
+
+max-time: 1500
+cmd: testSystemRestart
+args: -l 1 -n SR9 T1
diff --git a/ndb/test/run-test/main.cpp b/ndb/test/run-test/main.cpp
index 653e16e0947..22799a9a1b2 100644
--- a/ndb/test/run-test/main.cpp
+++ b/ndb/test/run-test/main.cpp
@@ -79,6 +79,7 @@ const int arg_count = 10;
int
main(int argc, const char ** argv){
+ ndb_init();
bool restart = true;
int lineno = 1;
diff --git a/ndb/test/src/HugoTransactions.cpp b/ndb/test/src/HugoTransactions.cpp
index 05039562c76..994a45de3dc 100644
--- a/ndb/test/src/HugoTransactions.cpp
+++ b/ndb/test/src/HugoTransactions.cpp
@@ -728,10 +728,10 @@ HugoTransactions::loadTable(Ndb* pNdb,
if (doSleep > 0)
NdbSleep_MilliSleep(doSleep);
- if (first_batch || !oneTrans) {
+ // if (first_batch || !oneTrans) {
+ if (first_batch || !pTrans) {
first_batch = false;
pTrans = pNdb->startTransaction();
-
if (pTrans == NULL) {
const NdbError err = pNdb->getNdbError();
@@ -774,8 +774,10 @@ HugoTransactions::loadTable(Ndb* pNdb,
// Execute the transaction and insert the record
if (!oneTrans || (c + batch) >= records) {
- closeTrans = true;
+ // closeTrans = true;
+ closeTrans = false;
check = pTrans->execute( Commit );
+ pTrans->restart();
} else {
closeTrans = false;
check = pTrans->execute( NoCommit );
@@ -783,7 +785,7 @@ HugoTransactions::loadTable(Ndb* pNdb,
if(check == -1 ) {
const NdbError err = pTrans->getNdbError();
pNdb->closeTransaction(pTrans);
-
+ pTrans= 0;
switch(err.status){
case NdbError::Success:
ERR(err);
@@ -825,6 +827,7 @@ HugoTransactions::loadTable(Ndb* pNdb,
else{
if (closeTrans) {
pNdb->closeTransaction(pTrans);
+ pTrans= 0;
}
}
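
HugoTransactions::loadTable now keeps one transaction object alive across batches: after a committed batch it calls restart() on the transaction instead of closing it, and it only closes (and nulls) the handle on error so the next round starts a fresh one. A sketch of the resulting loop shape, assuming NdbConnection::restart() resets a committed transaction for reuse, as the patch relies on:

    static int loadBatches(Ndb* pNdb, int batches)
    {
      NdbConnection* pTrans = 0;
      for (int b = 0; b < batches; b++) {
        if (pTrans == 0 && (pTrans = pNdb->startTransaction()) == 0)
          return -1;
        // ... define one batch of inserts on pTrans here ...
        if (pTrans->execute(Commit) == -1) {
          pNdb->closeTransaction(pTrans);   // close only on error
          pTrans = 0;                       // so a fresh transaction is started next round
        } else {
          pTrans->restart();                // reuse the committed transaction for the next batch
        }
      }
      if (pTrans != 0)
        pNdb->closeTransaction(pTrans);
      return 0;
    }
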
diff --git a/ndb/test/src/NDBT_Test.cpp b/ndb/test/src/NDBT_Test.cpp
index ba316bac01b..4ff94bcf296 100644
--- a/ndb/test/src/NDBT_Test.cpp
+++ b/ndb/test/src/NDBT_Test.cpp
@@ -15,7 +15,6 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <ndb_global.h>
-#include <my_sys.h>
#include "NDBT.hpp"
#include "NDBT_Test.hpp"
@@ -990,7 +989,6 @@ int NDBT_TestSuite::execute(int argc, const char** argv){
}
#ifndef DBUG_OFF
- my_init();
if (debug_option)
DBUG_PUSH(debug_option);
#endif
diff --git a/ndb/test/src/NdbBackup.cpp b/ndb/test/src/NdbBackup.cpp
index f33c5d8c313..ad26dbeab16 100644
--- a/ndb/test/src/NdbBackup.cpp
+++ b/ndb/test/src/NdbBackup.cpp
@@ -69,7 +69,11 @@ NdbBackup::getBackupDataDirForNode(int _node_id){
/**
* Fetch configuration from management server
*/
- ConfigRetriever cr(0, NODE_TYPE_API);
+ LocalConfig lc;
+ if (!lc.init(0,0)) {
+ abort();
+ }
+ ConfigRetriever cr(lc, 0, NODE_TYPE_API);
ndb_mgm_configuration * p = 0;
BaseString tmp; tmp.assfmt("%s:%d", host.c_str(), port);
@@ -140,14 +144,16 @@ NdbBackup::execRestore(bool _restore_data,
*/
snprintf(buf, buf_len,
- "scp %s:%s/BACKUP/BACKUP-%d/* .",
+ "scp %s:%s/BACKUP/BACKUP-%d/BACKUP-%d*.%d.* .",
host, path,
- _backup_id);
+ _backup_id,
+ _backup_id,
+ _node_id);
ndbout << "buf: "<< buf <<endl;
int res = system(buf);
- ndbout << "res: " << res << endl;
+ ndbout << "scp res: " << res << endl;
snprintf(buf, 255, "%sndb_restore -c \"host=%s\" -n %d -b %d %s %s .",
#if 1
@@ -162,9 +168,9 @@ NdbBackup::execRestore(bool _restore_data,
_restore_meta?"-m":"");
ndbout << "buf: "<< buf <<endl;
- res = system(buf);
+ res = system(buf);
- ndbout << "res: " << res << endl;
+ ndbout << "ndb_restore res: " << res << endl;
return res;
@@ -180,20 +186,13 @@ NdbBackup::restore(unsigned _backup_id){
return -1;
int res;
- if ( ndbNodes.size() == 1) {
- // restore metadata and data in one call
- res = execRestore(true, true, ndbNodes[0].node_id, _backup_id);
- } else {
- assert(ndbNodes.size() > 1);
-
- // restore metadata first
- res = execRestore(false, true, ndbNodes[0].node_id, _backup_id);
-
- // Restore data once for each node
- for(size_t i = 0; i < ndbNodes.size(); i++){
- res = execRestore(true, false, ndbNodes[i].node_id, _backup_id);
- }
+ // restore metadata first and data for first node
+ res = execRestore(true, true, ndbNodes[0].node_id, _backup_id);
+
+ // Restore data once for each node
+ for(size_t i = 1; i < ndbNodes.size(); i++){
+ res = execRestore(true, false, ndbNodes[i].node_id, _backup_id);
}
return 0;
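
NdbBackup also changes its restore strategy: metadata is restored together with the data of the first node, the remaining nodes get a data-only pass, and the scp beforehand now fetches only the files belonging to the node being restored (the BACKUP-<id>*.<node>.* pattern above). A sketch of the per-node ordering, assuming the execRestore(restore_data, restore_meta, node_id, backup_id) helper used above:

    // metadata plus data from the first node, then data only for the rest
    int res = execRestore(true, true, ndbNodes[0].node_id, _backup_id);
    for (size_t i = 1; i < ndbNodes.size(); i++)
      res = execRestore(true, false, ndbNodes[i].node_id, _backup_id);
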
diff --git a/ndb/test/tools/copy_tab.cpp b/ndb/test/tools/copy_tab.cpp
index 33ce8e01d9a..30141acaa78 100644
--- a/ndb/test/tools/copy_tab.cpp
+++ b/ndb/test/tools/copy_tab.cpp
@@ -24,6 +24,7 @@
#include <getarg.h>
int main(int argc, const char** argv){
+ ndb_init();
const char* _tabname = NULL;
const char* _to_tabname = NULL;
diff --git a/ndb/test/tools/cpcc.cpp b/ndb/test/tools/cpcc.cpp
index e30d458ffee..dd59e577f2c 100644
--- a/ndb/test/tools/cpcc.cpp
+++ b/ndb/test/tools/cpcc.cpp
@@ -173,6 +173,7 @@ add_hosts(Vector<SimpleCpcClient*> & hosts, BaseString list){
int
main(int argc, const char** argv){
+ ndb_init();
int help = 0;
const char *cmd=0, *name=0, *group=0, *owner=0;
int list = 0, start = 0, stop = 0, rm = 0;
diff --git a/ndb/test/tools/create_index.cpp b/ndb/test/tools/create_index.cpp
index f883755ea24..75a657522f6 100644
--- a/ndb/test/tools/create_index.cpp
+++ b/ndb/test/tools/create_index.cpp
@@ -26,6 +26,7 @@
int
main(int argc, const char** argv){
+ ndb_init();
const char* _dbname = "TEST_DB";
int _help = 0;
diff --git a/ndb/test/tools/hugoCalculator.cpp b/ndb/test/tools/hugoCalculator.cpp
index 7f2751be2ba..82c4bbff1a4 100644
--- a/ndb/test/tools/hugoCalculator.cpp
+++ b/ndb/test/tools/hugoCalculator.cpp
@@ -28,6 +28,7 @@
int main(int argc, const char** argv)
{
+ ndb_init();
int _row = 0;
int _column = 0;
int _updates = 0;
diff --git a/ndb/test/tools/hugoFill.cpp b/ndb/test/tools/hugoFill.cpp
index dee6ce2e6c8..6253bd1bb12 100644
--- a/ndb/test/tools/hugoFill.cpp
+++ b/ndb/test/tools/hugoFill.cpp
@@ -25,6 +25,7 @@
int main(int argc, const char** argv){
+ ndb_init();
int _records = 0;
const char* _tabname = NULL;
diff --git a/ndb/test/tools/hugoLoad.cpp b/ndb/test/tools/hugoLoad.cpp
index be7f878d106..c697ad22aad 100644
--- a/ndb/test/tools/hugoLoad.cpp
+++ b/ndb/test/tools/hugoLoad.cpp
@@ -24,6 +24,7 @@
int main(int argc, const char** argv){
+ ndb_init();
int _records = 0;
const char* _tabname = NULL;
diff --git a/ndb/test/tools/hugoLockRecords.cpp b/ndb/test/tools/hugoLockRecords.cpp
index e2c2cd13f00..629408d401d 100644
--- a/ndb/test/tools/hugoLockRecords.cpp
+++ b/ndb/test/tools/hugoLockRecords.cpp
@@ -27,6 +27,7 @@
#include <HugoTransactions.hpp>
int main(int argc, const char** argv){
+ ndb_init();
int _records = 0;
int _loops = 1;
diff --git a/ndb/test/tools/hugoPkDelete.cpp b/ndb/test/tools/hugoPkDelete.cpp
index 1855f19796f..78a90ebcb46 100644
--- a/ndb/test/tools/hugoPkDelete.cpp
+++ b/ndb/test/tools/hugoPkDelete.cpp
@@ -27,6 +27,7 @@
#include <HugoTransactions.hpp>
int main(int argc, const char** argv){
+ ndb_init();
int _records = 0;
int _loops = 1;
diff --git a/ndb/test/tools/hugoPkRead.cpp b/ndb/test/tools/hugoPkRead.cpp
index 50351f08195..cf08b137e8e 100644
--- a/ndb/test/tools/hugoPkRead.cpp
+++ b/ndb/test/tools/hugoPkRead.cpp
@@ -28,6 +28,7 @@
int main(int argc, const char** argv){
+ ndb_init();
int _records = 0;
int _loops = 1;
diff --git a/ndb/test/tools/hugoPkReadRecord.cpp b/ndb/test/tools/hugoPkReadRecord.cpp
index 85f20bd2060..38b7cae2bf4 100644
--- a/ndb/test/tools/hugoPkReadRecord.cpp
+++ b/ndb/test/tools/hugoPkReadRecord.cpp
@@ -28,6 +28,7 @@
int main(int argc, const char** argv)
{
+ ndb_init();
int _row = 0;
int _hex = 0;
int _primaryKey = 0;
diff --git a/ndb/test/tools/hugoPkUpdate.cpp b/ndb/test/tools/hugoPkUpdate.cpp
index e7edc3a991d..286be14a01c 100644
--- a/ndb/test/tools/hugoPkUpdate.cpp
+++ b/ndb/test/tools/hugoPkUpdate.cpp
@@ -27,6 +27,7 @@
#include <HugoTransactions.hpp>
int main(int argc, const char** argv){
+ ndb_init();
int _records = 0;
int _loops = 1;
diff --git a/ndb/test/tools/hugoScanRead.cpp b/ndb/test/tools/hugoScanRead.cpp
index 47ea8f4a8a7..cdfdcea4654 100644
--- a/ndb/test/tools/hugoScanRead.cpp
+++ b/ndb/test/tools/hugoScanRead.cpp
@@ -27,6 +27,7 @@
#include <HugoTransactions.hpp>
int main(int argc, const char** argv){
+ ndb_init();
int _records = 0;
int _loops = 1;
diff --git a/ndb/test/tools/hugoScanUpdate.cpp b/ndb/test/tools/hugoScanUpdate.cpp
index 3e2255ca0f3..96a487a02bf 100644
--- a/ndb/test/tools/hugoScanUpdate.cpp
+++ b/ndb/test/tools/hugoScanUpdate.cpp
@@ -27,6 +27,7 @@
#include <HugoTransactions.hpp>
int main(int argc, const char** argv){
+ ndb_init();
int _records = 0;
int _loops = 1;
diff --git a/ndb/test/tools/restart.cpp b/ndb/test/tools/restart.cpp
index 88cfb231a72..9ad20801fd7 100644
--- a/ndb/test/tools/restart.cpp
+++ b/ndb/test/tools/restart.cpp
@@ -27,6 +27,7 @@
#include <NDBT.hpp>
int main(int argc, const char** argv){
+ ndb_init();
const char* _hostName = NULL;
int _initial = 0;
diff --git a/ndb/test/tools/transproxy.cpp b/ndb/test/tools/transproxy.cpp
index 384a8a34f03..90e216ec785 100644
--- a/ndb/test/tools/transproxy.cpp
+++ b/ndb/test/tools/transproxy.cpp
@@ -346,6 +346,7 @@ start()
int
main(int av, char** ac)
{
+ ndb_init();
debug("start");
hostname = "ndb-srv7";
if (Ndb_getInAddr(&hostaddr.sin_addr, hostname) != 0) {
diff --git a/ndb/test/tools/verify_index.cpp b/ndb/test/tools/verify_index.cpp
index 1295b657e9b..6c8e304e1a1 100644
--- a/ndb/test/tools/verify_index.cpp
+++ b/ndb/test/tools/verify_index.cpp
@@ -27,6 +27,7 @@
int main(int argc, const char** argv){
+ ndb_init();
int _parallelism = 240;
const char* _tabname = NULL;
const char* _indexname = NULL;
diff --git a/ndb/tools/delete_all.cpp b/ndb/tools/delete_all.cpp
index 5110947c6a2..aa5798376ae 100644
--- a/ndb/tools/delete_all.cpp
+++ b/ndb/tools/delete_all.cpp
@@ -26,6 +26,7 @@
static int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab, int parallelism=240);
int main(int argc, const char** argv){
+ ndb_init();
const char* _tabname = NULL;
const char* _dbname = "TEST_DB";
diff --git a/ndb/tools/desc.cpp b/ndb/tools/desc.cpp
index 859a9544a79..0ab11a0fdd2 100644
--- a/ndb/tools/desc.cpp
+++ b/ndb/tools/desc.cpp
@@ -22,6 +22,7 @@
int main(int argc, const char** argv){
+ ndb_init();
const char* _tabname = NULL;
const char* _dbname = "TEST_DB";
int _unqualified = 0;
diff --git a/ndb/tools/drop_index.cpp b/ndb/tools/drop_index.cpp
index 327f15741c9..70c29461c23 100644
--- a/ndb/tools/drop_index.cpp
+++ b/ndb/tools/drop_index.cpp
@@ -23,6 +23,7 @@
#include <getarg.h>
int main(int argc, const char** argv){
+ ndb_init();
const char* _tabname = NULL;
const char* _dbname = "TEST_DB";
diff --git a/ndb/tools/drop_tab.cpp b/ndb/tools/drop_tab.cpp
index 70e5d85aabe..15c229cb0fb 100644
--- a/ndb/tools/drop_tab.cpp
+++ b/ndb/tools/drop_tab.cpp
@@ -23,6 +23,7 @@
#include <getarg.h>
int main(int argc, const char** argv){
+ ndb_init();
const char* _tabname = NULL;
const char* _dbname = "TEST_DB";
diff --git a/ndb/tools/listTables.cpp b/ndb/tools/listTables.cpp
index d6465f3214f..4b24929ee4b 100644
--- a/ndb/tools/listTables.cpp
+++ b/ndb/tools/listTables.cpp
@@ -22,7 +22,6 @@
*/
#include <ndb_global.h>
-#include <my_sys.h>
#include <getarg.h>
#include <NdbApi.hpp>
@@ -167,6 +166,7 @@ const char *debug_option= 0;
#endif
int main(int argc, const char** argv){
+ ndb_init();
int _loops = 1;
const char* _tabname = NULL;
const char* _dbname = "TEST_DB";
@@ -209,7 +209,6 @@ int main(int argc, const char** argv){
_tabname = argv[optind];
#ifndef DBUG_OFF
- my_init();
if (debug_option)
DBUG_PUSH(debug_option);
#endif
diff --git a/ndb/tools/ndbsql.cpp b/ndb/tools/ndbsql.cpp
index 6af5f47f6f4..1997e4abebd 100644
--- a/ndb/tools/ndbsql.cpp
+++ b/ndb/tools/ndbsql.cpp
@@ -671,6 +671,7 @@ void print_help_virtual() {
int main(int argc, const char** argv)
{
+ ndb_init();
const char* usage = "Usage: ndbsql [-h] [-d dsn] [-f file] [stmt]\n-h help\n-d <database name or connect string>\n-f <file name> batch mode\nstmt single SQL statement\n";
const char* dsn = "TEST_DB";
bool helpFlg = false, batchMode = false;
diff --git a/ndb/tools/select_all.cpp b/ndb/tools/select_all.cpp
index eb95947fc0f..8fb8437ba5f 100644
--- a/ndb/tools/select_all.cpp
+++ b/ndb/tools/select_all.cpp
@@ -16,7 +16,6 @@
#include <ndb_global.h>
-#include <my_sys.h>
#include <NdbOut.hpp>
@@ -42,6 +41,7 @@ int scanReadRecords(Ndb*,
bool orderby);
int main(int argc, const char** argv){
+ ndb_init();
int _parallelism = 240;
const char* _delimiter = "\t";
int _header = true;
@@ -89,7 +89,6 @@ int main(int argc, const char** argv){
_tabname = argv[optind];
#ifndef DBUG_OFF
- my_init();
if (debug_option)
DBUG_PUSH(debug_option);
#endif
diff --git a/ndb/tools/select_count.cpp b/ndb/tools/select_count.cpp
index bb7c9dea49b..6650421e637 100644
--- a/ndb/tools/select_count.cpp
+++ b/ndb/tools/select_count.cpp
@@ -33,6 +33,7 @@ select_count(Ndb* pNdb, const NdbDictionary::Table* pTab,
UtilTransactions::ScanLock lock);
int main(int argc, const char** argv){
+ ndb_init();
const char* _dbname = "TEST_DB";
int _parallelism = 240;
int _help = 0;
diff --git a/ndb/tools/waiter.cpp b/ndb/tools/waiter.cpp
index 63469c6d746..c27b46c9356 100644
--- a/ndb/tools/waiter.cpp
+++ b/ndb/tools/waiter.cpp
@@ -30,6 +30,7 @@ int
waitClusterStatus(const char* _addr, ndb_mgm_node_status _status, unsigned int _timeout);
int main(int argc, const char** argv){
+ ndb_init();
const char* _hostName = NULL;
int _no_contact = 0;
diff --git a/sql/Makefile.am b/sql/Makefile.am
index d951aae91e1..19bdf8055f3 100644
--- a/sql/Makefile.am
+++ b/sql/Makefile.am
@@ -37,7 +37,7 @@ LDADD = @isam_libs@ \
$(top_builddir)/mysys/libmysys.a \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/regex/libregex.a \
- $(top_builddir)/strings/libmystrings.a @ZLIB_LIBS@
+ $(top_builddir)/strings/libmystrings.a @ZLIB_LIBS@ @NDB_SCI_LIBS@
mysqld_LDADD = @MYSQLD_EXTRA_LDFLAGS@ \
@bdb_libs@ @innodb_libs@ @pstack_libs@ \
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 4e474568671..07a1ade912a 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -140,6 +140,16 @@ static int ndb_to_mysql_error(const NdbError *err)
}
+
+inline
+int execute_no_commit(ha_ndbcluster *h, NdbConnection *trans)
+{
+ int m_batch_execute= 0;
+ if (false && m_batch_execute)
+ return 0;
+ return trans->execute(NoCommit);
+}
+
/*
Place holder for ha_ndbcluster thread specific data
*/
@@ -219,7 +229,8 @@ void ha_ndbcluster::no_uncommitted_rows_init(THD *thd)
void ha_ndbcluster::no_uncommitted_rows_update(int c)
{
DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_update");
- struct Ndb_table_local_info *info= (struct Ndb_table_local_info *)m_table_info;
+ struct Ndb_table_local_info *info=
+ (struct Ndb_table_local_info *)m_table_info;
info->no_uncommitted_rows_count+= c;
DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
((const NDBTAB *)m_table)->getTableId(),
@@ -600,58 +611,60 @@ int ha_ndbcluster::get_metadata(const char *path)
{
NDBDICT *dict= m_ndb->getDictionary();
const NDBTAB *tab;
- const void *data, *pack_data;
- const char **key_name;
- uint ndb_columns, mysql_columns, length, pack_length;
int error;
+ bool invalidating_ndb_table= false;
+
DBUG_ENTER("get_metadata");
DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, path));
- if (!(tab= dict->getTable(m_tabname)))
- ERR_RETURN(dict->getNdbError());
- DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
-
- /*
- This is the place to check that the table we got from NDB
- is equal to the one on local disk
- */
- ndb_columns= (uint) tab->getNoOfColumns();
- mysql_columns= table->fields;
- if (table->primary_key == MAX_KEY)
- ndb_columns--;
- if (ndb_columns != mysql_columns)
- {
- DBUG_PRINT("error",
- ("Wrong number of columns, ndb: %d mysql: %d",
- ndb_columns, mysql_columns));
- DBUG_RETURN(HA_ERR_OLD_METADATA);
- }
-
- /*
- Compare FrmData in NDB with frm file from disk.
- */
- error= 0;
- if (readfrm(path, &data, &length) ||
- packfrm(data, length, &pack_data, &pack_length))
- {
- my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR));
- my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR));
- DBUG_RETURN(1);
- }
+ do {
+ const void *data, *pack_data;
+ uint length, pack_length;
+
+ if (!(tab= dict->getTable(m_tabname)))
+ ERR_RETURN(dict->getNdbError());
+ DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
+ /*
+ Compare FrmData in NDB with frm file from disk.
+ */
+ error= 0;
+ if (readfrm(path, &data, &length) ||
+ packfrm(data, length, &pack_data, &pack_length))
+ {
+ my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR));
+ my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR));
+ DBUG_RETURN(1);
+ }
- if ((pack_length != tab->getFrmLength()) ||
- (memcmp(pack_data, tab->getFrmData(), pack_length)))
- {
- DBUG_PRINT("error",
- ("metadata, pack_length: %d getFrmLength: %d memcmp: %d",
- pack_length, tab->getFrmLength(),
- memcmp(pack_data, tab->getFrmData(), pack_length)));
- DBUG_DUMP("pack_data", (char*)pack_data, pack_length);
- DBUG_DUMP("frm", (char*)tab->getFrmData(), tab->getFrmLength());
- error= HA_ERR_OLD_METADATA;
- }
- my_free((char*)data, MYF(0));
- my_free((char*)pack_data, MYF(0));
+ if ((pack_length != tab->getFrmLength()) ||
+ (memcmp(pack_data, tab->getFrmData(), pack_length)))
+ {
+ if (!invalidating_ndb_table)
+ {
+ DBUG_PRINT("info", ("Invalidating table"));
+ dict->invalidateTable(m_tabname);
+ invalidating_ndb_table= true;
+ }
+ else
+ {
+ DBUG_PRINT("error",
+ ("metadata, pack_length: %d getFrmLength: %d memcmp: %d",
+ pack_length, tab->getFrmLength(),
+ memcmp(pack_data, tab->getFrmData(), pack_length)));
+ DBUG_DUMP("pack_data", (char*)pack_data, pack_length);
+ DBUG_DUMP("frm", (char*)tab->getFrmData(), tab->getFrmLength());
+ error= HA_ERR_OLD_METADATA;
+ invalidating_ndb_table= false;
+ }
+ }
+ else
+ {
+ invalidating_ndb_table= false;
+ }
+ my_free((char*)data, MYF(0));
+ my_free((char*)pack_data, MYF(0));
+ } while (invalidating_ndb_table);
+
if (error)
DBUG_RETURN(error);
@@ -1028,7 +1041,7 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
}
}
- if (trans->execute(NoCommit) != 0)
+ if (execute_no_commit(this,trans) != 0)
{
table->status= STATUS_NOT_FOUND;
DBUG_RETURN(ndb_err(trans));
@@ -1140,7 +1153,7 @@ inline int ha_ndbcluster::next_result(byte *buf)
*/
if (ops_pending && blobs_pending)
{
- if (trans->execute(NoCommit) != 0)
+ if (execute_no_commit(this,trans) != 0)
DBUG_RETURN(ndb_err(trans));
ops_pending= 0;
blobs_pending= false;
@@ -1168,7 +1181,7 @@ inline int ha_ndbcluster::next_result(byte *buf)
DBUG_PRINT("info", ("ops_pending: %d", ops_pending));
if (current_thd->transaction.on)
{
- if (ops_pending && (trans->execute(NoCommit) != 0))
+ if (ops_pending && (execute_no_commit(this,trans) != 0))
DBUG_RETURN(ndb_err(trans));
}
else
@@ -1508,7 +1521,7 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
ERR_RETURN(op->getNdbError());
}
- if (trans->execute(NoCommit) != 0)
+ if (execute_no_commit(this,trans) != 0)
DBUG_RETURN(ndb_err(trans));
DBUG_PRINT("exit", ("Scan started successfully"));
DBUG_RETURN(next_result(buf));
@@ -1601,7 +1614,7 @@ int ha_ndbcluster::write_row(byte *record)
bulk_insert_not_flushed= false;
if (thd->transaction.on)
{
- if (trans->execute(NoCommit) != 0)
+ if (execute_no_commit(this,trans) != 0)
{
skip_auto_increment= true;
no_uncommitted_rows_execute_failure();
@@ -1776,7 +1789,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
}
// Execute update operation
- if (!cursor && trans->execute(NoCommit) != 0) {
+ if (!cursor && execute_no_commit(this,trans) != 0) {
no_uncommitted_rows_execute_failure();
DBUG_RETURN(ndb_err(trans));
}
@@ -1846,7 +1859,7 @@ int ha_ndbcluster::delete_row(const byte *record)
}
// Execute delete operation
- if (trans->execute(NoCommit) != 0) {
+ if (execute_no_commit(this,trans) != 0) {
no_uncommitted_rows_execute_failure();
DBUG_RETURN(ndb_err(trans));
}
@@ -2276,7 +2289,7 @@ int ha_ndbcluster::close_scan()
deleteing/updating transaction before closing the scan
*/
DBUG_PRINT("info", ("ops_pending: %d", ops_pending));
- if (trans->execute(NoCommit) != 0) {
+ if (execute_no_commit(this,trans) != 0) {
no_uncommitted_rows_execute_failure();
DBUG_RETURN(ndb_err(trans));
}
@@ -2599,7 +2612,7 @@ int ha_ndbcluster::end_bulk_insert()
"rows_inserted:%d, bulk_insert_rows: %d",
rows_inserted, bulk_insert_rows));
bulk_insert_not_flushed= false;
- if (trans->execute(NoCommit) != 0) {
+ if (execute_no_commit(this,trans) != 0) {
no_uncommitted_rows_execute_failure();
my_errno= error= ndb_err(trans);
}
@@ -2953,6 +2966,8 @@ static int create_ndb_column(NDBCOL &col,
{
// Set name
col.setName(field->field_name);
+ // Get char set
+ CHARSET_INFO *cs= field->charset();
// Set type and sizes
const enum enum_field_types mysql_type= field->real_type();
switch (mysql_type) {
@@ -3024,15 +3039,19 @@ static int create_ndb_column(NDBCOL &col,
case MYSQL_TYPE_STRING:
if (field->flags & BINARY_FLAG)
col.setType(NDBCOL::Binary);
- else
+ else {
col.setType(NDBCOL::Char);
+ col.setCharset(cs);
+ }
col.setLength(field->pack_length());
break;
case MYSQL_TYPE_VAR_STRING:
if (field->flags & BINARY_FLAG)
col.setType(NDBCOL::Varbinary);
- else
+ else {
col.setType(NDBCOL::Varchar);
+ col.setCharset(cs);
+ }
col.setLength(field->pack_length());
break;
// Blob types (all come in as MYSQL_TYPE_BLOB)
@@ -3040,8 +3059,10 @@ static int create_ndb_column(NDBCOL &col,
case MYSQL_TYPE_TINY_BLOB:
if (field->flags & BINARY_FLAG)
col.setType(NDBCOL::Blob);
- else
+ else {
col.setType(NDBCOL::Text);
+ col.setCharset(cs);
+ }
col.setInlineSize(256);
// No parts
col.setPartSize(0);
@@ -3051,8 +3072,10 @@ static int create_ndb_column(NDBCOL &col,
case MYSQL_TYPE_BLOB:
if (field->flags & BINARY_FLAG)
col.setType(NDBCOL::Blob);
- else
+ else {
col.setType(NDBCOL::Text);
+ col.setCharset(cs);
+ }
// Use "<=" even if "<" is the exact condition
if (field->max_length() <= (1 << 8))
goto mysql_type_tiny_blob;
@@ -3071,8 +3094,10 @@ static int create_ndb_column(NDBCOL &col,
case MYSQL_TYPE_MEDIUM_BLOB:
if (field->flags & BINARY_FLAG)
col.setType(NDBCOL::Blob);
- else
+ else {
col.setType(NDBCOL::Text);
+ col.setCharset(cs);
+ }
col.setInlineSize(256);
col.setPartSize(4000);
col.setStripeSize(8);
@@ -3081,8 +3106,10 @@ static int create_ndb_column(NDBCOL &col,
case MYSQL_TYPE_LONG_BLOB:
if (field->flags & BINARY_FLAG)
col.setType(NDBCOL::Blob);
- else
+ else {
col.setType(NDBCOL::Text);
+ col.setCharset(cs);
+ }
col.setInlineSize(256);
col.setPartSize(8000);
col.setStripeSize(4);
@@ -3719,12 +3746,14 @@ bool ndbcluster_init()
{
g_ndb->waitUntilReady(10);
}
- else if(res == 1 && g_ndb_cluster_connection->start_connect_thread())
+ else if(res == 1)
{
- DBUG_PRINT("error", ("g_ndb_cluster_connection->start_connect_thread()"));
- DBUG_RETURN(TRUE);
+ if (g_ndb_cluster_connection->start_connect_thread()) {
+ DBUG_PRINT("error", ("g_ndb_cluster_connection->start_connect_thread()"));
+ DBUG_RETURN(TRUE);
+ }
}
- else
+ else
{
DBUG_ASSERT(res == -1);
DBUG_PRINT("error", ("permanent error"));
@@ -3737,7 +3766,7 @@ bool ndbcluster_init()
ndbcluster_inited= 1;
#ifdef USE_DISCOVER_ON_STARTUP
- if (ndb_discover_tables() != 0)
+ if (res == 0 && ndb_discover_tables() != 0)
DBUG_RETURN(TRUE);
#endif
DBUG_RETURN(false);
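
Three things happen in ha_ndbcluster.cc: every NoCommit execute is routed through a new execute_no_commit() wrapper (currently a plain pass-through, but a single seam for batching later, hence the friend declaration in the header below); get_metadata() now invalidates the cached NDB table and retries once when the frm stored in NDB disagrees with the frm on disk, so a stale dictionary cache no longer returns HA_ERR_OLD_METADATA straight away; and create_ndb_column() propagates the MySQL column's character set to NDB for the non-binary Char, Varchar and Text types. A sketch of that last pattern, assuming NdbDictionary::Column::setCharset() and MySQL's Field::charset():

    // non-binary string columns carry their collation into NDB; binary ones do not
    static void set_string_type(NdbDictionary::Column &col, Field *field)
    {
      CHARSET_INFO *cs = field->charset();
      if (field->flags & BINARY_FLAG) {
        col.setType(NdbDictionary::Column::Binary);   // raw bytes, no collation
      } else {
        col.setType(NdbDictionary::Column::Char);     // Varchar/Text follow the same pattern
        col.setCharset(cs);                           // set after setType(), as in the hunks above
      }
      col.setLength(field->pack_length());
    }
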
diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h
index c0ef172413f..eb4556a606b 100644
--- a/sql/ha_ndbcluster.h
+++ b/sql/ha_ndbcluster.h
@@ -264,6 +264,8 @@ class ha_ndbcluster: public handler
void no_uncommitted_rows_update(int);
void no_uncommitted_rows_init(THD *);
void no_uncommitted_rows_reset(THD *);
+
+ friend int execute_no_commit(ha_ndbcluster*, NdbConnection*);
};
bool ndbcluster_init(void);
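
The friend declaration is what ties the header to the new wrapper: execute_no_commit() is a free inline function, so without it a later version that consults per-handler batching state could not reach ha_ndbcluster's private members. The converted call sites in the hunks above all follow the same shape:

    // sketch of a converted call site, mirroring the hunks in ha_ndbcluster.cc
    if (execute_no_commit(this, trans) != 0)
    {
      no_uncommitted_rows_execute_failure();
      DBUG_RETURN(ndb_err(trans));
    }
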