author     pekka@orca.ndb.mysql.com <>  2006-12-01 10:10:22 +0100
committer  pekka@orca.ndb.mysql.com <>  2006-12-01 10:10:22 +0100
commit     b304b3f8e1043c36514013c610ed07499fc2f2e4 (patch)
tree       6b52fd4f4022c7f6c11289567849ee085d7188fe
parent     6758c1679c092ccf3900d3ee262fee0063b202cd (diff)
parent     dfd2c8f4fbbc6315ab308495549ecf6b7536996c (diff)
download   mariadb-git-b304b3f8e1043c36514013c610ed07499fc2f2e4.tar.gz
Merge pnousiainen@bk-internal.mysql.com:/home/bk/mysql-5.1-ndb
into orca.ndb.mysql.com:/export/home/space/pekka/ndb/version/my51-ndb
-rw-r--r--  mysys/my_open.c  12
-rw-r--r--  sql/ha_ndbcluster.cc  25
-rw-r--r--  sql/log.cc  12
-rw-r--r--  sql/log.h  2
-rw-r--r--  sql/share/errmsg.txt  3
-rw-r--r--  sql/sql_repl.cc  1
-rw-r--r--  sql/table.cc  39
-rw-r--r--  storage/ndb/include/mgmapi/mgmapi.h  8
-rw-r--r--  storage/ndb/include/ndbapi/ndberror.h  2
-rw-r--r--  storage/ndb/src/common/transporter/TCP_Transporter.cpp  25
-rw-r--r--  storage/ndb/src/common/transporter/TransporterRegistry.cpp  24
-rw-r--r--  storage/ndb/src/common/util/File.cpp  16
-rw-r--r--  storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp  3
-rw-r--r--  storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp  3
-rw-r--r--  storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp  9
-rw-r--r--  storage/ndb/src/kernel/vm/ArrayPool.hpp  29
-rw-r--r--  storage/ndb/src/kernel/vm/SimulatedBlock.cpp  28
-rw-r--r--  storage/ndb/src/kernel/vm/SimulatedBlock.hpp  2
-rw-r--r--  storage/ndb/src/mgmapi/Makefile.am  2
-rw-r--r--  storage/ndb/src/mgmapi/mgmapi_configuration.cpp  38
-rw-r--r--  storage/ndb/src/mgmsrv/ConfigInfo.cpp  26
-rw-r--r--  storage/ndb/src/mgmsrv/ParamInfo.cpp  2077
-rw-r--r--  storage/ndb/src/mgmsrv/ParamInfo.hpp  44
-rw-r--r--  storage/ndb/src/mgmsrv/Services.cpp  11
-rw-r--r--  storage/ndb/src/ndbapi/ndberror.c  7
-rw-r--r--  storage/ndb/tools/restore/consumer.hpp  1
-rw-r--r--  storage/ndb/tools/restore/consumer_restore.cpp  27
-rw-r--r--  storage/ndb/tools/restore/consumer_restore.hpp  1
-rw-r--r--  storage/ndb/tools/restore/restore_main.cpp  9
29 files changed, 2417 insertions(+), 69 deletions(-)
diff --git a/mysys/my_open.c b/mysys/my_open.c
index ab2f7c9ff27..a023a5ebe63 100644
--- a/mysys/my_open.c
+++ b/mysys/my_open.c
@@ -167,9 +167,17 @@ File my_register_filename(File fd, const char *FileName, enum file_type
else
my_errno=errno;
DBUG_PRINT("error",("Got error %d on open",my_errno));
- if (MyFlags & (MY_FFNF | MY_FAE | MY_WME))
- my_error(error_message_number, MYF(ME_BELL+ME_WAITTANG),
+ if (MyFlags & (MY_FFNF | MY_FAE | MY_WME)) {
+ if (my_errno == EMFILE) {
+ DBUG_PRINT("error",("print err: %d",EE_OUT_OF_FILERESOURCES));
+ my_error(EE_OUT_OF_FILERESOURCES, MYF(ME_BELL+ME_WAITTANG),
FileName, my_errno);
+ } else {
+ DBUG_PRINT("error",("print err: %d",error_message_number));
+ my_error(error_message_number, MYF(ME_BELL+ME_WAITTANG),
+ FileName, my_errno);
+ }
+ }
return(fd);
}
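The hunk above special-cases EMFILE so that running out of file descriptors reports EE_OUT_OF_FILERESOURCES instead of the generic open error. A minimal standalone sketch of the same idea in plain POSIX/C++ (report_open_error is an illustrative helper, not part of mysys):

    #include <cerrno>
    #include <cstdio>
    #include <fcntl.h>

    // Illustrative helper: map errno from a failed open() to a specific message.
    static void report_open_error(const char *path, int err)
    {
      if (err == EMFILE)
        std::fprintf(stderr, "%s: too many open files (errno %d)\n", path, err);
      else
        std::fprintf(stderr, "%s: open failed (errno %d)\n", path, err);
    }

    int main()
    {
      int fd = open("/no/such/file", O_RDONLY);
      if (fd < 0)
        report_open_error("/no/such/file", errno);  // errno distinguishes EMFILE from other failures
      return 0;
    }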
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 0fc884f8e68..78032b9ac12 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -4825,10 +4825,18 @@ int ha_ndbcluster::create(const char *name,
}
if (info->store_on_disk)
+ {
if (info->tablespace)
tab.setTablespace(info->tablespace);
else
tab.setTablespace("DEFAULT-TS");
+ }
+ else if (info->tablespace)
+ {
+ tab.setTablespace(info->tablespace);
+    info->store_on_disk = true; // specifying a tablespace also implies storing on disk
+ }
+
// No primary key, create shadow key as 64 bit, auto increment
if (form->s->primary_key == MAX_KEY)
{
@@ -6537,6 +6545,23 @@ int ndbcluster_find_files(handlerton *hton, THD *thd,
hash_free(&ok_tables);
hash_free(&ndb_tables);
+
+  // Remove the hidden schema table from the list of files returned to the user
+ if (!strcmp(db, NDB_REP_DB))
+ {
+ uint count = 0;
+ while (count++ < files->elements)
+ {
+ file_name = (char *)files->pop();
+ if (!strcmp(file_name, NDB_SCHEMA_TABLE))
+ {
+      DBUG_PRINT("info", ("skip %s.%s table, it should be hidden from the user",
+ NDB_REP_DB, NDB_SCHEMA_TABLE));
+ continue;
+ }
+ files->push_back(file_name);
+ }
+ }
} // extra bracket to avoid gcc 2.95.3 warning
DBUG_RETURN(0);
}
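The loop above rotates through the collected file list exactly files->elements times, popping each name and pushing back everything except the schema table that should stay hidden. A standalone sketch of the same rotate-and-filter idea using std::deque (the server's List type and the NDB_SCHEMA_TABLE name are replaced by illustrative stand-ins):

    #include <cstddef>
    #include <deque>
    #include <iostream>
    #include <string>

    int main()
    {
      std::deque<std::string> files = {"apply_status", "ndb_schema", "t1"};

      // Rotate through the container once; drop the entry we want hidden and
      // re-append everything else so the relative order is preserved.
      for (std::size_t count = files.size(); count > 0; --count) {
        std::string name = files.front();
        files.pop_front();
        if (name == "ndb_schema")
          continue;                    // skip: hidden from the user
        files.push_back(name);
      }

      for (const std::string &name : files)
        std::cout << name << '\n';     // prints: apply_status, t1
      return 0;
    }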
diff --git a/sql/log.cc b/sql/log.cc
index b12eca9bb07..9d55b0a5e05 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -2973,6 +2973,7 @@ int MYSQL_BIN_LOG::purge_logs(const char *to_log,
ulonglong *decrease_log_space)
{
int error;
+ int ret = 0;
bool exit_loop= 0;
LOG_INFO log_info;
DBUG_ENTER("purge_logs");
@@ -3017,6 +3018,14 @@ int MYSQL_BIN_LOG::purge_logs(const char *to_log,
*decrease_log_space-= file_size;
ha_binlog_index_purge_file(current_thd, log_info.log_file_name);
+ if (current_thd->query_error) {
+ DBUG_PRINT("info",("query error: %d", current_thd->query_error));
+ if (my_errno == EMFILE) {
+ DBUG_PRINT("info",("my_errno: %d, set ret = LOG_INFO_EMFILE", my_errno));
+ ret = LOG_INFO_EMFILE;
+ break;
+ }
+ }
if (find_next_log(&log_info, 0) || exit_loop)
break;
@@ -3027,6 +3036,9 @@ int MYSQL_BIN_LOG::purge_logs(const char *to_log,
the log index file after restart - otherwise, this should be safe
*/
error= update_log_index(&log_info, need_update_threads);
+ if (error == 0) {
+ error = ret;
+ }
err:
if (need_mutex)
diff --git a/sql/log.h b/sql/log.h
index f39b52f5db2..f481d3448e0 100644
--- a/sql/log.h
+++ b/sql/log.h
@@ -114,6 +114,8 @@ extern TC_LOG_DUMMY tc_log_dummy;
#define LOG_INFO_MEM -6
#define LOG_INFO_FATAL -7
#define LOG_INFO_IN_USE -8
+#define LOG_INFO_EMFILE -9
+
/* bitmap to SQL_LOG::close() */
#define LOG_CLOSE_INDEX 1
diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt
index 5f9b9e6d563..cfeca697692 100644
--- a/sql/share/errmsg.txt
+++ b/sql/share/errmsg.txt
@@ -6012,4 +6012,5 @@ ER_WRONG_PARAMETERS_TO_NATIVE_FCT 42000
eng "Incorrect parameters in the call to native function '%-.64s'"
ER_NATIVE_FCT_NAME_COLLISION
eng "This function '%-.64s' has the same name as a native function."
-
+ER_BINLOG_PURGE_EMFILE
+ eng "Too many files opened, please execute the command again"
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index d4f6288c298..92c8aca0e0c 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -239,6 +239,7 @@ bool purge_error_message(THD* thd, int res)
case LOG_INFO_MEM: errmsg= ER_OUT_OF_RESOURCES; break;
case LOG_INFO_FATAL: errmsg= ER_BINLOG_PURGE_FATAL_ERR; break;
case LOG_INFO_IN_USE: errmsg= ER_LOG_IN_USE; break;
+ case LOG_INFO_EMFILE: errmsg= ER_BINLOG_PURGE_EMFILE; break;
default: errmsg= ER_LOG_PURGE_UNKNOWN_ERR; break;
}
diff --git a/sql/table.cc b/sql/table.cc
index 926b44dedbc..906edc6f956 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -1562,21 +1562,32 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
outparam->file->auto_repair() &&
!(ha_open_flags & HA_OPEN_FOR_REPAIR));
- if (ha_err == HA_ERR_NO_SUCH_TABLE)
+ switch (ha_err)
{
- /*
- The table did not exists in storage engine, use same error message
- as if the .frm file didn't exist
- */
- error= 1;
- my_errno= ENOENT;
- }
- else
- {
- outparam->file->print_error(ha_err, MYF(0));
- error_reported= TRUE;
- if (ha_err == HA_ERR_TABLE_DEF_CHANGED)
- error= 7;
+ case HA_ERR_NO_SUCH_TABLE:
+ /*
+      The table did not exist in the storage engine; use the same error message
+ as if the .frm file didn't exist
+ */
+ error= 1;
+ my_errno= ENOENT;
+ break;
+ case EMFILE:
+ /*
+      Too many files are open; use the same error message as if the .frm
+      file could not be opened
+ */
+ DBUG_PRINT("error", ("open file: %s failed, too many files opened (errno: %d)",
+ share->normalized_path.str, ha_err));
+ error= 1;
+ my_errno= EMFILE;
+ break;
+ default:
+ outparam->file->print_error(ha_err, MYF(0));
+ error_reported= TRUE;
+ if (ha_err == HA_ERR_TABLE_DEF_CHANGED)
+ error= 7;
+ break;
}
goto err; /* purecov: inspected */
}
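The rewrite above turns the if/else into a switch so that EMFILE from the storage engine is reported the same way as a missing .frm file, while a changed table definition keeps its own error code. A small sketch of that classification shape (the HA_ERR_* values below are illustrative stand-ins, not the real my_base.h constants):

    #include <cerrno>
    #include <cstdio>

    // Illustrative stand-ins for the handler error codes used above.
    enum { HA_ERR_NO_SUCH_TABLE = 155, HA_ERR_TABLE_DEF_CHANGED = 159 };

    // Classify an engine "open" error roughly the way the switch above does:
    // a missing table and EMFILE both take the "behave like a missing .frm" path,
    // a changed definition gets its own code, everything else is left to the handler.
    static int classify_open_error(int ha_err)
    {
      switch (ha_err) {
      case HA_ERR_NO_SUCH_TABLE:
      case EMFILE:
        return 1;                       // report as if the .frm could not be opened
      case HA_ERR_TABLE_DEF_CHANGED:
        return 7;                       // table definition changed
      default:
        return -1;                      // let the handler print its own error
      }
    }

    int main()
    {
      std::printf("EMFILE -> %d\n", classify_open_error(EMFILE));  // prints 1
      return 0;
    }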
diff --git a/storage/ndb/include/mgmapi/mgmapi.h b/storage/ndb/include/mgmapi/mgmapi.h
index 70dda4d3b66..94e4904d0e4 100644
--- a/storage/ndb/include/mgmapi/mgmapi.h
+++ b/storage/ndb/include/mgmapi/mgmapi.h
@@ -1182,6 +1182,14 @@ extern "C" {
int ndb_mgm_check_connection(NdbMgmHandle handle);
int ndb_mgm_report_event(NdbMgmHandle handle, Uint32 *data, Uint32 length);
+
+ struct ndb_mgm_param_info
+ {
+ Uint32 m_id;
+ const char * m_name;
+ };
+ int ndb_mgm_get_db_parameter_info(Uint32 paramId, struct ndb_mgm_param_info * info,
+ size_t * size);
#endif
#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
diff --git a/storage/ndb/include/ndbapi/ndberror.h b/storage/ndb/include/ndbapi/ndberror.h
index cf03eb9da34..009818f5f4f 100644
--- a/storage/ndb/include/ndbapi/ndberror.h
+++ b/storage/ndb/include/ndbapi/ndberror.h
@@ -99,7 +99,7 @@ typedef ndberror_classification_enum ndberror_classification;
const char *ndberror_status_message(ndberror_status);
const char *ndberror_classification_message(ndberror_classification);
void ndberror_update(ndberror_struct *);
-int ndb_error_string(int err_no, char *str, unsigned int size);
+int ndb_error_string(int err_no, char *str, int size);
#endif /* doxygen skip internal*/
diff --git a/storage/ndb/src/common/transporter/TCP_Transporter.cpp b/storage/ndb/src/common/transporter/TCP_Transporter.cpp
index a2ceaea339f..d13c9ec8a36 100644
--- a/storage/ndb/src/common/transporter/TCP_Transporter.cpp
+++ b/storage/ndb/src/common/transporter/TCP_Transporter.cpp
@@ -20,6 +20,9 @@
#include "TCP_Transporter.hpp"
#include <NdbOut.hpp>
#include <NdbSleep.h>
+
+#include <EventLogger.hpp>
+extern EventLogger g_eventLogger;
// End of stuff to be moved
#ifdef NDB_WIN32
@@ -153,14 +156,14 @@ TCP_Transporter::setSocketOptions(){
if (setsockopt(theSocket, SOL_SOCKET, SO_RCVBUF,
(char*)&sockOptRcvBufSize, sizeof(sockOptRcvBufSize)) < 0) {
#ifdef DEBUG_TRANSPORTER
- ndbout_c("The setsockopt SO_RCVBUF error code = %d", InetErrno);
+ g_eventLogger.error("The setsockopt SO_RCVBUF error code = %d", InetErrno);
#endif
}//if
if (setsockopt(theSocket, SOL_SOCKET, SO_SNDBUF,
(char*)&sockOptSndBufSize, sizeof(sockOptSndBufSize)) < 0) {
#ifdef DEBUG_TRANSPORTER
- ndbout_c("The setsockopt SO_SNDBUF error code = %d", InetErrno);
+ g_eventLogger.error("The setsockopt SO_SNDBUF error code = %d", InetErrno);
#endif
}//if
@@ -171,7 +174,7 @@ TCP_Transporter::setSocketOptions(){
if (setsockopt(theSocket, IPPROTO_TCP, TCP_NODELAY,
(char*)&sockOptNodelay, sizeof(sockOptNodelay)) < 0) {
#ifdef DEBUG_TRANSPORTER
- ndbout_c("The setsockopt TCP_NODELAY error code = %d", InetErrno);
+ g_eventLogger.error("The setsockopt TCP_NODELAY error code = %d", InetErrno);
#endif
}//if
}
@@ -185,7 +188,7 @@ TCP_Transporter::setSocketNonBlocking(NDB_SOCKET_TYPE socket){
if(ioctlsocket(socket, FIONBIO, &ul))
{
#ifdef DEBUG_TRANSPORTER
- ndbout_c("Set non-blocking server error3: %d", InetErrno);
+ g_eventLogger.error("Set non-blocking server error3: %d", InetErrno);
#endif
}//if
return true;
@@ -199,13 +202,13 @@ TCP_Transporter::setSocketNonBlocking(NDB_SOCKET_TYPE socket){
flags = fcntl(socket, F_GETFL, 0);
if (flags < 0) {
#ifdef DEBUG_TRANSPORTER
- ndbout_c("Set non-blocking server error1: %s", strerror(InetErrno));
+ g_eventLogger.error("Set non-blocking server error1: %s", strerror(InetErrno));
#endif
}//if
flags |= NDB_NONBLOCK;
if (fcntl(socket, F_SETFL, flags) == -1) {
#ifdef DEBUG_TRANSPORTER
- ndbout_c("Set non-blocking server error2: %s", strerror(InetErrno));
+ g_eventLogger.error("Set non-blocking server error2: %s", strerror(InetErrno));
#endif
}//if
return true;
@@ -326,7 +329,7 @@ TCP_Transporter::doSend() {
} else {
// Send failed
#if defined DEBUG_TRANSPORTER
- ndbout_c("Send Failure(disconnect==%d) to node = %d nBytesSent = %d "
+ g_eventLogger.error("Send Failure(disconnect==%d) to node = %d nBytesSent = %d "
"errno = %d strerror = %s",
DISCONNECT_ERRNO(InetErrno, nBytesSent),
remoteNodeId, nBytesSent, InetErrno,
@@ -361,11 +364,11 @@ TCP_Transporter::doReceive() {
if(receiveBuffer.sizeOfData > receiveBuffer.sizeOfBuffer){
#ifdef DEBUG_TRANSPORTER
- ndbout_c("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)",
+ g_eventLogger.error("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)",
receiveBuffer.sizeOfData, receiveBuffer.sizeOfBuffer);
- ndbout_c("nBytesRead = %d", nBytesRead);
+ g_eventLogger.error("nBytesRead = %d", nBytesRead);
#endif
- ndbout_c("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)",
+ g_eventLogger.error("receiveBuffer.sizeOfData(%d) > receiveBuffer.sizeOfBuffer(%d)",
receiveBuffer.sizeOfData, receiveBuffer.sizeOfBuffer);
report_error(TE_INVALID_MESSAGE_LENGTH);
return 0;
@@ -382,7 +385,7 @@ TCP_Transporter::doReceive() {
return nBytesRead;
} else {
#if defined DEBUG_TRANSPORTER
- ndbout_c("Receive Failure(disconnect==%d) to node = %d nBytesSent = %d "
+ g_eventLogger.error("Receive Failure(disconnect==%d) to node = %d nBytesSent = %d "
"errno = %d strerror = %s",
DISCONNECT_ERRNO(InetErrno, nBytesRead),
remoteNodeId, nBytesRead, InetErrno,
diff --git a/storage/ndb/src/common/transporter/TransporterRegistry.cpp b/storage/ndb/src/common/transporter/TransporterRegistry.cpp
index 71f39b63ba1..cd1c1a0bdcc 100644
--- a/storage/ndb/src/common/transporter/TransporterRegistry.cpp
+++ b/storage/ndb/src/common/transporter/TransporterRegistry.cpp
@@ -778,7 +778,7 @@ TransporterRegistry::poll_TCP(Uint32 timeOutMillis)
tcpReadSelectReply = select(maxSocketValue, &tcpReadset, 0, 0, &timeout);
if(false && tcpReadSelectReply == -1 && errno == EINTR)
- ndbout_c("woke-up by signal");
+ g_eventLogger.info("woke-up by signal");
#ifdef NDB_WIN32
if(tcpReadSelectReply == SOCKET_ERROR)
@@ -1112,12 +1112,12 @@ TransporterRegistry::start_clients_thread()
}
else if(ndb_mgm_is_connected(m_mgm_handle))
{
- ndbout_c("Failed to get dynamic port to connect to: %d", res);
+ g_eventLogger.info("Failed to get dynamic port to connect to: %d", res);
ndb_mgm_disconnect(m_mgm_handle);
}
else
{
- ndbout_c("Management server closed connection early. "
+ g_eventLogger.info("Management server closed connection early. "
"It is probably being shut down (or has problems). "
"We will retry the connection.");
}
@@ -1215,7 +1215,7 @@ TransporterRegistry::start_service(SocketServer& socket_server)
DBUG_ENTER("TransporterRegistry::start_service");
if (m_transporter_interface.size() > 0 && !nodeIdSpecified)
{
- ndbout_c("TransporterRegistry::startReceiving: localNodeId not specified");
+ g_eventLogger.error("TransporterRegistry::startReceiving: localNodeId not specified");
DBUG_RETURN(false);
}
@@ -1241,7 +1241,7 @@ TransporterRegistry::start_service(SocketServer& socket_server)
* If it wasn't a dynamically allocated port, or
* our attempts at getting a new dynamic port failed
*/
- ndbout_c("Unable to setup transporter service port: %s:%d!\n"
+ g_eventLogger.error("Unable to setup transporter service port: %s:%d!\n"
"Please check if the port is already used,\n"
"(perhaps the node is already running)",
t.m_interface ? t.m_interface : "*", t.m_s_service_port);
@@ -1338,13 +1338,13 @@ bool TransporterRegistry::connect_client(NdbMgmHandle *h)
if(!mgm_nodeid)
{
- ndbout_c("%s: %d", __FILE__, __LINE__);
+ g_eventLogger.error("%s: %d", __FILE__, __LINE__);
return false;
}
Transporter * t = theTransporters[mgm_nodeid];
if (!t)
{
- ndbout_c("%s: %d", __FILE__, __LINE__);
+ g_eventLogger.error("%s: %d", __FILE__, __LINE__);
return false;
}
DBUG_RETURN(t->connect_client(connect_ndb_mgmd(h)));
@@ -1360,7 +1360,7 @@ NDB_SOCKET_TYPE TransporterRegistry::connect_ndb_mgmd(NdbMgmHandle *h)
if ( h==NULL || *h == NULL )
{
- ndbout_c("%s: %d", __FILE__, __LINE__);
+ g_eventLogger.error("%s: %d", __FILE__, __LINE__);
return NDB_INVALID_SOCKET;
}
@@ -1373,10 +1373,10 @@ NDB_SOCKET_TYPE TransporterRegistry::connect_ndb_mgmd(NdbMgmHandle *h)
m_transporter_interface[i].m_s_service_port,
&mgm_reply) < 0)
{
- ndbout_c("Error: %s: %d",
+ g_eventLogger.error("Error: %s: %d",
ndb_mgm_get_latest_error_desc(*h),
ndb_mgm_get_latest_error(*h));
- ndbout_c("%s: %d", __FILE__, __LINE__);
+ g_eventLogger.error("%s: %d", __FILE__, __LINE__);
ndb_mgm_destroy_handle(h);
return NDB_INVALID_SOCKET;
}
@@ -1388,10 +1388,10 @@ NDB_SOCKET_TYPE TransporterRegistry::connect_ndb_mgmd(NdbMgmHandle *h)
NDB_SOCKET_TYPE sockfd= ndb_mgm_convert_to_transporter(h);
if ( sockfd == NDB_INVALID_SOCKET)
{
- ndbout_c("Error: %s: %d",
+ g_eventLogger.error("Error: %s: %d",
ndb_mgm_get_latest_error_desc(*h),
ndb_mgm_get_latest_error(*h));
- ndbout_c("%s: %d", __FILE__, __LINE__);
+ g_eventLogger.error("%s: %d", __FILE__, __LINE__);
ndb_mgm_destroy_handle(h);
}
return sockfd;
diff --git a/storage/ndb/src/common/util/File.cpp b/storage/ndb/src/common/util/File.cpp
index f0aecc4f8e4..d893260f564 100644
--- a/storage/ndb/src/common/util/File.cpp
+++ b/storage/ndb/src/common/util/File.cpp
@@ -122,12 +122,24 @@ bool
File_class::close()
{
bool rc = true;
+ int retval = 0;
+
if (m_file != NULL)
{
::fflush(m_file);
- rc = (::fclose(m_file) == 0 ? true : false);
- m_file = NULL; // Try again?
+ retval = ::fclose(m_file);
+ while ( (retval != 0) && (errno == EINTR) ){
+ retval = ::fclose(m_file);
+ }
+ if( retval == 0){
+ rc = true;
+ }
+ else {
+ rc = false;
+ ndbout_c("ERROR: Close file error in File.cpp for %s",strerror(errno));
+ }
}
+ m_file = NULL;
return rc;
}
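The new close() retries fclose() while it fails with EINTR before giving up and logging the error. A standalone sketch of the same retry loop (close_stream is an illustrative name; note that whether fclose() may safely be retried after EINTR is platform-dependent, which is worth keeping in mind when reading the hunk above):

    #include <cerrno>
    #include <cstdio>

    // Close a stdio stream, retrying while the failure is an interrupted call.
    // Returns true on success, false on a real error (errno describes it).
    static bool close_stream(std::FILE *f)
    {
      if (f == nullptr)
        return true;
      std::fflush(f);
      int rc = std::fclose(f);
      while (rc != 0 && errno == EINTR)  // retry only for interrupted calls
        rc = std::fclose(f);
      if (rc != 0)
        std::perror("fclose");
      return rc == 0;
    }

    int main()
    {
      std::FILE *f = std::fopen("/tmp/file_close_demo.txt", "w");
      if (f != nullptr)
        return close_stream(f) ? 0 : 1;
      return 1;
    }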
diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
index e4b2c018c1a..ec063b84022 100644
--- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
@@ -62,7 +62,8 @@ Cmvmi::Cmvmi(Block_context& ctx) :
&long_sig_buffer_size);
long_sig_buffer_size= long_sig_buffer_size / 256;
- g_sectionSegmentPool.setSize(long_sig_buffer_size);
+ g_sectionSegmentPool.setSize(long_sig_buffer_size,
+ false,true,true,CFG_DB_LONG_SIGNAL_BUFFER);
// Add received signals
addRecSignal(GSN_CONNECT_REP, &Cmvmi::execCONNECT_REP);
diff --git a/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp b/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
index cc8d0f38c4f..f4a712f4536 100644
--- a/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
+++ b/storage/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
@@ -52,7 +52,8 @@ void Dbacc::initRecords()
page8 = (Page8*)allocRecord("Page8",
sizeof(Page8),
cpagesize,
- false);
+ false,
+ CFG_DB_INDEX_MEM);
operationrec = (Operationrec*)allocRecord("Operationrec",
sizeof(Operationrec),
diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
index 973d2f7a9ef..0ae23083626 100644
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
@@ -311,7 +311,7 @@ void Dbtup::execREAD_CONFIG_REQ(Signal* signal)
c_storedProcPool.setSize(noOfStoredProc);
c_buildIndexPool.setSize(c_noOfBuildIndexRec);
- c_triggerPool.setSize(noOfTriggers);
+ c_triggerPool.setSize(noOfTriggers, false, true, true, CFG_DB_NO_TRIGGERS);
c_extent_hash.setSize(1024); // 4k
@@ -343,6 +343,7 @@ void Dbtup::initRecords()
{
unsigned i;
Uint32 tmp;
+ Uint32 tmp1 = 0;
const ndb_mgm_configuration_iterator * p =
m_ctx.m_config.getOwnConfigIterator();
ndbrequire(p != 0);
@@ -350,7 +351,7 @@ void Dbtup::initRecords()
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_PAGE, &tmp));
// Records with dynamic sizes
- Page* ptr =(Page*)allocRecord("Page", sizeof(Page), tmp, false);
+ Page* ptr =(Page*)allocRecord("Page", sizeof(Page), tmp, false, CFG_DB_DATA_MEM);
c_page_pool.set(ptr, tmp);
attrbufrec = (Attrbufrec*)allocRecord("Attrbufrec",
@@ -374,7 +375,9 @@ void Dbtup::initRecords()
cnoOfTabDescrRec);
ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUP_OP_RECS, &tmp));
- c_operation_pool.setSize(tmp);
+ ndb_mgm_get_int_parameter(p, CFG_DB_NO_LOCAL_OPS, &tmp1);
+ c_operation_pool.setSize(tmp, false, true, true,
+ tmp1 == 0 ? CFG_DB_NO_OPS : CFG_DB_NO_LOCAL_OPS);
pageRange = (PageRange*)allocRecord("PageRange",
sizeof(PageRange),
diff --git a/storage/ndb/src/kernel/vm/ArrayPool.hpp b/storage/ndb/src/kernel/vm/ArrayPool.hpp
index 6a5bb948dd8..25318a70b51 100644
--- a/storage/ndb/src/kernel/vm/ArrayPool.hpp
+++ b/storage/ndb/src/kernel/vm/ArrayPool.hpp
@@ -25,6 +25,7 @@
#include <ErrorReporter.hpp>
#include <NdbMem.h>
#include <Bitmask.hpp>
+#include <mgmapi.h>
template <class T> class Array;
@@ -43,7 +44,8 @@ public:
*
* Note, can currently only be called once
*/
- bool setSize(Uint32 noOfElements, bool align = false, bool exit_on_error = true, bool guard = true);
+ bool setSize(Uint32 noOfElements, bool align = false, bool exit_on_error = true,
+ bool guard = true, Uint32 paramId = 0);
bool set(T*, Uint32 cnt, bool align = false);
void clear() { theArray = 0; }
@@ -221,13 +223,19 @@ template <class T>
inline
bool
ArrayPool<T>::setSize(Uint32 noOfElements,
- bool align, bool exit_on_error, bool guard){
+ bool align, bool exit_on_error, bool guard, Uint32 paramId){
if(size == 0){
if(noOfElements == 0)
return true;
+ Uint64 real_size = (Uint64)noOfElements * sizeof(T);
+ size_t req_size = (size_t)real_size;
+ Uint64 real_size_align = real_size + sizeof(T);
+ size_t req_size_align = (size_t)real_size_align;
+
if(align)
{
- alloc_ptr = ndbd_malloc((noOfElements+1) * sizeof(T));
+ if((Uint64)req_size_align == real_size_align && req_size_align > 0)
+ alloc_ptr = ndbd_malloc(req_size_align);
UintPtr p = (UintPtr)alloc_ptr;
UintPtr mod = p % sizeof(T);
if (mod)
@@ -236,14 +244,23 @@ ArrayPool<T>::setSize(Uint32 noOfElements,
}
theArray = (T *)p;
}
- else
- theArray = (T *)(alloc_ptr = ndbd_malloc(noOfElements * sizeof(T)));
+ else if((Uint64)req_size == real_size && req_size > 0)
+ theArray = (T *)(alloc_ptr = ndbd_malloc(req_size));
if(theArray == 0)
{
+ char errmsg[255] = "ArrayPool<T>::setSize malloc failed";
+ struct ndb_mgm_param_info param_info;
+ size_t size = sizeof(ndb_mgm_param_info);
if (!exit_on_error)
return false;
- ErrorReporter::handleAssert("ArrayPool<T>::setSize malloc failed",
+
+ if(0 != paramId && 0 == ndb_mgm_get_db_parameter_info(paramId, &param_info, &size)) {
+ BaseString::snprintf(errmsg, sizeof(errmsg),
+ "Malloc memory for %s failed", param_info.m_name);
+ }
+
+ ErrorReporter::handleAssert(errmsg,
__FILE__, __LINE__, NDBD_EXIT_MEMALLOC);
return false; // not reached
}
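The setSize() change computes the requested byte count in 64 bits and only calls ndbd_malloc() when that value still fits in a size_t, so a huge element count cannot silently wrap on a 32-bit build. A standalone sketch of that guard, using plain malloc instead of ndbd_malloc (checked_alloc is an illustrative name):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // Allocate count elements of elem_size bytes, refusing requests whose
    // true size does not fit in size_t (i.e. would wrap on a 32-bit build).
    static void *checked_alloc(std::uint64_t count, std::uint64_t elem_size)
    {
      std::uint64_t real_size = count * elem_size;       // exact size in 64 bits
      std::size_t   req_size  = (std::size_t)real_size;  // possibly truncated
      if ((std::uint64_t)req_size != real_size || req_size == 0)
        return nullptr;                                  // would overflow (or is zero)
      return std::malloc(req_size);
    }

    int main()
    {
      void *ok  = checked_alloc(1024, 64);                    // fits: real allocation
      void *bad = checked_alloc((std::uint64_t)1 << 40, 64);  // refused where size_t is 32-bit
      std::printf("ok=%p bad=%p\n", ok, bad);
      std::free(ok);
      std::free(bad);
      return 0;
    }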
diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp
index 1d6676287e8..6611ba36a60 100644
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp
@@ -657,29 +657,39 @@ SimulatedBlock::getBatSize(Uint16 blockNo){
}
void*
-SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear)
+SimulatedBlock::allocRecord(const char * type, size_t s, size_t n, bool clear, Uint32 paramId)
{
void * p = NULL;
size_t size = n*s;
+ Uint64 real_size = (Uint64)((Uint64)n)*((Uint64)s);
refresh_watch_dog();
- if (size > 0){
+ if (real_size > 0){
#ifdef VM_TRACE_MEM
- ndbout_c("%s::allocRecord(%s, %u, %u) = %u bytes",
+ ndbout_c("%s::allocRecord(%s, %u, %u) = %llu bytes",
getBlockName(number()),
type,
s,
n,
- size);
+ real_size);
#endif
- p = ndbd_malloc(size);
+ if( real_size == (Uint64)size )
+ p = ndbd_malloc(size);
if (p == NULL){
char buf1[255];
char buf2[255];
- BaseString::snprintf(buf1, sizeof(buf1), "%s could not allocate memory for %s",
- getBlockName(number()), type);
- BaseString::snprintf(buf2, sizeof(buf2), "Requested: %ux%u = %u bytes",
- (Uint32)s, (Uint32)n, (Uint32)size);
+ struct ndb_mgm_param_info param_info;
+ size_t size = sizeof(ndb_mgm_param_info);
+
+ if(0 != paramId && 0 == ndb_mgm_get_db_parameter_info(paramId, &param_info, &size)) {
+ BaseString::snprintf(buf1, sizeof(buf1), "%s could not allocate memory for parameter %s",
+ getBlockName(number()), param_info.m_name);
+ } else {
+ BaseString::snprintf(buf1, sizeof(buf1), "%s could not allocate memory for %s",
+ getBlockName(number()), type);
+ }
+ BaseString::snprintf(buf2, sizeof(buf2), "Requested: %ux%u = %llu bytes",
+ (Uint32)s, (Uint32)n, (Uint64)real_size);
ERROR_SET(fatal, NDBD_EXIT_MEMALLOC, buf1, buf2);
}
diff --git a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp
index 3e90b20705e..2e8f33bf023 100644
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp
@@ -377,7 +377,7 @@ protected:
* Allocates memory for the datastructures where ndb keeps the data
*
*/
- void* allocRecord(const char * type, size_t s, size_t n, bool clear = true);
+ void* allocRecord(const char * type, size_t s, size_t n, bool clear = true, Uint32 paramId = 0);
/**
* Deallocate record
diff --git a/storage/ndb/src/mgmapi/Makefile.am b/storage/ndb/src/mgmapi/Makefile.am
index d04be9f16a4..c0e0c396537 100644
--- a/storage/ndb/src/mgmapi/Makefile.am
+++ b/storage/ndb/src/mgmapi/Makefile.am
@@ -1,7 +1,7 @@
noinst_LTLIBRARIES = libmgmapi.la
-libmgmapi_la_SOURCES = mgmapi.cpp ndb_logevent.cpp mgmapi_configuration.cpp LocalConfig.cpp ../kernel/error/ndbd_exit_codes.c
+libmgmapi_la_SOURCES = mgmapi.cpp ndb_logevent.cpp mgmapi_configuration.cpp LocalConfig.cpp ../kernel/error/ndbd_exit_codes.c ../mgmsrv/ParamInfo.cpp
INCLUDES_LOC = -I$(top_srcdir)/storage/ndb/include/mgmapi
diff --git a/storage/ndb/src/mgmapi/mgmapi_configuration.cpp b/storage/ndb/src/mgmapi/mgmapi_configuration.cpp
index 80ab428c05a..86dbbe8dc04 100644
--- a/storage/ndb/src/mgmapi/mgmapi_configuration.cpp
+++ b/storage/ndb/src/mgmapi/mgmapi_configuration.cpp
@@ -1,6 +1,10 @@
#include <ndb_types.h>
#include <mgmapi.h>
#include "mgmapi_configuration.hpp"
+#include "../mgmsrv/ParamInfo.hpp"
+
+extern const ParamInfo ParamInfoArray[];
+extern const int ParamInfoNum;
ndb_mgm_configuration_iterator::ndb_mgm_configuration_iterator
(const ndb_mgm_configuration & conf, unsigned type_of_section)
@@ -155,3 +159,37 @@ ndb_mgm_find(ndb_mgm_configuration_iterator* iter,
int param, unsigned search){
return iter->find(param, search);
}
+
+/**
+ * Retrieve information about a configuration parameter
+ * @param info : in - pointer to a structure allocated by the caller
+ * @param size : in/out - pointer to a size_t initialized to sizeof(ndb_mgm_param_info);
+ *               on return, set to the number of bytes filled in by the function
+*/
+extern "C"
+int
+ndb_mgm_get_db_parameter_info(Uint32 paramId, struct ndb_mgm_param_info * info, size_t * size) {
+ if ( paramId == 0 ) {
+ return -1;
+ }
+
+ for (int i = 0; i < ParamInfoNum; i++) {
+ if (paramId == ParamInfoArray[i]._paramId && strcmp(DB_TOKEN, ParamInfoArray[i]._section) == 0) {
+ size_t tmp = 0;
+ if (tmp + sizeof(info->m_id) <= *size)
+ {
+ info->m_id = ParamInfoArray[i]._paramId;
+ tmp += sizeof(info->m_id);
+ }
+
+ if (tmp + sizeof(info->m_name) <= *size)
+ {
+ info->m_name = ParamInfoArray[i]._fname;
+ tmp += sizeof(info->m_name);
+ }
+
+ *size = tmp;
+ return 0;
+ }
+ }
+ return -1;
+}
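A hedged usage sketch of the new lookup, assuming the mgmapi.h and mgmapi_config_parameters.h headers from this patch are on the include path (CFG_DB_DATA_MEM is one of the [DB] parameter ids used elsewhere in the change):

    #include <cstddef>
    #include <cstdio>
    #include <mgmapi.h>
    #include <mgmapi_config_parameters.h>

    int main()
    {
      struct ndb_mgm_param_info info;
      size_t size = sizeof(info);       // in: capacity, out: bytes filled in

      // Resolve a [DB] parameter id to its configuration name, e.g. "DataMemory".
      if (ndb_mgm_get_db_parameter_info(CFG_DB_DATA_MEM, &info, &size) == 0)
        std::printf("param %u is %s\n", (unsigned)info.m_id, info.m_name);
      else
        std::printf("unknown or non-DB parameter id\n");
      return 0;
    }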
diff --git a/storage/ndb/src/mgmsrv/ConfigInfo.cpp b/storage/ndb/src/mgmsrv/ConfigInfo.cpp
index 25b6454b3ad..def349cf744 100644
--- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp
+++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp
@@ -3150,7 +3150,31 @@ checkDbConstraints(InitConfigFileParser::Context & ctx, const char *){
} else {
ctx.m_userProperties.put("NoOfReplicas", replicas);
}
-
+
+ /**
+   * In the kernel, MaxNoOfMetaTables is calculated as the following sum:
+   *   Uint32 noOfMetaTables = noOfTables + noOfOrderedIndexes +
+   *                           noOfUniqueHashIndexes + 2
+   * where 2 is the number of system tables.
+   * So we must check that the sum doesn't exceed the max value of Uint32.
+ */
+ Uint32 noOfTables = 0,
+ noOfOrderedIndexes = 0,
+ noOfUniqueHashIndexes = 0;
+ ctx.m_currentSection->get("MaxNoOfTables", &noOfTables);
+ ctx.m_currentSection->get("MaxNoOfOrderedIndexes", &noOfOrderedIndexes);
+ ctx.m_currentSection->get("MaxNoOfUniqueHashIndexes", &noOfUniqueHashIndexes);
+
+ Uint64 sum= (Uint64)noOfTables + noOfOrderedIndexes + noOfUniqueHashIndexes;
+
+ if (sum > ((Uint32)~0 - 2)) {
+ ctx.reportError("The sum of MaxNoOfTables, MaxNoOfOrderedIndexes and"
+ " MaxNoOfUniqueHashIndexes must not exceed %u - [%s]"
+ " starting at line: %d",
+ ((Uint32)~0 - 2), ctx.fname, ctx.m_sectionLineno);
+ return false;
+ }
+
return true;
}
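The new constraint adds the three table/index counts in 64-bit arithmetic and rejects configurations whose implied MaxNoOfMetaTables (the sum plus 2 system tables) would no longer fit in a Uint32. A small standalone check of the same arithmetic (meta_tables_fit is an illustrative name):

    #include <cstdint>
    #include <cstdio>

    // Returns true when noOfTables + noOfOrderedIndexes + noOfUniqueHashIndexes + 2
    // still fits in a 32-bit unsigned value, i.e. the kernel's noOfMetaTables
    // computation cannot overflow.
    static bool meta_tables_fit(std::uint32_t tables,
                                std::uint32_t ordered,
                                std::uint32_t unique_hash)
    {
      std::uint64_t sum = (std::uint64_t)tables + ordered + unique_hash;
      return sum <= (std::uint64_t)UINT32_MAX - 2;
    }

    int main()
    {
      std::printf("%d\n", meta_tables_fit(128, 128, 64));               // 1: default-sized config
      std::printf("%d\n", meta_tables_fit(UINT32_MAX, UINT32_MAX, 0));  // 0: would overflow
      return 0;
    }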
diff --git a/storage/ndb/src/mgmsrv/ParamInfo.cpp b/storage/ndb/src/mgmsrv/ParamInfo.cpp
new file mode 100644
index 00000000000..01662fab588
--- /dev/null
+++ b/storage/ndb/src/mgmsrv/ParamInfo.cpp
@@ -0,0 +1,2077 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <ndb_global.h>
+#include <../../include/kernel/ndb_limits.h>
+#include "ParamInfo.hpp"
+#include <mgmapi_config_parameters.h>
+
+#ifndef MYSQLCLUSTERDIR
+#define MYSQLCLUSTERDIR "."
+#endif
+
+#define KEY_INTERNAL 0
+#define MAX_INT_RNIL 0xfffffeff
+#define MAX_PORT_NO 65535
+
+#define _STR_VALUE(x) #x
+#define STR_VALUE(x) _STR_VALUE(x)
+
+/****************************************************************************
+ * Section names
+ ****************************************************************************/
+#define DB_TOKEN_PRINT "ndbd(DB)"
+#define MGM_TOKEN_PRINT "ndb_mgmd(MGM)"
+#define API_TOKEN_PRINT "mysqld(API)"
+
+/**
+ * A MANDATORY parameter must be specified in the config file
+ * An UNDEFINED parameter may or may not be specified in the config file
+ */
+static const char* MANDATORY = (char*)~(UintPtr)0;// Default value for mandatory params.
+static const char* UNDEFINED = 0; // Default value for undefined params.
+
+extern const ParamInfo ParamInfoArray[];
+extern const int ParamInfoNum;
+
+/**
+ * The default constructors create objects with suitable values for the
+ * configuration parameters.
+ *
+ * Some are however given the value MANDATORY which means that the value
+ * must be specified in the configuration file.
+ *
+ * Min and max values are also given for some parameters.
+ * - Attr1: Name in file (initial config file)
+ * - Attr2: Name in prop (properties object)
+ * - Attr3: Name of Section (in init config file)
+ * - Attr4: Updateable
+ * - Attr5: Type of parameter (INT or BOOL)
+ * - Attr6: Default Value (number only)
+ * - Attr7: Min value
+ * - Attr8: Max value
+ *
+ * Parameter constraints are coded in file Config.cpp.
+ *
+ * *******************************************************************
+ * Parameters used under development should be marked "NOTIMPLEMENTED"
+ * *******************************************************************
+ */
+const ParamInfo ParamInfoArray[] = {
+
+ /****************************************************************************
+ * COMPUTER
+ ***************************************************************************/
+ {
+ KEY_INTERNAL,
+ "COMPUTER",
+ "COMPUTER",
+ "Computer section",
+ CI_INTERNAL,
+ false,
+ CI_SECTION,
+ 0,
+ 0, 0 },
+
+ {
+ KEY_INTERNAL,
+ "Id",
+ "COMPUTER",
+ "Name of computer",
+ CI_USED,
+ false,
+ CI_STRING,
+ MANDATORY,
+ 0, 0 },
+
+ {
+ KEY_INTERNAL,
+ "HostName",
+ "COMPUTER",
+ "Hostname of computer (e.g. mysql.com)",
+ CI_USED,
+ false,
+ CI_STRING,
+ MANDATORY,
+ 0, 0 },
+
+ {
+ KEY_INTERNAL,
+ "ByteOrder",
+ "COMPUTER",
+ 0,
+ CI_DEPRICATED,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0,
+ 0 },
+
+ /****************************************************************************
+ * SYSTEM
+ ***************************************************************************/
+ {
+ CFG_SECTION_SYSTEM,
+ "SYSTEM",
+ "SYSTEM",
+ "System section",
+ CI_USED,
+ false,
+ CI_SECTION,
+ (const char *)CFG_SECTION_SYSTEM,
+ 0, 0 },
+
+ {
+ CFG_SYS_NAME,
+ "Name",
+ "SYSTEM",
+ "Name of system (NDB Cluster)",
+ CI_USED,
+ false,
+ CI_STRING,
+ MANDATORY,
+ 0, 0 },
+
+ {
+ CFG_SYS_PRIMARY_MGM_NODE,
+ "PrimaryMGMNode",
+ "SYSTEM",
+ "Node id of Primary "MGM_TOKEN_PRINT" node",
+ CI_USED,
+ false,
+ CI_INT,
+ "0",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_SYS_CONFIG_GENERATION,
+ "ConfigGenerationNumber",
+ "SYSTEM",
+ "Configuration generation number",
+ CI_USED,
+ false,
+ CI_INT,
+ "0",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ /***************************************************************************
+ * DB
+ ***************************************************************************/
+ {
+ CFG_SECTION_NODE,
+ DB_TOKEN,
+ DB_TOKEN,
+ "Node section",
+ CI_USED,
+ false,
+ CI_SECTION,
+ (const char *)NODE_TYPE_DB,
+ 0, 0
+ },
+
+ {
+ CFG_NODE_HOST,
+ "HostName",
+ DB_TOKEN,
+ "Name of computer for this node",
+ CI_INTERNAL,
+ false,
+ CI_STRING,
+ "localhost",
+ 0, 0 },
+
+ {
+ CFG_NODE_SYSTEM,
+ "System",
+ DB_TOKEN,
+ "Name of system for this node",
+ CI_INTERNAL,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ KEY_INTERNAL,
+ "Id",
+ DB_TOKEN,
+ "",
+ CI_DEPRICATED,
+ false,
+ CI_INT,
+ MANDATORY,
+ "1",
+ STR_VALUE(MAX_NODES) },
+
+ {
+ CFG_NODE_ID,
+ "NodeId",
+ DB_TOKEN,
+ "Number identifying the database node ("DB_TOKEN_PRINT")",
+ CI_USED,
+ false,
+ CI_INT,
+ MANDATORY,
+ "1",
+ STR_VALUE(MAX_NODES) },
+
+ {
+ KEY_INTERNAL,
+ "ServerPort",
+ DB_TOKEN,
+ "Port used to setup transporter",
+ CI_USED,
+ false,
+ CI_INT,
+ UNDEFINED,
+ "1",
+ STR_VALUE(MAX_PORT_NO) },
+
+ {
+ CFG_DB_NO_REPLICAS,
+ "NoOfReplicas",
+ DB_TOKEN,
+ "Number of copies of all data in the database (1-4)",
+ CI_USED,
+ false,
+ CI_INT,
+ MANDATORY,
+ "1",
+ "4" },
+
+ {
+ CFG_DB_NO_ATTRIBUTES,
+ "MaxNoOfAttributes",
+ DB_TOKEN,
+ "Total number of attributes stored in database. I.e. sum over all tables",
+ CI_USED,
+ false,
+ CI_INT,
+ "1000",
+ "32",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_NO_TABLES,
+ "MaxNoOfTables",
+ DB_TOKEN,
+ "Total number of tables stored in the database",
+ CI_USED,
+ false,
+ CI_INT,
+ "128",
+ "8",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_NO_ORDERED_INDEXES,
+ "MaxNoOfOrderedIndexes",
+ DB_TOKEN,
+ "Total number of ordered indexes that can be defined in the system",
+ CI_USED,
+ false,
+ CI_INT,
+ "128",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_NO_UNIQUE_HASH_INDEXES,
+ "MaxNoOfUniqueHashIndexes",
+ DB_TOKEN,
+ "Total number of unique hash indexes that can be defined in the system",
+ CI_USED,
+ false,
+ CI_INT,
+ "64",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_NO_INDEXES,
+ "MaxNoOfIndexes",
+ DB_TOKEN,
+ "Total number of indexes that can be defined in the system",
+ CI_DEPRICATED,
+ false,
+ CI_INT,
+ "128",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_NO_INDEX_OPS,
+ "MaxNoOfConcurrentIndexOperations",
+ DB_TOKEN,
+ "Total number of index operations that can execute simultaneously on one "DB_TOKEN_PRINT" node",
+ CI_USED,
+ false,
+ CI_INT,
+ "8K",
+ "0",
+ STR_VALUE(MAX_INT_RNIL)
+ },
+
+ {
+ CFG_DB_NO_TRIGGERS,
+ "MaxNoOfTriggers",
+ DB_TOKEN,
+ "Total number of triggers that can be defined in the system",
+ CI_USED,
+ false,
+ CI_INT,
+ "768",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_NO_TRIGGER_OPS,
+ "MaxNoOfFiredTriggers",
+ DB_TOKEN,
+ "Total number of triggers that can fire simultaneously in one "DB_TOKEN_PRINT" node",
+ CI_USED,
+ false,
+ CI_INT,
+ "4000",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ KEY_INTERNAL,
+ "ExecuteOnComputer",
+ DB_TOKEN,
+ "String referencing an earlier defined COMPUTER",
+ CI_USED,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ CFG_DB_NO_SAVE_MSGS,
+ "MaxNoOfSavedMessages",
+ DB_TOKEN,
+ "Max number of error messages in error log and max number of trace files",
+ CI_USED,
+ true,
+ CI_INT,
+ "25",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_MEMLOCK,
+ "LockPagesInMainMemory",
+ DB_TOKEN,
+ "If set to yes, then NDB Cluster data will not be swapped out to disk",
+ CI_USED,
+ true,
+ CI_BOOL,
+ "false",
+ "false",
+ "true" },
+
+ {
+ CFG_DB_WATCHDOG_INTERVAL,
+ "TimeBetweenWatchDogCheck",
+ DB_TOKEN,
+ "Time between execution checks inside a database node",
+ CI_USED,
+ true,
+ CI_INT,
+ "6000",
+ "70",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_STOP_ON_ERROR,
+ "StopOnError",
+ DB_TOKEN,
+ "If set to N, "DB_TOKEN_PRINT" automatically restarts/recovers in case of node failure",
+ CI_USED,
+ true,
+ CI_BOOL,
+ "true",
+ "false",
+ "true" },
+
+ {
+ CFG_DB_STOP_ON_ERROR_INSERT,
+ "RestartOnErrorInsert",
+ DB_TOKEN,
+ "See src/kernel/vm/Emulator.hpp NdbRestartType for details",
+ CI_INTERNAL,
+ true,
+ CI_INT,
+ "2",
+ "0",
+ "4" },
+
+ {
+ CFG_DB_NO_OPS,
+ "MaxNoOfConcurrentOperations",
+ DB_TOKEN,
+ "Max number of operation records in transaction coordinator",
+ CI_USED,
+ false,
+ CI_INT,
+ "32k",
+ "32",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_NO_LOCAL_OPS,
+ "MaxNoOfLocalOperations",
+ DB_TOKEN,
+ "Max number of operation records defined in the local storage node",
+ CI_USED,
+ false,
+ CI_INT,
+ UNDEFINED,
+ "32",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_NO_LOCAL_SCANS,
+ "MaxNoOfLocalScans",
+ DB_TOKEN,
+ "Max number of fragment scans in parallel in the local storage node",
+ CI_USED,
+ false,
+ CI_INT,
+ UNDEFINED,
+ "32",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_BATCH_SIZE,
+ "BatchSizePerLocalScan",
+ DB_TOKEN,
+ "Used to calculate the number of lock records for scan with hold lock",
+ CI_USED,
+ false,
+ CI_INT,
+ STR_VALUE(DEF_BATCH_SIZE),
+ "1",
+ STR_VALUE(MAX_PARALLEL_OP_PER_SCAN) },
+
+ {
+ CFG_DB_NO_TRANSACTIONS,
+ "MaxNoOfConcurrentTransactions",
+ DB_TOKEN,
+ "Max number of transaction executing concurrently on the "DB_TOKEN_PRINT" node",
+ CI_USED,
+ false,
+ CI_INT,
+ "4096",
+ "32",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_NO_SCANS,
+ "MaxNoOfConcurrentScans",
+ DB_TOKEN,
+ "Max number of scans executing concurrently on the "DB_TOKEN_PRINT" node",
+ CI_USED,
+ false,
+ CI_INT,
+ "256",
+ "2",
+ "500" },
+
+ {
+ CFG_DB_TRANS_BUFFER_MEM,
+ "TransactionBufferMemory",
+ DB_TOKEN,
+ "Dynamic buffer space (in bytes) for key and attribute data allocated for each "DB_TOKEN_PRINT" node",
+ CI_USED,
+ false,
+ CI_INT,
+ "1M",
+ "1K",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_INDEX_MEM,
+ "IndexMemory",
+ DB_TOKEN,
+ "Number bytes on each "DB_TOKEN_PRINT" node allocated for storing indexes",
+ CI_USED,
+ false,
+ CI_INT64,
+ "18M",
+ "1M",
+ "1024G" },
+
+ {
+ CFG_DB_DATA_MEM,
+ "DataMemory",
+ DB_TOKEN,
+ "Number bytes on each "DB_TOKEN_PRINT" node allocated for storing data",
+ CI_USED,
+ false,
+ CI_INT64,
+ "80M",
+ "1M",
+ "1024G" },
+
+ {
+ CFG_DB_UNDO_INDEX_BUFFER,
+ "UndoIndexBuffer",
+ DB_TOKEN,
+ "Number bytes on each "DB_TOKEN_PRINT" node allocated for writing UNDO logs for index part",
+ CI_USED,
+ false,
+ CI_INT,
+ "2M",
+ "1M",
+ STR_VALUE(MAX_INT_RNIL)},
+
+ {
+ CFG_DB_UNDO_DATA_BUFFER,
+ "UndoDataBuffer",
+ DB_TOKEN,
+ "Number bytes on each "DB_TOKEN_PRINT" node allocated for writing UNDO logs for data part",
+ CI_USED,
+ false,
+ CI_INT,
+ "16M",
+ "1M",
+ STR_VALUE(MAX_INT_RNIL)},
+
+ {
+ CFG_DB_REDO_BUFFER,
+ "RedoBuffer",
+ DB_TOKEN,
+ "Number bytes on each "DB_TOKEN_PRINT" node allocated for writing REDO logs",
+ CI_USED,
+ false,
+ CI_INT,
+ "8M",
+ "1M",
+ STR_VALUE(MAX_INT_RNIL)},
+
+ {
+ CFG_DB_LONG_SIGNAL_BUFFER,
+ "LongMessageBuffer",
+ DB_TOKEN,
+ "Number bytes on each "DB_TOKEN_PRINT" node allocated for internal long messages",
+ CI_USED,
+ false,
+ CI_INT,
+ "1M",
+ "512k",
+ STR_VALUE(MAX_INT_RNIL)},
+
+ {
+ CFG_DB_DISK_PAGE_BUFFER_MEMORY,
+ "DiskPageBufferMemory",
+ DB_TOKEN,
+ "Number bytes on each "DB_TOKEN_PRINT" node allocated for disk page buffer cache",
+ CI_USED,
+ false,
+ CI_INT64,
+ "64M",
+ "4M",
+ "1024G" },
+
+ {
+ CFG_DB_SGA,
+ "SharedGlobalMemory",
+ DB_TOKEN,
+ "Total number bytes on each "DB_TOKEN_PRINT" node allocated for any use",
+ CI_USED,
+ false,
+ CI_INT64,
+ "20M",
+ "0",
+ "65536G" }, // 32k pages * 32-bit i value
+
+ {
+ CFG_DB_START_PARTIAL_TIMEOUT,
+ "StartPartialTimeout",
+ DB_TOKEN,
+ "Time to wait before trying to start wo/ all nodes. 0=Wait forever",
+ CI_USED,
+ true,
+ CI_INT,
+ "30000",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_START_PARTITION_TIMEOUT,
+ "StartPartitionedTimeout",
+ DB_TOKEN,
+ "Time to wait before trying to start partitioned. 0=Wait forever",
+ CI_USED,
+ true,
+ CI_INT,
+ "60000",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_START_FAILURE_TIMEOUT,
+ "StartFailureTimeout",
+ DB_TOKEN,
+ "Time to wait before terminating. 0=Wait forever",
+ CI_USED,
+ true,
+ CI_INT,
+ "0",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_HEARTBEAT_INTERVAL,
+ "HeartbeatIntervalDbDb",
+ DB_TOKEN,
+ "Time between "DB_TOKEN_PRINT"-"DB_TOKEN_PRINT" heartbeats. "DB_TOKEN_PRINT" considered dead after 3 missed HBs",
+ CI_USED,
+ true,
+ CI_INT,
+ "1500",
+ "10",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_API_HEARTBEAT_INTERVAL,
+ "HeartbeatIntervalDbApi",
+ DB_TOKEN,
+ "Time between "API_TOKEN_PRINT"-"DB_TOKEN_PRINT" heartbeats. "API_TOKEN_PRINT" connection closed after 3 missed HBs",
+ CI_USED,
+ true,
+ CI_INT,
+ "1500",
+ "100",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_LCP_INTERVAL,
+ "TimeBetweenLocalCheckpoints",
+ DB_TOKEN,
+ "Time between taking snapshots of the database (expressed in 2log of bytes)",
+ CI_USED,
+ true,
+ CI_INT,
+ "20",
+ "0",
+ "31" },
+
+ {
+ CFG_DB_GCP_INTERVAL,
+ "TimeBetweenGlobalCheckpoints",
+ DB_TOKEN,
+ "Time between doing group commit of transactions to disk",
+ CI_USED,
+ true,
+ CI_INT,
+ "2000",
+ "10",
+ "32000" },
+
+ {
+ CFG_DB_NO_REDOLOG_FILES,
+ "NoOfFragmentLogFiles",
+ DB_TOKEN,
+ "No of 16 Mbyte Redo log files in each of 4 file sets belonging to "DB_TOKEN_PRINT" node",
+ CI_USED,
+ false,
+ CI_INT,
+ "16",
+ "3",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_MAX_OPEN_FILES,
+ "MaxNoOfOpenFiles",
+ DB_TOKEN,
+ "Max number of files open per "DB_TOKEN_PRINT" node.(One thread is created per file)",
+ CI_USED,
+ false,
+ CI_INT,
+ "40",
+ "20",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_INITIAL_OPEN_FILES,
+ "InitialNoOfOpenFiles",
+ DB_TOKEN,
+ "Initial number of files open per "DB_TOKEN_PRINT" node.(One thread is created per file)",
+ CI_USED,
+ false,
+ CI_INT,
+ "27",
+ "20",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_TRANSACTION_CHECK_INTERVAL,
+ "TimeBetweenInactiveTransactionAbortCheck",
+ DB_TOKEN,
+ "Time between inactive transaction checks",
+ CI_USED,
+ true,
+ CI_INT,
+ "1000",
+ "1000",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_TRANSACTION_INACTIVE_TIMEOUT,
+ "TransactionInactiveTimeout",
+ DB_TOKEN,
+ "Time application can wait before executing another transaction part (ms).\n"
+ "This is the time the transaction coordinator waits for the application\n"
+ "to execute or send another part (query, statement) of the transaction.\n"
+ "If the application takes too long time, the transaction gets aborted.\n"
+ "Timeout set to 0 means that we don't timeout at all on application wait.",
+ CI_USED,
+ true,
+ CI_INT,
+ STR_VALUE(MAX_INT_RNIL),
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_TRANSACTION_DEADLOCK_TIMEOUT,
+ "TransactionDeadlockDetectionTimeout",
+ DB_TOKEN,
+ "Time transaction can be executing in a DB node (ms).\n"
+ "This is the time the transaction coordinator waits for each database node\n"
+ "of the transaction to execute a request. If the database node takes too\n"
+ "long time, the transaction gets aborted.",
+ CI_USED,
+ true,
+ CI_INT,
+ "1200",
+ "50",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_LCP_DISC_PAGES_TUP_SR,
+ "NoOfDiskPagesToDiskDuringRestartTUP",
+ DB_TOKEN,
+ "DiskCheckpointSpeedSr",
+ CI_DEPRICATED,
+ true,
+ CI_INT,
+ "40",
+ "1",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_LCP_DISC_PAGES_TUP,
+ "NoOfDiskPagesToDiskAfterRestartTUP",
+ DB_TOKEN,
+ "DiskCheckpointSpeed",
+ CI_DEPRICATED,
+ true,
+ CI_INT,
+ "40",
+ "1",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_LCP_DISC_PAGES_ACC_SR,
+ "NoOfDiskPagesToDiskDuringRestartACC",
+ DB_TOKEN,
+ "DiskCheckpointSpeedSr",
+ CI_DEPRICATED,
+ true,
+ CI_INT,
+ "20",
+ "1",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_LCP_DISC_PAGES_ACC,
+ "NoOfDiskPagesToDiskAfterRestartACC",
+ DB_TOKEN,
+ "DiskCheckpointSpeed",
+ CI_DEPRICATED,
+ true,
+ CI_INT,
+ "20",
+ "1",
+ STR_VALUE(MAX_INT_RNIL) },
+
+
+ {
+ CFG_DB_DISCLESS,
+ "Diskless",
+ DB_TOKEN,
+ "Run wo/ disk",
+ CI_USED,
+ true,
+ CI_BOOL,
+ "false",
+ "false",
+ "true"},
+
+ {
+ KEY_INTERNAL,
+ "Discless",
+ DB_TOKEN,
+ "Diskless",
+ CI_DEPRICATED,
+ true,
+ CI_BOOL,
+ "false",
+ "false",
+ "true"},
+
+
+
+ {
+ CFG_DB_ARBIT_TIMEOUT,
+ "ArbitrationTimeout",
+ DB_TOKEN,
+  "Max time (milliseconds) database partition waits for arbitration signal",
+ CI_USED,
+ false,
+ CI_INT,
+ "3000",
+ "10",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_NODE_DATADIR,
+ "DataDir",
+ DB_TOKEN,
+ "Data directory for this node",
+ CI_USED,
+ false,
+ CI_STRING,
+ MYSQLCLUSTERDIR,
+ 0, 0 },
+
+ {
+ CFG_DB_FILESYSTEM_PATH,
+ "FileSystemPath",
+ DB_TOKEN,
+ "Path to directory where the "DB_TOKEN_PRINT" node stores its data (directory must exist)",
+ CI_USED,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ CFG_LOGLEVEL_STARTUP,
+ "LogLevelStartup",
+ DB_TOKEN,
+ "Node startup info printed on stdout",
+ CI_USED,
+ false,
+ CI_INT,
+ "1",
+ "0",
+ "15" },
+
+ {
+ CFG_LOGLEVEL_SHUTDOWN,
+ "LogLevelShutdown",
+ DB_TOKEN,
+ "Node shutdown info printed on stdout",
+ CI_USED,
+ false,
+ CI_INT,
+ "0",
+ "0",
+ "15" },
+
+ {
+ CFG_LOGLEVEL_STATISTICS,
+ "LogLevelStatistic",
+ DB_TOKEN,
+ "Transaction, operation, transporter info printed on stdout",
+ CI_USED,
+ false,
+ CI_INT,
+ "0",
+ "0",
+ "15" },
+
+ {
+ CFG_LOGLEVEL_CHECKPOINT,
+ "LogLevelCheckpoint",
+ DB_TOKEN,
+ "Local and Global checkpoint info printed on stdout",
+ CI_USED,
+ false,
+ CI_INT,
+ "0",
+ "0",
+ "15" },
+
+ {
+ CFG_LOGLEVEL_NODERESTART,
+ "LogLevelNodeRestart",
+ DB_TOKEN,
+ "Node restart, node failure info printed on stdout",
+ CI_USED,
+ false,
+ CI_INT,
+ "0",
+ "0",
+ "15" },
+
+ {
+ CFG_LOGLEVEL_CONNECTION,
+ "LogLevelConnection",
+ DB_TOKEN,
+ "Node connect/disconnect info printed on stdout",
+ CI_USED,
+ false,
+ CI_INT,
+ "0",
+ "0",
+ "15" },
+
+ {
+ CFG_LOGLEVEL_CONGESTION,
+ "LogLevelCongestion",
+ DB_TOKEN,
+ "Congestion info printed on stdout",
+ CI_USED,
+ false,
+ CI_INT,
+ "0",
+ "0",
+ "15" },
+
+ {
+ CFG_LOGLEVEL_ERROR,
+ "LogLevelError",
+ DB_TOKEN,
+ "Transporter, heartbeat errors printed on stdout",
+ CI_USED,
+ false,
+ CI_INT,
+ "0",
+ "0",
+ "15" },
+
+ {
+ CFG_LOGLEVEL_INFO,
+ "LogLevelInfo",
+ DB_TOKEN,
+ "Heartbeat and log info printed on stdout",
+ CI_USED,
+ false,
+ CI_INT,
+ "0",
+ "0",
+ "15" },
+
+ /**
+ * Backup
+ */
+ {
+ CFG_DB_PARALLEL_BACKUPS,
+ "ParallelBackups",
+ DB_TOKEN,
+ "Maximum number of parallel backups",
+ CI_NOTIMPLEMENTED,
+ false,
+ CI_INT,
+ "1",
+ "1",
+ "1" },
+
+ {
+ CFG_DB_BACKUP_DATADIR,
+ "BackupDataDir",
+ DB_TOKEN,
+ "Path to where to store backups",
+ CI_USED,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ CFG_DB_DISK_SYNCH_SIZE,
+ "DiskSyncSize",
+ DB_TOKEN,
+ "Data written to a file before a synch is forced",
+ CI_USED,
+ false,
+ CI_INT,
+ "4M",
+ "32k",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_CHECKPOINT_SPEED,
+ "DiskCheckpointSpeed",
+ DB_TOKEN,
+ "Bytes per second allowed to be written by checkpoint",
+ CI_USED,
+ false,
+ CI_INT,
+ "10M",
+ "1M",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_CHECKPOINT_SPEED_SR,
+ "DiskCheckpointSpeedInRestart",
+ DB_TOKEN,
+ "Bytes per second allowed to be written by checkpoint during restart",
+ CI_USED,
+ false,
+ CI_INT,
+ "100M",
+ "1M",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_BACKUP_MEM,
+ "BackupMemory",
+ DB_TOKEN,
+ "Total memory allocated for backups per node (in bytes)",
+ CI_USED,
+ false,
+ CI_INT,
+ "4M", // sum of BackupDataBufferSize and BackupLogBufferSize
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_BACKUP_DATA_BUFFER_MEM,
+ "BackupDataBufferSize",
+ DB_TOKEN,
+ "Default size of databuffer for a backup (in bytes)",
+ CI_USED,
+ false,
+ CI_INT,
+ "2M", // remember to change BackupMemory
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_BACKUP_LOG_BUFFER_MEM,
+ "BackupLogBufferSize",
+ DB_TOKEN,
+ "Default size of logbuffer for a backup (in bytes)",
+ CI_USED,
+ false,
+ CI_INT,
+ "2M", // remember to change BackupMemory
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_BACKUP_WRITE_SIZE,
+ "BackupWriteSize",
+ DB_TOKEN,
+ "Default size of filesystem writes made by backup (in bytes)",
+ CI_USED,
+ false,
+ CI_INT,
+ "32K",
+ "2K",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_BACKUP_MAX_WRITE_SIZE,
+ "BackupMaxWriteSize",
+ DB_TOKEN,
+ "Max size of filesystem writes made by backup (in bytes)",
+ CI_USED,
+ false,
+ CI_INT,
+ "256K",
+ "2K",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_DB_STRING_MEMORY,
+ "StringMemory",
+ DB_TOKEN,
+  "Default size of string memory (0 -> 5% of max, 1-100 -> % of max, >100 -> actual bytes)",
+ CI_USED,
+ false,
+ CI_INT,
+ "0",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ /***************************************************************************
+ * API
+ ***************************************************************************/
+ {
+ CFG_SECTION_NODE,
+ API_TOKEN,
+ API_TOKEN,
+ "Node section",
+ CI_USED,
+ false,
+ CI_SECTION,
+ (const char *)NODE_TYPE_API,
+ 0, 0
+ },
+
+ {
+ CFG_NODE_HOST,
+ "HostName",
+ API_TOKEN,
+ "Name of computer for this node",
+ CI_INTERNAL,
+ false,
+ CI_STRING,
+ "",
+ 0, 0 },
+
+ {
+ CFG_NODE_SYSTEM,
+ "System",
+ API_TOKEN,
+ "Name of system for this node",
+ CI_INTERNAL,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ KEY_INTERNAL,
+ "Id",
+ API_TOKEN,
+ "",
+ CI_DEPRICATED,
+ false,
+ CI_INT,
+ MANDATORY,
+ "1",
+ STR_VALUE(MAX_NODES) },
+
+ {
+ CFG_NODE_ID,
+ "NodeId",
+ API_TOKEN,
+ "Number identifying application node ("API_TOKEN_PRINT")",
+ CI_USED,
+ false,
+ CI_INT,
+ MANDATORY,
+ "1",
+ STR_VALUE(MAX_NODES) },
+
+ {
+ KEY_INTERNAL,
+ "ExecuteOnComputer",
+ API_TOKEN,
+ "String referencing an earlier defined COMPUTER",
+ CI_USED,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ CFG_NODE_ARBIT_RANK,
+ "ArbitrationRank",
+ API_TOKEN,
+ "If 0, then "API_TOKEN_PRINT" is not arbitrator. Kernel selects arbitrators in order 1, 2",
+ CI_USED,
+ false,
+ CI_INT,
+ "0",
+ "0",
+ "2" },
+
+ {
+ CFG_NODE_ARBIT_DELAY,
+ "ArbitrationDelay",
+ API_TOKEN,
+ "When asked to arbitrate, arbitrator waits this long before voting (msec)",
+ CI_USED,
+ false,
+ CI_INT,
+ "0",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_MAX_SCAN_BATCH_SIZE,
+ "MaxScanBatchSize",
+ "API",
+ "The maximum collective batch size for one scan",
+ CI_USED,
+ false,
+ CI_INT,
+ STR_VALUE(MAX_SCAN_BATCH_SIZE),
+ "32k",
+ "16M" },
+
+ {
+ CFG_BATCH_BYTE_SIZE,
+ "BatchByteSize",
+ "API",
+ "The default batch size in bytes",
+ CI_USED,
+ false,
+ CI_INT,
+ STR_VALUE(SCAN_BATCH_SIZE),
+ "1k",
+ "1M" },
+
+ {
+ CFG_BATCH_SIZE,
+ "BatchSize",
+ "API",
+ "The default batch size in number of records",
+ CI_USED,
+ false,
+ CI_INT,
+ STR_VALUE(DEF_BATCH_SIZE),
+ "1",
+ STR_VALUE(MAX_PARALLEL_OP_PER_SCAN) },
+
+ /****************************************************************************
+ * MGM
+ ***************************************************************************/
+ {
+ CFG_SECTION_NODE,
+ MGM_TOKEN,
+ MGM_TOKEN,
+ "Node section",
+ CI_USED,
+ false,
+ CI_SECTION,
+ (const char *)NODE_TYPE_MGM,
+ 0, 0
+ },
+
+ {
+ CFG_NODE_HOST,
+ "HostName",
+ MGM_TOKEN,
+ "Name of computer for this node",
+ CI_INTERNAL,
+ false,
+ CI_STRING,
+ "",
+ 0, 0 },
+
+ {
+ CFG_NODE_DATADIR,
+ "DataDir",
+ MGM_TOKEN,
+ "Data directory for this node",
+ CI_USED,
+ false,
+ CI_STRING,
+ MYSQLCLUSTERDIR,
+ 0, 0 },
+
+ {
+ CFG_NODE_SYSTEM,
+ "System",
+ MGM_TOKEN,
+ "Name of system for this node",
+ CI_INTERNAL,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ KEY_INTERNAL,
+ "Id",
+ MGM_TOKEN,
+ "",
+ CI_DEPRICATED,
+ false,
+ CI_INT,
+ MANDATORY,
+ "1",
+ STR_VALUE(MAX_NODES) },
+
+ {
+ CFG_NODE_ID,
+ "NodeId",
+ MGM_TOKEN,
+ "Number identifying the management server node ("MGM_TOKEN_PRINT")",
+ CI_USED,
+ false,
+ CI_INT,
+ MANDATORY,
+ "1",
+ STR_VALUE(MAX_NODES) },
+
+ {
+ CFG_LOG_DESTINATION,
+ "LogDestination",
+ MGM_TOKEN,
+ "String describing where logmessages are sent",
+ CI_USED,
+ false,
+ CI_STRING,
+ 0,
+ 0, 0 },
+
+ {
+ KEY_INTERNAL,
+ "ExecuteOnComputer",
+ MGM_TOKEN,
+ "String referencing an earlier defined COMPUTER",
+ CI_USED,
+ false,
+ CI_STRING,
+ 0,
+ 0, 0 },
+
+ {
+ KEY_INTERNAL,
+ "MaxNoOfSavedEvents",
+ MGM_TOKEN,
+ "",
+ CI_USED,
+ false,
+ CI_INT,
+ "100",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_MGM_PORT,
+ "PortNumber",
+ MGM_TOKEN,
+ "Port number to give commands to/fetch configurations from management server",
+ CI_USED,
+ false,
+ CI_INT,
+ NDB_PORT,
+ "0",
+ STR_VALUE(MAX_PORT_NO) },
+
+ {
+ KEY_INTERNAL,
+ "PortNumberStats",
+ MGM_TOKEN,
+ "Port number used to get statistical information from a management server",
+ CI_USED,
+ false,
+ CI_INT,
+ UNDEFINED,
+ "0",
+ STR_VALUE(MAX_PORT_NO) },
+
+ {
+ CFG_NODE_ARBIT_RANK,
+ "ArbitrationRank",
+ MGM_TOKEN,
+ "If 0, then "MGM_TOKEN_PRINT" is not arbitrator. Kernel selects arbitrators in order 1, 2",
+ CI_USED,
+ false,
+ CI_INT,
+ "1",
+ "0",
+ "2" },
+
+ {
+ CFG_NODE_ARBIT_DELAY,
+ "ArbitrationDelay",
+ MGM_TOKEN,
+ "",
+ CI_USED,
+ false,
+ CI_INT,
+ "0",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ /****************************************************************************
+ * TCP
+ ***************************************************************************/
+ {
+ CFG_SECTION_CONNECTION,
+ "TCP",
+ "TCP",
+ "Connection section",
+ CI_USED,
+ false,
+ CI_SECTION,
+ (const char *)CONNECTION_TYPE_TCP,
+ 0, 0
+ },
+
+ {
+ CFG_CONNECTION_HOSTNAME_1,
+ "HostName1",
+ "TCP",
+ "Name/IP of computer on one side of the connection",
+ CI_INTERNAL,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_HOSTNAME_2,
+ "HostName2",
+ "TCP",
+ "Name/IP of computer on one side of the connection",
+ CI_INTERNAL,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_NODE_1,
+ "NodeId1",
+ "TCP",
+ "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
+ CI_USED,
+ false,
+ CI_STRING,
+ MANDATORY,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_NODE_2,
+ "NodeId2",
+ "TCP",
+ "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
+ CI_USED,
+ false,
+ CI_STRING,
+ MANDATORY,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_GROUP,
+ "Group",
+ "TCP",
+ "",
+ CI_USED,
+ false,
+ CI_INT,
+ "55",
+ "0", "200" },
+
+ {
+ CFG_CONNECTION_NODE_ID_SERVER,
+ "NodeIdServer",
+ "TCP",
+ "",
+ CI_USED,
+ false,
+ CI_INT,
+ MANDATORY,
+ "1", "63" },
+
+ {
+ CFG_CONNECTION_SEND_SIGNAL_ID,
+ "SendSignalId",
+ "TCP",
+ "Sends id in each signal. Used in trace files.",
+ CI_USED,
+ false,
+ CI_BOOL,
+ "true",
+ "false",
+ "true" },
+
+
+ {
+ CFG_CONNECTION_CHECKSUM,
+ "Checksum",
+ "TCP",
+ "If checksum is enabled, all signals between nodes are checked for errors",
+ CI_USED,
+ false,
+ CI_BOOL,
+ "false",
+ "false",
+ "true" },
+
+ {
+ CFG_CONNECTION_SERVER_PORT,
+ "PortNumber",
+ "TCP",
+ "Port used for this transporter",
+ CI_USED,
+ false,
+ CI_INT,
+ MANDATORY,
+ "0",
+ STR_VALUE(MAX_PORT_NO) },
+
+ {
+ CFG_TCP_SEND_BUFFER_SIZE,
+ "SendBufferMemory",
+ "TCP",
+ "Bytes of buffer for signals sent from this node",
+ CI_USED,
+ false,
+ CI_INT,
+ "256K",
+ "64K",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_TCP_RECEIVE_BUFFER_SIZE,
+ "ReceiveBufferMemory",
+ "TCP",
+ "Bytes of buffer for signals received by this node",
+ CI_USED,
+ false,
+ CI_INT,
+ "64K",
+ "16K",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_TCP_PROXY,
+ "Proxy",
+ "TCP",
+ "",
+ CI_USED,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_NODE_1_SYSTEM,
+ "NodeId1_System",
+ "TCP",
+ "System for node 1 in connection",
+ CI_INTERNAL,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_NODE_2_SYSTEM,
+ "NodeId2_System",
+ "TCP",
+ "System for node 2 in connection",
+ CI_INTERNAL,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+
+ /****************************************************************************
+ * SHM
+ ***************************************************************************/
+ {
+ CFG_SECTION_CONNECTION,
+ "SHM",
+ "SHM",
+ "Connection section",
+ CI_USED,
+ false,
+ CI_SECTION,
+ (const char *)CONNECTION_TYPE_SHM,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_HOSTNAME_1,
+ "HostName1",
+ "SHM",
+ "Name/IP of computer on one side of the connection",
+ CI_INTERNAL,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_HOSTNAME_2,
+ "HostName2",
+ "SHM",
+ "Name/IP of computer on one side of the connection",
+ CI_INTERNAL,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_SERVER_PORT,
+ "PortNumber",
+ "SHM",
+ "Port used for this transporter",
+ CI_USED,
+ false,
+ CI_INT,
+ MANDATORY,
+ "0",
+ STR_VALUE(MAX_PORT_NO) },
+
+ {
+ CFG_SHM_SIGNUM,
+ "Signum",
+ "SHM",
+ "Signum to be used for signalling",
+ CI_USED,
+ false,
+ CI_INT,
+ UNDEFINED,
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_CONNECTION_NODE_1,
+ "NodeId1",
+ "SHM",
+ "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
+ CI_USED,
+ false,
+ CI_STRING,
+ MANDATORY,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_NODE_2,
+ "NodeId2",
+ "SHM",
+ "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
+ CI_USED,
+ false,
+ CI_STRING,
+ MANDATORY,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_GROUP,
+ "Group",
+ "SHM",
+ "",
+ CI_USED,
+ false,
+ CI_INT,
+ "35",
+ "0", "200" },
+
+ {
+ CFG_CONNECTION_NODE_ID_SERVER,
+ "NodeIdServer",
+ "SHM",
+ "",
+ CI_USED,
+ false,
+ CI_INT,
+ MANDATORY,
+ "1", "63" },
+
+ {
+ CFG_CONNECTION_SEND_SIGNAL_ID,
+ "SendSignalId",
+ "SHM",
+ "Sends id in each signal. Used in trace files.",
+ CI_USED,
+ false,
+ CI_BOOL,
+ "false",
+ "false",
+ "true" },
+
+
+ {
+ CFG_CONNECTION_CHECKSUM,
+ "Checksum",
+ "SHM",
+ "If checksum is enabled, all signals between nodes are checked for errors",
+ CI_USED,
+ false,
+ CI_BOOL,
+ "true",
+ "false",
+ "true" },
+
+ {
+ CFG_SHM_KEY,
+ "ShmKey",
+ "SHM",
+ "A shared memory key",
+ CI_USED,
+ false,
+ CI_INT,
+ UNDEFINED,
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_SHM_BUFFER_MEM,
+ "ShmSize",
+ "SHM",
+ "Size of shared memory segment",
+ CI_USED,
+ false,
+ CI_INT,
+ "1M",
+ "64K",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_CONNECTION_NODE_1_SYSTEM,
+ "NodeId1_System",
+ "SHM",
+ "System for node 1 in connection",
+ CI_INTERNAL,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_NODE_2_SYSTEM,
+ "NodeId2_System",
+ "SHM",
+ "System for node 2 in connection",
+ CI_INTERNAL,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ /****************************************************************************
+ * SCI
+ ***************************************************************************/
+ {
+ CFG_SECTION_CONNECTION,
+ "SCI",
+ "SCI",
+ "Connection section",
+ CI_USED,
+ false,
+ CI_SECTION,
+ (const char *)CONNECTION_TYPE_SCI,
+ 0, 0
+ },
+
+ {
+ CFG_CONNECTION_NODE_1,
+ "NodeId1",
+ "SCI",
+ "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
+ CI_USED,
+ false,
+ CI_STRING,
+ MANDATORY,
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_CONNECTION_NODE_2,
+ "NodeId2",
+ "SCI",
+ "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
+ CI_USED,
+ false,
+ CI_STRING,
+ MANDATORY,
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_CONNECTION_GROUP,
+ "Group",
+ "SCI",
+ "",
+ CI_USED,
+ false,
+ CI_INT,
+ "15",
+ "0", "200" },
+
+ {
+ CFG_CONNECTION_NODE_ID_SERVER,
+ "NodeIdServer",
+ "SCI",
+ "",
+ CI_USED,
+ false,
+ CI_INT,
+ MANDATORY,
+ "1", "63" },
+
+ {
+ CFG_CONNECTION_HOSTNAME_1,
+ "HostName1",
+ "SCI",
+ "Name/IP of computer on one side of the connection",
+ CI_INTERNAL,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_HOSTNAME_2,
+ "HostName2",
+ "SCI",
+ "Name/IP of computer on one side of the connection",
+ CI_INTERNAL,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_SERVER_PORT,
+ "PortNumber",
+ "SCI",
+ "Port used for this transporter",
+ CI_USED,
+ false,
+ CI_INT,
+ MANDATORY,
+ "0",
+ STR_VALUE(MAX_PORT_NO) },
+
+ {
+ CFG_SCI_HOST1_ID_0,
+ "Host1SciId0",
+ "SCI",
+ "SCI-node id for adapter 0 on Host1 (a computer can have two adapters)",
+ CI_USED,
+ false,
+ CI_INT,
+ MANDATORY,
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_SCI_HOST1_ID_1,
+ "Host1SciId1",
+ "SCI",
+ "SCI-node id for adapter 1 on Host1 (a computer can have two adapters)",
+ CI_USED,
+ false,
+ CI_INT,
+ "0",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_SCI_HOST2_ID_0,
+ "Host2SciId0",
+ "SCI",
+ "SCI-node id for adapter 0 on Host2 (a computer can have two adapters)",
+ CI_USED,
+ false,
+ CI_INT,
+ MANDATORY,
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_SCI_HOST2_ID_1,
+ "Host2SciId1",
+ "SCI",
+ "SCI-node id for adapter 1 on Host2 (a computer can have two adapters)",
+ CI_USED,
+ false,
+ CI_INT,
+ "0",
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_CONNECTION_SEND_SIGNAL_ID,
+ "SendSignalId",
+ "SCI",
+ "Sends id in each signal. Used in trace files.",
+ CI_USED,
+ false,
+ CI_BOOL,
+ "true",
+ "false",
+ "true" },
+
+ {
+ CFG_CONNECTION_CHECKSUM,
+ "Checksum",
+ "SCI",
+ "If checksum is enabled, all signals between nodes are checked for errors",
+ CI_USED,
+ false,
+ CI_BOOL,
+ "false",
+ "false",
+ "true" },
+
+ {
+ CFG_SCI_SEND_LIMIT,
+ "SendLimit",
+ "SCI",
+ "Transporter send buffer contents are sent when this no of bytes is buffered",
+ CI_USED,
+ false,
+ CI_INT,
+ "8K",
+ "128",
+ "32K" },
+
+ {
+ CFG_SCI_BUFFER_MEM,
+ "SharedBufferSize",
+ "SCI",
+ "Size of shared memory segment",
+ CI_USED,
+ false,
+ CI_INT,
+ "1M",
+ "64K",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_CONNECTION_NODE_1_SYSTEM,
+ "NodeId1_System",
+ "SCI",
+ "System for node 1 in connection",
+ CI_INTERNAL,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_NODE_2_SYSTEM,
+ "NodeId2_System",
+ "SCI",
+ "System for node 2 in connection",
+ CI_INTERNAL,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ /****************************************************************************
+ * OSE
+ ***************************************************************************/
+ {
+ CFG_SECTION_CONNECTION,
+ "OSE",
+ "OSE",
+ "Connection section",
+ CI_USED,
+ false,
+ CI_SECTION,
+ (const char *)CONNECTION_TYPE_OSE,
+ 0, 0
+ },
+
+ {
+ CFG_CONNECTION_HOSTNAME_1,
+ "HostName1",
+ "OSE",
+ "Name of computer on one side of the connection",
+ CI_USED,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_HOSTNAME_2,
+ "HostName2",
+ "OSE",
+ "Name of computer on one side of the connection",
+ CI_USED,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_NODE_1,
+ "NodeId1",
+ "OSE",
+ "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
+ CI_USED,
+ false,
+ CI_INT,
+ MANDATORY,
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_CONNECTION_NODE_2,
+ "NodeId2",
+ "OSE",
+ "Id of node ("DB_TOKEN_PRINT", "API_TOKEN_PRINT" or "MGM_TOKEN_PRINT") on one side of the connection",
+ CI_USED,
+ false,
+ CI_INT,
+ UNDEFINED,
+ "0",
+ STR_VALUE(MAX_INT_RNIL) },
+
+ {
+ CFG_CONNECTION_SEND_SIGNAL_ID,
+ "SendSignalId",
+ "OSE",
+ "Sends id in each signal. Used in trace files.",
+ CI_USED,
+ false,
+ CI_BOOL,
+ "true",
+ "false",
+ "true" },
+
+ {
+ CFG_CONNECTION_CHECKSUM,
+ "Checksum",
+ "OSE",
+ "If checksum is enabled, all signals between nodes are checked for errors",
+ CI_USED,
+ false,
+ CI_BOOL,
+ "false",
+ "false",
+ "true" },
+
+ {
+ CFG_CONNECTION_NODE_1_SYSTEM,
+ "NodeId1_System",
+ "OSE",
+ "System for node 1 in connection",
+ CI_INTERNAL,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+
+ {
+ CFG_CONNECTION_NODE_2_SYSTEM,
+ "NodeId2_System",
+ "OSE",
+ "System for node 2 in connection",
+ CI_INTERNAL,
+ false,
+ CI_STRING,
+ UNDEFINED,
+ 0, 0 },
+};
+
+const int ParamInfoNum = sizeof(ParamInfoArray) / sizeof(ParamInfo);
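
The whole parameter table above is a flat array tagged by section name, so a consumer can resolve an entry with a linear scan. A minimal lookup sketch, assuming the ParamInfo struct from ParamInfo.hpp (below) and the usual NDB type definitions are available; the helper name find_param_info is illustrative and not part of the patch:

  #include <string.h>
  #include "ParamInfo.hpp"

  // Linear scan over the table: match on section ("TCP", "SHM", "SCI", "OSE",
  // ...) and parameter name; return 0 when nothing matches.
  static const ParamInfo* find_param_info(const ParamInfo* arr, int n,
                                          const char* section, const char* fname)
  {
    for (int i = 0; i < n; i++)
    {
      if (strcmp(arr[i]._section, section) == 0 &&
          strcmp(arr[i]._fname, fname) == 0)
        return &arr[i];
    }
    return 0;
  }

With the table above, find_param_info(ParamInfoArray, ParamInfoNum, "TCP", "SendBufferMemory") would return the entry with default "256K" and range "64K" to MAX_INT_RNIL.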
diff --git a/storage/ndb/src/mgmsrv/ParamInfo.hpp b/storage/ndb/src/mgmsrv/ParamInfo.hpp
new file mode 100644
index 00000000000..7d12cd6252f
--- /dev/null
+++ b/storage/ndb/src/mgmsrv/ParamInfo.hpp
@@ -0,0 +1,44 @@
+#ifndef PARAMINFO_H
+#define PARAMINFO_H
+
+#define DB_TOKEN "DB"
+#define MGM_TOKEN "MGM"
+#define API_TOKEN "API"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * The Configuration parameter type and status
+ */
+
+enum ParameterType { CI_BOOL, CI_INT, CI_INT64, CI_STRING, CI_SECTION };
+enum ParameterStatus { CI_USED, ///< Active
+ CI_DEPRICATED, ///< Can still be used, but should not be
+ CI_NOTIMPLEMENTED, ///< Is ignored.
+ CI_INTERNAL ///< Not configurable by the user
+};
+
+/**
+ * Entry for one configuration parameter
+ */
+typedef struct m_ParamInfo {
+ Uint32 _paramId;
+ const char* _fname;
+ const char* _section;
+ const char* _description;
+ ParameterStatus _status;
+ bool _updateable;
+ ParameterType _type;
+ const char* _default;
+ const char* _min;
+ const char* _max;
+}ParamInfo;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
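
The initializers in ParamInfoArray fill these fields in declaration order; restating the TCP SendBufferMemory entry from the table above with the fields labelled (for illustration only, not additional code in the patch):

  const ParamInfo send_buffer_entry = {
    CFG_TCP_SEND_BUFFER_SIZE,                           // _paramId
    "SendBufferMemory",                                 // _fname
    "TCP",                                              // _section
    "Bytes of buffer for signals sent from this node",  // _description
    CI_USED,                                            // _status
    false,                                              // _updateable
    CI_INT,                                             // _type
    "256K",                                             // _default
    "64K",                                              // _min
    STR_VALUE(MAX_INT_RNIL)                             // _max
  };

Note that _default, _min and _max are const char* slots even for CI_INT and CI_BOOL parameters, which is how suffixed values such as "256K" and the MANDATORY and UNDEFINED markers can all be stored in the same three fields.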
diff --git a/storage/ndb/src/mgmsrv/Services.cpp b/storage/ndb/src/mgmsrv/Services.cpp
index d3272fc9de2..672e50729df 100644
--- a/storage/ndb/src/mgmsrv/Services.cpp
+++ b/storage/ndb/src/mgmsrv/Services.cpp
@@ -1352,7 +1352,7 @@ Ndb_mgmd_event_service::log(int eventType, const Uint32* theData, NodeId nodeId)
if (EventLoggerBase::event_lookup(eventType,cat,threshold,severity,textF))
DBUG_VOID_RETURN;
- char m_text[256];
+ char m_text[512];
EventLogger::getText(m_text, sizeof(m_text),
textF, theData, nodeId);
@@ -1368,6 +1368,15 @@ Ndb_mgmd_event_service::log(int eventType, const Uint32* theData, NodeId nodeId)
if (ndb_logevent_body[i].index_fn)
val= (*(ndb_logevent_body[i].index_fn))(val);
str.appfmt("%s=%d\n",ndb_logevent_body[i].token, val);
+ if(strcmp(ndb_logevent_body[i].token,"error") == 0)
+ {
+ int m_text_len= strlen(m_text);
+ if((int)sizeof(m_text) - m_text_len - 3 > 1)
+ {
+ BaseString::snprintf(m_text+m_text_len, 4 , " - ");
+ ndb_error_string(val, m_text+(m_text_len+3), sizeof(m_text)-m_text_len-3);
+ }
+ }
}
Vector<NDB_SOCKET_TYPE> copy;
diff --git a/storage/ndb/src/ndbapi/ndberror.c b/storage/ndb/src/ndbapi/ndberror.c
index 083830e8b44..68c521e7b82 100644
--- a/storage/ndb/src/ndbapi/ndberror.c
+++ b/storage/ndb/src/ndbapi/ndberror.c
@@ -774,11 +774,14 @@ const char *ndberror_classification_message(ndberror_classification classificati
return empty_string;
}
-int ndb_error_string(int err_no, char *str, unsigned int size)
+int ndb_error_string(int err_no, char *str, int size)
{
ndberror_struct error;
- unsigned int len;
+ int len;
+ assert(size > 1);
+ if(size <= 1)
+ return 0;
error.code = err_no;
ndberror_update(&error);
diff --git a/storage/ndb/tools/restore/consumer.hpp b/storage/ndb/tools/restore/consumer.hpp
index d5c6d38985a..37f67884e01 100644
--- a/storage/ndb/tools/restore/consumer.hpp
+++ b/storage/ndb/tools/restore/consumer.hpp
@@ -36,6 +36,7 @@ public:
virtual void logEntry(const LogEntry &){}
virtual void endOfLogEntrys(){}
virtual bool finalize_table(const TableS &){return true;}
+ virtual bool createSystable(const TableS &){ return true;}
virtual bool update_apply_status(const RestoreMetaData &metaData){return true;}
NODE_GROUP_MAP *m_nodegroup_map;
uint m_nodegroup_map_len;
diff --git a/storage/ndb/tools/restore/consumer_restore.cpp b/storage/ndb/tools/restore/consumer_restore.cpp
index 7524558a2d6..bd8edc4c47c 100644
--- a/storage/ndb/tools/restore/consumer_restore.cpp
+++ b/storage/ndb/tools/restore/consumer_restore.cpp
@@ -667,6 +667,33 @@ err:
}
bool
+BackupRestore::createSystable(const TableS & tables){
+ const char *tablename = tables.getTableName();
+
+ if( strcmp(tablename, NDB_REP_DB "/def/" NDB_APPLY_TABLE) != 0 &&
+ strcmp(tablename, NDB_REP_DB "/def/" NDB_SCHEMA_TABLE) != 0 )
+ {
+ return true;
+ }
+
+ BaseString tmp(tablename);
+ Vector<BaseString> split;
+ if(tmp.split(split, "/") != 3){
+ err << "Invalid table name format " << tablename << endl;
+ return false;
+ }
+
+ m_ndb->setDatabaseName(split[0].c_str());
+ m_ndb->setSchemaName(split[1].c_str());
+
+ NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
+ if( dict->getTable(split[2].c_str()) != NULL ){
+ return true;
+ }
+ return table(tables);
+}
+
+bool
BackupRestore::table(const TableS & table){
if (!m_restore && !m_restore_meta)
return true;
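
createSystable() only acts on the two replication system tables, expects their backup names in db/schema/table form, and re-creates them through table() only when the dictionary does not already know them. The split(...) != 3 guard is the easiest part to get wrong; a small standalone equivalent using std::string instead of BaseString (split_fqname is an illustrative name, not part of the patch):

  #include <string>
  #include <vector>

  // Split a fully qualified "db/schema/table" name on '/'.  Returns false for
  // any other shape, which is the case the "Invalid table name format" branch
  // above reports.
  static bool split_fqname(const std::string& fqname,
                           std::vector<std::string>& parts)
  {
    parts.clear();
    std::string::size_type start = 0, pos;
    while ((pos = fqname.find('/', start)) != std::string::npos)
    {
      parts.push_back(fqname.substr(start, pos - start));
      start = pos + 1;
    }
    parts.push_back(fqname.substr(start));
    return parts.size() == 3;
  }

For "mydb/def/mytable" this yields {"mydb", "def", "mytable"}, which map onto setDatabaseName(), setSchemaName() and the dictionary lookup in that order.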
diff --git a/storage/ndb/tools/restore/consumer_restore.hpp b/storage/ndb/tools/restore/consumer_restore.hpp
index c1d9472aea0..3d20cb3041e 100644
--- a/storage/ndb/tools/restore/consumer_restore.hpp
+++ b/storage/ndb/tools/restore/consumer_restore.hpp
@@ -73,6 +73,7 @@ public:
virtual void endOfLogEntrys();
virtual bool finalize_table(const TableS &);
virtual bool has_temp_error();
+ virtual bool createSystable(const TableS & table);
virtual bool update_apply_status(const RestoreMetaData &metaData);
void connectToMysql();
bool map_in_frm(char *new_data, const char *data,
diff --git a/storage/ndb/tools/restore/restore_main.cpp b/storage/ndb/tools/restore/restore_main.cpp
index c6947f3bf01..8a632d388e0 100644
--- a/storage/ndb/tools/restore/restore_main.cpp
+++ b/storage/ndb/tools/restore/restore_main.cpp
@@ -567,6 +567,15 @@ main(int argc, char** argv)
err << metaData[i]->getTableName() << " ... Exiting " << endl;
exitHandler(NDBT_FAILED);
}
+ } else {
+ for(Uint32 j= 0; j < g_consumers.size(); j++)
+ if (!g_consumers[j]->createSystable(* metaData[i]))
+ {
+ err << "Restore: Failed to restore system table: ";
+ err << metaData[i]->getTableName() << " ... Exiting " << endl;
+ exitHandler(NDBT_FAILED);
+ }
+
}
}
debug << "Close tables" << endl;