| author | unknown <tomas@whalegate.ndb.mysql.com> | 2007-11-12 10:50:58 +0100 |
|---|---|---|
| committer | unknown <tomas@whalegate.ndb.mysql.com> | 2007-11-12 10:50:58 +0100 |
| commit | 72bdc00ec978268398457b81fd653698d004f222 (patch) | |
| tree | 71b544be96d474691af694ea51d2ad66b9430d93 | |
| parent | 10397af9c519b8238599a1ff9fed0d3e871c50e7 (diff) | |
| parent | 5ac346667ce71c169ac730fdb04488ac1a4d4f59 (diff) | |
| download | mariadb-git-72bdc00ec978268398457b81fd653698d004f222.tar.gz | |
Merge whalegate.ndb.mysql.com:/home/tomas/cge-5.1
into whalegate.ndb.mysql.com:/home/tomas/mysql-5.1-new-ndb-merge
mysql-test/suite/ndb/r/ndb_multi.result:
Auto merged
mysql-test/suite/ndb/t/ndb_multi.test:
Auto merged
sql/ha_ndbcluster.cc:
Auto merged
sql/ha_ndbcluster_binlog.cc:
Auto merged
-rw-r--r-- | mysql-test/include/have_multi_ndb.inc             |  32
-rw-r--r-- | mysql-test/suite/ndb/r/ndb_basic.result           |  24
-rw-r--r-- | mysql-test/suite/ndb/r/ndb_multi.result           |   1
-rw-r--r-- | mysql-test/suite/ndb/r/ndb_multi_row.result       |   1
-rw-r--r-- | mysql-test/suite/ndb/t/ndb_basic.test             |  22
-rw-r--r-- | mysql-test/suite/ndb/t/ndb_multi.test             |   4
-rw-r--r-- | mysql-test/suite/ndb/t/ndb_multi_row.test         |   3
-rw-r--r-- | sql/ha_ndbcluster.cc                              |   2
-rw-r--r-- | sql/ha_ndbcluster_binlog.cc                       |  84
-rw-r--r-- | storage/ndb/src/kernel/blocks/ERROR_codes.txt     |   7
-rw-r--r-- | storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp     |  10
-rw-r--r-- | storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp |  60
-rw-r--r-- | storage/ndb/test/ndbapi/testDict.cpp              |  10
-rw-r--r-- | storage/ndb/test/ndbapi/testNodeRestart.cpp       |  48
-rw-r--r-- | storage/ndb/test/run-test/daily-basic-tests.txt   |   4
-rw-r--r-- | storage/ndb/tools/restore/Restore.cpp             | 156
-rw-r--r-- | storage/ndb/tools/restore/Restore.hpp             |   4
17 files changed, 361 insertions, 111 deletions
diff --git a/mysql-test/include/have_multi_ndb.inc b/mysql-test/include/have_multi_ndb.inc
index deda22b64c0..9779f181191 100644
--- a/mysql-test/include/have_multi_ndb.inc
+++ b/mysql-test/include/have_multi_ndb.inc
@@ -5,10 +5,6 @@ connect (server2,127.0.0.1,root,,test,$MASTER_MYPORT1,);
 # Check that server1 has NDB support
 connection server1;
 disable_query_log;
---disable_warnings
-drop table if exists t1, t2;
---enable_warnings
-flush tables;
 --require r/true.require
 select (support = 'YES' or support = 'DEFAULT') as `TRUE` from information_schema.engines where engine = 'ndbcluster';
 --source include/ndb_not_readonly.inc
@@ -17,14 +13,32 @@ enable_query_log;
 
 # Check that server2 has NDB support
 connection server2;
 disable_query_log;
---disable_warnings
-drop table if exists t1, t2;
---enable_warnings
-flush tables;
 --require r/true.require
 select (support = 'YES' or support = 'DEFAULT') as `TRUE` from information_schema.engines where engine = 'ndbcluster';
 --source include/ndb_not_readonly.inc
 enable_query_log;
-# Set the default connection to 'server1'
+# cleanup
+
+connection server1;
+disable_query_log;
+disable_warnings;
+--error 0,1051
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
+flush tables;
+flush status;
+enable_warnings;
+enable_query_log;
+
+connection server2;
+disable_query_log;
+disable_warnings;
+--error 0,1051
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;
+flush tables;
+flush status;
+enable_warnings;
+enable_query_log;
+
+# Set the default connection
 connection server1;
diff --git a/mysql-test/suite/ndb/r/ndb_basic.result b/mysql-test/suite/ndb/r/ndb_basic.result
index 4eddaeb1227..9f4f8c0755c 100644
--- a/mysql-test/suite/ndb/r/ndb_basic.result
+++ b/mysql-test/suite/ndb/r/ndb_basic.result
@@ -869,6 +869,30 @@ a b
 3 30
 4 1
 drop table t1,t2;
+create table t1 (a varchar(100) primary key, b varchar(100)) engine = NDB;
+insert into t1 values
+('a', 'a'),('b','b'),('c', 'c'),('aa', 'aa'),('bb', 'bb'),('cc', 'cc');
+replace into t1 values ('a', '-a');
+replace into t1 values ('b', '-b');
+replace into t1 values ('c', '-c');
+replace into t1 values ('aa', '-aa');
+replace into t1 values ('bb', '-bb');
+replace into t1 values ('cc', '-cc');
+replace into t1 values ('aaa', '-aaa');
+replace into t1 values ('bbb', '-bbb');
+replace into t1 values ('ccc', '-ccc');
+select * from t1 order by 1,2;
+a b
+a -a
+aa -aa
+aaa -aaa
+b -b
+bb -bb
+bbb -bbb
+c -c
+cc -cc
+ccc -ccc
+drop table t1;
 End of 5.0 tests
 CREATE TABLE t1 (a VARCHAR(255) NOT NULL, CONSTRAINT pk_a PRIMARY KEY (a))engine=ndb;
diff --git a/mysql-test/suite/ndb/r/ndb_multi.result b/mysql-test/suite/ndb/r/ndb_multi.result
index 17380b10fd7..40483887919 100644
--- a/mysql-test/suite/ndb/r/ndb_multi.result
+++ b/mysql-test/suite/ndb/r/ndb_multi.result
@@ -1,4 +1,5 @@
 drop table if exists t1, t2, t3, t4;
+flush status;
 drop table if exists t1, t2, t3, t4;
 flush status;
 create table t1 (a int) engine=ndbcluster;
diff --git a/mysql-test/suite/ndb/r/ndb_multi_row.result b/mysql-test/suite/ndb/r/ndb_multi_row.result
index cf5a76d6f01..3d34b16a1a8 100644
--- a/mysql-test/suite/ndb/r/ndb_multi_row.result
+++ b/mysql-test/suite/ndb/r/ndb_multi_row.result
@@ -1,4 +1,5 @@
 drop table if exists t1, t2, t3, t4;
+flush status;
 drop table if exists t1, t2, t3, t4;
 flush status;
 create table t1 (a int) engine=ndbcluster;
diff --git a/mysql-test/suite/ndb/t/ndb_basic.test b/mysql-test/suite/ndb/t/ndb_basic.test
index b9ccdf9fd0d..2fc140288ca 100644
--- a/mysql-test/suite/ndb/t/ndb_basic.test
+++ b/mysql-test/suite/ndb/t/ndb_basic.test
@@ -800,9 +800,27 @@ update ignore t1,t2 set a = 1, c = 1 where a = 3 and c = 3;
 select * from t1 order by a;
 drop table t1,t2;
 
-# End of 5.0 tests
---echo End of 5.0 tests
+#
+# Bug#31635
+#
+create table t1 (a varchar(100) primary key, b varchar(100)) engine = NDB;
+insert into t1 values
+  ('a', 'a'),('b','b'),('c', 'c'),('aa', 'aa'),('bb', 'bb'),('cc', 'cc');
+replace into t1 values ('a', '-a');
+replace into t1 values ('b', '-b');
+replace into t1 values ('c', '-c');
+
+replace into t1 values ('aa', '-aa');
+replace into t1 values ('bb', '-bb');
+replace into t1 values ('cc', '-cc');
+
+replace into t1 values ('aaa', '-aaa');
+replace into t1 values ('bbb', '-bbb');
+replace into t1 values ('ccc', '-ccc');
+select * from t1 order by 1,2;
+drop table t1;
+--echo End of 5.0 tests
 
 #
 # Bug #18483 Cannot create table with FK constraint
diff --git a/mysql-test/suite/ndb/t/ndb_multi.test b/mysql-test/suite/ndb/t/ndb_multi.test
index c2217b51d08..e033ad1e479 100644
--- a/mysql-test/suite/ndb/t/ndb_multi.test
+++ b/mysql-test/suite/ndb/t/ndb_multi.test
@@ -4,11 +4,11 @@
 --disable_warnings
 connection server2;
 drop table if exists t1, t2, t3, t4;
+flush status;
 connection server1;
 drop table if exists t1, t2, t3, t4;
---enable_warnings
-
 flush status;
+--enable_warnings
 
 # Create test tables on server1
 create table t1 (a int) engine=ndbcluster;
diff --git a/mysql-test/suite/ndb/t/ndb_multi_row.test b/mysql-test/suite/ndb/t/ndb_multi_row.test
index c82307839f4..26953093ed0 100644
--- a/mysql-test/suite/ndb/t/ndb_multi_row.test
+++ b/mysql-test/suite/ndb/t/ndb_multi_row.test
@@ -6,11 +6,12 @@
 --disable_warnings
 connection server2;
 drop table if exists t1, t2, t3, t4;
+flush status;
 connection server1;
 drop table if exists t1, t2, t3, t4;
+flush status;
 --enable_warnings
 
-flush status;
 
 # Create test tables on server1
 create table t1 (a int) engine=ndbcluster;
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 436710e3dee..41c0df4f6e4 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -618,7 +618,7 @@ bool ha_ndbcluster::get_error_message(int error,
   DBUG_ENTER("ha_ndbcluster::get_error_message");
   DBUG_PRINT("enter", ("error: %d", error));
 
-  Ndb *ndb= get_ndb();
+  Ndb *ndb= check_ndb_in_thd(current_thd);
   if (!ndb)
     DBUG_RETURN(FALSE);
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index fc35a7a930e..6d2daa965a1 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -241,18 +241,22 @@ static void dbug_print_table(const char *info, TABLE *table)
 static void run_query(THD *thd, char *buf, char *end,
                       const int *no_print_error, my_bool disable_binlog)
 {
-  ulong save_query_length= thd->query_length;
-  char *save_query= thd->query;
-  ulong save_thread_id= thd->variables.pseudo_thread_id;
+  ulong save_thd_query_length= thd->query_length;
+  char *save_thd_query= thd->query;
+  struct system_variables save_thd_variables= thd->variables;
+  struct system_status_var save_thd_status_var= thd->status_var;
+  THD_TRANS save_thd_transaction_all= thd->transaction.all;
+  THD_TRANS save_thd_transaction_stmt= thd->transaction.stmt;
   ulonglong save_thd_options= thd->options;
   DBUG_ASSERT(sizeof(save_thd_options) == sizeof(thd->options));
-  NET save_net= thd->net;
+  NET save_thd_net= thd->net;
   const char* found_semicolon= NULL;
 
   bzero((char*) &thd->net, sizeof(NET));
   thd->query_length= end - buf;
   thd->query= buf;
   thd->variables.pseudo_thread_id= thread_id;
+  thd->transaction.stmt.modified_non_trans_table= FALSE;
   if (disable_binlog)
     thd->options&= ~OPTION_BIN_LOG;
@@ -275,10 +279,13 @@ static void run_query(THD *thd, char *buf, char *end,
   }
 
   thd->options= save_thd_options;
-  thd->query_length= save_query_length;
-  thd->query= save_query;
-  thd->variables.pseudo_thread_id= save_thread_id;
-  thd->net= save_net;
+  thd->query_length= save_thd_query_length;
+  thd->query= save_thd_query;
+  thd->variables= save_thd_variables;
+  thd->status_var= save_thd_status_var;
+  thd->transaction.all= save_thd_transaction_all;
+  thd->transaction.stmt= save_thd_transaction_stmt;
+  thd->net= save_thd_net;
 
   if (thd == injector_thd)
   {
@@ -777,8 +784,9 @@ static int ndbcluster_create_ndb_apply_status_table(THD *thd)
                    " end_pos BIGINT UNSIGNED NOT NULL, "
                    " PRIMARY KEY USING HASH (server_id) ) ENGINE=NDB");
 
-  const int no_print_error[4]= {ER_TABLE_EXISTS_ERROR,
+  const int no_print_error[5]= {ER_TABLE_EXISTS_ERROR,
                                 701,
+                                702,
                                 4009,
                                 0}; // do not print error 701 etc
   run_query(thd, buf, end, no_print_error, TRUE);
@@ -837,8 +845,9 @@ static int ndbcluster_create_schema_table(THD *thd)
                    " type INT UNSIGNED NOT NULL,"
                    " PRIMARY KEY USING HASH (db,name) ) ENGINE=NDB");
 
-  const int no_print_error[4]= {ER_TABLE_EXISTS_ERROR,
+  const int no_print_error[5]= {ER_TABLE_EXISTS_ERROR,
                                 701,
+                                702,
                                 4009,
                                 0}; // do not print error 701 etc
   run_query(thd, buf, end, no_print_error, TRUE);
@@ -3587,6 +3596,7 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
   Thd_ndb *thd_ndb=0;
   int ndb_update_ndb_binlog_index= 1;
   injector *inj= injector::instance();
+  uint incident_id= 0;
 
 #ifdef RUN_NDB_BINLOG_TIMER
   Timer main_timer;
@@ -3692,18 +3702,64 @@ pthread_handler_t ndb_binlog_thread_func(void *arg)
   pthread_mutex_unlock(&injector_mutex);
   pthread_cond_signal(&injector_cond);
 
+  /*
+    wait for mysql server to start (so that the binlog is started
+    and thus can receive the first GAP event)
+  */
+  pthread_mutex_lock(&LOCK_server_started);
+  while (!mysqld_server_started)
+  {
+    struct timespec abstime;
+    set_timespec(abstime, 1);
+    pthread_cond_timedwait(&COND_server_started, &LOCK_server_started,
+                           &abstime);
+    if (ndbcluster_terminating)
+    {
+      pthread_mutex_unlock(&LOCK_server_started);
+      pthread_mutex_lock(&LOCK_ndb_util_thread);
+      goto err;
+    }
+  }
+  pthread_mutex_unlock(&LOCK_server_started);
+
 restart:
   /*
     Main NDB Injector loop
   */
+  while (ndb_binlog_running)
   {
     /*
-      Always insert a GAP event as we cannot know what has happened in the cluster
-      while not being connected.
+      check if it is the first log, if so we do not insert a GAP event
+      as there is really no log to have a GAP in
     */
-    LEX_STRING const msg= { C_STRING_WITH_LEN("Cluster connect") };
-    inj->record_incident(thd, INCIDENT_LOST_EVENTS, msg);
+    if (incident_id == 0)
+    {
+      LOG_INFO log_info;
+      mysql_bin_log.get_current_log(&log_info);
+      int len= strlen(log_info.log_file_name);
+      uint no= 0;
+      if ((sscanf(log_info.log_file_name + len - 6, "%u", &no) == 1) &&
+          no == 1)
+      {
+        /* this is the fist log, so skip GAP event */
+        break;
+      }
+    }
+
+    /*
+      Always insert a GAP event as we cannot know what has happened
+      in the cluster while not being connected.
+    */
+    LEX_STRING const msg[2]=
+    {
+      { C_STRING_WITH_LEN("mysqld startup") },
+      { C_STRING_WITH_LEN("cluster disconnect")}
+    };
+    IF_DBUG(int error=)
+      inj->record_incident(thd, INCIDENT_LOST_EVENTS, msg[incident_id]);
+    DBUG_ASSERT(!error);
+    break;
   }
+  incident_id= 1;
   {
     thd->proc_info= "Waiting for ndbcluster to start";
diff --git a/storage/ndb/src/kernel/blocks/ERROR_codes.txt b/storage/ndb/src/kernel/blocks/ERROR_codes.txt
index 4d4d4fcafc4..72791cb0ebc 100644
--- a/storage/ndb/src/kernel/blocks/ERROR_codes.txt
+++ b/storage/ndb/src/kernel/blocks/ERROR_codes.txt
@@ -5,7 +5,7 @@ Next DBACC 3002
 Next DBTUP 4029
 Next DBLQH 5047
 Next DBDICT 6008
-Next DBDIH 7193
+Next DBDIH 7195
 Next DBTC 8054
 Next CMVMI 9000
 Next BACKUP 10038
@@ -81,6 +81,11 @@ Delay GCP_SAVEREQ by 10 secs
 
 7185: Dont reply to COPY_GCI_REQ where reason == GCP
 
+7193: Dont send LCP_FRAG_ORD to self, and crash when sending first
+      LCP_FRAG_ORD(last)
+
+7194: Force removeNodeFromStored to complete in the middle of MASTER_LCPCONF
+
 ERROR CODES FOR TESTING NODE FAILURE, LOCAL CHECKPOINT HANDLING:
 -----------------------------------------------------------------
diff --git a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
index 21826df28f9..b0bbdefff55 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
@@ -1310,7 +1310,17 @@ private:
     LcpStatus lcpStatus;
     Uint32 lcpStatusUpdatedPlace;
 
+    struct Save {
+      LcpStatus m_status;
+      Uint32 m_place;
+    } m_saveState[10];
+
     void setLcpStatus(LcpStatus status, Uint32 line){
+      for (Uint32 i = 9; i > 0; i--)
+        m_saveState[i] = m_saveState[i-1];
+      m_saveState[0].m_status = lcpStatus;
+      m_saveState[0].m_place = lcpStatusUpdatedPlace;
+
       lcpStatus = status;
       lcpStatusUpdatedPlace = line;
     }
diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index 5403ac5cc38..28378c41f25 100644
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -5181,11 +5181,19 @@ void Dbdih::startRemoveFailedNode(Signal* signal, NodeRecordPtr failedNodePtr)
   }
 
   jam();
-  signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
-  signal->theData[1] = failedNodePtr.i;
-  signal->theData[2] = 0; // Tab id
-  sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
-  
+
+  if (!ERROR_INSERTED(7194))
+  {
+    signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
+    signal->theData[1] = failedNodePtr.i;
+    signal->theData[2] = 0; // Tab id
+    sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+  }
+  else
+  {
+    ndbout_c("7194 Not starting ZREMOVE_NODE_FROM_TABLE");
+  }
+
   setLocalNodefailHandling(signal, failedNodePtr.i, NF_REMOVE_NODE_FROM_TABLE);
 }//Dbdih::startRemoveFailedNode()
@@ -6114,12 +6122,22 @@ Dbdih::checkEmptyLcpComplete(Signal *signal){
 
     signal->theData[0] = 7012;
     execDUMP_STATE_ORD(signal);
+
+    if (ERROR_INSERTED(7194))
+    {
+      ndbout_c("7194 starting ZREMOVE_NODE_FROM_TABLE");
+      signal->theData[0] = DihContinueB::ZREMOVE_NODE_FROM_TABLE;
+      signal->theData[1] = c_lcpMasterTakeOverState.failedNodeId;
+      signal->theData[2] = 0; // Tab id
+      sendSignal(reference(), GSN_CONTINUEB, signal, 3, JBB);
+    }
 
     c_lcpMasterTakeOverState.set(LMTOS_INITIAL, __LINE__);
 
     MasterLCPReq * const req = (MasterLCPReq *)&signal->theData[0];
     req->masterRef = reference();
     req->failedNodeId = c_lcpMasterTakeOverState.failedNodeId;
     sendLoopMacro(MASTER_LCPREQ, sendMASTER_LCPREQ);
+
   } else {
     sendMASTER_LCPCONF(signal);
   }
@@ -6432,6 +6450,15 @@ void Dbdih::execMASTER_LCPCONF(Signal* signal)
 {
   const MasterLCPConf * const conf = (MasterLCPConf *)&signal->theData[0];
   jamEntry();
+
+  if (ERROR_INSERTED(7194))
+  {
+    ndbout_c("delaying MASTER_LCPCONF due to error 7194");
+    sendSignalWithDelay(reference(), GSN_MASTER_LCPCONF, signal,
+                        300, signal->getLength());
+    return;
+  }
+
   Uint32 senderNodeId = conf->senderNodeId;
   MasterLCPConf::State lcpState = (MasterLCPConf::State)conf->lcpState;
   const Uint32 failedNodeId = conf->failedNodeId;
@@ -6566,7 +6593,6 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
 #endif
 
     c_lcpState.keepGci = SYSFILE->keepGCI;
-    c_lcpState.setLcpStatus(LCP_START_LCP_ROUND, __LINE__);
     startLcpRoundLoopLab(signal, 0, 0);
     break;
   }
@@ -10538,6 +10564,8 @@ void Dbdih::sendLastLCP_FRAG_ORD(Signal* signal)
     if(ERROR_INSERTED(7075)){
       continue;
     }
+
+    CRASH_INSERTION(7193);
     BlockReference ref = calcLqhBlockRef(nodePtr.i);
     sendSignal(ref, GSN_LCP_FRAG_ORD, signal,LcpFragOrd::SignalLength, JBB);
   }
@@ -10765,6 +10793,13 @@ Dbdih::checkLcpAllTablesDoneInLqh(){
 
   CRASH_INSERTION2(7017, !isMaster());
   c_lcpState.setLcpStatus(LCP_TAB_COMPLETED, __LINE__);
+
+  if (ERROR_INSERTED(7194))
+  {
+    ndbout_c("CLEARING 7194");
+    CLEAR_ERROR_INSERT_VALUE;
+  }
+
   return true;
 }
@@ -10954,6 +10989,11 @@ Dbdih::sendLCP_FRAG_ORD(Signal* signal,
 
   BlockReference ref = calcLqhBlockRef(replicaPtr.p->procNode);
 
+  if (ERROR_INSERTED(7193) && replicaPtr.p->procNode == getOwnNodeId())
+  {
+    return;
+  }
+
   LcpFragOrd * const lcpFragOrd = (LcpFragOrd *)&signal->theData[0];
   lcpFragOrd->tableId = info.tableId;
   lcpFragOrd->fragmentId = info.fragId;
@@ -14500,6 +14540,14 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
              ("immediateLcpStart = %d masterLcpNodeId = %d",
               c_lcpState.immediateLcpStart,
               refToNode(c_lcpState.m_masterLcpDihRef));
+
+    for (Uint32 i = 0; i<10; i++)
+    {
+      infoEvent("%u : status: %u place: %u", i,
+                c_lcpState.m_saveState[i].m_status,
+                c_lcpState.m_saveState[i].m_place);
+    }
+
     infoEvent("-- Node %d LCP STATE --", getOwnNodeId());
   }
diff --git a/storage/ndb/test/ndbapi/testDict.cpp b/storage/ndb/test/ndbapi/testDict.cpp
index 16b6e129605..e1b8f2b3c7f 100644
--- a/storage/ndb/test/ndbapi/testDict.cpp
+++ b/storage/ndb/test/ndbapi/testDict.cpp
@@ -2776,9 +2776,13 @@ runDropDDObjects(NDBT_Context* ctx, NDBT_Step* step){
     case NdbDictionary::Object::UserTable:
       tableFound = list.elements[i].name;
       if(tableFound != 0){
-        if(pDict->dropTable(tableFound) != 0){
-          g_err << "Failed to drop table: " << pDict->getNdbError() << endl;
-          return NDBT_FAILED;
+        if(strcmp(tableFound, "ndb_apply_status") != 0 &&
+           strcmp(tableFound, "NDB$BLOB_2_3") != 0 &&
+           strcmp(tableFound, "ndb_schema") != 0){
+          if(pDict->dropTable(tableFound) != 0){
+            g_err << "Failed to drop table: " << tableFound << pDict->getNdbError() << endl;
+            return NDBT_FAILED;
+          }
         }
       }
       tableFound = 0;
diff --git a/storage/ndb/test/ndbapi/testNodeRestart.cpp b/storage/ndb/test/ndbapi/testNodeRestart.cpp
index 419196e00eb..2a5febb7ae9 100644
--- a/storage/ndb/test/ndbapi/testNodeRestart.cpp
+++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp
@@ -1832,6 +1832,51 @@ runBug31525(NDBT_Context* ctx, NDBT_Step* step)
   if (res.waitClusterStarted())
     return NDBT_FAILED;
+
+  return NDBT_OK;
+}
+
+int
+runBug32160(NDBT_Context* ctx, NDBT_Step* step)
+{
+  int result = NDBT_OK;
+  int loops = ctx->getNumLoops();
+  int records = ctx->getNumRecords();
+  Ndb* pNdb = GETNDB(step);
+  NdbRestarter res;
+
+  if (res.getNumDbNodes() < 2)
+  {
+    return NDBT_OK;
+  }
+
+  int master = res.getMasterNodeId();
+  int next = res.getNextMasterNodeId(master);
+
+  if (res.insertErrorInNode(next, 7194))
+  {
+    return NDBT_FAILED;
+  }
+
+  int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 };
+  if (res.dumpStateOneNode(master, val2, 2))
+    return NDBT_FAILED;
+
+  if (res.insertErrorInNode(master, 7193))
+    return NDBT_FAILED;
+
+  int val3[] = { 7099 };
+  if (res.dumpStateOneNode(master, val3, 1))
+    return NDBT_FAILED;
+
+  if (res.waitNodesNoStart(&master, 1))
+    return NDBT_FAILED;
+
+  if (res.startNodes(&master, 1))
+    return NDBT_FAILED;
+
+  if (res.waitClusterStarted())
+    return NDBT_FAILED;
 
   return NDBT_OK;
 }
@@ -2205,6 +2250,9 @@ TESTCASE("Bug28717", ""){
 TESTCASE("Bug29364", ""){
   INITIALIZER(runBug29364);
 }
+TESTCASE("Bug32160", ""){
+  INITIALIZER(runBug32160);
+}
 NDBT_TESTSUITE_END(testNodeRestart);
 
 int main(int argc, const char** argv){
diff --git a/storage/ndb/test/run-test/daily-basic-tests.txt b/storage/ndb/test/run-test/daily-basic-tests.txt
index 103675d8e35..b6f3e51a515 100644
--- a/storage/ndb/test/run-test/daily-basic-tests.txt
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt
@@ -581,6 +581,10 @@ max-time: 1000
 cmd: testNodeRestart
 args: -n Bug29364 T1
 
+max-time: 300
+cmd: testNodeRestart
+args: -n Bug32160 T1
+
 #
 # DICT TESTS
 max-time: 500
diff --git a/storage/ndb/tools/restore/Restore.cpp b/storage/ndb/tools/restore/Restore.cpp
index a7d8a9d10d9..f599bb21978 100644
--- a/storage/ndb/tools/restore/Restore.cpp
+++ b/storage/ndb/tools/restore/Restore.cpp
@@ -534,6 +534,88 @@ TupleS::prepareRecord(TableS & tab){
   return true;
 }
 
+int
+RestoreDataIterator::readTupleData(Uint32 *buf_ptr, Uint32 *ptr,
+                                   Uint32 dataLength)
+{
+  while (ptr + 2 < buf_ptr + dataLength)
+  {
+    typedef BackupFormat::DataFile::VariableData VarData;
+    VarData * data = (VarData *)ptr;
+    Uint32 sz = ntohl(data->Sz);
+    Uint32 attrId = ntohl(data->Id); // column_no
+
+    AttributeData * attr_data = m_tuple.getData(attrId);
+    const AttributeDesc * attr_desc = m_tuple.getDesc(attrId);
+
+    // just a reminder - remove when backwards compat implemented
+    if (m_currentTable->backupVersion < MAKE_VERSION(5,1,3) &&
+        attr_desc->m_column->getNullable())
+    {
+      const Uint32 ind = attr_desc->m_nullBitIndex;
+      if(BitmaskImpl::get(m_currentTable->m_nullBitmaskSize,
+                          buf_ptr,ind))
+      {
+        attr_data->null = true;
+        attr_data->void_value = NULL;
+        continue;
+      }
+    }
+
+    if (m_currentTable->backupVersion < MAKE_VERSION(5,1,3))
+    {
+      sz *= 4;
+    }
+
+    attr_data->null = false;
+    attr_data->void_value = &data->Data[0];
+    attr_data->size = sz;
+
+    //if (m_currentTable->getTableId() >= 2) { ndbout << "var off=" << ptr-buf_ptr << " attrId=" << attrId << endl; }
+
+    /**
+     * Compute array size
+     */
+    const Uint32 arraySize = sz / (attr_desc->size / 8);
+    assert(arraySize <= attr_desc->arraySize);
+
+    //convert the length of blob(v1) and text(v1)
+    if(!m_hostByteOrder
+       && (attr_desc->m_column->getType() == NdbDictionary::Column::Blob
+           || attr_desc->m_column->getType() == NdbDictionary::Column::Text)
+       && attr_desc->m_column->getArrayType() == NdbDictionary::Column::ArrayTypeFixed)
+    {
+      char* p = (char*)&attr_data->u_int64_value[0];
+      Uint64 x;
+      memcpy(&x, p, sizeof(Uint64));
+      x = Twiddle64(x);
+      memcpy(p, &x, sizeof(Uint64));
+    }
+
+    //convert datetime type
+    if(!m_hostByteOrder
+       && attr_desc->m_column->getType() == NdbDictionary::Column::Datetime)
+    {
+      char* p = (char*)&attr_data->u_int64_value[0];
+      Uint64 x;
+      memcpy(&x, p, sizeof(Uint64));
+      x = Twiddle64(x);
+      memcpy(p, &x, sizeof(Uint64));
+    }
+
+    if(!Twiddle(attr_desc, attr_data, attr_desc->arraySize))
+    {
+      return -1;
+    }
+
+    ptr += ((sz + 3) >> 2) + 2;
+  }
+
+  assert(ptr == buf_ptr + dataLength);
+
+  return 0;
+}
+
 const TupleS *
 RestoreDataIterator::getNextTuple(int & res)
 {
@@ -630,78 +712,8 @@ RestoreDataIterator::getNextTuple(int & res)
     attr_data->void_value = NULL;
   }
 
-  while (ptr + 2 < buf_ptr + dataLength) {
-    typedef BackupFormat::DataFile::VariableData VarData;
-    VarData * data = (VarData *)ptr;
-    Uint32 sz = ntohl(data->Sz);
-    Uint32 attrId = ntohl(data->Id); // column_no
-
-    AttributeData * attr_data = m_tuple.getData(attrId);
-    const AttributeDesc * attr_desc = m_tuple.getDesc(attrId);
-
-    // just a reminder - remove when backwards compat implemented
-    if(m_currentTable->backupVersion < MAKE_VERSION(5,1,3) &&
-       attr_desc->m_column->getNullable()){
-      const Uint32 ind = attr_desc->m_nullBitIndex;
-      if(BitmaskImpl::get(m_currentTable->m_nullBitmaskSize,
-                          buf_ptr,ind)){
-        attr_data->null = true;
-        attr_data->void_value = NULL;
-        continue;
-      }
-    }
-
-    if (m_currentTable->backupVersion < MAKE_VERSION(5,1,3))
-    {
-      sz *= 4;
-    }
-
-    attr_data->null = false;
-    attr_data->void_value = &data->Data[0];
-    attr_data->size = sz;
-
-    //if (m_currentTable->getTableId() >= 2) { ndbout << "var off=" << ptr-buf_ptr << " attrId=" << attrId << endl; }
-
-    /**
-     * Compute array size
-     */
-    const Uint32 arraySize = sz / (attr_desc->size / 8);
-    assert(arraySize <= attr_desc->arraySize);
-
-    //convert the length of blob(v1) and text(v1)
-    if(!m_hostByteOrder
-       && (attr_desc->m_column->getType() == NdbDictionary::Column::Blob
-           || attr_desc->m_column->getType() == NdbDictionary::Column::Text)
-       && attr_desc->m_column->getArrayType() == NdbDictionary::Column::ArrayTypeFixed)
-    {
-      char* p = (char*)&attr_data->u_int64_value[0];
-      Uint64 x;
-      memcpy(&x, p, sizeof(Uint64));
-      x = Twiddle64(x);
-      memcpy(p, &x, sizeof(Uint64));
-    }
-
-    //convert datetime type
-    if(!m_hostByteOrder
-       && attr_desc->m_column->getType() == NdbDictionary::Column::Datetime)
-    {
-      char* p = (char*)&attr_data->u_int64_value[0];
-      Uint64 x;
-      memcpy(&x, p, sizeof(Uint64));
-      x = Twiddle64(x);
-      memcpy(p, &x, sizeof(Uint64));
-    }
-
-    if(!Twiddle(attr_desc, attr_data, attr_desc->arraySize))
-    {
-      res = -1;
-      return NULL;
-    }
-
-    ptr += ((sz + 3) >> 2) + 2;
-  }
-
-  assert(ptr == buf_ptr + dataLength);
+  if ((res = readTupleData(buf_ptr, ptr, dataLength)))
+    return NULL;
 
   m_count ++;
   res = 0;
diff --git a/storage/ndb/tools/restore/Restore.hpp b/storage/ndb/tools/restore/Restore.hpp
index 5455fa17aa0..f6de9245509 100644
--- a/storage/ndb/tools/restore/Restore.hpp
+++ b/storage/ndb/tools/restore/Restore.hpp
@@ -355,6 +355,10 @@ public:
   bool validateFragmentFooter();
 
   const TupleS *getNextTuple(int & res);
+
+private:
+
+  int readTupleData(Uint32 *buf_ptr, Uint32 *ptr, Uint32 dataLength);
 };
 
 class LogEntry {
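The run_query() hunk in sql/ha_ndbcluster_binlog.cc exists because the utility thread runs internal SQL on a borrowed THD: every field the call mutates must be saved up front and restored afterwards, and the bug fixed here was restoring too little (only the query text and pseudo thread id, not thd->variables, thd->status_var, or thd->transaction.*). Below is a minimal RAII sketch of that save-everything/restore-everything idiom; ThdState and all field names are hypothetical stand-ins for the server's THD, not its real API.

#include <string>
#include <utility>

// Hypothetical stand-in for the server's THD; the real struct has many more fields.
struct ThdState {
    std::string query;
    unsigned long pseudo_thread_id;
    unsigned long long options;
};

// Copies the whole state on construction and copies it back on destruction,
// mirroring how the patched run_query() restores thd->variables and
// thd->transaction.* wholesale instead of field by field.
class ScopedThdOverride {
    ThdState& thd_;
    ThdState saved_;
public:
    ScopedThdOverride(ThdState& thd, std::string query, unsigned long tid)
        : thd_(thd), saved_(thd) {          // full copy, like save_thd_variables
        thd_.query = std::move(query);
        thd_.pseudo_thread_id = tid;
    }
    ~ScopedThdOverride() { thd_ = saved_; } // restore everything at once
};

Restoring by whole-struct copy avoids the failure mode this patch fixes: a newly mutated field that someone forgot to add to the manual restore list.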
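The injector-thread hunk decides whether to record a GAP (INCIDENT_LOST_EVENTS) event by parsing the numeric suffix of the current binlog file name: a suffix of 1 means no earlier log exists, so there is nothing to have a gap in. Here is a self-contained sketch of that check, assuming conventional binlog names such as master-bin.000001; the helper name is ours, not the server's.

#include <cstdio>
#include <cstring>

// Returns true when the binlog file name ends in numeric suffix 1
// (e.g. "master-bin.000001"), i.e. this is the very first log.
// Mirrors the sscanf-on-the-last-6-chars test added in the patch.
static bool is_first_binlog(const char* log_file_name)
{
    std::size_t len = std::strlen(log_file_name);
    if (len < 6)
        return false;
    unsigned no = 0;
    return std::sscanf(log_file_name + len - 6, "%u", &no) == 1 && no == 1;
}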
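The Dbdih.hpp hunk turns setLcpStatus() into a small debugging ring: before each transition it shifts the previous (status, source line) pairs down m_saveState and records the state being left, and the execDUMP_STATE_ORD() hunk prints the ten most recent entries. A standalone sketch of the same structure, with illustrative names rather than the kernel's types:

#include <cstdint>
#include <cstdio>

// Keeps the last 10 (status, line) transitions so a dump handler can show
// how the state machine arrived at its current state.
struct LcpStateHistory {
    struct Entry { uint32_t status; uint32_t place; };
    Entry saved[10] = {};
    uint32_t status = 0;
    uint32_t place  = 0;

    void set(uint32_t newStatus, uint32_t line) {
        for (int i = 9; i > 0; i--)       // oldest entry falls off the end
            saved[i] = saved[i - 1];
        saved[0] = { status, place };     // remember the state we are leaving
        status = newStatus;
        place  = line;
    }
    void dump() const {                   // analogue of the infoEvent() loop
        for (int i = 0; i < 10; i++)
            std::printf("%d : status: %u place: %u\n", i,
                        (unsigned)saved[i].status, (unsigned)saved[i].place);
    }
};

Recording the call site as __LINE__, as the patch does, makes each dumped entry point directly at the code that performed the transition.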
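The new readTupleData() in Restore.cpp keeps the byte-order fix-ups for fixed-array Blob/Text length words and Datetime values, applied through Twiddle64 when the backup host's endianness differs from the restore host's (m_hostByteOrder is false). The sketch below is our own illustration of such a 64-bit swap rather than the NDB helper, including the memcpy-in/memcpy-out pattern the patch uses to avoid unaligned access on strict-alignment platforms.

#include <cstdint>
#include <cstring>

// Reverse the 8 bytes of a 64-bit value (what a Twiddle64-style helper does).
static uint64_t twiddle64(uint64_t x)
{
    uint64_t out = 0;
    for (int i = 0; i < 8; i++)
        out = (out << 8) | ((x >> (8 * i)) & 0xffULL);
    return out;
}

// Swap a 64-bit field in place without assuming p is 8-byte aligned:
// read with memcpy, swap, write back -- the same sequence as the patch.
static void swap64_in_place(void* p)
{
    uint64_t x;
    std::memcpy(&x, p, sizeof x);
    x = twiddle64(x);
    std::memcpy(p, &x, sizeof x);
}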