author      unknown <tomas@poseidon.mysql.com>    2007-02-06 00:07:39 +0700
committer   unknown <tomas@poseidon.mysql.com>    2007-02-06 00:07:39 +0700
commit      891222d105d8b30a8f3923cf8983797ec5967904 (patch)
tree        129d9c1e76cddd4406a5690a952f861b3ede7403
parent      26f9af9fc8298163349fb20291630aeb35ccb4f0 (diff)
parent      e3ddf9259a637096ca6440ef7867923b32821d72 (diff)
download    mariadb-git-891222d105d8b30a8f3923cf8983797ec5967904.tar.gz
Merge tulin@bk-internal.mysql.com:/home/bk/mysql-5.1
into poseidon.mysql.com:/home/tomas/mysql-5.1-new-ndb
43 files changed, 1081 insertions, 651 deletions
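The dominant NDB API change in this merge is that the AbortOption enum moves from NdbTransaction to NdbOperation (DefaultAbortOption = -1, AbortOnError = 0, AO_IgnoreError = 2), and NdbTransaction::execute() now takes an NdbOperation::AbortOption defaulting to DefaultAbortOption; the handler code in the diff below switches every call site from NdbTransaction::AO_IgnoreError / AbortOnError to the NdbOperation equivalents. The snippet here is not part of the patch: it is a minimal sketch of the calling convention after the change, modelled on execute_no_commit_ignore_no_key() in sql/ha_ndbcluster.cc, and it assumes the caller already holds a started NdbTransaction and a force-send flag.

```cpp
#include <NdbApi.hpp>

// Illustrative sketch only (not from this patch): execute queued operations
// with the relocated per-operation abort option and treat "row not found"
// style failures as non-fatal, mirroring execute_no_commit_ignore_no_key().
// Assumes `trans` is an already started NdbTransaction.
static int execute_ignore_no_key(NdbTransaction *trans, int force_send)
{
  int res= trans->execute(NdbTransaction::NoCommit,
                          NdbOperation::AO_IgnoreError,  // was NdbTransaction::AO_IgnoreError
                          force_send);
  if (res == -1)
    return -1;                                           // transaction-level failure

  // With AO_IgnoreError the transaction survives per-operation errors; only
  // propagate classifications other than success / constraint violation /
  // no-data-found, exactly as the patched handler does.
  const NdbError &err= trans->getNdbError();
  if (err.classification != NdbError::NoError &&
      err.classification != NdbError::ConstraintViolation &&
      err.classification != NdbError::NoDataFound)
    return res;
  return 0;
}
```

DML call sites that previously passed NdbTransaction::AbortOnError (execute_commit(), ndb_get_table_statistics(), the NdbBlob part operations) change the same way, only with NdbOperation::AbortOnError.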
diff --git a/mysql-test/r/ndb_read_multi_range.result b/mysql-test/r/ndb_read_multi_range.result
index 12f3185bb1a..02053fd5b68 100644
--- a/mysql-test/r/ndb_read_multi_range.result
+++ b/mysql-test/r/ndb_read_multi_range.result
@@ -442,3 +442,20 @@ SELECT id, tag, doc, type FROM t1 WHERE id IN ('flipper','sakila');
 id tag doc type
 sakila 1 Some text goes here text
 DROP TABLE t1;
+CREATE TABLE t1 (
+var1 int(2) NOT NULL,
+var2 int(2) NOT NULL,
+PRIMARY KEY (var1)
+) ENGINE=ndbcluster DEFAULT CHARSET=ascii CHECKSUM=1;
+CREATE TABLE t2 (
+var1 int(2) NOT NULL,
+var2 int(2) NOT NULL,
+PRIMARY KEY (var1)
+) ENGINE=MyISAM DEFAULT CHARSET=ascii CHECKSUM=1;
+CREATE TRIGGER testtrigger
+AFTER UPDATE ON t1 FOR EACH ROW BEGIN
+REPLACE INTO t2 SELECT * FROM t1 WHERE t1.var1 = NEW.var1;END|
+INSERT INTO t1 VALUES (1,1),(2,2),(3,3);
+UPDATE t1 SET var2 = 9 WHERE var1 IN(1,2,3);
+DROP TRIGGER testtrigger;
+DROP TABLE t1, t2;
diff --git a/mysql-test/t/ndb_read_multi_range.test b/mysql-test/t/ndb_read_multi_range.test
index 9d7415880ce..775a2eb9cef 100644
--- a/mysql-test/t/ndb_read_multi_range.test
+++ b/mysql-test/t/ndb_read_multi_range.test
@@ -272,3 +272,32 @@ SELECT id, tag, doc, type FROM t1 WHERE id IN ('flipper','orka');
 SELECT id, tag, doc, type FROM t1 WHERE id IN ('flipper','sakila');
 DROP TABLE t1;
+
+#bug#25522
+CREATE TABLE t1 (
+  var1 int(2) NOT NULL,
+  var2 int(2) NOT NULL,
+  PRIMARY KEY (var1)
+  ) ENGINE=ndbcluster DEFAULT CHARSET=ascii CHECKSUM=1;
+
+
+CREATE TABLE t2 (
+  var1 int(2) NOT NULL,
+  var2 int(2) NOT NULL,
+  PRIMARY KEY (var1)
+  ) ENGINE=MyISAM DEFAULT CHARSET=ascii CHECKSUM=1;
+
+
+DELIMITER |;
+CREATE TRIGGER testtrigger
+  AFTER UPDATE ON t1 FOR EACH ROW BEGIN
+  REPLACE INTO t2 SELECT * FROM t1 WHERE t1.var1 = NEW.var1;END|
+DELIMITER ;|
+
+INSERT INTO t1 VALUES (1,1),(2,2),(3,3);
+
+UPDATE t1 SET var2 = 9 WHERE var1 IN(1,2,3);
+
+DROP TRIGGER testtrigger;
+
+DROP TABLE t1, t2;
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 36fe6457167..37667f27160 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -260,13 +260,14 @@ static int ndb_to_mysql_error(const NdbError *ndberr)
 int execute_no_commit_ignore_no_key(ha_ndbcluster *h, NdbTransaction *trans)
 {
   int res= trans->execute(NdbTransaction::NoCommit,
-                          NdbTransaction::AO_IgnoreError,
+                          NdbOperation::AO_IgnoreError,
                           h->m_force_send);
-  if (res == 0)
-    return 0;
+  if (res == -1)
+    return -1;

   const NdbError &err= trans->getNdbError();
-  if (err.classification != NdbError::ConstraintViolation &&
+  if (err.classification != NdbError::NoError &&
+      err.classification != NdbError::ConstraintViolation &&
       err.classification != NdbError::NoDataFound)
     return res;

@@ -286,7 +287,7 @@ int execute_no_commit(ha_ndbcluster *h, NdbTransaction *trans,
   return h->m_ignore_no_key ?
execute_no_commit_ignore_no_key(h,trans) : trans->execute(NdbTransaction::NoCommit, - NdbTransaction::AbortOnError, + NdbOperation::AbortOnError, h->m_force_send); } @@ -299,7 +300,7 @@ int execute_commit(ha_ndbcluster *h, NdbTransaction *trans) return 0; #endif return trans->execute(NdbTransaction::Commit, - NdbTransaction::AbortOnError, + NdbOperation::AbortOnError, h->m_force_send); } @@ -312,7 +313,7 @@ int execute_commit(THD *thd, NdbTransaction *trans) return 0; #endif return trans->execute(NdbTransaction::Commit, - NdbTransaction::AbortOnError, + NdbOperation::AbortOnError, thd->variables.ndb_force_send); } @@ -327,7 +328,7 @@ int execute_no_commit_ie(ha_ndbcluster *h, NdbTransaction *trans, #endif h->release_completed_operations(trans, force_release); return trans->execute(NdbTransaction::NoCommit, - NdbTransaction::AO_IgnoreError, + NdbOperation::AO_IgnoreError, h->m_force_send); } @@ -1726,7 +1727,8 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf, ERR_RETURN(trans->getNdbError()); } - if (execute_no_commit_ie(this,trans,FALSE) != 0) + if ((res = execute_no_commit_ie(this,trans,FALSE)) != 0 || + op->getNdbError().code) { table->status= STATUS_NOT_FOUND; DBUG_RETURN(ndb_err(trans)); @@ -1998,7 +2000,8 @@ int ha_ndbcluster::unique_index_read(const byte *key, if ((res= define_read_attrs(buf, op))) DBUG_RETURN(res); - if (execute_no_commit_ie(this,trans,FALSE) != 0) + if (execute_no_commit_ie(this,trans,FALSE) != 0 || + op->getNdbError().code) { table->status= STATUS_NOT_FOUND; DBUG_RETURN(ndb_err(trans)); @@ -4337,11 +4340,10 @@ int ha_ndbcluster::start_stmt(THD *thd, thr_lock_type lock_type) ERR_RETURN(ndb->getNdbError()); no_uncommitted_rows_reset(thd); thd_ndb->stmt= trans; + thd_ndb->query_state&= NDB_QUERY_NORMAL; trans_register_ha(thd, FALSE, ndbcluster_hton); } - thd_ndb->query_state&= NDB_QUERY_NORMAL; m_active_trans= trans; - // Start of statement m_ops_pending= 0; thd->set_current_stmt_binlog_row_based_if_mixed(); @@ -6829,7 +6831,7 @@ static int ndbcluster_end(handlerton *hton, ha_panic_function type) fprintf(stderr, "NDB: table share %s with use_count %d not freed\n", share->key, share->use_count); #endif - real_free_share(&share); + ndbcluster_real_free_share(&share); } pthread_mutex_unlock(&ndbcluster_mutex); } @@ -7441,14 +7443,20 @@ int handle_trailing_share(NDB_SHARE *share) bzero((char*) &table_list,sizeof(table_list)); table_list.db= share->db; table_list.alias= table_list.table_name= share->table_name; + safe_mutex_assert_owner(&LOCK_open); close_cached_tables(thd, 0, &table_list, TRUE); pthread_mutex_lock(&ndbcluster_mutex); if (!--share->use_count) { - DBUG_PRINT("info", ("NDB_SHARE: close_cashed_tables %s freed share.", - share->key)); - real_free_share(&share); + if (ndb_extra_logging) + sql_print_information("NDB_SHARE: trailing share %s(connect_count: %u) " + "released by close_cached_tables at " + "connect_count: %u", + share->key, + share->connect_count, + g_ndb_cluster_connection->get_connect_count()); + ndbcluster_real_free_share(&share); DBUG_RETURN(0); } @@ -7458,10 +7466,14 @@ int handle_trailing_share(NDB_SHARE *share) */ if (share->state != NSS_DROPPED && !--share->use_count) { - DBUG_PRINT("info", ("NDB_SHARE: %s already exists, " - "use_count=%d state != NSS_DROPPED.", - share->key, share->use_count)); - real_free_share(&share); + if (ndb_extra_logging) + sql_print_information("NDB_SHARE: trailing share %s(connect_count: %u) " + "released after NSS_DROPPED check " + "at connect_count: %u", + share->key, + 
share->connect_count, + g_ndb_cluster_connection->get_connect_count()); + ndbcluster_real_free_share(&share); DBUG_RETURN(0); } DBUG_PRINT("error", ("NDB_SHARE: %s already exists use_count=%d.", @@ -7727,7 +7739,7 @@ void ndbcluster_free_share(NDB_SHARE **share, bool have_lock) (*share)->util_lock= 0; if (!--(*share)->use_count) { - real_free_share(share); + ndbcluster_real_free_share(share); } else { @@ -7800,7 +7812,7 @@ ndb_get_table_statistics(ha_ndbcluster* file, bool report_error, Ndb* ndb, const (char*)&var_mem); if (pTrans->execute(NdbTransaction::NoCommit, - NdbTransaction::AbortOnError, + NdbOperation::AbortOnError, TRUE) == -1) { error= pTrans->getNdbError(); @@ -8057,7 +8069,6 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, !op->readTuple(lm) && !set_primary_key(op, multi_range_curr->start_key.key) && !define_read_attrs(curr, op) && - (op->setAbortOption(AO_IgnoreError), TRUE) && (!m_use_partition_function || (op->setPartitionId(part_spec.start_part), TRUE))) curr += reclength; @@ -8079,8 +8090,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, if ((op= m_active_trans->getNdbIndexOperation(unique_idx, tab)) && !op->readTuple(lm) && !set_index_key(op, key_info, multi_range_curr->start_key.key) && - !define_read_attrs(curr, op) && - (op->setAbortOption(AO_IgnoreError), TRUE)) + !define_read_attrs(curr, op)) curr += reclength; else ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError()); @@ -8280,6 +8290,8 @@ close_scan: if (multi_range_curr == multi_range_end) { DBUG_MULTI_RANGE(16); + Thd_ndb *thd_ndb= get_thd_ndb(current_thd); + thd_ndb->query_state&= NDB_QUERY_NORMAL; DBUG_RETURN(HA_ERR_END_OF_FILE); } diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 5b6900766b6..63665fde0f8 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -108,6 +108,7 @@ typedef struct st_ndbcluster_share { char *table_name; Ndb::TupleIdRange tuple_id_range; #ifdef HAVE_NDB_BINLOG + uint32 connect_count; uint32 flags; NdbEventOperation *op; NdbEventOperation *op_old; // for rename table diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index fc7d933be7d..c213a3e049a 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -97,6 +97,7 @@ static ulonglong ndb_latest_received_binlog_epoch= 0; NDB_SHARE *ndb_apply_status_share= 0; NDB_SHARE *ndb_schema_share= 0; +pthread_mutex_t ndb_schema_share_mutex; /* Schema object distribution handling */ HASH ndb_schema_objects; @@ -361,6 +362,8 @@ void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *_table) int do_event_op= ndb_binlog_running; DBUG_ENTER("ndbcluster_binlog_init_share"); + share->connect_count= g_ndb_cluster_connection->get_connect_count(); + share->op= 0; share->table= 0; @@ -604,7 +607,7 @@ static int ndbcluster_binlog_end(THD *thd) ("table->s->db.table_name: %s.%s", share->table->s->db.str, share->table->s->table_name.str)); if (share->state != NSS_DROPPED && !--share->use_count) - real_free_share(&share); + ndbcluster_real_free_share(&share); else { DBUG_PRINT("share", @@ -621,6 +624,7 @@ static int ndbcluster_binlog_end(THD *thd) pthread_mutex_destroy(&injector_mutex); pthread_cond_destroy(&injector_cond); + pthread_mutex_destroy(&ndb_schema_share_mutex); #endif DBUG_RETURN(0); } @@ -1271,6 +1275,16 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share, int no_storage_nodes= g_ndb_cluster_connection->no_db_nodes(); bitmap_init(&schema_subscribers, bitbuf, sizeof(bitbuf)*8, FALSE); 
bitmap_set_all(&schema_subscribers); + + /* begin protect ndb_schema_share */ + pthread_mutex_lock(&ndb_schema_share_mutex); + if (ndb_schema_share == 0) + { + pthread_mutex_unlock(&ndb_schema_share_mutex); + if (ndb_schema_object) + ndb_free_schema_object(&ndb_schema_object, FALSE); + DBUG_RETURN(0); + } (void) pthread_mutex_lock(&ndb_schema_share->mutex); for (i= 0; i < no_storage_nodes; i++) { @@ -1283,6 +1297,9 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share, } } (void) pthread_mutex_unlock(&ndb_schema_share->mutex); + pthread_mutex_unlock(&ndb_schema_share_mutex); + /* end protect ndb_schema_share */ + if (updated) { bitmap_clear_bit(&schema_subscribers, node_id); @@ -1478,6 +1495,14 @@ end: &abstime); if (thd->killed) break; + + /* begin protect ndb_schema_share */ + pthread_mutex_lock(&ndb_schema_share_mutex); + if (ndb_schema_share == 0) + { + pthread_mutex_unlock(&ndb_schema_share_mutex); + break; + } (void) pthread_mutex_lock(&ndb_schema_share->mutex); for (i= 0; i < no_storage_nodes; i++) { @@ -1487,6 +1512,8 @@ end: bitmap_intersect(&schema_subscribers, tmp); } (void) pthread_mutex_unlock(&ndb_schema_share->mutex); + pthread_mutex_unlock(&ndb_schema_share_mutex); + /* end protect ndb_schema_share */ /* remove any unsubscribed from ndb_schema_object->slock */ bitmap_intersect(&ndb_schema_object->slock_bitmap, &schema_subscribers); @@ -1910,8 +1937,14 @@ ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb, ndb_binlog_tables_inited && ndb_binlog_running) sql_print_information("NDB Binlog: ndb tables initially " "read only on reconnect."); + + /* begin protect ndb_schema_share */ + pthread_mutex_lock(&ndb_schema_share_mutex); free_share(&ndb_schema_share); ndb_schema_share= 0; + pthread_mutex_unlock(&ndb_schema_share_mutex); + /* end protect ndb_schema_share */ + close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0, FALSE); // fall through case NDBEVENT::TE_ALTER: @@ -2278,6 +2311,7 @@ int ndbcluster_binlog_start() pthread_mutex_init(&injector_mutex, MY_MUTEX_INIT_FAST); pthread_cond_init(&injector_cond, NULL); + pthread_mutex_init(&ndb_schema_share_mutex, MY_MUTEX_INIT_FAST); /* Create injector thread */ if (pthread_create(&ndb_binlog_thread, &connection_attrib, @@ -2411,11 +2445,20 @@ int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key, pthread_mutex_unlock(&ndbcluster_mutex); DBUG_RETURN(1); } - handle_trailing_share(share); + if (share->connect_count != + g_ndb_cluster_connection->get_connect_count()) + { + handle_trailing_share(share); + share= NULL; + } } /* Create share which is needed to hold replication information */ - if (!(share= get_share(key, 0, TRUE, TRUE))) + if (share) + { + ++share->use_count; + } + else if (!(share= get_share(key, 0, TRUE, TRUE))) { sql_print_error("NDB Binlog: " "allocating table share for %s failed", key); @@ -3924,9 +3967,9 @@ restart: "%ld(%d e/s), total time %ld(%d e/s)", (ulong)gci, event_count, write_timer.elapsed_ms(), - event_count / write_timer.elapsed_ms(), + (1000*event_count) / write_timer.elapsed_ms(), gci_timer.elapsed_ms(), - event_count / gci_timer.elapsed_ms()); + (1000*event_count) / gci_timer.elapsed_ms()); #endif } } @@ -3966,8 +4009,12 @@ err: } if (ndb_schema_share) { + /* begin protect ndb_schema_share */ + pthread_mutex_lock(&ndb_schema_share_mutex); free_share(&ndb_schema_share); ndb_schema_share= 0; + pthread_mutex_unlock(&ndb_schema_share_mutex); + /* end protect ndb_schema_share */ } /* remove all event operations */ diff --git a/sql/ha_ndbcluster_binlog.h b/sql/ha_ndbcluster_binlog.h index 
44183c6de9d..00fc689f061 100644 --- a/sql/ha_ndbcluster_binlog.h +++ b/sql/ha_ndbcluster_binlog.h @@ -208,11 +208,6 @@ inline void free_share(NDB_SHARE **share, bool have_lock= FALSE) ndbcluster_free_share(share, have_lock); } -inline void real_free_share(NDB_SHARE **share) -{ - ndbcluster_real_free_share(share); -} - inline Thd_ndb * get_thd_ndb(THD *thd) { return (Thd_ndb *) thd->ha_data[ndbcluster_hton->slot]; } diff --git a/sql/log_event.cc b/sql/log_event.cc index 82cfc0cd3a2..54d75449cd5 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -5702,9 +5702,26 @@ int Rows_log_event::exec_event(st_relay_log_info *rli) { if (!need_reopen) { - slave_print_msg(ERROR_LEVEL, rli, error, - "Error in %s event: when locking tables", - get_type_str()); + if (thd->query_error || thd->is_fatal_error) + { + /* + Error reporting borrowed from Query_log_event with many excessive + simplifications (we don't honour --slave-skip-errors) + */ + uint actual_error= thd->net.last_errno; + slave_print_msg(ERROR_LEVEL, rli, actual_error, + "Error '%s' in %s event: when locking tables", + (actual_error ? thd->net.last_error : + "unexpected success or fatal error"), + get_type_str()); + thd->is_fatal_error= 1; + } + else + { + slave_print_msg(ERROR_LEVEL, rli, error, + "Error in %s event: when locking tables", + get_type_str()); + } rli->clear_tables_to_lock(); DBUG_RETURN(error); } diff --git a/sql/protocol.h b/sql/protocol.h index 0e00a7c21e0..6c4c7414ea5 100644 --- a/sql/protocol.h +++ b/sql/protocol.h @@ -58,6 +58,8 @@ public: String *storage_packet() { return packet; } inline void free() { packet->free(); } virtual bool write(); + inline bool store(int from) + { return store_long((longlong) from); } inline bool store(uint32 from) { return store_long((longlong) from); } inline bool store(longlong from) diff --git a/sql/slave.cc b/sql/slave.cc index fb1f71e646f..1bdb884b5d2 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1296,7 +1296,7 @@ bool show_master_info(THD* thd, MASTER_INFO* mi) rpl_filter->get_wild_ignore_table(&tmp); protocol->store(&tmp); - protocol->store((uint32) mi->rli.last_slave_errno); + protocol->store(mi->rli.last_slave_errno); protocol->store(mi->rli.last_slave_error, &my_charset_bin); protocol->store((uint32) mi->rli.slave_skip_counter); protocol->store((ulonglong) mi->rli.group_master_log_pos); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index dcf1f0cfc79..ea69eb72b51 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -3520,7 +3520,11 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, continue; } - *save_pos= *use; +#ifdef HAVE_purify + /* Valgrind complains about overlapped memcpy when save_pos==use. 
*/ + if (save_pos != use) +#endif + *save_pos= *use; prev=use; found_eq_constant= !use->used_tables; /* Save ptr to first use */ diff --git a/storage/ndb/include/ndbapi/NdbIndexOperation.hpp b/storage/ndb/include/ndbapi/NdbIndexOperation.hpp index 0f06d8041ee..49e55f54f1a 100644 --- a/storage/ndb/include/ndbapi/NdbIndexOperation.hpp +++ b/storage/ndb/include/ndbapi/NdbIndexOperation.hpp @@ -181,8 +181,6 @@ private: const class NdbTableImpl* aTable, NdbTransaction*); - int prepareSend(Uint32 TC_ConnectPtr, Uint64 TransactionId); - // Private attributes const NdbIndexImpl* m_theIndex; friend struct Ndb_free_list_t<NdbIndexOperation>; diff --git a/storage/ndb/include/ndbapi/NdbOperation.hpp b/storage/ndb/include/ndbapi/NdbOperation.hpp index 556412c4799..90b90c7e481 100644 --- a/storage/ndb/include/ndbapi/NdbOperation.hpp +++ b/storage/ndb/include/ndbapi/NdbOperation.hpp @@ -98,6 +98,19 @@ public: }; /** + * How should transaction be handled if operation fails + * + * For READ, default is AO_IgnoreError + * DML, default is AbortOnError + * CommittedRead does _only_ support AO_IgnoreError + */ + enum AbortOption { + DefaultAbortOption = -1,///< Use default as specified by op-type + AbortOnError = 0, ///< Abort transaction on failed operation + AO_IgnoreError = 2 ///< Transaction continues on failed operation + }; + + /** * Define the NdbOperation to be a standard operation of type insertTuple. * When calling NdbTransaction::execute, this operation * adds a new tuple to the table. @@ -776,8 +789,13 @@ public: */ LockMode getLockMode() const { return theLockMode; } + /** + * Get/set abort option + */ + AbortOption getAbortOption() const; + int setAbortOption(AbortOption); + #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - void setAbortOption(Int8 ao) { m_abortOption = ao; } /** * Set/get partition key @@ -856,7 +874,8 @@ protected: int doSend(int ProcessorId, Uint32 lastFlag); virtual int prepareSend(Uint32 TC_ConnectPtr, - Uint64 TransactionId); + Uint64 TransactionId, + AbortOption); virtual void setLastFlag(NdbApiSignal* signal, Uint32 lastFlag); int prepareSendInterpreted(); // Help routine to prepare* diff --git a/storage/ndb/include/ndbapi/NdbTransaction.hpp b/storage/ndb/include/ndbapi/NdbTransaction.hpp index 7c5d02c209e..56d0801d507 100644 --- a/storage/ndb/include/ndbapi/NdbTransaction.hpp +++ b/storage/ndb/include/ndbapi/NdbTransaction.hpp @@ -20,6 +20,7 @@ #include "NdbError.hpp" #include "NdbDictionary.hpp" #include "Ndb.hpp" +#include "NdbOperation.hpp" class NdbTransaction; class NdbOperation; @@ -44,11 +45,12 @@ typedef void (* NdbAsynchCallback)(int, NdbTransaction*, void*); #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL enum AbortOption { - CommitIfFailFree= 0, - TryCommit= 0, - AbortOnError= 0, - CommitAsMuchAsPossible= 2, - AO_IgnoreError= 2 + DefaultAbortOption = NdbOperation::DefaultAbortOption, + CommitIfFailFree = NdbOperation::AbortOnError, + TryCommit = NdbOperation::AbortOnError, + AbortOnError= NdbOperation::AbortOnError, + CommitAsMuchAsPossible = NdbOperation::AO_IgnoreError, + AO_IgnoreError= NdbOperation::AO_IgnoreError }; enum ExecType { NoExecTypeDef = -1, @@ -145,20 +147,6 @@ class NdbTransaction public: /** - * Commit type of transaction - */ - enum AbortOption { - AbortOnError= ///< Abort transaction on failed operation -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - ::AbortOnError -#endif - ,AO_IgnoreError= ///< Transaction continues on failed operation -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - ::AO_IgnoreError -#endif - }; - - /** * Execution type of transaction */ enum ExecType { @@ 
-316,13 +304,15 @@ public: * @return 0 if successful otherwise -1. */ int execute(ExecType execType, - AbortOption abortOption = AbortOnError, + NdbOperation::AbortOption = NdbOperation::DefaultAbortOption, int force = 0 ); #ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED int execute(::ExecType execType, - ::AbortOption abortOption = ::AbortOnError, - int force = 0 ) - { return execute ((ExecType)execType,(AbortOption)abortOption,force); } + ::AbortOption abortOption = ::DefaultAbortOption, + int force = 0 ) { + return execute ((ExecType)execType, + (NdbOperation::AbortOption)abortOption, + force); } #endif #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL @@ -353,14 +343,14 @@ public: void executeAsynchPrepare(ExecType execType, NdbAsynchCallback callback, void* anyObject, - AbortOption abortOption = AbortOnError); + NdbOperation::AbortOption = NdbOperation::DefaultAbortOption); #ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED void executeAsynchPrepare(::ExecType execType, NdbAsynchCallback callback, void* anyObject, - ::AbortOption abortOption = ::AbortOnError) - { executeAsynchPrepare((ExecType)execType, callback, anyObject, - (AbortOption)abortOption); } + ::AbortOption ao = ::DefaultAbortOption) { + executeAsynchPrepare((ExecType)execType, callback, anyObject, + (NdbOperation::AbortOption)ao); } #endif /** @@ -379,14 +369,14 @@ public: void executeAsynch(ExecType aTypeOfExec, NdbAsynchCallback aCallback, void* anyObject, - AbortOption abortOption = AbortOnError); + NdbOperation::AbortOption = NdbOperation::DefaultAbortOption); #ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED void executeAsynch(::ExecType aTypeOfExec, NdbAsynchCallback aCallback, void* anyObject, - ::AbortOption abortOption= ::AbortOnError) + ::AbortOption abortOption= ::DefaultAbortOption) { executeAsynch((ExecType)aTypeOfExec, aCallback, anyObject, - (AbortOption)abortOption); } + (NdbOperation::AbortOption)abortOption); } #endif #endif /** @@ -588,7 +578,7 @@ private: void init(); // Initialize connection object for new transaction int executeNoBlobs(ExecType execType, - AbortOption abortOption = AbortOnError, + NdbOperation::AbortOption = NdbOperation::DefaultAbortOption, int force = 0 ); /** @@ -642,7 +632,7 @@ private: int sendCOMMIT(); // Send a TC_COMMITREQ signal; void setGCI(int GCI); // Set the global checkpoint identity - int OpCompleteFailure(Uint8 abortoption, bool setFailure = true); + int OpCompleteFailure(NdbOperation*); int OpCompleteSuccess(); void CompletedOperations(); // Move active ops to list of completed @@ -732,7 +722,6 @@ private: Uint32 theNoOfOpSent; // How many operations have been sent Uint32 theNoOfOpCompleted; // How many operations have completed - Uint32 theNoOfOpFetched; // How many operations was actually fetched Uint32 theMyRef; // Our block reference Uint32 theTCConPtr; // Transaction Co-ordinator connection pointer. 
Uint64 theTransactionId; // theTransactionId of the transaction @@ -756,7 +745,6 @@ private: bool theTransactionIsStarted; bool theInUseState; bool theSimpleState; - Uint8 m_abortOption; // Type of commi enum ListState { NotInList, diff --git a/storage/ndb/src/common/debugger/EventLogger.cpp b/storage/ndb/src/common/debugger/EventLogger.cpp index f5498d88887..4c0c4c44344 100644 --- a/storage/ndb/src/common/debugger/EventLogger.cpp +++ b/storage/ndb/src/common/debugger/EventLogger.cpp @@ -16,6 +16,7 @@ #include <ndb_global.h> #include <EventLogger.hpp> +#include <TransporterCallback.hpp> #include <NdbConfig.h> #include <kernel/BlockNumbers.h> @@ -526,11 +527,100 @@ void getTextUndoLogBlocked(QQQQ) { theData[1], theData[2]); } + void getTextTransporterError(QQQQ) { - BaseString::snprintf(m_text, m_text_len, - "Transporter to node %d reported error 0x%x", - theData[1], - theData[2]); + struct myTransporterError{ + int errorNum; + char errorString[256]; + }; + int i = 0; + int lenth = 0; + static const struct myTransporterError TransporterErrorString[]= + { + //TE_NO_ERROR = 0 + {TE_NO_ERROR,"No error"}, + //TE_ERROR_CLOSING_SOCKET = 0x1 + {TE_ERROR_CLOSING_SOCKET,"Error found during closing of socket"}, + //TE_ERROR_IN_SELECT_BEFORE_ACCEPT = 0x2 + {TE_ERROR_IN_SELECT_BEFORE_ACCEPT,"Error found before accept. The transporter will retry"}, + //TE_INVALID_MESSAGE_LENGTH = 0x3 | TE_DO_DISCONNECT + {TE_INVALID_MESSAGE_LENGTH,"Error found in message (invalid message length)"}, + //TE_INVALID_CHECKSUM = 0x4 | TE_DO_DISCONNECT + {TE_INVALID_CHECKSUM,"Error found in message (checksum)"}, + //TE_COULD_NOT_CREATE_SOCKET = 0x5 + {TE_COULD_NOT_CREATE_SOCKET,"Error found while creating socket(can't create socket)"}, + //TE_COULD_NOT_BIND_SOCKET = 0x6 + {TE_COULD_NOT_BIND_SOCKET,"Error found while binding server socket"}, + //TE_LISTEN_FAILED = 0x7 + {TE_LISTEN_FAILED,"Error found while listening to server socket"}, + //TE_ACCEPT_RETURN_ERROR = 0x8 + {TE_ACCEPT_RETURN_ERROR,"Error found during accept(accept return error)"}, + //TE_SHM_DISCONNECT = 0xb | TE_DO_DISCONNECT + {TE_SHM_DISCONNECT,"The remote node has disconnected"}, + //TE_SHM_IPC_STAT = 0xc | TE_DO_DISCONNECT + {TE_SHM_IPC_STAT,"Unable to check shm segment"}, + //TE_SHM_UNABLE_TO_CREATE_SEGMENT = 0xd + {TE_SHM_UNABLE_TO_CREATE_SEGMENT,"Unable to create shm segment"}, + //TE_SHM_UNABLE_TO_ATTACH_SEGMENT = 0xe + {TE_SHM_UNABLE_TO_ATTACH_SEGMENT,"Unable to attach shm segment"}, + //TE_SHM_UNABLE_TO_REMOVE_SEGMENT = 0xf + {TE_SHM_UNABLE_TO_REMOVE_SEGMENT,"Unable to remove shm segment"}, + //TE_TOO_SMALL_SIGID = 0x10 + {TE_TOO_SMALL_SIGID,"Sig ID too small"}, + //TE_TOO_LARGE_SIGID = 0x11 + {TE_TOO_LARGE_SIGID,"Sig ID too large"}, + //TE_WAIT_STACK_FULL = 0x12 | TE_DO_DISCONNECT + {TE_WAIT_STACK_FULL,"Wait stack was full"}, + //TE_RECEIVE_BUFFER_FULL = 0x13 | TE_DO_DISCONNECT + {TE_RECEIVE_BUFFER_FULL,"Receive buffer was full"}, + //TE_SIGNAL_LOST_SEND_BUFFER_FULL = 0x14 | TE_DO_DISCONNECT + {TE_SIGNAL_LOST_SEND_BUFFER_FULL,"Send buffer was full,and trying to force send fails"}, + //TE_SIGNAL_LOST = 0x15 + {TE_SIGNAL_LOST,"Send failed for unknown reason(signal lost)"}, + //TE_SEND_BUFFER_FULL = 0x16 + {TE_SEND_BUFFER_FULL,"The send buffer was full, but sleeping for a while solved"}, + //TE_SCI_LINK_ERROR = 0x0017 + {TE_SCI_LINK_ERROR,"There is no link from this node to the switch"}, + //TE_SCI_UNABLE_TO_START_SEQUENCE = 0x18 | TE_DO_DISCONNECT + {TE_SCI_UNABLE_TO_START_SEQUENCE,"Could not start a sequence, because system resources are exumed or no 
sequence has been created"}, + //TE_SCI_UNABLE_TO_REMOVE_SEQUENCE = 0x19 | TE_DO_DISCONNECT + {TE_SCI_UNABLE_TO_REMOVE_SEQUENCE,"Could not remove a sequence"}, + //TE_SCI_UNABLE_TO_CREATE_SEQUENCE = 0x1a | TE_DO_DISCONNECT + {TE_SCI_UNABLE_TO_CREATE_SEQUENCE,"Could not create a sequence, because system resources are exempted. Must reboot"}, + //TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR = 0x1b | TE_DO_DISCONNECT + {TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR,"Tried to send data on redundant link but failed"}, + //TE_SCI_CANNOT_INIT_LOCALSEGMENT = 0x1c | TE_DO_DISCONNECT + {TE_SCI_CANNOT_INIT_LOCALSEGMENT,"Cannot initialize local segment"}, + //TE_SCI_CANNOT_MAP_REMOTESEGMENT = 0x1d | TE_DO_DISCONNEC + {TE_SCI_CANNOT_MAP_REMOTESEGMENT,"Cannot map remote segment"}, + //TE_SCI_UNABLE_TO_UNMAP_SEGMENT = 0x1e | TE_DO_DISCONNECT + {TE_SCI_UNABLE_TO_UNMAP_SEGMENT,"Cannot free the resources used by this segment (step 1)"}, + //TE_SCI_UNABLE_TO_REMOVE_SEGMENT = 0x1f | TE_DO_DISCONNEC + {TE_SCI_UNABLE_TO_REMOVE_SEGMENT,"Cannot free the resources used by this segment (step 2)"}, + //TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT = 0x20 | TE_DO_DISCONNECT + {TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT,"Cannot disconnect from a remote segment"}, + //TE_SHM_IPC_PERMANENT = 0x21 + {TE_SHM_IPC_PERMANENT,"Shm ipc Permanent error"}, + //TE_SCI_UNABLE_TO_CLOSE_CHANNEL = 0x22 + {TE_SCI_UNABLE_TO_CLOSE_CHANNEL,"Unable to close the sci channel and the resources allocated"} + }; + + lenth = sizeof(TransporterErrorString)/sizeof(struct myTransporterError); + for(i=0; i<lenth; i++) + { + if(theData[2] == TransporterErrorString[i].errorNum) + { + BaseString::snprintf(m_text, m_text_len, + "Transporter to node %d reported error: %s", + theData[1], + TransporterErrorString[i].errorString); + break; + } + } + if(i == lenth) + BaseString::snprintf(m_text, m_text_len, + "Transporter to node %d reported error: no such error", + theData[1]); } void getTextTransporterWarning(QQQQ) { getTextTransporterError(m_text, m_text_len, theData); diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.cpp b/storage/ndb/src/kernel/blocks/backup/Backup.cpp index a07617f0bfb..cb85c2c5e7e 100644 --- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp @@ -2405,6 +2405,18 @@ Backup::defineBackupRef(Signal* signal, BackupRecordPtr ptr, Uint32 errCode) if(ptr.p->is_lcp()) { jam(); + if (ptr.p->ctlFilePtr == RNIL) { + ptr.p->m_gsn = GSN_DEFINE_BACKUP_REF; + ndbrequire(ptr.p->errorCode != 0); + DefineBackupRef* ref = (DefineBackupRef*)signal->getDataPtrSend(); + ref->backupId = ptr.p->backupId; + ref->backupPtr = ptr.i; + ref->errorCode = ptr.p->errorCode; + ref->nodeId = getOwnNodeId(); + sendSignal(ptr.p->masterRef, GSN_DEFINE_BACKUP_REF, signal, + DefineBackupRef::SignalLength, JBB); + return; + } BackupFilePtr filePtr LINT_SET_PTR; ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index 3512ba10af3..163e4c61533 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -1525,10 +1525,26 @@ void Dbdih::ndbStartReqLab(Signal* signal, BlockReference ref) */ SYSFILE->lastCompletedGCI[nodePtr.i] = 0; ndbrequire(nodePtr.p->nodeStatus != NodeRecord::ALIVE); - warningEvent("Making filesystem for node %d unusable", + warningEvent("Making filesystem for node %d unusable (need --initial)", nodePtr.i); } + else if (nodePtr.p->nodeStatus == NodeRecord::ALIVE 
&& + SYSFILE->lastCompletedGCI[nodePtr.i] == 0) + { + jam(); + CRASH_INSERTION(7170); + char buf[255]; + BaseString::snprintf(buf, sizeof(buf), + "Cluster requires this node to be started " + " with --initial as partial start has been performed" + " and this filesystem is unusable"); + progError(__LINE__, + NDBD_EXIT_SR_RESTARTCONFLICT, + buf); + ndbrequire(false); + } } + /** * This set which GCI we will try to restart to */ @@ -12515,14 +12531,23 @@ void Dbdih::newCrashedReplica(Uint32 nodeId, ReplicaRecordPtr ncrReplicaPtr) /* THAT THE NEW REPLICA IS NOT STARTED YET AND REPLICA_LAST_GCI IS*/ /* SET TO -1 TO INDICATE THAT IT IS NOT DEAD YET. */ /*----------------------------------------------------------------------*/ + Uint32 lastGCI = SYSFILE->lastCompletedGCI[nodeId]; arrGuardErr(ncrReplicaPtr.p->noCrashedReplicas + 1, 8, NDBD_EXIT_MAX_CRASHED_REPLICAS); ncrReplicaPtr.p->replicaLastGci[ncrReplicaPtr.p->noCrashedReplicas] = - SYSFILE->lastCompletedGCI[nodeId]; + lastGCI; ncrReplicaPtr.p->noCrashedReplicas = ncrReplicaPtr.p->noCrashedReplicas + 1; ncrReplicaPtr.p->createGci[ncrReplicaPtr.p->noCrashedReplicas] = 0; ncrReplicaPtr.p->replicaLastGci[ncrReplicaPtr.p->noCrashedReplicas] = (Uint32)-1; + + if (ncrReplicaPtr.p->noCrashedReplicas == 7 && lastGCI) + { + jam(); + SYSFILE->lastCompletedGCI[nodeId] = 0; + warningEvent("Making filesystem for node %d unusable (need --initial)", + nodeId); + } }//Dbdih::newCrashedReplica() /*************************************************************************/ diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index fa7bc0bbcac..f2eef543833 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -582,6 +582,26 @@ Dblqh::execDEFINE_BACKUP_REF(Signal* signal) { jamEntry(); m_backup_ptr = RNIL; + DefineBackupRef* ref = (DefineBackupRef*)signal->getDataPtrSend(); + int err_code = 0; + char * extra_msg = NULL; + + switch(ref->errorCode){ + case DefineBackupRef::Undefined: + case DefineBackupRef::FailedToSetupFsBuffers: + case DefineBackupRef::FailedToAllocateBuffers: + case DefineBackupRef::FailedToAllocateTables: + case DefineBackupRef::FailedAllocateTableMem: + case DefineBackupRef::FailedToAllocateFileRecord: + case DefineBackupRef::FailedToAllocateAttributeRecord: + case DefineBackupRef::FailedInsertFileHeader: + case DefineBackupRef::FailedInsertTableList: + jam(); + err_code = NDBD_EXIT_INVALID_CONFIG; + extra_msg = "Probably Backup parameters configuration error, Please consult the manual"; + progError(__LINE__, err_code, extra_msg); + } + sendsttorryLab(signal); } @@ -2479,8 +2499,16 @@ Dblqh::execREMOVE_MARKER_ORD(Signal* signal) CommitAckMarkerPtr removedPtr; m_commitAckMarkerHash.remove(removedPtr, key); +#if defined VM_TRACE || defined ERROR_INSERT ndbrequire(removedPtr.i != RNIL); m_commitAckMarkerPool.release(removedPtr); +#else + if (removedPtr.i != RNIL) + { + jam(); + m_commitAckMarkerPool.release(removedPtr); + } +#endif #ifdef MARKER_TRACE ndbout_c("Rem marker[%.8x %.8x]", key.transid1, key.transid2); #endif @@ -3138,20 +3166,23 @@ void Dblqh::lqhAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length) { TcConnectionrec * const regTcPtr = tcConnectptr.p; if (regTcPtr->operation != ZREAD) { - if (regTcPtr->opExec != 1) { - if (saveTupattrbuf(signal, dataPtr, length) == ZOK) { - ; - } else { - jam(); + if (regTcPtr->operation != ZDELETE) + { + if (regTcPtr->opExec != 1) { + if (saveTupattrbuf(signal, 
dataPtr, length) == ZOK) { + ; + } else { + jam(); /* ------------------------------------------------------------------------- */ /* WE MIGHT BE WAITING FOR RESPONSE FROM SOME BLOCK HERE. THUS WE NEED TO */ /* GO THROUGH THE STATE MACHINE FOR THE OPERATION. */ /* ------------------------------------------------------------------------- */ - localAbortStateHandlerLab(signal); - return; + localAbortStateHandlerLab(signal); + return; + }//if }//if }//if - }//if + } c_tup->receive_attrinfo(signal, regTcPtr->tupConnectrec, dataPtr, length); }//Dblqh::lqhAttrinfoLab() @@ -3405,7 +3436,7 @@ void Dblqh::execLQHKEYREQ(Signal* signal) markerPtr.p->tcNodeId = tcNodeId; CommitAckMarkerPtr tmp; -#ifdef VM_TRACE +#if defined VM_TRACE || defined ERROR_INSERT #ifdef MARKER_TRACE ndbout_c("Add marker[%.8x %.8x]", markerPtr.p->transid1, markerPtr.p->transid2); #endif @@ -7197,7 +7228,8 @@ void Dblqh::execACC_ABORTCONF(Signal* signal) TRACE_OP(regTcPtr, "ACC_ABORTCONF"); signal->theData[0] = regTcPtr->tupConnectrec; EXECUTE_DIRECT(DBTUP, GSN_TUP_ABORTREQ, signal, 1); - + + jamEntry(); continueAbortLab(signal); return; }//Dblqh::execACC_ABORTCONF() @@ -9628,7 +9660,7 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq) active.add(scanptr); if(scanptr.p->scanKeyinfoFlag){ jam(); -#ifdef VM_TRACE +#if defined VM_TRACE || defined ERROR_INSERT ScanRecordPtr tmp; ndbrequire(!c_scanTakeOverHash.find(tmp, * scanptr.p)); #endif @@ -9752,7 +9784,7 @@ void Dblqh::finishScanrec(Signal* signal) scans.add(restart); if(restart.p->scanKeyinfoFlag){ jam(); -#ifdef VM_TRACE +#if defined VM_TRACE || defined ERROR_INSERT ScanRecordPtr tmp; ndbrequire(!c_scanTakeOverHash.find(tmp, * restart.p)); #endif diff --git a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index 44f7954f00d..c7ca8048354 100644 --- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -2522,7 +2522,7 @@ void Dbtc::execTCKEYREQ(Signal* signal) ApiConnectRecord * const regApiPtr = &localApiConnectRecord[TapiIndex]; apiConnectptr.p = regApiPtr; - Uint32 TstartFlag = tcKeyReq->getStartFlag(Treqinfo); + Uint32 TstartFlag = TcKeyReq::getStartFlag(Treqinfo); Uint32 TexecFlag = TcKeyReq::getExecuteFlag(Treqinfo); Uint8 isIndexOp = regApiPtr->isIndexOp; @@ -2692,14 +2692,14 @@ void Dbtc::execTCKEYREQ(Signal* signal) /* */ /* ---------------------------------------------------------------------- */ - UintR TapiVersionNo = tcKeyReq->getAPIVersion(tcKeyReq->attrLen); + UintR TapiVersionNo = TcKeyReq::getAPIVersion(tcKeyReq->attrLen); UintR Tlqhkeyreqrec = regApiPtr->lqhkeyreqrec; regApiPtr->lqhkeyreqrec = Tlqhkeyreqrec + 1; regCachePtr->apiVersionNo = TapiVersionNo; UintR TapiConnectptrIndex = apiConnectptr.i; UintR TsenderData = tcKeyReq->senderData; - UintR TattrLen = tcKeyReq->getAttrinfoLen(tcKeyReq->attrLen); + UintR TattrLen = TcKeyReq::getAttrinfoLen(tcKeyReq->attrLen); UintR TattrinfoCount = c_counters.cattrinfoCount; regTcPtr->apiConnect = TapiConnectptrIndex; @@ -2725,15 +2725,15 @@ void Dbtc::execTCKEYREQ(Signal* signal) UintR TtabptrIndex = localTabptr.i; UintR TtableSchemaVersion = tcKeyReq->tableSchemaVersion; - Uint8 TOperationType = tcKeyReq->getOperationType(Treqinfo); + Uint8 TOperationType = TcKeyReq::getOperationType(Treqinfo); regCachePtr->tableref = TtabptrIndex; regCachePtr->schemaVersion = TtableSchemaVersion; regTcPtr->operation = TOperationType; - Uint8 TSimpleFlag = tcKeyReq->getSimpleFlag(Treqinfo); - Uint8 TDirtyFlag = 
tcKeyReq->getDirtyFlag(Treqinfo); - Uint8 TInterpretedFlag = tcKeyReq->getInterpretedFlag(Treqinfo); - Uint8 TDistrKeyFlag = tcKeyReq->getDistributionKeyFlag(Treqinfo); + Uint8 TSimpleFlag = TcKeyReq::getSimpleFlag(Treqinfo); + Uint8 TDirtyFlag = TcKeyReq::getDirtyFlag(Treqinfo); + Uint8 TInterpretedFlag = TcKeyReq::getInterpretedFlag(Treqinfo); + Uint8 TDistrKeyFlag = TcKeyReq::getDistributionKeyFlag(Treqinfo); Uint8 TNoDiskFlag = TcKeyReq::getNoDiskFlag(Treqinfo); Uint8 TexecuteFlag = TexecFlag; @@ -2749,10 +2749,10 @@ void Dbtc::execTCKEYREQ(Signal* signal) Uint32 TkeyIndex; Uint32* TOptionalDataPtr = (Uint32*)&tcKeyReq->scanInfo; { - Uint32 TDistrGHIndex = tcKeyReq->getScanIndFlag(Treqinfo); + Uint32 TDistrGHIndex = TcKeyReq::getScanIndFlag(Treqinfo); Uint32 TDistrKeyIndex = TDistrGHIndex; - Uint32 TscanInfo = tcKeyReq->getTakeOverScanInfo(TOptionalDataPtr[0]); + Uint32 TscanInfo = TcKeyReq::getTakeOverScanInfo(TOptionalDataPtr[0]); regCachePtr->scanTakeOverInd = TDistrGHIndex; regCachePtr->scanInfo = TscanInfo; @@ -2774,7 +2774,7 @@ void Dbtc::execTCKEYREQ(Signal* signal) regCachePtr->keydata[2] = Tdata3; regCachePtr->keydata[3] = Tdata4; - TkeyLength = tcKeyReq->getKeyLength(Treqinfo); + TkeyLength = TcKeyReq::getKeyLength(Treqinfo); Uint32 TAIDataIndex; if (TkeyLength > 8) { TAIDataIndex = TkeyIndex + 8; @@ -2787,7 +2787,7 @@ void Dbtc::execTCKEYREQ(Signal* signal) }//if Uint32* TAIDataPtr = &TOptionalDataPtr[TAIDataIndex]; - titcLenAiInTckeyreq = tcKeyReq->getAIInTcKeyReq(Treqinfo); + titcLenAiInTckeyreq = TcKeyReq::getAIInTcKeyReq(Treqinfo); regCachePtr->keylen = TkeyLength; regCachePtr->lenAiInTckeyreq = titcLenAiInTckeyreq; regCachePtr->currReclenAi = titcLenAiInTckeyreq; @@ -2824,6 +2824,12 @@ void Dbtc::execTCKEYREQ(Signal* signal) tmp.p->apiNodeId = refToNode(regApiPtr->ndbapiBlockref); tmp.p->apiConnectPtr = TapiIndex; tmp.p->noOfLqhs = 0; +#if defined VM_TRACE || defined ERROR_INSERT + { + CommitAckMarkerPtr check; + ndbrequire(!m_commitAckMarkerHash.find(check, *tmp.p)); + } +#endif m_commitAckMarkerHash.add(tmp); } } @@ -2852,14 +2858,14 @@ void Dbtc::execTCKEYREQ(Signal* signal) }//switch }//if - Uint32 TabortOption = tcKeyReq->getAbortOption(Treqinfo); + Uint32 TabortOption = TcKeyReq::getAbortOption(Treqinfo); regTcPtr->m_execAbortOption = TabortOption; /*------------------------------------------------------------------------- * Check error handling per operation * If CommitFlag is set state accordingly and check for early abort *------------------------------------------------------------------------*/ - if (tcKeyReq->getCommitFlag(Treqinfo) == 1) { + if (TcKeyReq::getCommitFlag(Treqinfo) == 1) { ndbrequire(TexecuteFlag); regApiPtr->apiConnectstate = CS_REC_COMMITTING; } else { @@ -8114,6 +8120,13 @@ void Dbtc::initApiConnectFail(Signal* signal) tmp.p->noOfLqhs = 1; tmp.p->lqhNodeId[0] = tnodeid; tmp.p->apiConnectPtr = apiConnectptr.i; + +#if defined VM_TRACE || defined ERROR_INSERT + { + CommitAckMarkerPtr check; + ndbrequire(!m_commitAckMarkerHash.find(check, *tmp.p)); + } +#endif m_commitAckMarkerHash.add(tmp); } }//Dbtc::initApiConnectFail() @@ -8270,6 +8283,12 @@ void Dbtc::updateApiStateFail(Signal* signal) tmp.p->noOfLqhs = 1; tmp.p->lqhNodeId[0] = tnodeid; tmp.p->apiConnectPtr = apiConnectptr.i; +#if defined VM_TRACE || defined ERROR_INSERT + { + CommitAckMarkerPtr check; + ndbrequire(!m_commitAckMarkerHash.find(check, *tmp.p)); + } +#endif m_commitAckMarkerHash.add(tmp); } else { jam(); @@ -11472,7 +11491,7 @@ void Dbtc::execTCINDXREQ(Signal* signal) // If 
operation is readTupleExclusive or updateTuple then read index // table with exclusive lock Uint32 indexLength = TcKeyReq::getKeyLength(tcIndxRequestInfo); - Uint32 attrLength = tcIndxReq->attrLen; + Uint32 attrLength = TcKeyReq::getAttrinfoLen(tcIndxReq->attrLen); indexOp->expectedKeyInfo = indexLength; Uint32 includedIndexLength = MIN(indexLength, indexBufSize); indexOp->expectedAttrInfo = attrLength; diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index 3b25ac31d48..230895c942a 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -1736,7 +1736,8 @@ private: Operationrec* regOperPtr, Fragrecord* regFragPtr, Tablerec* regTabPtr, - KeyReqStruct* req_struct); + KeyReqStruct* req_struct, + bool disk); //------------------------------------------------------------------ //------------------------------------------------------------------ diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp index 52981e7fc83..c394812ad1a 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp @@ -814,7 +814,9 @@ void Dbtup::execTUPKEYREQ(Signal* signal) { jam(); if (handleDeleteReq(signal, regOperPtr, - regFragPtr, regTabPtr, &req_struct) == -1) { + regFragPtr, regTabPtr, + &req_struct, + disk_page != RNIL) == -1) { return; } /* @@ -1458,7 +1460,8 @@ int Dbtup::handleDeleteReq(Signal* signal, Operationrec* regOperPtr, Fragrecord* regFragPtr, Tablerec* regTabPtr, - KeyReqStruct *req_struct) + KeyReqStruct *req_struct, + bool disk) { // delete must set but not increment tupVersion if (!regOperPtr->is_first_operation()) @@ -1510,8 +1513,11 @@ int Dbtup::handleDeleteReq(Signal* signal, { return 0; } - - return handleReadReq(signal, regOperPtr, regTabPtr, req_struct); + + if (setup_read(req_struct, regOperPtr, regFragPtr, regTabPtr, disk)) + { + return handleReadReq(signal, regOperPtr, regTabPtr, req_struct); + } error: tupkeyErrorLab(signal); diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index bf57bd07f5a..78aafef053a 100644 --- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ -3523,8 +3523,10 @@ void Qmgr::execCOMMIT_FAILREQ(Signal* signal) nodePtr.p->phase = ZFAIL_CLOSING; nodePtr.p->failState = WAITING_FOR_NDB_FAILCONF; setNodeInfo(nodePtr.i).m_heartbeat_cnt= 0; + setNodeInfo(nodePtr.i).m_version = 0; c_clusterNodes.clear(nodePtr.i); }//for + recompute_version_info(NodeInfo::DB); /*----------------------------------------------------------------------*/ /* WE INFORM THE API'S WE HAVE CONNECTED ABOUT THE FAILED NODES. 
*/ /*----------------------------------------------------------------------*/ diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp index 42d4e49b14b..cb5ec0f6adf 100644 --- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp +++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp @@ -4668,9 +4668,7 @@ Suma::out_of_buffer(Signal* signal) m_out_of_buffer_gci = m_last_complete_gci - 1; infoEvent("Out of event buffer: nodefailure will cause event failures"); - signal->theData[0] = SumaContinueB::OUT_OF_BUFFER_RELEASE; - signal->theData[1] = 0; - sendSignal(SUMA_REF, GSN_CONTINUEB, signal, 2, JBB); + out_of_buffer_release(signal, 0); } void @@ -4738,7 +4736,8 @@ loop: Uint32 count; m_tup->allocConsPages(16, count, ref); - ndbrequire(count > 0); + if (count == 0) + return RNIL; ndbout_c("alloc_chunk(%d %d) - ", ref, count); diff --git a/storage/ndb/src/mgmclient/CommandInterpreter.cpp b/storage/ndb/src/mgmclient/CommandInterpreter.cpp index e1deca38f4a..7c57d9eabbc 100644 --- a/storage/ndb/src/mgmclient/CommandInterpreter.cpp +++ b/storage/ndb/src/mgmclient/CommandInterpreter.cpp @@ -2054,6 +2054,18 @@ CommandInterpreter::executeStatus(int processId, ndbout << processId << ": Node not found" << endl; return -1; } + if (cl->node_states[i].node_type != NDB_MGM_NODE_TYPE_NDB){ + if (cl->node_states[i].version != 0){ + ndbout << "Node "<< cl->node_states[i].node_id <<": connected" ; + ndbout_c(" (Version %d.%d.%d)", + getMajor(version) , + getMinor(version), + getBuild(version)); + + }else + ndbout << "Node "<< cl->node_states[i].node_id <<": not connected" << endl; + return 0; + } status = cl->node_states[i].node_status; startPhase = cl->node_states[i].start_phase; version = cl->node_states[i].version; diff --git a/storage/ndb/src/mgmsrv/ConfigInfo.cpp b/storage/ndb/src/mgmsrv/ConfigInfo.cpp index 87976b92718..e7c6122d846 100644 --- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp @@ -3613,6 +3613,7 @@ check_node_vs_replicas(Vector<ConfigInfo::ConfigRuleSection>§ions, Uint32 db_nodes= 0; Uint32 replicas= 0; Uint32 db_host_count= 0; + bool with_arbitration_rank= false; ctx.m_userProperties.get(DB_TOKEN, &db_nodes); ctx.m_userProperties.get("NoOfReplicas", &replicas); if((db_nodes % replicas) != 0){ @@ -3648,83 +3649,90 @@ check_node_vs_replicas(Vector<ConfigInfo::ConfigRuleSection>§ions, tmp->get("HostName", &host); if (strcmp(type,DB_TOKEN) == 0) - { - { - Uint32 ii; - if (!p_db_hosts.get(host,&ii)) - db_host_count++; - p_db_hosts.put(host,i); - if (p_arbitrators.get(host,&ii)) - { - arbitration_warning.appfmt(arbit_warn_fmt, ii, i, host); - p_arbitrators.remove(host); // only one warning per db node - } - } - { - unsigned j; - BaseString str, str2; - str.assfmt("#group%d_",group); - p_db_hosts.put(str.c_str(),i_group,host); - str2.assfmt("##group%d_",group); - p_db_hosts.put(str2.c_str(),i_group,i); - for (j= 0; j < i_group; j++) - { - const char *other_host; - p_db_hosts.get(str.c_str(),j,&other_host); - if (strcmp(host,other_host) == 0) { - unsigned int other_i, c= 0; - p_db_hosts.get(str2.c_str(),j,&other_i); - p_db_hosts.get(str.c_str(),&c); - if (c == 0) // first warning in this node group - node_group_warning.appfmt(" Node group %d", group); - c|= 1 << j; - p_db_hosts.put(str.c_str(),c); - - node_group_warning.appfmt(",\n db node with id %d and id %d " - "on same host %s", other_i, i, host); - } - } - i_group++; - DBUG_ASSERT(i_group <= replicas); - if (i_group == replicas) - { - unsigned c= 0; - 
p_db_hosts.get(str.c_str(),&c); - if (c+1 == (1u << (replicas-1))) // all nodes on same machine - node_group_warning.append(".\n Host failure will " - "cause complete cluster shutdown."); - else if (c > 0) - node_group_warning.append(".\n Host failure may " - "cause complete cluster shutdown."); - group++; - i_group= 0; - } - } + { + { + Uint32 ii; + if (!p_db_hosts.get(host,&ii)) + db_host_count++; + p_db_hosts.put(host,i); + if (p_arbitrators.get(host,&ii)) + { + arbitration_warning.appfmt(arbit_warn_fmt, ii, i, host); + p_arbitrators.remove(host); // only one warning per db node + } + } + { + unsigned j; + BaseString str, str2; + str.assfmt("#group%d_",group); + p_db_hosts.put(str.c_str(),i_group,host); + str2.assfmt("##group%d_",group); + p_db_hosts.put(str2.c_str(),i_group,i); + for (j= 0; j < i_group; j++) + { + const char *other_host; + p_db_hosts.get(str.c_str(),j,&other_host); + if (strcmp(host,other_host) == 0) { + unsigned int other_i, c= 0; + p_db_hosts.get(str2.c_str(),j,&other_i); + p_db_hosts.get(str.c_str(),&c); + if (c == 0) // first warning in this node group + node_group_warning.appfmt(" Node group %d", group); + c|= 1 << j; + p_db_hosts.put(str.c_str(),c); + node_group_warning.appfmt(",\n db node with id %d and id %d " + "on same host %s", other_i, i, host); + } + } + i_group++; + DBUG_ASSERT(i_group <= replicas); + if (i_group == replicas) + { + unsigned c= 0; + p_db_hosts.get(str.c_str(),&c); + if (c+1 == (1u << (replicas-1))) // all nodes on same machine + node_group_warning.append(".\n Host failure will " + "cause complete cluster shutdown."); + else if (c > 0) + node_group_warning.append(".\n Host failure may " + "cause complete cluster shutdown."); + group++; + i_group= 0; + } + } } else if (strcmp(type,API_TOKEN) == 0 || strcmp(type,MGM_TOKEN) == 0) - { - Uint32 rank; - if(tmp->get("ArbitrationRank", &rank) && rank > 0) - { - if(host && host[0] != 0) - { - Uint32 ii; - p_arbitrators.put(host,i); - if (p_db_hosts.get(host,&ii)) - { - arbitration_warning.appfmt(arbit_warn_fmt, i, ii, host); - } - } - else - { - arbitration_warning.appfmt(arbit_warn_fmt2, i); - } - } + { + Uint32 rank; + if(tmp->get("ArbitrationRank", &rank) && rank > 0) + { + with_arbitration_rank = true; //check whether MGM or API node configured with rank >0 + if(host && host[0] != 0) + { + Uint32 ii; + p_arbitrators.put(host,i); + if (p_db_hosts.get(host,&ii)) + { + arbitration_warning.appfmt(arbit_warn_fmt, i, ii, host); + } + } + else + { + arbitration_warning.appfmt(arbit_warn_fmt2, i); + } + } } } if (db_host_count > 1 && node_group_warning.length() > 0) ndbout_c("Cluster configuration warning:\n%s",node_group_warning.c_str()); + if (!with_arbitration_rank) + { + ndbout_c("Cluster configuration warning:" + "\n Neither %s nor %s nodes are configured with arbitrator," + "\n may cause complete cluster shutdown in case of host failure.", + MGM_TOKEN, API_TOKEN); + } if (db_host_count > 1 && arbitration_warning.length() > 0) ndbout_c("Cluster configuration warning:%s%s",arbitration_warning.c_str(), "\n Running arbitrator on the same host as a database node may" diff --git a/storage/ndb/src/mgmsrv/main.cpp b/storage/ndb/src/mgmsrv/main.cpp index 53c8299216f..b880657d89b 100644 --- a/storage/ndb/src/mgmsrv/main.cpp +++ b/storage/ndb/src/mgmsrv/main.cpp @@ -131,8 +131,6 @@ bool g_StopServer; bool g_RestartServer; extern EventLogger g_eventLogger; -extern int global_mgmt_server_check; - enum ndb_mgmd_options { OPT_INTERACTIVE = NDB_STD_OPTIONS_LAST, OPT_NO_NODEID_CHECKS, @@ -206,8 +204,6 @@ int 
main(int argc, char** argv) start: glob= new MgmGlobals; - global_mgmt_server_check = 1; - if (opt_interactive || opt_non_interactive || g_print_full_config) { diff --git a/storage/ndb/src/ndbapi/ClusterMgr.cpp b/storage/ndb/src/ndbapi/ClusterMgr.cpp index f7900b9b0b0..b162b85d61e 100644 --- a/storage/ndb/src/ndbapi/ClusterMgr.cpp +++ b/storage/ndb/src/ndbapi/ClusterMgr.cpp @@ -360,8 +360,6 @@ ClusterMgr::execAPI_REGREQ(const Uint32 * theData){ theFacade.sendSignalUnCond(&signal, nodeId); } -int global_mgmt_server_check = 0; // set to one in mgmtsrvr main; - void ClusterMgr::execAPI_REGCONF(const Uint32 * theData){ const ApiRegConf * const apiRegConf = (ApiRegConf *)&theData[0]; @@ -379,7 +377,7 @@ ClusterMgr::execAPI_REGCONF(const Uint32 * theData){ if(node.m_info.m_version != apiRegConf->version){ node.m_info.m_version = apiRegConf->version; - if (global_mgmt_server_check == 1) + if(theNodes[theFacade.ownId()].m_info.m_type == NodeInfo::MGM) node.compatible = ndbCompatible_mgmt_ndb(NDB_VERSION, node.m_info.m_version); else diff --git a/storage/ndb/src/ndbapi/Ndb.cpp b/storage/ndb/src/ndbapi/Ndb.cpp index 20c2ea711d2..e90dfbfa959 100644 --- a/storage/ndb/src/ndbapi/Ndb.cpp +++ b/storage/ndb/src/ndbapi/Ndb.cpp @@ -1075,7 +1075,7 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table, tOperation->incValue("NEXTID", opValue); tRecAttrResult = tOperation->getValue("NEXTID"); - if (tConnection->execute( Commit ) == -1 ) + if (tConnection->execute( NdbTransaction::Commit ) == -1 ) goto error_handler; tValue = tRecAttrResult->u_64_value(); @@ -1090,7 +1090,7 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table, tOperation->equal("SYSKEY_0", aTableId ); tOperation->setValue("NEXTID", opValue); - if (tConnection->execute( Commit ) == -1 ) + if (tConnection->execute( NdbTransaction::Commit ) == -1 ) goto error_handler; range.reset(); @@ -1107,7 +1107,7 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table, tOperation->def_label(0); tOperation->interpret_exit_nok(9999); - if (tConnection->execute( Commit ) == -1) + if (tConnection->execute( NdbTransaction::Commit ) == -1) { if (tConnection->theError.code != 9999) goto error_handler; @@ -1124,7 +1124,7 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table, tOperation->readTuple(); tOperation->equal("SYSKEY_0", aTableId ); tRecAttrResult = tOperation->getValue("NEXTID"); - if (tConnection->execute( Commit ) == -1 ) + if (tConnection->execute( NdbTransaction::Commit ) == -1 ) goto error_handler; opValue = tRecAttrResult->u_64_value(); // out break; diff --git a/storage/ndb/src/ndbapi/NdbBlob.cpp b/storage/ndb/src/ndbapi/NdbBlob.cpp index 9ebc5fa9a81..ee3a1cfd66a 100644 --- a/storage/ndb/src/ndbapi/NdbBlob.cpp +++ b/storage/ndb/src/ndbapi/NdbBlob.cpp @@ -1133,7 +1133,7 @@ NdbBlob::readTableParts(char* buf, Uint32 part, Uint32 count) setErrorCode(tOp); DBUG_RETURN(-1); } - tOp->m_abortOption = NdbTransaction::AbortOnError; + tOp->m_abortOption = NdbOperation::AbortOnError; buf += thePartSize; n++; thePendingBlobOps |= (1 << NdbOperation::ReadRequest); @@ -1169,7 +1169,7 @@ NdbBlob::insertParts(const char* buf, Uint32 part, Uint32 count) setErrorCode(tOp); DBUG_RETURN(-1); } - tOp->m_abortOption = NdbTransaction::AbortOnError; + tOp->m_abortOption = NdbOperation::AbortOnError; buf += thePartSize; n++; thePendingBlobOps |= (1 << NdbOperation::InsertRequest); @@ -1193,7 +1193,7 @@ NdbBlob::updateParts(const char* buf, Uint32 part, Uint32 count) setErrorCode(tOp); DBUG_RETURN(-1); } - tOp->m_abortOption = NdbTransaction::AbortOnError; + tOp->m_abortOption = 
NdbOperation::AbortOnError; buf += thePartSize; n++; thePendingBlobOps |= (1 << NdbOperation::UpdateRequest); @@ -1216,7 +1216,7 @@ NdbBlob::deleteParts(Uint32 part, Uint32 count) setErrorCode(tOp); DBUG_RETURN(-1); } - tOp->m_abortOption = NdbTransaction::AbortOnError; + tOp->m_abortOption = NdbOperation::AbortOnError; n++; thePendingBlobOps |= (1 << NdbOperation::DeleteRequest); theNdbCon->thePendingBlobOps |= (1 << NdbOperation::DeleteRequest); @@ -1252,7 +1252,7 @@ NdbBlob::deletePartsUnknown(Uint32 part) setErrorCode(tOp); DBUG_RETURN(-1); } - tOp->m_abortOption= NdbTransaction::AO_IgnoreError; + tOp->m_abortOption= NdbOperation::AO_IgnoreError; n++; } DBUG_PRINT("info", ("bat=%u", bat)); @@ -1588,7 +1588,7 @@ NdbBlob::preExecute(NdbTransaction::ExecType anExecType, bool& batch) DBUG_RETURN(-1); } if (isWriteOp()) { - tOp->m_abortOption = NdbTransaction::AO_IgnoreError; + tOp->m_abortOption = NdbOperation::AO_IgnoreError; } theHeadInlineReadOp = tOp; // execute immediately @@ -1634,7 +1634,7 @@ NdbBlob::preExecute(NdbTransaction::ExecType anExecType, bool& batch) DBUG_RETURN(-1); } if (isWriteOp()) { - tOp->m_abortOption = NdbTransaction::AO_IgnoreError; + tOp->m_abortOption = NdbOperation::AO_IgnoreError; } theHeadInlineReadOp = tOp; // execute immediately @@ -1807,7 +1807,7 @@ NdbBlob::postExecute(NdbTransaction::ExecType anExecType) setErrorCode(NdbBlobImpl::ErrAbort); DBUG_RETURN(-1); } - tOp->m_abortOption = NdbTransaction::AbortOnError; + tOp->m_abortOption = NdbOperation::AbortOnError; DBUG_PRINT("info", ("added op to update head+inline")); } DBUG_RETURN(0); @@ -1837,7 +1837,7 @@ NdbBlob::preCommit() setErrorCode(NdbBlobImpl::ErrAbort); DBUG_RETURN(-1); } - tOp->m_abortOption = NdbTransaction::AbortOnError; + tOp->m_abortOption = NdbOperation::AbortOnError; DBUG_PRINT("info", ("added op to update head+inline")); } } diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp index a425819df6b..6490ec91300 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp @@ -1609,17 +1609,24 @@ NdbEventBuffer::insert_event(NdbEventOperationImpl* impl, Uint32 &oid_ref) { NdbEventOperationImpl *dropped_ev_op = m_dropped_ev_op; + DBUG_PRINT("info", ("gci: %u", data.gci)); do { do { - oid_ref = impl->m_oid; - insertDataL(impl, &data, ptr); + if (impl->m_node_bit_mask.get(0u)) + { + oid_ref = impl->m_oid; + insertDataL(impl, &data, ptr); + } NdbEventOperationImpl* blob_op = impl->theBlobOpList; while (blob_op != NULL) { - oid_ref = blob_op->m_oid; - insertDataL(blob_op, &data, ptr); + if (blob_op->m_node_bit_mask.get(0u)) + { + oid_ref = blob_op->m_oid; + insertDataL(blob_op, &data, ptr); + } blob_op = blob_op->m_next; } } while((impl = impl->m_next)); @@ -1804,6 +1811,7 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op, switch (operation) { case NdbDictionary::Event::_TE_NODE_FAILURE: + DBUG_ASSERT(op->m_node_bit_mask.get(0u) != 0); op->m_node_bit_mask.clear(SubTableData::getNdbdNodeId(ri)); DBUG_PRINT("info", ("_TE_NODE_FAILURE: m_ref_count: %u for op: %p id: %u", @@ -1819,29 +1827,23 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op, DBUG_RETURN_EVENT(0); break; case NdbDictionary::Event::_TE_CLUSTER_FAILURE: - if (op->m_node_bit_mask.get(0)) - { - op->m_node_bit_mask.clear(); - DBUG_ASSERT(op->m_ref_count > 0); - // remove kernel reference - // added in execute_nolock - op->m_ref_count--; - DBUG_PRINT("info", ("_TE_CLUSTER_FAILURE: m_ref_count: %u for op: %p", - 
op->m_ref_count, op)); - if (op->theMainOp) - { - DBUG_ASSERT(op->m_ref_count == 0); - DBUG_ASSERT(op->theMainOp->m_ref_count > 0); - // remove blob reference in main op - // added in execute_no_lock - op->theMainOp->m_ref_count--; - DBUG_PRINT("info", ("m_ref_count: %u for op: %p", - op->theMainOp->m_ref_count, op->theMainOp)); - } - } - else + DBUG_ASSERT(op->m_node_bit_mask.get(0u) != 0); + op->m_node_bit_mask.clear(); + DBUG_ASSERT(op->m_ref_count > 0); + // remove kernel reference + // added in execute_nolock + op->m_ref_count--; + DBUG_PRINT("info", ("_TE_CLUSTER_FAILURE: m_ref_count: %u for op: %p", + op->m_ref_count, op)); + if (op->theMainOp) { - DBUG_ASSERT(op->m_node_bit_mask.isclear() != 0); + DBUG_ASSERT(op->m_ref_count == 0); + DBUG_ASSERT(op->theMainOp->m_ref_count > 0); + // remove blob reference in main op + // added in execute_no_lock + op->theMainOp->m_ref_count--; + DBUG_PRINT("info", ("m_ref_count: %u for op: %p", + op->theMainOp->m_ref_count, op->theMainOp)); } break; case NdbDictionary::Event::_TE_STOP: diff --git a/storage/ndb/src/ndbapi/NdbIndexOperation.cpp b/storage/ndb/src/ndbapi/NdbIndexOperation.cpp index 88e9253880f..fc19bd251d4 100644 --- a/storage/ndb/src/ndbapi/NdbIndexOperation.cpp +++ b/storage/ndb/src/ndbapi/NdbIndexOperation.cpp @@ -172,237 +172,6 @@ NdbIndexOperation::getIndex() const return m_theIndex; } -int -NdbIndexOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransactionId) -{ - Uint32 tTransId1, tTransId2; - Uint32 tReqInfo; - Uint32 tSignalCount = 0; - Uint32 tInterpretInd = theInterpretIndicator; - - theErrorLine = 0; - - if (tInterpretInd != 1) { - OperationType tOpType = theOperationType; - OperationStatus tStatus = theStatus; - if ((tOpType == UpdateRequest) || - (tOpType == InsertRequest) || - (tOpType == WriteRequest)) { - if (tStatus != SetValue) { - setErrorCodeAbort(4506); - return -1; - }//if - } else if ((tOpType == ReadRequest) || (tOpType == ReadExclusive) || - (tOpType == DeleteRequest)) { - if (tStatus != GetValue) { - setErrorCodeAbort(4506); - return -1; - }//if - } else { - setErrorCodeAbort(4507); - return -1; - }//if - } else { - if (prepareSendInterpreted() == -1) { - return -1; - }//if - }//if - -//------------------------------------------------------------- -// We start by filling in the first 8 unconditional words of the -// TCINDXREQ signal. -//------------------------------------------------------------- - TcKeyReq * tcKeyReq = - CAST_PTR(TcKeyReq, theTCREQ->getDataPtrSend()); - - Uint32 tTotalCurrAI_Len = theTotalCurrAI_Len; - Uint32 tIndexId = m_theIndex->m_id; - Uint32 tSchemaVersion = m_theIndex->m_version; - - tcKeyReq->apiConnectPtr = aTC_ConnectPtr; - tcKeyReq->senderData = ptr2int(); - tcKeyReq->attrLen = tTotalCurrAI_Len; - tcKeyReq->tableId = tIndexId; - tcKeyReq->tableSchemaVersion = tSchemaVersion; - - tTransId1 = (Uint32) aTransactionId; - tTransId2 = (Uint32) (aTransactionId >> 32); - -//------------------------------------------------------------- -// Simple is simple if simple or both start and commit is set. 
-//------------------------------------------------------------- -// Temporarily disable simple stuff - Uint8 tSimpleIndicator = 0; -// Uint8 tSimpleIndicator = theSimpleIndicator; - Uint8 tCommitIndicator = theCommitIndicator; - Uint8 tStartIndicator = theStartIndicator; -// if ((theNdbCon->theLastOpInList == this) && (theCommitIndicator == 0)) -// abort(); -// Temporarily disable simple stuff - Uint8 tSimpleAlt = 0; -// Uint8 tSimpleAlt = tStartIndicator & tCommitIndicator; - tSimpleIndicator = tSimpleIndicator | tSimpleAlt; - -//------------------------------------------------------------- -// Simple state is set if start and commit is set and it is -// a read request. Otherwise it is set to zero. -//------------------------------------------------------------- - //theNdbCon->theSimpleState = tSimpleState; - - tcKeyReq->transId1 = tTransId1; - tcKeyReq->transId2 = tTransId2; - - tReqInfo = 0; - - if (tTotalCurrAI_Len <= TcKeyReq::MaxAttrInfo) { - tcKeyReq->setAIInTcKeyReq(tReqInfo, tTotalCurrAI_Len); - } else { - tcKeyReq->setAIInTcKeyReq(tReqInfo, TcKeyReq::MaxAttrInfo); - }//if - - tcKeyReq->setSimpleFlag(tReqInfo, tSimpleIndicator); - tcKeyReq->setCommitFlag(tReqInfo, tCommitIndicator); - tcKeyReq->setStartFlag(tReqInfo, tStartIndicator); - const Uint8 tInterpretIndicator = theInterpretIndicator; - tcKeyReq->setInterpretedFlag(tReqInfo, tInterpretIndicator); - - Uint8 tDirtyIndicator = theDirtyIndicator; - OperationType tOperationType = theOperationType; - Uint32 tIndexLen = theTupKeyLen; - Uint8 abortOption = theNdbCon->m_abortOption; - - tcKeyReq->setDirtyFlag(tReqInfo, tDirtyIndicator); - tcKeyReq->setOperationType(tReqInfo, tOperationType); - tcKeyReq->setKeyLength(tReqInfo, tIndexLen); - tcKeyReq->setAbortOption(tReqInfo, abortOption); - - Uint8 tDistrKeyIndicator = theDistrKeyIndicator_; - Uint8 tScanIndicator = theScanInfo & 1; - - tcKeyReq->setDistributionKeyFlag(tReqInfo, tDistrKeyIndicator); - tcKeyReq->setScanIndFlag(tReqInfo, tScanIndicator); - - tcKeyReq->requestInfo = tReqInfo; - -//------------------------------------------------------------- -// The next step is to fill in the upto three conditional words. -//------------------------------------------------------------- - Uint32* tOptionalDataPtr = &tcKeyReq->scanInfo; - Uint32 tDistrGHIndex = tScanIndicator; - Uint32 tDistrKeyIndex = tDistrGHIndex; - - Uint32 tScanInfo = theScanInfo; - Uint32 tDistrKey = theDistributionKey; - - tOptionalDataPtr[0] = tScanInfo; - tOptionalDataPtr[tDistrKeyIndex] = tDistrKey; - -//------------------------------------------------------------- -// The next is step is to compress the key data part of the -// TCKEYREQ signal. -//------------------------------------------------------------- - Uint32 tKeyIndex = tDistrKeyIndex + tDistrKeyIndicator; - Uint32* tKeyDataPtr = &tOptionalDataPtr[tKeyIndex]; - Uint32 Tdata1 = tcKeyReq->keyInfo[0]; - Uint32 Tdata2 = tcKeyReq->keyInfo[1]; - Uint32 Tdata3 = tcKeyReq->keyInfo[2]; - Uint32 Tdata4 = tcKeyReq->keyInfo[3]; - Uint32 Tdata5; - - tKeyDataPtr[0] = Tdata1; - tKeyDataPtr[1] = Tdata2; - tKeyDataPtr[2] = Tdata3; - tKeyDataPtr[3] = Tdata4; - if (tIndexLen > 4) { - Tdata1 = tcKeyReq->keyInfo[4]; - Tdata2 = tcKeyReq->keyInfo[5]; - Tdata3 = tcKeyReq->keyInfo[6]; - Tdata4 = tcKeyReq->keyInfo[7]; - - tKeyDataPtr[4] = Tdata1; - tKeyDataPtr[5] = Tdata2; - tKeyDataPtr[6] = Tdata3; - tKeyDataPtr[7] = Tdata4; - }//if -//------------------------------------------------------------- -// Finally we also compress the INDXATTRINFO part of the signal. 
-// We optimise by using the if-statement for sending INDXKEYINFO -// signals to calculating the new Attrinfo Index. -//------------------------------------------------------------- - Uint32 tAttrInfoIndex; - - if (tIndexLen > TcKeyReq::MaxKeyInfo) { - /** - * Set transid and TC connect ptr in the INDXKEYINFO signals - */ - NdbApiSignal* tSignal = theTCREQ->next(); - Uint32 remainingKey = tIndexLen - TcKeyReq::MaxKeyInfo; - - do { - Uint32* tSigDataPtr = tSignal->getDataPtrSend(); - NdbApiSignal* tnextSignal = tSignal->next(); - tSignalCount++; - tSigDataPtr[0] = aTC_ConnectPtr; - tSigDataPtr[1] = tTransId1; - tSigDataPtr[2] = tTransId2; - if (remainingKey > IndxKeyInfo::DataLength) { - // The signal is full - tSignal->setLength(IndxKeyInfo::MaxSignalLength); - remainingKey -= IndxKeyInfo::DataLength; - } - else { - // Last signal - tSignal->setLength(IndxKeyInfo::HeaderLength + remainingKey); - remainingKey = 0; - } - tSignal = tnextSignal; - } while (tSignal != NULL); - tAttrInfoIndex = tKeyIndex + TcKeyReq::MaxKeyInfo; - } else { - tAttrInfoIndex = tKeyIndex + tIndexLen; - }//if - -//------------------------------------------------------------- -// Perform the Attrinfo packing in the TCKEYREQ signal started -// above. -//------------------------------------------------------------- - Uint32* tAIDataPtr = &tOptionalDataPtr[tAttrInfoIndex]; - Tdata1 = tcKeyReq->attrInfo[0]; - Tdata2 = tcKeyReq->attrInfo[1]; - Tdata3 = tcKeyReq->attrInfo[2]; - Tdata4 = tcKeyReq->attrInfo[3]; - Tdata5 = tcKeyReq->attrInfo[4]; - - theTCREQ->setLength(tcKeyReq->getAIInTcKeyReq(tReqInfo) + - tAttrInfoIndex + TcKeyReq::StaticLength); - tAIDataPtr[0] = Tdata1; - tAIDataPtr[1] = Tdata2; - tAIDataPtr[2] = Tdata3; - tAIDataPtr[3] = Tdata4; - tAIDataPtr[4] = Tdata5; - -/*************************************************** -* Send the INDXATTRINFO signals. -***************************************************/ - if (tTotalCurrAI_Len > 5) { - // Set the last signal's length. 
- NdbApiSignal* tSignal = theFirstATTRINFO; - theCurrentATTRINFO->setLength(theAI_LenInCurrAI); - do { - Uint32* tSigDataPtr = tSignal->getDataPtrSend(); - NdbApiSignal* tnextSignal = tSignal->next(); - tSignalCount++; - tSigDataPtr[0] = aTC_ConnectPtr; - tSigDataPtr[1] = tTransId1; - tSigDataPtr[2] = tTransId2; - tSignal = tnextSignal; - } while (tSignal != NULL); - }//if - theStatus = WaitResponse; - theReceiver.prepareSend(); - return 0; -} - /*************************************************************************** int receiveTCINDXREF( NdbApiSignal* aSignal) diff --git a/storage/ndb/src/ndbapi/NdbIndexStat.cpp b/storage/ndb/src/ndbapi/NdbIndexStat.cpp index 773c302e0cd..0ce96b1b4d9 100644 --- a/storage/ndb/src/ndbapi/NdbIndexStat.cpp +++ b/storage/ndb/src/ndbapi/NdbIndexStat.cpp @@ -426,7 +426,7 @@ NdbIndexStat::records_in_range(const NdbDictionary::Index* index, NdbIndexScanOp DBUG_RETURN(-1); } if (trans->execute(NdbTransaction::NoCommit, - NdbTransaction::AbortOnError, forceSend) == -1) { + NdbOperation::AbortOnError, forceSend) == -1) { m_error = trans->getNdbError(); DBUG_PRINT("error", ("trans:%d op:%d", trans->getNdbError().code, op->getNdbError().code)); diff --git a/storage/ndb/src/ndbapi/NdbOperationDefine.cpp b/storage/ndb/src/ndbapi/NdbOperationDefine.cpp index 757d39a75ce..8958b6ec596 100644 --- a/storage/ndb/src/ndbapi/NdbOperationDefine.cpp +++ b/storage/ndb/src/ndbapi/NdbOperationDefine.cpp @@ -44,6 +44,7 @@ NdbOperation::insertTuple() tNdbCon->theSimpleState = 0; theErrorLine = tErrorLine++; theLockMode = LM_Exclusive; + m_abortOption = AbortOnError; return 0; } else { setErrorCode(4200); @@ -64,6 +65,7 @@ NdbOperation::updateTuple() theOperationType = UpdateRequest; theErrorLine = tErrorLine++; theLockMode = LM_Exclusive; + m_abortOption = AbortOnError; return 0; } else { setErrorCode(4200); @@ -84,12 +86,35 @@ NdbOperation::writeTuple() theOperationType = WriteRequest; theErrorLine = tErrorLine++; theLockMode = LM_Exclusive; + m_abortOption = AbortOnError; return 0; } else { setErrorCode(4200); return -1; }//if }//NdbOperation::writeTuple() +/***************************************************************************** + * int deleteTuple(); + *****************************************************************************/ +int +NdbOperation::deleteTuple() +{ + NdbTransaction* tNdbCon = theNdbCon; + int tErrorLine = theErrorLine; + if (theStatus == Init) { + theStatus = OperationDefined; + tNdbCon->theSimpleState = 0; + theOperationType = DeleteRequest; + theErrorLine = tErrorLine++; + theLockMode = LM_Exclusive; + m_abortOption = AbortOnError; + return 0; + } else { + setErrorCode(4200); + return -1; + }//if +}//NdbOperation::deleteTuple() + /****************************************************************************** * int readTuple(); *****************************************************************************/ @@ -124,6 +149,7 @@ NdbOperation::readTuple() theOperationType = ReadRequest; theErrorLine = tErrorLine++; theLockMode = LM_Read; + m_abortOption = AO_IgnoreError; return 0; } else { setErrorCode(4200); @@ -131,27 +157,6 @@ NdbOperation::readTuple() }//if }//NdbOperation::readTuple() -/***************************************************************************** - * int deleteTuple(); - *****************************************************************************/ -int -NdbOperation::deleteTuple() -{ - NdbTransaction* tNdbCon = theNdbCon; - int tErrorLine = theErrorLine; - if (theStatus == Init) { - theStatus = OperationDefined; - 
tNdbCon->theSimpleState = 0; - theOperationType = DeleteRequest; - theErrorLine = tErrorLine++; - theLockMode = LM_Exclusive; - return 0; - } else { - setErrorCode(4200); - return -1; - }//if -}//NdbOperation::deleteTuple() - /****************************************************************************** * int readTupleExclusive(); *****************************************************************************/ @@ -166,6 +171,7 @@ NdbOperation::readTupleExclusive() theOperationType = ReadExclusive; theErrorLine = tErrorLine++; theLockMode = LM_Exclusive; + m_abortOption = AO_IgnoreError; return 0; } else { setErrorCode(4200); @@ -222,6 +228,7 @@ NdbOperation::committedRead() theDirtyIndicator = 1; theErrorLine = tErrorLine++; theLockMode = LM_CommittedRead; + m_abortOption = AO_IgnoreError; return 0; } else { setErrorCode(4200); @@ -245,6 +252,7 @@ NdbOperation::dirtyUpdate() theDirtyIndicator = 1; theErrorLine = tErrorLine++; theLockMode = LM_CommittedRead; + m_abortOption = AbortOnError; return 0; } else { setErrorCode(4200); @@ -268,6 +276,7 @@ NdbOperation::dirtyWrite() theDirtyIndicator = 1; theErrorLine = tErrorLine++; theLockMode = LM_CommittedRead; + m_abortOption = AbortOnError; return 0; } else { setErrorCode(4200); @@ -290,6 +299,7 @@ NdbOperation::interpretedUpdateTuple() theAI_LenInCurrAI = 25; theLockMode = LM_Exclusive; theErrorLine = tErrorLine++; + m_abortOption = AbortOnError; initInterpreter(); return 0; } else { @@ -314,6 +324,7 @@ NdbOperation::interpretedDeleteTuple() theErrorLine = tErrorLine++; theAI_LenInCurrAI = 25; theLockMode = LM_Exclusive; + m_abortOption = AbortOnError; initInterpreter(); return 0; } else { diff --git a/storage/ndb/src/ndbapi/NdbOperationExec.cpp b/storage/ndb/src/ndbapi/NdbOperationExec.cpp index 486c772de4d..ba1905760c3 100644 --- a/storage/ndb/src/ndbapi/NdbOperationExec.cpp +++ b/storage/ndb/src/ndbapi/NdbOperationExec.cpp @@ -99,7 +99,9 @@ Parameters: aTC_ConnectPtr: the Connect pointer to TC. Remark: Puts the the data into TCKEYREQ signal and optional KEYINFO and ATTRINFO signals. ***************************************************************************/ int -NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId) +NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, + Uint64 aTransId, + AbortOption ao) { Uint32 tTransId1, tTransId2; Uint32 tReqInfo; @@ -147,8 +149,8 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId) //------------------------------------------------------------- TcKeyReq * const tcKeyReq = CAST_PTR(TcKeyReq, theTCREQ->getDataPtrSend()); - Uint32 tTableId = m_currentTable->m_id; - Uint32 tSchemaVersion = m_currentTable->m_version; + Uint32 tTableId = m_accessTable->m_id; + Uint32 tSchemaVersion = m_accessTable->m_version; tcKeyReq->apiConnectPtr = aTC_ConnectPtr; tcKeyReq->apiOperationPtr = ptr2int(); @@ -177,6 +179,8 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId) // Simple state is set if start and commit is set and it is // a read request. Otherwise it is set to zero. //------------------------------------------------------------- + Uint8 tReadInd = (theOperationType == ReadRequest); + Uint8 tSimpleState = tReadInd & tSimpleIndicator; tcKeyReq->transId1 = tTransId1; tcKeyReq->transId2 = tTransId2; @@ -196,16 +200,16 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId) OperationType tOperationType = theOperationType; Uint32 tTupKeyLen = theTupKeyLen; - Uint8 abortOption = - m_abortOption != -1 ? 
m_abortOption : theNdbCon->m_abortOption; + Uint8 abortOption = (ao == DefaultAbortOption) ? m_abortOption : ao; tcKeyReq->setDirtyFlag(tReqInfo, tDirtyIndicator); tcKeyReq->setOperationType(tReqInfo, tOperationType); tcKeyReq->setKeyLength(tReqInfo, tTupKeyLen); // A simple read is always ignore error - abortOption = tSimpleIndicator ? (Uint8) AO_IgnoreError : abortOption; + abortOption = tSimpleState ? AO_IgnoreError : abortOption; tcKeyReq->setAbortOption(tReqInfo, abortOption); + m_abortOption = abortOption; Uint8 tDistrKeyIndicator = theDistrKeyIndicator_; Uint8 tScanIndicator = theScanInfo & 1; @@ -541,21 +545,16 @@ NdbOperation::receiveTCKEYREF( NdbApiSignal* aSignal) return -1; }//if - AbortOption ao = (AbortOption) - (m_abortOption != -1 ? m_abortOption : theNdbCon->m_abortOption); + setErrorCode(aSignal->readData(4)); + theStatus = Finished; theReceiver.m_received_result_length = ~0; - theStatus = Finished; - // blobs want this - if (m_abortOption != AO_IgnoreError) + // not simple read + if(! (theOperationType == ReadRequest && theSimpleIndicator)) { - theNdbCon->theReturnStatus = NdbTransaction::ReturnFailure; + theNdbCon->OpCompleteFailure(this); + return -1; } - theError.code = aSignal->readData(4); - theNdbCon->setOperationErrorCodeAbort(aSignal->readData(4), ao); - - if(theOperationType != ReadRequest || !theSimpleIndicator) // not simple read - return theNdbCon->OpCompleteFailure(ao, m_abortOption != AO_IgnoreError); /** * If TCKEYCONF has arrived @@ -563,23 +562,8 @@ NdbOperation::receiveTCKEYREF( NdbApiSignal* aSignal) */ if(theReceiver.m_expected_result_length) { - return theNdbCon->OpCompleteFailure(AbortOnError); + return theNdbCon->OpCompleteFailure(this); } return -1; } - - -void -NdbOperation::handleFailedAI_ElemLen() -{ - NdbRecAttr* tRecAttr = theReceiver.theFirstRecAttr; - while (tRecAttr != NULL) { - tRecAttr->setNULL(); - tRecAttr = tRecAttr->next(); - }//while -}//NdbOperation::handleFailedAI_ElemLen() - - - - diff --git a/storage/ndb/src/ndbapi/NdbScanOperation.cpp b/storage/ndb/src/ndbapi/NdbScanOperation.cpp index fe650827347..ce24d6cee0a 100644 --- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp +++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp @@ -994,6 +994,7 @@ NdbScanOperation::takeOverScanOp(OperationType opType, NdbTransaction* pTrans) newOp->theTupKeyLen = len; newOp->theOperationType = opType; + newOp->m_abortOption = AbortOnError; switch (opType) { case (ReadRequest): newOp->theLockMode = theLockMode; diff --git a/storage/ndb/src/ndbapi/NdbTransaction.cpp b/storage/ndb/src/ndbapi/NdbTransaction.cpp index 5e3738823d3..9d337c05fd9 100644 --- a/storage/ndb/src/ndbapi/NdbTransaction.cpp +++ b/storage/ndb/src/ndbapi/NdbTransaction.cpp @@ -56,7 +56,6 @@ NdbTransaction::NdbTransaction( Ndb* aNdb ) : theCompletedLastOp(NULL), theNoOfOpSent(0), theNoOfOpCompleted(0), - theNoOfOpFetched(0), theMyRef(0), theTCConPtr(0), theTransactionId(0), @@ -131,7 +130,6 @@ NdbTransaction::init() theNdb->theImpl->m_ndb_cluster_connection.get_latest_trans_gci(); theCommitStatus = Started; theCompletionStatus = NotCompleted; - m_abortOption = AbortOnError; theError.code = 0; theErrorLine = 0; @@ -176,12 +174,9 @@ void NdbTransaction::setOperationErrorCodeAbort(int error, int abortOption) { DBUG_ENTER("NdbTransaction::setOperationErrorCodeAbort"); - if (abortOption == -1) - abortOption = m_abortOption; if (theTransactionIsStarted == false) { theCommitStatus = Aborted; - } else if ((abortOption == AbortOnError) && - (theCommitStatus != Committed) && + } else if 
((theCommitStatus != Committed) && (theCommitStatus != Aborted)) { theCommitStatus = NeedAbort; }//if @@ -263,8 +258,8 @@ Remark: Initialise connection object for new transaction. *****************************************************************************/ int NdbTransaction::execute(ExecType aTypeOfExec, - AbortOption abortOption, - int forceSend) + NdbOperation::AbortOption abortOption, + int forceSend) { NdbError savedError= theError; DBUG_ENTER("NdbTransaction::execute"); @@ -354,40 +349,14 @@ NdbTransaction::execute(ExecType aTypeOfExec, theCompletedLastOp = NULL; } - if (executeNoBlobs(tExecType, abortOption, forceSend) == -1) + if (executeNoBlobs(tExecType, + NdbOperation::DefaultAbortOption, + forceSend) == -1) { - ret = -1; if(savedError.code==0) savedError= theError; - /** - * If AO_IgnoreError, error codes arent always set on individual - * operations, making postExecute impossible - */ - if (abortOption == AO_IgnoreError) - { - if (theCompletedFirstOp != NULL) - { - if (tCompletedFirstOp != NULL) - { - tCompletedLastOp->next(theCompletedFirstOp); - theCompletedFirstOp = tCompletedFirstOp; - } - } - else - { - theCompletedFirstOp = tCompletedFirstOp; - theCompletedLastOp = tCompletedLastOp; - } - if (tPrepOp != NULL && tRestOp != NULL) { - if (theFirstOpInList == NULL) - theFirstOpInList = tRestOp; - else - theLastOpInList->next(tRestOp); - theLastOpInList = tLastOp; - } - DBUG_RETURN(-1); - } + DBUG_RETURN(-1); } #ifdef ndb_api_crash_on_complex_blob_abort @@ -447,9 +416,9 @@ NdbTransaction::execute(ExecType aTypeOfExec, } int -NdbTransaction::executeNoBlobs(ExecType aTypeOfExec, - AbortOption abortOption, - int forceSend) +NdbTransaction::executeNoBlobs(NdbTransaction::ExecType aTypeOfExec, + NdbOperation::AbortOption abortOption, + int forceSend) { DBUG_ENTER("NdbTransaction::executeNoBlobs"); DBUG_PRINT("enter", ("aTypeOfExec: %d, abortOption: %d", @@ -527,10 +496,10 @@ Parameters : aTypeOfExec: Type of execute. Remark: Prepare a part of a transaction in an asynchronous manner. *****************************************************************************/ void -NdbTransaction::executeAsynchPrepare( ExecType aTypeOfExec, +NdbTransaction::executeAsynchPrepare(NdbTransaction::ExecType aTypeOfExec, NdbAsynchCallback aCallback, void* anyObject, - AbortOption abortOption) + NdbOperation::AbortOption abortOption) { DBUG_ENTER("NdbTransaction::executeAsynchPrepare"); DBUG_PRINT("enter", ("aTypeOfExec: %d, aCallback: 0x%lx, anyObject: Ox%lx", @@ -570,7 +539,6 @@ NdbTransaction::executeAsynchPrepare( ExecType aTypeOfExec, theReturnStatus = ReturnSuccess; theCallbackFunction = aCallback; theCallbackObject = anyObject; - m_abortOption = abortOption; m_waitForReply = true; tNdb->thePreparedTransactionsArray[tnoOfPreparedTransactions] = this; theTransArrayIndex = tnoOfPreparedTransactions; @@ -665,8 +633,7 @@ NdbTransaction::executeAsynchPrepare( ExecType aTypeOfExec, while (tOp) { int tReturnCode; NdbOperation* tNextOp = tOp->next(); - - tReturnCode = tOp->prepareSend(theTCConPtr, theTransactionId); + tReturnCode = tOp->prepareSend(theTCConPtr, theTransactionId, abortOption); if (tReturnCode == -1) { theSendStatus = sendABORTfail; DBUG_VOID_RETURN; @@ -1799,14 +1766,8 @@ from other transactions. 
} } else if ((tNoComp >= tNoSent) && (theLastExecOpInList->theCommitIndicator == 1)){ - - - if (m_abortOption == AO_IgnoreError && theError.code != 0){ - /** - * There's always a TCKEYCONF when using IgnoreError - */ - return -1; - } + + /**********************************************************************/ // We sent the transaction with Commit flag set and received a CONF with // no Commit flag set. This is clearly an anomaly. @@ -1980,13 +1941,6 @@ NdbTransaction::receiveTCINDXCONF(const TcIndxConf * indxConf, } else if ((tNoComp >= tNoSent) && (theLastExecOpInList->theCommitIndicator == 1)){ - if (m_abortOption == AO_IgnoreError && theError.code != 0){ - /** - * There's always a TCKEYCONF when using IgnoreError - */ - return -1; - } - /**********************************************************************/ // We sent the transaction with Commit flag set and received a CONF with // no Commit flag set. This is clearly an anomaly. @@ -2010,41 +1964,6 @@ NdbTransaction::receiveTCINDXCONF(const TcIndxConf * indxConf, return -1; }//NdbTransaction::receiveTCINDXCONF() -/***************************************************************************** -int receiveTCINDXREF( NdbApiSignal* aSignal) - -Return Value: Return 0 : send was succesful. - Return -1: In all other case. -Parameters: aSignal: the signal object that contains the - TCINDXREF signal from TC. -Remark: Handles the reception of the TCINDXREF signal. -*****************************************************************************/ -int -NdbTransaction::receiveTCINDXREF( NdbApiSignal* aSignal) -{ - if(checkState_TransId(aSignal->getDataPtr()+1)){ - theError.code = aSignal->readData(4); // Override any previous errors - - /**********************************************************************/ - /* A serious error has occured. This could be due to deadlock or */ - /* lack of resources or simply a programming error in NDB. This */ - /* transaction will be aborted. Actually it has already been */ - /* and we only need to report completion and return with the */ - /* error code to the application. */ - /**********************************************************************/ - theCompletionStatus = NdbTransaction::CompletedFailure; - theCommitStatus = NdbTransaction::Aborted; - theReturnStatus = NdbTransaction::ReturnFailure; - return 0; - } else { -#ifdef NDB_NO_DROPPED_SIGNAL - abort(); -#endif - } - - return -1; -}//NdbTransaction::receiveTCINDXREF() - /******************************************************************************* int OpCompletedFailure(); @@ -2054,36 +1973,15 @@ Parameters: aErrorCode: The error code. Remark: An operation was completed with failure. *******************************************************************************/ int -NdbTransaction::OpCompleteFailure(Uint8 abortOption, bool setFailure) +NdbTransaction::OpCompleteFailure(NdbOperation* op) { Uint32 tNoComp = theNoOfOpCompleted; Uint32 tNoSent = theNoOfOpSent; - if (setFailure) - theCompletionStatus = NdbTransaction::CompletedFailure; + tNoComp++; theNoOfOpCompleted = tNoComp; - if (tNoComp == tNoSent) { - //------------------------------------------------------------------------ - //If the transaction consists of only simple reads we can set - //Commit state Aborted. Otherwise this simple operation cannot - //decide the success of the whole transaction since a simple - //operation is not really part of that transaction. 
- //------------------------------------------------------------------------ - if (abortOption == AO_IgnoreError){ - /** - * There's always a TCKEYCONF when using IgnoreError - */ - return -1; - } - - return 0; // Last operation received - } else if (tNoComp > tNoSent) { - setOperationErrorCodeAbort(4113); // Too many operations, - // stop waiting for more - return 0; - } else { - return -1; // Continue waiting for more signals - }//if + + return (tNoComp == tNoSent) ? 0 : -1; }//NdbTransaction::OpCompleteFailure() /****************************************************************************** diff --git a/storage/ndb/test/include/HugoOperations.hpp b/storage/ndb/test/include/HugoOperations.hpp index b5014380eec..3147ee57d4d 100644 --- a/storage/ndb/test/include/HugoOperations.hpp +++ b/storage/ndb/test/include/HugoOperations.hpp @@ -106,8 +106,8 @@ public: NDBT_ResultRow& get_row(Uint32 idx) { return *rows[idx];} - int execute_async(Ndb*, NdbTransaction::ExecType, NdbTransaction::AbortOption = NdbTransaction::AbortOnError); - int execute_async_prepare(Ndb*, NdbTransaction::ExecType, NdbTransaction::AbortOption = NdbTransaction::AbortOnError); + int execute_async(Ndb*, NdbTransaction::ExecType, NdbOperation::AbortOption = NdbOperation::AbortOnError); + int execute_async_prepare(Ndb*, NdbTransaction::ExecType, NdbOperation::AbortOption = NdbOperation::AbortOnError); int wait_async(Ndb*, int timeout = -1); diff --git a/storage/ndb/test/ndbapi/testBasic.cpp b/storage/ndb/test/ndbapi/testBasic.cpp index e8e4548a91c..83c2628f8b0 100644 --- a/storage/ndb/test/ndbapi/testBasic.cpp +++ b/storage/ndb/test/ndbapi/testBasic.cpp @@ -1272,6 +1272,52 @@ runBug25090(NDBT_Context* ctx, NDBT_Step* step){ return NDBT_OK; } +int +runDeleteRead(NDBT_Context* ctx, NDBT_Step* step){ + + Ndb* pNdb = GETNDB(step); + + const NdbDictionary::Table* tab = ctx->getTab(); + NDBT_ResultRow row(*ctx->getTab()); + HugoTransactions tmp(*ctx->getTab()); + + int a; + int loops = ctx->getNumLoops(); + const int rows = ctx->getNumRecords(); + + while (loops--) + { + NdbTransaction* pTrans = pNdb->startTransaction(); + NdbOperation* pOp = pTrans->getNdbOperation(tab->getName()); + pOp->deleteTuple(); + for(a = 0; a<tab->getNoOfColumns(); a++) + { + if (tab->getColumn(a)->getPrimaryKey() == true) + { + if(tmp.equalForAttr(pOp, a, 0) != 0) + { + ERR(pTrans->getNdbError()); + return NDBT_FAILED; + } + } + } + + // Define attributes to read + for(a = 0; a<tab->getNoOfColumns(); a++) + { + if((row.attributeStore(a) = pOp->getValue(tab->getColumn(a)->getName())) == 0) { + ERR(pTrans->getNdbError()); + return NDBT_FAILED; + } + } + + pTrans->execute(Commit); + pTrans->close(); + } + + return NDBT_OK; +} + NDBT_TESTSUITE(testBasic); TESTCASE("PkInsert", "Verify that we can insert and delete from this table using PK" @@ -1542,6 +1588,12 @@ TESTCASE("Bug25090", "Verify what happens when we fill the db" ){ STEP(runBug25090); } +TESTCASE("DeleteRead", + "Verify Delete+Read" ){ + INITIALIZER(runLoadTable); + INITIALIZER(runDeleteRead); + FINALIZER(runClearTable2); +} NDBT_TESTSUITE_END(testBasic); #if 0 diff --git a/storage/ndb/test/ndbapi/testBlobs.cpp b/storage/ndb/test/ndbapi/testBlobs.cpp index 81072f6a12a..5d2552220f9 100644 --- a/storage/ndb/test/ndbapi/testBlobs.cpp +++ b/storage/ndb/test/ndbapi/testBlobs.cpp @@ -734,7 +734,7 @@ verifyHeadInline(const Tup& tup) if (! 
g_opt.m_oneblob) CHK((ra2 = g_opr->getValue("BL2")) != 0); if (tup.m_exists) { - CHK(g_con->execute(Commit) == 0); + CHK(g_con->execute(Commit, AbortOnError) == 0); DBG("verifyHeadInline BL1"); CHK(verifyHeadInline(g_opt.m_blob1, tup.m_blob1, ra1) == 0); if (! g_opt.m_oneblob) { @@ -742,7 +742,8 @@ verifyHeadInline(const Tup& tup) CHK(verifyHeadInline(g_opt.m_blob2, tup.m_blob2, ra2) == 0); } } else { - CHK(g_con->execute(Commit) == -1 && g_con->getNdbError().code == 626); + CHK(g_con->execute(Commit, AbortOnError) == -1 && + g_con->getNdbError().code == 626); } g_ndb->closeTransaction(g_con); g_opr = 0; @@ -1534,7 +1535,7 @@ testperf() g_dic = g_ndb->getDictionary(); NdbDictionary::Table tab(g_opt.m_tnameperf); if (g_dic->getTable(tab.getName()) != 0) - CHK(g_dic->dropTable(tab) == 0); + CHK(g_dic->dropTable(tab.getName()) == 0); // col A - pk { NdbDictionary::Column col("A"); col.setType(NdbDictionary::Column::Unsigned); diff --git a/storage/ndb/test/ndbapi/testNdbApi.cpp b/storage/ndb/test/ndbapi/testNdbApi.cpp index 39a9c7656c0..a7d3c3d0792 100644 --- a/storage/ndb/test/ndbapi/testNdbApi.cpp +++ b/storage/ndb/test/ndbapi/testNdbApi.cpp @@ -1249,6 +1249,274 @@ int runScan_4006(NDBT_Context* ctx, NDBT_Step* step){ return result; } +char pkIdxName[255]; + +int createPkIndex(NDBT_Context* ctx, NDBT_Step* step){ + bool orderedIndex = ctx->getProperty("OrderedIndex", (unsigned)0); + + const NdbDictionary::Table* pTab = ctx->getTab(); + Ndb* pNdb = GETNDB(step); + + bool logged = ctx->getProperty("LoggedIndexes", 1); + + // Create index + BaseString::snprintf(pkIdxName, 255, "IDC_PK_%s", pTab->getName()); + if (orderedIndex) + ndbout << "Creating " << ((logged)?"logged ": "temporary ") << "ordered index " + << pkIdxName << " ("; + else + ndbout << "Creating " << ((logged)?"logged ": "temporary ") << "unique index " + << pkIdxName << " ("; + + NdbDictionary::Index pIdx(pkIdxName); + pIdx.setTable(pTab->getName()); + if (orderedIndex) + pIdx.setType(NdbDictionary::Index::OrderedIndex); + else + pIdx.setType(NdbDictionary::Index::UniqueHashIndex); + for (int c = 0; c< pTab->getNoOfColumns(); c++){ + const NdbDictionary::Column * col = pTab->getColumn(c); + if(col->getPrimaryKey()){ + pIdx.addIndexColumn(col->getName()); + ndbout << col->getName() <<" "; + } + } + + pIdx.setStoredIndex(logged); + ndbout << ") "; + if (pNdb->getDictionary()->createIndex(pIdx) != 0){ + ndbout << "FAILED!" << endl; + const NdbError err = pNdb->getDictionary()->getNdbError(); + ERR(err); + return NDBT_FAILED; + } + + ndbout << "OK!" << endl; + return NDBT_OK; +} + +int createPkIndex_Drop(NDBT_Context* ctx, NDBT_Step* step){ + const NdbDictionary::Table* pTab = ctx->getTab(); + Ndb* pNdb = GETNDB(step); + + // Drop index + ndbout << "Dropping index " << pkIdxName << " "; + if (pNdb->getDictionary()->dropIndex(pkIdxName, + pTab->getName()) != 0){ + ndbout << "FAILED!" << endl; + ERR(pNdb->getDictionary()->getNdbError()); + return NDBT_FAILED; + } else { + ndbout << "OK!" 
<< endl; + } + + return NDBT_OK; +} + +static +int +op_row(NdbTransaction* pTrans, HugoOperations& hugoOps, + const NdbDictionary::Table* pTab, int op, int row) +{ + NdbOperation * pOp = 0; + switch(op){ + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + pOp = pTrans->getNdbOperation(pTab->getName()); + break; + case 9: + return 0; + case 6: + case 7: + case 8: + case 10: + case 11: + pOp = pTrans->getNdbIndexOperation(pkIdxName, pTab->getName()); + default: + break; + } + + switch(op){ + case 0: + case 6: + pOp->readTuple(); + break; + case 1: + case 7: + pOp->committedRead(); + break; + case 2: + case 8: + pOp->readTupleExclusive(); + break; + case 3: + case 9: + pOp->insertTuple(); + break; + case 4: + case 10: + pOp->updateTuple(); + break; + case 5: + case 11: + pOp->deleteTuple(); + break; + default: + abort(); + } + + for(int a = 0; a<pTab->getNoOfColumns(); a++){ + if (pTab->getColumn(a)->getPrimaryKey() == true){ + if(hugoOps.equalForAttr(pOp, a, row) != 0){ + return NDBT_FAILED; + } + } + } + + switch(op){ + case 0: + case 1: + case 2: + case 6: + case 7: + case 8: + for(int a = 0; a<pTab->getNoOfColumns(); a++){ + pOp->getValue(a); + } + break; + case 3: + case 4: + case 10: + for(int a = 0; a<pTab->getNoOfColumns(); a++){ + if (pTab->getColumn(a)->getPrimaryKey() == false){ + if(hugoOps.setValueForAttr(pOp, a, row, 2) != 0){ + return NDBT_FAILED; + } + } + } + break; + case 5: + case 11: + pOp->deleteTuple(); + break; + case 9: + default: + abort(); + } + + return NDBT_OK; +} + +static void print(int op) +{ + const char * str = 0; + switch(op){ + case 0: str = "pk read-sh"; break; + case 1: str = "pk read-nl"; break; + case 2: str = "pk read-ex"; break; + case 3: str = "pk insert "; break; + case 4: str = "pk update "; break; + case 5: str = "pk delete "; break; + case 6: str = "uk read-sh"; break; + case 7: str = "uk read-nl"; break; + case 8: str = "uk read-ex"; break; + case 9: str = "noop "; break; + case 10: str = "uk update "; break; + case 11: str = "uk delete "; break; + default: + abort(); + } + printf("%s ", str); +} + +int +runTestIgnoreError(NDBT_Context* ctx, NDBT_Step* step) +{ + int result = NDBT_OK; + Uint32 loops = ctx->getNumRecords(); + const NdbDictionary::Table* pTab = ctx->getTab(); + + HugoOperations hugoOps(*pTab); + HugoTransactions hugoTrans(*pTab); + + Ndb* pNdb = GETNDB(step); + + struct { + ExecType et; + AbortOption ao; + } tests[] = { + { Commit, AbortOnError }, + { Commit, AO_IgnoreError }, + { NoCommit, AbortOnError }, + { NoCommit, AO_IgnoreError }, + }; + + printf("case: <op1> <op2> c/nc ao/ie\n"); + Uint32 tno = 0; + for (Uint32 op1 = 0; op1 < 12; op1++) + { + for (Uint32 op2 = op1; op2 < 12; op2++) + { + int ret; + NdbTransaction* pTrans = 0; + + for (Uint32 i = 0; i<4; i++, tno++) + { + if (loops != 1000 && loops != tno) + continue; + ExecType et = tests[i].et; + AbortOption ao = tests[i].ao; + + printf("%.3d : ", tno); + print(op1); + print(op2); + switch(et){ + case Commit: printf("c "); break; + case NoCommit: printf("nc "); break; + } + switch(ao){ + case AbortOnError: printf("aoe "); break; + case AO_IgnoreError: printf("ie "); break; + } + printf(": "); + + + hugoTrans.loadTable(pNdb, 1); + pTrans = pNdb->startTransaction(); + op_row(pTrans, hugoOps, pTab, op1, 0); + ret = pTrans->execute(et, ao); + pTrans->close(); + printf("%d ", ret); + hugoTrans.clearTable(pNdb); + + hugoTrans.loadTable(pNdb, 1); + pTrans = pNdb->startTransaction(); + op_row(pTrans, hugoOps, pTab, op1, 1); + ret = pTrans->execute(et, ao); + 
pTrans->close(); + printf("%d ", ret); + hugoTrans.clearTable(pNdb); + + hugoTrans.loadTable(pNdb, 1); + pTrans = pNdb->startTransaction(); + op_row(pTrans, hugoOps, pTab, op1, 0); + op_row(pTrans, hugoOps, pTab, op2, 1); + ret = pTrans->execute(et, ao); + pTrans->close(); + printf("%d\n", ret); + hugoTrans.clearTable(pNdb); + + hugoTrans.clearTable(pNdb); + } + } + } + return NDBT_OK; +} + template class Vector<NdbScanOperation*>; @@ -1342,6 +1610,12 @@ TESTCASE("Scan_4006", INITIALIZER(runScan_4006); FINALIZER(runClearTable); } +TESTCASE("IgnoreError", ""){ + INITIALIZER(createPkIndex); + STEP(runTestIgnoreError); + FINALIZER(runClearTable); + FINALIZER(createPkIndex_Drop); +} NDBT_TESTSUITE_END(testNdbApi); int main(int argc, const char** argv){ diff --git a/storage/ndb/test/ndbapi/testNodeRestart.cpp b/storage/ndb/test/ndbapi/testNodeRestart.cpp index 5185228fd37..04e77f70c38 100644 --- a/storage/ndb/test/ndbapi/testNodeRestart.cpp +++ b/storage/ndb/test/ndbapi/testNodeRestart.cpp @@ -1178,6 +1178,101 @@ int runBug25554(NDBT_Context* ctx, NDBT_Step* step){ return NDBT_OK; } +int runBug25984(NDBT_Context* ctx, NDBT_Step* step){ + + int result = NDBT_OK; + int loops = ctx->getNumLoops(); + int records = ctx->getNumRecords(); + NdbRestarter restarter; + + if (restarter.getNumDbNodes() < 2) + return NDBT_OK; + + if (restarter.restartAll(true, true, true)) + return NDBT_FAILED; + + if (restarter.waitClusterNoStart()) + return NDBT_FAILED; + + if (restarter.startAll()) + return NDBT_FAILED; + + if (restarter.waitClusterStarted()) + return NDBT_FAILED; + + int val2[] = { DumpStateOrd::CmvmiSetRestartOnErrorInsert, 1 }; + int master = restarter.getMasterNodeId(); + int victim = restarter.getRandomNodeOtherNodeGroup(master, rand()); + if (victim == -1) + victim = restarter.getRandomNodeSameNodeGroup(master, rand()); + + restarter.restartOneDbNode(victim, false, true, true); + + for (Uint32 i = 0; i<6; i++) + { + ndbout_c("Loop: %d", i); + if (restarter.waitNodesNoStart(&victim, 1)) + return NDBT_FAILED; + + if (restarter.dumpStateOneNode(victim, val2, 2)) + return NDBT_FAILED; + + if (restarter.insertErrorInNode(victim, 7016)) + return NDBT_FAILED; + + if (restarter.startNodes(&victim, 1)) + return NDBT_FAILED; + + if (restarter.waitNodesStartPhase(&victim, 1, 2)) + return NDBT_FAILED; + } + + if (restarter.waitNodesNoStart(&victim, 1)) + return NDBT_FAILED; + + if (restarter.dumpStateOneNode(victim, val2, 2)) + return NDBT_FAILED; + + if (restarter.insertErrorInNode(victim, 7170)) + return NDBT_FAILED; + + if (restarter.startNodes(&victim, 1)) + return NDBT_FAILED; + + if (restarter.waitNodesNoStart(&victim, 1)) + return NDBT_FAILED; + + if (restarter.restartAll(false, true, true)) + return NDBT_FAILED; + + if (restarter.insertErrorInAllNodes(932)) + return NDBT_FAILED; + + if (restarter.insertErrorInNode(master, 7170)) + return NDBT_FAILED; + + if (restarter.dumpStateAllNodes(val2, 2)) + return NDBT_FAILED; + + restarter.startNodes(&master, 1); + NdbSleep_MilliSleep(3000); + restarter.startAll(); + + if (restarter.waitClusterNoStart()) + return NDBT_FAILED; + + if (restarter.restartOneDbNode(victim, true, true, true)) + return NDBT_FAILED; + + if (restarter.startAll()) + return NDBT_FAILED; + + if (restarter.waitClusterStarted()) + return NDBT_FAILED; + + return NDBT_OK; +} + NDBT_TESTSUITE(testNodeRestart); TESTCASE("NoLoad", @@ -1514,6 +1609,9 @@ TESTCASE("Bug25468", ""){ TESTCASE("Bug25554", ""){ INITIALIZER(runBug25554); } +TESTCASE("Bug25984", ""){ + INITIALIZER(runBug25984); +} 
NDBT_TESTSUITE_END(testNodeRestart); int main(int argc, const char** argv){ diff --git a/storage/ndb/test/run-test/daily-basic-tests.txt b/storage/ndb/test/run-test/daily-basic-tests.txt index 8b69353d42d..c972d432375 100644 --- a/storage/ndb/test/run-test/daily-basic-tests.txt +++ b/storage/ndb/test/run-test/daily-basic-tests.txt @@ -81,6 +81,10 @@ args: -n UpdateAndRead max-time: 500 cmd: testBasic +args: -n DeleteRead + +max-time: 500 +cmd: testBasic args: -n PkReadAndLocker T6 D1 D2 max-time: 500 @@ -461,7 +465,7 @@ max-time: 500 cmd: testScan args: -n Bug24447 T1 -max-time: 500 +max-time: 1000 cmd: testScan args: -n ScanVariants @@ -521,6 +525,10 @@ max-time: 1000 cmd: testNodeRestart args: -n Bug25554 T1 +max-time: 1000 +cmd: testNodeRestart +args: -n Bug25984 + # # DICT TESTS max-time: 1500 diff --git a/storage/ndb/test/src/HugoOperations.cpp b/storage/ndb/test/src/HugoOperations.cpp index 50234cea351..188e7a9288e 100644 --- a/storage/ndb/test/src/HugoOperations.cpp +++ b/storage/ndb/test/src/HugoOperations.cpp @@ -457,7 +457,7 @@ HugoOperations::callback(int res, NdbTransaction* pCon) int HugoOperations::execute_async(Ndb* pNdb, NdbTransaction::ExecType et, - NdbTransaction::AbortOption eao){ + NdbOperation::AbortOption eao){ m_async_reply= 0; pTrans->executeAsynchPrepare(et, @@ -472,7 +472,7 @@ HugoOperations::execute_async(Ndb* pNdb, NdbTransaction::ExecType et, int HugoOperations::execute_async_prepare(Ndb* pNdb, NdbTransaction::ExecType et, - NdbTransaction::AbortOption eao){ + NdbOperation::AbortOption eao){ m_async_reply= 0; pTrans->executeAsynchPrepare(et, diff --git a/storage/ndb/test/tools/listen.cpp b/storage/ndb/test/tools/listen.cpp index 3e2bc03857a..1d142e0931d 100644 --- a/storage/ndb/test/tools/listen.cpp +++ b/storage/ndb/test/tools/listen.cpp @@ -178,6 +178,9 @@ main(int argc, const char** argv){ } } end: + for(i= 0; i<(int)event_ops.size(); i++) + MyNdb.dropEventOperation(event_ops[i]); + return NDBT_ProgramExit(NDBT_OK); } |
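To illustrate the relocated AbortOption enum in practice, here is a minimal client-side sketch, assuming the post-merge NDB API headers from this tree and a cluster table t1 with an int primary key var1 and an int column var2 (table, column, and helper names are chosen for illustration only). It issues a primary-key delete that also reads back the deleted row's columns, the pattern the new DeleteRead case in testBasic exercises, and passes the abort option to execute() as an NdbOperation::AbortOption rather than an NdbTransaction one.

#include <NdbApi.hpp>

// Hypothetical sketch: delete one row by primary key and read its columns in
// the same operation, committing with the abort option that now lives on
// NdbOperation. Names (delete_and_read_row, t1, var1, var2) are assumptions.
static int delete_and_read_row(Ndb* ndb)
{
  NdbTransaction* trans = ndb->startTransaction();
  if (trans == NULL)
    return -1;

  NdbOperation* op = trans->getNdbOperation("t1");   // table name assumed
  if (op == NULL)
  {
    ndb->closeTransaction(trans);
    return -1;
  }

  op->deleteTuple();                        // after this merge, also sets m_abortOption = AbortOnError
  op->equal("var1", 1);                     // primary key value assumed
  NdbRecAttr* var2 = op->getValue("var2");  // read back the row being deleted

  // execute() now takes NdbOperation::AbortOption instead of NdbTransaction::AbortOption
  int res = trans->execute(NdbTransaction::Commit,
                           NdbOperation::AbortOnError);

  if (res == 0 && var2 != NULL)
  {
    Uint32 old_var2 = var2->u_32_value();   // column value read from the deleted row
    (void)old_var2;
  }

  ndb->closeTransaction(trans);
  return res;
}

If execute() is instead called with NdbOperation::DefaultAbortOption, the per-operation default set by deleteTuple()/insertTuple()/readTuple() and friends applies, matching the new selection logic in NdbOperation::prepareSend().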