author     unknown <mskold@mysql.com>    2005-04-13 16:24:17 +0200
committer  unknown <mskold@mysql.com>    2005-04-13 16:24:17 +0200
commit     f0438d548731b674ba989710da0994ac752c623d
tree       a1492386b6bc2738d5f8216a4814cf7d15ec4dad
parent     5630f0731ab020471108c67e7ae962ba6eaef625
parent     f53284f1933d721e3f2a583c2849d9c1b2e2b740
download   mariadb-git-f0438d548731b674ba989710da0994ac752c623d.tar.gz
Merge
innobase/dict/dict0dict.c:
Auto merged
innobase/dict/dict0load.c:
Auto merged
innobase/include/dict0dict.h:
Auto merged
innobase/row/row0mysql.c:
Auto merged
myisam/mi_check.c:
Auto merged
myisam/mi_dynrec.c:
Auto merged
myisam/myisamdef.h:
Auto merged
mysql-test/r/kill.result:
Auto merged
mysql-test/t/kill.test:
Auto merged
ndb/src/kernel/blocks/dbdih/DbdihMain.cpp:
Auto merged
ndb/src/kernel/main.cpp:
Auto merged
ndb/src/mgmsrv/MgmtSrvr.hpp:
Auto merged
sql/ha_innodb.cc:
Auto merged
sql/mysqld.cc:
Auto merged
-rw-r--r--  innobase/dict/dict0dict.c                   19
-rw-r--r--  innobase/dict/dict0load.c                   15
-rw-r--r--  innobase/include/dict0dict.h                 3
-rw-r--r--  innobase/include/dict0load.h                 3
-rw-r--r--  innobase/row/row0mysql.c                     8
-rw-r--r--  myisam/mi_check.c                            7
-rw-r--r--  myisam/mi_dynrec.c                          11
-rw-r--r--  myisam/myisamdef.h                           2
-rw-r--r--  ndb/src/kernel/blocks/dbdih/DbdihMain.cpp   45
-rw-r--r--  ndb/src/kernel/main.cpp                      2
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvr.cpp                 12
-rw-r--r--  ndb/src/mgmsrv/MgmtSrvr.hpp                  2
-rw-r--r--  ndb/src/ndbapi/Ndb.cpp                      14
-rw-r--r--  sql/ha_innodb.cc                             4
-rw-r--r--  sql/ha_ndbcluster.cc                        28
-rw-r--r--  sql/mysqld.cc                                2

16 files changed, 123 insertions(+), 54 deletions(-)
diff --git a/innobase/dict/dict0dict.c b/innobase/dict/dict0dict.c
index f973b994055..8c9724da079 100644
--- a/innobase/dict/dict0dict.c
+++ b/innobase/dict/dict0dict.c
@@ -2201,7 +2201,8 @@ ulint
 dict_foreign_add_to_cache(
 /*======================*/
 				/* out: DB_SUCCESS or error code */
-	dict_foreign_t*	foreign)	/* in, own: foreign key constraint */
+	dict_foreign_t*	foreign,	/* in, own: foreign key constraint */
+	ibool		check_types)	/* in: TRUE=check type compatibility */
 {
 	dict_table_t*	for_table;
 	dict_table_t*	ref_table;
@@ -2237,10 +2238,16 @@ dict_foreign_add_to_cache(
 	}
 
 	if (for_in_cache->referenced_table == NULL && ref_table) {
+		dict_index_t*	types_idx;
+		if (check_types) {
+			types_idx = for_in_cache->foreign_index;
+		} else {
+			types_idx = NULL;
+		}
 		index = dict_foreign_find_index(ref_table,
 			(const char**) for_in_cache->referenced_col_names,
 			for_in_cache->n_fields,
-			for_in_cache->foreign_index);
+			types_idx);
 
 		if (index == NULL) {
 			dict_foreign_error_report(ef, for_in_cache,
@@ -2264,10 +2271,16 @@ dict_foreign_add_to_cache(
 	}
 
 	if (for_in_cache->foreign_table == NULL && for_table) {
+		dict_index_t*	types_idx;
+		if (check_types) {
+			types_idx = for_in_cache->referenced_index;
+		} else {
+			types_idx = NULL;
+		}
 		index = dict_foreign_find_index(for_table,
 			(const char**) for_in_cache->foreign_col_names,
 			for_in_cache->n_fields,
-			for_in_cache->referenced_index);
+			types_idx);
 
 		if (index == NULL) {
 			dict_foreign_error_report(ef, for_in_cache,
diff --git a/innobase/dict/dict0load.c b/innobase/dict/dict0load.c
index 289b5dab4f2..9bafcf33553 100644
--- a/innobase/dict/dict0load.c
+++ b/innobase/dict/dict0load.c
@@ -868,7 +868,7 @@ dict_load_table(
 
 	dict_load_indexes(table, heap);
 
-	err = dict_load_foreigns(table->name);
+	err = dict_load_foreigns(table->name, TRUE);
 /*
 	if (err != DB_SUCCESS) {
@@ -1089,8 +1089,9 @@ ulint
 dict_load_foreign(
 /*==============*/
 				/* out: DB_SUCCESS or error code */
-	const char*	id)	/* in: foreign constraint id as a
+	const char*	id,	/* in: foreign constraint id as a
 				null-terminated string */
+	ibool		check_types)/* in: TRUE=check type compatibility */
 {
 	dict_foreign_t*	foreign;
 	dict_table_t*	sys_foreign;
@@ -1102,7 +1103,6 @@ dict_load_foreign(
 	rec_t*		rec;
 	byte*		field;
 	ulint		len;
-	ulint		err;
 	mtr_t		mtr;
 
 #ifdef UNIV_SYNC_DEBUG
@@ -1204,9 +1204,7 @@ dict_load_foreign(
 	a new foreign key constraint but loading one from the data dictionary.
 	*/
 
-	err = dict_foreign_add_to_cache(foreign);
-
-	return(err);
+	return(dict_foreign_add_to_cache(foreign, check_types));
 }
 
 /***************************************************************************
@@ -1220,7 +1218,8 @@ ulint
 dict_load_foreigns(
 /*===============*/
 				/* out: DB_SUCCESS or error code */
-	const char*	table_name)	/* in: table name */
+	const char*	table_name,	/* in: table name */
+	ibool		check_types)	/* in: TRUE=check type compatibility */
 {
 	btr_pcur_t	pcur;
 	mem_heap_t*	heap;
@@ -1320,7 +1319,7 @@ loop:
 
 	/* Load the foreign constraint definition to the dictionary cache */
 
-	err = dict_load_foreign(id);
+	err = dict_load_foreign(id, check_types);
 
 	if (err != DB_SUCCESS) {
 		btr_pcur_close(&pcur);
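The new check_types argument works by withholding the index whose column types would otherwise have to match: judging from these hunks, dict_foreign_find_index() treats a NULL "types index" as "match on column names only". A minimal standalone sketch of that idiom (hypothetical find_index() and index type, not the real InnoDB dictionary API):

#include <stddef.h>
#include <stdio.h>

struct index_t { const char *name; };

/* Hypothetical lookup: a NULL types_idx means "do not compare column
   types", mirroring the convention the hunks above rely on. */
static struct index_t *find_index(struct index_t *idx,
                                  struct index_t *types_idx)
{
    if (types_idx != NULL) {
        /* ... type-compatibility comparison would go here ... */
    }
    return idx;
}

int main(void)
{
    struct index_t fk = { "fk_idx" };
    int check_types = 0;   /* what dict_foreign_add_to_cache() now receives */

    /* Same idiom as the patch: hand over the index whose types must
       match only when the caller asked for type checking. */
    struct index_t *found = find_index(&fk, check_types ? &fk : NULL);
    printf("found %s\n", found->name);
    return 0;
}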
diff --git a/innobase/include/dict0dict.h b/innobase/include/dict0dict.h
index fdcb6c1c4e1..56f0a158a20 100644
--- a/innobase/include/dict0dict.h
+++ b/innobase/include/dict0dict.h
@@ -189,7 +189,8 @@ ulint
 dict_foreign_add_to_cache(
 /*======================*/
 				/* out: DB_SUCCESS or error code */
-	dict_foreign_t*	foreign);	/* in, own: foreign key constraint */
+	dict_foreign_t*	foreign,	/* in, own: foreign key constraint */
+	ibool		check_types);	/* in: TRUE=check type compatibility */
 /*************************************************************************
 Checks if a table is referenced by foreign keys. */
diff --git a/innobase/include/dict0load.h b/innobase/include/dict0load.h
index 1f0a5407140..f13620bc6e8 100644
--- a/innobase/include/dict0load.h
+++ b/innobase/include/dict0load.h
@@ -81,7 +81,8 @@ ulint
 dict_load_foreigns(
 /*===============*/
 				/* out: DB_SUCCESS or error code */
-	const char*	table_name);	/* in: table name */
+	const char*	table_name,	/* in: table name */
+	ibool		check_types);	/* in: TRUE=check type compatibility */
 /************************************************************************
 Prints to the standard output information on all tables found in the
 data dictionary system table. */
diff --git a/innobase/row/row0mysql.c b/innobase/row/row0mysql.c
index b13ba056d85..7f78a5b723b 100644
--- a/innobase/row/row0mysql.c
+++ b/innobase/row/row0mysql.c
@@ -2075,7 +2075,7 @@ row_table_add_foreign_constraints(
 
 	if (err == DB_SUCCESS) {
 		/* Check that also referencing constraints are ok */
 
-		err = dict_load_foreigns(name);
+		err = dict_load_foreigns(name, trx->check_foreigns);
 	}
 
 	if (err != DB_SUCCESS) {
@@ -3784,6 +3784,8 @@ row_rename_table_for_mysql(
 		goto funct_exit;
 	}
 
+	err = dict_load_foreigns(new_name, trx->check_foreigns);
+
 	if (row_is_mysql_tmp_table_name(old_name)) {
 
 		/* MySQL is doing an ALTER TABLE command and it
@@ -3793,8 +3795,6 @@ row_rename_table_for_mysql(
 		table. But we want to load also the foreign key constraint
 		definitions for the original table name.
 		*/
-		err = dict_load_foreigns(new_name);
-
 		if (err != DB_SUCCESS) {
 			ut_print_timestamp(stderr);
 			fputs(" InnoDB: Error: in ALTER TABLE ",
@@ -3813,8 +3813,6 @@ row_rename_table_for_mysql(
 			trx->error_state = DB_SUCCESS;
 		}
 	} else {
-		err = dict_load_foreigns(new_name);
-
 		if (err != DB_SUCCESS) {
 			ut_print_timestamp(stderr);
diff --git a/myisam/mi_check.c b/myisam/mi_check.c
index dd8cc736741..2949b39183d 100644
--- a/myisam/mi_check.c
+++ b/myisam/mi_check.c
@@ -972,7 +972,8 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend)
 	  info->checksum=mi_checksum(info,record);
 	if (param->testflag & (T_EXTEND | T_MEDIUM | T_VERBOSE))
 	{
-	  if (_mi_rec_check(info,record, info->rec_buff,block_info.rec_len))
+	  if (_mi_rec_check(info,record, info->rec_buff,block_info.rec_len,
+			    test(info->s->calc_checksum)))
 	  {
 	    mi_check_print_error(param,"Found wrong packed record at %s",
 				 llstr(start_recpos,llbuff));
@@ -3025,7 +3026,9 @@ static int sort_get_next_record(MI_SORT_PARAM *sort_param)
 	if ((param->testflag & (T_EXTEND | T_REP)) || searching)
 	{
 	  if (_mi_rec_check(info, sort_param->record, sort_param->rec_buff,
-			    sort_param->find_length))
+			    sort_param->find_length,
+			    (param->testflag & T_QUICK) &&
+			    test(info->s->calc_checksum)))
 	  {
 	    mi_check_print_info(param,"Found wrong packed record at %s",
 				llstr(sort_param->start_recpos,llbuff));
diff --git a/myisam/mi_dynrec.c b/myisam/mi_dynrec.c
index 9d8e161b8fe..3a4dafade23 100644
--- a/myisam/mi_dynrec.c
+++ b/myisam/mi_dynrec.c
@@ -816,7 +816,7 @@ uint _mi_rec_pack(MI_INFO *info, register byte *to, register const byte *from)
 */
 
 my_bool _mi_rec_check(MI_INFO *info,const char *record, byte *rec_buff,
-                      ulong packed_length)
+                      ulong packed_length, my_bool with_checksum)
 {
   uint   length,new_length,flag,bit,i;
   char   *pos,*end,*packpos,*to;
@@ -920,13 +920,10 @@ my_bool _mi_rec_check(MI_INFO *info,const char *record, byte *rec_buff,
   if (packed_length != (uint) (to - rec_buff)
       + test(info->s->calc_checksum) ||
      (bit != 1 && (flag & ~(bit - 1))))
    goto err;
-  if (info->s->calc_checksum)
+  if (with_checksum && ((uchar) info->checksum != (uchar) *to))
   {
-    if ((uchar) info->checksum != (uchar) *to)
-    {
-      DBUG_PRINT("error",("wrong checksum for row"));
-      goto err;
-    }
+    DBUG_PRINT("error",("wrong checksum for row"));
+    goto err;
   }
   DBUG_RETURN(0);
diff --git a/myisam/myisamdef.h b/myisam/myisamdef.h
index b66d7e71a05..a2d3dedf6df 100644
--- a/myisam/myisamdef.h
+++ b/myisam/myisamdef.h
@@ -584,7 +584,7 @@ extern byte *mi_alloc_rec_buff(MI_INFO *,ulong, byte**);
 extern ulong _mi_rec_unpack(MI_INFO *info,byte *to,byte *from,
			     ulong reclength);
 extern my_bool _mi_rec_check(MI_INFO *info,const char *record, byte *packpos,
-			     ulong reclength);
+			     ulong packed_length, my_bool with_checkum);
 extern int _mi_write_part_record(MI_INFO *info,my_off_t filepos,ulong length,
				  my_off_t next_filepos,byte **record,
				  ulong *reclength,int *flag);
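The MyISAM hunks move the "should the checksum be verified?" decision out of _mi_rec_check() and into its callers: chk_data_link() keeps verifying whenever the table carries checksums, while sort_get_next_record() now additionally requires T_QUICK. A condensed sketch of the shape of that refactor, with hypothetical names standing in for the MyISAM internals:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the packed-record check: the caller now
   decides whether the stored checksum is compared at all. */
static bool rec_check(unsigned stored_chk, unsigned computed_chk,
                      bool with_checksum)
{
    if (with_checksum && stored_chk != computed_chk)
        return false;          /* corrupt record */
    return true;               /* record looks consistent */
}

int main(void)
{
    bool table_has_checksum = true;   /* info->s->calc_checksum != 0 */
    bool quick_repair = false;        /* param->testflag & T_QUICK */

    /* chk_data_link(): compare whenever the table has checksums. */
    printf("check path:  %d\n",
           rec_check(0x5a, 0x5a, table_has_checksum));

    /* sort_get_next_record(): the patch additionally gates the
       comparison on T_QUICK, so here the mismatch is ignored. */
    printf("repair path: %d\n",
           rec_check(0x5a, 0x00, quick_repair && table_has_checksum));
    return 0;
}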
diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index af75707560a..b2ed7acd347 100644
--- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -69,6 +69,9 @@
 #include <signaldata/FsOpenReq.hpp>
 #include <DebuggerNames.hpp>
 
+#include <EventLogger.hpp>
+extern EventLogger g_eventLogger;
+
 #define SYSFILE ((Sysfile *)&sysfileData[0])
 
 #define RETURN_IF_NODE_NOT_ALIVE(node) \
@@ -13131,6 +13134,48 @@ Dbdih::execDUMP_STATE_ORD(Signal* signal)
       }
     }
   }
+
+  if(dumpState->args[0] == 7019 && signal->getLength() == 2)
+  {
+    char buf2[8+1];
+    NodeRecordPtr nodePtr;
+    nodePtr.i = signal->theData[1];
+    ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRecord);
+    infoEvent("NF Node %d tc: %d lqh: %d dih: %d dict: %d recNODE_FAILREP: %d",
+              nodePtr.i,
+              nodePtr.p->dbtcFailCompleted,
+              nodePtr.p->dblqhFailCompleted,
+              nodePtr.p->dbdihFailCompleted,
+              nodePtr.p->dbdictFailCompleted,
+              nodePtr.p->recNODE_FAILREP);
+    infoEvent(" m_NF_COMPLETE_REP: %s m_nodefailSteps: %s",
+              nodePtr.p->m_NF_COMPLETE_REP.getText(),
+              nodePtr.p->m_nodefailSteps.getText(buf2));
+  }
+
+  if(dumpState->args[0] == 7020 && signal->getLength() > 3)
+  {
+    Uint32 gsn= signal->theData[1];
+    Uint32 block= signal->theData[2];
+    Uint32 length= signal->length() - 3;
+    memmove(signal->theData, signal->theData+3, 4*length);
+    sendSignal(numberToRef(block, getOwnNodeId()), gsn, signal, length, JBB);
+
+    warningEvent("-- SENDING CUSTOM SIGNAL --");
+    char buf[100], buf2[100];
+    buf2[0]= 0;
+    for(Uint32 i = 0; i<length; i++)
+    {
+      snprintf(buf, 100, "%s %.8x", buf2, signal->theData[i]);
+      snprintf(buf2, 100, "%s", buf);
+    }
+    warningEvent("gsn: %d block: %s, length: %d theData: %s",
+                 gsn, getBlockName(block, "UNKNOWN"), length, buf);
+
+    g_eventLogger.warning("-- SENDING CUSTOM SIGNAL --");
+    g_eventLogger.warning("gsn: %d block: %s, length: %d theData: %s",
+                          gsn, getBlockName(block, "UNKNOWN"), length, buf);
+  }
 
   if(dumpState->args[0] == DumpStateOrd::DihDumpLCPState){
     infoEvent("-- Node %d LCP STATE --", getOwnNodeId());
diff --git a/ndb/src/kernel/main.cpp b/ndb/src/kernel/main.cpp
index bd15ef37e20..543757153d3 100644
--- a/ndb/src/kernel/main.cpp
+++ b/ndb/src/kernel/main.cpp
@@ -61,7 +61,9 @@ int main(int argc, char** argv)
   // Print to stdout/console
   g_eventLogger.createConsoleHandler();
   g_eventLogger.setCategory("NDB");
+  g_eventLogger.enable(Logger::LL_ON, Logger::LL_CRITICAL);
   g_eventLogger.enable(Logger::LL_ON, Logger::LL_ERROR);
+  g_eventLogger.enable(Logger::LL_ON, Logger::LL_WARNING);
 
   globalEmulatorData.create();
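Both new handlers are driven by the management server's DUMP mechanism, which delivers its numeric arguments in signal->theData[]: code 7019 takes a node id and prints that node's failure-handling progress via infoEvent(), while code 7020 re-sends the remaining words as an arbitrary signal to a local block and logs what it did at warning level — which is why the main.cpp hunk above also enables LL_WARNING (and LL_CRITICAL) on the console logger. Assuming the usual ndb_mgm DUMP syntax, where the client simply forwards the numbers, the 7019 path would be exercised with something like ALL DUMP 7019 2 (2 being a hypothetical data node id).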
diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp
index 21c3c4d614d..86a5f3e8f4b 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.cpp
@@ -2078,7 +2078,7 @@ MgmtSrvr::handleStopReply(NodeId nodeId, Uint32 errCode)
 }
 
 void
-MgmtSrvr::handleStatus(NodeId nodeId, bool alive)
+MgmtSrvr::handleStatus(NodeId nodeId, bool alive, bool nfComplete)
 {
   DBUG_ENTER("MgmtSrvr::handleStatus");
   Uint32 theData[25];
@@ -2087,9 +2087,14 @@ MgmtSrvr::handleStatus(NodeId nodeId, bool alive)
     m_started_nodes.push_back(nodeId);
     theData[0] = NDB_LE_Connected;
   } else {
-    handleStopReply(nodeId, 0);
     theData[0] = NDB_LE_Disconnected;
+    if(nfComplete)
+    {
+      handleStopReply(nodeId, 0);
+      DBUG_VOID_RETURN;
+    }
   }
+
   eventReport(_ownNodeId, theData);
   DBUG_VOID_RETURN;
 }
@@ -2114,8 +2119,7 @@ MgmtSrvr::nodeStatusNotification(void* mgmSrv, Uint32 nodeId,
 {
   DBUG_ENTER("MgmtSrvr::nodeStatusNotification");
   DBUG_PRINT("enter",("nodeid= %d, alive= %d, nfComplete= %d",
                       nodeId, alive, nfComplete));
-  if(!(!alive && nfComplete))
-    ((MgmtSrvr*)mgmSrv)->handleStatus(nodeId, alive);
+  ((MgmtSrvr*)mgmSrv)->handleStatus(nodeId, alive, nfComplete);
   DBUG_VOID_RETURN;
 }
diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp
index 95298630230..637f54f74a8 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.hpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.hpp
@@ -584,7 +584,7 @@ private:
   // Returns: -
   //**************************************************************************
 
-  void handleStatus(NodeId nodeId, bool alive);
+  void handleStatus(NodeId nodeId, bool alive, bool nfComplete);
 
   //**************************************************************************
   // Description: Handle the death of a process
   // Parameters:
diff --git a/ndb/src/ndbapi/Ndb.cpp b/ndb/src/ndbapi/Ndb.cpp
index 651415cdc00..c01ce823dc0 100644
--- a/ndb/src/ndbapi/Ndb.cpp
+++ b/ndb/src/ndbapi/Ndb.cpp
@@ -761,7 +761,7 @@ Ndb::getAutoIncrementValue(const char* aTableName, Uint32 cacheSize)
   Ndb_local_table_info *info=
     theDictionary->get_local_table_info(internalTableName, false);
   if (info == 0)
-    DBUG_RETURN(~0);
+    DBUG_RETURN(~(Uint64)0);
   const NdbTableImpl *table= info->m_table_impl;
   Uint64 tupleId = getTupleIdFromNdb(table->m_tableId, cacheSize);
   DBUG_PRINT("info", ("value %u", tupleId));
@@ -773,7 +773,7 @@ Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable, Uint32 cacheSize
 {
   DBUG_ENTER("getAutoIncrementValue");
   if (aTable == 0)
-    DBUG_RETURN(~0);
+    DBUG_RETURN(~(Uint64)0);
   const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
   Uint64 tupleId = getTupleIdFromNdb(table->m_tableId, cacheSize);
   DBUG_PRINT("info", ("value %u", tupleId));
@@ -785,7 +785,7 @@ Ndb::getTupleIdFromNdb(const char* aTableName, Uint32 cacheSize)
 {
   const NdbTableImpl* table = theDictionary->getTable(aTableName);
   if (table == 0)
-    return ~0;
+    return ~(Uint64)0;
   return getTupleIdFromNdb(table->m_tableId, cacheSize);
 }
 
@@ -814,7 +814,7 @@ Ndb::readAutoIncrementValue(const char* aTableName)
   const NdbTableImpl* table = theDictionary->getTable(aTableName);
   if (table == 0) {
     theError= theDictionary->getNdbError();
-    DBUG_RETURN(~0);
+    DBUG_RETURN(~(Uint64)0);
   }
   Uint64 tupleId = readTupleIdFromNdb(table->m_tableId);
   DBUG_PRINT("info", ("value %u", tupleId));
@@ -826,7 +826,7 @@ Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable)
 {
   DBUG_ENTER("readtAutoIncrementValue");
   if (aTable == 0)
-    DBUG_RETURN(~0);
+    DBUG_RETURN(~(Uint64)0);
   const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
   Uint64 tupleId = readTupleIdFromNdb(table->m_tableId);
   DBUG_PRINT("info", ("value %u", tupleId));
@@ -863,7 +863,7 @@ Ndb::setAutoIncrementValue(const NdbDictionary::Table * aTable, Uint64 val, bool
 {
   DEBUG_TRACE("setAutoIncrementValue " << val);
   if (aTable == 0)
-    return ~0;
+    return ~(Uint64)0;
   const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
   return setTupleIdInNdb(table->m_tableId, val, increase);
 }
@@ -1018,7 +1018,7 @@ Ndb::opTupleIdOnNdb(Uint32 aTableId, Uint64 opValue, Uint32 op)
                              theError.code,
                              tConnection ? tConnection->theError.code : -1,
                              tOperation ? tOperation->theError.code : -1));
-  DBUG_RETURN(~0);
+  DBUG_RETURN(~(Uint64)0);
 }
 
 Uint32
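A note on the repeated ~0 → ~(Uint64)0 change in Ndb.cpp: ~0 has type int and value -1, and these functions return Uint64, so the old code relied on the implicit signed-to-unsigned conversion to produce the all-ones "error" sentinel. The stored value is the same either way, as the small standalone check below shows (plain <stdint.h> types rather than the NDB typedefs); the explicit cast mostly documents that the sentinel really is the 64-bit maximum and avoids sign-conversion warnings.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t a = ~0;            /* int -1, implicitly converted: all 64 bits set */
    uint64_t b = ~(uint64_t)0;  /* explicit 64-bit all-ones sentinel */

    printf("%llx\n", (unsigned long long)a);   /* ffffffffffffffff */
    printf("%llx\n", (unsigned long long)b);   /* ffffffffffffffff */
    printf("equal: %d\n", a == b);             /* 1 */
    return 0;
}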
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index bc4cc1267e0..3132e6c4c76 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -4673,6 +4673,10 @@ ha_innobase::rename_table(
 	trx->mysql_thd = current_thd;
 	trx->mysql_query_str = &((*current_thd).query);
 
+	if (current_thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) {
+		trx->check_foreigns = FALSE;
+	}
+
 	name_len1 = strlen(from);
 	name_len2 = strlen(to);
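Combined with the row0mysql.c hunks earlier in this diff, the effect is that a table rename performed while the session has foreign key checks disabled carries that choice all the way down to the dictionary loader: the handler clears trx->check_foreigns, and row_rename_table_for_mysql() forwards it to dict_load_foreigns() as check_types. A compressed sketch of that plumbing, using hypothetical stand-ins rather than the real THD and trx_t structures:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the session flag, the transaction object
   and the dictionary loader. */
#define OPTION_NO_FK_CHECKS (1u << 0)

struct trx { bool check_foreigns; };

static int load_foreigns(const char *table, bool check_types)
{
    /* The real dict_load_foreigns() reloads referencing constraints,
       comparing column types only when check_types is set. */
    printf("loading FKs for %s, type checks %s\n",
           table, check_types ? "on" : "off");
    return 0;
}

static int rename_table(unsigned session_options, struct trx *trx,
                        const char *new_name)
{
    if (session_options & OPTION_NO_FK_CHECKS)   /* ha_innodb.cc hunk */
        trx->check_foreigns = false;
    return load_foreigns(new_name, trx->check_foreigns); /* row0mysql.c */
}

int main(void)
{
    struct trx t = { true };
    rename_table(OPTION_NO_FK_CHECKS, &t, "test/t1");
    return 0;
}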
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 70e6e1d98f8..a522d6256fb 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -1976,7 +1976,7 @@ int ha_ndbcluster::write_row(byte *record)
     m_rows_inserted++;
     no_uncommitted_rows_update(1);
     m_bulk_insert_not_flushed= TRUE;
-    if ((m_rows_to_insert == 1) ||
+    if ((m_rows_to_insert == (ha_rows) 1) ||
        ((m_rows_inserted % m_bulk_insert_rows) == 0) ||
        set_blob_value)
     {
@@ -2992,8 +2992,8 @@ void ha_ndbcluster::start_bulk_insert(ha_rows rows)
   DBUG_ENTER("start_bulk_insert");
   DBUG_PRINT("enter", ("rows: %d", (int)rows));
 
-  m_rows_inserted= 0;
-  if (rows == 0)
+  m_rows_inserted= (ha_rows) 0;
+  if (rows == (ha_rows) 0)
     /* We don't know how many will be inserted, guess */
     m_rows_to_insert= m_autoincrement_prefetch;
   else
@@ -3031,7 +3031,7 @@ int ha_ndbcluster::end_bulk_insert()
     // Send rows to NDB
     DBUG_PRINT("info", ("Sending inserts to NDB, "\
                         "rows_inserted:%d, bulk_insert_rows: %d",
-                        m_rows_inserted, m_bulk_insert_rows));
+                        (int) m_rows_inserted, (int) m_bulk_insert_rows));
     m_bulk_insert_not_flushed= FALSE;
     if (execute_no_commit(this,trans) != 0) {
       no_uncommitted_rows_execute_failure();
@@ -3039,8 +3039,8 @@ int ha_ndbcluster::end_bulk_insert()
     }
   }
 
-  m_rows_inserted= 0;
-  m_rows_to_insert= 1;
+  m_rows_inserted= (ha_rows) 0;
+  m_rows_to_insert= (ha_rows) 1;
   DBUG_RETURN(error);
 }
@@ -3222,7 +3222,8 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
     // store thread specific data first to set the right context
     m_force_send= thd->variables.ndb_force_send;
     m_ha_not_exact_count= !thd->variables.ndb_use_exact_count;
-    m_autoincrement_prefetch= thd->variables.ndb_autoincrement_prefetch_sz;
+    m_autoincrement_prefetch=
+      (ha_rows) thd->variables.ndb_autoincrement_prefetch_sz;
     if (!thd->transaction.on)
       m_transaction_on= FALSE;
     else
@@ -3746,7 +3747,7 @@ static int create_ndb_column(NDBCOL &col,
 
 static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length)
 {
-  if (form->s->max_rows == 0) /* default setting, don't set fragmentation */
+  if (form->s->max_rows == (ha_rows) 0) /* default setting, don't set fragmentation */
     return;
   /**
    * get the number of fragments right
@@ -4112,6 +4113,7 @@ ulonglong ha_ndbcluster::get_auto_increment()
       /* We guessed too low */
       m_rows_to_insert+= m_autoincrement_prefetch;
   cache_size=
+    (int)
    (m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ?
     m_rows_to_insert - m_rows_inserted :
    (m_rows_to_insert > m_autoincrement_prefetch) ?
@@ -4148,10 +4150,10 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
   m_primary_key_update(FALSE),
   m_retrieve_all_fields(FALSE),
   m_retrieve_primary_key(FALSE),
-  m_rows_to_insert(1),
-  m_rows_inserted(0),
-  m_bulk_insert_rows(1024),
-  m_rows_changed(0),
+  m_rows_to_insert((ha_rows) 1),
+  m_rows_inserted((ha_rows) 0),
+  m_bulk_insert_rows((ha_rows) 1024),
+  m_rows_changed((ha_rows) 0),
   m_bulk_insert_not_flushed(FALSE),
   m_ops_pending(0),
   m_skip_auto_increment(TRUE),
@@ -4161,7 +4163,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
   m_dupkey((uint) -1),
   m_ha_not_exact_count(FALSE),
   m_force_send(TRUE),
-  m_autoincrement_prefetch(32),
+  m_autoincrement_prefetch((ha_rows) 32),
   m_transaction_on(TRUE),
   m_cond_stack(NULL),
   m_multi_cursor(NULL)
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 2c44a789c79..15f40ebd288 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -4707,7 +4707,7 @@ Disable with --skip-ndbcluster (will save memory).",
    "Specify number of autoincrement values that are prefetched.",
    (gptr*) &global_system_variables.ndb_autoincrement_prefetch_sz,
    (gptr*) &global_system_variables.ndb_autoincrement_prefetch_sz,
-   0, GET_INT, REQUIRED_ARG, 32, 1, 256, 0, 0, 0},
+   0, GET_ULONG, REQUIRED_ARG, 32, 1, 256, 0, 0, 0},
   {"ndb-force-send", OPT_NDB_FORCE_SEND,
    "Force send of buffers to ndb immediately without waiting for "
    "other threads.",
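One more portability detail from the end_bulk_insert() hunk above: the ha_rows counters are cast to (int) before being handed to a %d conversion. ha_rows is wider than int in these sources, and passing a wider integer through varargs to %d is undefined behaviour, so the argument has to be narrowed explicitly (as the patch does) or the format widened; the %u used with the Uint64 tupleId values visible in the Ndb.cpp context lines has the same problem but is left untouched here. A minimal illustration with plain C types rather than the MySQL typedefs:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
    uint64_t rows = 1024;   /* stands in for an ha_rows counter */

    /* printf("%d", rows) would be undefined: %d expects an int. */
    printf("rows: %d\n", (int) rows);       /* narrow explicitly, as the patch does */
    printf("rows: %" PRIu64 "\n", rows);    /* or widen the format instead */
    return 0;
}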