| field | value | date |
|---|---|---|
| author | unknown <monty@mysql.com/narttu.mysql.fi> | 2007-01-29 01:47:35 +0200 |
| committer | unknown <monty@mysql.com/narttu.mysql.fi> | 2007-01-29 01:47:35 +0200 |
| commit | f40e0cc0d0606c3f06151763a6445bfa4682eb77 (patch) | |
| tree | 9a81ea8e9e695584f7915cc104eda630d7b98bc8 /sql/ha_ndbcluster.cc | |
| parent | 36b058929ffbbf132a4a512ec8c3a6e937309387 (diff) | |
| download | mariadb-git-f40e0cc0d0606c3f06151763a6445bfa4682eb77.tar.gz | |
After merge fixes
Removed a lot of compiler warnings
Removed not used variables, functions and labels
Initialize some variables that could be used uninitialized (fatal bugs)
%llu -> %lu (portability fix)
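
The `%llu -> %lu` change deserves a note: not every printf implementation on the platforms MySQL 5.1 still targeted accepted `%llu`, so the patch casts 64-bit values down to `ulong` and prints them with the universally supported `%lu`. A minimal sketch of the idiom, with stand-in typedefs rather than the real my_global.h/NDB definitions:

```cpp
#include <cstdio>

typedef unsigned long long Uint64; // stand-in for NDB's 64-bit type
typedef unsigned long ulong;       // stand-in for MySQL's ulong alias

int main()
{
  Uint64 table_rows= 42;

  // Not portable at the time: some printf implementations lacked %llu.
  // printf("row_count: %llu\n", table_rows);

  // The commit's idiom: cast to ulong and print with %lu. Values above
  // 2^32-1 are truncated on 32-bit platforms, which is acceptable for
  // DBUG_PRINT-style diagnostic output.
  printf("row_count: %lu\n", (ulong) table_rows);
  return 0;
}
```

The same cast appears in `records_in_range()` and `print_share()` in the diff below.
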
BitKeeper/etc/ignore:
added storage/archive/archive_reader
BUILD/SETUP.sh:
ccache now works again
BUILD/compile-pentium-gcov:
Added marker that we are using gcov and need special version of ccache
client/mysql_upgrade.c:
after merge fixes
client/mysqlbinlog.cc:
after merge fixes
client/mysqldump.c:
Removed compiler warnings
client/mysqlimport.c:
Removed compiler warnings
client/mysqltest.c:
Removed compiler warnings
mysql-test/t/mysqlcheck.test:
After merge fixes
mysys/my_bitmap.c:
After merge fix
sql/event_data_objects.cc:
Removed not used variable
sql/event_db_repository.cc:
Removed not used variable
sql/event_queue.cc:
Removed not used variable
sql/field.cc:
After merge fixes
sql/filesort.cc:
Added missing initialization (could cause core dump on EOM)
sql/ha_ndbcluster.cc:
After merge fixes
Removed not used variables
false -> FALSE
true -> TRUE
%llu -> %lu (portability fix)
Fixed bug where field could be used uninitialized in build_scan_filter_predicate() (see the sketch after this file list)
sql/ha_ndbcluster_binlog.cc:
Removed not used label
sql/ha_partition.cc:
Removed not used variables
sql/handler.cc:
Removed not used variable & function
sql/item.cc:
After merge fixes
sql/item_cmpfunc.cc:
Removed not used variable
sql/item_func.cc:
Removed compiler warning
sql/item_xmlfunc.cc:
Removed not used variables & declarations
sql/log.cc:
Removed compiler warnings
Removed not used variables & label
sql/log.h:
After merge fixes
sql/log_event.cc:
Removed not used variable & function
sql/mysqld.cc:
After merge fixes
sql/opt_range.cc:
Removed not used declaration
sql/partition_info.cc:
Removed not used variable
sql/protocol.cc:
Removed compiler warnings
sql/set_var.cc:
Removed not used variable
sql/set_var.h:
After merge fix
sql/slave.cc:
After merge fixes
sql/slave.h:
Moved wrongly placed declaration to slave.cc
sql/sp.cc:
Fixed format of DBUG_PRINT
sql/sp_head.cc:
After merge fixes
sql/spatial.cc:
Added DBUG_ASSERT() to verify that LINT_INIT is right (see the sketch after this file list)
sql/sql_class.cc:
Removed not used variables
sql/sql_insert.cc:
After merge fixes
sql/sql_parse.cc:
Removed not used variable
After merge fixes
sql/sql_partition.cc:
Removed not used variables
sql/sql_plugin.cc:
Removed compiler warnings when compiling embedded server
sql/sql_servers.cc:
Removed not used variables
Moved wrongly placed call to use_all_columns()
sql/sql_servers.h:
Moved declaration to the right file (sql_servers.cc)
sql/sql_show.cc:
Removed not used variables and function
After merge fixes
sql/sql_table.cc:
Removed not used variable
sql/sql_yacc.yy:
Removed not used variables
Lex -> lex
sql/table.cc:
Indentation fix
storage/archive/ha_archive.cc:
After merge fixes
storage/example/ha_example.cc:
Indentation fixes
storage/federated/ha_federated.cc:
Removed not used variables
storage/myisam/mi_rkey.c:
Added 0x before address
storage/myisammrg/ha_myisammrg.cc:
Removed old declaration
storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp:
After merge fixes
storage/ndb/include/util/SimpleProperties.hpp:
After merge fixes
storage/ndb/src/common/debugger/EventLogger.cpp:
Removed not used function
storage/ndb/src/kernel/blocks/suma/Suma.cpp:
Removed compiler warnings
Removed not used variables
storage/ndb/src/mgmsrv/MgmtSrvr.cpp:
After merge fixes
Removed not used variables
storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp:
Removed not used variables
storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp:
Removed not used variables
storage/ndb/src/ndbapi/NdbOperationDefine.cpp:
Removed not used variables and label
storage/ndb/src/ndbapi/NdbOperationSearch.cpp:
Removed not used label
storage/ndb/src/ndbapi/SignalSender.cpp:
Removed not used function
storage/ndb/src/ndbapi/TransporterFacade.cpp:
Removed not used variables
storage/ndb/src/ndbapi/ndb_cluster_connection.cpp:
Moved static declaration from header file
storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp:
Moved static declaration from header file
support-files/compiler_warnings.supp:
Removed some warnings from ndb
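
Several of the notes above (sql/filesort.cc, sql/spatial.cc, and the `build_scan_filter_predicate()` fix in sql/ha_ndbcluster.cc) concern the same class of bug: a variable whose initialization is only guaranteed by a control-flow assumption, sometimes hidden behind `LINT_INIT()`, which silences the "may be used uninitialized" warning without making the value valid. The sketch below is a hypothetical reduction of that pattern, not the actual server code; it shows the two defenses this commit applies: assign on every path (including early exits) and back the assumption with `DBUG_ASSERT()`.

```cpp
#include <cassert>
#include <cstddef>

// Simplified stand-in for MySQL's DBUG_ASSERT (my_dbug.h).
#define DBUG_ASSERT(A) assert(A)

enum Ndb_item_type { NDB_FIELD, NDB_VALUE };
struct Ndb_item { Ndb_item_type type; };

// Hypothetical reduction of the build_scan_filter_predicate() bug: the
// two-argument case could take an early 'break' that left 'field' unset.
static Ndb_item *pick_field(Ndb_item *a, Ndb_item *b, int argument_count)
{
  Ndb_item *field;                 // deliberately no initializer
  switch (argument_count) {
  case 1:
    field= (a->type == NDB_FIELD) ? a : NULL;
    break;
  case 2:
    if (b == NULL)
    {
      field= NULL;                 // the fix: set it on the early exit too
      break;
    }
    field= ((a->type == NDB_FIELD) ? a :
            (b->type == NDB_FIELD) ? b :
            NULL);
    break;
  default:
    field= NULL;                   // keep compiler happy
  }
  // Back the "field is always set here" assumption with an assertion,
  // in the spirit of the DBUG_ASSERT() added next to LINT_INIT in
  // sql/spatial.cc.
  DBUG_ASSERT(field == NULL || field->type == NDB_FIELD);
  return field;
}

int main()
{
  Ndb_item a= { NDB_FIELD };
  Ndb_item v= { NDB_VALUE };
  return pick_field(&a, &v, 2) == &a ? 0 : 1;
}
```

The corresponding real change is visible in the `@@ -9607,25 +9598,24 @@` hunk of the diff below, where `LINT_INIT(field)` is removed in favour of explicit assignments.
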
Diffstat (limited to 'sql/ha_ndbcluster.cc')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | sql/ha_ndbcluster.cc | 282 |

1 file changed, 136 insertions, 146 deletions
```diff
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 4738fbb22f9..36fe6457167 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -150,7 +150,6 @@ static byte *ndbcluster_get_key(NDB_SHARE *share,uint *length,
 #ifdef HAVE_NDB_BINLOG
 static int rename_share(NDB_SHARE *share, const char *new_key);
 #endif
-static void ndb_set_fragmentation(NDBTAB &tab, TABLE *table, uint pk_len);
 static int ndb_get_table_statistics(ha_ndbcluster*, bool, Ndb*,
                                     const NDBTAB *,
                                     struct Ndb_statistics *);
@@ -451,7 +450,7 @@ ha_rows ha_ndbcluster::records()
     Ndb *ndb= get_ndb();
     ndb->setDatabaseName(m_dbname);
     struct Ndb_statistics stat;
-    if (ndb_get_table_statistics(this, true, ndb, m_table, &stat) == 0)
+    if (ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat) == 0)
     {
       retval= stat.row_count;
     }
@@ -462,9 +461,9 @@ ha_rows ha_ndbcluster::records()
   THD *thd= current_thd;
   if (get_thd_ndb(thd)->error)
-    info->no_uncommitted_rows_count= 0;
+    local_info->no_uncommitted_rows_count= 0;
 
-  DBUG_RETURN(retval + info->no_uncommitted_rows_count);
+  DBUG_RETURN(retval + local_info->no_uncommitted_rows_count);
 }
 
 int ha_ndbcluster::records_update()
@@ -482,7 +481,7 @@ int ha_ndbcluster::records_update()
     Ndb *ndb= get_ndb();
     struct Ndb_statistics stat;
     ndb->setDatabaseName(m_dbname);
-    result= ndb_get_table_statistics(this, true, ndb, m_table, &stat);
+    result= ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat);
     if (result == 0)
     {
       stats.mean_rec_length= stat.row_size;
@@ -955,7 +954,6 @@ int ha_ndbcluster::get_ndb_partition_id(NdbOperation *ndb_op)
 
 bool ha_ndbcluster::uses_blob_value()
 {
-  uint blob_fields;
   MY_BITMAP *bitmap;
   uint *blob_index, *blob_index_end;
   if (table_share->blob_fields == 0)
@@ -1105,7 +1103,6 @@ int ha_ndbcluster::create_indexes(Ndb *ndb, TABLE *tab)
   const char *index_name;
   KEY* key_info= tab->key_info;
   const char **key_name= tab->s->keynames.type_names;
-  NDBDICT *dict= ndb->getDictionary();
   DBUG_ENTER("ha_ndbcluster::create_indexes");
 
   for (i= 0; i < tab->s->keys; i++, key_info++, key_name++)
@@ -1243,7 +1240,6 @@ int ha_ndbcluster::open_indexes(Ndb *ndb, TABLE *tab, bool ignore_error)
   int error= 0;
   THD *thd=current_thd;
   NDBDICT *dict= ndb->getDictionary();
-  const char *index_name;
   KEY* key_info= tab->key_info;
   const char **key_name= tab->s->keynames.type_names;
   DBUG_ENTER("ha_ndbcluster::open_indexes");
@@ -1255,9 +1251,9 @@ int ha_ndbcluster::open_indexes(Ndb *ndb, TABLE *tab, bool ignore_error)
       m_index[i].index= m_index[i].unique_index= NULL;
     else
       break;
-    m_index[i].null_in_unique_index= false;
+    m_index[i].null_in_unique_index= FALSE;
     if (check_index_fields_not_null(key_info))
-      m_index[i].null_in_unique_index= true;
+      m_index[i].null_in_unique_index= TRUE;
   }
 
   if (error && !ignore_error)
@@ -1293,7 +1289,6 @@ void ha_ndbcluster::renumber_indexes(Ndb *ndb, TABLE *tab)
   const char *index_name;
   KEY* key_info= tab->key_info;
   const char **key_name= tab->s->keynames.type_names;
-  NDBDICT *dict= ndb->getDictionary();
   DBUG_ENTER("ha_ndbcluster::renumber_indexes");
 
   for (i= 0; i < tab->s->keys; i++, key_info++, key_name++)
@@ -1410,10 +1405,10 @@ bool ha_ndbcluster::check_index_fields_not_null(KEY* key_info)
   {
     Field* field= key_part->field;
     if (field->maybe_null())
-      DBUG_RETURN(true);
+      DBUG_RETURN(TRUE);
   }
 
-  DBUG_RETURN(false);
+  DBUG_RETURN(FALSE);
 }
 
 void ha_ndbcluster::release_metadata(THD *thd, Ndb *ndb)
@@ -1731,7 +1726,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf,
       ERR_RETURN(trans->getNdbError());
   }
 
-  if (execute_no_commit_ie(this,trans,false) != 0)
+  if (execute_no_commit_ie(this,trans,FALSE) != 0)
   {
     table->status= STATUS_NOT_FOUND;
     DBUG_RETURN(ndb_err(trans));
@@ -1796,7 +1791,7 @@ int ha_ndbcluster::complemented_read(const byte *old_data, byte *new_data,
     }
   }
 
-  if (execute_no_commit(this,trans,false) != 0)
+  if (execute_no_commit(this,trans,FALSE) != 0)
   {
     table->status= STATUS_NOT_FOUND;
     DBUG_RETURN(ndb_err(trans));
@@ -1842,7 +1837,7 @@ bool ha_ndbcluster::check_all_operations_for_error(NdbTransaction *trans,
     if (err.status != NdbError::Success)
     {
       if (ndb_to_mysql_error(&err) != (int) errcode)
-        DBUG_RETURN(false);
+        DBUG_RETURN(FALSE);
       if (op == last) break;
       op= trans->getNextCompletedOperation(op);
     }
@@ -1873,10 +1868,10 @@ bool ha_ndbcluster::check_all_operations_for_error(NdbTransaction *trans,
         if (errcode == HA_ERR_KEY_NOT_FOUND)
           m_dupkey= table->s->primary_key;
       }
-      DBUG_RETURN(false);
+      DBUG_RETURN(FALSE);
     }
   }
-  DBUG_RETURN(true);
+  DBUG_RETURN(TRUE);
 }
@@ -1954,7 +1949,7 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record,
   }
   last= trans->getLastDefinedOperation();
   if (first)
-    res= execute_no_commit_ie(this,trans,false);
+    res= execute_no_commit_ie(this,trans,FALSE);
   else
   {
     // Table has no keys
@@ -2003,7 +1998,7 @@ int ha_ndbcluster::unique_index_read(const byte *key,
   if ((res= define_read_attrs(buf, op)))
     DBUG_RETURN(res);
 
-  if (execute_no_commit_ie(this,trans,false) != 0)
+  if (execute_no_commit_ie(this,trans,FALSE) != 0)
   {
     table->status= STATUS_NOT_FOUND;
     DBUG_RETURN(ndb_err(trans));
@@ -2036,13 +2031,13 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor)
       if (!(op= m_active_cursor->lockCurrentTuple()))
       {
        /* purecov: begin inspected */
-        m_lock_tuple= false;
+        m_lock_tuple= FALSE;
        ERR_RETURN(con_trans->getNdbError());
        /* purecov: end */
      }
      m_ops_pending++;
    }
-    m_lock_tuple= false;
+    m_lock_tuple= FALSE;
    bool contact_ndb= m_lock.type < TL_WRITE_ALLOW_WRITE &&
                      m_lock.type != TL_READ_WITH_SHARED_LOCKS;;
@@ -2053,7 +2048,7 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor)
      */
      if (m_ops_pending && m_blobs_pending)
      {
-        if (execute_no_commit(this,trans,false) != 0)
+        if (execute_no_commit(this,trans,FALSE) != 0)
          DBUG_RETURN(ndb_err(trans));
        m_ops_pending= 0;
        m_blobs_pending= FALSE;
@@ -2085,7 +2080,7 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor)
      {
        if (m_transaction_on)
        {
-          if (execute_no_commit(this,trans,false) != 0)
+          if (execute_no_commit(this,trans,FALSE) != 0)
            DBUG_RETURN(-1);
        }
        else
@@ -2366,7 +2361,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
   bool need_pk = (lm == NdbOperation::LM_Read);
   if (!(op= trans->getNdbIndexScanOperation(m_index[active_index].index,
                                             m_table)) ||
-      op->readTuples(lm, 0, parallelism, sorted, descending, false, need_pk))
+      op->readTuples(lm, 0, parallelism, sorted, descending, FALSE, need_pk))
     ERR_RETURN(trans->getNdbError());
   if (m_use_partition_function && part_spec != NULL &&
       part_spec->start_part == part_spec->end_part)
@@ -2388,7 +2383,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
   {
     const key_range *keys[2]= { start_key, end_key };
-    res= set_bounds(op, active_index, false, keys);
+    res= set_bounds(op, active_index, FALSE, keys);
     if (res)
       DBUG_RETURN(res);
   }
@@ -2412,7 +2407,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key,
       ERR_RETURN(trans->getNdbError());
   }
 
-  if (execute_no_commit(this,trans,false) != 0)
+  if (execute_no_commit(this,trans,FALSE) != 0)
     DBUG_RETURN(ndb_err(trans));
 
   DBUG_RETURN(next_result(buf));
@@ -2507,7 +2502,7 @@ int ha_ndbcluster::unique_index_scan(const KEY* key_info,
   if ((res= define_read_attrs(buf, op)))
     DBUG_RETURN(res);
 
-  if (execute_no_commit(this,trans,false) != 0)
+  if (execute_no_commit(this,trans,FALSE) != 0)
     DBUG_RETURN(ndb_err(trans));
   DBUG_PRINT("exit", ("Scan started successfully"));
   DBUG_RETURN(next_result(buf));
@@ -2576,7 +2571,7 @@ int ha_ndbcluster::full_table_scan(byte *buf)
   if ((res= define_read_attrs(buf, op)))
     DBUG_RETURN(res);
 
-  if (execute_no_commit(this,trans,false) != 0)
+  if (execute_no_commit(this,trans,FALSE) != 0)
     DBUG_RETURN(ndb_err(trans));
   DBUG_PRINT("exit", ("Scan started successfully"));
   DBUG_RETURN(next_result(buf));
@@ -2624,7 +2619,7 @@ int ha_ndbcluster::write_row(byte *record)
       start_bulk_insert will set parameters to ensure that each
       write_row is committed individually
     */
-    int peek_res= peek_indexed_rows(record, true);
+    int peek_res= peek_indexed_rows(record, TRUE);
 
     if (!peek_res)
     {
@@ -2743,7 +2738,7 @@ int ha_ndbcluster::write_row(byte *record)
     m_bulk_insert_not_flushed= FALSE;
     if (m_transaction_on)
     {
-      if (execute_no_commit(this,trans,false) != 0)
+      if (execute_no_commit(this,trans,FALSE) != 0)
       {
         m_skip_auto_increment= TRUE;
         no_uncommitted_rows_execute_failure();
@@ -2934,7 +2929,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
     DBUG_PRINT("info", ("Calling updateTuple on cursor"));
     if (!(op= cursor->updateCurrentTuple()))
       ERR_RETURN(trans->getNdbError());
-    m_lock_tuple= false;
+    m_lock_tuple= FALSE;
     m_ops_pending++;
     if (uses_blob_value())
       m_blobs_pending= TRUE;
@@ -2997,7 +2992,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data)
       op->setValue(no_fields, part_func_value);
   }
   // Execute update operation
-  if (!cursor && execute_no_commit(this,trans,false) != 0) {
+  if (!cursor && execute_no_commit(this,trans,FALSE) != 0) {
     no_uncommitted_rows_execute_failure();
     DBUG_RETURN(ndb_err(trans));
   }
@@ -3043,7 +3038,7 @@ int ha_ndbcluster::delete_row(const byte *record)
     DBUG_PRINT("info", ("Calling deleteTuple on cursor"));
     if (cursor->deleteCurrentTuple() != 0)
       ERR_RETURN(trans->getNdbError());
-    m_lock_tuple= false;
+    m_lock_tuple= FALSE;
     m_ops_pending++;
 
     if (m_use_partition_function)
@@ -3083,7 +3078,7 @@ int ha_ndbcluster::delete_row(const byte *record)
   }
 
   // Execute delete operation
-  if (execute_no_commit(this,trans,false) != 0) {
+  if (execute_no_commit(this,trans,FALSE) != 0) {
     no_uncommitted_rows_execute_failure();
     DBUG_RETURN(ndb_err(trans));
   }
@@ -3311,8 +3306,7 @@ int ha_ndbcluster::index_init(uint index, bool sorted)
     unless m_lock.type == TL_READ_HIGH_PRIORITY
     and no sub-sequent call to unlock_row()
   */
-  m_lock_tuple= false;
-  m_lock_tuple= false;
+  m_lock_tuple= FALSE;
   DBUG_RETURN(0);
 }
@@ -3571,12 +3565,12 @@ int ha_ndbcluster::close_scan()
       if (!(op= cursor->lockCurrentTuple()))
       {
-        m_lock_tuple= false;
+        m_lock_tuple= FALSE;
        ERR_RETURN(trans->getNdbError());
      }
      m_ops_pending++;
  }
-  m_lock_tuple= false;
+  m_lock_tuple= FALSE;
  if (m_ops_pending)
  {
    /*
@@ -3584,7 +3578,7 @@ int ha_ndbcluster::close_scan()
      deleteing/updating transaction before closing the scan
    */
    DBUG_PRINT("info", ("ops_pending: %ld", (long) m_ops_pending));
-    if (execute_no_commit(this,trans,false) != 0) {
+    if (execute_no_commit(this,trans,FALSE) != 0) {
      no_uncommitted_rows_execute_failure();
      DBUG_RETURN(ndb_err(trans));
    }
@@ -3789,7 +3783,7 @@ int ha_ndbcluster::info(uint flag)
       struct Ndb_statistics stat;
       ndb->setDatabaseName(m_dbname);
       if (current_thd->variables.ndb_use_exact_count &&
-          (result= ndb_get_table_statistics(this, true, ndb, m_table, &stat))
+          (result= ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat))
           == 0)
       {
         stats.mean_rec_length= stat.row_size;
@@ -3990,7 +3984,7 @@ int ha_ndbcluster::end_bulk_insert()
     m_bulk_insert_not_flushed= FALSE;
     if (m_transaction_on)
     {
-      if (execute_no_commit(this, trans,false) != 0)
+      if (execute_no_commit(this, trans,FALSE) != 0)
       {
         no_uncommitted_rows_execute_failure();
         my_errno= error= ndb_err(trans);
@@ -4315,7 +4309,7 @@ void ha_ndbcluster::unlock_row()
   DBUG_ENTER("unlock_row");
 
   DBUG_PRINT("info", ("Unlocking row"));
-  m_lock_tuple= false;
+  m_lock_tuple= FALSE;
   DBUG_VOID_RETURN;
 }
@@ -5008,7 +5002,7 @@ int ha_ndbcluster::create(const char *name,
     get a new share
   */
 
-  if (!(share= get_share(name, form, true, true)))
+  if (!(share= get_share(name, form, TRUE, TRUE)))
   {
     sql_print_error("NDB: allocating table share for %s failed", name);
     /* my_errno is set */
@@ -5072,8 +5066,6 @@ int ha_ndbcluster::create_handler_files(const char *file,
                                         int action_flag,
                                         HA_CREATE_INFO *create_info)
 {
-  char path[FN_REFLEN];
-  const char *name;
   Ndb* ndb;
   const NDBTAB *tab;
   const void *data, *pack_data;
@@ -5373,7 +5365,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)
   int ndb_table_id= orig_tab->getObjectId();
   int ndb_table_version= orig_tab->getObjectVersion();
 
-  NDB_SHARE *share= get_share(from, 0, false);
+  NDB_SHARE *share= get_share(from, 0, FALSE);
   if (share)
   {
     int r= rename_share(share, to);
@@ -5527,7 +5519,7 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb,
     DBUG_PRINT("info", ("Schema distribution table not setup"));
     DBUG_RETURN(HA_ERR_NO_CONNECTION);
   }
-  NDB_SHARE *share= get_share(path, 0, false);
+  NDB_SHARE *share= get_share(path, 0, FALSE);
 #endif
 
   /* Drop the table from NDB */
@@ -5918,7 +5910,7 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked)
     Ndb *ndb= get_ndb();
     ndb->setDatabaseName(m_dbname);
     struct Ndb_statistics stat;
-    res= ndb_get_table_statistics(NULL, false, ndb, m_table, &stat);
+    res= ndb_get_table_statistics(NULL, FALSE, ndb, m_table, &stat);
     stats.mean_rec_length= stat.row_size;
     stats.data_file_length= stat.fragment_memory;
     stats.records= stat.row_count;
@@ -6073,7 +6065,7 @@ int ndbcluster_discover(handlerton *hton, THD* thd, const char *db,
   ndb->setDatabaseName(db);
   NDBDICT* dict= ndb->getDictionary();
   build_table_filename(key, sizeof(key), db, name, "", 0);
-  NDB_SHARE *share= get_share(key, 0, false);
+  NDB_SHARE *share= get_share(key, 0, FALSE);
   if (share && get_ndb_share_state(share) == NSS_ALTERED)
   {
     // Frm has been altered on disk, but not yet written to ndb
@@ -6240,7 +6232,6 @@ int ndbcluster_drop_database_impl(const char *path)
 
 static void ndbcluster_drop_database(handlerton *hton, char *path)
 {
-  THD *thd= current_thd;
   DBUG_ENTER("ndbcluster_drop_database");
 #ifdef HAVE_NDB_BINLOG
   /*
@@ -6257,6 +6248,7 @@ static void ndbcluster_drop_database(handlerton *hton, char *path)
   ndbcluster_drop_database_impl(path);
 #ifdef HAVE_NDB_BINLOG
   char db[FN_REFLEN];
+  THD *thd= current_thd;
   ha_ndbcluster::set_dbname(path, db);
   ndbcluster_log_schema_op(thd, 0,
                            thd->query, thd->query_length,
@@ -6282,16 +6274,17 @@ int ndb_create_table_from_engine(THD *thd, const char *db,
 */
 int ndbcluster_find_all_files(THD *thd)
 {
-  DBUG_ENTER("ndbcluster_find_all_files");
   Ndb* ndb;
   char key[FN_REFLEN];
+  NDBDICT *dict;
+  int unhandled, retries= 5, skipped;
+  DBUG_ENTER("ndbcluster_find_all_files");
 
   if (!(ndb= check_ndb_in_thd(thd)))
     DBUG_RETURN(HA_ERR_NO_CONNECTION);
-  NDBDICT *dict= ndb->getDictionary();
+  dict= ndb->getDictionary();
 
-  int unhandled, retries= 5, skipped;
   LINT_INIT(unhandled);
   LINT_INIT(skipped);
   do
@@ -6361,7 +6354,7 @@ int ndbcluster_find_all_files(THD *thd)
       }
       else if (cmp_frm(ndbtab, pack_data, pack_length))
       {
-        NDB_SHARE *share= get_share(key, 0, false);
+        NDB_SHARE *share= get_share(key, 0, FALSE);
        if (!share || get_ndb_share_state(share) != NSS_ALTERED)
        {
          discover= 1;
@@ -6475,12 +6468,12 @@ int ndbcluster_find_files(handlerton *hton, THD *thd,
   List<char> delete_list;
   while ((file_name=it++))
   {
-    bool file_on_disk= false;
+    bool file_on_disk= FALSE;
     DBUG_PRINT("info", ("%s", file_name));
     if (hash_search(&ndb_tables, file_name, strlen(file_name)))
     {
       DBUG_PRINT("info", ("%s existed in NDB _and_ on disk ", file_name));
-      file_on_disk= true;
+      file_on_disk= TRUE;
     }
 
     // Check for .ndb file with this name
@@ -7033,19 +7026,19 @@ ha_ndbcluster::records_in_range(uint inx, key_range *min_key,
     {
       // We must provide approx table rows
       Uint64 table_rows=0;
-      Ndb_local_table_statistics *info= m_table_info;
-      if (create_info->records != ~(ha_rows)0 && info->records != 0)
+      Ndb_local_table_statistics *ndb_info= m_table_info;
+      if (ndb_info->records != ~(ha_rows)0 && ndb_info->records != 0)
       {
-        table_rows = info->records;
-        DBUG_PRINT("info", ("use info->records: %llu", table_rows));
+        table_rows = ndb_info->records;
+        DBUG_PRINT("info", ("use info->records: %lu", (ulong) table_rows));
       }
       else
       {
         Ndb_statistics stat;
-        if ((res=ndb_get_table_statistics(this, true, ndb, m_table, &stat)) != 0)
+        if ((res=ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat)))
           break;
         table_rows=stat.row_count;
-        DBUG_PRINT("info", ("use db row_count: %llu", table_rows));
+        DBUG_PRINT("info", ("use db row_count: %lu", (ulong) table_rows));
         if (table_rows == 0) {
           // Problem if autocommit=0
 #ifdef ndb_get_table_statistics_uses_active_trans
@@ -7068,7 +7061,7 @@ ha_ndbcluster::records_in_range(uint inx, key_range *min_key,
     if ((op->readTuples(NdbOperation::LM_CommittedRead)) == -1)
       ERR_BREAK(op->getNdbError(), res);
     const key_range *keys[2]={ min_key, max_key };
-    if ((res=set_bounds(op, inx, true, keys)) != 0)
+    if ((res=set_bounds(op, inx, TRUE, keys)) != 0)
       break;
 
     // Decide if db should be contacted
@@ -7203,7 +7196,7 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
   {
     Ndb_table_guard ndbtab_g(ndb->getDictionary(), tabname);
     if (ndbtab_g.get_table() == 0
-        || ndb_get_table_statistics(NULL, false, ndb, ndbtab_g.get_table(), &stat))
+        || ndb_get_table_statistics(NULL, FALSE, ndb, ndbtab_g.get_table(), &stat))
     {
       free_share(&share);
       DBUG_RETURN(1);
     }
@@ -7382,9 +7375,9 @@ static byte *ndbcluster_get_key(NDB_SHARE *share,uint *length,
 static void print_share(const char* where, NDB_SHARE* share)
 {
   fprintf(DBUG_FILE,
-          "%s %s.%s: use_count: %u, commit_count: %llu\n",
+          "%s %s.%s: use_count: %u, commit_count: %lu\n",
           where, share->db, share->table_name, share->use_count,
-          (long long unsigned int) share->commit_count);
+          (ulong) share->commit_count);
   fprintf(DBUG_FILE,
           "  - key: %s, key_length: %d\n",
           share->key, share->key_length);
@@ -7621,7 +7614,6 @@ NDB_SHARE *ndbcluster_get_share(const char *key, TABLE *table,
                                 bool create_if_not_exists,
                                 bool have_lock)
 {
-  THD *thd= current_thd;
   NDB_SHARE *share;
   uint length= (uint) strlen(key);
   DBUG_ENTER("ndbcluster_get_share");
@@ -7951,10 +7943,10 @@ ha_ndbcluster::null_value_index_search(KEY_MULTI_RANGE *ranges,
     const byte *key= range->start_key.key;
     uint key_len= range->start_key.length;
     if (check_null_in_key(key_info, key, key_len))
-      DBUG_RETURN(true);
+      DBUG_RETURN(TRUE);
     curr += reclength;
   }
-  DBUG_RETURN(false);
+  DBUG_RETURN(FALSE);
 }
 
 int
@@ -8067,7 +8059,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
           !define_read_attrs(curr, op) &&
           (op->setAbortOption(AO_IgnoreError), TRUE) &&
           (!m_use_partition_function ||
-           (op->setPartitionId(part_spec.start_part), true)))
+           (op->setPartitionId(part_spec.start_part), TRUE)))
         curr += reclength;
       else
         ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError());
@@ -8128,7 +8120,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
       const key_range *keys[2]= { &multi_range_curr->start_key,
                                   &multi_range_curr->end_key };
-      if ((res= set_bounds(scanOp, active_index, false, keys,
+      if ((res= set_bounds(scanOp, active_index, FALSE, keys,
                            multi_range_curr-ranges)))
         DBUG_RETURN(res);
       break;
@@ -8250,7 +8242,7 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p)
       DBUG_MULTI_RANGE(6);
       // First fetch from cursor
       DBUG_ASSERT(range_no == -1);
-      if ((res= m_multi_cursor->nextResult(true)))
+      if ((res= m_multi_cursor->nextResult(TRUE)))
       {
         DBUG_MULTI_RANGE(15);
         goto close_scan;
       }
@@ -8372,7 +8364,6 @@ ha_ndbcluster::update_table_comment(
   }
   ndb->setDatabaseName(m_dbname);
 
-  NDBDICT* dict= ndb->getDictionary();
   const NDBTAB* tab= m_table;
   DBUG_ASSERT(tab != NULL);
@@ -8567,7 +8558,7 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
         ndb->setDatabaseName(share->db);
         Ndb_table_guard ndbtab_g(ndb->getDictionary(), share->table_name);
         if (ndbtab_g.get_table() &&
-            ndb_get_table_statistics(NULL, false, ndb,
+            ndb_get_table_statistics(NULL, FALSE, ndb,
                                      ndbtab_g.get_table(), &stat) == 0)
         {
           char buff[22], buff2[22];
@@ -8947,7 +8938,7 @@ void ndb_serialize_cond(const Item *item, void *arg)
                    type == MYSQL_TYPE_DATETIME)
                   ? (context->expecting_field_result(STRING_RESULT) ||
                      context->expecting_field_result(INT_RESULT))
-                  : true)) &&
+                  : TRUE)) &&
                 // Bit fields no yet supported in scan filter
                 type != MYSQL_TYPE_BIT &&
                 // No BLOB support in scan filter
@@ -9607,25 +9598,24 @@ ha_ndbcluster::build_scan_filter_predicate(Ndb_cond * &cond,
       break;
     Ndb_item *a= cond->next->ndb_item;
     Ndb_item *b, *field, *value= NULL;
-    LINT_INIT(field);
     switch (cond->ndb_item->argument_count()) {
     case 1:
-      field=
-        (a->type == NDB_FIELD)? a : NULL;
+      field= (a->type == NDB_FIELD)? a : NULL;
      break;
    case 2:
      if (!cond->next->next)
+      {
+        field= NULL;
        break;
+      }
      b= cond->next->next->ndb_item;
-      value=
-        (a->type == NDB_VALUE)? a
-        : (b->type == NDB_VALUE)? b
-        : NULL;
-      field=
-        (a->type == NDB_FIELD)? a
-        : (b->type == NDB_FIELD)? b
-        : NULL;
+      value= ((a->type == NDB_VALUE) ? a :
+              (b->type == NDB_VALUE) ? b :
+              NULL);
+      field= ((a->type == NDB_FIELD) ? a :
+              (b->type == NDB_FIELD) ? b :
+              NULL);
      break;
    default:
      field= NULL; //Keep compiler happy
@@ -10194,8 +10184,8 @@ int ha_ndbcluster::get_default_no_partitions(HA_CREATE_INFO *create_info)
   ha_rows max_rows, min_rows;
   if (create_info)
   {
-    max_rows= info->max_rows;
-    min_rows= info->min_rows;
+    max_rows= create_info->max_rows;
+    min_rows= create_info->min_rows;
   }
   else
   {
@@ -10346,15 +10336,14 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
 {
   uint16 frag_data[MAX_PARTITIONS];
   char *ts_names[MAX_PARTITIONS];
-  ulong ts_index= 0, fd_index= 0, i, j;
+  ulong fd_index= 0, i, j;
   NDBTAB *tab= (NDBTAB*)tab_par;
   NDBTAB::FragmentType ftype= NDBTAB::UserDefined;
   partition_element *part_elem;
   bool first= TRUE;
-  uint ts_id, ts_version, part_count= 0, tot_ts_name_len;
+  uint tot_ts_name_len;
   List_iterator<partition_element> part_it(part_info->partitions);
   int error;
-  char *name_ptr;
   DBUG_ENTER("ha_ndbcluster::set_up_partition_info");
 
   if (part_info->part_type == HASH_PARTITION &&
@@ -10468,7 +10457,7 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
 }
 
-bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info,
+bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *create_info,
                                                uint table_changes)
 {
   DBUG_ENTER("ha_ndbcluster::check_if_incompatible_data");
@@ -10532,70 +10521,72 @@ bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info,
   /* Check that row format didn't change */
   if ((create_info->used_fields & HA_CREATE_USED_AUTO) &&
-      get_row_type() != info->row_type)
+      get_row_type() != create_info->row_type)
     DBUG_RETURN(COMPATIBLE_DATA_NO);
 
   DBUG_RETURN(COMPATIBLE_DATA_YES);
 }
 
-bool set_up_tablespace(st_alter_tablespace *info,
+bool set_up_tablespace(st_alter_tablespace *alter_info,
                        NdbDictionary::Tablespace *ndb_ts)
 {
-  ndb_ts->setName(info->tablespace_name);
-  ndb_ts->setExtentSize(info->extent_size);
-  ndb_ts->setDefaultLogfileGroup(info->logfile_group_name);
-  return false;
+  ndb_ts->setName(alter_info->tablespace_name);
+  ndb_ts->setExtentSize(alter_info->extent_size);
+  ndb_ts->setDefaultLogfileGroup(alter_info->logfile_group_name);
+  return FALSE;
 }
 
-bool set_up_datafile(st_alter_tablespace *info,
+bool set_up_datafile(st_alter_tablespace *alter_info,
                      NdbDictionary::Datafile *ndb_df)
 {
-  if (info->max_size > 0)
+  if (alter_info->max_size > 0)
   {
     my_error(ER_TABLESPACE_AUTO_EXTEND_ERROR, MYF(0));
-    return true;
+    return TRUE;
   }
-  ndb_df->setPath(info->data_file_name);
-  ndb_df->setSize(info->initial_size);
-  ndb_df->setTablespace(info->tablespace_name);
-  return false;
+  ndb_df->setPath(alter_info->data_file_name);
+  ndb_df->setSize(alter_info->initial_size);
+  ndb_df->setTablespace(alter_info->tablespace_name);
+  return FALSE;
 }
 
-bool set_up_logfile_group(st_alter_tablespace *info,
+bool set_up_logfile_group(st_alter_tablespace *alter_info,
                           NdbDictionary::LogfileGroup *ndb_lg)
 {
-  ndb_lg->setName(info->logfile_group_name);
-  ndb_lg->setUndoBufferSize(info->undo_buffer_size);
-  return false;
+  ndb_lg->setName(alter_info->logfile_group_name);
+  ndb_lg->setUndoBufferSize(alter_info->undo_buffer_size);
+  return FALSE;
 }
 
-bool set_up_undofile(st_alter_tablespace *info,
+bool set_up_undofile(st_alter_tablespace *alter_info,
                      NdbDictionary::Undofile *ndb_uf)
 {
-  ndb_uf->setPath(info->undo_file_name);
-  ndb_uf->setSize(info->initial_size);
-  ndb_uf->setLogfileGroup(info->logfile_group_name);
-  return false;
+  ndb_uf->setPath(alter_info->undo_file_name);
+  ndb_uf->setSize(alter_info->initial_size);
+  ndb_uf->setLogfileGroup(alter_info->logfile_group_name);
+  return FALSE;
 }
 
-int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace *info)
+int ndbcluster_alter_tablespace(handlerton *hton,
+                                THD* thd, st_alter_tablespace *alter_info)
 {
+  int is_tablespace= 0;
+  NdbError err;
+  NDBDICT *dict;
+  int error;
+  const char *errmsg;
+  Ndb *ndb;
   DBUG_ENTER("ha_ndbcluster::alter_tablespace");
+  LINT_INIT(errmsg);
 
-  int is_tablespace= 0;
-  Ndb *ndb= check_ndb_in_thd(thd);
+  ndb= check_ndb_in_thd(thd);
   if (ndb == NULL)
   {
     DBUG_RETURN(HA_ERR_NO_CONNECTION);
   }
+  dict= ndb->getDictionary();
 
-  NdbError err;
-  NDBDICT *dict= ndb->getDictionary();
-  int error;
-  const char * errmsg;
-  LINT_INIT(errmsg);
-
-  switch (info->ts_cmd_type){
+  switch (alter_info->ts_cmd_type){
   case (CREATE_TABLESPACE):
   {
     error= ER_CREATE_FILEGROUP_FAILED;
@@ -10603,11 +10594,11 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
     NdbDictionary::Tablespace ndb_ts;
     NdbDictionary::Datafile ndb_df;
     NdbDictionary::ObjectId objid;
-    if (set_up_tablespace(info, &ndb_ts))
+    if (set_up_tablespace(alter_info, &ndb_ts))
     {
       DBUG_RETURN(1);
     }
-    if (set_up_datafile(info, &ndb_df))
+    if (set_up_datafile(alter_info, &ndb_df))
     {
       DBUG_RETURN(1);
     }
@@ -10617,7 +10608,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
       DBUG_PRINT("error", ("createTablespace returned %d", error));
       goto ndberror;
     }
-    DBUG_PRINT("info", ("Successfully created Tablespace"));
+    DBUG_PRINT("alter_info", ("Successfully created Tablespace"));
     errmsg= "DATAFILE";
     if (dict->createDatafile(ndb_df))
     {
@@ -10639,10 +10630,10 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
   case (ALTER_TABLESPACE):
   {
     error= ER_ALTER_FILEGROUP_FAILED;
-    if (info->ts_alter_tablespace_type == ALTER_TABLESPACE_ADD_FILE)
+    if (alter_info->ts_alter_tablespace_type == ALTER_TABLESPACE_ADD_FILE)
     {
       NdbDictionary::Datafile ndb_df;
-      if (set_up_datafile(info, &ndb_df))
+      if (set_up_datafile(alter_info, &ndb_df))
       {
         DBUG_RETURN(1);
       }
@@ -10652,14 +10643,14 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
         goto ndberror;
       }
     }
-    else if(info->ts_alter_tablespace_type == ALTER_TABLESPACE_DROP_FILE)
+    else if(alter_info->ts_alter_tablespace_type == ALTER_TABLESPACE_DROP_FILE)
     {
-      NdbDictionary::Tablespace ts= dict->getTablespace(info->tablespace_name);
-      NdbDictionary::Datafile df= dict->getDatafile(0, info->data_file_name);
+      NdbDictionary::Tablespace ts= dict->getTablespace(alter_info->tablespace_name);
+      NdbDictionary::Datafile df= dict->getDatafile(0, alter_info->data_file_name);
       NdbDictionary::ObjectId objid;
       df.getTablespaceId(&objid);
       if (ts.getObjectId() == objid.getObjectId() &&
-          strcmp(df.getPath(), info->data_file_name) == 0)
+          strcmp(df.getPath(), alter_info->data_file_name) == 0)
       {
         errmsg= " DROP DATAFILE";
         if (dict->dropDatafile(df))
@@ -10677,7 +10668,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
     else
     {
       DBUG_PRINT("error", ("Unsupported alter tablespace: %d",
-                           info->ts_alter_tablespace_type));
+                           alter_info->ts_alter_tablespace_type));
       DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
     }
     is_tablespace= 1;
@@ -10689,14 +10680,14 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
     NdbDictionary::LogfileGroup ndb_lg;
     NdbDictionary::Undofile ndb_uf;
     NdbDictionary::ObjectId objid;
-    if (info->undo_file_name == NULL)
+    if (alter_info->undo_file_name == NULL)
     {
       /*
        REDO files in LOGFILE GROUP not supported yet
       */
       DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
     }
-    if (set_up_logfile_group(info, &ndb_lg))
+    if (set_up_logfile_group(alter_info, &ndb_lg))
     {
       DBUG_RETURN(1);
     }
@@ -10705,8 +10696,8 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
     {
       goto ndberror;
     }
-    DBUG_PRINT("info", ("Successfully created Logfile Group"));
-    if (set_up_undofile(info, &ndb_uf))
+    DBUG_PRINT("alter_info", ("Successfully created Logfile Group"));
+    if (set_up_undofile(alter_info, &ndb_uf))
    {
       DBUG_RETURN(1);
     }
@@ -10728,7 +10719,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
   case (ALTER_LOGFILE_GROUP):
   {
     error= ER_ALTER_FILEGROUP_FAILED;
-    if (info->undo_file_name == NULL)
+    if (alter_info->undo_file_name == NULL)
     {
       /*
        REDO files in LOGFILE GROUP not supported yet
@@ -10736,7 +10727,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
       DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
     }
     NdbDictionary::Undofile ndb_uf;
-    if (set_up_undofile(info, &ndb_uf))
+    if (set_up_undofile(alter_info, &ndb_uf))
     {
       DBUG_RETURN(1);
     }
@@ -10751,7 +10742,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
   {
     error= ER_DROP_FILEGROUP_FAILED;
     errmsg= "TABLESPACE";
-    if (dict->dropTablespace(dict->getTablespace(info->tablespace_name)))
+    if (dict->dropTablespace(dict->getTablespace(alter_info->tablespace_name)))
     {
       goto ndberror;
     }
@@ -10762,7 +10753,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
   {
     error= ER_DROP_FILEGROUP_FAILED;
     errmsg= "LOGFILE GROUP";
-    if (dict->dropLogfileGroup(dict->getLogfileGroup(info->logfile_group_name)))
+    if (dict->dropLogfileGroup(dict->getLogfileGroup(alter_info->logfile_group_name)))
    {
       goto ndberror;
     }
@@ -10785,13 +10776,13 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace
   if (is_tablespace)
     ndbcluster_log_schema_op(thd, 0,
                              thd->query, thd->query_length,
-                             "", info->tablespace_name,
+                             "", alter_info->tablespace_name,
                              0, 0,
                              SOT_TABLESPACE, 0, 0, 0);
   else
     ndbcluster_log_schema_op(thd, 0,
                              thd->query, thd->query_length,
-                             "", info->logfile_group_name,
+                             "", alter_info->logfile_group_name,
                              0, 0,
                              SOT_LOGFILE_GROUP, 0, 0, 0);
 #endif
@@ -10812,7 +10803,6 @@ bool ha_ndbcluster::get_no_parts(const char *name, uint *no_parts)
 {
   Ndb *ndb;
   NDBDICT *dict;
-  const NDBTAB *tab;
   int err;
   DBUG_ENTER("ha_ndbcluster::get_no_parts");
   LINT_INIT(err);
```