author     unknown <monty@mysql.com/narttu.mysql.fi>  2007-01-29 01:47:35 +0200
committer  unknown <monty@mysql.com/narttu.mysql.fi>  2007-01-29 01:47:35 +0200
commit     f40e0cc0d0606c3f06151763a6445bfa4682eb77 (patch)
tree       9a81ea8e9e695584f7915cc104eda630d7b98bc8
parent     36b058929ffbbf132a4a512ec8c3a6e937309387 (diff)
download   mariadb-git-f40e0cc0d0606c3f06151763a6445bfa4682eb77.tar.gz
After merge fixes
Removed a lot of compiler warnings
Removed unused variables, functions and labels
Initialized some variables that could be used uninitialized (fatal bugs)
%llu -> %lu in printf-style DBUG formats (portability fix)
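That format-string change recurs throughout ha_ndbcluster.cc in the diff below. A minimal sketch of the problem and of the cast-to-ulong idiom the patch uses; the variable name is illustrative, not the real server code:

```c
#include <stdio.h>

int main(void)
{
  unsigned long long table_rows= 42;  /* ulonglong in MySQL terms */

  /* Not portable at the time: some printf implementations targeted by
     the server did not understand the C99 "ll" length modifier, so this
     could print garbage in DBUG trace output. */
  printf("use db row_count: %llu\n", table_rows);

  /* The pattern applied by this patch: cast down to unsigned long and
     print with %lu, which every supported platform handles. */
  printf("use db row_count: %lu\n", (unsigned long) table_rows);
  return 0;
}
```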
BitKeeper/etc/ignore:
added storage/archive/archive_reader
BUILD/SETUP.sh:
ccache now works again
BUILD/compile-pentium-gcov:
Added a marker that we are using gcov and need a special version of ccache
client/mysql_upgrade.c:
After merge fixes
client/mysqlbinlog.cc:
After merge fixes
client/mysqldump.c:
Removed compiler warnings
client/mysqlimport.c:
Removed compiler warnings
client/mysqltest.c:
Removed compiler warnings
mysql-test/t/mysqlcheck.test:
After merge fixes
mysys/my_bitmap.c:
After merge fix
sql/event_data_objects.cc:
Removed unused variable
sql/event_db_repository.cc:
Removed unused variable
sql/event_queue.cc:
Removed unused variable
sql/field.cc:
After merge fixes
sql/filesort.cc:
Added missing initialization of sort_keys (could cause a core dump on out-of-memory; see the sketch after the file list)
sql/ha_ndbcluster.cc:
After merge fixes
Removed unused variables
false -> FALSE
true -> TRUE
%llu -> %lu (portability fix)
Fixed a bug where a field could be used uninitialized in build_scan_filter_predicate()
sql/ha_ndbcluster_binlog.cc:
Removed unused label
sql/ha_partition.cc:
Removed unused variables
sql/handler.cc:
Removed unused variable & function
sql/item.cc:
After merge fixes
sql/item_cmpfunc.cc:
Removed unused variable
sql/item_func.cc:
Removed compiler warning
sql/item_xmlfunc.cc:
Removed unused variables & declarations
sql/log.cc:
Removed compiler warnings
Removed unused variables & label
sql/log.h:
After merge fixes
sql/log_event.cc:
Removed unused variable & function
sql/mysqld.cc:
After merge fixes
sql/opt_range.cc:
Removed unused declaration
sql/partition_info.cc:
Removed unused variable
sql/protocol.cc:
Removed compiler warnings
sql/set_var.cc:
Removed unused variable
sql/set_var.h:
After merge fix
sql/slave.cc:
After merge fixes
sql/slave.h:
Moved a wrongly placed static declaration from slave.h to slave.cc
sql/sp.cc:
Fixed format of DBUG_PRINT
sql/sp_head.cc:
After merge fixes
sql/spatial.cc:
Added DBUG_ASSERT() to verify that the LINT_INIT is right (see the LINT_INIT sketch below the diffstat)
sql/sql_class.cc:
Removed unused variables
sql/sql_insert.cc:
After merge fixes
sql/sql_parse.cc:
Removed unused variable
After merge fixes
sql/sql_partition.cc:
Removed unused variables
sql/sql_plugin.cc:
Removed compiler warnings when compiling embedded server
sql/sql_servers.cc:
Removed unused variables
Moved a misplaced call to use_all_columns()
sql/sql_servers.h:
Moved declaration to sql_servers.cc, where it belongs
sql/sql_show.cc:
Removed unused variables and function
After merge fixes
sql/sql_table.cc:
Removed unused variable
sql/sql_yacc.yy:
Removed unused variables
Lex -> lex
sql/table.cc:
Indentation fix
storage/archive/ha_archive.cc:
After merge fixes
storage/example/ha_example.cc:
Indentation fixes
storage/federated/ha_federated.cc:
Removed unused variables
storage/myisam/mi_rkey.c:
Added 0x before address
storage/myisammrg/ha_myisammrg.cc:
Removed old declaration
storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp:
After merge fixes
storage/ndb/include/util/SimpleProperties.hpp:
After merge fixes
storage/ndb/src/common/debugger/EventLogger.cpp:
Removed unused function
storage/ndb/src/kernel/blocks/suma/Suma.cpp:
Removed compiler warnings
Removed unused variables
storage/ndb/src/mgmsrv/MgmtSrvr.cpp:
After merge fixes
Removed unused variables
storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp:
Removed unused variables
storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp:
Removed unused variables
storage/ndb/src/ndbapi/NdbOperationDefine.cpp:
Removed unused variables and label
storage/ndb/src/ndbapi/NdbOperationSearch.cpp:
Removed unused label
storage/ndb/src/ndbapi/SignalSender.cpp:
Removed unused function
storage/ndb/src/ndbapi/TransporterFacade.cpp:
Removed unused variables
storage/ndb/src/ndbapi/ndb_cluster_connection.cpp:
Moved static declaration from header file
storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp:
Moved static declaration from header file
support-files/compiler_warnings.supp:
Removed some warnings from ndb
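The sql/filesort.cc and client/mysql_upgrade.c entries above share one failure mode: a pointer that is assigned only on the success path but freed unconditionally on a shared error path. A minimal sketch of the bug and the one-line fix; do_sort() and its names are illustrative, not the real filesort() code:

```c
#include <stdio.h>
#include <stdlib.h>

static int do_sort(size_t nkeys)
{
  unsigned char **sort_keys= NULL;   /* the fix: initialize at declaration */
  int error= 1;

  if (nkeys > 1000000)               /* simulate hitting a limit (EOM) */
    goto err;                        /* jumps before sort_keys is set */

  if (!(sort_keys= malloc(nkeys * sizeof(*sort_keys))))
    goto err;

  /* ... sort work would happen here ... */
  error= 0;

err:
  free(sort_keys);                   /* safe: free(NULL) is a no-op, while
                                        freeing an uninitialized pointer is
                                        a likely core dump */
  return error;
}

int main(void)
{
  printf("%d\n", do_sort(10));        /* 0: success path */
  printf("%d\n", do_sort(2000000));   /* 1: error path, no crash */
  return 0;
}
```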
67 files changed, 461 insertions, 442 deletions
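On the sql/spatial.cc note above: LINT_INIT() is the server's macro for a dummy assignment whose only purpose is to silence "may be used uninitialized" warnings; in normal builds it expands to nothing. The added DBUG_ASSERT() checks the invariant that makes the dummy assignment safe. A self-contained sketch of the idiom, with plain assert() standing in for DBUG_ASSERT() and an illustrative function rather than the real Gis_polygon::centroid_xy():

```c
#include <assert.h>
#include <stdio.h>

/* Roughly how my_global.h conditions the macro: only lint/instrumented
   builds get the real dummy assignment. */
#if defined(_lint) || defined(FORCE_INIT_OF_VARS)
#define LINT_INIT(var) var= 0
#else
#define LINT_INIT(var)
#endif

/* Average of the last ring's coordinates; illustrative only. */
static double last_ring_avg(const double *coords, unsigned n_rings,
                            unsigned points_per_ring)
{
  unsigned i;
  double sum;
  LINT_INIT(sum);          /* the compiler can't see that the loop runs */
  assert(n_rings > 0);     /* ...but this invariant guarantees it, so
                              sum is always assigned before use */
  while (n_rings--)
  {
    sum= 0.0;
    for (i= 0; i < points_per_ring; i++)
      sum+= coords[i];
    coords+= points_per_ring;
  }
  return sum / points_per_ring;
}

int main(void)
{
  double coords[]= { 1.0, 2.0, 3.0, 4.0 };      /* two rings, two points each */
  printf("%g\n", last_ring_avg(coords, 2, 2));  /* prints 3.5 */
  return 0;
}
```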
diff --git a/.bzrignore b/.bzrignore index e00d2a16187..c91412de798 100644 --- a/.bzrignore +++ b/.bzrignore @@ -2934,3 +2934,4 @@ win/vs71cache.txt win/vs8cache.txt zlib/*.ds? zlib/*.vcproj +storage/archive/archive_reader diff --git a/BUILD/SETUP.sh b/BUILD/SETUP.sh index d2534d42b33..9ce2570746b 100755 --- a/BUILD/SETUP.sh +++ b/BUILD/SETUP.sh @@ -187,6 +187,12 @@ fi # (http://samba.org/ccache) is installed, use it. # We use 'grep' and hope 'grep' will work as expected # (returns 0 if finds lines) +if test "$USING_GCOV" != "1" +then + # Not using gcov; Safe to use ccache + CCACHE_GCOV_VERSION_ENABLED=1 +fi + if ccache -V > /dev/null 2>&1 && test "$CCACHE_GCOV_VERSION_ENABLED" = "1" then echo "$CC" | grep "ccache" > /dev/null || CC="ccache $CC" diff --git a/BUILD/compile-pentium-gcov b/BUILD/compile-pentium-gcov index 1a49b5f836a..5ee3c071f61 100755 --- a/BUILD/compile-pentium-gcov +++ b/BUILD/compile-pentium-gcov @@ -2,6 +2,7 @@ # Need to disable ccache, or we loose the gcov-needed compiler output files. +USING_GCOV=1 CCACHE_GCOV_VERSION_ENABLED=0 if ccache -V > /dev/null 2>&1 then diff --git a/client/mysql_upgrade.c b/client/mysql_upgrade.c index 5ae989c6174..6d8337abb4d 100644 --- a/client/mysql_upgrade.c +++ b/client/mysql_upgrade.c @@ -451,7 +451,7 @@ int main(int argc, char **argv) char *forced_extra_defaults; char *local_defaults_group_suffix; const char *script_line; - char *upgrade_defaults_path; + char *upgrade_defaults_path= 0; char *defaults_to_use= NULL; int upgrade_defaults_created= 0; diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc index 768b24eb2f3..027cf2ea435 100644 --- a/client/mysqlbinlog.cc +++ b/client/mysqlbinlog.cc @@ -639,7 +639,8 @@ Create_file event for file_id: %u\n",exv->file_id); case FORMAT_DESCRIPTION_EVENT: delete glob_description_event; glob_description_event= (Format_description_log_event*) ev; - print_event_info->common_header_len= glob_description_event->common_header_len; + print_event_info->common_header_len= + glob_description_event->common_header_len; ev->print(result_file, print_event_info); /* We don't want this event to be deleted now, so let's hide it (I @@ -649,7 +650,7 @@ Create_file event for file_id: %u\n",exv->file_id); */ ev= 0; if (!force_if_open_opt && - (description_event->flags & LOG_EVENT_BINLOG_IN_USE_F)) + (glob_description_event->flags & LOG_EVENT_BINLOG_IN_USE_F)) { file_not_closed_error= 1; DBUG_RETURN(1); diff --git a/client/mysqldump.c b/client/mysqldump.c index ec0a9def786..1f8e7937c90 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -2878,7 +2878,7 @@ static int dump_tablespaces_for_tables(char *db, char **table_names, int tables) dynstr_trunc(&where, 1); dynstr_append(&where,"))"); - DBUG_PRINT("info",("Dump TS for Tables where: %s",where)); + DBUG_PRINT("info",("Dump TS for Tables where: %s",where.str)); r= dump_tablespaces(where.str); dynstr_free(&where); return r; @@ -2908,7 +2908,7 @@ static int dump_tablespaces_for_databases(char** databases) dynstr_trunc(&where, 1); dynstr_append(&where,"))"); - DBUG_PRINT("info",("Dump TS for DBs where: %s",where)); + DBUG_PRINT("info",("Dump TS for DBs where: %s",where.str)); r= dump_tablespaces(where.str); dynstr_free(&where); return r; @@ -2920,7 +2920,7 @@ static int dump_tablespaces(char* ts_where) MYSQL_RES *tableres; char buf[FN_REFLEN]; DYNAMIC_STRING sqlbuf; - int first; + int first= 0; /* The following are used for parsing the EXTRA field */ diff --git a/client/mysqlimport.c b/client/mysqlimport.c index c9e21de1b2a..3e054fba308 100644 --- 
a/client/mysqlimport.c +++ b/client/mysqlimport.c @@ -34,7 +34,7 @@ /* Global Thread counter */ -int counter; +uint counter; #ifdef HAVE_LIBPTHREAD pthread_mutex_t counter_mutex; pthread_cond_t count_threshhold; diff --git a/client/mysqltest.c b/client/mysqltest.c index 64c8509bfc4..df95d94c2fc 100644 --- a/client/mysqltest.c +++ b/client/mysqltest.c @@ -1845,7 +1845,7 @@ void do_copy_file(struct st_command *command) void do_chmod_file(struct st_command *command) { - ulong mode= 0; + long mode= 0; static DYNAMIC_STRING ds_mode; static DYNAMIC_STRING ds_file; const struct command_arg chmod_file_args[] = { diff --git a/mysql-test/t/mysqlcheck.test b/mysql-test/t/mysqlcheck.test index ed99adf172c..d233546f9e3 100644 --- a/mysql-test/t/mysqlcheck.test +++ b/mysql-test/t/mysqlcheck.test @@ -1,11 +1,3 @@ -# Clean up after previous tests -# - ---disable_warnings -DROP TABLE IF EXISTS t1; -drop view if exists v1; -drop database if exists client_test_db; ---enable_warnings # Embedded server doesn't support external clients --source include/not_embedded.inc @@ -14,13 +6,16 @@ drop database if exists client_test_db; # depends on the presence of the log tables (which are CSV-based). --source include/have_csv.inc +# +# Clean up after previous tests +# + --disable_warnings +DROP TABLE IF EXISTS t1; +drop view if exists v1; drop database if exists client_test_db; --enable_warnings -DROP SCHEMA test; -CREATE SCHEMA test; -use test; # # Bug #13783 mysqlcheck tries to optimize and analyze information_schema # diff --git a/mysys/my_bitmap.c b/mysys/my_bitmap.c index 753537e36ba..10eff40b9ed 100644 --- a/mysys/my_bitmap.c +++ b/mysys/my_bitmap.c @@ -171,7 +171,7 @@ void bitmap_free(MY_BITMAP *map) my_bool bitmap_fast_test_and_set(MY_BITMAP *map, uint bitmap_bit) { - uchar *value= (uchar*) (map->bitmap + (bitmap_bit / 8)); + uchar *value= ((uchar*) map->bitmap) + (bitmap_bit / 8); uchar bit= 1 << ((bitmap_bit) & 7); uchar res= (*value) & bit; *value|= bit; diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc index 54b043bd916..07575a6d33a 100644 --- a/sql/event_data_objects.cc +++ b/sql/event_data_objects.cc @@ -1560,7 +1560,6 @@ done: int Event_timed::get_create_event(THD *thd, String *buf) { - int multipl= 0; char tmp_buf[2 * STRING_BUFFER_USUAL_SIZE]; String expr_buf(tmp_buf, sizeof(tmp_buf), system_charset_info); expr_buf.length(0); diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc index bcc7d476fff..940930ec4c6 100644 --- a/sql/event_db_repository.cc +++ b/sql/event_db_repository.cc @@ -518,7 +518,6 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data, my_bool create_if_not) { int ret= 0; - CHARSET_INFO *scs= system_charset_info; TABLE *table= NULL; char old_db_buf[NAME_LEN+1]; LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) }; diff --git a/sql/event_queue.cc b/sql/event_queue.cc index 45d354ea9b6..068abbe3408 100644 --- a/sql/event_queue.cc +++ b/sql/event_queue.cc @@ -139,8 +139,6 @@ bool Event_queue::init_queue(THD *thd, Event_db_repository *db_repo) { bool res; - struct event_queue_param *event_queue_param_value= NULL; - DBUG_ENTER("Event_queue::init_queue"); DBUG_PRINT("enter", ("this: 0x%lx", (long) this)); diff --git a/sql/field.cc b/sql/field.cc index f01f3b3731a..cc32e998f65 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -2556,7 +2556,7 @@ uint Field_new_decimal::is_equal(create_field *new_field) (uint) (flags & UNSIGNED_FLAG)) && ((new_field->flags & AUTO_INCREMENT_FLAG) == (uint) (flags & AUTO_INCREMENT_FLAG)) && - (new_field->length 
== max_length()) && + (new_field->length == max_display_length()) && (new_field->decimals == dec)); } @@ -6165,7 +6165,7 @@ uint Field_str::is_equal(create_field *new_field) return ((new_field->sql_type == real_type()) && new_field->charset == field_charset && - new_field->length == max_length()); + new_field->length == max_display_length()); } @@ -6999,11 +6999,11 @@ uint Field_varstring::is_equal(create_field *new_field) if (new_field->sql_type == real_type() && new_field->charset == field_charset) { - if (new_field->length == max_length()) + if (new_field->length == max_display_length()) return IS_EQUAL_YES; - if (new_field->length > max_length() && - ((new_field->length <= 255 && max_length() <= 255) || - (new_field->length > 255 && max_length() > 255))) + if (new_field->length > max_display_length() && + ((new_field->length <= 255 && max_display_length() <= 255) || + (new_field->length > 255 && max_display_length() > 255))) return IS_EQUAL_PACK_LENGTH; // VARCHAR, longer variable length } return IS_EQUAL_NO; @@ -8196,7 +8196,7 @@ uint Field_num::is_equal(create_field *new_field) UNSIGNED_FLAG)) && ((new_field->flags & AUTO_INCREMENT_FLAG) == (uint) (flags & AUTO_INCREMENT_FLAG)) && - (new_field->length <= max_length())); + (new_field->length <= max_display_length())); } diff --git a/sql/filesort.cc b/sql/filesort.cc index 448dea227ab..9f0bb9b45fb 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -104,7 +104,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, uint maxbuffer; BUFFPEK *buffpek; ha_rows records= HA_POS_ERROR; - uchar **sort_keys; + uchar **sort_keys= 0; IO_CACHE tempfile, buffpek_pointers, *selected_records_file, *outfile; SORTPARAM param; bool multi_byte_charset; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 4738fbb22f9..36fe6457167 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -150,7 +150,6 @@ static byte *ndbcluster_get_key(NDB_SHARE *share,uint *length, #ifdef HAVE_NDB_BINLOG static int rename_share(NDB_SHARE *share, const char *new_key); #endif -static void ndb_set_fragmentation(NDBTAB &tab, TABLE *table, uint pk_len); static int ndb_get_table_statistics(ha_ndbcluster*, bool, Ndb*, const NDBTAB *, struct Ndb_statistics *); @@ -451,7 +450,7 @@ ha_rows ha_ndbcluster::records() Ndb *ndb= get_ndb(); ndb->setDatabaseName(m_dbname); struct Ndb_statistics stat; - if (ndb_get_table_statistics(this, true, ndb, m_table, &stat) == 0) + if (ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat) == 0) { retval= stat.row_count; } @@ -462,9 +461,9 @@ ha_rows ha_ndbcluster::records() THD *thd= current_thd; if (get_thd_ndb(thd)->error) - info->no_uncommitted_rows_count= 0; + local_info->no_uncommitted_rows_count= 0; - DBUG_RETURN(retval + info->no_uncommitted_rows_count); + DBUG_RETURN(retval + local_info->no_uncommitted_rows_count); } int ha_ndbcluster::records_update() @@ -482,7 +481,7 @@ int ha_ndbcluster::records_update() Ndb *ndb= get_ndb(); struct Ndb_statistics stat; ndb->setDatabaseName(m_dbname); - result= ndb_get_table_statistics(this, true, ndb, m_table, &stat); + result= ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat); if (result == 0) { stats.mean_rec_length= stat.row_size; @@ -955,7 +954,6 @@ int ha_ndbcluster::get_ndb_partition_id(NdbOperation *ndb_op) bool ha_ndbcluster::uses_blob_value() { - uint blob_fields; MY_BITMAP *bitmap; uint *blob_index, *blob_index_end; if (table_share->blob_fields == 0) @@ -1105,7 +1103,6 @@ int ha_ndbcluster::create_indexes(Ndb *ndb, TABLE *tab) const char 
*index_name; KEY* key_info= tab->key_info; const char **key_name= tab->s->keynames.type_names; - NDBDICT *dict= ndb->getDictionary(); DBUG_ENTER("ha_ndbcluster::create_indexes"); for (i= 0; i < tab->s->keys; i++, key_info++, key_name++) @@ -1243,7 +1240,6 @@ int ha_ndbcluster::open_indexes(Ndb *ndb, TABLE *tab, bool ignore_error) int error= 0; THD *thd=current_thd; NDBDICT *dict= ndb->getDictionary(); - const char *index_name; KEY* key_info= tab->key_info; const char **key_name= tab->s->keynames.type_names; DBUG_ENTER("ha_ndbcluster::open_indexes"); @@ -1255,9 +1251,9 @@ int ha_ndbcluster::open_indexes(Ndb *ndb, TABLE *tab, bool ignore_error) m_index[i].index= m_index[i].unique_index= NULL; else break; - m_index[i].null_in_unique_index= false; + m_index[i].null_in_unique_index= FALSE; if (check_index_fields_not_null(key_info)) - m_index[i].null_in_unique_index= true; + m_index[i].null_in_unique_index= TRUE; } if (error && !ignore_error) @@ -1293,7 +1289,6 @@ void ha_ndbcluster::renumber_indexes(Ndb *ndb, TABLE *tab) const char *index_name; KEY* key_info= tab->key_info; const char **key_name= tab->s->keynames.type_names; - NDBDICT *dict= ndb->getDictionary(); DBUG_ENTER("ha_ndbcluster::renumber_indexes"); for (i= 0; i < tab->s->keys; i++, key_info++, key_name++) @@ -1410,10 +1405,10 @@ bool ha_ndbcluster::check_index_fields_not_null(KEY* key_info) { Field* field= key_part->field; if (field->maybe_null()) - DBUG_RETURN(true); + DBUG_RETURN(TRUE); } - DBUG_RETURN(false); + DBUG_RETURN(FALSE); } void ha_ndbcluster::release_metadata(THD *thd, Ndb *ndb) @@ -1731,7 +1726,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf, ERR_RETURN(trans->getNdbError()); } - if (execute_no_commit_ie(this,trans,false) != 0) + if (execute_no_commit_ie(this,trans,FALSE) != 0) { table->status= STATUS_NOT_FOUND; DBUG_RETURN(ndb_err(trans)); @@ -1796,7 +1791,7 @@ int ha_ndbcluster::complemented_read(const byte *old_data, byte *new_data, } } - if (execute_no_commit(this,trans,false) != 0) + if (execute_no_commit(this,trans,FALSE) != 0) { table->status= STATUS_NOT_FOUND; DBUG_RETURN(ndb_err(trans)); @@ -1842,7 +1837,7 @@ bool ha_ndbcluster::check_all_operations_for_error(NdbTransaction *trans, if (err.status != NdbError::Success) { if (ndb_to_mysql_error(&err) != (int) errcode) - DBUG_RETURN(false); + DBUG_RETURN(FALSE); if (op == last) break; op= trans->getNextCompletedOperation(op); } @@ -1873,10 +1868,10 @@ bool ha_ndbcluster::check_all_operations_for_error(NdbTransaction *trans, if (errcode == HA_ERR_KEY_NOT_FOUND) m_dupkey= table->s->primary_key; } - DBUG_RETURN(false); + DBUG_RETURN(FALSE); } } - DBUG_RETURN(true); + DBUG_RETURN(TRUE); } @@ -1954,7 +1949,7 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record, } last= trans->getLastDefinedOperation(); if (first) - res= execute_no_commit_ie(this,trans,false); + res= execute_no_commit_ie(this,trans,FALSE); else { // Table has no keys @@ -2003,7 +1998,7 @@ int ha_ndbcluster::unique_index_read(const byte *key, if ((res= define_read_attrs(buf, op))) DBUG_RETURN(res); - if (execute_no_commit_ie(this,trans,false) != 0) + if (execute_no_commit_ie(this,trans,FALSE) != 0) { table->status= STATUS_NOT_FOUND; DBUG_RETURN(ndb_err(trans)); @@ -2036,13 +2031,13 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor) if (!(op= m_active_cursor->lockCurrentTuple())) { /* purecov: begin inspected */ - m_lock_tuple= false; + m_lock_tuple= FALSE; ERR_RETURN(con_trans->getNdbError()); /* purecov: end */ } m_ops_pending++; } - m_lock_tuple= false; + 
m_lock_tuple= FALSE; bool contact_ndb= m_lock.type < TL_WRITE_ALLOW_WRITE && m_lock.type != TL_READ_WITH_SHARED_LOCKS;; @@ -2053,7 +2048,7 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor) */ if (m_ops_pending && m_blobs_pending) { - if (execute_no_commit(this,trans,false) != 0) + if (execute_no_commit(this,trans,FALSE) != 0) DBUG_RETURN(ndb_err(trans)); m_ops_pending= 0; m_blobs_pending= FALSE; @@ -2085,7 +2080,7 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor) { if (m_transaction_on) { - if (execute_no_commit(this,trans,false) != 0) + if (execute_no_commit(this,trans,FALSE) != 0) DBUG_RETURN(-1); } else @@ -2366,7 +2361,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key, bool need_pk = (lm == NdbOperation::LM_Read); if (!(op= trans->getNdbIndexScanOperation(m_index[active_index].index, m_table)) || - op->readTuples(lm, 0, parallelism, sorted, descending, false, need_pk)) + op->readTuples(lm, 0, parallelism, sorted, descending, FALSE, need_pk)) ERR_RETURN(trans->getNdbError()); if (m_use_partition_function && part_spec != NULL && part_spec->start_part == part_spec->end_part) @@ -2388,7 +2383,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key, { const key_range *keys[2]= { start_key, end_key }; - res= set_bounds(op, active_index, false, keys); + res= set_bounds(op, active_index, FALSE, keys); if (res) DBUG_RETURN(res); } @@ -2412,7 +2407,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key, ERR_RETURN(trans->getNdbError()); } - if (execute_no_commit(this,trans,false) != 0) + if (execute_no_commit(this,trans,FALSE) != 0) DBUG_RETURN(ndb_err(trans)); DBUG_RETURN(next_result(buf)); @@ -2507,7 +2502,7 @@ int ha_ndbcluster::unique_index_scan(const KEY* key_info, if ((res= define_read_attrs(buf, op))) DBUG_RETURN(res); - if (execute_no_commit(this,trans,false) != 0) + if (execute_no_commit(this,trans,FALSE) != 0) DBUG_RETURN(ndb_err(trans)); DBUG_PRINT("exit", ("Scan started successfully")); DBUG_RETURN(next_result(buf)); @@ -2576,7 +2571,7 @@ int ha_ndbcluster::full_table_scan(byte *buf) if ((res= define_read_attrs(buf, op))) DBUG_RETURN(res); - if (execute_no_commit(this,trans,false) != 0) + if (execute_no_commit(this,trans,FALSE) != 0) DBUG_RETURN(ndb_err(trans)); DBUG_PRINT("exit", ("Scan started successfully")); DBUG_RETURN(next_result(buf)); @@ -2624,7 +2619,7 @@ int ha_ndbcluster::write_row(byte *record) start_bulk_insert will set parameters to ensure that each write_row is committed individually */ - int peek_res= peek_indexed_rows(record, true); + int peek_res= peek_indexed_rows(record, TRUE); if (!peek_res) { @@ -2743,7 +2738,7 @@ int ha_ndbcluster::write_row(byte *record) m_bulk_insert_not_flushed= FALSE; if (m_transaction_on) { - if (execute_no_commit(this,trans,false) != 0) + if (execute_no_commit(this,trans,FALSE) != 0) { m_skip_auto_increment= TRUE; no_uncommitted_rows_execute_failure(); @@ -2934,7 +2929,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) DBUG_PRINT("info", ("Calling updateTuple on cursor")); if (!(op= cursor->updateCurrentTuple())) ERR_RETURN(trans->getNdbError()); - m_lock_tuple= false; + m_lock_tuple= FALSE; m_ops_pending++; if (uses_blob_value()) m_blobs_pending= TRUE; @@ -2997,7 +2992,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) op->setValue(no_fields, part_func_value); } // Execute update operation - if (!cursor && execute_no_commit(this,trans,false) != 0) { + if (!cursor && execute_no_commit(this,trans,FALSE) != 0) { 
no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } @@ -3043,7 +3038,7 @@ int ha_ndbcluster::delete_row(const byte *record) DBUG_PRINT("info", ("Calling deleteTuple on cursor")); if (cursor->deleteCurrentTuple() != 0) ERR_RETURN(trans->getNdbError()); - m_lock_tuple= false; + m_lock_tuple= FALSE; m_ops_pending++; if (m_use_partition_function) @@ -3083,7 +3078,7 @@ int ha_ndbcluster::delete_row(const byte *record) } // Execute delete operation - if (execute_no_commit(this,trans,false) != 0) { + if (execute_no_commit(this,trans,FALSE) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } @@ -3311,8 +3306,7 @@ int ha_ndbcluster::index_init(uint index, bool sorted) unless m_lock.type == TL_READ_HIGH_PRIORITY and no sub-sequent call to unlock_row() */ - m_lock_tuple= false; - m_lock_tuple= false; + m_lock_tuple= FALSE; DBUG_RETURN(0); } @@ -3571,12 +3565,12 @@ int ha_ndbcluster::close_scan() if (!(op= cursor->lockCurrentTuple())) { - m_lock_tuple= false; + m_lock_tuple= FALSE; ERR_RETURN(trans->getNdbError()); } m_ops_pending++; } - m_lock_tuple= false; + m_lock_tuple= FALSE; if (m_ops_pending) { /* @@ -3584,7 +3578,7 @@ int ha_ndbcluster::close_scan() deleteing/updating transaction before closing the scan */ DBUG_PRINT("info", ("ops_pending: %ld", (long) m_ops_pending)); - if (execute_no_commit(this,trans,false) != 0) { + if (execute_no_commit(this,trans,FALSE) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } @@ -3789,7 +3783,7 @@ int ha_ndbcluster::info(uint flag) struct Ndb_statistics stat; ndb->setDatabaseName(m_dbname); if (current_thd->variables.ndb_use_exact_count && - (result= ndb_get_table_statistics(this, true, ndb, m_table, &stat)) + (result= ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat)) == 0) { stats.mean_rec_length= stat.row_size; @@ -3990,7 +3984,7 @@ int ha_ndbcluster::end_bulk_insert() m_bulk_insert_not_flushed= FALSE; if (m_transaction_on) { - if (execute_no_commit(this, trans,false) != 0) + if (execute_no_commit(this, trans,FALSE) != 0) { no_uncommitted_rows_execute_failure(); my_errno= error= ndb_err(trans); @@ -4315,7 +4309,7 @@ void ha_ndbcluster::unlock_row() DBUG_ENTER("unlock_row"); DBUG_PRINT("info", ("Unlocking row")); - m_lock_tuple= false; + m_lock_tuple= FALSE; DBUG_VOID_RETURN; } @@ -5008,7 +5002,7 @@ int ha_ndbcluster::create(const char *name, get a new share */ - if (!(share= get_share(name, form, true, true))) + if (!(share= get_share(name, form, TRUE, TRUE))) { sql_print_error("NDB: allocating table share for %s failed", name); /* my_errno is set */ @@ -5072,8 +5066,6 @@ int ha_ndbcluster::create_handler_files(const char *file, int action_flag, HA_CREATE_INFO *create_info) { - char path[FN_REFLEN]; - const char *name; Ndb* ndb; const NDBTAB *tab; const void *data, *pack_data; @@ -5373,7 +5365,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to) int ndb_table_id= orig_tab->getObjectId(); int ndb_table_version= orig_tab->getObjectVersion(); - NDB_SHARE *share= get_share(from, 0, false); + NDB_SHARE *share= get_share(from, 0, FALSE); if (share) { int r= rename_share(share, to); @@ -5527,7 +5519,7 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb, DBUG_PRINT("info", ("Schema distribution table not setup")); DBUG_RETURN(HA_ERR_NO_CONNECTION); } - NDB_SHARE *share= get_share(path, 0, false); + NDB_SHARE *share= get_share(path, 0, FALSE); #endif /* Drop the table from NDB */ @@ -5918,7 +5910,7 @@ int ha_ndbcluster::open(const char *name, int mode, uint 
test_if_locked) Ndb *ndb= get_ndb(); ndb->setDatabaseName(m_dbname); struct Ndb_statistics stat; - res= ndb_get_table_statistics(NULL, false, ndb, m_table, &stat); + res= ndb_get_table_statistics(NULL, FALSE, ndb, m_table, &stat); stats.mean_rec_length= stat.row_size; stats.data_file_length= stat.fragment_memory; stats.records= stat.row_count; @@ -6073,7 +6065,7 @@ int ndbcluster_discover(handlerton *hton, THD* thd, const char *db, ndb->setDatabaseName(db); NDBDICT* dict= ndb->getDictionary(); build_table_filename(key, sizeof(key), db, name, "", 0); - NDB_SHARE *share= get_share(key, 0, false); + NDB_SHARE *share= get_share(key, 0, FALSE); if (share && get_ndb_share_state(share) == NSS_ALTERED) { // Frm has been altered on disk, but not yet written to ndb @@ -6240,7 +6232,6 @@ int ndbcluster_drop_database_impl(const char *path) static void ndbcluster_drop_database(handlerton *hton, char *path) { - THD *thd= current_thd; DBUG_ENTER("ndbcluster_drop_database"); #ifdef HAVE_NDB_BINLOG /* @@ -6257,6 +6248,7 @@ static void ndbcluster_drop_database(handlerton *hton, char *path) ndbcluster_drop_database_impl(path); #ifdef HAVE_NDB_BINLOG char db[FN_REFLEN]; + THD *thd= current_thd; ha_ndbcluster::set_dbname(path, db); ndbcluster_log_schema_op(thd, 0, thd->query, thd->query_length, @@ -6282,16 +6274,17 @@ int ndb_create_table_from_engine(THD *thd, const char *db, */ int ndbcluster_find_all_files(THD *thd) { - DBUG_ENTER("ndbcluster_find_all_files"); Ndb* ndb; char key[FN_REFLEN]; + NDBDICT *dict; + int unhandled, retries= 5, skipped; + DBUG_ENTER("ndbcluster_find_all_files"); if (!(ndb= check_ndb_in_thd(thd))) DBUG_RETURN(HA_ERR_NO_CONNECTION); - NDBDICT *dict= ndb->getDictionary(); + dict= ndb->getDictionary(); - int unhandled, retries= 5, skipped; LINT_INIT(unhandled); LINT_INIT(skipped); do @@ -6361,7 +6354,7 @@ int ndbcluster_find_all_files(THD *thd) } else if (cmp_frm(ndbtab, pack_data, pack_length)) { - NDB_SHARE *share= get_share(key, 0, false); + NDB_SHARE *share= get_share(key, 0, FALSE); if (!share || get_ndb_share_state(share) != NSS_ALTERED) { discover= 1; @@ -6475,12 +6468,12 @@ int ndbcluster_find_files(handlerton *hton, THD *thd, List<char> delete_list; while ((file_name=it++)) { - bool file_on_disk= false; + bool file_on_disk= FALSE; DBUG_PRINT("info", ("%s", file_name)); if (hash_search(&ndb_tables, file_name, strlen(file_name))) { DBUG_PRINT("info", ("%s existed in NDB _and_ on disk ", file_name)); - file_on_disk= true; + file_on_disk= TRUE; } // Check for .ndb file with this name @@ -7033,19 +7026,19 @@ ha_ndbcluster::records_in_range(uint inx, key_range *min_key, { // We must provide approx table rows Uint64 table_rows=0; - Ndb_local_table_statistics *info= m_table_info; - if (create_info->records != ~(ha_rows)0 && info->records != 0) + Ndb_local_table_statistics *ndb_info= m_table_info; + if (ndb_info->records != ~(ha_rows)0 && ndb_info->records != 0) { - table_rows = info->records; - DBUG_PRINT("info", ("use info->records: %llu", table_rows)); + table_rows = ndb_info->records; + DBUG_PRINT("info", ("use info->records: %lu", (ulong) table_rows)); } else { Ndb_statistics stat; - if ((res=ndb_get_table_statistics(this, true, ndb, m_table, &stat)) != 0) + if ((res=ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat))) break; table_rows=stat.row_count; - DBUG_PRINT("info", ("use db row_count: %llu", table_rows)); + DBUG_PRINT("info", ("use db row_count: %lu", (ulong) table_rows)); if (table_rows == 0) { // Problem if autocommit=0 #ifdef 
ndb_get_table_statistics_uses_active_trans @@ -7068,7 +7061,7 @@ ha_ndbcluster::records_in_range(uint inx, key_range *min_key, if ((op->readTuples(NdbOperation::LM_CommittedRead)) == -1) ERR_BREAK(op->getNdbError(), res); const key_range *keys[2]={ min_key, max_key }; - if ((res=set_bounds(op, inx, true, keys)) != 0) + if ((res=set_bounds(op, inx, TRUE, keys)) != 0) break; // Decide if db should be contacted @@ -7203,7 +7196,7 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname, { Ndb_table_guard ndbtab_g(ndb->getDictionary(), tabname); if (ndbtab_g.get_table() == 0 - || ndb_get_table_statistics(NULL, false, ndb, ndbtab_g.get_table(), &stat)) + || ndb_get_table_statistics(NULL, FALSE, ndb, ndbtab_g.get_table(), &stat)) { free_share(&share); DBUG_RETURN(1); @@ -7382,9 +7375,9 @@ static byte *ndbcluster_get_key(NDB_SHARE *share,uint *length, static void print_share(const char* where, NDB_SHARE* share) { fprintf(DBUG_FILE, - "%s %s.%s: use_count: %u, commit_count: %llu\n", + "%s %s.%s: use_count: %u, commit_count: %lu\n", where, share->db, share->table_name, share->use_count, - (long long unsigned int) share->commit_count); + (ulong) share->commit_count); fprintf(DBUG_FILE, " - key: %s, key_length: %d\n", share->key, share->key_length); @@ -7621,7 +7614,6 @@ NDB_SHARE *ndbcluster_get_share(const char *key, TABLE *table, bool create_if_not_exists, bool have_lock) { - THD *thd= current_thd; NDB_SHARE *share; uint length= (uint) strlen(key); DBUG_ENTER("ndbcluster_get_share"); @@ -7951,10 +7943,10 @@ ha_ndbcluster::null_value_index_search(KEY_MULTI_RANGE *ranges, const byte *key= range->start_key.key; uint key_len= range->start_key.length; if (check_null_in_key(key_info, key, key_len)) - DBUG_RETURN(true); + DBUG_RETURN(TRUE); curr += reclength; } - DBUG_RETURN(false); + DBUG_RETURN(FALSE); } int @@ -8067,7 +8059,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, !define_read_attrs(curr, op) && (op->setAbortOption(AO_IgnoreError), TRUE) && (!m_use_partition_function || - (op->setPartitionId(part_spec.start_part), true))) + (op->setPartitionId(part_spec.start_part), TRUE))) curr += reclength; else ERR_RETURN(op ? 
op->getNdbError() : m_active_trans->getNdbError()); @@ -8128,7 +8120,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, const key_range *keys[2]= { &multi_range_curr->start_key, &multi_range_curr->end_key }; - if ((res= set_bounds(scanOp, active_index, false, keys, + if ((res= set_bounds(scanOp, active_index, FALSE, keys, multi_range_curr-ranges))) DBUG_RETURN(res); break; @@ -8250,7 +8242,7 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p) DBUG_MULTI_RANGE(6); // First fetch from cursor DBUG_ASSERT(range_no == -1); - if ((res= m_multi_cursor->nextResult(true))) + if ((res= m_multi_cursor->nextResult(TRUE))) { DBUG_MULTI_RANGE(15); goto close_scan; @@ -8372,7 +8364,6 @@ ha_ndbcluster::update_table_comment( } ndb->setDatabaseName(m_dbname); - NDBDICT* dict= ndb->getDictionary(); const NDBTAB* tab= m_table; DBUG_ASSERT(tab != NULL); @@ -8567,7 +8558,7 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) ndb->setDatabaseName(share->db); Ndb_table_guard ndbtab_g(ndb->getDictionary(), share->table_name); if (ndbtab_g.get_table() && - ndb_get_table_statistics(NULL, false, ndb, + ndb_get_table_statistics(NULL, FALSE, ndb, ndbtab_g.get_table(), &stat) == 0) { char buff[22], buff2[22]; @@ -8947,7 +8938,7 @@ void ndb_serialize_cond(const Item *item, void *arg) type == MYSQL_TYPE_DATETIME) ? (context->expecting_field_result(STRING_RESULT) || context->expecting_field_result(INT_RESULT)) - : true)) && + : TRUE)) && // Bit fields no yet supported in scan filter type != MYSQL_TYPE_BIT && // No BLOB support in scan filter @@ -9607,25 +9598,24 @@ ha_ndbcluster::build_scan_filter_predicate(Ndb_cond * &cond, break; Ndb_item *a= cond->next->ndb_item; Ndb_item *b, *field, *value= NULL; - LINT_INIT(field); switch (cond->ndb_item->argument_count()) { case 1: - field= - (a->type == NDB_FIELD)? a : NULL; + field= (a->type == NDB_FIELD)? a : NULL; break; case 2: if (!cond->next->next) + { + field= NULL; break; + } b= cond->next->next->ndb_item; - value= - (a->type == NDB_VALUE)? a - : (b->type == NDB_VALUE)? b - : NULL; - field= - (a->type == NDB_FIELD)? a - : (b->type == NDB_FIELD)? b - : NULL; + value= ((a->type == NDB_VALUE) ? a : + (b->type == NDB_VALUE) ? b : + NULL); + field= ((a->type == NDB_FIELD) ? a : + (b->type == NDB_FIELD) ? 
b : + NULL); break; default: field= NULL; //Keep compiler happy @@ -10194,8 +10184,8 @@ int ha_ndbcluster::get_default_no_partitions(HA_CREATE_INFO *create_info) ha_rows max_rows, min_rows; if (create_info) { - max_rows= info->max_rows; - min_rows= info->min_rows; + max_rows= create_info->max_rows; + min_rows= create_info->min_rows; } else { @@ -10346,15 +10336,14 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info, { uint16 frag_data[MAX_PARTITIONS]; char *ts_names[MAX_PARTITIONS]; - ulong ts_index= 0, fd_index= 0, i, j; + ulong fd_index= 0, i, j; NDBTAB *tab= (NDBTAB*)tab_par; NDBTAB::FragmentType ftype= NDBTAB::UserDefined; partition_element *part_elem; bool first= TRUE; - uint ts_id, ts_version, part_count= 0, tot_ts_name_len; + uint tot_ts_name_len; List_iterator<partition_element> part_it(part_info->partitions); int error; - char *name_ptr; DBUG_ENTER("ha_ndbcluster::set_up_partition_info"); if (part_info->part_type == HASH_PARTITION && @@ -10468,7 +10457,7 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info, } -bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info, +bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *create_info, uint table_changes) { DBUG_ENTER("ha_ndbcluster::check_if_incompatible_data"); @@ -10532,70 +10521,72 @@ bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info, /* Check that row format didn't change */ if ((create_info->used_fields & HA_CREATE_USED_AUTO) && - get_row_type() != info->row_type) + get_row_type() != create_info->row_type) DBUG_RETURN(COMPATIBLE_DATA_NO); DBUG_RETURN(COMPATIBLE_DATA_YES); } -bool set_up_tablespace(st_alter_tablespace *info, +bool set_up_tablespace(st_alter_tablespace *alter_info, NdbDictionary::Tablespace *ndb_ts) { - ndb_ts->setName(info->tablespace_name); - ndb_ts->setExtentSize(info->extent_size); - ndb_ts->setDefaultLogfileGroup(info->logfile_group_name); - return false; + ndb_ts->setName(alter_info->tablespace_name); + ndb_ts->setExtentSize(alter_info->extent_size); + ndb_ts->setDefaultLogfileGroup(alter_info->logfile_group_name); + return FALSE; } -bool set_up_datafile(st_alter_tablespace *info, +bool set_up_datafile(st_alter_tablespace *alter_info, NdbDictionary::Datafile *ndb_df) { - if (info->max_size > 0) + if (alter_info->max_size > 0) { my_error(ER_TABLESPACE_AUTO_EXTEND_ERROR, MYF(0)); - return true; + return TRUE; } - ndb_df->setPath(info->data_file_name); - ndb_df->setSize(info->initial_size); - ndb_df->setTablespace(info->tablespace_name); - return false; + ndb_df->setPath(alter_info->data_file_name); + ndb_df->setSize(alter_info->initial_size); + ndb_df->setTablespace(alter_info->tablespace_name); + return FALSE; } -bool set_up_logfile_group(st_alter_tablespace *info, +bool set_up_logfile_group(st_alter_tablespace *alter_info, NdbDictionary::LogfileGroup *ndb_lg) { - ndb_lg->setName(info->logfile_group_name); - ndb_lg->setUndoBufferSize(info->undo_buffer_size); - return false; + ndb_lg->setName(alter_info->logfile_group_name); + ndb_lg->setUndoBufferSize(alter_info->undo_buffer_size); + return FALSE; } -bool set_up_undofile(st_alter_tablespace *info, +bool set_up_undofile(st_alter_tablespace *alter_info, NdbDictionary::Undofile *ndb_uf) { - ndb_uf->setPath(info->undo_file_name); - ndb_uf->setSize(info->initial_size); - ndb_uf->setLogfileGroup(info->logfile_group_name); - return false; + ndb_uf->setPath(alter_info->undo_file_name); + ndb_uf->setSize(alter_info->initial_size); + ndb_uf->setLogfileGroup(alter_info->logfile_group_name); + 
return FALSE; } -int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace *info) +int ndbcluster_alter_tablespace(handlerton *hton, + THD* thd, st_alter_tablespace *alter_info) { + int is_tablespace= 0; + NdbError err; + NDBDICT *dict; + int error; + const char *errmsg; + Ndb *ndb; DBUG_ENTER("ha_ndbcluster::alter_tablespace"); + LINT_INIT(errmsg); - int is_tablespace= 0; - Ndb *ndb= check_ndb_in_thd(thd); + ndb= check_ndb_in_thd(thd); if (ndb == NULL) { DBUG_RETURN(HA_ERR_NO_CONNECTION); } + dict= ndb->getDictionary(); - NdbError err; - NDBDICT *dict= ndb->getDictionary(); - int error; - const char * errmsg; - LINT_INIT(errmsg); - - switch (info->ts_cmd_type){ + switch (alter_info->ts_cmd_type){ case (CREATE_TABLESPACE): { error= ER_CREATE_FILEGROUP_FAILED; @@ -10603,11 +10594,11 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace NdbDictionary::Tablespace ndb_ts; NdbDictionary::Datafile ndb_df; NdbDictionary::ObjectId objid; - if (set_up_tablespace(info, &ndb_ts)) + if (set_up_tablespace(alter_info, &ndb_ts)) { DBUG_RETURN(1); } - if (set_up_datafile(info, &ndb_df)) + if (set_up_datafile(alter_info, &ndb_df)) { DBUG_RETURN(1); } @@ -10617,7 +10608,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace DBUG_PRINT("error", ("createTablespace returned %d", error)); goto ndberror; } - DBUG_PRINT("info", ("Successfully created Tablespace")); + DBUG_PRINT("alter_info", ("Successfully created Tablespace")); errmsg= "DATAFILE"; if (dict->createDatafile(ndb_df)) { @@ -10639,10 +10630,10 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace case (ALTER_TABLESPACE): { error= ER_ALTER_FILEGROUP_FAILED; - if (info->ts_alter_tablespace_type == ALTER_TABLESPACE_ADD_FILE) + if (alter_info->ts_alter_tablespace_type == ALTER_TABLESPACE_ADD_FILE) { NdbDictionary::Datafile ndb_df; - if (set_up_datafile(info, &ndb_df)) + if (set_up_datafile(alter_info, &ndb_df)) { DBUG_RETURN(1); } @@ -10652,14 +10643,14 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace goto ndberror; } } - else if(info->ts_alter_tablespace_type == ALTER_TABLESPACE_DROP_FILE) + else if(alter_info->ts_alter_tablespace_type == ALTER_TABLESPACE_DROP_FILE) { - NdbDictionary::Tablespace ts= dict->getTablespace(info->tablespace_name); - NdbDictionary::Datafile df= dict->getDatafile(0, info->data_file_name); + NdbDictionary::Tablespace ts= dict->getTablespace(alter_info->tablespace_name); + NdbDictionary::Datafile df= dict->getDatafile(0, alter_info->data_file_name); NdbDictionary::ObjectId objid; df.getTablespaceId(&objid); if (ts.getObjectId() == objid.getObjectId() && - strcmp(df.getPath(), info->data_file_name) == 0) + strcmp(df.getPath(), alter_info->data_file_name) == 0) { errmsg= " DROP DATAFILE"; if (dict->dropDatafile(df)) @@ -10677,7 +10668,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace else { DBUG_PRINT("error", ("Unsupported alter tablespace: %d", - info->ts_alter_tablespace_type)); + alter_info->ts_alter_tablespace_type)); DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); } is_tablespace= 1; @@ -10689,14 +10680,14 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace NdbDictionary::LogfileGroup ndb_lg; NdbDictionary::Undofile ndb_uf; NdbDictionary::ObjectId objid; - if (info->undo_file_name == NULL) + if (alter_info->undo_file_name == NULL) { /* REDO files in LOGFILE GROUP not supported yet */ DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); } - 
if (set_up_logfile_group(info, &ndb_lg)) + if (set_up_logfile_group(alter_info, &ndb_lg)) { DBUG_RETURN(1); } @@ -10705,8 +10696,8 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace { goto ndberror; } - DBUG_PRINT("info", ("Successfully created Logfile Group")); - if (set_up_undofile(info, &ndb_uf)) + DBUG_PRINT("alter_info", ("Successfully created Logfile Group")); + if (set_up_undofile(alter_info, &ndb_uf)) { DBUG_RETURN(1); } @@ -10728,7 +10719,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace case (ALTER_LOGFILE_GROUP): { error= ER_ALTER_FILEGROUP_FAILED; - if (info->undo_file_name == NULL) + if (alter_info->undo_file_name == NULL) { /* REDO files in LOGFILE GROUP not supported yet @@ -10736,7 +10727,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); } NdbDictionary::Undofile ndb_uf; - if (set_up_undofile(info, &ndb_uf)) + if (set_up_undofile(alter_info, &ndb_uf)) { DBUG_RETURN(1); } @@ -10751,7 +10742,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace { error= ER_DROP_FILEGROUP_FAILED; errmsg= "TABLESPACE"; - if (dict->dropTablespace(dict->getTablespace(info->tablespace_name))) + if (dict->dropTablespace(dict->getTablespace(alter_info->tablespace_name))) { goto ndberror; } @@ -10762,7 +10753,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace { error= ER_DROP_FILEGROUP_FAILED; errmsg= "LOGFILE GROUP"; - if (dict->dropLogfileGroup(dict->getLogfileGroup(info->logfile_group_name))) + if (dict->dropLogfileGroup(dict->getLogfileGroup(alter_info->logfile_group_name))) { goto ndberror; } @@ -10785,13 +10776,13 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace if (is_tablespace) ndbcluster_log_schema_op(thd, 0, thd->query, thd->query_length, - "", info->tablespace_name, + "", alter_info->tablespace_name, 0, 0, SOT_TABLESPACE, 0, 0, 0); else ndbcluster_log_schema_op(thd, 0, thd->query, thd->query_length, - "", info->logfile_group_name, + "", alter_info->logfile_group_name, 0, 0, SOT_LOGFILE_GROUP, 0, 0, 0); #endif @@ -10812,7 +10803,6 @@ bool ha_ndbcluster::get_no_parts(const char *name, uint *no_parts) { Ndb *ndb; NDBDICT *dict; - const NDBTAB *tab; int err; DBUG_ENTER("ha_ndbcluster::get_no_parts"); LINT_INIT(err); diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 38b640d5f55..fc7d933be7d 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -1131,7 +1131,7 @@ ndbcluster_update_slock(THD *thd, ndb_error= this_error; break; } -end: + if (ndb_error) push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index feb08f474b7..0e9da3eb22f 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -584,7 +584,6 @@ int ha_partition::drop_partitions(const char *path) List_iterator<partition_element> part_it(m_part_info->partitions); char part_name_buff[FN_REFLEN]; uint no_parts= m_part_info->partitions.elements; - uint part_count= 0; uint no_subparts= m_part_info->no_subparts; uint i= 0; uint name_variant; @@ -1075,7 +1074,6 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, uint no_parts= m_part_info->no_parts; uint no_subparts= m_part_info->no_subparts; uint i= 0; - LEX *lex= thd->lex; int error; DBUG_ENTER("ha_partition::handle_opt_partitions"); DBUG_PRINT("enter", ("all_parts %u, flag= %u", 
all_parts, flag)); @@ -1087,11 +1085,9 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, { if (m_is_sub_partitioned) { - List_iterator<partition_element> sub_it(part_elem->subpartitions); uint j= 0, part; do { - partition_element *sub_elem= sub_it++; part= i * no_subparts + j; DBUG_PRINT("info", ("Optimize subpartition %u", part)); @@ -1136,7 +1132,6 @@ int ha_partition::prepare_new_partition(TABLE *table, { int error; bool create_flag= FALSE; - bool open_flag= FALSE; DBUG_ENTER("prepare_new_partition"); if ((error= set_up_table_before_create(table, part_name, create_info, @@ -1245,7 +1240,6 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, handler **new_file_array; int error= 1; bool first; - bool copy_parts= FALSE; uint temp_partitions= m_part_info->temp_partitions.elements; THD *thd= current_thd; DBUG_ENTER("ha_partition::change_partitions"); @@ -2061,7 +2055,6 @@ bool ha_partition::new_handlers_from_part_info(MEM_ROOT *mem_root) partition_element *part_elem; uint alloc_len= (m_tot_parts + 1) * sizeof(handler*); List_iterator_fast <partition_element> part_it(m_part_info->partitions); - THD *thd= current_thd; DBUG_ENTER("ha_partition::new_handlers_from_part_info"); if (!(m_file= (handler **) alloc_root(mem_root, alloc_len))) diff --git a/sql/handler.cc b/sql/handler.cc index 3c15313d59b..c9a58238877 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -44,10 +44,6 @@ static handlerton *installed_htons[128]; KEY_CREATE_INFO default_key_create_info= { HA_KEY_ALG_UNDEF, 0, {NullS,0} }; -/* static functions defined in this file */ - -static handler *create_default(TABLE_SHARE *table, MEM_ROOT *mem_root); - /* number of entries in handlertons[] */ ulong total_ha= 0; /* number of storage engines (from handlertons[]) that support 2pc */ @@ -164,11 +160,13 @@ const char *ha_get_storage_engine(enum legacy_db_type db_type) } +#ifdef NOT_USED static handler *create_default(TABLE_SHARE *table, MEM_ROOT *mem_root) { handlerton *hton= ha_default_handlerton(current_thd); return (hton && hton->create) ? 
hton->create(hton, table, mem_root) : NULL; } +#endif handlerton *ha_resolve_by_legacy_type(THD *thd, enum legacy_db_type db_type) @@ -3363,7 +3361,6 @@ static my_bool exts_handlerton(THD *unused, st_plugin_int *plugin, TYPELIB *ha_known_exts(void) { - MEM_ROOT *mem_root= current_thd->mem_root; if (!known_extensions.type_names || mysys_usage_id != known_extensions_id) { List<char> found_exts; diff --git a/sql/item.cc b/sql/item.cc index 433fcd1d078..7b81a4499e7 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -4096,7 +4096,7 @@ enum_field_types Item::string_field_type() const f_type= MYSQL_TYPE_LONG_BLOB; else if (max_length >= 65536) f_type= MYSQL_TYPE_MEDIUM_BLOB; - return type; + return f_type; } diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index cd6fd333d61..792c828ff29 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -1830,7 +1830,6 @@ void Item_func_case::fix_length_and_dec() { Item **agg; uint nagg; - THD *thd= current_thd; uint found_types= 0; if (!(agg= (Item**) sql_alloc(sizeof(Item*)*(ncases+1)))) return; diff --git a/sql/item_func.cc b/sql/item_func.cc index 1139db6ad72..4693191470c 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -997,7 +997,7 @@ String *Item_decimal_typecast::val_str(String *str) my_decimal tmp_buf, *tmp= val_decimal(&tmp_buf); if (null_value) return NULL; - my_decimal2string(E_DEC_FATAL_ERROR, &tmp_buf, 0, 0, 0, str); + my_decimal2string(E_DEC_FATAL_ERROR, tmp, 0, 0, 0, str); return str; } @@ -4860,7 +4860,7 @@ longlong Item_func_bit_xor::val_int() thd Thread handler var_type global / session name Name of base or system variable - component Component. + component Component NOTES If component.str = 0 then the variable name is in 'name' diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc index 3da68cf43c2..9321992e566 100644 --- a/sql/item_xmlfunc.cc +++ b/sql/item_xmlfunc.cc @@ -577,7 +577,6 @@ String * Item_nodeset_func_union::val_nodeset(String *nodeset) both_str.alloc(numnodes); char *both= (char*) both_str.ptr(); bzero((void*)both, numnodes); - uint pos= 0; MY_XPATH_FLT *flt; fltbeg= (MY_XPATH_FLT*) s0->ptr(); @@ -1484,7 +1483,6 @@ static int my_xpath_parse_AxisName(MY_XPATH *xpath) static int my_xpath_parse_LocationPath(MY_XPATH *xpath); static int my_xpath_parse_AbsoluteLocationPath(MY_XPATH *xpath); static int my_xpath_parse_RelativeLocationPath(MY_XPATH *xpath); -static int my_xpath_parse_AbbreviatedAbsoluteLocationPath(MY_XPATH *xpath); static int my_xpath_parse_AbbreviatedStep(MY_XPATH *xpath); static int my_xpath_parse_Step(MY_XPATH *xpath); static int my_xpath_parse_AxisSpecifier(MY_XPATH *xpath); @@ -1503,7 +1501,6 @@ static int my_xpath_parse_RelationalExpr(MY_XPATH *xpath); static int my_xpath_parse_AndExpr(MY_XPATH *xpath); static int my_xpath_parse_EqualityExpr(MY_XPATH *xpath); static int my_xpath_parse_VariableReference(MY_XPATH *xpath); -static int my_xpath_parse_slash_opt_slash(MY_XPATH *xpath); /* @@ -2699,7 +2696,6 @@ String *Item_func_xml_update::val_str(String *str) } MY_XML_NODE *nodebeg= (MY_XML_NODE*) pxml.ptr(); - MY_XML_NODE *nodeend= (MY_XML_NODE*) pxml.ptr() + pxml.length(); MY_XPATH_FLT *fltbeg= (MY_XPATH_FLT*) nodeset->ptr(); MY_XPATH_FLT *fltend= (MY_XPATH_FLT*) (nodeset->ptr() + nodeset->length()); diff --git a/sql/log.cc b/sql/log.cc index 5cffa2829ea..5e9ebfcb902 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -147,8 +147,7 @@ public: */ void truncate(my_off_t pos) { - DBUG_PRINT("info", ("truncating to position %lu", pos)); - DBUG_PRINT("info", ("before_stmt_pos=%lu", pos)); + DBUG_PRINT("info", 
("truncating to position %lu", (ulong) pos)); delete pending(); set_pending(0); reinit_io_cache(&trans_log, WRITE_CACHE, pos, 0, 0); @@ -909,7 +908,7 @@ bool LOGGER::slow_log_print(THD *thd, const char *query, uint query_length, my_time_t current_time; Security_context *sctx= thd->security_ctx; - uint message_buff_len= 0, user_host_len= 0; + uint user_host_len= 0; longlong query_time= 0, lock_time= 0; /* @@ -1551,11 +1550,9 @@ static int binlog_prepare(handlerton *hton, THD *thd, bool all) static int binlog_commit(handlerton *hton, THD *thd, bool all) { - int error= 0; DBUG_ENTER("binlog_commit"); binlog_trx_data *const trx_data= (binlog_trx_data*) thd->ha_data[binlog_hton->slot]; - IO_CACHE *trans_log= &trx_data->trans_log; DBUG_ASSERT(mysql_bin_log.is_open()); if (all && trx_data->empty()) @@ -1584,7 +1581,6 @@ static int binlog_rollback(handlerton *hton, THD *thd, bool all) int error=0; binlog_trx_data *const trx_data= (binlog_trx_data*) thd->ha_data[binlog_hton->slot]; - IO_CACHE *trans_log= &trx_data->trans_log; DBUG_ASSERT(mysql_bin_log.is_open()); if (trx_data->empty()) { @@ -1647,9 +1643,6 @@ static int binlog_savepoint_set(handlerton *hton, THD *thd, void *sv) static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv) { DBUG_ENTER("binlog_savepoint_rollback"); - binlog_trx_data *const trx_data= - (binlog_trx_data*) thd->ha_data[binlog_hton->slot]; - IO_CACHE *trans_log= &trx_data->trans_log; DBUG_ASSERT(mysql_bin_log.is_open()); /* @@ -1660,7 +1653,7 @@ static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv) if (unlikely(thd->options & (OPTION_STATUS_NO_TRANS_UPDATE | OPTION_KEEP_LOG))) { - int const error= + int error= thd->binlog_query(THD::STMT_QUERY_TYPE, thd->query, thd->query_length, TRUE, FALSE); DBUG_RETURN(error); @@ -1669,6 +1662,7 @@ static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv) DBUG_RETURN(0); } + int check_binlog_magic(IO_CACHE* log, const char** errmsg) { char magic[4]; @@ -1689,6 +1683,7 @@ int check_binlog_magic(IO_CACHE* log, const char** errmsg) return 0; } + File open_binlog(IO_CACHE *log, const char *log_file_name, const char **errmsg) { File file; @@ -2195,7 +2190,6 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time, if (!(specialflag & SPECIAL_SHORT_LOG_FORMAT)) { - Security_context *sctx= thd->security_ctx; if (current_time != last_time) { last_time= current_time; @@ -2434,7 +2428,6 @@ bool MYSQL_BIN_LOG::open(const char *log_name, bool null_created_arg) { File file= -1; - int open_flags = O_CREAT | O_BINARY; DBUG_ENTER("MYSQL_BIN_LOG::open"); DBUG_PRINT("enter",("log_type: %d",(int) log_type_arg)); @@ -3245,7 +3238,6 @@ void MYSQL_BIN_LOG::new_file_impl(bool need_lock) We log the whole file name for log file as the user may decide to change base names at some point. */ - THD *thd = current_thd; /* may be 0 if we are reacting to SIGHUP */ Rotate_log_event r(new_name+dirname_length(new_name), 0, LOG_EVENT_OFFSET, 0); r.write(&log_file); @@ -3481,10 +3473,10 @@ int THD::binlog_flush_transaction_cache() { DBUG_ENTER("binlog_flush_transaction_cache"); binlog_trx_data *trx_data= (binlog_trx_data*) ha_data[binlog_hton->slot]; - DBUG_PRINT("enter", ("trx_data=0x%lu", trx_data)); + DBUG_PRINT("enter", ("trx_data: 0x%lx", (ulong) trx_data)); if (trx_data) - DBUG_PRINT("enter", ("trx_data->before_stmt_pos=%u", - trx_data->before_stmt_pos)); + DBUG_PRINT("enter", ("trx_data->before_stmt_pos: %lu", + (ulong) trx_data->before_stmt_pos)); /* Write the transaction cache to the binary log. 
We don't flush and @@ -3982,8 +3974,6 @@ bool MYSQL_BIN_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event) if (likely(is_open())) // Should always be true { - uint length; - /* We only bother to write to the binary log if there is anything to write. @@ -4023,9 +4013,6 @@ bool MYSQL_BIN_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event) if (commit_event && commit_event->write(&log_file)) goto err; -#ifndef DBUG_OFF - DBUG_skip_commit: -#endif if (flush_and_sync()) goto err; DBUG_EXECUTE_IF("half_binlogged_transaction", abort();); diff --git a/sql/log.h b/sql/log.h index 61db7052f75..80aa4b20ee6 100644 --- a/sql/log.h +++ b/sql/log.h @@ -33,7 +33,7 @@ class TC_LOG virtual int open(const char *opt_name)=0; virtual void close()=0; - virtual int log(THD *thd, my_xid xid)=0; + virtual int log_xid(THD *thd, my_xid xid)=0; virtual void unlog(ulong cookie, my_xid xid)=0; }; @@ -43,7 +43,7 @@ public: TC_LOG_DUMMY() {} int open(const char *opt_name) { return 0; } void close() { } - int log(THD *thd, my_xid xid) { return 1; } + int log_xid(THD *thd, my_xid xid) { return 1; } void unlog(ulong cookie, my_xid xid) { } }; @@ -88,7 +88,7 @@ class TC_LOG_MMAP: public TC_LOG TC_LOG_MMAP(): inited(0) {} int open(const char *opt_name); void close(); - int log(THD *thd, my_xid xid); + int log_xid(THD *thd, my_xid xid); void unlog(ulong cookie, my_xid xid); int recover(); @@ -287,7 +287,7 @@ public: int open(const char *opt_name); void close(); - int log(THD *thd, my_xid xid); + int log_xid(THD *thd, my_xid xid); void unlog(ulong cookie, my_xid xid); int recover(IO_CACHE *log, Format_description_log_event *fdle); #if !defined(MYSQL_CLIENT) diff --git a/sql/log_event.cc b/sql/log_event.cc index 520ced3671d..82cfc0cd3a2 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -139,22 +139,6 @@ static void pretty_print_str(IO_CACHE* cache, char* str, int len) } #endif /* MYSQL_CLIENT */ -#ifdef HAVE_purify -static void -valgrind_check_mem(void *ptr, size_t len) -{ - static volatile uchar dummy; - for (volatile uchar *p= (uchar*) ptr ; p != (uchar*) ptr + len ; ++p) - { - int const c = *p; - if (c < 128) - dummy= c + 1; - else - dummy = c - 1; - } -} -#endif - #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) static void clear_all_errors(THD *thd, struct st_relay_log_info *rli) @@ -5483,7 +5467,6 @@ int Rows_log_event::do_add_row_data(byte *const row_data, if (static_cast<my_size_t>(m_rows_end - m_rows_cur) < length) { my_size_t const block_size= 1024; - my_ptrdiff_t const old_alloc= m_rows_end - m_rows_buf; my_ptrdiff_t const cur_size= m_rows_cur - m_rows_buf; my_ptrdiff_t const new_alloc= block_size * ((cur_size + length) / block_size + block_size - 1); diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 86ff11d725e..4c5ccce57d6 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -722,7 +722,6 @@ pthread_handler_t handle_slave(void *arg); static ulong find_bit_type(const char *x, TYPELIB *bit_lib); static void clean_up(bool print_message); static int test_if_case_insensitive(const char *dir_name); -static void end_ssl(); #ifndef EMBEDDED_LIBRARY static void start_signal_handler(void); @@ -730,6 +729,7 @@ static void close_server_sock(); static void clean_up_mutexes(void); static void wait_for_signal_thread_to_end(void); static void create_pid_file(); +static void end_ssl(); #endif @@ -1236,7 +1236,9 @@ void clean_up(bool print_message) #endif delete binlog_filter; delete rpl_filter; +#ifndef EMBEDDED_LIBRARY end_ssl(); +#endif vio_end(); #ifdef USE_REGEX my_regex_end(); diff --git 
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 1576cd7dfa2..2dedf41e2c3 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -2420,8 +2420,6 @@ static int find_used_partitions_imerge(PART_PRUNE_PARAM *ppar,
 static int find_used_partitions_imerge_list(PART_PRUNE_PARAM *ppar,
                                             List<SEL_IMERGE> &merges);
 static void mark_all_partitions_as_used(partition_info *part_info);
-static uint32 part_num_to_part_id_range(PART_PRUNE_PARAM* prune_par,
-                                        uint32 num);

 #ifndef DBUG_OFF
 static void print_partitioning_index(KEY_PART *parts, KEY_PART *parts_end);
@@ -4682,8 +4680,7 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
                         param->table->key_info[keynr].name, found_read_time,
                         read_time));

-    if (read_time > found_read_time && found_records != HA_POS_ERROR
-        /*|| read_time == DBL_MAX*/ )
+    if (read_time > found_read_time && found_records != HA_POS_ERROR)
     {
       read_time= found_read_time;
       best_records= found_records;
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index 76630e8530b..a7f9bd413c6 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -920,7 +920,6 @@ bool partition_info::set_up_charset_field_preps()
       if (field_is_partition_charset(field))
       {
         char *field_buf;
-        CHARSET_INFO *cs= ((Field_str*)field)->charset();
         size= field->pack_length();
         if (!(field_buf= sql_calloc(size)))
           goto error;
diff --git a/sql/protocol.cc b/sql/protocol.cc
index b0f3d036b73..05e98c68e4e 100644
--- a/sql/protocol.cc
+++ b/sql/protocol.cc
@@ -937,15 +937,15 @@ bool Protocol_simple::store(Field *field)
   char buff[MAX_FIELD_WIDTH];
   String str(buff,sizeof(buff), &my_charset_bin);
   CHARSET_INFO *tocs= this->thd->variables.character_set_results;
+#ifndef DBUG_OFF
   TABLE *table= field->table;
-#ifdef DBUG_OFF
   my_bitmap_map *old_map= 0;
   if (table->file)
     old_map= dbug_tmp_use_all_columns(table, table->read_set);
 #endif

   field->val_str(&str);
-#ifdef DBUG_OFF
+#ifndef DBUG_OFF
   if (old_map)
     dbug_tmp_restore_column_map(table->read_set, old_map);
 #endif
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 2ef0473dc13..36161ce92e2 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -3996,8 +3996,6 @@ sys_var_event_scheduler::update(THD *thd, set_var *var)
   DBUG_PRINT("info", ("new_value: %d", (int) var->save_result.ulong_value));

-  Item_result var_type= var->value->result_type();
-
   if (var->save_result.ulong_value == Events::EVENTS_ON)
     res= Events::get_instance()->start_execution_of_events();
   else if (var->save_result.ulong_value == Events::EVENTS_OFF)
diff --git a/sql/set_var.h b/sql/set_var.h
index befb4a9d700..abf0ece03bf 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -502,7 +502,7 @@ public:
   sys_var_thd_dbug(const char *name_arg) :sys_var_thd(name_arg) {}
   bool check_update_type(Item_result type) { return type != STRING_RESULT; }
   bool check(THD *thd, set_var *var);
-  SHOW_TYPE type() { return SHOW_CHAR; }
+  SHOW_TYPE show_type() { return SHOW_CHAR; }
   bool update(THD *thd, set_var *var);
   void set_default(THD *thd, enum_var_type type) { DBUG_POP(); }
   byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *b);
@@ -557,7 +557,7 @@ public:
   bool check_type(enum_var_type type) { return type != OPT_GLOBAL; }
   /*
     We can't retrieve the value of this, so we don't have to define
-    type() or value_ptr()
+    show_type() or value_ptr()
   */
 };

@@ -803,7 +803,7 @@ public:
   byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
   bool check_update_type(Item_result type) { return 0; }
   void set_default(THD *thd, enum_var_type type);
-  SHOW_TYPE type() { return SHOW_CHAR; }
+  SHOW_TYPE show_type() { return SHOW_CHAR; }
 };

@@ -852,7 +852,7 @@ public:
   bool check_default(enum_var_type type) { return 1; }
   bool check_type(enum_var_type type) { return type != OPT_GLOBAL; }
   bool check_update_type(Item_result type) { return 1; }
-  SHOW_TYPE type() { return SHOW_CHAR; }
+  SHOW_TYPE show_type() { return SHOW_CHAR; }
   bool is_readonly() const { return 1; }
 };

@@ -951,7 +951,7 @@ public:
     sys_var_long_ptr(name_arg, NULL, NULL) {};
   bool update(THD *thd, set_var *var);
   byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base);
-  SHOW_TYPE type() { return SHOW_CHAR; }
+  SHOW_TYPE show_type() { return SHOW_CHAR; }
   bool check(THD *thd, set_var *var);
   bool check_update_type(Item_result type)
   {
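The protocol.cc hunk above fixes an inverted preprocessor guard rather than a cosmetic warning: the column-bitmap bookkeeping around field->val_str() is debug-only code, so it must be compiled when DBUG_OFF is not defined. With the old #ifdef DBUG_OFF it was compiled only into release builds, exactly where the dbug_* helpers are meaningless. The intended shape, reduced (names as in the hunk):

    #ifndef DBUG_OFF                    /* debug builds only */
      TABLE *table= field->table;
      my_bitmap_map *old_map= 0;
      if (table->file)
        old_map= dbug_tmp_use_all_columns(table, table->read_set);
    #endif

      field->val_str(&str);

    #ifndef DBUG_OFF
      if (old_map)
        dbug_tmp_restore_column_map(table->read_set, old_map);
    #endif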
diff --git a/sql/slave.cc b/sql/slave.cc
index 414b5afaf46..fb1f71e646f 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -73,6 +73,7 @@ static int request_table_dump(MYSQL* mysql, const char* db, const char* table);
 static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
                                   const char* table_name, bool overwrite);
 static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi);
+static Log_event* next_event(RELAY_LOG_INFO* rli);

 /*
   Find out which replications threads are running
diff --git a/sql/slave.h b/sql/slave.h
index 43eb71be601..bc039f6eb75 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -111,8 +111,6 @@ extern ulonglong relay_log_space_limit;
 #define MYSQL_SLAVE_RUN_NOT_CONNECT 1
 #define MYSQL_SLAVE_RUN_CONNECT 2

-static Log_event* next_event(RELAY_LOG_INFO* rli);
-
 #define RPL_LOG_NAME (rli->group_master_log_name[0] ? rli->group_master_log_name :\
  "FIRST")
 #define IO_RPL_LOG_NAME (mi->master_log_name[0] ? mi->master_log_name :\
diff --git a/sql/sp.cc b/sql/sp.cc
index 301756dc229..3a7bea6a4b1 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -988,7 +988,7 @@ sp_find_routine(THD *thd, int type, sp_name *name, sp_cache **cp,
     DBUG_PRINT("info", ("found: 0x%lx", (ulong)sp));
     if (sp->m_first_free_instance)
     {
-      DBUG_PRINT("info", ("first free: 0x%lx, level: %lu, flags %x",
+      DBUG_PRINT("info", ("first free: 0x%lx level: %lu flags %x",
                           (ulong)sp->m_first_free_instance,
                           sp->m_first_free_instance->m_recursion_level,
                           sp->m_first_free_instance->m_flags));
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 2ea5849603a..b77d0cc9a0c 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -1367,7 +1367,6 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount,
   MEM_ROOT call_mem_root;
   Query_arena call_arena(&call_mem_root, Query_arena::INITIALIZED_FOR_SP);
   Query_arena backup_arena;
-
   DBUG_ENTER("sp_head::execute_function");
   DBUG_PRINT("info", ("function %s", m_name.str));
diff --git a/sql/spatial.cc b/sql/spatial.cc
index 62d0c7310e5..6cadb0f3aad 100644
--- a/sql/spatial.cc
+++ b/sql/spatial.cc
@@ -928,6 +928,8 @@ int Gis_polygon::centroid_xy(double *x, double *y) const
   n_linear_rings= uint4korr(data);
   data+= 4;

+  DBUG_ASSERT(n_linear_rings > 0);
+
   while (n_linear_rings--)
   {
     uint32 n_points, org_n_points;
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index cfe96e9b5cd..d5f81168be3 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -2409,11 +2409,12 @@ THD::binlog_prepare_pending_rows_event(TABLE*, uint32, MY_BITMAP const*,
                                        my_size_t colcnt, my_size_t, bool,
                                        Update_rows_log_event *);
 #endif
+
+#ifdef NOT_USED
 static char const*
 field_type_name(enum_field_types type)
 {
-  switch (type)
-  {
+  switch (type) {
   case MYSQL_TYPE_DECIMAL:
     return "MYSQL_TYPE_DECIMAL";
   case MYSQL_TYPE_TINY:
@@ -2471,6 +2472,7 @@ field_type_name(enum_field_types type)
   }
   return "Unknown";
 }
+#endif

 my_size_t
 THD::max_row_length_blob(TABLE *table, const byte *data) const
@@ -2651,8 +2653,6 @@ int THD::binlog_write_row(TABLE* table, bool is_trans,
     Pack records into format for transfer. We are allocating more
     memory than needed, but that doesn't matter.
   */
-  int error= 0;
-
   Row_data_memory memory(table, max_row_length(table, record));
   if (!memory.has_memory())
     return HA_ERR_OUT_OF_MEM;
@@ -2679,7 +2679,6 @@ int THD::binlog_update_row(TABLE* table, bool is_trans,
 {
   DBUG_ASSERT(current_stmt_binlog_row_based && mysql_bin_log.is_open());

-  int error= 0;
   my_size_t const before_maxlen = max_row_length(table, before_record);
   my_size_t const after_maxlen  = max_row_length(table, after_record);
@@ -2729,8 +2728,6 @@ int THD::binlog_delete_row(TABLE* table, bool is_trans,
     Pack records into format for transfer. We are allocating more
     memory than needed, but that doesn't matter.
   */
-  int error= 0;
-
   Row_data_memory memory(table, max_row_length(table, record));
   if (unlikely(!memory.has_memory()))
     return HA_ERR_OUT_OF_MEM;
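field_type_name() in sql_class.cc has no callers left, but instead of deleting it the patch fences it with #ifdef NOT_USED: the lookup table stays in the tree for future debugging while the "defined but not used" warning disappears. The same idiom in isolation (the function contents here are invented):

    #ifdef NOT_USED                 /* kept for reference, never compiled */
    static const char *status_name(int s)
    {
      switch (s) {
      case 0:  return "OK";
      case 1:  return "BUSY";
      default: return "UNKNOWN";
      }
    }
    #endif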
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index fe80837c909..63a8a948062 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -318,7 +318,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
     runs without --log-update or --log-bin).
   */
   bool log_on= ((thd->options & OPTION_BIN_LOG) ||
-                (!(thd->security_ctx->master_access & SUPER_ACL));
+                (!(thd->security_ctx->master_access & SUPER_ACL)));
 #endif
   thr_lock_type lock_type = table_list->lock_type;
   Item *unused_conds= 0;
@@ -3090,8 +3090,9 @@ void select_create::send_error(uint errcode,const char *err)
              ("Current statement %s row-based",
               thd->current_stmt_binlog_row_based ? "is" : "is NOT"));
   DBUG_PRINT("info",
-             ("Current table (at 0x%lu) %s a temporary (or non-existant) table",
-              table,
+             ("Current table (at 0x%lx) %s a temporary (or non-existing) "
+              "table",
+              (ulong) table,
               table && !table->s->tmp_table ? "is NOT" : "is"));
   DBUG_PRINT("info",
              ("Table %s prior to executing this statement",
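The mysql_insert() hunk restores a closing parenthesis that the merge had dropped, so the expression did not even parse. Reduced to a self-contained sketch (the variables stand in for the THD fields used in the hunk):

    unsigned long options= 0, master_access= 0;        /* illustrative */
    const unsigned long OPTION_BIN_LOG= 1UL << 0;
    const unsigned long SUPER_ACL=      1UL << 1;

    /* Both operands of || are fully parenthesized, as in the fixed line */
    bool log_on= ((options & OPTION_BIN_LOG) ||
                  (!(master_access & SUPER_ACL)));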
"is NOT" : "is")); DBUG_PRINT("info", ("Table %s prior to executing this statement", diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 37a5d9c3743..3f734dbbdac 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1900,6 +1900,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, /* Locked closure of all tables */ TABLE_LIST table_list; LEX_STRING conv_name; + uint dummy; /* used as fields initializator */ lex_start(thd, 0, 0); @@ -4041,7 +4042,6 @@ end_with_restore_list: lex->spname->m_name); else { - uint affected= 1; if (!(res= Events::get_instance()->drop_event(thd, lex->spname->m_db, lex->spname->m_name, diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 0b6d841a337..923e851c0ff 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -2002,7 +2002,6 @@ char *generate_partition_syntax(partition_info *part_info, { uint i,j, tot_no_parts, no_subparts; partition_element *part_elem; - partition_element *save_part_elem= NULL; ulonglong buffer_length; char path[FN_REFLEN]; int err= 0; @@ -5369,7 +5368,6 @@ static bool write_log_dropped_partitions(ALTER_PARTITION_PARAM_TYPE *lpt, List_iterator<partition_element> temp_it(part_info->temp_partitions); uint no_temp_partitions= part_info->temp_partitions.elements; uint no_elements= part_info->partitions.elements; - uint i= 0; DBUG_ENTER("write_log_dropped_partitions"); ddl_log_entry.action_type= DDL_LOG_DELETE_ACTION; @@ -5742,7 +5740,6 @@ static void write_log_completed(ALTER_PARTITION_PARAM_TYPE *lpt, bool dont_crash) { partition_info *part_info= lpt->part_info; - uint count_loop= 0; DDL_LOG_MEMORY_ENTRY *log_entry= part_info->exec_log_entry; DBUG_ENTER("write_log_completed"); @@ -6016,8 +6013,6 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, uint fast_alter_partition) { /* Set-up struct used to write frm files */ - ulonglong copied= 0; - ulonglong deleted= 0; partition_info *part_info= table->part_info; ALTER_PARTITION_PARAM_TYPE lpt_obj; ALTER_PARTITION_PARAM_TYPE *lpt= &lpt_obj; diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 1d711b7835c..7c9cd483526 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -44,12 +44,15 @@ plugin_type_init plugin_type_deinitialize[MYSQL_MAX_PLUGIN_TYPE_NUM]= 0,ha_finalize_handlerton,0,0 }; +#ifdef HAVE_DLOPEN static const char *plugin_interface_version_sym= "_mysql_plugin_interface_version_"; static const char *sizeof_st_plugin_sym= "_mysql_sizeof_struct_st_plugin_"; static const char *plugin_declarations_sym= "_mysql_plugin_declarations_"; static int min_plugin_interface_version= MYSQL_PLUGIN_INTERFACE_VERSION & ~0xFF; +#endif + /* Note that 'int version' must be the first field of every plugin sub-structure (plugin->info). 
diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc
index 0ec7c54487a..5fa97dc5c2b 100644
--- a/sql/sql_servers.cc
+++ b/sql/sql_servers.cc
@@ -32,7 +32,6 @@ uint servers_cache_initialised=FALSE;
 static uint servers_version=0;
 static MEM_ROOT mem;
 static rw_lock_t THR_LOCK_servers;
-static bool initialized=0;

 static byte *servers_cache_get_key(FOREIGN_SERVER *server, uint *length,
                                    my_bool not_used __attribute__((unused)))
@@ -329,24 +328,22 @@ my_bool get_server_from_table_to_cache(TABLE *table)

 my_bool server_exists_in_table(THD *thd, LEX_SERVER_OPTIONS *server_options)
 {
-  byte server_key[MAX_KEY_LENGTH];
   int result= 1;
   int error= 0;
   TABLE_LIST tables;
   TABLE *table;
-
   DBUG_ENTER("server_exists");

   bzero((char*) &tables, sizeof(tables));
   tables.db= (char*) "mysql";
   tables.alias= tables.table_name= (char*) "servers";

-  table->use_all_columns();
-
   /* need to open before acquiring THR_LOCK_plugin or it will deadlock */
   if (! (table= open_ltable(thd, &tables, TL_WRITE)))
     DBUG_RETURN(TRUE);

+  table->use_all_columns();
+
   rw_wrlock(&THR_LOCK_servers);
   VOID(pthread_mutex_lock(&servers_cache_mutex));
@@ -393,7 +390,6 @@ my_bool server_exists_in_table(THD *thd, LEX_SERVER_OPTIONS *server_options)

 int insert_server(THD *thd, FOREIGN_SERVER *server)
 {
-  byte server_key[MAX_KEY_LENGTH];
   int error= 0;
   TABLE_LIST tables;
   TABLE *table;
@@ -608,7 +604,6 @@ int insert_server_record(TABLE *table, FOREIGN_SERVER *server)

 int drop_server(THD *thd, LEX_SERVER_OPTIONS *server_options)
 {
-  byte server_key[MAX_KEY_LENGTH];
   int error= 0;
   TABLE_LIST tables;
   TABLE *table;
@@ -1208,7 +1203,7 @@ void servers_free(bool end)
 FOREIGN_SERVER *get_server_by_name(const char *server_name)
 {
   ulong error_num=0;
-  uint i, server_name_length;
+  uint server_name_length;
   FOREIGN_SERVER *server= 0;
   DBUG_ENTER("get_server_by_name");
   DBUG_PRINT("info", ("server_name %s", server_name));
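The server_exists_in_table() change is more than warning cleanup: table->use_all_columns() used to run before open_ltable() had assigned table, i.e. through an uninitialized pointer. The call now follows the successful open. The corrected ordering, reduced to its skeleton:

    TABLE *table;
    if (!(table= open_ltable(thd, &tables, TL_WRITE)))
      DBUG_RETURN(TRUE);          /* open failed: nothing to dereference */
    table->use_all_columns();     /* safe: table is a valid handle now */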
diff --git a/sql/sql_servers.h b/sql/sql_servers.h
index 36fb4d07d1b..23b8cefd5bb 100644
--- a/sql/sql_servers.h
+++ b/sql/sql_servers.h
@@ -26,7 +26,6 @@ typedef struct st_federated_server

 /* cache handlers */
 my_bool servers_init(bool dont_read_server_table);
-static my_bool servers_load(THD *thd, TABLE_LIST *tables);
 my_bool servers_reload(THD *thd);
 my_bool get_server_from_table_to_cache(TABLE *table);
 void servers_free(bool end=0);
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 0feaa4d8b32..45fe961d3d6 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -139,7 +139,6 @@ static my_bool show_plugins(THD *thd, st_plugin_int *plugin,
 {
   TABLE *table= (TABLE*) arg;
   struct st_mysql_plugin *plug= plugin->plugin;
-  Protocol *protocol= thd->protocol;
   CHARSET_INFO *cs= system_charset_info;
   char version_buf[20];

@@ -152,8 +151,7 @@ static my_bool show_plugins(THD *thd, st_plugin_int *plugin,
                          cs);

-  switch (plugin->state)
-  {
+  switch (plugin->state) {
   /* case PLUGIN_IS_FREED: does not happen */
   case PLUGIN_IS_DELETED:
     table->field[2]->store(STRING_WITH_LEN("DELETED"), cs);
@@ -1375,6 +1373,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
   }
   if (table->s->key_block_size)
   {
+    char *end;
     packet->append(STRING_WITH_LEN(" KEY_BLOCK_SIZE="));
     end= longlong10_to_str(table->s->key_block_size, buff, 10);
     packet->append(buff, (uint) (end - buff));
@@ -4026,7 +4025,6 @@ static int get_schema_partitions_record(THD *thd, struct st_table_list *tables,
   partition_element *part_elem;
   List_iterator<partition_element> part_it(part_info->partitions);
   uint part_pos= 0, part_id= 0;
-  uint no_parts= part_info->no_parts;

   restore_record(table, s->default_values);
   table->field[1]->store(base_name, strlen(base_name), cs);
@@ -4196,6 +4194,7 @@ static int get_schema_partitions_record(THD *thd, struct st_table_list *tables,
 }

+#ifdef NOT_USED
 static interval_type get_real_interval_type(interval_type i_type)
 {
   switch (i_type) {
@@ -4239,6 +4238,8 @@ static interval_type get_real_interval_type(interval_type i_type)
   return INTERVAL_SECOND;
 }

+#endif
+
 /*
   Loads an event from mysql.event and copies it's data to a row of
@@ -5033,7 +5034,6 @@ static my_bool run_hton_fill_schema_files(THD *thd, st_plugin_int *plugin,

 int fill_schema_files(THD *thd, TABLE_LIST *tables, COND *cond)
 {
-  TABLE *table= tables->table;
   DBUG_ENTER("fill_schema_files");

   struct run_hton_fill_schema_files_args args;
@@ -5091,7 +5091,7 @@ int fill_schema_status(THD *thd, SHOW_VAR *variables,

     if (show_type == SHOW_SYS)
     {
-      show_type= ((sys_var*) value)->type();
+      show_type= ((sys_var*) value)->show_type();
       value= (char*) ((sys_var*) value)->value_ptr(thd, OPT_GLOBAL,
                                                    &null_lex_str);
     }
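Renaming sys_var::type() to show_type() makes the SHOW-related accessor unambiguous (the old name was easy to confuse with other uses of type() in the tree), and the call site in fill_schema_status() is updated in the same patch. A reduced stand-in for the renamed accessor (the enum here is a simplified invention, not the server's definition):

    enum SHOW_TYPE { SHOW_CHAR, SHOW_LONG };    /* illustrative subset */

    class sys_var_demo
    {
    public:
      virtual SHOW_TYPE show_type() { return SHOW_CHAR; }  /* was: type() */
      virtual ~sys_var_demo() {}
    };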
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index bb1edba51b2..bc847669d95 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -598,7 +598,6 @@ bool read_ddl_log_entry(uint read_entry, DDL_LOG_ENTRY *ddl_log_entry)

 static bool init_ddl_log()
 {
-  bool error= FALSE;
   char file_name[FN_REFLEN];
   DBUG_ENTER("init_ddl_log");
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 4b99f887721..0f957208ab0 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -3738,7 +3738,6 @@ part_definition:
           LEX *lex= Lex;
           partition_info *part_info= lex->part_info;
           partition_element *p_elem= new partition_element();
-          uint part_id= part_info->partitions.elements;

           if (!p_elem || part_info->partitions.push_back(p_elem))
           {
@@ -3890,7 +3889,6 @@ part_bit_expr:
         bit_expr
         {
           Item *part_expr= $1;
-          int part_expression_ok= 1;
           THD *thd= YYTHD;
           LEX *lex= thd->lex;
           Name_resolution_context *context= &lex->current_select->context;
@@ -5071,9 +5069,9 @@ alter:
       | ALTER SERVER_SYM ident_or_text OPTIONS_SYM '(' server_options_list ')'
         {
          LEX *lex= Lex;
-         Lex->sql_command= SQLCOM_ALTER_SERVER;
-         Lex->server_options.server_name= $3.str;
-         Lex->server_options.server_name_length= $3.length;
+         lex->sql_command= SQLCOM_ALTER_SERVER;
+         lex->server_options.server_name= $3.str;
+         lex->server_options.server_name_length= $3.length;
        }
       ;

@@ -5745,9 +5743,9 @@ db_to_db:
        ident TO_SYM ident
        {
          LEX *lex=Lex;
-         if (Lex->db_list.push_back((LEX_STRING*)
+         if (lex->db_list.push_back((LEX_STRING*)
              sql_memdup(&$1, sizeof(LEX_STRING))) ||
-             Lex->db_list.push_back((LEX_STRING*)
+             lex->db_list.push_back((LEX_STRING*)
              sql_memdup(&$3, sizeof(LEX_STRING))))
            YYABORT;
        };
@@ -6673,7 +6671,6 @@ function_call_generic:
           udf_expr_list ')'
           {
             THD *thd= YYTHD;
-            LEX *lex= thd->lex;
             Create_func *builder;
             Item *item= NULL;
diff --git a/sql/table.cc b/sql/table.cc
index fcaea159248..0e03c79c564 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -1261,7 +1261,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,

   share->column_bitmap_size= bitmap_buffer_size(share->fields);
   if (!(bitmaps= (my_bitmap_map*) alloc_root(&share->mem_root,
-                                             share->column_bitmap_size)))
+                                            share->column_bitmap_size)))
     goto err;
   bitmap_init(&share->all_set, bitmaps, share->fields, FALSE);
   bitmap_set_all(&share->all_set);
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index 2d8b02ac1e3..9e74869f169 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -366,16 +366,17 @@ int ha_archive::free_share()
 {
   int rc= 0;
   DBUG_ENTER("ha_archive::free_share");
-  DBUG_PRINT("ha_archive", ("archive table %.*s has %d open handles on entrance",
-                            share_to_free->table_name_length, share_to_free->table_name,
-                            share_to_free->use_count));
+  DBUG_PRINT("ha_archive",
+             ("archive table %.*s has %d open handles on entrance",
+              share->table_name_length, share->table_name,
+              share->use_count));

   pthread_mutex_lock(&archive_mutex);
   if (!--share->use_count)
   {
-    hash_delete(&archive_open_tables, (byte*) share_to_free);
-    thr_lock_delete(&share_to_free->lock);
-    VOID(pthread_mutex_destroy(&share_to_free->mutex));
+    hash_delete(&archive_open_tables, (byte*) share);
+    thr_lock_delete(&share->lock);
+    VOID(pthread_mutex_destroy(&share->mutex));
     /*
       We need to make sure we don't reset the crashed state.
       If we open a crashed file, wee need to close it as crashed unless
@@ -465,7 +466,7 @@ int ha_archive::open(const char *name, int mode, uint open_options)

   if (!record_buffer)
   {
-    free_share(share);
+    free_share();
     DBUG_RETURN(HA_ERR_OUT_OF_MEM);
   }

@@ -783,7 +784,7 @@ int ha_archive::write_row(byte *buf)
     {
       if (!memcmp(read_buf + mfield->offset(record),
                   table->next_number_field->ptr,
-                  mfield->max_length()))
+                  mfield->max_display_length()))
       {
         rc= HA_ERR_FOUND_DUPP_KEY;
         goto error;
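ha_archive::free_share() now works on the handler's own share member instead of a share_to_free argument, keeping the reference counting in one place. Underneath it is the usual mutex-guarded counted release; a reduced sketch with simplified types (the real code also unhooks the share from a hash and destroys its locks):

    #include <pthread.h>
    #include <stdlib.h>

    struct Share { int use_count; };

    static pthread_mutex_t share_mutex= PTHREAD_MUTEX_INITIALIZER;

    static void release_share(Share *share)
    {
      pthread_mutex_lock(&share_mutex);
      if (!--share->use_count)
        free(share);              /* last user: tear the share down */
      pthread_mutex_unlock(&share_mutex);
    }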
diff --git a/storage/example/ha_example.cc b/storage/example/ha_example.cc
index 12ca91f0a6f..bde6c41d777 100644
--- a/storage/example/ha_example.cc
+++ b/storage/example/ha_example.cc
@@ -13,39 +13,44 @@
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */

-/** @file ha_example.cc
+/**
+  @file ha_example.cc

-  @brief
+  @brief
   The ha_example engine is a stubbed storage engine for example purposes only;
   it does nothing at this point. Its purpose is to provide a source
   code illustration of how to begin writing new storage engines; see also
   /storage/example/ha_example.h.

-  @details
-  ha_example will let you create/open/delete tables, but nothing further
-  (for example, indexes are not supported nor can data be stored in the
-  table). Use this example as a template for implementing the same functionality
-  in your own storage engine. You can enable the example storage engine in
-  your build by doing the following during your build process:<br>
-  ./configure --with-example-storage-engine
+  @details
+  ha_example will let you create/open/delete tables, but
+  nothing further (for example, indexes are not supported nor can data
+  be stored in the table). Use this example as a template for
+  implementing the same functionality in your own storage engine. You
+  can enable the example storage engine in your build by doing the
+  following during your build process:<br> ./configure
+  --with-example-storage-engine

   Once this is done, MySQL will let you create tables with:<br>
   CREATE TABLE <table name> (...) ENGINE=EXAMPLE;

-  The example storage engine is set up to use table locks. It implements an
-  example "SHARE" that is inserted into a hash by table name. You can use this
-  to store information of state that any example handler object will be able to
-  see when it is using that table.
+  The example storage engine is set up to use table locks. It
+  implements an example "SHARE" that is inserted into a hash by table
+  name. You can use this to store information of state that any
+  example handler object will be able to see when it is using that
+  table.

   Please read the object definition in ha_example.h before reading the rest
   of this file.

-  @note
-  When you create an EXAMPLE table, the MySQL Server creates a table .frm (format)
-  file in the database directory, using the table name as the file name as is
-  customary with MySQL. No other files are created. To get an idea of what occurs,
-  here is an example select that would do a scan of an entire table:
-  @code
+  @note
+  When you create an EXAMPLE table, the MySQL Server creates a table .frm
+  (format) file in the database directory, using the table name as the file
+  name as is customary with MySQL. No other files are created. To get an idea
+  of what occurs, here is an example select that would do a scan of an entire
+  table:
+
+  @code
   ha_example::store_lock
   ha_example::external_lock
   ha_example::info
@@ -66,13 +71,13 @@
   ha_example::external_lock
   ha_example::extra
   ENUM HA_EXTRA_RESET   Reset database to after open
-  @endcode
+  @endcode

-  Here you see that the example storage engine has 9 rows called before rnd_next
-  signals that it has reached the end of its data. Also note that the table in
-  question was already opened; had it not been open, a call to ha_example::open()
-  would also have been necessary. Calls to ha_example::extra() are hints as to
-  what will be occuring to the request.
+  Here you see that the example storage engine has 9 rows called before
+  rnd_next signals that it has reached the end of its data. Also note that
+  the table in question was already opened; had it not been open, a call to
+  ha_example::open() would also have been necessary. Calls to
+  ha_example::extra() are hints as to what will be occuring to the request.

   Happy coding!<br>
     -Brian
@@ -90,18 +95,25 @@
 static handler *example_create_handler(handlerton *hton,
                                        TABLE_SHARE *table,
                                        MEM_ROOT *mem_root);
-static int example_init_func();

 handlerton *example_hton;

 /* Variables for example share methods */
-static HASH example_open_tables; ///< Hash used to track the number of open tables; variable for example share methods
-pthread_mutex_t example_mutex;   ///< This is the mutex used to init the hash; variable for example share methods
-static int example_init= 0;      ///< This variable is used to check the init state of hash; variable for example share methods

-/** @brief
+/*
+  Hash used to track the number of open tables; variable for example share
+  methods
+*/
+static HASH example_open_tables;
+
+/* The mutex used to init the hash; variable for example share methods */
+pthread_mutex_t example_mutex;
+
+/**
+  @brief
   Function we use in the creation of our hash to get key.
 */
+
 static byte* example_get_key(EXAMPLE_SHARE *share,uint *length,
                              my_bool not_used __attribute__((unused)))
 {
@@ -109,6 +121,7 @@ static byte* example_get_key(EXAMPLE_SHARE *share,uint *length,
   return (byte*) share->table_name;
 }

+
 static int example_init_func(void *p)
 {
   DBUG_ENTER("example_init_func");
@@ -126,6 +139,7 @@ static int example_init_func(void *p)
   DBUG_RETURN(0);
 }

+
 static int example_done_func(void *p)
 {
   int error= 0;
@@ -139,11 +153,15 @@ static int example_done_func(void *p)
   DBUG_RETURN(0);
 }

-/** @brief
-  Example of simple lock controls. The "share" it creates is a structure we will
-  pass to each example handler. Do you have to have one of these? Well, you have
-  pieces that are used for locking, and they are needed to function.
+
+/**
+  @brief
+  Example of simple lock controls. The "share" it creates is a
+  structure we will pass to each example handler. Do you have to have
+  one of these? Well, you have pieces that are used for locking, and
+  they are needed to function.
 */
+
 static EXAMPLE_SHARE *get_share(const char *table_name, TABLE *table)
 {
   EXAMPLE_SHARE *share;
@@ -188,10 +206,13 @@ error:
   return NULL;
 }

-/** @brief
+
+/**
+  @brief
   Free lock controls. We call this whenever we close a table. If the table had
   the last reference to the share, then we free memory associated with it.
 */
+
 static int free_share(EXAMPLE_SHARE *share)
 {
   pthread_mutex_lock(&example_mutex);
@@ -218,15 +239,19 @@ ha_example::ha_example(handlerton *hton, TABLE_SHARE *table_arg)
   :handler(hton, table_arg)
 {}

-/** @brief
-  If frm_error() is called then we will use this to determine the file extensions
-  that exist for the storage engine. This is also used by the default rename_table
-  and delete_table method in handler.cc.
-  @see
+/**
+  @brief
+  If frm_error() is called then we will use this to determine
+  the file extensions that exist for the storage engine. This is also
+  used by the default rename_table and delete_table method in
+  handler.cc.
+
+  @see
   rename_table method in handler.cc and
   delete_table method in handler.cc
 */
+
 static const char *ha_example_exts[] = {
   NullS
 };
@@ -236,10 +261,12 @@ const char **ha_example::bas_ext() const
   return ha_example_exts;
 }

-/** @brief
+
+/**
+  @brief
   Used for opening tables. The name will be the name of the file.

-  @details
+  @details
   A table is opened when it needs to be opened; e.g. when a request comes in
   for a SELECT on the table (tables are not open and closed for each request,
   they are cached).
@@ -247,9 +274,10 @@ const char **ha_example::bas_ext() const
   Called from handler.cc by handler::ha_open(). The server opens all tables by
   calling ha_open() which then calls the handler specific open().

-  @see
+  @see
   handler::ha_open() in handler.cc
 */
+
 int ha_example::open(const char *name, int mode, uint test_if_locked)
 {
   DBUG_ENTER("ha_example::open");
@@ -261,27 +289,32 @@ int ha_example::open(const char *name, int mode, uint test_if_locked)
   DBUG_RETURN(0);
 }

-/** @brief
+
+/**
+  @brief
   Closes a table. We call the free_share() function to free any resources
   that we have allocated in the "shared" structure.

-  @details
+  @details
   Called from sql_base.cc, sql_select.cc, and table.cc. In sql_select.cc it is
-  only used to close up temporary tables or during the process where a temporary
-  table is converted over to being a myisam table.
+  only used to close up temporary tables or during the process where a
+  temporary table is converted over to being a myisam table.

   For sql_base.cc look at close_data_tables().

-  @see
+  @see
   sql_base.cc, sql_select.cc and table.cc
 */
+
 int ha_example::close(void)
 {
   DBUG_ENTER("ha_example::close");
   DBUG_RETURN(free_share(share));
 }

-/** @brief
+
+/**
+  @brief
   write_row() inserts a row. No extra() hint is given currently if a bulk load
   is happening. buf() is a byte array of data. You can use the field
   information to extract the data from the native byte array type.
@@ -309,13 +342,16 @@
   item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc,
   sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc and sql_update.cc
 */
+
 int ha_example::write_row(byte * buf)
 {
   DBUG_ENTER("ha_example::write_row");
   DBUG_RETURN(HA_ERR_WRONG_COMMAND);
 }

-/** @brief
+
+/**
+  @brief
   Yes, update_row() does what you expect, it updates a row. old_data will have
   the previous row record in it, while new_data will have the newest data in it.
   Keep in mind that the server can do updates based on ordering if an ORDER BY
@@ -343,34 +379,41 @@ int ha_example::update_row(const byte * old_data, byte * new_data)
   DBUG_RETURN(HA_ERR_WRONG_COMMAND);
 }

-/** @brief
+
+/**
+  @brief
   This will delete a row. buf will contain a copy of the row to be deleted.
   The server will call this right after the current row has been called (from
   either a previous rnd_nexT() or index call).
-  @details
+  @details
   If you keep a pointer to the last row or can access a primary key it will
   make doing the deletion quite a bit easier. Keep in mind that the server does
   not guarantee consecutive deletions. ORDER BY clauses can be used.

-  Called in sql_acl.cc and sql_udf.cc to manage internal table information.
-  Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select it is
-  used for removing duplicates while in insert it is used for REPLACE calls.
+  Called in sql_acl.cc and sql_udf.cc to manage internal table
+  information. Called in sql_delete.cc, sql_insert.cc, and
+  sql_select.cc. In sql_select it is used for removing duplicates
+  while in insert it is used for REPLACE calls.

-  @see
+  @see
   sql_acl.cc, sql_udf.cc, sql_delete.cc, sql_insert.cc and sql_select.cc
 */
+
 int ha_example::delete_row(const byte * buf)
 {
   DBUG_ENTER("ha_example::delete_row");
   DBUG_RETURN(HA_ERR_WRONG_COMMAND);
 }

-/** @brief
+
+/**
+  @brief
   Positions an index cursor to the index specified in the handle. Fetches the
   row if available. If the key value is null, begin at the first key of the
   index.
 */
+
 int ha_example::index_read(byte * buf, const byte * key,
                            uint key_len __attribute__((unused)),
                            enum ha_rkey_function find_flag
@@ -380,25 +423,33 @@
   DBUG_RETURN(HA_ERR_WRONG_COMMAND);
 }

-/** @brief
+
+/**
+  @brief
   Used to read forward through the index.
 */
+
 int ha_example::index_next(byte * buf)
 {
   DBUG_ENTER("ha_example::index_next");
   DBUG_RETURN(HA_ERR_WRONG_COMMAND);
 }

-/** @brief
+
+/**
+  @brief
   Used to read backwards through the index.
 */
+
 int ha_example::index_prev(byte * buf)
 {
   DBUG_ENTER("ha_example::index_prev");
   DBUG_RETURN(HA_ERR_WRONG_COMMAND);
 }

-/** @brief
+
+/**
+  @brief
   index_first() asks for the first key in the index.

   @details
@@ -413,7 +464,9 @@ int ha_example::index_first(byte * buf)
   DBUG_RETURN(HA_ERR_WRONG_COMMAND);
 }

-/** @brief
+
+/**
+  @brief
   index_last() asks for the last key in the index.

   @details
@@ -428,7 +481,9 @@
   DBUG_RETURN(HA_ERR_WRONG_COMMAND);
 }

-/** @brief
+
+/**
+  @brief
   rnd_init() is called when the system wants the storage engine to do a table
   scan. See the example in the introduction at the top of this file to see when
   rnd_init() is called.
@@ -452,7 +507,9 @@ int ha_example::rnd_end()
   DBUG_RETURN(0);
 }

-/** @brief
+
+/**
+  @brief
   This is called for each row of the table scan. When you run out of records
   you should return HA_ERR_END_OF_FILE. Fill buff up with the row information.
   The Field structure for the table is the key to getting data into buf
@@ -471,7 +528,9 @@ int ha_example::rnd_next(byte *buf)
   DBUG_RETURN(HA_ERR_END_OF_FILE);
 }

-/** @brief
+
+/**
+  @brief
   position() is called after each call to rnd_next() if the data needs
   to be ordered. You can do something like the following to store
   the position:
@@ -497,7 +556,9 @@ void ha_example::position(const byte *record)
   DBUG_VOID_RETURN;
 }

-/** @brief
+
+/**
+  @brief
   This is like rnd_next, but you are given a position to use
   to determine the row. The position will be of the type that you stored in
   ref. You can use ha_get_ptr(pos,ref_length) to retrieve whatever key
@@ -515,7 +576,9 @@ int ha_example::rnd_pos(byte * buf, byte *pos)
   DBUG_RETURN(HA_ERR_WRONG_COMMAND);
 }

-/** @brief
+
+/**
+  @brief
   ::info() is used to return information to the optimizer. See my_base.h for
   the complete description.
@@ -558,7 +621,9 @@ int ha_example::info(uint flag)
   DBUG_RETURN(0);
 }

-/** @brief
+
+/**
+  @brief
   extra() is called whenever the server wishes to send a hint to
   the storage engine. The myisam engine implements the most hints.
   ha_innodb.cc has the most exhaustive list of these hints.
@@ -572,7 +637,9 @@ int ha_example::extra(enum ha_extra_function operation)
   DBUG_RETURN(0);
 }

-/** @brief
+
+/**
+  @brief
   Used to delete all rows in a table, including cases of truncate and cases where
   the optimizer realizes that all rows will be removed as a result of an SQL
   statement.
@@ -596,7 +663,9 @@ int ha_example::delete_all_rows()
   DBUG_RETURN(HA_ERR_WRONG_COMMAND);
 }

-/** @brief
+
+/**
+  @brief
   This create a lock on the table. If you are implementing a storage engine
   that can handle transacations look at ha_berkely.cc to see how you will
   want to go about doing this. Otherwise you should consider calling flock()
@@ -618,7 +687,9 @@ int ha_example::external_lock(THD *thd, int lock_type)
   DBUG_RETURN(0);
 }

-/** @brief
+
+/**
+  @brief
   The idea with handler::store_lock() is: The statement decides which locks
   should be needed for the table. For updates/deletes/inserts we get WRITE
   locks, for SELECT... we get read locks.
@@ -659,7 +730,9 @@ THR_LOCK_DATA **ha_example::store_lock(THD *thd,
   return to;
 }

-/** @brief
+
+/**
+  @brief
   Used to delete a table. By the time delete_table() has been called all
   opened references to this table will have been closed (and your globally
   shared references released). The variable name will just be the name of
@@ -684,17 +757,19 @@ int ha_example::delete_table(const char *name)
   DBUG_RETURN(0);
 }

-/** @brief
+
+/**
+  @brief
   Renames a table from one name to another via an alter table call.

-  @details
+  @details
   If you do not implement this, the default rename_table() is called from
   handler.cc and it will delete all files with the file extensions returned
   by bas_ext().

   Called from sql_table.cc by mysql_rename_table().

-  @see
+  @see
   mysql_rename_table() in sql_table.cc
 */
 int ha_example::rename_table(const char * from, const char * to)
@@ -703,16 +778,18 @@ int ha_example::rename_table(const char * from, const char * to)
   DBUG_RETURN(HA_ERR_WRONG_COMMAND);
 }

-/** @brief
+
+/**
+  @brief
   Given a starting key and an ending key, estimate the number of rows that
   will exist between the two keys.

-  @details
+  @details
   end_key may be empty, in which case determine if start_key matches any rows.

   Called from opt_range.cc by check_quick_keys().

-  @see
+  @see
   check_quick_keys() in opt_range.cc
 */
 ha_rows ha_example::records_in_range(uint inx, key_range *min_key,
@@ -722,29 +799,38 @@ ha_rows ha_example::records_in_range(uint inx, key_range *min_key,
   DBUG_RETURN(10);                         // low number to force index usage
 }

-/** @brief
+
+/**
+  @brief
   create() is called to create a database. The variable name will have the name
   of the table.

-  @details
-  When create() is called you do not need to worry about opening the table. Also,
-  the .frm file will have already been created so adjusting create_info is not
-  necessary. You can overwrite the .frm file at this point if you wish to change
-  the table definition, but there are no methods currently provided for doing so.
+  @details
+  When create() is called you do not need to worry about
+  opening the table. Also, the .frm file will have already been
+  created so adjusting create_info is not necessary. You can overwrite
+  the .frm file at this point if you wish to change the table
+  definition, but there are no methods currently provided for doing
+  so.

   Called from handle.cc by ha_create_table().

-  @see
+  @see
   ha_create_table() in handle.cc
 */
+
 int ha_example::create(const char *name, TABLE *table_arg,
                        HA_CREATE_INFO *create_info)
 {
   DBUG_ENTER("ha_example::create");
-  /* This is not implemented but we want someone to be able to see that it works. */
+  /*
+    This is not implemented but we want someone to be able to see that it
+    works.
+  */
   DBUG_RETURN(0);
 }

+
 struct st_mysql_storage_engine example_storage_engine=
 { MYSQL_HANDLERTON_INTERFACE_VERSION };
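The stubbed methods above all share one skeleton, worth seeing in isolation: each entry point brackets its body with DBUG_ENTER()/DBUG_RETURN() so the debug trace stays balanced on every exit path, and an unimplemented operation reports HA_ERR_WRONG_COMMAND instead of pretending to succeed. Reduced to the bare pattern (as in the hunks above):

    int ha_example::index_next(byte *buf)
    {
      DBUG_ENTER("ha_example::index_next");    /* push a trace frame */
      DBUG_RETURN(HA_ERR_WRONG_COMMAND);       /* pop it on the way out */
    }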
@@ -756,11 +842,11 @@ mysql_declare_plugin(example)
   "Brian Aker, MySQL AB",
   "Example storage engine",
   PLUGIN_LICENSE_GPL,
-  example_init_func,                            /* Plugin Init */
-  example_done_func,                            /* Plugin Deinit */
+  example_init_func, /* Plugin Init */
+  example_done_func, /* Plugin Deinit */
   0x0001 /* 0.1 */,
-  NULL,                                         /* status variables */
-  NULL,                                         /* system variables */
-  NULL                                          /* config options */
+  NULL, /* status variables */
+  NULL, /* system variables */
+  NULL /* config options */
 }
 mysql_declare_plugin_end;
diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc
index 83bf37bfaef..14ffe5da984 100644
--- a/storage/federated/ha_federated.cc
+++ b/storage/federated/ha_federated.cc
@@ -362,7 +362,6 @@ static handler *federated_create_handler(handlerton *hton,
                                          MEM_ROOT *mem_root);
 static int federated_commit(handlerton *hton, THD *thd, bool all);
 static int federated_rollback(handlerton *hton, THD *thd, bool all);
-static int federated_db_init(void);

 /* Federated storage engine handlerton */

@@ -389,7 +388,7 @@ static byte *federated_get_key(FEDERATED_SHARE *share, uint *length,
   SYNOPSIS
     federated_db_init()
-    void
+    p		Handlerton

   RETURN
     FALSE       OK
@@ -573,9 +572,6 @@ int get_connection(FEDERATED_SHARE *share)
   int error_num= ER_FOREIGN_SERVER_DOESNT_EXIST;
   char error_buffer[FEDERATED_QUERY_BUFFER_SIZE];
   FOREIGN_SERVER *server;
-  MYSQL *mysql_conn= 0;
-  MYSQL_RES *result= 0;
-  MYSQL_ROW row= 0;
   DBUG_ENTER("ha_federated::get_connection");

   if (!(server=
@@ -2321,7 +2317,7 @@ int ha_federated::read_range_first(const key_range *start_key,
   sql_query.append(share->select_query);
   create_where_from_key(&sql_query,
                         &table->key_info[active_index],
-                        start_key, end_key, 0, eq_range);
+                        start_key, end_key, 0, eq_range_arg);

   if (stored_result)
   {
@@ -3044,4 +3040,3 @@ mysql_declare_plugin(federated)
   NULL                        /* config options                  */
 }
 mysql_declare_plugin_end;
-
diff --git a/storage/myisam/mi_rkey.c b/storage/myisam/mi_rkey.c
index 6323c95ffd7..917ba381504 100644
--- a/storage/myisam/mi_rkey.c
+++ b/storage/myisam/mi_rkey.c
@@ -30,7 +30,7 @@ int mi_rkey(MI_INFO *info, byte *buf, int inx, const byte *key, uint key_len,
   HA_KEYSEG *last_used_keyseg;
   uint pack_key_length, use_key_length, nextflag;
   DBUG_ENTER("mi_rkey");
-  DBUG_PRINT("enter", ("base: %lx  buf: %lx  inx: %d  search_flag: %d",
+  DBUG_PRINT("enter", ("base: 0x%lx  buf: 0x%lx  inx: %d  search_flag: %d",
                        (long) info, (long) buf, inx, search_flag));

   if ((inx = _mi_check_index(info,inx)) < 0)
diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc
index 184925c380e..093a85ee841 100644
--- a/storage/myisammrg/ha_myisammrg.cc
+++ b/storage/myisammrg/ha_myisammrg.cc
@@ -30,9 +30,6 @@
 ** MyISAM MERGE tables
 *****************************************************************************/

-static handler *myisammrg_create_handler(TABLE_SHARE *table,
-                                         MEM_ROOT *mem_root);
-
 static handler *myisammrg_create_handler(handlerton *hton,
                                          TABLE_SHARE *table,
                                          MEM_ROOT *mem_root)
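In ha_federated::read_range_first() the flag passed to create_where_from_key() is renamed from eq_range to eq_range_arg; the handler base class keeps an eq_range member of its own, so the old parameter name shadowed it and invited mix-ups. The hazard, sketched with reduced, invented types:

    struct handler_demo                     /* illustrative stand-in */
    {
      bool eq_range;                        /* member set by the SQL layer */
      void read_range(bool eq_range_arg)    /* renamed: no shadowing */
      {
        eq_range= eq_range_arg;             /* both names stay distinct */
      }
    };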
diff --git a/storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp b/storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp
index 3a8356432ac..ed7e3929414 100644
--- a/storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp
+++ b/storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp
@@ -35,8 +35,6 @@ public:
   STATIC_CONST( DataLength = 2 );
   STATIC_CONST( TextLength = DataLength * 8 );  // hex digits

-  ArbitTicket() {}
-
   inline void clear() {
     data[0] = 0;
     data[1] = 0;
@@ -148,7 +146,6 @@ public:
   ArbitSignalData() {}
   STATIC_CONST( SignalLength = 3 + ArbitTicket::DataLength + NodeBitmask::Size );

-  ArbitSignalData() {}
   inline bool match(ArbitSignalData& aData) const {
     return
       node == aData.node &&
diff --git a/storage/ndb/include/util/SimpleProperties.hpp b/storage/ndb/include/util/SimpleProperties.hpp
index f5f241c6e81..18acf689415 100644
--- a/storage/ndb/include/util/SimpleProperties.hpp
+++ b/storage/ndb/include/util/SimpleProperties.hpp
@@ -171,7 +171,6 @@ public:
     bool add(Uint16 key, Uint32 value);
     bool add(Uint16 key, const char * value);
     bool add(Uint16 key, const void* value, int len);
-    Writer() {}
   protected:
     virtual ~Writer() {}
     virtual bool reset() = 0;
diff --git a/storage/ndb/src/common/debugger/EventLogger.cpp b/storage/ndb/src/common/debugger/EventLogger.cpp
index b0f5627dd98..f5498d88887 100644
--- a/storage/ndb/src/common/debugger/EventLogger.cpp
+++ b/storage/ndb/src/common/debugger/EventLogger.cpp
@@ -950,6 +950,7 @@ EventLogger::close()
   removeAllHandlers();
 }

+#ifdef NOT_USED
 static NdbOut&
 operator<<(NdbOut& out, const LogLevel & ll)
 {
@@ -959,6 +960,7 @@ operator<<(NdbOut& out, const LogLevel & ll)
   out << "]";
   return out;
 }
+#endif

 int
 EventLoggerBase::event_lookup(int eventType,
diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
index e8f2f87077a..42d4e49b14b 100644
--- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp
@@ -614,6 +614,8 @@ Suma::removeSubscribersOnNode(Signal *signal, Uint32 nodeId)
   bool found = false;

   KeyTable<Table>::Iterator it;
+  LINT_INIT(it.bucket);
+  LINT_INIT(it.curr.p);
   for(c_tables.first(it);!it.isNull();c_tables.next(it))
   {
     LocalDLList<Subscriber> subbs(c_subscriberPool,it.curr.p->c_subscribers);
@@ -1265,7 +1267,7 @@ Suma::execSUB_SYNC_REQ(Signal* signal)
     jam();
     syncPtr.p->m_tableList.append(&subPtr.p->m_tableId, 1);
     if(signal->getNoOfSections() > 0){
-      SegmentedSectionPtr ptr;
+      SegmentedSectionPtr ptr(0,0,0);
       signal->getSection(ptr, SubSyncReq::ATTRIBUTE_LIST);
       LocalDataBuffer<15> attrBuf(c_dataBufferPool,syncPtr.p->m_attributeList);
       append(attrBuf, ptr, getSectionSegmentPool());
@@ -1711,7 +1713,7 @@ Suma::execGET_TABINFO_CONF(Signal* signal){
   Uint32 tableId = conf->tableId;
   TablePtr tabPtr;
   c_tablePool.getPtr(tabPtr, conf->senderData);
-  SegmentedSectionPtr ptr;
+  SegmentedSectionPtr ptr(0,0,0);
   signal->getSection(ptr, GetTabInfoConf::DICT_TAB_INFO);
   ndbrequire(tabPtr.p->parseTable(ptr, *this));
   releaseSections(signal);
@@ -2103,6 +2105,7 @@ Suma::SyncRecord::nextScan(Signal* signal)
   LocalDataBuffer<15> attrBuf(suma.c_dataBufferPool, head);

   ScanFragReq * req = (ScanFragReq *)signal->getDataPtrSend();
+  const Uint32 parallelism = 16;
   const Uint32 attrLen = 5 + attrBuf.getSize();

   req->senderData = ptrI;
@@ -3185,6 +3188,10 @@ Suma::Table::checkRelease(Suma &suma)

 static Uint32 f_bufferLock = 0;
 static Uint32 f_buffer[SUMA_BUF_SZ];
+static Uint32 f_trigBufferSize = 0;
+static Uint32 b_bufferLock = 0;
+static Uint32 b_buffer[SUMA_BUF_SZ];
+static Uint32 b_trigBufferSize = 0;

 void
 Suma::execTRANSID_AI(Signal* signal)
@@ -3621,7 +3628,6 @@ Suma::execSUB_GCP_COMPLETE_REP(Signal* signal)

     if(c_buckets[i].m_buffer_tail != RNIL)
     {
-      Uint32* dst;
       get_buffer_ptr(signal, i, gci, 0);
     }
   }
@@ -3966,9 +3972,6 @@ void
 Suma::completeSubRemove(SubscriptionPtr subPtr)
 {
   DBUG_ENTER("Suma::completeSubRemove");
-  Uint32 subscriptionId = subPtr.p->m_subscriptionId;
-  Uint32 subscriptionKey = subPtr.p->m_subscriptionKey;
-
   c_subscriptions.release(subPtr);
   DBUG_PRINT("info",("c_subscriptionPool  size: %d free: %d",
                      c_subscriptionPool.getSize(),
@@ -4561,6 +4564,7 @@ Suma::execSUMA_HANDOVER_CONF(Signal* signal) {
   DBUG_VOID_RETURN;
 }

+#ifdef NOT_USED
 static
 NdbOut&
 operator<<(NdbOut & out, const Suma::Page_pos & pos)
@@ -4572,6 +4576,7 @@ operator<<(NdbOut & out, const Suma::Page_pos & pos)
      << " ]";
   return out;
 }
+#endif

 Uint32*
 Suma::get_buffer_ptr(Signal* signal, Uint32 buck, Uint32 gci, Uint32 sz)
@@ -4742,6 +4747,7 @@ loop:
   ptr.p->m_free = count;
   Buffer_page* page;
+  LINT_INIT(page);
   for(Uint32 i = 0; i<count; i++)
   {
     page = (Buffer_page*)m_tup->c_page_pool.getPtr(ref);
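Several of the NDB hunks above silence "may be used uninitialized" warnings by giving objects a defined starting state: LINT_INIT() for iterator fields, an explicit (0,0,0) constructor call for SegmentedSectionPtr. The generic form of the fix, with a stand-in definition of the macro (the real one lives in the portability headers; this sketch only assumes it expands to an assignment or to nothing):

    #ifdef HAVE_purify
    #define LINT_INIT(var) var= 0     /* stand-in definition */
    #else
    #define LINT_INIT(var)
    #endif

    int compute(bool flag)
    {
      int value;
      LINT_INIT(value);     /* quiets "may be used uninitialized" */
      if (flag)
        value= 42;
      return flag ? value : 0;
    }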
diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
index f005a1bc005..5560259a957 100644
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp
@@ -101,7 +101,6 @@ MgmtSrvr::logLevelThread_C(void* m)
 extern EventLogger g_eventLogger;

 #ifdef NOT_USED
-
 static NdbOut&
 operator<<(NdbOut& out, const LogLevel & ll)
 {
@@ -1130,6 +1129,9 @@ int MgmtSrvr::sendSTOP_REQ(const Vector<NodeId> &node_ids,
       break;
     }
     case GSN_STOP_CONF:{
+#ifdef NOT_USED
+      const StopConf * const ref = CAST_CONSTPTR(StopConf, signal->getDataPtr());
+#endif
       const NodeId nodeId = refToNode(signal->header.theSendersBlockRef);
 #ifdef VM_TRACE
       ndbout_c("Node %d single user mode", nodeId);
@@ -1159,8 +1161,9 @@ int MgmtSrvr::sendSTOP_REQ(const Vector<NodeId> &node_ids,
       break;
     }
     case GSN_NODE_FAILREP:{
+      const NodeFailRep * const rep =
+        CAST_CONSTPTR(NodeFailRep, signal->getDataPtr());
       NdbNodeBitmask mask;
-      char buf[100];
       mask.assign(NdbNodeBitmask::Size, rep->theNodes);
       mask.bitAND(notstarted);
       nodes.bitANDC(mask);
@@ -1354,7 +1357,7 @@ int MgmtSrvr::restartNodes(const Vector<NodeId> &node_ids,

   for (unsigned i = 0; i < node_ids.size(); i++)
   {
-    start(node_ids[i]);
+    (void) start(node_ids[i]);
   }
   return 0;
 }
@@ -2064,8 +2067,10 @@ MgmtSrvr::alloc_node_id_req(NodeId free_node_id, enum ndb_mgm_node_type type)
     switch (gsn) {
     case GSN_ALLOC_NODEID_CONF:
     {
+#ifdef NOT_USED
       const AllocNodeIdConf * const conf =
         CAST_CONSTPTR(AllocNodeIdConf, signal->getDataPtr());
+#endif
       return 0;
     }
     case GSN_ALLOC_NODEID_REF:
diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index ba2329888d2..1f4a9838c91 100644
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -2904,7 +2904,6 @@ int
 NdbDictionaryImpl::dropTableGlobal(NdbTableImpl & impl)
 {
   int res;
-  const char * name = impl.getName();
   DBUG_ENTER("NdbDictionaryImpl::dropTableGlobal");
   DBUG_ASSERT(impl.m_status != NdbDictionary::Object::New);
   DBUG_ASSERT(impl.m_indexType == NdbDictionary::Object::TypeUndefined);
@@ -4277,8 +4276,6 @@ void
 NdbDictInterface::execWAIT_GCP_CONF(NdbApiSignal* signal,
                                     LinearSectionPtr ptr[3])
 {
-  const WaitGCPConf * const conf=
-    CAST_CONSTPTR(WaitGCPConf, signal->getDataPtr());
   m_waiter.signal(NO_WAIT);
 }
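MgmtSrvr::restartNodes() now writes (void) start(node_ids[i]): the cast documents that the return code is discarded on purpose, which keeps both compilers and readers from flagging the call. The idiom in isolation (function and names invented for illustration):

    static int start_node(int id) { return id >= 0 ? 0 : -1; }  /* stand-in */

    void restart_all(const int *ids, unsigned n)
    {
      for (unsigned i= 0; i < n; i++)
        (void) start_node(ids[i]);          /* result intentionally ignored */
    }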
diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
index fe10cf133c2..8e392a27a98 100644
--- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
@@ -49,7 +49,7 @@ static Gci_container_pod g_empty_gci_container;
 static const Uint32 ACTIVE_GCI_DIRECTORY_SIZE = 4;
 static const Uint32 ACTIVE_GCI_MASK = ACTIVE_GCI_DIRECTORY_SIZE - 1;

-#ifdef VM_TRACE
+#if defined(VM_TRACE) && defined(NOT_USED)
 static void
 print_std(const SubTableData * sdata, LinearSectionPtr ptr[3])
 {
@@ -730,7 +730,9 @@ NdbEventOperationImpl::receive_event()
     // Parse the new table definition and
     // create a table object
     NdbDictionary::Dictionary *myDict = m_ndb->getDictionary();
-    NdbDictionaryImpl *dict = & NdbDictionaryImpl::getImpl(*myDict);
+#ifdef NOT_USED
+    NdbDictionaryImpl *dict =&NdbDictionaryImpl::getImpl(*myDict);
+#endif
     NdbError error;
     NdbDictInterface dif(error);
     NdbTableImpl *at;
diff --git a/storage/ndb/src/ndbapi/NdbOperationDefine.cpp b/storage/ndb/src/ndbapi/NdbOperationDefine.cpp
index ba26831749d..757d39a75ce 100644
--- a/storage/ndb/src/ndbapi/NdbOperationDefine.cpp
+++ b/storage/ndb/src/ndbapi/NdbOperationDefine.cpp
@@ -530,11 +530,9 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
     }//if
   }//if

-  // Including bits in last word
-  const Uint32 totalSizeInWords = (sizeInBytes + 3)/4;
   // Excluding bits in last word
   const Uint32 sizeInWords = sizeInBytes / 4;
-  AttributeHeader& ah = AttributeHeader::init(&ahValue, tAttrId, sizeInBytes);
+  (void) AttributeHeader::init(&ahValue, tAttrId, sizeInBytes);
   insertATTRINFO( ahValue );

   /***********************************************************************
@@ -560,10 +558,6 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
   }//if
   theErrorLine++;
   DBUG_RETURN(0);
-
-error:
-  setErrorCodeAbort(tReturnCode);
-  DBUG_RETURN(-1);
 }//NdbOperation::setValue()

 NdbBlob*
diff --git a/storage/ndb/src/ndbapi/NdbOperationSearch.cpp b/storage/ndb/src/ndbapi/NdbOperationSearch.cpp
index afb1002ddd9..605c66d9859 100644
--- a/storage/ndb/src/ndbapi/NdbOperationSearch.cpp
+++ b/storage/ndb/src/ndbapi/NdbOperationSearch.cpp
@@ -309,10 +309,6 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
  equal_error2:
   setErrorCodeAbort(4206);
   DBUG_RETURN(-1);
-
- equal_error3:
-  setErrorCodeAbort(4209);
-  DBUG_RETURN(-1);
 }

 /******************************************************************************
diff --git a/storage/ndb/src/ndbapi/SignalSender.cpp b/storage/ndb/src/ndbapi/SignalSender.cpp
index 9090bac4d9d..0c0a9bd0e1f 100644
--- a/storage/ndb/src/ndbapi/SignalSender.cpp
+++ b/storage/ndb/src/ndbapi/SignalSender.cpp
@@ -19,6 +19,7 @@
 #include <signaldata/NFCompleteRep.hpp>
 #include <signaldata/NodeFailRep.hpp>

+#ifdef NOT_USED
 static
 void
 require(bool x)
@@ -26,6 +27,7 @@ require(bool x)
   if (!x)
     abort();
 }
+#endif

 SimpleSignal::SimpleSignal(bool dealloc){
   memset(this, 0, sizeof(* this));
diff --git a/storage/ndb/src/ndbapi/TransporterFacade.cpp b/storage/ndb/src/ndbapi/TransporterFacade.cpp
index 24bf6dbbc6a..80d7a7ee2d5 100644
--- a/storage/ndb/src/ndbapi/TransporterFacade.cpp
+++ b/storage/ndb/src/ndbapi/TransporterFacade.cpp
@@ -1379,7 +1379,7 @@ int PollGuard::wait_scan(int wait_time, NodeId nodeId, bool forceSend)
 int PollGuard::wait_for_input_in_loop(int wait_time, bool forceSend)
 {
-  int ret_val, response_time;
+  int ret_val;
   if (forceSend)
     m_tp->forceSend(m_block_no);
   else
@@ -1441,7 +1441,7 @@ void PollGuard::wait_for_input(int wait_time)
     queue if it hasn't happened already. It is usually already out of the
     queue but at time-out it could be that the object is still there.
   */
-  Uint32 cond_wait_index= m_tp->put_in_cond_wait_queue(m_waiter);
+  (void) m_tp->put_in_cond_wait_queue(m_waiter);
   m_waiter->wait(wait_time);
   if (m_waiter->get_cond_wait_index() != TransporterFacade::MAX_NO_THREADS)
   {
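NdbOperationDefine.cpp and NdbOperationSearch.cpp drop error labels whose last goto disappeared; a label with no references draws -Wunused-label from gcc, and the code after it is unreachable anyway. The before/after shape, on an invented function:

    int do_op(bool ok)
    {
      if (!ok)
        return -1;       /* formerly: goto error; */
      return 0;
      /* error:             label and its return removed together,
         return -1;         nothing could reach them any more */
    }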
diff --git a/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp b/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp
index 6ad36da3000..24ac05caf07 100644
--- a/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp
+++ b/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp
@@ -39,6 +39,8 @@ EventLogger g_eventLogger;
 NdbMutex *ndb_print_state_mutex= NULL;
 #endif

+static int g_ndb_connection_count = 0;
+
 /*
  * Ndb_cluster_connection
  */
diff --git a/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp b/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp
index 33346a2a1d7..a8b774ec2b8 100644
--- a/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp
+++ b/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp
@@ -22,7 +22,6 @@
 #include <NdbMutex.h>

 extern NdbMutex *g_ndb_connection_mutex;
-static int g_ndb_connection_count = 0;

 class TransporterFacade;
 class ConfigRetriever;
diff --git a/support-files/compiler_warnings.supp b/support-files/compiler_warnings.supp
index f8651e1d27d..a7ebafedfbf 100644
--- a/support-files/compiler_warnings.supp
+++ b/support-files/compiler_warnings.supp
@@ -10,3 +10,7 @@ DbtupSystemRestart.cpp : .*unused variable.* : 95 - 96
 DbtupIndex.cpp : .*unused variable.* : 188 - 242
 ndbd_malloc.cpp : .*defined but not used.* : 25
 main.cpp : .*unused variable.* : 131 - 132
+kernel_types.h : .*only defines private constructors and has no friends.* : 51
+Dbtup.hpp: .*only defines private constructors and has no friends.*
+diskpage.hpp: .*only defines private constructors and has no friends.*
+tuppage.hpp: .*only defines private constructors and has no friends.*
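g_ndb_connection_count moves from ndb_cluster_connection_impl.hpp into the one .cpp file that uses it: a static definition in a header gives every including translation unit its own private copy of the variable (plus an unused-variable warning in most of them), while a single static definition in one .cpp yields exactly one file-local counter. Sketch (file and function names invented):

    /* counter.cpp: one definition, internal linkage, visible to this file only */
    static int g_connection_count= 0;

    void on_connect()    { ++g_connection_count; }
    void on_disconnect() { --g_connection_count; }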