author     unknown <monty@mysql.com/narttu.mysql.fi>    2007-01-29 01:47:35 +0200
committer  unknown <monty@mysql.com/narttu.mysql.fi>    2007-01-29 01:47:35 +0200
commit     f40e0cc0d0606c3f06151763a6445bfa4682eb77 (patch)
tree       9a81ea8e9e695584f7915cc104eda630d7b98bc8 /sql
parent     36b058929ffbbf132a4a512ec8c3a6e937309387 (diff)
download   mariadb-git-f40e0cc0d0606c3f06151763a6445bfa4682eb77.tar.gz
After merge fixes
Removed a lot of compiler warnings
Removed unused variables, functions and labels
Initialized some variables that could otherwise be used uninitialized (fatal bugs)
%ll -> %l in format strings (portability fix; sketched below)
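
One concrete instance of the format-string fix, since it recurs throughout the diff: in 2007 not every supported compiler and libc understood %llu, so 64-bit values were cast to ulong and printed with %lu. A minimal sketch of the pattern (the typedefs are stand-ins for MySQL's my_global.h, not the real definitions):

    #include <cstdio>

    typedef unsigned long long ulonglong;  /* stand-in for MySQL's ulonglong */
    typedef unsigned long ulong;           /* stand-in for MySQL's ulong */

    int main()
    {
      ulonglong row_count= 42;
      /*
        %llu was not portable across the compilers supported at the time,
        so the value is cast to ulong and printed with %lu. On 32-bit
        platforms this truncates values above ULONG_MAX, an accepted
        trade-off for debug traces.
      */
      printf("row_count: %lu\n", (ulong) row_count);
      return 0;
    }
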
BitKeeper/etc/ignore:
Added storage/archive/archive_reader
BUILD/SETUP.sh:
ccache now works again
BUILD/compile-pentium-gcov:
Added a marker that we are using gcov and need a special version of ccache
client/mysql_upgrade.c:
After merge fixes
client/mysqlbinlog.cc:
After merge fixes
client/mysqldump.c:
Removed compiler warnings
client/mysqlimport.c:
Removed compiler warnings
client/mysqltest.c:
Removed compiler warnings
mysql-test/t/mysqlcheck.test:
After merge fixes
mysys/my_bitmap.c:
After merge fix
sql/event_data_objects.cc:
Removed unused variable
sql/event_db_repository.cc:
Removed unused variable
sql/event_queue.cc:
Removed unused variable
sql/field.cc:
After merge fixes
sql/filesort.cc:
Added missing initialization (could cause a core dump on EOM; see the filesort sketch after the file list)
sql/ha_ndbcluster.cc:
After merge fixes
Removed unused variables
false -> FALSE
true -> TRUE
%llu -> %lu (portability fix)
Fixed a bug where a field could be used uninitialized in build_scan_filter_predicate() (see the switch sketch after the file list)
sql/ha_ndbcluster_binlog.cc:
Removed unused label
sql/ha_partition.cc:
Removed unused variables
sql/handler.cc:
Removed unused variable & function
sql/item.cc:
After merge fixes
sql/item_cmpfunc.cc:
Removed unused variable
sql/item_func.cc:
Removed compiler warning
sql/item_xmlfunc.cc:
Removed unused variables & declarations
sql/log.cc:
Removed compiler warnings
Removed unused variables & label
sql/log.h:
After merge fixes
sql/log_event.cc:
Removed unused variable & function
sql/mysqld.cc:
After merge fixes
sql/opt_range.cc:
Removed unused declaration
sql/partition_info.cc:
Removed unused variable
sql/protocol.cc:
Removed compiler warnings
sql/set_var.cc:
Removed unused variable
sql/set_var.h:
After merge fix
sql/slave.cc:
After merge fixes
sql/slave.h:
Moved wrongly placed declaration to slave.cc (see the header sketch after the file list)
sql/sp.cc:
Fixed format of DBUG_PRINT
sql/sp_head.cc:
After merge fixes
sql/spatial.cc:
Added DBUG_ASSERT() to verify that the LINT_INIT is right (see the LINT_INIT sketch after the file list)
sql/sql_class.cc:
Removed unused variables
sql/sql_insert.cc:
After merge fixes
sql/sql_parse.cc:
Removed unused variable
After merge fixes
sql/sql_partition.cc:
Removed unused variables
sql/sql_plugin.cc:
Removed compiler warnings when compiling the embedded server (see the #ifdef sketch after the file list)
sql/sql_servers.cc:
Removed unused variables
Moved a wrongly placed call to use_all_columns()
sql/sql_servers.h:
Moved declaration to its proper place in sql_servers.cc
sql/sql_show.cc:
Removed unused variables and function
After merge fixes
sql/sql_table.cc:
Removed unused variable
sql/sql_yacc.yy:
Removed unused variables
Lex -> lex
sql/table.cc:
Indentation fix
storage/archive/ha_archive.cc:
After merge fixes
storage/example/ha_example.cc:
Indentation fixes
storage/federated/ha_federated.cc:
Removed unused variables
storage/myisam/mi_rkey.c:
Added 0x prefix before address (see the pointer-trace sketch after the file list)
storage/myisammrg/ha_myisammrg.cc:
Removed old declaration
storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp:
After merge fixes
storage/ndb/include/util/SimpleProperties.hpp:
After merge fixes
storage/ndb/src/common/debugger/EventLogger.cpp:
Removed unused function
storage/ndb/src/kernel/blocks/suma/Suma.cpp:
Removed compiler warnings
Removed unused variables
storage/ndb/src/mgmsrv/MgmtSrvr.cpp:
After merge fixes
Removed unused variables
storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp:
Removed unused variables
storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp:
Removed unused variables
storage/ndb/src/ndbapi/NdbOperationDefine.cpp:
Removed unused variables and label
storage/ndb/src/ndbapi/NdbOperationSearch.cpp:
Removed unused label
storage/ndb/src/ndbapi/SignalSender.cpp:
Removed unused function
storage/ndb/src/ndbapi/TransporterFacade.cpp:
Removed unused variables
storage/ndb/src/ndbapi/ndb_cluster_connection.cpp:
Moved static declaration from header file
storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp:
Moved static declaration from header file
support-files/compiler_warnings.supp:
Removed some warnings from ndb
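
What follows are a few hypothetical sketches of the recurring fix patterns described above; none of them is the actual MariaDB code. First, the sql/filesort.cc initialization: an out-of-memory failure early in filesort() could jump to the cleanup code before sort_keys was ever assigned, so the cleanup freed an uninitialized pointer (the "core dump on EOM"). Initializing the pointer makes the error path safe:

    #include <cstdlib>

    /* Simplified reconstruction of the filesort() fix. */
    int sort_rows(size_t nkeys)
    {
      unsigned char **sort_keys= 0;   /* the fix: defined before any jump */
      int error= 1;

      if (nkeys == 0)
        goto err;                     /* early error, before allocation */
      sort_keys= (unsigned char**) malloc(nkeys * sizeof(*sort_keys));
      if (!sort_keys)
        goto err;                     /* the EOM path */

      /* ... build and merge the sort keys here ... */
      error= 0;

    err:
      free(sort_keys);                /* safe: either NULL or a valid block */
      return error;
    }

    int main() { return sort_rows(0); }
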
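The build_scan_filter_predicate() fix in sql/ha_ndbcluster.cc closed a path where the switch could break out without assigning field, which was then read by the caller. A simplified reconstruction of the fixed control flow (types and names invented for illustration):

    #include <cstdio>

    struct Ndb_item { int type; };
    enum { NDB_FIELD, NDB_VALUE };

    /* Every path through the switch now leaves 'field' defined, so the
       caller's NULL check is reliable. */
    static const Ndb_item *pick_field(const Ndb_item *a, const Ndb_item *b,
                                      int argument_count)
    {
      const Ndb_item *field;
      switch (argument_count) {
      case 1:
        field= (a->type == NDB_FIELD) ? a : NULL;
        break;
      case 2:
        if (!b)
        {
          field= NULL;  /* the fix: this path used to leave 'field' unset */
          break;
        }
        field= ((a->type == NDB_FIELD) ? a :
                (b->type == NDB_FIELD) ? b : NULL);
        break;
      default:
        field= NULL;    /* keep compiler happy */
        break;
      }
      return field;
    }

    int main()
    {
      Ndb_item f= { NDB_FIELD };
      printf("%s\n", pick_field(&f, NULL, 2) ? "found" : "none");
      return 0;
    }
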
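For sql/spatial.cc, the point of pairing LINT_INIT with DBUG_ASSERT: the macro silences a "may be used uninitialized" warning, and the assert checks the invariant that makes the silencing legitimate. The macro definitions below are rough approximations of my_global.h and my_dbug.h in a debug build, not the real ones:

    #include <cassert>

    #define LINT_INIT(var)    var= 0       /* hush the compiler warning */
    #define DBUG_ASSERT(expr) assert(expr)

    static double first_ring_x(const double *ring_x, unsigned n_linear_rings)
    {
      double x;
      LINT_INIT(x);
      /* Documents why LINT_INIT is right here: with at least one ring,
         the loop always assigns x before it is read. */
      DBUG_ASSERT(n_linear_rings > 0);
      for (unsigned i= 0; i < n_linear_rings; i++)
        x= ring_x[i];
      return x;
    }

    int main()
    {
      double xs[1]= { 3.5 };
      return first_ring_x(xs, 1) == 3.5 ? 0 : 1;
    }
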
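The sql/slave.h and ndb_cluster_connection header changes apply one rule: a static function declaration in a header gives every translation unit that includes it its own internal-linkage declaration, typically undefined and warned about, so the declaration belongs in the single .cc file that defines the function. A sketch with hypothetical names:

    /* widget.h (before, wrong): every includer received
         static int helper(int x);
       which it never defines. After the fix the header exports only the
       public entry point, and the static declaration lives in the .cc: */

    #include <cstdio>

    int widget_value(int x);     /* what the header should declare */

    static int helper(int x);    /* file-local declaration, now in the .cc */

    int widget_value(int x) { return helper(x) + 1; }

    static int helper(int x) { return x * 2; }

    int main() { printf("%d\n", widget_value(20)); return 0; }
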
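The sql/sql_plugin.cc warnings in embedded builds came from file-scope constants that only the dynamic plugin loader references; the diff wraps them in the same #ifdef HAVE_DLOPEN as their users. A minimal sketch (it assumes the build defines HAVE_DLOPEN when dynamic loading is available):

    #include <cstdio>

    #ifdef HAVE_DLOPEN
    /* Read only by the dlopen-based loader; guarding it avoids a
       "defined but not used" warning in embedded builds. */
    static const char *plugin_interface_version_sym=
      "_mysql_plugin_interface_version_";
    #endif

    int main()
    {
    #ifdef HAVE_DLOPEN
      printf("version symbol: %s\n", plugin_interface_version_sym);
    #else
      printf("embedded build: no dynamic plugin loading\n");
    #endif
      return 0;
    }
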
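Finally, the storage/myisam/mi_rkey.c note and several DBUG_PRINT changes in the diff print addresses as 0x%lx, so a pointer value cannot be misread as a decimal count:

    #include <cstdio>

    int main()
    {
      int object;
      /* Cast the pointer to ulong and print with an explicit 0x prefix,
         e.g. "info: 0x7ffd...", rather than an ambiguous bare number. */
      printf("info: 0x%lx\n", (unsigned long) &object);
      return 0;
    }
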
Diffstat (limited to 'sql')
38 files changed, 212 insertions, 281 deletions
diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc index 54b043bd916..07575a6d33a 100644 --- a/sql/event_data_objects.cc +++ b/sql/event_data_objects.cc @@ -1560,7 +1560,6 @@ done: int Event_timed::get_create_event(THD *thd, String *buf) { - int multipl= 0; char tmp_buf[2 * STRING_BUFFER_USUAL_SIZE]; String expr_buf(tmp_buf, sizeof(tmp_buf), system_charset_info); expr_buf.length(0); diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc index bcc7d476fff..940930ec4c6 100644 --- a/sql/event_db_repository.cc +++ b/sql/event_db_repository.cc @@ -518,7 +518,6 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data, my_bool create_if_not) { int ret= 0; - CHARSET_INFO *scs= system_charset_info; TABLE *table= NULL; char old_db_buf[NAME_LEN+1]; LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) }; diff --git a/sql/event_queue.cc b/sql/event_queue.cc index 45d354ea9b6..068abbe3408 100644 --- a/sql/event_queue.cc +++ b/sql/event_queue.cc @@ -139,8 +139,6 @@ bool Event_queue::init_queue(THD *thd, Event_db_repository *db_repo) { bool res; - struct event_queue_param *event_queue_param_value= NULL; - DBUG_ENTER("Event_queue::init_queue"); DBUG_PRINT("enter", ("this: 0x%lx", (long) this)); diff --git a/sql/field.cc b/sql/field.cc index f01f3b3731a..cc32e998f65 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -2556,7 +2556,7 @@ uint Field_new_decimal::is_equal(create_field *new_field) (uint) (flags & UNSIGNED_FLAG)) && ((new_field->flags & AUTO_INCREMENT_FLAG) == (uint) (flags & AUTO_INCREMENT_FLAG)) && - (new_field->length == max_length()) && + (new_field->length == max_display_length()) && (new_field->decimals == dec)); } @@ -6165,7 +6165,7 @@ uint Field_str::is_equal(create_field *new_field) return ((new_field->sql_type == real_type()) && new_field->charset == field_charset && - new_field->length == max_length()); + new_field->length == max_display_length()); } @@ -6999,11 +6999,11 @@ uint Field_varstring::is_equal(create_field *new_field) if (new_field->sql_type == real_type() && new_field->charset == field_charset) { - if (new_field->length == max_length()) + if (new_field->length == max_display_length()) return IS_EQUAL_YES; - if (new_field->length > max_length() && - ((new_field->length <= 255 && max_length() <= 255) || - (new_field->length > 255 && max_length() > 255))) + if (new_field->length > max_display_length() && + ((new_field->length <= 255 && max_display_length() <= 255) || + (new_field->length > 255 && max_display_length() > 255))) return IS_EQUAL_PACK_LENGTH; // VARCHAR, longer variable length } return IS_EQUAL_NO; @@ -8196,7 +8196,7 @@ uint Field_num::is_equal(create_field *new_field) UNSIGNED_FLAG)) && ((new_field->flags & AUTO_INCREMENT_FLAG) == (uint) (flags & AUTO_INCREMENT_FLAG)) && - (new_field->length <= max_length())); + (new_field->length <= max_display_length())); } diff --git a/sql/filesort.cc b/sql/filesort.cc index 448dea227ab..9f0bb9b45fb 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -104,7 +104,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, uint maxbuffer; BUFFPEK *buffpek; ha_rows records= HA_POS_ERROR; - uchar **sort_keys; + uchar **sort_keys= 0; IO_CACHE tempfile, buffpek_pointers, *selected_records_file, *outfile; SORTPARAM param; bool multi_byte_charset; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 4738fbb22f9..36fe6457167 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -150,7 +150,6 @@ static byte *ndbcluster_get_key(NDB_SHARE *share,uint 
*length, #ifdef HAVE_NDB_BINLOG static int rename_share(NDB_SHARE *share, const char *new_key); #endif -static void ndb_set_fragmentation(NDBTAB &tab, TABLE *table, uint pk_len); static int ndb_get_table_statistics(ha_ndbcluster*, bool, Ndb*, const NDBTAB *, struct Ndb_statistics *); @@ -451,7 +450,7 @@ ha_rows ha_ndbcluster::records() Ndb *ndb= get_ndb(); ndb->setDatabaseName(m_dbname); struct Ndb_statistics stat; - if (ndb_get_table_statistics(this, true, ndb, m_table, &stat) == 0) + if (ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat) == 0) { retval= stat.row_count; } @@ -462,9 +461,9 @@ ha_rows ha_ndbcluster::records() THD *thd= current_thd; if (get_thd_ndb(thd)->error) - info->no_uncommitted_rows_count= 0; + local_info->no_uncommitted_rows_count= 0; - DBUG_RETURN(retval + info->no_uncommitted_rows_count); + DBUG_RETURN(retval + local_info->no_uncommitted_rows_count); } int ha_ndbcluster::records_update() @@ -482,7 +481,7 @@ int ha_ndbcluster::records_update() Ndb *ndb= get_ndb(); struct Ndb_statistics stat; ndb->setDatabaseName(m_dbname); - result= ndb_get_table_statistics(this, true, ndb, m_table, &stat); + result= ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat); if (result == 0) { stats.mean_rec_length= stat.row_size; @@ -955,7 +954,6 @@ int ha_ndbcluster::get_ndb_partition_id(NdbOperation *ndb_op) bool ha_ndbcluster::uses_blob_value() { - uint blob_fields; MY_BITMAP *bitmap; uint *blob_index, *blob_index_end; if (table_share->blob_fields == 0) @@ -1105,7 +1103,6 @@ int ha_ndbcluster::create_indexes(Ndb *ndb, TABLE *tab) const char *index_name; KEY* key_info= tab->key_info; const char **key_name= tab->s->keynames.type_names; - NDBDICT *dict= ndb->getDictionary(); DBUG_ENTER("ha_ndbcluster::create_indexes"); for (i= 0; i < tab->s->keys; i++, key_info++, key_name++) @@ -1243,7 +1240,6 @@ int ha_ndbcluster::open_indexes(Ndb *ndb, TABLE *tab, bool ignore_error) int error= 0; THD *thd=current_thd; NDBDICT *dict= ndb->getDictionary(); - const char *index_name; KEY* key_info= tab->key_info; const char **key_name= tab->s->keynames.type_names; DBUG_ENTER("ha_ndbcluster::open_indexes"); @@ -1255,9 +1251,9 @@ int ha_ndbcluster::open_indexes(Ndb *ndb, TABLE *tab, bool ignore_error) m_index[i].index= m_index[i].unique_index= NULL; else break; - m_index[i].null_in_unique_index= false; + m_index[i].null_in_unique_index= FALSE; if (check_index_fields_not_null(key_info)) - m_index[i].null_in_unique_index= true; + m_index[i].null_in_unique_index= TRUE; } if (error && !ignore_error) @@ -1293,7 +1289,6 @@ void ha_ndbcluster::renumber_indexes(Ndb *ndb, TABLE *tab) const char *index_name; KEY* key_info= tab->key_info; const char **key_name= tab->s->keynames.type_names; - NDBDICT *dict= ndb->getDictionary(); DBUG_ENTER("ha_ndbcluster::renumber_indexes"); for (i= 0; i < tab->s->keys; i++, key_info++, key_name++) @@ -1410,10 +1405,10 @@ bool ha_ndbcluster::check_index_fields_not_null(KEY* key_info) { Field* field= key_part->field; if (field->maybe_null()) - DBUG_RETURN(true); + DBUG_RETURN(TRUE); } - DBUG_RETURN(false); + DBUG_RETURN(FALSE); } void ha_ndbcluster::release_metadata(THD *thd, Ndb *ndb) @@ -1731,7 +1726,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf, ERR_RETURN(trans->getNdbError()); } - if (execute_no_commit_ie(this,trans,false) != 0) + if (execute_no_commit_ie(this,trans,FALSE) != 0) { table->status= STATUS_NOT_FOUND; DBUG_RETURN(ndb_err(trans)); @@ -1796,7 +1791,7 @@ int ha_ndbcluster::complemented_read(const byte *old_data, byte *new_data, } } - 
if (execute_no_commit(this,trans,false) != 0) + if (execute_no_commit(this,trans,FALSE) != 0) { table->status= STATUS_NOT_FOUND; DBUG_RETURN(ndb_err(trans)); @@ -1842,7 +1837,7 @@ bool ha_ndbcluster::check_all_operations_for_error(NdbTransaction *trans, if (err.status != NdbError::Success) { if (ndb_to_mysql_error(&err) != (int) errcode) - DBUG_RETURN(false); + DBUG_RETURN(FALSE); if (op == last) break; op= trans->getNextCompletedOperation(op); } @@ -1873,10 +1868,10 @@ bool ha_ndbcluster::check_all_operations_for_error(NdbTransaction *trans, if (errcode == HA_ERR_KEY_NOT_FOUND) m_dupkey= table->s->primary_key; } - DBUG_RETURN(false); + DBUG_RETURN(FALSE); } } - DBUG_RETURN(true); + DBUG_RETURN(TRUE); } @@ -1954,7 +1949,7 @@ int ha_ndbcluster::peek_indexed_rows(const byte *record, } last= trans->getLastDefinedOperation(); if (first) - res= execute_no_commit_ie(this,trans,false); + res= execute_no_commit_ie(this,trans,FALSE); else { // Table has no keys @@ -2003,7 +1998,7 @@ int ha_ndbcluster::unique_index_read(const byte *key, if ((res= define_read_attrs(buf, op))) DBUG_RETURN(res); - if (execute_no_commit_ie(this,trans,false) != 0) + if (execute_no_commit_ie(this,trans,FALSE) != 0) { table->status= STATUS_NOT_FOUND; DBUG_RETURN(ndb_err(trans)); @@ -2036,13 +2031,13 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor) if (!(op= m_active_cursor->lockCurrentTuple())) { /* purecov: begin inspected */ - m_lock_tuple= false; + m_lock_tuple= FALSE; ERR_RETURN(con_trans->getNdbError()); /* purecov: end */ } m_ops_pending++; } - m_lock_tuple= false; + m_lock_tuple= FALSE; bool contact_ndb= m_lock.type < TL_WRITE_ALLOW_WRITE && m_lock.type != TL_READ_WITH_SHARED_LOCKS;; @@ -2053,7 +2048,7 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor) */ if (m_ops_pending && m_blobs_pending) { - if (execute_no_commit(this,trans,false) != 0) + if (execute_no_commit(this,trans,FALSE) != 0) DBUG_RETURN(ndb_err(trans)); m_ops_pending= 0; m_blobs_pending= FALSE; @@ -2085,7 +2080,7 @@ inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor) { if (m_transaction_on) { - if (execute_no_commit(this,trans,false) != 0) + if (execute_no_commit(this,trans,FALSE) != 0) DBUG_RETURN(-1); } else @@ -2366,7 +2361,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key, bool need_pk = (lm == NdbOperation::LM_Read); if (!(op= trans->getNdbIndexScanOperation(m_index[active_index].index, m_table)) || - op->readTuples(lm, 0, parallelism, sorted, descending, false, need_pk)) + op->readTuples(lm, 0, parallelism, sorted, descending, FALSE, need_pk)) ERR_RETURN(trans->getNdbError()); if (m_use_partition_function && part_spec != NULL && part_spec->start_part == part_spec->end_part) @@ -2388,7 +2383,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key, { const key_range *keys[2]= { start_key, end_key }; - res= set_bounds(op, active_index, false, keys); + res= set_bounds(op, active_index, FALSE, keys); if (res) DBUG_RETURN(res); } @@ -2412,7 +2407,7 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key, ERR_RETURN(trans->getNdbError()); } - if (execute_no_commit(this,trans,false) != 0) + if (execute_no_commit(this,trans,FALSE) != 0) DBUG_RETURN(ndb_err(trans)); DBUG_RETURN(next_result(buf)); @@ -2507,7 +2502,7 @@ int ha_ndbcluster::unique_index_scan(const KEY* key_info, if ((res= define_read_attrs(buf, op))) DBUG_RETURN(res); - if (execute_no_commit(this,trans,false) != 0) + if (execute_no_commit(this,trans,FALSE) != 0) DBUG_RETURN(ndb_err(trans)); 
DBUG_PRINT("exit", ("Scan started successfully")); DBUG_RETURN(next_result(buf)); @@ -2576,7 +2571,7 @@ int ha_ndbcluster::full_table_scan(byte *buf) if ((res= define_read_attrs(buf, op))) DBUG_RETURN(res); - if (execute_no_commit(this,trans,false) != 0) + if (execute_no_commit(this,trans,FALSE) != 0) DBUG_RETURN(ndb_err(trans)); DBUG_PRINT("exit", ("Scan started successfully")); DBUG_RETURN(next_result(buf)); @@ -2624,7 +2619,7 @@ int ha_ndbcluster::write_row(byte *record) start_bulk_insert will set parameters to ensure that each write_row is committed individually */ - int peek_res= peek_indexed_rows(record, true); + int peek_res= peek_indexed_rows(record, TRUE); if (!peek_res) { @@ -2743,7 +2738,7 @@ int ha_ndbcluster::write_row(byte *record) m_bulk_insert_not_flushed= FALSE; if (m_transaction_on) { - if (execute_no_commit(this,trans,false) != 0) + if (execute_no_commit(this,trans,FALSE) != 0) { m_skip_auto_increment= TRUE; no_uncommitted_rows_execute_failure(); @@ -2934,7 +2929,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) DBUG_PRINT("info", ("Calling updateTuple on cursor")); if (!(op= cursor->updateCurrentTuple())) ERR_RETURN(trans->getNdbError()); - m_lock_tuple= false; + m_lock_tuple= FALSE; m_ops_pending++; if (uses_blob_value()) m_blobs_pending= TRUE; @@ -2997,7 +2992,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) op->setValue(no_fields, part_func_value); } // Execute update operation - if (!cursor && execute_no_commit(this,trans,false) != 0) { + if (!cursor && execute_no_commit(this,trans,FALSE) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } @@ -3043,7 +3038,7 @@ int ha_ndbcluster::delete_row(const byte *record) DBUG_PRINT("info", ("Calling deleteTuple on cursor")); if (cursor->deleteCurrentTuple() != 0) ERR_RETURN(trans->getNdbError()); - m_lock_tuple= false; + m_lock_tuple= FALSE; m_ops_pending++; if (m_use_partition_function) @@ -3083,7 +3078,7 @@ int ha_ndbcluster::delete_row(const byte *record) } // Execute delete operation - if (execute_no_commit(this,trans,false) != 0) { + if (execute_no_commit(this,trans,FALSE) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } @@ -3311,8 +3306,7 @@ int ha_ndbcluster::index_init(uint index, bool sorted) unless m_lock.type == TL_READ_HIGH_PRIORITY and no sub-sequent call to unlock_row() */ - m_lock_tuple= false; - m_lock_tuple= false; + m_lock_tuple= FALSE; DBUG_RETURN(0); } @@ -3571,12 +3565,12 @@ int ha_ndbcluster::close_scan() if (!(op= cursor->lockCurrentTuple())) { - m_lock_tuple= false; + m_lock_tuple= FALSE; ERR_RETURN(trans->getNdbError()); } m_ops_pending++; } - m_lock_tuple= false; + m_lock_tuple= FALSE; if (m_ops_pending) { /* @@ -3584,7 +3578,7 @@ int ha_ndbcluster::close_scan() deleteing/updating transaction before closing the scan */ DBUG_PRINT("info", ("ops_pending: %ld", (long) m_ops_pending)); - if (execute_no_commit(this,trans,false) != 0) { + if (execute_no_commit(this,trans,FALSE) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } @@ -3789,7 +3783,7 @@ int ha_ndbcluster::info(uint flag) struct Ndb_statistics stat; ndb->setDatabaseName(m_dbname); if (current_thd->variables.ndb_use_exact_count && - (result= ndb_get_table_statistics(this, true, ndb, m_table, &stat)) + (result= ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat)) == 0) { stats.mean_rec_length= stat.row_size; @@ -3990,7 +3984,7 @@ int ha_ndbcluster::end_bulk_insert() m_bulk_insert_not_flushed= FALSE; if 
(m_transaction_on) { - if (execute_no_commit(this, trans,false) != 0) + if (execute_no_commit(this, trans,FALSE) != 0) { no_uncommitted_rows_execute_failure(); my_errno= error= ndb_err(trans); @@ -4315,7 +4309,7 @@ void ha_ndbcluster::unlock_row() DBUG_ENTER("unlock_row"); DBUG_PRINT("info", ("Unlocking row")); - m_lock_tuple= false; + m_lock_tuple= FALSE; DBUG_VOID_RETURN; } @@ -5008,7 +5002,7 @@ int ha_ndbcluster::create(const char *name, get a new share */ - if (!(share= get_share(name, form, true, true))) + if (!(share= get_share(name, form, TRUE, TRUE))) { sql_print_error("NDB: allocating table share for %s failed", name); /* my_errno is set */ @@ -5072,8 +5066,6 @@ int ha_ndbcluster::create_handler_files(const char *file, int action_flag, HA_CREATE_INFO *create_info) { - char path[FN_REFLEN]; - const char *name; Ndb* ndb; const NDBTAB *tab; const void *data, *pack_data; @@ -5373,7 +5365,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to) int ndb_table_id= orig_tab->getObjectId(); int ndb_table_version= orig_tab->getObjectVersion(); - NDB_SHARE *share= get_share(from, 0, false); + NDB_SHARE *share= get_share(from, 0, FALSE); if (share) { int r= rename_share(share, to); @@ -5527,7 +5519,7 @@ ha_ndbcluster::delete_table(ha_ndbcluster *h, Ndb *ndb, DBUG_PRINT("info", ("Schema distribution table not setup")); DBUG_RETURN(HA_ERR_NO_CONNECTION); } - NDB_SHARE *share= get_share(path, 0, false); + NDB_SHARE *share= get_share(path, 0, FALSE); #endif /* Drop the table from NDB */ @@ -5918,7 +5910,7 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked) Ndb *ndb= get_ndb(); ndb->setDatabaseName(m_dbname); struct Ndb_statistics stat; - res= ndb_get_table_statistics(NULL, false, ndb, m_table, &stat); + res= ndb_get_table_statistics(NULL, FALSE, ndb, m_table, &stat); stats.mean_rec_length= stat.row_size; stats.data_file_length= stat.fragment_memory; stats.records= stat.row_count; @@ -6073,7 +6065,7 @@ int ndbcluster_discover(handlerton *hton, THD* thd, const char *db, ndb->setDatabaseName(db); NDBDICT* dict= ndb->getDictionary(); build_table_filename(key, sizeof(key), db, name, "", 0); - NDB_SHARE *share= get_share(key, 0, false); + NDB_SHARE *share= get_share(key, 0, FALSE); if (share && get_ndb_share_state(share) == NSS_ALTERED) { // Frm has been altered on disk, but not yet written to ndb @@ -6240,7 +6232,6 @@ int ndbcluster_drop_database_impl(const char *path) static void ndbcluster_drop_database(handlerton *hton, char *path) { - THD *thd= current_thd; DBUG_ENTER("ndbcluster_drop_database"); #ifdef HAVE_NDB_BINLOG /* @@ -6257,6 +6248,7 @@ static void ndbcluster_drop_database(handlerton *hton, char *path) ndbcluster_drop_database_impl(path); #ifdef HAVE_NDB_BINLOG char db[FN_REFLEN]; + THD *thd= current_thd; ha_ndbcluster::set_dbname(path, db); ndbcluster_log_schema_op(thd, 0, thd->query, thd->query_length, @@ -6282,16 +6274,17 @@ int ndb_create_table_from_engine(THD *thd, const char *db, */ int ndbcluster_find_all_files(THD *thd) { - DBUG_ENTER("ndbcluster_find_all_files"); Ndb* ndb; char key[FN_REFLEN]; + NDBDICT *dict; + int unhandled, retries= 5, skipped; + DBUG_ENTER("ndbcluster_find_all_files"); if (!(ndb= check_ndb_in_thd(thd))) DBUG_RETURN(HA_ERR_NO_CONNECTION); - NDBDICT *dict= ndb->getDictionary(); + dict= ndb->getDictionary(); - int unhandled, retries= 5, skipped; LINT_INIT(unhandled); LINT_INIT(skipped); do @@ -6361,7 +6354,7 @@ int ndbcluster_find_all_files(THD *thd) } else if (cmp_frm(ndbtab, pack_data, pack_length)) { - NDB_SHARE *share= 
get_share(key, 0, false); + NDB_SHARE *share= get_share(key, 0, FALSE); if (!share || get_ndb_share_state(share) != NSS_ALTERED) { discover= 1; @@ -6475,12 +6468,12 @@ int ndbcluster_find_files(handlerton *hton, THD *thd, List<char> delete_list; while ((file_name=it++)) { - bool file_on_disk= false; + bool file_on_disk= FALSE; DBUG_PRINT("info", ("%s", file_name)); if (hash_search(&ndb_tables, file_name, strlen(file_name))) { DBUG_PRINT("info", ("%s existed in NDB _and_ on disk ", file_name)); - file_on_disk= true; + file_on_disk= TRUE; } // Check for .ndb file with this name @@ -7033,19 +7026,19 @@ ha_ndbcluster::records_in_range(uint inx, key_range *min_key, { // We must provide approx table rows Uint64 table_rows=0; - Ndb_local_table_statistics *info= m_table_info; - if (create_info->records != ~(ha_rows)0 && info->records != 0) + Ndb_local_table_statistics *ndb_info= m_table_info; + if (ndb_info->records != ~(ha_rows)0 && ndb_info->records != 0) { - table_rows = info->records; - DBUG_PRINT("info", ("use info->records: %llu", table_rows)); + table_rows = ndb_info->records; + DBUG_PRINT("info", ("use info->records: %lu", (ulong) table_rows)); } else { Ndb_statistics stat; - if ((res=ndb_get_table_statistics(this, true, ndb, m_table, &stat)) != 0) + if ((res=ndb_get_table_statistics(this, TRUE, ndb, m_table, &stat))) break; table_rows=stat.row_count; - DBUG_PRINT("info", ("use db row_count: %llu", table_rows)); + DBUG_PRINT("info", ("use db row_count: %lu", (ulong) table_rows)); if (table_rows == 0) { // Problem if autocommit=0 #ifdef ndb_get_table_statistics_uses_active_trans @@ -7068,7 +7061,7 @@ ha_ndbcluster::records_in_range(uint inx, key_range *min_key, if ((op->readTuples(NdbOperation::LM_CommittedRead)) == -1) ERR_BREAK(op->getNdbError(), res); const key_range *keys[2]={ min_key, max_key }; - if ((res=set_bounds(op, inx, true, keys)) != 0) + if ((res=set_bounds(op, inx, TRUE, keys)) != 0) break; // Decide if db should be contacted @@ -7203,7 +7196,7 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname, { Ndb_table_guard ndbtab_g(ndb->getDictionary(), tabname); if (ndbtab_g.get_table() == 0 - || ndb_get_table_statistics(NULL, false, ndb, ndbtab_g.get_table(), &stat)) + || ndb_get_table_statistics(NULL, FALSE, ndb, ndbtab_g.get_table(), &stat)) { free_share(&share); DBUG_RETURN(1); @@ -7382,9 +7375,9 @@ static byte *ndbcluster_get_key(NDB_SHARE *share,uint *length, static void print_share(const char* where, NDB_SHARE* share) { fprintf(DBUG_FILE, - "%s %s.%s: use_count: %u, commit_count: %llu\n", + "%s %s.%s: use_count: %u, commit_count: %lu\n", where, share->db, share->table_name, share->use_count, - (long long unsigned int) share->commit_count); + (ulong) share->commit_count); fprintf(DBUG_FILE, " - key: %s, key_length: %d\n", share->key, share->key_length); @@ -7621,7 +7614,6 @@ NDB_SHARE *ndbcluster_get_share(const char *key, TABLE *table, bool create_if_not_exists, bool have_lock) { - THD *thd= current_thd; NDB_SHARE *share; uint length= (uint) strlen(key); DBUG_ENTER("ndbcluster_get_share"); @@ -7951,10 +7943,10 @@ ha_ndbcluster::null_value_index_search(KEY_MULTI_RANGE *ranges, const byte *key= range->start_key.key; uint key_len= range->start_key.length; if (check_null_in_key(key_info, key, key_len)) - DBUG_RETURN(true); + DBUG_RETURN(TRUE); curr += reclength; } - DBUG_RETURN(false); + DBUG_RETURN(FALSE); } int @@ -8067,7 +8059,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, !define_read_attrs(curr, op) && 
(op->setAbortOption(AO_IgnoreError), TRUE) && (!m_use_partition_function || - (op->setPartitionId(part_spec.start_part), true))) + (op->setPartitionId(part_spec.start_part), TRUE))) curr += reclength; else ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError()); @@ -8128,7 +8120,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, const key_range *keys[2]= { &multi_range_curr->start_key, &multi_range_curr->end_key }; - if ((res= set_bounds(scanOp, active_index, false, keys, + if ((res= set_bounds(scanOp, active_index, FALSE, keys, multi_range_curr-ranges))) DBUG_RETURN(res); break; @@ -8250,7 +8242,7 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p) DBUG_MULTI_RANGE(6); // First fetch from cursor DBUG_ASSERT(range_no == -1); - if ((res= m_multi_cursor->nextResult(true))) + if ((res= m_multi_cursor->nextResult(TRUE))) { DBUG_MULTI_RANGE(15); goto close_scan; @@ -8372,7 +8364,6 @@ ha_ndbcluster::update_table_comment( } ndb->setDatabaseName(m_dbname); - NDBDICT* dict= ndb->getDictionary(); const NDBTAB* tab= m_table; DBUG_ASSERT(tab != NULL); @@ -8567,7 +8558,7 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) ndb->setDatabaseName(share->db); Ndb_table_guard ndbtab_g(ndb->getDictionary(), share->table_name); if (ndbtab_g.get_table() && - ndb_get_table_statistics(NULL, false, ndb, + ndb_get_table_statistics(NULL, FALSE, ndb, ndbtab_g.get_table(), &stat) == 0) { char buff[22], buff2[22]; @@ -8947,7 +8938,7 @@ void ndb_serialize_cond(const Item *item, void *arg) type == MYSQL_TYPE_DATETIME) ? (context->expecting_field_result(STRING_RESULT) || context->expecting_field_result(INT_RESULT)) - : true)) && + : TRUE)) && // Bit fields no yet supported in scan filter type != MYSQL_TYPE_BIT && // No BLOB support in scan filter @@ -9607,25 +9598,24 @@ ha_ndbcluster::build_scan_filter_predicate(Ndb_cond * &cond, break; Ndb_item *a= cond->next->ndb_item; Ndb_item *b, *field, *value= NULL; - LINT_INIT(field); switch (cond->ndb_item->argument_count()) { case 1: - field= - (a->type == NDB_FIELD)? a : NULL; + field= (a->type == NDB_FIELD)? a : NULL; break; case 2: if (!cond->next->next) + { + field= NULL; break; + } b= cond->next->next->ndb_item; - value= - (a->type == NDB_VALUE)? a - : (b->type == NDB_VALUE)? b - : NULL; - field= - (a->type == NDB_FIELD)? a - : (b->type == NDB_FIELD)? b - : NULL; + value= ((a->type == NDB_VALUE) ? a : + (b->type == NDB_VALUE) ? b : + NULL); + field= ((a->type == NDB_FIELD) ? a : + (b->type == NDB_FIELD) ? 
b : + NULL); break; default: field= NULL; //Keep compiler happy @@ -10194,8 +10184,8 @@ int ha_ndbcluster::get_default_no_partitions(HA_CREATE_INFO *create_info) ha_rows max_rows, min_rows; if (create_info) { - max_rows= info->max_rows; - min_rows= info->min_rows; + max_rows= create_info->max_rows; + min_rows= create_info->min_rows; } else { @@ -10346,15 +10336,14 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info, { uint16 frag_data[MAX_PARTITIONS]; char *ts_names[MAX_PARTITIONS]; - ulong ts_index= 0, fd_index= 0, i, j; + ulong fd_index= 0, i, j; NDBTAB *tab= (NDBTAB*)tab_par; NDBTAB::FragmentType ftype= NDBTAB::UserDefined; partition_element *part_elem; bool first= TRUE; - uint ts_id, ts_version, part_count= 0, tot_ts_name_len; + uint tot_ts_name_len; List_iterator<partition_element> part_it(part_info->partitions); int error; - char *name_ptr; DBUG_ENTER("ha_ndbcluster::set_up_partition_info"); if (part_info->part_type == HASH_PARTITION && @@ -10468,7 +10457,7 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info, } -bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info, +bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *create_info, uint table_changes) { DBUG_ENTER("ha_ndbcluster::check_if_incompatible_data"); @@ -10532,70 +10521,72 @@ bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info, /* Check that row format didn't change */ if ((create_info->used_fields & HA_CREATE_USED_AUTO) && - get_row_type() != info->row_type) + get_row_type() != create_info->row_type) DBUG_RETURN(COMPATIBLE_DATA_NO); DBUG_RETURN(COMPATIBLE_DATA_YES); } -bool set_up_tablespace(st_alter_tablespace *info, +bool set_up_tablespace(st_alter_tablespace *alter_info, NdbDictionary::Tablespace *ndb_ts) { - ndb_ts->setName(info->tablespace_name); - ndb_ts->setExtentSize(info->extent_size); - ndb_ts->setDefaultLogfileGroup(info->logfile_group_name); - return false; + ndb_ts->setName(alter_info->tablespace_name); + ndb_ts->setExtentSize(alter_info->extent_size); + ndb_ts->setDefaultLogfileGroup(alter_info->logfile_group_name); + return FALSE; } -bool set_up_datafile(st_alter_tablespace *info, +bool set_up_datafile(st_alter_tablespace *alter_info, NdbDictionary::Datafile *ndb_df) { - if (info->max_size > 0) + if (alter_info->max_size > 0) { my_error(ER_TABLESPACE_AUTO_EXTEND_ERROR, MYF(0)); - return true; + return TRUE; } - ndb_df->setPath(info->data_file_name); - ndb_df->setSize(info->initial_size); - ndb_df->setTablespace(info->tablespace_name); - return false; + ndb_df->setPath(alter_info->data_file_name); + ndb_df->setSize(alter_info->initial_size); + ndb_df->setTablespace(alter_info->tablespace_name); + return FALSE; } -bool set_up_logfile_group(st_alter_tablespace *info, +bool set_up_logfile_group(st_alter_tablespace *alter_info, NdbDictionary::LogfileGroup *ndb_lg) { - ndb_lg->setName(info->logfile_group_name); - ndb_lg->setUndoBufferSize(info->undo_buffer_size); - return false; + ndb_lg->setName(alter_info->logfile_group_name); + ndb_lg->setUndoBufferSize(alter_info->undo_buffer_size); + return FALSE; } -bool set_up_undofile(st_alter_tablespace *info, +bool set_up_undofile(st_alter_tablespace *alter_info, NdbDictionary::Undofile *ndb_uf) { - ndb_uf->setPath(info->undo_file_name); - ndb_uf->setSize(info->initial_size); - ndb_uf->setLogfileGroup(info->logfile_group_name); - return false; + ndb_uf->setPath(alter_info->undo_file_name); + ndb_uf->setSize(alter_info->initial_size); + ndb_uf->setLogfileGroup(alter_info->logfile_group_name); + 
return FALSE; } -int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace *info) +int ndbcluster_alter_tablespace(handlerton *hton, + THD* thd, st_alter_tablespace *alter_info) { + int is_tablespace= 0; + NdbError err; + NDBDICT *dict; + int error; + const char *errmsg; + Ndb *ndb; DBUG_ENTER("ha_ndbcluster::alter_tablespace"); + LINT_INIT(errmsg); - int is_tablespace= 0; - Ndb *ndb= check_ndb_in_thd(thd); + ndb= check_ndb_in_thd(thd); if (ndb == NULL) { DBUG_RETURN(HA_ERR_NO_CONNECTION); } + dict= ndb->getDictionary(); - NdbError err; - NDBDICT *dict= ndb->getDictionary(); - int error; - const char * errmsg; - LINT_INIT(errmsg); - - switch (info->ts_cmd_type){ + switch (alter_info->ts_cmd_type){ case (CREATE_TABLESPACE): { error= ER_CREATE_FILEGROUP_FAILED; @@ -10603,11 +10594,11 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace NdbDictionary::Tablespace ndb_ts; NdbDictionary::Datafile ndb_df; NdbDictionary::ObjectId objid; - if (set_up_tablespace(info, &ndb_ts)) + if (set_up_tablespace(alter_info, &ndb_ts)) { DBUG_RETURN(1); } - if (set_up_datafile(info, &ndb_df)) + if (set_up_datafile(alter_info, &ndb_df)) { DBUG_RETURN(1); } @@ -10617,7 +10608,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace DBUG_PRINT("error", ("createTablespace returned %d", error)); goto ndberror; } - DBUG_PRINT("info", ("Successfully created Tablespace")); + DBUG_PRINT("alter_info", ("Successfully created Tablespace")); errmsg= "DATAFILE"; if (dict->createDatafile(ndb_df)) { @@ -10639,10 +10630,10 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace case (ALTER_TABLESPACE): { error= ER_ALTER_FILEGROUP_FAILED; - if (info->ts_alter_tablespace_type == ALTER_TABLESPACE_ADD_FILE) + if (alter_info->ts_alter_tablespace_type == ALTER_TABLESPACE_ADD_FILE) { NdbDictionary::Datafile ndb_df; - if (set_up_datafile(info, &ndb_df)) + if (set_up_datafile(alter_info, &ndb_df)) { DBUG_RETURN(1); } @@ -10652,14 +10643,14 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace goto ndberror; } } - else if(info->ts_alter_tablespace_type == ALTER_TABLESPACE_DROP_FILE) + else if(alter_info->ts_alter_tablespace_type == ALTER_TABLESPACE_DROP_FILE) { - NdbDictionary::Tablespace ts= dict->getTablespace(info->tablespace_name); - NdbDictionary::Datafile df= dict->getDatafile(0, info->data_file_name); + NdbDictionary::Tablespace ts= dict->getTablespace(alter_info->tablespace_name); + NdbDictionary::Datafile df= dict->getDatafile(0, alter_info->data_file_name); NdbDictionary::ObjectId objid; df.getTablespaceId(&objid); if (ts.getObjectId() == objid.getObjectId() && - strcmp(df.getPath(), info->data_file_name) == 0) + strcmp(df.getPath(), alter_info->data_file_name) == 0) { errmsg= " DROP DATAFILE"; if (dict->dropDatafile(df)) @@ -10677,7 +10668,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace else { DBUG_PRINT("error", ("Unsupported alter tablespace: %d", - info->ts_alter_tablespace_type)); + alter_info->ts_alter_tablespace_type)); DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); } is_tablespace= 1; @@ -10689,14 +10680,14 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace NdbDictionary::LogfileGroup ndb_lg; NdbDictionary::Undofile ndb_uf; NdbDictionary::ObjectId objid; - if (info->undo_file_name == NULL) + if (alter_info->undo_file_name == NULL) { /* REDO files in LOGFILE GROUP not supported yet */ DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); } - 
if (set_up_logfile_group(info, &ndb_lg)) + if (set_up_logfile_group(alter_info, &ndb_lg)) { DBUG_RETURN(1); } @@ -10705,8 +10696,8 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace { goto ndberror; } - DBUG_PRINT("info", ("Successfully created Logfile Group")); - if (set_up_undofile(info, &ndb_uf)) + DBUG_PRINT("alter_info", ("Successfully created Logfile Group")); + if (set_up_undofile(alter_info, &ndb_uf)) { DBUG_RETURN(1); } @@ -10728,7 +10719,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace case (ALTER_LOGFILE_GROUP): { error= ER_ALTER_FILEGROUP_FAILED; - if (info->undo_file_name == NULL) + if (alter_info->undo_file_name == NULL) { /* REDO files in LOGFILE GROUP not supported yet @@ -10736,7 +10727,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); } NdbDictionary::Undofile ndb_uf; - if (set_up_undofile(info, &ndb_uf)) + if (set_up_undofile(alter_info, &ndb_uf)) { DBUG_RETURN(1); } @@ -10751,7 +10742,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace { error= ER_DROP_FILEGROUP_FAILED; errmsg= "TABLESPACE"; - if (dict->dropTablespace(dict->getTablespace(info->tablespace_name))) + if (dict->dropTablespace(dict->getTablespace(alter_info->tablespace_name))) { goto ndberror; } @@ -10762,7 +10753,7 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace { error= ER_DROP_FILEGROUP_FAILED; errmsg= "LOGFILE GROUP"; - if (dict->dropLogfileGroup(dict->getLogfileGroup(info->logfile_group_name))) + if (dict->dropLogfileGroup(dict->getLogfileGroup(alter_info->logfile_group_name))) { goto ndberror; } @@ -10785,13 +10776,13 @@ int ndbcluster_alter_tablespace(handlerton *hton, THD* thd, st_alter_tablespace if (is_tablespace) ndbcluster_log_schema_op(thd, 0, thd->query, thd->query_length, - "", info->tablespace_name, + "", alter_info->tablespace_name, 0, 0, SOT_TABLESPACE, 0, 0, 0); else ndbcluster_log_schema_op(thd, 0, thd->query, thd->query_length, - "", info->logfile_group_name, + "", alter_info->logfile_group_name, 0, 0, SOT_LOGFILE_GROUP, 0, 0, 0); #endif @@ -10812,7 +10803,6 @@ bool ha_ndbcluster::get_no_parts(const char *name, uint *no_parts) { Ndb *ndb; NDBDICT *dict; - const NDBTAB *tab; int err; DBUG_ENTER("ha_ndbcluster::get_no_parts"); LINT_INIT(err); diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 38b640d5f55..fc7d933be7d 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -1131,7 +1131,7 @@ ndbcluster_update_slock(THD *thd, ndb_error= this_error; break; } -end: + if (ndb_error) push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index feb08f474b7..0e9da3eb22f 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -584,7 +584,6 @@ int ha_partition::drop_partitions(const char *path) List_iterator<partition_element> part_it(m_part_info->partitions); char part_name_buff[FN_REFLEN]; uint no_parts= m_part_info->partitions.elements; - uint part_count= 0; uint no_subparts= m_part_info->no_subparts; uint i= 0; uint name_variant; @@ -1075,7 +1074,6 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, uint no_parts= m_part_info->no_parts; uint no_subparts= m_part_info->no_subparts; uint i= 0; - LEX *lex= thd->lex; int error; DBUG_ENTER("ha_partition::handle_opt_partitions"); DBUG_PRINT("enter", ("all_parts %u, flag= %u", 
all_parts, flag)); @@ -1087,11 +1085,9 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, { if (m_is_sub_partitioned) { - List_iterator<partition_element> sub_it(part_elem->subpartitions); uint j= 0, part; do { - partition_element *sub_elem= sub_it++; part= i * no_subparts + j; DBUG_PRINT("info", ("Optimize subpartition %u", part)); @@ -1136,7 +1132,6 @@ int ha_partition::prepare_new_partition(TABLE *table, { int error; bool create_flag= FALSE; - bool open_flag= FALSE; DBUG_ENTER("prepare_new_partition"); if ((error= set_up_table_before_create(table, part_name, create_info, @@ -1245,7 +1240,6 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info, handler **new_file_array; int error= 1; bool first; - bool copy_parts= FALSE; uint temp_partitions= m_part_info->temp_partitions.elements; THD *thd= current_thd; DBUG_ENTER("ha_partition::change_partitions"); @@ -2061,7 +2055,6 @@ bool ha_partition::new_handlers_from_part_info(MEM_ROOT *mem_root) partition_element *part_elem; uint alloc_len= (m_tot_parts + 1) * sizeof(handler*); List_iterator_fast <partition_element> part_it(m_part_info->partitions); - THD *thd= current_thd; DBUG_ENTER("ha_partition::new_handlers_from_part_info"); if (!(m_file= (handler **) alloc_root(mem_root, alloc_len))) diff --git a/sql/handler.cc b/sql/handler.cc index 3c15313d59b..c9a58238877 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -44,10 +44,6 @@ static handlerton *installed_htons[128]; KEY_CREATE_INFO default_key_create_info= { HA_KEY_ALG_UNDEF, 0, {NullS,0} }; -/* static functions defined in this file */ - -static handler *create_default(TABLE_SHARE *table, MEM_ROOT *mem_root); - /* number of entries in handlertons[] */ ulong total_ha= 0; /* number of storage engines (from handlertons[]) that support 2pc */ @@ -164,11 +160,13 @@ const char *ha_get_storage_engine(enum legacy_db_type db_type) } +#ifdef NOT_USED static handler *create_default(TABLE_SHARE *table, MEM_ROOT *mem_root) { handlerton *hton= ha_default_handlerton(current_thd); return (hton && hton->create) ? 
hton->create(hton, table, mem_root) : NULL; } +#endif handlerton *ha_resolve_by_legacy_type(THD *thd, enum legacy_db_type db_type) @@ -3363,7 +3361,6 @@ static my_bool exts_handlerton(THD *unused, st_plugin_int *plugin, TYPELIB *ha_known_exts(void) { - MEM_ROOT *mem_root= current_thd->mem_root; if (!known_extensions.type_names || mysys_usage_id != known_extensions_id) { List<char> found_exts; diff --git a/sql/item.cc b/sql/item.cc index 433fcd1d078..7b81a4499e7 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -4096,7 +4096,7 @@ enum_field_types Item::string_field_type() const f_type= MYSQL_TYPE_LONG_BLOB; else if (max_length >= 65536) f_type= MYSQL_TYPE_MEDIUM_BLOB; - return type; + return f_type; } diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index cd6fd333d61..792c828ff29 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -1830,7 +1830,6 @@ void Item_func_case::fix_length_and_dec() { Item **agg; uint nagg; - THD *thd= current_thd; uint found_types= 0; if (!(agg= (Item**) sql_alloc(sizeof(Item*)*(ncases+1)))) return; diff --git a/sql/item_func.cc b/sql/item_func.cc index 1139db6ad72..4693191470c 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -997,7 +997,7 @@ String *Item_decimal_typecast::val_str(String *str) my_decimal tmp_buf, *tmp= val_decimal(&tmp_buf); if (null_value) return NULL; - my_decimal2string(E_DEC_FATAL_ERROR, &tmp_buf, 0, 0, 0, str); + my_decimal2string(E_DEC_FATAL_ERROR, tmp, 0, 0, 0, str); return str; } @@ -4860,7 +4860,7 @@ longlong Item_func_bit_xor::val_int() thd Thread handler var_type global / session name Name of base or system variable - component Component. + component Component NOTES If component.str = 0 then the variable name is in 'name' diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc index 3da68cf43c2..9321992e566 100644 --- a/sql/item_xmlfunc.cc +++ b/sql/item_xmlfunc.cc @@ -577,7 +577,6 @@ String * Item_nodeset_func_union::val_nodeset(String *nodeset) both_str.alloc(numnodes); char *both= (char*) both_str.ptr(); bzero((void*)both, numnodes); - uint pos= 0; MY_XPATH_FLT *flt; fltbeg= (MY_XPATH_FLT*) s0->ptr(); @@ -1484,7 +1483,6 @@ static int my_xpath_parse_AxisName(MY_XPATH *xpath) static int my_xpath_parse_LocationPath(MY_XPATH *xpath); static int my_xpath_parse_AbsoluteLocationPath(MY_XPATH *xpath); static int my_xpath_parse_RelativeLocationPath(MY_XPATH *xpath); -static int my_xpath_parse_AbbreviatedAbsoluteLocationPath(MY_XPATH *xpath); static int my_xpath_parse_AbbreviatedStep(MY_XPATH *xpath); static int my_xpath_parse_Step(MY_XPATH *xpath); static int my_xpath_parse_AxisSpecifier(MY_XPATH *xpath); @@ -1503,7 +1501,6 @@ static int my_xpath_parse_RelationalExpr(MY_XPATH *xpath); static int my_xpath_parse_AndExpr(MY_XPATH *xpath); static int my_xpath_parse_EqualityExpr(MY_XPATH *xpath); static int my_xpath_parse_VariableReference(MY_XPATH *xpath); -static int my_xpath_parse_slash_opt_slash(MY_XPATH *xpath); /* @@ -2699,7 +2696,6 @@ String *Item_func_xml_update::val_str(String *str) } MY_XML_NODE *nodebeg= (MY_XML_NODE*) pxml.ptr(); - MY_XML_NODE *nodeend= (MY_XML_NODE*) pxml.ptr() + pxml.length(); MY_XPATH_FLT *fltbeg= (MY_XPATH_FLT*) nodeset->ptr(); MY_XPATH_FLT *fltend= (MY_XPATH_FLT*) (nodeset->ptr() + nodeset->length()); diff --git a/sql/log.cc b/sql/log.cc index 5cffa2829ea..5e9ebfcb902 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -147,8 +147,7 @@ public: */ void truncate(my_off_t pos) { - DBUG_PRINT("info", ("truncating to position %lu", pos)); - DBUG_PRINT("info", ("before_stmt_pos=%lu", pos)); + DBUG_PRINT("info", 
("truncating to position %lu", (ulong) pos)); delete pending(); set_pending(0); reinit_io_cache(&trans_log, WRITE_CACHE, pos, 0, 0); @@ -909,7 +908,7 @@ bool LOGGER::slow_log_print(THD *thd, const char *query, uint query_length, my_time_t current_time; Security_context *sctx= thd->security_ctx; - uint message_buff_len= 0, user_host_len= 0; + uint user_host_len= 0; longlong query_time= 0, lock_time= 0; /* @@ -1551,11 +1550,9 @@ static int binlog_prepare(handlerton *hton, THD *thd, bool all) static int binlog_commit(handlerton *hton, THD *thd, bool all) { - int error= 0; DBUG_ENTER("binlog_commit"); binlog_trx_data *const trx_data= (binlog_trx_data*) thd->ha_data[binlog_hton->slot]; - IO_CACHE *trans_log= &trx_data->trans_log; DBUG_ASSERT(mysql_bin_log.is_open()); if (all && trx_data->empty()) @@ -1584,7 +1581,6 @@ static int binlog_rollback(handlerton *hton, THD *thd, bool all) int error=0; binlog_trx_data *const trx_data= (binlog_trx_data*) thd->ha_data[binlog_hton->slot]; - IO_CACHE *trans_log= &trx_data->trans_log; DBUG_ASSERT(mysql_bin_log.is_open()); if (trx_data->empty()) { @@ -1647,9 +1643,6 @@ static int binlog_savepoint_set(handlerton *hton, THD *thd, void *sv) static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv) { DBUG_ENTER("binlog_savepoint_rollback"); - binlog_trx_data *const trx_data= - (binlog_trx_data*) thd->ha_data[binlog_hton->slot]; - IO_CACHE *trans_log= &trx_data->trans_log; DBUG_ASSERT(mysql_bin_log.is_open()); /* @@ -1660,7 +1653,7 @@ static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv) if (unlikely(thd->options & (OPTION_STATUS_NO_TRANS_UPDATE | OPTION_KEEP_LOG))) { - int const error= + int error= thd->binlog_query(THD::STMT_QUERY_TYPE, thd->query, thd->query_length, TRUE, FALSE); DBUG_RETURN(error); @@ -1669,6 +1662,7 @@ static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv) DBUG_RETURN(0); } + int check_binlog_magic(IO_CACHE* log, const char** errmsg) { char magic[4]; @@ -1689,6 +1683,7 @@ int check_binlog_magic(IO_CACHE* log, const char** errmsg) return 0; } + File open_binlog(IO_CACHE *log, const char *log_file_name, const char **errmsg) { File file; @@ -2195,7 +2190,6 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time, if (!(specialflag & SPECIAL_SHORT_LOG_FORMAT)) { - Security_context *sctx= thd->security_ctx; if (current_time != last_time) { last_time= current_time; @@ -2434,7 +2428,6 @@ bool MYSQL_BIN_LOG::open(const char *log_name, bool null_created_arg) { File file= -1; - int open_flags = O_CREAT | O_BINARY; DBUG_ENTER("MYSQL_BIN_LOG::open"); DBUG_PRINT("enter",("log_type: %d",(int) log_type_arg)); @@ -3245,7 +3238,6 @@ void MYSQL_BIN_LOG::new_file_impl(bool need_lock) We log the whole file name for log file as the user may decide to change base names at some point. */ - THD *thd = current_thd; /* may be 0 if we are reacting to SIGHUP */ Rotate_log_event r(new_name+dirname_length(new_name), 0, LOG_EVENT_OFFSET, 0); r.write(&log_file); @@ -3481,10 +3473,10 @@ int THD::binlog_flush_transaction_cache() { DBUG_ENTER("binlog_flush_transaction_cache"); binlog_trx_data *trx_data= (binlog_trx_data*) ha_data[binlog_hton->slot]; - DBUG_PRINT("enter", ("trx_data=0x%lu", trx_data)); + DBUG_PRINT("enter", ("trx_data: 0x%lx", (ulong) trx_data)); if (trx_data) - DBUG_PRINT("enter", ("trx_data->before_stmt_pos=%u", - trx_data->before_stmt_pos)); + DBUG_PRINT("enter", ("trx_data->before_stmt_pos: %lu", + (ulong) trx_data->before_stmt_pos)); /* Write the transaction cache to the binary log. 
We don't flush and @@ -3982,8 +3974,6 @@ bool MYSQL_BIN_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event) if (likely(is_open())) // Should always be true { - uint length; - /* We only bother to write to the binary log if there is anything to write. @@ -4023,9 +4013,6 @@ bool MYSQL_BIN_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event) if (commit_event && commit_event->write(&log_file)) goto err; -#ifndef DBUG_OFF - DBUG_skip_commit: -#endif if (flush_and_sync()) goto err; DBUG_EXECUTE_IF("half_binlogged_transaction", abort();); diff --git a/sql/log.h b/sql/log.h index 61db7052f75..80aa4b20ee6 100644 --- a/sql/log.h +++ b/sql/log.h @@ -33,7 +33,7 @@ class TC_LOG virtual int open(const char *opt_name)=0; virtual void close()=0; - virtual int log(THD *thd, my_xid xid)=0; + virtual int log_xid(THD *thd, my_xid xid)=0; virtual void unlog(ulong cookie, my_xid xid)=0; }; @@ -43,7 +43,7 @@ public: TC_LOG_DUMMY() {} int open(const char *opt_name) { return 0; } void close() { } - int log(THD *thd, my_xid xid) { return 1; } + int log_xid(THD *thd, my_xid xid) { return 1; } void unlog(ulong cookie, my_xid xid) { } }; @@ -88,7 +88,7 @@ class TC_LOG_MMAP: public TC_LOG TC_LOG_MMAP(): inited(0) {} int open(const char *opt_name); void close(); - int log(THD *thd, my_xid xid); + int log_xid(THD *thd, my_xid xid); void unlog(ulong cookie, my_xid xid); int recover(); @@ -287,7 +287,7 @@ public: int open(const char *opt_name); void close(); - int log(THD *thd, my_xid xid); + int log_xid(THD *thd, my_xid xid); void unlog(ulong cookie, my_xid xid); int recover(IO_CACHE *log, Format_description_log_event *fdle); #if !defined(MYSQL_CLIENT) diff --git a/sql/log_event.cc b/sql/log_event.cc index 520ced3671d..82cfc0cd3a2 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -139,22 +139,6 @@ static void pretty_print_str(IO_CACHE* cache, char* str, int len) } #endif /* MYSQL_CLIENT */ -#ifdef HAVE_purify -static void -valgrind_check_mem(void *ptr, size_t len) -{ - static volatile uchar dummy; - for (volatile uchar *p= (uchar*) ptr ; p != (uchar*) ptr + len ; ++p) - { - int const c = *p; - if (c < 128) - dummy= c + 1; - else - dummy = c - 1; - } -} -#endif - #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) static void clear_all_errors(THD *thd, struct st_relay_log_info *rli) @@ -5483,7 +5467,6 @@ int Rows_log_event::do_add_row_data(byte *const row_data, if (static_cast<my_size_t>(m_rows_end - m_rows_cur) < length) { my_size_t const block_size= 1024; - my_ptrdiff_t const old_alloc= m_rows_end - m_rows_buf; my_ptrdiff_t const cur_size= m_rows_cur - m_rows_buf; my_ptrdiff_t const new_alloc= block_size * ((cur_size + length) / block_size + block_size - 1); diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 86ff11d725e..4c5ccce57d6 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -722,7 +722,6 @@ pthread_handler_t handle_slave(void *arg); static ulong find_bit_type(const char *x, TYPELIB *bit_lib); static void clean_up(bool print_message); static int test_if_case_insensitive(const char *dir_name); -static void end_ssl(); #ifndef EMBEDDED_LIBRARY static void start_signal_handler(void); @@ -730,6 +729,7 @@ static void close_server_sock(); static void clean_up_mutexes(void); static void wait_for_signal_thread_to_end(void); static void create_pid_file(); +static void end_ssl(); #endif @@ -1236,7 +1236,9 @@ void clean_up(bool print_message) #endif delete binlog_filter; delete rpl_filter; +#ifndef EMBEDDED_LIBRARY end_ssl(); +#endif vio_end(); #ifdef USE_REGEX my_regex_end(); diff --git 
a/sql/opt_range.cc b/sql/opt_range.cc index 1576cd7dfa2..2dedf41e2c3 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -2420,8 +2420,6 @@ static int find_used_partitions_imerge(PART_PRUNE_PARAM *ppar, static int find_used_partitions_imerge_list(PART_PRUNE_PARAM *ppar, List<SEL_IMERGE> &merges); static void mark_all_partitions_as_used(partition_info *part_info); -static uint32 part_num_to_part_id_range(PART_PRUNE_PARAM* prune_par, - uint32 num); #ifndef DBUG_OFF static void print_partitioning_index(KEY_PART *parts, KEY_PART *parts_end); @@ -4682,8 +4680,7 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree, param->table->key_info[keynr].name, found_read_time, read_time)); - if (read_time > found_read_time && found_records != HA_POS_ERROR - /*|| read_time == DBL_MAX*/ ) + if (read_time > found_read_time && found_records != HA_POS_ERROR) { read_time= found_read_time; best_records= found_records; diff --git a/sql/partition_info.cc b/sql/partition_info.cc index 76630e8530b..a7f9bd413c6 100644 --- a/sql/partition_info.cc +++ b/sql/partition_info.cc @@ -920,7 +920,6 @@ bool partition_info::set_up_charset_field_preps() if (field_is_partition_charset(field)) { char *field_buf; - CHARSET_INFO *cs= ((Field_str*)field)->charset(); size= field->pack_length(); if (!(field_buf= sql_calloc(size))) goto error; diff --git a/sql/protocol.cc b/sql/protocol.cc index b0f3d036b73..05e98c68e4e 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -937,15 +937,15 @@ bool Protocol_simple::store(Field *field) char buff[MAX_FIELD_WIDTH]; String str(buff,sizeof(buff), &my_charset_bin); CHARSET_INFO *tocs= this->thd->variables.character_set_results; +#ifndef DBUG_OFF TABLE *table= field->table; -#ifdef DBUG_OFF my_bitmap_map *old_map= 0; if (table->file) old_map= dbug_tmp_use_all_columns(table, table->read_set); #endif field->val_str(&str); -#ifdef DBUG_OFF +#ifndef DBUG_OFF if (old_map) dbug_tmp_restore_column_map(table->read_set, old_map); #endif diff --git a/sql/set_var.cc b/sql/set_var.cc index 2ef0473dc13..36161ce92e2 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -3996,8 +3996,6 @@ sys_var_event_scheduler::update(THD *thd, set_var *var) DBUG_PRINT("info", ("new_value: %d", (int) var->save_result.ulong_value)); - Item_result var_type= var->value->result_type(); - if (var->save_result.ulong_value == Events::EVENTS_ON) res= Events::get_instance()->start_execution_of_events(); else if (var->save_result.ulong_value == Events::EVENTS_OFF) diff --git a/sql/set_var.h b/sql/set_var.h index befb4a9d700..abf0ece03bf 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -502,7 +502,7 @@ public: sys_var_thd_dbug(const char *name_arg) :sys_var_thd(name_arg) {} bool check_update_type(Item_result type) { return type != STRING_RESULT; } bool check(THD *thd, set_var *var); - SHOW_TYPE type() { return SHOW_CHAR; } + SHOW_TYPE show_type() { return SHOW_CHAR; } bool update(THD *thd, set_var *var); void set_default(THD *thd, enum_var_type type) { DBUG_POP(); } byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *b); @@ -557,7 +557,7 @@ public: bool check_type(enum_var_type type) { return type != OPT_GLOBAL; } /* We can't retrieve the value of this, so we don't have to define - type() or value_ptr() + show_type() or value_ptr() */ }; @@ -803,7 +803,7 @@ public: byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); bool check_update_type(Item_result type) { return 0; } void set_default(THD *thd, enum_var_type type); - SHOW_TYPE type() { return SHOW_CHAR; } + SHOW_TYPE show_type() { return 
SHOW_CHAR; } }; @@ -852,7 +852,7 @@ public: bool check_default(enum_var_type type) { return 1; } bool check_type(enum_var_type type) { return type != OPT_GLOBAL; } bool check_update_type(Item_result type) { return 1; } - SHOW_TYPE type() { return SHOW_CHAR; } + SHOW_TYPE show_type() { return SHOW_CHAR; } bool is_readonly() const { return 1; } }; @@ -951,7 +951,7 @@ public: sys_var_long_ptr(name_arg, NULL, NULL) {}; bool update(THD *thd, set_var *var); byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); - SHOW_TYPE type() { return SHOW_CHAR; } + SHOW_TYPE show_type() { return SHOW_CHAR; } bool check(THD *thd, set_var *var); bool check_update_type(Item_result type) { diff --git a/sql/slave.cc b/sql/slave.cc index 414b5afaf46..fb1f71e646f 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -73,6 +73,7 @@ static int request_table_dump(MYSQL* mysql, const char* db, const char* table); static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, const char* table_name, bool overwrite); static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi); +static Log_event* next_event(RELAY_LOG_INFO* rli); /* Find out which replications threads are running diff --git a/sql/slave.h b/sql/slave.h index 43eb71be601..bc039f6eb75 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -111,8 +111,6 @@ extern ulonglong relay_log_space_limit; #define MYSQL_SLAVE_RUN_NOT_CONNECT 1 #define MYSQL_SLAVE_RUN_CONNECT 2 -static Log_event* next_event(RELAY_LOG_INFO* rli); - #define RPL_LOG_NAME (rli->group_master_log_name[0] ? rli->group_master_log_name :\ "FIRST") #define IO_RPL_LOG_NAME (mi->master_log_name[0] ? mi->master_log_name :\ diff --git a/sql/sp.cc b/sql/sp.cc index 301756dc229..3a7bea6a4b1 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -988,7 +988,7 @@ sp_find_routine(THD *thd, int type, sp_name *name, sp_cache **cp, DBUG_PRINT("info", ("found: 0x%lx", (ulong)sp)); if (sp->m_first_free_instance) { - DBUG_PRINT("info", ("first free: 0x%lx, level: %lu, flags %x", + DBUG_PRINT("info", ("first free: 0x%lx level: %lu flags %x", (ulong)sp->m_first_free_instance, sp->m_first_free_instance->m_recursion_level, sp->m_first_free_instance->m_flags)); diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 2ea5849603a..b77d0cc9a0c 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -1367,7 +1367,6 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount, MEM_ROOT call_mem_root; Query_arena call_arena(&call_mem_root, Query_arena::INITIALIZED_FOR_SP); Query_arena backup_arena; - DBUG_ENTER("sp_head::execute_function"); DBUG_PRINT("info", ("function %s", m_name.str)); diff --git a/sql/spatial.cc b/sql/spatial.cc index 62d0c7310e5..6cadb0f3aad 100644 --- a/sql/spatial.cc +++ b/sql/spatial.cc @@ -928,6 +928,8 @@ int Gis_polygon::centroid_xy(double *x, double *y) const n_linear_rings= uint4korr(data); data+= 4; + DBUG_ASSERT(n_linear_rings > 0); + while (n_linear_rings--) { uint32 n_points, org_n_points; diff --git a/sql/sql_class.cc b/sql/sql_class.cc index cfe96e9b5cd..d5f81168be3 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -2409,11 +2409,12 @@ THD::binlog_prepare_pending_rows_event(TABLE*, uint32, MY_BITMAP const*, my_size_t colcnt, my_size_t, bool, Update_rows_log_event *); #endif + +#ifdef NOT_USED static char const* field_type_name(enum_field_types type) { - switch (type) - { + switch (type) { case MYSQL_TYPE_DECIMAL: return "MYSQL_TYPE_DECIMAL"; case MYSQL_TYPE_TINY: @@ -2471,6 +2472,7 @@ field_type_name(enum_field_types type) } return "Unknown"; } +#endif my_size_t 
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index fe80837c909..63a8a948062 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -318,7 +318,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
     runs without --log-update or --log-bin).
   */
   bool log_on= ((thd->options & OPTION_BIN_LOG) ||
-                (!(thd->security_ctx->master_access & SUPER_ACL));
+                (!(thd->security_ctx->master_access & SUPER_ACL)));
 #endif
   thr_lock_type lock_type = table_list->lock_type;
   Item *unused_conds= 0;
@@ -3090,8 +3090,9 @@ void select_create::send_error(uint errcode,const char *err)
              ("Current statement %s row-based",
               thd->current_stmt_binlog_row_based ? "is" : "is NOT"));
   DBUG_PRINT("info",
-             ("Current table (at 0x%lu) %s a temporary (or non-existant) table",
-              table,
+             ("Current table (at 0x%lx) %s a temporary (or non-existing) "
+              "table",
+              (ulong) table,
               table && !table->s->tmp_table ? "is NOT" : "is"));
   DBUG_PRINT("info",
              ("Table %s prior to executing this statement",
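
Note on the select_create::send_error() hunk above: passing a bare pointer
where "%lu" expects an unsigned long is undefined behaviour, and "%lu" also
prints decimal; the fix casts explicitly and prints 0x%lx, in line with the
commit-wide %ll -> %l portability cleanup. A sketch of the difference, with
plain printf standing in for DBUG_PRINT:

#include <cstdio>

int main()
{
  int dummy;
  void *table= &dummy;        /* stands in for the real TABLE pointer */

  /* wrong: printf("at 0x%lu", table);  -- type mismatch, decimal output */
  std::printf("Current table (at 0x%lx)\n", (unsigned long) table);
  return 0;
}
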
"is NOT" : "is")); DBUG_PRINT("info", ("Table %s prior to executing this statement", diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 37a5d9c3743..3f734dbbdac 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1900,6 +1900,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, /* Locked closure of all tables */ TABLE_LIST table_list; LEX_STRING conv_name; + uint dummy; /* used as fields initializator */ lex_start(thd, 0, 0); @@ -4041,7 +4042,6 @@ end_with_restore_list: lex->spname->m_name); else { - uint affected= 1; if (!(res= Events::get_instance()->drop_event(thd, lex->spname->m_db, lex->spname->m_name, diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 0b6d841a337..923e851c0ff 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -2002,7 +2002,6 @@ char *generate_partition_syntax(partition_info *part_info, { uint i,j, tot_no_parts, no_subparts; partition_element *part_elem; - partition_element *save_part_elem= NULL; ulonglong buffer_length; char path[FN_REFLEN]; int err= 0; @@ -5369,7 +5368,6 @@ static bool write_log_dropped_partitions(ALTER_PARTITION_PARAM_TYPE *lpt, List_iterator<partition_element> temp_it(part_info->temp_partitions); uint no_temp_partitions= part_info->temp_partitions.elements; uint no_elements= part_info->partitions.elements; - uint i= 0; DBUG_ENTER("write_log_dropped_partitions"); ddl_log_entry.action_type= DDL_LOG_DELETE_ACTION; @@ -5742,7 +5740,6 @@ static void write_log_completed(ALTER_PARTITION_PARAM_TYPE *lpt, bool dont_crash) { partition_info *part_info= lpt->part_info; - uint count_loop= 0; DDL_LOG_MEMORY_ENTRY *log_entry= part_info->exec_log_entry; DBUG_ENTER("write_log_completed"); @@ -6016,8 +6013,6 @@ uint fast_alter_partition_table(THD *thd, TABLE *table, uint fast_alter_partition) { /* Set-up struct used to write frm files */ - ulonglong copied= 0; - ulonglong deleted= 0; partition_info *part_info= table->part_info; ALTER_PARTITION_PARAM_TYPE lpt_obj; ALTER_PARTITION_PARAM_TYPE *lpt= &lpt_obj; diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index 1d711b7835c..7c9cd483526 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -44,12 +44,15 @@ plugin_type_init plugin_type_deinitialize[MYSQL_MAX_PLUGIN_TYPE_NUM]= 0,ha_finalize_handlerton,0,0 }; +#ifdef HAVE_DLOPEN static const char *plugin_interface_version_sym= "_mysql_plugin_interface_version_"; static const char *sizeof_st_plugin_sym= "_mysql_sizeof_struct_st_plugin_"; static const char *plugin_declarations_sym= "_mysql_plugin_declarations_"; static int min_plugin_interface_version= MYSQL_PLUGIN_INTERFACE_VERSION & ~0xFF; +#endif + /* Note that 'int version' must be the first field of every plugin sub-structure (plugin->info). 
*/ @@ -80,6 +83,8 @@ static int plugin_array_version=0; my_bool plugin_register_builtin(struct st_mysql_plugin *plugin); void plugin_load(void); +#ifdef HAVE_DLOPEN + static struct st_plugin_dl *plugin_dl_find(const LEX_STRING *dl) { uint i; @@ -117,6 +122,8 @@ static st_plugin_dl *plugin_dl_insert_or_reuse(struct st_plugin_dl *plugin_dl) DBUG_RETURN(dynamic_element(&plugin_dl_array, plugin_dl_array.elements - 1, struct st_plugin_dl *)); } +#endif /* HAVE_DLOPEN */ + static inline void free_plugin_mem(struct st_plugin_dl *p) { @@ -534,6 +541,8 @@ static void plugin_del(struct st_plugin_int *plugin) DBUG_VOID_RETURN; } +#ifdef NOT_USED + static void plugin_del(const LEX_STRING *name) { struct st_plugin_int *plugin; @@ -543,6 +552,8 @@ static void plugin_del(const LEX_STRING *name) DBUG_VOID_RETURN; } +#endif + void plugin_unlock(struct st_plugin_int *plugin) { DBUG_ENTER("plugin_unlock"); diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc index 0ec7c54487a..5fa97dc5c2b 100644 --- a/sql/sql_servers.cc +++ b/sql/sql_servers.cc @@ -32,7 +32,6 @@ uint servers_cache_initialised=FALSE; static uint servers_version=0; static MEM_ROOT mem; static rw_lock_t THR_LOCK_servers; -static bool initialized=0; static byte *servers_cache_get_key(FOREIGN_SERVER *server, uint *length, my_bool not_used __attribute__((unused))) @@ -329,24 +328,22 @@ my_bool get_server_from_table_to_cache(TABLE *table) my_bool server_exists_in_table(THD *thd, LEX_SERVER_OPTIONS *server_options) { - byte server_key[MAX_KEY_LENGTH]; int result= 1; int error= 0; TABLE_LIST tables; TABLE *table; - DBUG_ENTER("server_exists"); bzero((char*) &tables, sizeof(tables)); tables.db= (char*) "mysql"; tables.alias= tables.table_name= (char*) "servers"; - table->use_all_columns(); - /* need to open before acquiring THR_LOCK_plugin or it will deadlock */ if (! 
(table= open_ltable(thd, &tables, TL_WRITE))) DBUG_RETURN(TRUE); + table->use_all_columns(); + rw_wrlock(&THR_LOCK_servers); VOID(pthread_mutex_lock(&servers_cache_mutex)); @@ -393,7 +390,6 @@ my_bool server_exists_in_table(THD *thd, LEX_SERVER_OPTIONS *server_options) int insert_server(THD *thd, FOREIGN_SERVER *server) { - byte server_key[MAX_KEY_LENGTH]; int error= 0; TABLE_LIST tables; TABLE *table; @@ -608,7 +604,6 @@ int insert_server_record(TABLE *table, FOREIGN_SERVER *server) int drop_server(THD *thd, LEX_SERVER_OPTIONS *server_options) { - byte server_key[MAX_KEY_LENGTH]; int error= 0; TABLE_LIST tables; TABLE *table; @@ -1208,7 +1203,7 @@ void servers_free(bool end) FOREIGN_SERVER *get_server_by_name(const char *server_name) { ulong error_num=0; - uint i, server_name_length; + uint server_name_length; FOREIGN_SERVER *server= 0; DBUG_ENTER("get_server_by_name"); DBUG_PRINT("info", ("server_name %s", server_name)); diff --git a/sql/sql_servers.h b/sql/sql_servers.h index 36fb4d07d1b..23b8cefd5bb 100644 --- a/sql/sql_servers.h +++ b/sql/sql_servers.h @@ -26,7 +26,6 @@ typedef struct st_federated_server /* cache handlers */ my_bool servers_init(bool dont_read_server_table); -static my_bool servers_load(THD *thd, TABLE_LIST *tables); my_bool servers_reload(THD *thd); my_bool get_server_from_table_to_cache(TABLE *table); void servers_free(bool end=0); diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 0feaa4d8b32..45fe961d3d6 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -139,7 +139,6 @@ static my_bool show_plugins(THD *thd, st_plugin_int *plugin, { TABLE *table= (TABLE*) arg; struct st_mysql_plugin *plug= plugin->plugin; - Protocol *protocol= thd->protocol; CHARSET_INFO *cs= system_charset_info; char version_buf[20]; @@ -152,8 +151,7 @@ static my_bool show_plugins(THD *thd, st_plugin_int *plugin, cs); - switch (plugin->state) - { + switch (plugin->state) { /* case PLUGIN_IS_FREED: does not happen */ case PLUGIN_IS_DELETED: table->field[2]->store(STRING_WITH_LEN("DELETED"), cs); @@ -1375,6 +1373,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, } if (table->s->key_block_size) { + char *end; packet->append(STRING_WITH_LEN(" KEY_BLOCK_SIZE=")); end= longlong10_to_str(table->s->key_block_size, buff, 10); packet->append(buff, (uint) (end - buff)); @@ -4026,7 +4025,6 @@ static int get_schema_partitions_record(THD *thd, struct st_table_list *tables, partition_element *part_elem; List_iterator<partition_element> part_it(part_info->partitions); uint part_pos= 0, part_id= 0; - uint no_parts= part_info->no_parts; restore_record(table, s->default_values); table->field[1]->store(base_name, strlen(base_name), cs); @@ -4196,6 +4194,7 @@ static int get_schema_partitions_record(THD *thd, struct st_table_list *tables, } +#ifdef NOT_USED static interval_type get_real_interval_type(interval_type i_type) { switch (i_type) { @@ -4239,6 +4238,8 @@ static interval_type get_real_interval_type(interval_type i_type) return INTERVAL_SECOND; } +#endif + /* Loads an event from mysql.event and copies it's data to a row of @@ -5033,7 +5034,6 @@ static my_bool run_hton_fill_schema_files(THD *thd, st_plugin_int *plugin, int fill_schema_files(THD *thd, TABLE_LIST *tables, COND *cond) { - TABLE *table= tables->table; DBUG_ENTER("fill_schema_files"); struct run_hton_fill_schema_files_args args; @@ -5091,7 +5091,7 @@ int fill_schema_status(THD *thd, SHOW_VAR *variables, if (show_type == SHOW_SYS) { - show_type= ((sys_var*) value)->type(); + show_type= ((sys_var*) value)->show_type(); 
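
Note on the store_create_info() hunk above: the int-to-string helper returns
a pointer just past the last digit, and exactly (end - buff) bytes are then
appended, so 'end' is now declared in the one block that uses it. A sketch
of the same convert-then-append pattern, with std::to_chars standing in for
longlong10_to_str:

#include <charconv>
#include <cstdio>
#include <string>

int main()
{
  unsigned long key_block_size= 8192;    /* assumed sample value */
  std::string packet;
  char buff[22];

  packet.append(" KEY_BLOCK_SIZE=");
  auto res= std::to_chars(buff, buff + sizeof(buff), key_block_size);
  char *end= res.ptr;                    /* one past the last digit */
  packet.append(buff, (size_t) (end - buff));

  std::printf("%s\n", packet.c_str());
  return 0;
}
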
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index bb1edba51b2..bc847669d95 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -598,7 +598,6 @@ bool read_ddl_log_entry(uint read_entry, DDL_LOG_ENTRY *ddl_log_entry)
 
 static bool init_ddl_log()
 {
-  bool error= FALSE;
   char file_name[FN_REFLEN];
   DBUG_ENTER("init_ddl_log");
 
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 4b99f887721..0f957208ab0 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -3738,7 +3738,6 @@ part_definition:
           LEX *lex= Lex;
           partition_info *part_info= lex->part_info;
           partition_element *p_elem= new partition_element();
-          uint part_id= part_info->partitions.elements;
 
           if (!p_elem || part_info->partitions.push_back(p_elem))
           {
@@ -3890,7 +3889,6 @@ part_bit_expr:
           bit_expr
           {
             Item *part_expr= $1;
-            int part_expression_ok= 1;
             THD *thd= YYTHD;
             LEX *lex= thd->lex;
             Name_resolution_context *context= &lex->current_select->context;
@@ -5071,9 +5069,9 @@ alter:
         | ALTER SERVER_SYM ident_or_text OPTIONS_SYM '(' server_options_list ')'
           {
             LEX *lex= Lex;
-            Lex->sql_command= SQLCOM_ALTER_SERVER;
-            Lex->server_options.server_name= $3.str;
-            Lex->server_options.server_name_length= $3.length;
+            lex->sql_command= SQLCOM_ALTER_SERVER;
+            lex->server_options.server_name= $3.str;
+            lex->server_options.server_name_length= $3.length;
           }
         ;
@@ -5745,9 +5743,9 @@ db_to_db:
           ident TO_SYM ident
           {
             LEX *lex=Lex;
-            if (Lex->db_list.push_back((LEX_STRING*)
+            if (lex->db_list.push_back((LEX_STRING*)
                 sql_memdup(&$1, sizeof(LEX_STRING))) ||
-                Lex->db_list.push_back((LEX_STRING*)
+                lex->db_list.push_back((LEX_STRING*)
                 sql_memdup(&$3, sizeof(LEX_STRING))))
               YYABORT;
           };
@@ -6673,7 +6671,6 @@ function_call_generic:
           udf_expr_list ')'
           {
             THD *thd= YYTHD;
-            LEX *lex= thd->lex;
             Create_func *builder;
             Item *item= NULL;
 
diff --git a/sql/table.cc b/sql/table.cc
index fcaea159248..0e03c79c564 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -1261,7 +1261,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
   share->column_bitmap_size= bitmap_buffer_size(share->fields);
 
   if (!(bitmaps= (my_bitmap_map*) alloc_root(&share->mem_root,
-                                             share->column_bitmap_size)))
+                                            share->column_bitmap_size)))
     goto err;
   bitmap_init(&share->all_set, bitmaps, share->fields, FALSE);
   bitmap_set_all(&share->all_set);
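
Note on the sql_yacc.yy hunks above: in the grammar, Lex is a macro
(something like the current thread's lex pointer), so every use re-expands
it; the cleanup expands it once into a local and reuses that. A toy sketch
of the idea, assuming a Lex-style macro over hypothetical THD/LEX stand-ins:

struct LEX { int sql_command; };
struct THD { LEX lex_storage; LEX *lex; };

static THD thd_instance;
static THD *current_thd= &thd_instance;

#define Lex (current_thd->lex)           /* assumed expansion */

int main()
{
  current_thd->lex= &current_thd->lex_storage;

  LEX *lex= Lex;                         /* expand the macro once    */
  lex->sql_command= 42;                  /* reuse the cached pointer */
  return (Lex->sql_command == 42) ? 0 : 1;
}
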