author     unknown <jani@a88-113-38-195.elisa-laajakaista.fi>    2007-02-03 00:58:09 +0200
committer  unknown <jani@a88-113-38-195.elisa-laajakaista.fi>    2007-02-03 00:58:09 +0200
commit     3f3d9093f9ce6152f5a018a946806c069837e219 (patch)
tree       604c311840603ecece2a136bbdc86420a3ec0453 /storage
parent     13390debf6aa979ca31437d687eaa9ae2740ad2f (diff)
parent     6e501e6548f15d3450ffb122ba1e683f40a4c917 (diff)
download   mariadb-git-3f3d9093f9ce6152f5a018a946806c069837e219.tar.gz
Merge jamppa@bk-internal.mysql.com:/home/bk/mysql-5.1
into a88-113-38-195.elisa-laajakaista.fi:/home/my/bk/mysql-5.1
BUILD/SETUP.sh:
Auto merged
client/mysql.cc:
Auto merged
configure.in:
Auto merged
libmysqld/lib_sql.cc:
Auto merged
sql/filesort.cc:
Auto merged
sql/ha_ndbcluster.cc:
Auto merged
sql/handler.cc:
Auto merged
sql/item.cc:
Auto merged
sql/item.h:
Auto merged
sql/item_cmpfunc.cc:
Auto merged
sql/item_cmpfunc.h:
Auto merged
sql/item_func.cc:
Auto merged
sql/item_subselect.cc:
Auto merged
sql/item_timefunc.cc:
Auto merged
sql/mysql_priv.h:
Auto merged
sql/opt_range.cc:
Auto merged
sql/sql_class.cc:
Auto merged
sql/sql_lex.h:
Auto merged
sql/sql_parse.cc:
Auto merged
sql/sql_plugin.cc:
Auto merged
sql/sql_select.cc:
Auto merged
sql/sql_show.cc:
Auto merged
sql/sql_yacc.yy:
Auto merged
sql/table.cc:
Auto merged
storage/archive/ha_archive.cc:
Auto merged
plugin/daemon_example/daemon_example.cc:
Merged with main 5.1
sql/mysqld.cc:
Merged with main 5.1
Diffstat (limited to 'storage')
95 files changed, 482 insertions, 414 deletions
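One of the non-trivial interface changes merged below is in the ARCHIVE engine: get_share() drops its unused TABLE* argument and free_share() no longer takes the share pointer, operating on the handler's own `share` member instead. The following is only a simplified, self-contained sketch of that calling pattern, not the MySQL sources; ExampleShare/ExampleHandler and the std::map/std::mutex bookkeeping are hypothetical stand-ins for ARCHIVE_SHARE, archive_open_tables and archive_mutex.

```cpp
// Minimal sketch (not the MySQL code): a handler remembers the shared,
// reference-counted per-table state it opened and releases it through a
// parameterless free_share(), mirroring the ha_archive signature change.
#include <map>
#include <mutex>
#include <string>

struct ExampleShare {
  std::string table_name;
  int use_count;
};

class ExampleHandler {
 public:
  // get_share() only needs the table name: it looks the share up (or creates
  // it) and bumps the reference count under the global mutex.
  ExampleShare *get_share(const std::string &table_name) {
    std::lock_guard<std::mutex> guard(open_tables_mutex);
    ExampleShare *&s = open_tables[table_name];
    if (s == nullptr) s = new ExampleShare{table_name, 0};
    ++s->use_count;
    share = s;                        // remembered by the handler itself
    return share;
  }

  // free_share() takes no argument: it releases the share this handler
  // already holds and deletes it when the last reference goes away.
  int free_share() {
    std::lock_guard<std::mutex> guard(open_tables_mutex);
    if (--share->use_count == 0) {
      open_tables.erase(share->table_name);
      delete share;
    }
    share = nullptr;
    return 0;
  }

 private:
  ExampleShare *share = nullptr;
  static std::mutex open_tables_mutex;
  static std::map<std::string, ExampleShare *> open_tables;
};

std::mutex ExampleHandler::open_tables_mutex;
std::map<std::string, ExampleShare *> ExampleHandler::open_tables;

int main() {
  ExampleHandler h1, h2;
  h1.get_share("t1");
  h2.get_share("t1");   // second handler reuses the same share, use_count == 2
  h1.free_share();
  h2.free_share();      // last reference: the share is deleted here
  return 0;
}
```

The call-site effect is visible in the hunks below: open() now calls get_share(name, &rc) and the error paths simply call free_share(), which also removes the share_to_free naming the old code used to avoid shadowing the member.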
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index 4108bb32a07..bb638e1c17b 100644 --- a/storage/archive/ha_archive.cc +++ b/storage/archive/ha_archive.cc @@ -305,10 +305,8 @@ int ha_archive::read_data_header(azio_stream *file_to_read) See ha_example.cc for a longer description. */ -ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, - TABLE *table, int *rc) +ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, int *rc) { - ARCHIVE_SHARE *share; uint length; DBUG_ENTER("ha_archive::get_share"); @@ -381,20 +379,21 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, Free the share. See ha_example.cc for a description. */ -int ha_archive::free_share(ARCHIVE_SHARE *share_to_free) +int ha_archive::free_share() { int rc= 0; DBUG_ENTER("ha_archive::free_share"); - DBUG_PRINT("ha_archive", ("archive table %.*s has %d open handles on entrance", - share_to_free->table_name_length, share_to_free->table_name, - share_to_free->use_count)); + DBUG_PRINT("ha_archive", + ("archive table %.*s has %d open handles on entrance", + share->table_name_length, share->table_name, + share->use_count)); pthread_mutex_lock(&archive_mutex); - if (!--share_to_free->use_count) + if (!--share->use_count) { - hash_delete(&archive_open_tables, (byte*) share_to_free); - thr_lock_delete(&share_to_free->lock); - VOID(pthread_mutex_destroy(&share_to_free->mutex)); + hash_delete(&archive_open_tables, (byte*) share); + thr_lock_delete(&share->lock); + VOID(pthread_mutex_destroy(&share->mutex)); /* We need to make sure we don't reset the crashed state. If we open a crashed file, wee need to close it as crashed unless @@ -402,12 +401,12 @@ int ha_archive::free_share(ARCHIVE_SHARE *share_to_free) Since we will close the data down after this, we go on and count the flush on close; */ - if (share_to_free->archive_write_open) + if (share->archive_write_open) { - if (azclose(&(share_to_free->archive_write))) + if (azclose(&(share->archive_write))) rc= 1; } - my_free((gptr) share_to_free, MYF(0)); + my_free((gptr) share, MYF(0)); } pthread_mutex_unlock(&archive_mutex); @@ -462,12 +461,14 @@ int ha_archive::open(const char *name, int mode, uint open_options) DBUG_PRINT("ha_archive", ("archive table was opened for crash: %s", (open_options & HA_OPEN_FOR_REPAIR) ? 
"yes" : "no")); - share= get_share(name, table, &rc); + share= get_share(name, &rc); if (rc == HA_ERR_CRASHED_ON_USAGE && !(open_options & HA_OPEN_FOR_REPAIR)) { - free_share(share); + /* purecov: begin inspected */ + free_share(); DBUG_RETURN(rc); + /* purecov: end */ } else if (rc == HA_ERR_OUT_OF_MEM) { @@ -482,7 +483,7 @@ int ha_archive::open(const char *name, int mode, uint open_options) if (!record_buffer) { - free_share(share); + free_share(); DBUG_RETURN(HA_ERR_OUT_OF_MEM); } @@ -535,7 +536,7 @@ int ha_archive::close(void) if (azclose(&archive)) rc= 1; /* then also close share */ - rc|= free_share(share); + rc|= free_share(); DBUG_RETURN(rc); } @@ -837,7 +838,7 @@ int ha_archive::write_row(byte *buf) { if (!memcmp(read_buf + mfield->offset(record), table->next_number_field->ptr, - mfield->max_length())) + mfield->max_display_length())) { rc= HA_ERR_FOUND_DUPP_KEY; goto error; diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h index 638f0db71d0..8f56e8ce060 100644 --- a/storage/archive/ha_archive.h +++ b/storage/archive/ha_archive.h @@ -116,8 +116,8 @@ public: int get_row(azio_stream *file_to_read, byte *buf); int get_row_version2(azio_stream *file_to_read, byte *buf); int get_row_version3(azio_stream *file_to_read, byte *buf); - ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table, int *rc); - int free_share(ARCHIVE_SHARE *share); + ARCHIVE_SHARE *get_share(const char *table_name, int *rc); + int free_share(); int init_archive_writer(); bool auto_repair() const { return 1; } // For the moment we just do this int read_data_header(azio_stream *file_to_read); diff --git a/storage/example/ha_example.cc b/storage/example/ha_example.cc index 12ca91f0a6f..bde6c41d777 100644 --- a/storage/example/ha_example.cc +++ b/storage/example/ha_example.cc @@ -13,39 +13,44 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -/** @file ha_example.cc +/** + @file ha_example.cc - @brief + @brief The ha_example engine is a stubbed storage engine for example purposes only; it does nothing at this point. Its purpose is to provide a source code illustration of how to begin writing new storage engines; see also /storage/example/ha_example.h. - @details - ha_example will let you create/open/delete tables, but nothing further - (for example, indexes are not supported nor can data be stored in the - table). Use this example as a template for implementing the same functionality - in your own storage engine. You can enable the example storage engine in - your build by doing the following during your build process:<br> - ./configure --with-example-storage-engine + @details + ha_example will let you create/open/delete tables, but + nothing further (for example, indexes are not supported nor can data + be stored in the table). Use this example as a template for + implementing the same functionality in your own storage engine. You + can enable the example storage engine in your build by doing the + following during your build process:<br> ./configure + --with-example-storage-engine Once this is done, MySQL will let you create tables with:<br> CREATE TABLE <table name> (...) ENGINE=EXAMPLE; - The example storage engine is set up to use table locks. It implements an - example "SHARE" that is inserted into a hash by table name. You can use this - to store information of state that any example handler object will be able to - see when it is using that table. 
+ The example storage engine is set up to use table locks. It + implements an example "SHARE" that is inserted into a hash by table + name. You can use this to store information of state that any + example handler object will be able to see when it is using that + table. Please read the object definition in ha_example.h before reading the rest of this file. - @note - When you create an EXAMPLE table, the MySQL Server creates a table .frm (format) - file in the database directory, using the table name as the file name as is - customary with MySQL. No other files are created. To get an idea of what occurs, - here is an example select that would do a scan of an entire table: - @code + @note + When you create an EXAMPLE table, the MySQL Server creates a table .frm + (format) file in the database directory, using the table name as the file + name as is customary with MySQL. No other files are created. To get an idea + of what occurs, here is an example select that would do a scan of an entire + table: + + @code ha_example::store_lock ha_example::external_lock ha_example::info @@ -66,13 +71,13 @@ ha_example::external_lock ha_example::extra ENUM HA_EXTRA_RESET Reset database to after open - @endcode + @endcode - Here you see that the example storage engine has 9 rows called before rnd_next - signals that it has reached the end of its data. Also note that the table in - question was already opened; had it not been open, a call to ha_example::open() - would also have been necessary. Calls to ha_example::extra() are hints as to - what will be occuring to the request. + Here you see that the example storage engine has 9 rows called before + rnd_next signals that it has reached the end of its data. Also note that + the table in question was already opened; had it not been open, a call to + ha_example::open() would also have been necessary. Calls to + ha_example::extra() are hints as to what will be occuring to the request. Happy coding!<br> -Brian @@ -90,18 +95,25 @@ static handler *example_create_handler(handlerton *hton, TABLE_SHARE *table, MEM_ROOT *mem_root); -static int example_init_func(); handlerton *example_hton; /* Variables for example share methods */ -static HASH example_open_tables; ///< Hash used to track the number of open tables; variable for example share methods -pthread_mutex_t example_mutex; ///< This is the mutex used to init the hash; variable for example share methods -static int example_init= 0; ///< This variable is used to check the init state of hash; variable for example share methods -/** @brief +/* + Hash used to track the number of open tables; variable for example share + methods +*/ +static HASH example_open_tables; + +/* The mutex used to init the hash; variable for example share methods */ +pthread_mutex_t example_mutex; + +/** + @brief Function we use in the creation of our hash to get key. */ + static byte* example_get_key(EXAMPLE_SHARE *share,uint *length, my_bool not_used __attribute__((unused))) { @@ -109,6 +121,7 @@ static byte* example_get_key(EXAMPLE_SHARE *share,uint *length, return (byte*) share->table_name; } + static int example_init_func(void *p) { DBUG_ENTER("example_init_func"); @@ -126,6 +139,7 @@ static int example_init_func(void *p) DBUG_RETURN(0); } + static int example_done_func(void *p) { int error= 0; @@ -139,11 +153,15 @@ static int example_done_func(void *p) DBUG_RETURN(0); } -/** @brief - Example of simple lock controls. The "share" it creates is a structure we will - pass to each example handler. Do you have to have one of these? 
Well, you have - pieces that are used for locking, and they are needed to function. + +/** + @brief + Example of simple lock controls. The "share" it creates is a + structure we will pass to each example handler. Do you have to have + one of these? Well, you have pieces that are used for locking, and + they are needed to function. */ + static EXAMPLE_SHARE *get_share(const char *table_name, TABLE *table) { EXAMPLE_SHARE *share; @@ -188,10 +206,13 @@ error: return NULL; } -/** @brief + +/** + @brief Free lock controls. We call this whenever we close a table. If the table had the last reference to the share, then we free memory associated with it. */ + static int free_share(EXAMPLE_SHARE *share) { pthread_mutex_lock(&example_mutex); @@ -218,15 +239,19 @@ ha_example::ha_example(handlerton *hton, TABLE_SHARE *table_arg) :handler(hton, table_arg) {} -/** @brief - If frm_error() is called then we will use this to determine the file extensions - that exist for the storage engine. This is also used by the default rename_table - and delete_table method in handler.cc. - @see +/** + @brief + If frm_error() is called then we will use this to determine + the file extensions that exist for the storage engine. This is also + used by the default rename_table and delete_table method in + handler.cc. + + @see rename_table method in handler.cc and delete_table method in handler.cc */ + static const char *ha_example_exts[] = { NullS }; @@ -236,10 +261,12 @@ const char **ha_example::bas_ext() const return ha_example_exts; } -/** @brief + +/** + @brief Used for opening tables. The name will be the name of the file. - @details + @details A table is opened when it needs to be opened; e.g. when a request comes in for a SELECT on the table (tables are not open and closed for each request, they are cached). @@ -247,9 +274,10 @@ const char **ha_example::bas_ext() const Called from handler.cc by handler::ha_open(). The server opens all tables by calling ha_open() which then calls the handler specific open(). - @see + @see handler::ha_open() in handler.cc */ + int ha_example::open(const char *name, int mode, uint test_if_locked) { DBUG_ENTER("ha_example::open"); @@ -261,27 +289,32 @@ int ha_example::open(const char *name, int mode, uint test_if_locked) DBUG_RETURN(0); } -/** @brief + +/** + @brief Closes a table. We call the free_share() function to free any resources that we have allocated in the "shared" structure. - @details + @details Called from sql_base.cc, sql_select.cc, and table.cc. In sql_select.cc it is - only used to close up temporary tables or during the process where a temporary - table is converted over to being a myisam table. + only used to close up temporary tables or during the process where a + temporary table is converted over to being a myisam table. For sql_base.cc look at close_data_tables(). - @see + @see sql_base.cc, sql_select.cc and table.cc */ + int ha_example::close(void) { DBUG_ENTER("ha_example::close"); DBUG_RETURN(free_share(share)); } -/** @brief + +/** + @brief write_row() inserts a row. No extra() hint is given currently if a bulk load is happening. buf() is a byte array of data. You can use the field information to extract the data from the native byte array type. 
@@ -309,13 +342,16 @@ int ha_example::close(void) item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc, sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc and sql_update.cc */ + int ha_example::write_row(byte * buf) { DBUG_ENTER("ha_example::write_row"); DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief Yes, update_row() does what you expect, it updates a row. old_data will have the previous row record in it, while new_data will have the newest data in it. Keep in mind that the server can do updates based on ordering if an ORDER BY @@ -343,34 +379,41 @@ int ha_example::update_row(const byte * old_data, byte * new_data) DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief This will delete a row. buf will contain a copy of the row to be deleted. The server will call this right after the current row has been called (from either a previous rnd_nexT() or index call). - @details + @details If you keep a pointer to the last row or can access a primary key it will make doing the deletion quite a bit easier. Keep in mind that the server does not guarantee consecutive deletions. ORDER BY clauses can be used. - Called in sql_acl.cc and sql_udf.cc to manage internal table information. - Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select it is - used for removing duplicates while in insert it is used for REPLACE calls. + Called in sql_acl.cc and sql_udf.cc to manage internal table + information. Called in sql_delete.cc, sql_insert.cc, and + sql_select.cc. In sql_select it is used for removing duplicates + while in insert it is used for REPLACE calls. - @see + @see sql_acl.cc, sql_udf.cc, sql_delete.cc, sql_insert.cc and sql_select.cc */ + int ha_example::delete_row(const byte * buf) { DBUG_ENTER("ha_example::delete_row"); DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief Positions an index cursor to the index specified in the handle. Fetches the row if available. If the key value is null, begin at the first key of the index. */ + int ha_example::index_read(byte * buf, const byte * key, uint key_len __attribute__((unused)), enum ha_rkey_function find_flag @@ -380,25 +423,33 @@ int ha_example::index_read(byte * buf, const byte * key, DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief Used to read forward through the index. */ + int ha_example::index_next(byte * buf) { DBUG_ENTER("ha_example::index_next"); DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief Used to read backwards through the index. */ + int ha_example::index_prev(byte * buf) { DBUG_ENTER("ha_example::index_prev"); DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief index_first() asks for the first key in the index. @details @@ -413,7 +464,9 @@ int ha_example::index_first(byte * buf) DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief index_last() asks for the last key in the index. @details @@ -428,7 +481,9 @@ int ha_example::index_last(byte * buf) DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief rnd_init() is called when the system wants the storage engine to do a table scan. See the example in the introduction at the top of this file to see when rnd_init() is called. @@ -452,7 +507,9 @@ int ha_example::rnd_end() DBUG_RETURN(0); } -/** @brief + +/** + @brief This is called for each row of the table scan. When you run out of records you should return HA_ERR_END_OF_FILE. Fill buff up with the row information. 
The Field structure for the table is the key to getting data into buf @@ -471,7 +528,9 @@ int ha_example::rnd_next(byte *buf) DBUG_RETURN(HA_ERR_END_OF_FILE); } -/** @brief + +/** + @brief position() is called after each call to rnd_next() if the data needs to be ordered. You can do something like the following to store the position: @@ -497,7 +556,9 @@ void ha_example::position(const byte *record) DBUG_VOID_RETURN; } -/** @brief + +/** + @brief This is like rnd_next, but you are given a position to use to determine the row. The position will be of the type that you stored in ref. You can use ha_get_ptr(pos,ref_length) to retrieve whatever key @@ -515,7 +576,9 @@ int ha_example::rnd_pos(byte * buf, byte *pos) DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief ::info() is used to return information to the optimizer. See my_base.h for the complete description. @@ -558,7 +621,9 @@ int ha_example::info(uint flag) DBUG_RETURN(0); } -/** @brief + +/** + @brief extra() is called whenever the server wishes to send a hint to the storage engine. The myisam engine implements the most hints. ha_innodb.cc has the most exhaustive list of these hints. @@ -572,7 +637,9 @@ int ha_example::extra(enum ha_extra_function operation) DBUG_RETURN(0); } -/** @brief + +/** + @brief Used to delete all rows in a table, including cases of truncate and cases where the optimizer realizes that all rows will be removed as a result of an SQL statement. @@ -596,7 +663,9 @@ int ha_example::delete_all_rows() DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief This create a lock on the table. If you are implementing a storage engine that can handle transacations look at ha_berkely.cc to see how you will want to go about doing this. Otherwise you should consider calling flock() @@ -618,7 +687,9 @@ int ha_example::external_lock(THD *thd, int lock_type) DBUG_RETURN(0); } -/** @brief + +/** + @brief The idea with handler::store_lock() is: The statement decides which locks should be needed for the table. For updates/deletes/inserts we get WRITE locks, for SELECT... we get read locks. @@ -659,7 +730,9 @@ THR_LOCK_DATA **ha_example::store_lock(THD *thd, return to; } -/** @brief + +/** + @brief Used to delete a table. By the time delete_table() has been called all opened references to this table will have been closed (and your globally shared references released). The variable name will just be the name of @@ -684,17 +757,19 @@ int ha_example::delete_table(const char *name) DBUG_RETURN(0); } -/** @brief + +/** + @brief Renames a table from one name to another via an alter table call. - @details + @details If you do not implement this, the default rename_table() is called from handler.cc and it will delete all files with the file extensions returned by bas_ext(). Called from sql_table.cc by mysql_rename_table(). - @see + @see mysql_rename_table() in sql_table.cc */ int ha_example::rename_table(const char * from, const char * to) @@ -703,16 +778,18 @@ int ha_example::rename_table(const char * from, const char * to) DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -/** @brief + +/** + @brief Given a starting key and an ending key, estimate the number of rows that will exist between the two keys. - @details + @details end_key may be empty, in which case determine if start_key matches any rows. Called from opt_range.cc by check_quick_keys(). 
- @see + @see check_quick_keys() in opt_range.cc */ ha_rows ha_example::records_in_range(uint inx, key_range *min_key, @@ -722,29 +799,38 @@ ha_rows ha_example::records_in_range(uint inx, key_range *min_key, DBUG_RETURN(10); // low number to force index usage } -/** @brief + +/** + @brief create() is called to create a database. The variable name will have the name of the table. - @details - When create() is called you do not need to worry about opening the table. Also, - the .frm file will have already been created so adjusting create_info is not - necessary. You can overwrite the .frm file at this point if you wish to change - the table definition, but there are no methods currently provided for doing so. + @details + When create() is called you do not need to worry about + opening the table. Also, the .frm file will have already been + created so adjusting create_info is not necessary. You can overwrite + the .frm file at this point if you wish to change the table + definition, but there are no methods currently provided for doing + so. Called from handle.cc by ha_create_table(). - @see + @see ha_create_table() in handle.cc */ + int ha_example::create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info) { DBUG_ENTER("ha_example::create"); - /* This is not implemented but we want someone to be able to see that it works. */ + /* + This is not implemented but we want someone to be able to see that it + works. + */ DBUG_RETURN(0); } + struct st_mysql_storage_engine example_storage_engine= { MYSQL_HANDLERTON_INTERFACE_VERSION }; @@ -756,11 +842,11 @@ mysql_declare_plugin(example) "Brian Aker, MySQL AB", "Example storage engine", PLUGIN_LICENSE_GPL, - example_init_func, /* Plugin Init */ - example_done_func, /* Plugin Deinit */ + example_init_func, /* Plugin Init */ + example_done_func, /* Plugin Deinit */ 0x0001 /* 0.1 */, - NULL, /* status variables */ - NULL, /* system variables */ - NULL /* config options */ + NULL, /* status variables */ + NULL, /* system variables */ + NULL /* config options */ } mysql_declare_plugin_end; diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc index 9290418c7aa..14ffe5da984 100644 --- a/storage/federated/ha_federated.cc +++ b/storage/federated/ha_federated.cc @@ -362,7 +362,6 @@ static handler *federated_create_handler(handlerton *hton, MEM_ROOT *mem_root); static int federated_commit(handlerton *hton, THD *thd, bool all); static int federated_rollback(handlerton *hton, THD *thd, bool all); -static int federated_db_init(void); /* Federated storage engine handlerton */ @@ -389,7 +388,7 @@ static byte *federated_get_key(FEDERATED_SHARE *share, uint *length, SYNOPSIS federated_db_init() - void + p Handlerton RETURN FALSE OK @@ -573,9 +572,6 @@ int get_connection(FEDERATED_SHARE *share) int error_num= ER_FOREIGN_SERVER_DOESNT_EXIST; char error_buffer[FEDERATED_QUERY_BUFFER_SIZE]; FOREIGN_SERVER *server; - MYSQL *mysql_conn= 0; - MYSQL_RES *result= 0; - MYSQL_ROW row= 0; DBUG_ENTER("ha_federated::get_connection"); if (!(server= @@ -1223,7 +1219,7 @@ bool ha_federated::create_where_from_key(String *to, KEY *key_info, const key_range *start_key, const key_range *end_key, - bool records_in_range, + bool from_records_in_range, bool eq_range) { bool both_not_null= @@ -1294,7 +1290,7 @@ bool ha_federated::create_where_from_key(String *to, if (emit_key_part_name(&tmp, key_part)) goto err; - if (records_in_range) + if (from_records_in_range) { if (tmp.append(STRING_WITH_LEN(" >= "))) goto err; @@ -1534,7 +1530,7 @@ static int 
free_share(FEDERATED_SHARE *share) ha_rows ha_federated::records_in_range(uint inx, key_range *start_key, - key_range *end_key) + key_range *end_key) { /* @@ -2306,7 +2302,7 @@ int ha_federated::index_init(uint keynr, bool sorted) int ha_federated::read_range_first(const key_range *start_key, const key_range *end_key, - bool eq_range, bool sorted) + bool eq_range_arg, bool sorted) { char sql_query_buffer[FEDERATED_QUERY_BUFFER_SIZE]; int retval; @@ -2321,7 +2317,7 @@ int ha_federated::read_range_first(const key_range *start_key, sql_query.append(share->select_query); create_where_from_key(&sql_query, &table->key_info[active_index], - start_key, end_key, 0, eq_range); + start_key, end_key, 0, eq_range_arg); if (stored_result) { @@ -3044,4 +3040,3 @@ mysql_declare_plugin(federated) NULL /* config options */ } mysql_declare_plugin_end; - diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc index 7a2f8e20c56..cf11c9923eb 100644 --- a/storage/heap/ha_heap.cc +++ b/storage/heap/ha_heap.cc @@ -327,11 +327,11 @@ int ha_heap::rnd_next(byte *buf) int ha_heap::rnd_pos(byte * buf, byte *pos) { int error; - HEAP_PTR position; + HEAP_PTR heap_position; statistic_increment(table->in_use->status_var.ha_read_rnd_count, &LOCK_status); - memcpy_fixed((char*) &position,pos,sizeof(HEAP_PTR)); - error=heap_rrnd(file, buf, position); + memcpy_fixed((char*) &heap_position, pos, sizeof(HEAP_PTR)); + error=heap_rrnd(file, buf, heap_position); table->status=error ? STATUS_NOT_FOUND: 0; return error; } @@ -343,19 +343,19 @@ void ha_heap::position(const byte *record) int ha_heap::info(uint flag) { - HEAPINFO info; - (void) heap_info(file,&info,flag); - - errkey= info.errkey; - stats.records = info.records; - stats.deleted = info.deleted; - stats.mean_rec_length=info.reclength; - stats.data_file_length=info.data_length; - stats.index_file_length=info.index_length; - stats.max_data_file_length= info.max_records* info.reclength; - stats.delete_length= info.deleted * info.reclength; + HEAPINFO hp_info; + (void) heap_info(file,&hp_info,flag); + + errkey= hp_info.errkey; + stats.records= hp_info.records; + stats.deleted= hp_info.deleted; + stats.mean_rec_length= hp_info.reclength; + stats.data_file_length= hp_info.data_length; + stats.index_file_length= hp_info.index_length; + stats.max_data_file_length= hp_info.max_records * hp_info.reclength; + stats.delete_length= hp_info.deleted * hp_info.reclength; if (flag & HA_STATUS_AUTO) - stats.auto_increment_value= info.auto_increment; + stats.auto_increment_value= hp_info.auto_increment; /* If info() is called for the first time after open(), we will still have to update the key statistics. Hoping that a table lock is now diff --git a/storage/myisam/ft_boolean_search.c b/storage/myisam/ft_boolean_search.c index 68395d8abef..68076d7e401 100644 --- a/storage/myisam/ft_boolean_search.c +++ b/storage/myisam/ft_boolean_search.c @@ -677,7 +677,7 @@ static void _ftb_climb_the_tree(FTB *ftb, FTB_WORD *ftbw, FT_SEG_ITERATOR *ftsi_ FT_SEG_ITERATOR ftsi; FTB_EXPR *ftbe; float weight=ftbw->weight; - int yn=ftbw->flags, ythresh, mode=(ftsi_orig != 0); + int yn_flag= ftbw->flags, ythresh, mode=(ftsi_orig != 0); my_off_t curdoc=ftbw->docid[mode]; struct st_mysql_ftparser *parser= ftb->keynr == NO_SUCH_KEY ? 
&ft_default_parser : @@ -694,13 +694,13 @@ static void _ftb_climb_the_tree(FTB *ftb, FTB_WORD *ftbw, FT_SEG_ITERATOR *ftsi_ } if (ftbe->nos) break; - if (yn & FTB_FLAG_YES) + if (yn_flag & FTB_FLAG_YES) { weight /= ftbe->ythresh; ftbe->cur_weight += weight; if ((int) ++ftbe->yesses == ythresh) { - yn=ftbe->flags; + yn_flag=ftbe->flags; weight=ftbe->cur_weight*ftbe->weight; if (mode && ftbe->phrase) { @@ -721,14 +721,14 @@ static void _ftb_climb_the_tree(FTB *ftb, FTB_WORD *ftbw, FT_SEG_ITERATOR *ftsi_ break; } else - if (yn & FTB_FLAG_NO) + if (yn_flag & FTB_FLAG_NO) { /* NOTE: special sort function of queue assures that all - (yn & FTB_FLAG_NO) != 0 + (yn_flag & FTB_FLAG_NO) != 0 events for every particular subexpression will "auto-magically" happen BEFORE all the - (yn & FTB_FLAG_YES) != 0 events. So no + (yn_flag & FTB_FLAG_YES) != 0 events. So no already matched expression can become not-matched again. */ ++ftbe->nos; @@ -741,8 +741,8 @@ static void _ftb_climb_the_tree(FTB *ftb, FTB_WORD *ftbw, FT_SEG_ITERATOR *ftsi_ ftbe->cur_weight += weight; if ((int) ftbe->yesses < ythresh) break; - if (!(yn & FTB_FLAG_WONLY)) - yn= ((int) ftbe->yesses++ == ythresh) ? ftbe->flags : FTB_FLAG_WONLY ; + if (!(yn_flag & FTB_FLAG_WONLY)) + yn_flag= ((int) ftbe->yesses++ == ythresh) ? ftbe->flags : FTB_FLAG_WONLY ; weight*= ftbe->weight; } } diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index 397856a4a4e..7b5a2b39763 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -656,11 +656,11 @@ int ha_myisam::optimize(THD* thd, HA_CHECK_OPT *check_opt) } -int ha_myisam::repair(THD *thd, MI_CHECK ¶m, bool optimize) +int ha_myisam::repair(THD *thd, MI_CHECK ¶m, bool do_optimize) { int error=0; uint local_testflag=param.testflag; - bool optimize_done= !optimize, statistics_done=0; + bool optimize_done= !do_optimize, statistics_done=0; const char *old_proc_info=thd->proc_info; char fixed_name[FN_REFLEN]; MYISAM_SHARE* share = file->s; @@ -684,7 +684,7 @@ int ha_myisam::repair(THD *thd, MI_CHECK ¶m, bool optimize) DBUG_RETURN(HA_ADMIN_FAILED); } - if (!optimize || + if (!do_optimize || ((file->state->del || share->state.split != file->state->records) && (!(param.testflag & T_QUICK) || !(share->state.changed & STATE_NOT_OPTIMIZED_KEYS)))) @@ -1320,46 +1320,46 @@ int ha_myisam::rnd_pos(byte * buf, byte *pos) void ha_myisam::position(const byte* record) { - my_off_t position=mi_position(file); - my_store_ptr(ref, ref_length, position); + my_off_t row_position= mi_position(file); + my_store_ptr(ref, ref_length, row_position); } int ha_myisam::info(uint flag) { - MI_ISAMINFO info; + MI_ISAMINFO misam_info; char name_buff[FN_REFLEN]; - (void) mi_status(file,&info,flag); + (void) mi_status(file,&misam_info,flag); if (flag & HA_STATUS_VARIABLE) { - stats.records = info.records; - stats.deleted = info.deleted; - stats.data_file_length=info.data_file_length; - stats.index_file_length=info.index_file_length; - stats.delete_length = info.delete_length; - stats.check_time = info.check_time; - stats. 
mean_rec_length=info.mean_reclength; + stats.records= misam_info.records; + stats.deleted= misam_info.deleted; + stats.data_file_length= misam_info.data_file_length; + stats.index_file_length= misam_info.index_file_length; + stats.delete_length= misam_info.delete_length; + stats.check_time= misam_info.check_time; + stats.mean_rec_length= misam_info.mean_reclength; } if (flag & HA_STATUS_CONST) { TABLE_SHARE *share= table->s; - stats.max_data_file_length= info.max_data_file_length; - stats.max_index_file_length= info.max_index_file_length; - stats.create_time= info.create_time; - ref_length= info.reflength; - share->db_options_in_use= info.options; + stats.max_data_file_length= misam_info.max_data_file_length; + stats.max_index_file_length= misam_info.max_index_file_length; + stats.create_time= misam_info.create_time; + ref_length= misam_info.reflength; + share->db_options_in_use= misam_info.options; stats.block_size= myisam_block_size; /* record block size */ /* Update share */ if (share->tmp_table == NO_TMP_TABLE) pthread_mutex_lock(&share->mutex); share->keys_in_use.set_prefix(share->keys); - share->keys_in_use.intersect_extended(info.key_map); + share->keys_in_use.intersect_extended(misam_info.key_map); share->keys_for_keyread.intersect(share->keys_in_use); - share->db_record_offset= info.record_offset; + share->db_record_offset= misam_info.record_offset; if (share->key_parts) memcpy((char*) table->key_info[0].rec_per_key, - (char*) info.rec_per_key, + (char*) misam_info.rec_per_key, sizeof(table->key_info[0].rec_per_key)*share->key_parts); if (share->tmp_table == NO_TMP_TABLE) pthread_mutex_unlock(&share->mutex); @@ -1371,22 +1371,22 @@ int ha_myisam::info(uint flag) data_file_name= index_file_name= 0; fn_format(name_buff, file->filename, "", MI_NAME_DEXT, MY_APPEND_EXT | MY_UNPACK_FILENAME); - if (strcmp(name_buff, info.data_file_name)) - data_file_name=info.data_file_name; + if (strcmp(name_buff, misam_info.data_file_name)) + data_file_name=misam_info.data_file_name; fn_format(name_buff, file->filename, "", MI_NAME_IEXT, MY_APPEND_EXT | MY_UNPACK_FILENAME); - if (strcmp(name_buff, info.index_file_name)) - index_file_name=info.index_file_name; + if (strcmp(name_buff, misam_info.index_file_name)) + index_file_name=misam_info.index_file_name; } if (flag & HA_STATUS_ERRKEY) { - errkey = info.errkey; - my_store_ptr(dup_ref, ref_length, info.dupp_key_pos); + errkey = misam_info.errkey; + my_store_ptr(dup_ref, ref_length, misam_info.dupp_key_pos); } if (flag & HA_STATUS_TIME) - stats.update_time = info.update_time; + stats.update_time = misam_info.update_time; if (flag & HA_STATUS_AUTO) - stats.auto_increment_value= info.auto_increment; + stats.auto_increment_value= misam_info.auto_increment; return 0; } @@ -1454,7 +1454,7 @@ void ha_myisam::update_create_info(HA_CREATE_INFO *create_info) int ha_myisam::create(const char *name, register TABLE *table_arg, - HA_CREATE_INFO *info) + HA_CREATE_INFO *ha_create_info) { int error; uint i,j,recpos,minpos,fieldpos,temp_length,length, create_flags= 0; @@ -1638,15 +1638,15 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, create_info.max_rows= share->max_rows; create_info.reloc_rows= share->min_rows; create_info.with_auto_increment=found_real_auto_increment; - create_info.auto_increment=(info->auto_increment_value ? - info->auto_increment_value -1 : - (ulonglong) 0); + create_info.auto_increment= (ha_create_info->auto_increment_value ? 
+ ha_create_info->auto_increment_value -1 : + (ulonglong) 0); create_info.data_file_length= ((ulonglong) share->max_rows * share->avg_row_length); - create_info.data_file_name= info->data_file_name; - create_info.index_file_name= info->index_file_name; + create_info.data_file_name= ha_create_info->data_file_name; + create_info.index_file_name= ha_create_info->index_file_name; - if (info->options & HA_LEX_CREATE_TMP_TABLE) + if (ha_create_info->options & HA_LEX_CREATE_TMP_TABLE) create_flags|= HA_CREATE_TMP_TABLE; if (options & HA_OPTION_PACK_RECORD) create_flags|= HA_PACK_RECORD; diff --git a/storage/myisam/mi_open.c b/storage/myisam/mi_open.c index afed5d05963..5e783bf7890 100644 --- a/storage/myisam/mi_open.c +++ b/storage/myisam/mi_open.c @@ -376,11 +376,11 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) } else { - uint j; + uint k; share->keyinfo[i].seg=pos; - for (j=0; j < FT_SEGS; j++) + for (k=0; k < FT_SEGS; k++) { - *pos=ft_keysegs[j]; + *pos= ft_keysegs[k]; pos[0].language= pos[-1].language; if (!(pos[0].charset= pos[-1].charset)) { diff --git a/storage/myisam/mi_rkey.c b/storage/myisam/mi_rkey.c index 6323c95ffd7..917ba381504 100644 --- a/storage/myisam/mi_rkey.c +++ b/storage/myisam/mi_rkey.c @@ -30,7 +30,7 @@ int mi_rkey(MI_INFO *info, byte *buf, int inx, const byte *key, uint key_len, HA_KEYSEG *last_used_keyseg; uint pack_key_length, use_key_length, nextflag; DBUG_ENTER("mi_rkey"); - DBUG_PRINT("enter", ("base: %lx buf: %lx inx: %d search_flag: %d", + DBUG_PRINT("enter", ("base: 0x%lx buf: 0x%lx inx: %d search_flag: %d", (long) info, (long) buf, inx, search_flag)); if ((inx = _mi_check_index(info,inx)) < 0) diff --git a/storage/myisam/mi_search.c b/storage/myisam/mi_search.c index fb3bdd4409e..8d2b68a97f0 100644 --- a/storage/myisam/mi_search.c +++ b/storage/myisam/mi_search.c @@ -472,9 +472,9 @@ int _mi_prefix_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page, else { /* We have to compare k and vseg as if they were space extended */ - uchar *end= k+ (cmplen - len); - for ( ; k < end && *k == ' '; k++) ; - if (k == end) + uchar *k_end= k+ (cmplen - len); + for ( ; k < k_end && *k == ' '; k++) ; + if (k == k_end) goto cmp_rest; /* should never happen */ if (*k < (uchar) ' ') { @@ -486,15 +486,15 @@ int _mi_prefix_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page, } else if (len > cmplen) { - uchar *end; + uchar *vseg_end; if ((nextflag & SEARCH_PREFIX) && key_len_left == 0) goto fix_flag; /* We have to compare k and vseg as if they were space extended */ - for (end=vseg + (len-cmplen) ; - vseg < end && *vseg == (uchar) ' '; + for (vseg_end= vseg + (len-cmplen) ; + vseg < vseg_end && *vseg == (uchar) ' '; vseg++, matched++) ; - DBUG_ASSERT(vseg < end); + DBUG_ASSERT(vseg < vseg_end); if (*vseg > (uchar) ' ') { diff --git a/storage/myisam/myisampack.c b/storage/myisam/myisampack.c index 6daa062472e..fb631b5e63e 100644 --- a/storage/myisam/myisampack.c +++ b/storage/myisam/myisampack.c @@ -2689,8 +2689,9 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts) } case FIELD_VARCHAR: { - uint pack_length= HA_VARCHAR_PACKLENGTH(count->field_length-1); - ulong col_length= (pack_length == 1 ? (uint) *(uchar*) start_pos : + uint var_pack_length= HA_VARCHAR_PACKLENGTH(count->field_length-1); + ulong col_length= (var_pack_length == 1 ? + (uint) *(uchar*) start_pos : uint2korr(start_pos)); /* Empty varchar are encoded with a single 1 bit. 
*/ if (!col_length) @@ -2700,7 +2701,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts) } else { - byte *end=start_pos+pack_length+col_length; + byte *end= start_pos + var_pack_length + col_length; DBUG_PRINT("fields", ("FIELD_VARCHAR not empty, bits: 1")); write_bits(0,1); /* Write the varchar length. */ @@ -2708,7 +2709,7 @@ static int compress_isam_file(PACK_MRG_INFO *mrg, HUFF_COUNTS *huff_counts) col_length, count->length_bits)); write_bits(col_length,count->length_bits); /* Encode the varchar bytes. */ - for (start_pos+=pack_length ; start_pos < end ; start_pos++) + for (start_pos+= var_pack_length ; start_pos < end ; start_pos++) { DBUG_PRINT("fields", ("value: 0x%02x code: 0x%s bits: %2u bin: %s", diff --git a/storage/myisam/rt_index.c b/storage/myisam/rt_index.c index 99080c22644..edb33ec10b9 100644 --- a/storage/myisam/rt_index.c +++ b/storage/myisam/rt_index.c @@ -626,8 +626,6 @@ static int rtree_insert_level(MI_INFO *info, uint keynr, uchar *key, if ((old_root = info->s->state.key_root[keynr]) == HA_OFFSET_ERROR) { - int res; - if ((old_root = _mi_new(info, keyinfo, DFLT_INIT_HITS)) == HA_OFFSET_ERROR) return -1; info->buff_used = 1; @@ -913,7 +911,6 @@ int rtree_delete(MI_INFO *info, uint keynr, uchar *key, uint key_length) for (i = 0; i < ReinsertList.n_pages; ++i) { uchar *page_buf; - uint nod_flag; uchar *k; uchar *last; diff --git a/storage/myisam/sort.c b/storage/myisam/sort.c index bc37e0291d2..53eb6b2e310 100644 --- a/storage/myisam/sort.c +++ b/storage/myisam/sort.c @@ -220,9 +220,9 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages, if (my_b_inited(&tempfile_for_exceptions)) { - MI_INFO *index=info->sort_info->info; + MI_INFO *idx=info->sort_info->info; uint keyno=info->key; - uint key_length, ref_length=index->s->rec_reflength; + uint key_length, ref_length=idx->s->rec_reflength; if (!no_messages) printf(" - Adding exceptions\n"); /* purecov: tested */ @@ -235,7 +235,7 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages, && !my_b_read(&tempfile_for_exceptions,(byte*)sort_keys, (uint) key_length)) { - if (_mi_ck_write(index,keyno,(uchar*) sort_keys,key_length-ref_length)) + if (_mi_ck_write(idx,keyno,(uchar*) sort_keys,key_length-ref_length)) goto err; } } diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc index 7df81a4802f..093a85ee841 100644 --- a/storage/myisammrg/ha_myisammrg.cc +++ b/storage/myisammrg/ha_myisammrg.cc @@ -30,9 +30,6 @@ ** MyISAM MERGE tables *****************************************************************************/ -static handler *myisammrg_create_handler(TABLE_SHARE *table, - MEM_ROOT *mem_root); - static handler *myisammrg_create_handler(handlerton *hton, TABLE_SHARE *table, MEM_ROOT *mem_root) @@ -249,8 +246,8 @@ int ha_myisammrg::rnd_pos(byte * buf, byte *pos) void ha_myisammrg::position(const byte *record) { - ulonglong position= myrg_position(file); - my_store_ptr(ref, ref_length, (my_off_t) position); + ulonglong row_position= myrg_position(file); + my_store_ptr(ref, ref_length, (my_off_t) row_position); } @@ -263,24 +260,23 @@ ha_rows ha_myisammrg::records_in_range(uint inx, key_range *min_key, int ha_myisammrg::info(uint flag) { - MYMERGE_INFO info; - (void) myrg_status(file,&info,flag); + MYMERGE_INFO mrg_info; + (void) myrg_status(file,&mrg_info,flag); /* The following fails if one has not compiled MySQL with -DBIG_TABLES and one has more than 2^32 rows in the merge tables. 
*/ - stats.records = (ha_rows) info.records; - stats.deleted = (ha_rows) info.deleted; + stats.records = (ha_rows) mrg_info.records; + stats.deleted = (ha_rows) mrg_info.deleted; #if !defined(BIG_TABLES) || SIZEOF_OFF_T == 4 - if ((info.records >= (ulonglong) 1 << 32) || - (info.deleted >= (ulonglong) 1 << 32)) + if ((mrg_info.records >= (ulonglong) 1 << 32) || + (mrg_info.deleted >= (ulonglong) 1 << 32)) table->s->crashed= 1; #endif - stats.data_file_length=info.data_file_length; - errkey = info.errkey; + stats.data_file_length= mrg_info.data_file_length; + errkey= mrg_info.errkey; table->s->keys_in_use.set_prefix(table->s->keys); - table->s->db_options_in_use= info.options; - stats.mean_rec_length= info.reclength; + stats.mean_rec_length= mrg_info.reclength; /* The handler::block_size is used all over the code in index scan cost @@ -310,7 +306,7 @@ int ha_myisammrg::info(uint flag) #endif if (flag & HA_STATUS_CONST) { - if (table->s->key_parts && info.rec_per_key) + if (table->s->key_parts && mrg_info.rec_per_key) { #ifdef HAVE_purify /* @@ -323,7 +319,7 @@ int ha_myisammrg::info(uint flag) sizeof(table->key_info[0].rec_per_key) * table->s->key_parts); #endif memcpy((char*) table->key_info[0].rec_per_key, - (char*) info.rec_per_key, + (char*) mrg_info.rec_per_key, sizeof(table->key_info[0].rec_per_key) * min(file->keys, table->s->key_parts)); } diff --git a/storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp b/storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp index d3dd070a62e..ed7e3929414 100644 --- a/storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp +++ b/storage/ndb/include/kernel/signaldata/ArbitSignalData.hpp @@ -31,11 +31,10 @@ private: Uint32 data[2]; public: + ArbitTicket() {} STATIC_CONST( DataLength = 2 ); STATIC_CONST( TextLength = DataLength * 8 ); // hex digits - ArbitTicket() {} - inline void clear() { data[0] = 0; data[1] = 0; @@ -144,9 +143,9 @@ public: ArbitTicket ticket; // ticket NodeBitmask mask; // set of nodes + ArbitSignalData() {} STATIC_CONST( SignalLength = 3 + ArbitTicket::DataLength + NodeBitmask::Size ); - ArbitSignalData() {} inline bool match(ArbitSignalData& aData) const { return node == aData.node && diff --git a/storage/ndb/include/ndbapi/NdbReceiver.hpp b/storage/ndb/include/ndbapi/NdbReceiver.hpp index 73bf5c66863..0af55c88f68 100644 --- a/storage/ndb/include/ndbapi/NdbReceiver.hpp +++ b/storage/ndb/include/ndbapi/NdbReceiver.hpp @@ -57,7 +57,7 @@ public: bool checkMagicNumber() const; - inline void next(NdbReceiver* next) { m_next = next;} + inline void next(NdbReceiver* next_arg) { m_next = next_arg;} inline NdbReceiver* next() { return m_next; } void setErrorCode(int); diff --git a/storage/ndb/include/transporter/TransporterDefinitions.hpp b/storage/ndb/include/transporter/TransporterDefinitions.hpp index 8154d8ea8bd..003824d01e8 100644 --- a/storage/ndb/include/transporter/TransporterDefinitions.hpp +++ b/storage/ndb/include/transporter/TransporterDefinitions.hpp @@ -117,6 +117,10 @@ struct SegmentedSectionPtr { struct SectionSegment * p; SegmentedSectionPtr() {} + SegmentedSectionPtr(Uint32 sz_arg, Uint32 i_arg, + struct SectionSegment *p_arg) + :sz(sz_arg), i(i_arg), p(p_arg) + {} void setNull() { p = 0;} bool isNull() const { return p == 0;} }; diff --git a/storage/ndb/src/common/debugger/EventLogger.cpp b/storage/ndb/src/common/debugger/EventLogger.cpp index b0f5627dd98..f5498d88887 100644 --- a/storage/ndb/src/common/debugger/EventLogger.cpp +++ b/storage/ndb/src/common/debugger/EventLogger.cpp @@ -950,6 +950,7 @@ 
EventLogger::close() removeAllHandlers(); } +#ifdef NOT_USED static NdbOut& operator<<(NdbOut& out, const LogLevel & ll) { @@ -959,6 +960,7 @@ operator<<(NdbOut& out, const LogLevel & ll) out << "]"; return out; } +#endif int EventLoggerBase::event_lookup(int eventType, diff --git a/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp b/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp index 0521c01248a..bcb13f38c72 100644 --- a/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp +++ b/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp @@ -157,14 +157,15 @@ ConfigRetriever::getConfig() { } ndb_mgm_configuration * -ConfigRetriever::getConfig(NdbMgmHandle m_handle) +ConfigRetriever::getConfig(NdbMgmHandle m_handle_arg) { - ndb_mgm_configuration * conf = ndb_mgm_get_configuration(m_handle,m_version); + ndb_mgm_configuration * conf = ndb_mgm_get_configuration(m_handle_arg, + m_version); if(conf == 0) { - BaseString tmp(ndb_mgm_get_latest_error_msg(m_handle)); + BaseString tmp(ndb_mgm_get_latest_error_msg(m_handle_arg)); tmp.append(" : "); - tmp.append(ndb_mgm_get_latest_error_desc(m_handle)); + tmp.append(ndb_mgm_get_latest_error_desc(m_handle_arg)); setError(CR_ERROR, tmp.c_str()); return 0; } diff --git a/storage/ndb/src/common/portlib/NdbTick.c b/storage/ndb/src/common/portlib/NdbTick.c index eff6b28b7eb..238e9b1956d 100644 --- a/storage/ndb/src/common/portlib/NdbTick.c +++ b/storage/ndb/src/common/portlib/NdbTick.c @@ -60,9 +60,9 @@ NdbTick_CurrentMicrosecond(NDB_TICKS * secs, Uint32 * micros){ int res = gettimeofday(&tick_time, 0); if(secs==0) { - NDB_TICKS secs = tick_time.tv_sec; + NDB_TICKS local_secs = tick_time.tv_sec; *micros = tick_time.tv_usec; - *micros = secs*1000000+*micros; + *micros = local_secs*1000000+*micros; } else { * secs = tick_time.tv_sec; * micros = tick_time.tv_usec; diff --git a/storage/ndb/src/common/transporter/SHM_Transporter.cpp b/storage/ndb/src/common/transporter/SHM_Transporter.cpp index e0c2e726a92..3ce21940254 100644 --- a/storage/ndb/src/common/transporter/SHM_Transporter.cpp +++ b/storage/ndb/src/common/transporter/SHM_Transporter.cpp @@ -31,7 +31,7 @@ SHM_Transporter::SHM_Transporter(TransporterRegistry &t_reg, const char *lHostName, const char *rHostName, int r_port, - bool isMgmConnection, + bool isMgmConnection_arg, NodeId lNodeId, NodeId rNodeId, NodeId serverNodeId, @@ -40,7 +40,7 @@ SHM_Transporter::SHM_Transporter(TransporterRegistry &t_reg, key_t _shmKey, Uint32 _shmSize) : Transporter(t_reg, tt_SHM_TRANSPORTER, - lHostName, rHostName, r_port, isMgmConnection, + lHostName, rHostName, r_port, isMgmConnection_arg, lNodeId, rNodeId, serverNodeId, 0, false, checksum, signalId), shmKey(_shmKey), diff --git a/storage/ndb/src/common/transporter/TCP_Transporter.cpp b/storage/ndb/src/common/transporter/TCP_Transporter.cpp index 9cf5bef35e0..18171a09974 100644 --- a/storage/ndb/src/common/transporter/TCP_Transporter.cpp +++ b/storage/ndb/src/common/transporter/TCP_Transporter.cpp @@ -64,14 +64,14 @@ TCP_Transporter::TCP_Transporter(TransporterRegistry &t_reg, const char *lHostName, const char *rHostName, int r_port, - bool isMgmConnection, + bool isMgmConnection_arg, NodeId lNodeId, NodeId rNodeId, NodeId serverNodeId, bool chksm, bool signalId, Uint32 _reportFreq) : Transporter(t_reg, tt_TCP_TRANSPORTER, - lHostName, rHostName, r_port, isMgmConnection, + lHostName, rHostName, r_port, isMgmConnection_arg, lNodeId, rNodeId, serverNodeId, 0, false, chksm, signalId), m_sendBuffer(sendBufSize) diff --git 
a/storage/ndb/src/common/transporter/TCP_Transporter.hpp b/storage/ndb/src/common/transporter/TCP_Transporter.hpp index d6ce14a6a2d..211ace8f03d 100644 --- a/storage/ndb/src/common/transporter/TCP_Transporter.hpp +++ b/storage/ndb/src/common/transporter/TCP_Transporter.hpp @@ -33,6 +33,7 @@ struct ReceiveBuffer { Uint32 sizeOfData; // In bytes Uint32 sizeOfBuffer; + ReceiveBuffer() {} bool init(int bytes); void destroy(); diff --git a/storage/ndb/src/common/transporter/Transporter.cpp b/storage/ndb/src/common/transporter/Transporter.cpp index 20b6be8ce26..cec018575e0 100644 --- a/storage/ndb/src/common/transporter/Transporter.cpp +++ b/storage/ndb/src/common/transporter/Transporter.cpp @@ -107,7 +107,7 @@ Transporter::connect_server(NDB_SOCKET_TYPE sockfd) { { struct sockaddr_in addr; SOCKET_SIZE_TYPE addrlen= sizeof(addr); - int r= getpeername(sockfd, (struct sockaddr*)&addr, &addrlen); + getpeername(sockfd, (struct sockaddr*)&addr, &addrlen); m_connect_address= (&addr)->sin_addr; } @@ -213,7 +213,7 @@ Transporter::connect_client(NDB_SOCKET_TYPE sockfd) { { struct sockaddr_in addr; SOCKET_SIZE_TYPE addrlen= sizeof(addr); - int r= getpeername(sockfd, (struct sockaddr*)&addr, &addrlen); + getpeername(sockfd, (struct sockaddr*)&addr, &addrlen); m_connect_address= (&addr)->sin_addr; } diff --git a/storage/ndb/src/common/transporter/TransporterRegistry.cpp b/storage/ndb/src/common/transporter/TransporterRegistry.cpp index c83f05c59c5..8ff95d1115e 100644 --- a/storage/ndb/src/common/transporter/TransporterRegistry.cpp +++ b/storage/ndb/src/common/transporter/TransporterRegistry.cpp @@ -1209,8 +1209,6 @@ TransporterRegistry::add_transporter_interface(NodeId remoteNodeId, bool TransporterRegistry::start_service(SocketServer& socket_server) { - struct ndb_mgm_reply mgm_reply; - DBUG_ENTER("TransporterRegistry::start_service"); if (m_transporter_interface.size() > 0 && !nodeIdSpecified) { diff --git a/storage/ndb/src/common/util/Bitmask.cpp b/storage/ndb/src/common/util/Bitmask.cpp index cdcc7862e25..edfe2363039 100644 --- a/storage/ndb/src/common/util/Bitmask.cpp +++ b/storage/ndb/src/common/util/Bitmask.cpp @@ -16,21 +16,6 @@ #include <Bitmask.hpp> #include <NdbOut.hpp> -static -void print(const Uint32 src[], Uint32 len, Uint32 pos = 0) -{ - printf("b'"); - for(unsigned i = 0; i<len; i++) - { - if(BitmaskImpl::get((pos + len + 31) >> 5, src, i+pos)) - printf("1"); - else - printf("0"); - if((i & 31) == 31) - printf(" "); - } -} - void BitmaskImpl::getFieldImpl(const Uint32 src[], unsigned shiftL, unsigned len, Uint32 dst[]) @@ -93,6 +78,22 @@ BitmaskImpl::setFieldImpl(Uint32 dst[], } #ifdef __TEST_BITMASK__ +static +void print(const Uint32 src[], Uint32 len, Uint32 pos = 0) +{ + printf("b'"); + for(unsigned i = 0; i<len; i++) + { + if(BitmaskImpl::get((pos + len + 31) >> 5, src, i+pos)) + printf("1"); + else + printf("0"); + if((i & 31) == 31) + printf(" "); + } +} + + #define DEBUG 0 #include <Vector.hpp> static void do_test(int bitmask_size); diff --git a/storage/ndb/src/common/util/ConfigValues.cpp b/storage/ndb/src/common/util/ConfigValues.cpp index cf6dcf904a6..6652fd5753b 100644 --- a/storage/ndb/src/common/util/ConfigValues.cpp +++ b/storage/ndb/src/common/util/ConfigValues.cpp @@ -18,8 +18,6 @@ #include <NdbOut.hpp> #include <NdbTCP.h> -static Uint32 hash(Uint32 key, Uint32 size); -static Uint32 nextHash(Uint32 key, Uint32 size, Uint32 pos, Uint32 count); static bool findKey(const Uint32 * vals, Uint32 sz, Uint32 key, Uint32 * pos); /** diff --git a/storage/ndb/src/common/util/File.cpp 
b/storage/ndb/src/common/util/File.cpp index 0482e2979eb..53e129e56a6 100644 --- a/storage/ndb/src/common/util/File.cpp +++ b/storage/ndb/src/common/util/File.cpp @@ -162,9 +162,9 @@ File_class::readChar(char* buf) } int -File_class::write(const void* buf, size_t size, size_t nitems) +File_class::write(const void* buf, size_t size_arg, size_t nitems) { - return ::fwrite(buf, size, nitems, m_file); + return ::fwrite(buf, size_arg, nitems, m_file); } int diff --git a/storage/ndb/src/common/util/Properties.cpp b/storage/ndb/src/common/util/Properties.cpp index 8d5c56affd3..11a1d8690ae 100644 --- a/storage/ndb/src/common/util/Properties.cpp +++ b/storage/ndb/src/common/util/Properties.cpp @@ -627,11 +627,11 @@ PropertiesImpl::getPropsPut(const char * name, if(nvp == 0){ Properties * tmpP = new Properties(); PropertyImpl * tmpPI = new PropertyImpl(tmp2, tmpP); - PropertyImpl * nvp = put(tmpPI); + PropertyImpl * nvp2 = put(tmpPI); delete tmpP; free(tmp2); - return ((Properties*)nvp->value)->impl->getPropsPut(tmp+1, impl); + return ((Properties*)nvp2->value)->impl->getPropsPut(tmp+1, impl); } free(tmp2); if(nvp->valueType != PropertiesType_Properties){ diff --git a/storage/ndb/src/common/util/SocketClient.cpp b/storage/ndb/src/common/util/SocketClient.cpp index c2825901929..3d1fd07d581 100644 --- a/storage/ndb/src/common/util/SocketClient.cpp +++ b/storage/ndb/src/common/util/SocketClient.cpp @@ -88,7 +88,7 @@ SocketClient::bind(const char* bindaddress, unsigned short localport) int ret = errno; NDB_CLOSE_SOCKET(m_sockfd); m_sockfd= NDB_INVALID_SOCKET; - return errno; + return ret; } if (::bind(m_sockfd, (struct sockaddr*)&local, sizeof(local)) == -1) diff --git a/storage/ndb/src/common/util/random.c b/storage/ndb/src/common/util/random.c index 3d4a48e7ef0..20ef537d89a 100644 --- a/storage/ndb/src/common/util/random.c +++ b/storage/ndb/src/common/util/random.c @@ -197,7 +197,7 @@ int initSequence(RandomSequence *seq, SequenceValues *inputValues) unsigned int i; unsigned int j; unsigned int totalLength; - unsigned int index; + unsigned int idx; if( !seq || !inputValues ) return(-1); @@ -219,12 +219,12 @@ int initSequence(RandomSequence *seq, SequenceValues *inputValues) /*----------------------*/ /* set the array values */ /*----------------------*/ - index = 0; + idx = 0; for(i = 0; inputValues[i].length != 0; i++) { for(j = 0; j < inputValues[i].length; j++ ) { - seq->values[index] = inputValues[i].value; - index++; + seq->values[idx] = inputValues[i].value; + idx++; } } diff --git a/storage/ndb/src/common/util/socket_io.cpp b/storage/ndb/src/common/util/socket_io.cpp index bbb76314032..d19c792e20f 100644 --- a/storage/ndb/src/common/util/socket_io.cpp +++ b/storage/ndb/src/common/util/socket_io.cpp @@ -169,8 +169,8 @@ write_socket(NDB_SOCKET_TYPE socket, int timeout_millis, FD_SET(socket, &writeset); timeout.tv_sec = 1; timeout.tv_usec = 0; - const int selectRes = select(socket + 1, 0, &writeset, 0, &timeout); - if(selectRes != 1){ + const int selectRes2 = select(socket + 1, 0, &writeset, 0, &timeout); + if(selectRes2 != 1){ return -1; } } diff --git a/storage/ndb/src/cw/cpcd/APIService.cpp b/storage/ndb/src/cw/cpcd/APIService.cpp index 5bbf2c86e23..1c1cfb94cd4 100644 --- a/storage/ndb/src/cw/cpcd/APIService.cpp +++ b/storage/ndb/src/cw/cpcd/APIService.cpp @@ -389,7 +389,6 @@ CPCDAPISession::listProcesses(Parser_t::Context & /* unused */, void CPCDAPISession::showVersion(Parser_t::Context & /* unused */, const class Properties & args){ - Uint32 id; CPCD::RequestStatus rs; 
m_output->println("show version"); diff --git a/storage/ndb/src/cw/cpcd/main.cpp b/storage/ndb/src/cw/cpcd/main.cpp index 7021b4bc68d..f23a92b8010 100644 --- a/storage/ndb/src/cw/cpcd/main.cpp +++ b/storage/ndb/src/cw/cpcd/main.cpp @@ -74,8 +74,6 @@ extern "C" static void sig_child(int signo, siginfo_t*, void*); const char *progname = "ndb_cpcd"; int main(int argc, char** argv){ - int save_argc= argc; - char** save_argv= argv; const char *load_default_groups[]= { "ndb_cpcd",0 }; MY_INIT(argv[0]); diff --git a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index 57828fa928f..ddf0dc95098 100644 --- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -1319,7 +1319,7 @@ Cmvmi::execTESTSIG(Signal* signal){ fprintf(stdout, "\n"); for(i = 0; i<signal->header.m_noOfSections; i++){ - SegmentedSectionPtr ptr; + SegmentedSectionPtr ptr(0,0,0); ndbout_c("-- Section %d --", i); signal->getSection(ptr, i); ndbrequire(ptr.p != 0); @@ -1377,7 +1377,7 @@ Cmvmi::execTESTSIG(Signal* signal){ LinearSectionPtr ptr[3]; const Uint32 secs = signal->getNoOfSections(); for(i = 0; i<secs; i++){ - SegmentedSectionPtr sptr; + SegmentedSectionPtr sptr(0,0,0); signal->getSection(sptr, i); ptr[i].sz = sptr.sz; ptr[i].p = new Uint32[sptr.sz]; @@ -1426,7 +1426,7 @@ Cmvmi::execTESTSIG(Signal* signal){ LinearSectionPtr ptr[3]; const Uint32 secs = signal->getNoOfSections(); for(i = 0; i<secs; i++){ - SegmentedSectionPtr sptr; + SegmentedSectionPtr sptr(0,0,0); signal->getSection(sptr, i); ptr[i].sz = sptr.sz; ptr[i].p = new Uint32[sptr.sz]; @@ -1492,7 +1492,7 @@ Cmvmi::execTESTSIG(Signal* signal){ const Uint32 secs = signal->getNoOfSections(); memset(g_test, 0, sizeof(g_test)); for(i = 0; i<secs; i++){ - SegmentedSectionPtr sptr; + SegmentedSectionPtr sptr(0,0,0); signal->getSection(sptr, i); g_test[i].sz = sptr.sz; g_test[i].p = new Uint32[sptr.sz]; diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index cae9e8432f5..00a984e591b 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -6119,11 +6119,6 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, tablePtr.p->defaultNoPartFlag = c_tableDesc.DefaultNoPartFlag; tablePtr.p->linearHashFlag = c_tableDesc.LinearHashFlag; - Uint64 maxRows = - (((Uint64)tablePtr.p->maxRowsHigh) << 32) + tablePtr.p->maxRowsLow; - Uint64 minRows = - (((Uint64)tablePtr.p->minRowsHigh) << 32) + tablePtr.p->minRowsLow; - { Rope frm(c_rope_pool, tablePtr.p->frmData); tabRequire(frm.assign(c_tableDesc.FrmData, c_tableDesc.FrmLen), diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp index 67d2ff25390..d9fd604036e 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp @@ -1131,6 +1131,7 @@ private: * seize/release invokes ctor/dtor automatically. 
*/ struct OpRecordCommon { + OpRecordCommon() {} Uint32 key; // key shared between master and slaves Uint32 nextHash; Uint32 prevHash; @@ -1146,6 +1147,7 @@ private: * Create table record */ struct CreateTableRecord : OpRecordCommon { + CreateTableRecord() {} Uint32 m_senderRef; Uint32 m_senderData; Uint32 m_coordinatorRef; @@ -1189,6 +1191,7 @@ private: * Drop table record */ struct DropTableRecord : OpRecordCommon { + DropTableRecord() {} DropTableReq m_request; Uint32 m_requestType; diff --git a/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp b/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp index ca9b733f4d2..602881095c3 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp @@ -40,6 +40,8 @@ usage() << "Example: " << progname << " -ceq ndb_*_fs/D[12]/DBDICT/P0.SchemaLog" << endl; } +#ifdef NOT_USED + static void fill(const char * buf, int mod) { @@ -50,6 +52,7 @@ fill(const char * buf, int mod) len++; } } +#endif static const char* version(Uint32 v) diff --git a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp index 952050955b0..eb81672fef5 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp +++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp @@ -1302,6 +1302,7 @@ public: private: struct LcpState { + LcpState() {} LcpStatus lcpStatus; Uint32 lcpStatusUpdatedPlace; @@ -1408,6 +1409,7 @@ public: private: class MasterTakeOverState { public: + MasterTakeOverState() {} void set(LcpMasterTakeOverState s, Uint32 line) { state = s; updatePlace = line; } @@ -1495,6 +1497,7 @@ private: * SwitchReplicaRecord - Should only be used by master */ struct SwitchReplicaRecord { + SwitchReplicaRecord() {} void clear(){} Uint32 nodeId; diff --git a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp index e3026fc59f0..3951b53184c 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp +++ b/storage/ndb/src/kernel/blocks/dblqh/Dblqh.hpp @@ -493,6 +493,7 @@ public: typedef Ptr<Databuf> DatabufPtr; struct ScanRecord { + ScanRecord() {} enum ScanState { SCAN_FREE = 0, WAIT_STORED_PROC_COPY = 1, @@ -2898,6 +2899,7 @@ public: * */ struct CommitAckMarker { + CommitAckMarker() {} Uint32 transid1; Uint32 transid2; @@ -2924,6 +2926,7 @@ public: void scanMarkers(Signal* signal, Uint32 tcNodeFail, Uint32 bucket, Uint32 i); struct Counters { + Counters() {} Uint32 operations; inline void clear(){ diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index e4764698f5b..fa7bc0bbcac 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -8325,7 +8325,6 @@ void Dblqh::execSCAN_FRAGREQ(Signal* signal) const Uint32 scanLockMode = ScanFragReq::getLockMode(reqinfo); const Uint8 keyinfo = ScanFragReq::getKeyinfoFlag(reqinfo); const Uint8 rangeScan = ScanFragReq::getRangeScanFlag(reqinfo); - const Uint8 tupScan = ScanFragReq::getTupScanFlag(reqinfo); ptrCheckGuard(tabptr, ctabrecFileSize, tablerec); if(tabptr.p->tableStatus != Tablerec::TABLE_DEFINED){ @@ -9814,9 +9813,11 @@ Uint32 Dblqh::sendKeyinfo20(Signal* signal, const Uint32 scanOp = scanP->m_curr_batch_size_rows; const Uint32 nodeId = refToNode(ref); const bool connectedToNode = getNodeInfo(nodeId).m_connected; - //const Uint32 type = getNodeInfo(nodeId).m_type; - //const bool is_api= (type >= NodeInfo::API && type <= NodeInfo::REP); - //const bool 
old_dest= (getNodeInfo(nodeId).m_version < MAKE_VERSION(3,5,0)); +#ifdef NOT_USED + const Uint32 type = getNodeInfo(nodeId).m_type; + const bool is_api= (type >= NodeInfo::API && type <= NodeInfo::REP); + const bool old_dest= (getNodeInfo(nodeId).m_version < MAKE_VERSION(3,5,0)); +#endif const bool longable = true; // TODO is_api && !old_dest; Uint32 * dst = keyInfo->keyData; @@ -9917,7 +9918,9 @@ void Dblqh::sendScanFragConf(Signal* signal, Uint32 scanCompleted) return; } ScanFragConf * conf = (ScanFragConf*)&signal->theData[0]; - //NodeId tc_node_id= refToNode(tcConnectptr.p->clientBlockref); +#ifdef NOT_USED + NodeId tc_node_id= refToNode(tcConnectptr.p->clientBlockref); +#endif Uint32 trans_id1= tcConnectptr.p->transid[0]; Uint32 trans_id2= tcConnectptr.p->transid[1]; @@ -15187,8 +15190,6 @@ void Dblqh::execDEBUG_SIG(Signal* signal) 2.5 TEMPORARY VARIABLES ----------------------- */ - UintR tdebug; - jamEntry(); //logPagePtr.i = signal->theData[0]; //tdebug = logPagePtr.p->logPageWord[0]; diff --git a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp index ad2fa0e1696..39b7c00e3a1 100644 --- a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp +++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp @@ -302,6 +302,7 @@ public: /* WHEN THE TRIGGER IS DEACTIVATED. */ /* **************************************** */ struct TcDefinedTriggerData { + TcDefinedTriggerData() {} /** * Trigger id, used to identify the trigger */ @@ -958,6 +959,7 @@ public: /* ALL TABLES IN THE SYSTEM. */ /********************************************************/ struct TableRecord { + TableRecord() {} Uint32 currentSchemaVersion; Uint8 enabled; Uint8 dropping; @@ -1667,6 +1669,7 @@ private: UintR tcheckGcpId; struct TransCounters { + TransCounters() {} enum { Off, Timer, Started } c_trans_status; UintR cattrinfoCount; UintR ctransCount; @@ -1805,6 +1808,7 @@ private: */ public: struct CommitAckMarker { + CommitAckMarker() {} Uint32 transid1; Uint32 transid2; union { Uint32 nextPool; Uint32 nextHash; }; diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index 357dec6fde7..3b25ac31d48 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -788,6 +788,7 @@ typedef Ptr<PageRange> PageRangePtr; /* WHEN THE TRIGGER IS DEACTIVATED. 
*/ /* **************************************** */ struct TupTriggerData { + TupTriggerData() {} /** * Trigger id, used by DICT/TRIX to identify the trigger diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp index 32cd7ab0460..13485a31414 100644 --- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp +++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp @@ -401,8 +401,6 @@ Dbtux::nodePopUpScans(NodeHandle& node, unsigned pos) void Dbtux::nodeSlide(NodeHandle& dstNode, NodeHandle& srcNode, unsigned cnt, unsigned i) { - Frag& frag = dstNode.m_frag; - TreeHead& tree = frag.m_tree; ndbrequire(i <= 1); while (cnt != 0) { TreeEnt ent; diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp index ef34fcefe19..a0643848530 100644 --- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp +++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp @@ -707,7 +707,6 @@ Dbtux::scanFirst(ScanOpPtr scanPtr) debugOut << "Enter first scan " << scanPtr.i << " " << scan << endl; } #endif - TreeHead& tree = frag.m_tree; // set up index keys for this operation setKeyAttrs(frag); // scan direction 0, 1 @@ -987,7 +986,6 @@ Dbtux::scanVisible(ScanOpPtr scanPtr, TreeEnt ent) const ScanOp& scan = *scanPtr.p; const Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI); Uint32 tableFragPtrI = frag.m_tupTableFragPtrI; - Uint32 fragId = frag.m_fragId; Uint32 tupAddr = getTupAddr(frag, ent); Uint32 tupVersion = ent.m_tupVersion; // check for same tuple twice in row diff --git a/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp b/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp index 0d0c0ed9592..970ee794281 100644 --- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp +++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp @@ -221,7 +221,6 @@ Dbtux::treeRemove(Frag& frag, TreePos treePos) void Dbtux::treeRemoveInner(Frag& frag, NodeHandle lubNode, unsigned pos) { - TreeHead& tree = frag.m_tree; TreeEnt ent; // find g.l.b node NodeHandle glbNode(frag); diff --git a/storage/ndb/src/kernel/blocks/lgman.cpp b/storage/ndb/src/kernel/blocks/lgman.cpp index d61dc422556..90c72fe7010 100644 --- a/storage/ndb/src/kernel/blocks/lgman.cpp +++ b/storage/ndb/src/kernel/blocks/lgman.cpp @@ -133,10 +133,6 @@ void Lgman::execSTTOR(Signal* signal) { jamEntry(); - - const Uint32 startphase = signal->theData[1]; - const Uint32 typeOfStart = signal->theData[7]; - sendSTTORRY(signal); return; @@ -274,7 +270,6 @@ Lgman::execDUMP_STATE_ORD(Signal* signal){ !ptr.p->m_log_sync_waiters.isEmpty()); if (!ptr.p->m_log_buffer_waiters.isEmpty()) { - Uint32 free_buffer= ptr.p->m_free_buffer_words; Ptr<Log_waiter> waiter; Local_log_waiter_list list(m_log_waiter_pool, ptr.p->m_log_buffer_waiters); @@ -1937,8 +1932,7 @@ void Lgman::execSUB_GCP_COMPLETE_REP(Signal* signal) { jamEntry(); - Uint32 gci= ((SubGcpCompleteRep*)signal->getDataPtr())->gci; - + Ptr<Logfile_group> ptr; m_logfile_group_list.first(ptr); diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp b/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp index 9c02226596e..84a0ada2d01 100644 --- a/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp +++ b/storage/ndb/src/kernel/blocks/ndbcntr/Ndbcntr.hpp @@ -82,6 +82,7 @@ public: */ struct StartRecord { + StartRecord() {} Uint64 m_startTime; void reset(); diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp index 8ad231479f1..e2d402ca76a 100644 --- 
a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp +++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp @@ -853,7 +853,6 @@ Ndbcntr::trySystemRestart(Signal* signal){ */ const bool allNodes = c_start.m_waiting.equal(c_allDefinedNodes); const bool allClusterNodes = c_start.m_waiting.equal(c_clusterNodes); - const Uint64 now = NdbTick_CurrentMillisecond(); if(!allClusterNodes){ jam(); @@ -1425,7 +1424,6 @@ void Ndbcntr::execNODE_FAILREP(Signal* signal) const bool tMasterFailed = allFailed.get(cmasterNodeId); const bool tStarted = !failedStarted.isclear(); const bool tStarting = !failedStarting.isclear(); - const bool tWaiting = !failedWaiting.isclear(); if(tMasterFailed){ jam(); @@ -2514,8 +2512,6 @@ void Ndbcntr::execABORT_ALL_CONF(Signal* signal){ void Ndbcntr::execABORT_ALL_REF(Signal* signal){ jamEntry(); - AbortAllRef *abortAllRef = (AbortAllRef *)&signal->theData[0]; - AbortAllRef::ErrorCode errorCode = (AbortAllRef::ErrorCode) abortAllRef->errorCode; StopRef * const stopRef = (StopRef *)&signal->theData[0]; stopRef->senderData = c_stopRec.stopReq.senderData; diff --git a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp index e8f2deb016c..cc667225ce2 100644 --- a/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp +++ b/storage/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp @@ -106,6 +106,8 @@ class AsyncFile; class Request { public: + Request() {} + enum Action { open, close, diff --git a/storage/ndb/src/kernel/blocks/pgman.cpp b/storage/ndb/src/kernel/blocks/pgman.cpp index af648c71253..4e1d1c29ab8 100644 --- a/storage/ndb/src/kernel/blocks/pgman.cpp +++ b/storage/ndb/src/kernel/blocks/pgman.cpp @@ -471,7 +471,6 @@ Pgman::lirs_stack_prune() debugOut << "PGMAN: >lirs_stack_prune" << endl; #endif Page_stack& pl_stack = m_page_stack; - Page_queue& pl_queue = m_page_queue; Ptr<Page_entry> ptr; while (pl_stack.first(ptr)) // first is stack bottom @@ -804,7 +803,6 @@ Pgman::process_bind(Signal* signal, Ptr<Page_entry> ptr) #ifdef VM_TRACE debugOut << "PGMAN: " << ptr << " : process_bind" << endl; #endif - Page_sublist& pl_bind = *m_page_sublist[Page_entry::SL_BIND]; Page_queue& pl_queue = m_page_queue; Ptr<GlobalPage> gptr; diff --git a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp index 92b8b5d3306..d0ce9f4d1e0 100644 --- a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp +++ b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp @@ -105,6 +105,7 @@ public: }; struct StartRecord { + StartRecord() {} void reset(){ m_startKey++; m_startNode = 0; @@ -173,6 +174,7 @@ public: }; struct ArbitRec { + ArbitRec() {} ArbitState state; // state bool newstate; // flag to initialize new state unsigned thread; // identifies a continueB "thread" diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index 3a6f5151fec..bf57bd07f5a 100644 --- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ -849,7 +849,6 @@ void Qmgr::execCM_REGCONF(Signal* signal) jamEntry(); const CmRegConf * const cmRegConf = (CmRegConf *)&signal->theData[0]; - Uint32 presidentNodeId = cmRegConf->presidentNodeId; if (!ndbCompatible_ndb_ndb(NDB_VERSION, cmRegConf->presidentVersion)) { jam(); @@ -1274,7 +1273,6 @@ Qmgr::check_startup(Signal* signal) /** * Check for missing node group directly */ - char buf[100]; NdbNodeBitmask check; check.assign(c_definedNodes); check.bitANDC(c_start.m_starting_nodes); // Not connected nodes diff --git 
a/storage/ndb/src/kernel/blocks/restore.cpp b/storage/ndb/src/kernel/blocks/restore.cpp index 214cddeeda5..695f393c3b6 100644 --- a/storage/ndb/src/kernel/blocks/restore.cpp +++ b/storage/ndb/src/kernel/blocks/restore.cpp @@ -71,8 +71,6 @@ Restore::execSTTOR(Signal* signal) { jamEntry(); - const Uint32 startphase = signal->theData[1]; - const Uint32 typeOfStart = signal->theData[7]; c_lqh = (Dblqh*)globalData.getBlock(DBLQH); c_tup = (Dbtup*)globalData.getBlock(DBTUP); sendSTTORRY(signal); @@ -801,7 +799,6 @@ Restore::parse_table_description(Signal* signal, FilePtr file_ptr, return; } - Uint32 null_offset = 0; Column c; Uint32 colstore[sizeof(Column)/sizeof(Uint32)]; diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp index b201d05726d..42d4e49b14b 100644 --- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp +++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp @@ -614,6 +614,8 @@ Suma::removeSubscribersOnNode(Signal *signal, Uint32 nodeId) bool found = false; KeyTable<Table>::Iterator it; + LINT_INIT(it.bucket); + LINT_INIT(it.curr.p); for(c_tables.first(it);!it.isNull();c_tables.next(it)) { LocalDLList<Subscriber> subbs(c_subscriberPool,it.curr.p->c_subscribers); @@ -1265,7 +1267,7 @@ Suma::execSUB_SYNC_REQ(Signal* signal) jam(); syncPtr.p->m_tableList.append(&subPtr.p->m_tableId, 1); if(signal->getNoOfSections() > 0){ - SegmentedSectionPtr ptr; + SegmentedSectionPtr ptr(0,0,0); signal->getSection(ptr, SubSyncReq::ATTRIBUTE_LIST); LocalDataBuffer<15> attrBuf(c_dataBufferPool,syncPtr.p->m_attributeList); append(attrBuf, ptr, getSectionSegmentPool()); @@ -1711,7 +1713,7 @@ Suma::execGET_TABINFO_CONF(Signal* signal){ Uint32 tableId = conf->tableId; TablePtr tabPtr; c_tablePool.getPtr(tabPtr, conf->senderData); - SegmentedSectionPtr ptr; + SegmentedSectionPtr ptr(0,0,0); signal->getSection(ptr, GetTabInfoConf::DICT_TAB_INFO); ndbrequire(tabPtr.p->parseTable(ptr, *this)); releaseSections(signal); @@ -3626,7 +3628,6 @@ Suma::execSUB_GCP_COMPLETE_REP(Signal* signal) if(c_buckets[i].m_buffer_tail != RNIL) { - Uint32* dst; get_buffer_ptr(signal, i, gci, 0); } } @@ -3971,9 +3972,6 @@ void Suma::completeSubRemove(SubscriptionPtr subPtr) { DBUG_ENTER("Suma::completeSubRemove"); - Uint32 subscriptionId = subPtr.p->m_subscriptionId; - Uint32 subscriptionKey = subPtr.p->m_subscriptionKey; - c_subscriptions.release(subPtr); DBUG_PRINT("info",("c_subscriptionPool size: %d free: %d", c_subscriptionPool.getSize(), @@ -4566,6 +4564,7 @@ Suma::execSUMA_HANDOVER_CONF(Signal* signal) { DBUG_VOID_RETURN; } +#ifdef NOT_USED static NdbOut& operator<<(NdbOut & out, const Suma::Page_pos & pos) @@ -4577,6 +4576,7 @@ operator<<(NdbOut & out, const Suma::Page_pos & pos) << " ]"; return out; } +#endif Uint32* Suma::get_buffer_ptr(Signal* signal, Uint32 buck, Uint32 gci, Uint32 sz) @@ -4747,6 +4747,7 @@ loop: ptr.p->m_free = count; Buffer_page* page; + LINT_INIT(page); for(Uint32 i = 0; i<count; i++) { page = (Buffer_page*)m_tup->c_page_pool.getPtr(ref); diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.hpp b/storage/ndb/src/kernel/blocks/suma/Suma.hpp index 64f095860ec..675706d5431 100644 --- a/storage/ndb/src/kernel/blocks/suma/Suma.hpp +++ b/storage/ndb/src/kernel/blocks/suma/Suma.hpp @@ -149,6 +149,7 @@ public: */ struct Subscription { + Subscription() {} Uint32 m_senderRef; Uint32 m_senderData; Uint32 m_subscriptionId; diff --git a/storage/ndb/src/kernel/blocks/tsman.cpp b/storage/ndb/src/kernel/blocks/tsman.cpp index 83b662c91be..daf7750a7e6 100644 --- 
a/storage/ndb/src/kernel/blocks/tsman.cpp +++ b/storage/ndb/src/kernel/blocks/tsman.cpp @@ -126,9 +126,6 @@ Tsman::execSTTOR(Signal* signal) { jamEntry(); - const Uint32 startphase = signal->theData[1]; - const Uint32 typeOfStart = signal->theData[7]; - sendSTTORRY(signal); return; @@ -1186,7 +1183,6 @@ Tsman::scan_extent_headers(Signal* signal, Ptr<Datafile> ptr) Uint32 firstFree= RNIL; Uint32 size = ptr.p->m_extent_size; Uint32 per_page = ptr.p->m_online.m_extent_headers_per_extent_page; - Uint32 SZ= File_formats::Datafile::EXTENT_HEADER_BITMASK_BITS_PER_PAGE; Uint32 pages= ptr.p->m_online.m_offset_data_pages - 1; Uint32 datapages= ptr.p->m_online.m_data_pages; Dbtup* tup= (Dbtup*)globalData.getBlock(DBTUP); diff --git a/storage/ndb/src/kernel/vm/DynArr256.cpp b/storage/ndb/src/kernel/vm/DynArr256.cpp index 83e36f34dba..ff3e7578c6c 100644 --- a/storage/ndb/src/kernel/vm/DynArr256.cpp +++ b/storage/ndb/src/kernel/vm/DynArr256.cpp @@ -303,7 +303,6 @@ DynArr256::expand(Uint32 pos) Uint32 idx = 0; Uint32 alloc[5]; Uint32 sz = m_head.m_sz; - Uint32 shl = 0; for (; pos >= g_max_sizes[sz]; sz++); diff --git a/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp b/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp index 059ecd81c4d..2c2d66d1334 100644 --- a/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp +++ b/storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp @@ -220,7 +220,6 @@ Ndbd_mem_manager::init(bool alloc_less_memory) while (cnt < MAX_CHUNKS && allocated < pages) { InitChunk chunk; - Uint32 remaining = pages - allocated; #if defined(_lint) || defined(FORCE_INIT_OF_VARS) memset((char*) &chunk, 0 , sizeof(chunk)); diff --git a/storage/ndb/src/mgmapi/LocalConfig.cpp b/storage/ndb/src/mgmapi/LocalConfig.cpp index f01b6ff3da3..476e2d6dd84 100644 --- a/storage/ndb/src/mgmapi/LocalConfig.cpp +++ b/storage/ndb/src/mgmapi/LocalConfig.cpp @@ -73,9 +73,9 @@ LocalConfig::init(const char *connectString, //4. Check Ndb.cfg in NDB_HOME { bool fopenError; - char *buf= NdbConfig_NdbCfgName(1 /*true*/); - NdbAutoPtr<char> tmp_aptr(buf); - if(readFile(buf, fopenError)) + char *buf2= NdbConfig_NdbCfgName(1 /*true*/); + NdbAutoPtr<char> tmp_aptr(buf2); + if(readFile(buf2, fopenError)) DBUG_RETURN(true); if (!fopenError) DBUG_RETURN(false); @@ -84,9 +84,9 @@ LocalConfig::init(const char *connectString, //5. Check Ndb.cfg in cwd { bool fopenError; - char *buf= NdbConfig_NdbCfgName(0 /*false*/); - NdbAutoPtr<char> tmp_aptr(buf); - if(readFile(buf, fopenError)) + char *buf2= NdbConfig_NdbCfgName(0 /*false*/); + NdbAutoPtr<char> tmp_aptr(buf2); + if(readFile(buf2, fopenError)) DBUG_RETURN(true); if (!fopenError) DBUG_RETURN(false); @@ -94,9 +94,9 @@ LocalConfig::init(const char *connectString, //7. 
Check { - char buf[256]; - BaseString::snprintf(buf, sizeof(buf), "host=localhost:%s", NDB_PORT); - if(readConnectString(buf, "default connect string")) + char buf2[256]; + BaseString::snprintf(buf2, sizeof(buf2), "host=localhost:%s", NDB_PORT); + if(readConnectString(buf2, "default connect string")) DBUG_RETURN(true); } diff --git a/storage/ndb/src/mgmapi/mgmapi.cpp b/storage/ndb/src/mgmapi/mgmapi.cpp index 486d7fec1a1..28dbf573bdf 100644 --- a/storage/ndb/src/mgmapi/mgmapi.cpp +++ b/storage/ndb/src/mgmapi/mgmapi.cpp @@ -472,7 +472,6 @@ ndb_mgm_connect(NdbMgmHandle handle, int no_retries, LocalConfig &cfg= handle->cfg; NDB_SOCKET_TYPE sockfd= NDB_INVALID_SOCKET; Uint32 i; - int binderror = 0; SocketClient s(0, 0); s.set_connect_timeout(handle->connect_timeout); if (!s.init()) @@ -842,12 +841,12 @@ ndb_mgm_get_status(NdbMgmHandle handle) break; } - Vector<BaseString> split; - tmp.split(split, ":.", 4); - if(split.size() != 4) + Vector<BaseString> split2; + tmp.split(split2, ":.", 4); + if(split2.size() != 4) break; - const int id = atoi(split[1].c_str()); + const int id = atoi(split2[1].c_str()); if(id != nodeId){ ptr++; i++; @@ -855,9 +854,9 @@ ndb_mgm_get_status(NdbMgmHandle handle) ptr->node_id = id; } - split[3].trim(" \t\n"); + split2[3].trim(" \t\n"); - if(status_ackumulate(ptr,split[2].c_str(), split[3].c_str()) != 0) { + if(status_ackumulate(ptr,split2[2].c_str(), split2[3].c_str()) != 0) { break; } } diff --git a/storage/ndb/src/mgmclient/CommandInterpreter.cpp b/storage/ndb/src/mgmclient/CommandInterpreter.cpp index bb9ef764109..e1deca38f4a 100644 --- a/storage/ndb/src/mgmclient/CommandInterpreter.cpp +++ b/storage/ndb/src/mgmclient/CommandInterpreter.cpp @@ -1478,7 +1478,6 @@ CommandInterpreter::executePurge(char* parameters) return -1; } - int i; char *str; if (ndb_mgm_purge_stale_sessions(m_mgmsrv, &str)) { @@ -1596,7 +1595,6 @@ CommandInterpreter::executeConnect(char* parameters, bool interactive) { BaseString *basestring = NULL; - int retval; disconnect(); if (!emptyString(parameters)) { basestring= new BaseString(parameters); @@ -2039,7 +2037,6 @@ CommandInterpreter::executeStatus(int processId, ndb_mgm_node_status status; Uint32 startPhase, version; - bool system; struct ndb_mgm_cluster_state *cl; cl = ndb_mgm_get_status(m_mgmsrv); diff --git a/storage/ndb/src/mgmsrv/ConfigInfo.cpp b/storage/ndb/src/mgmsrv/ConfigInfo.cpp index fe0e7c9f429..87976b92718 100644 --- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp @@ -2261,7 +2261,6 @@ ConfigInfo::ConfigInfo() break; case CI_BOOL: { - bool tmp_bool; require(InitConfigFileParser::convertStringToBool(param._default, default_bool)); require(p->put(param._fname, default_bool)); break; @@ -2269,7 +2268,6 @@ ConfigInfo::ConfigInfo() case CI_INT: case CI_INT64: { - Uint64 tmp_uint64; require(InitConfigFileParser::convertStringToUint64(param._default, default_uint64)); require(p->put(param._fname, default_uint64)); break; @@ -2780,7 +2778,7 @@ applyDefaultValues(InitConfigFileParser::Context & ctx, Properties::Iterator it(defaults); for(const char * name = it.first(); name != NULL; name = it.next()){ - ConfigInfo::Status st = ctx.m_info->getStatus(ctx.m_currentInfo, name); + (void) ctx.m_info->getStatus(ctx.m_currentInfo, name); if(!ctx.m_currentSection->contains(name)){ switch (ctx.m_info->getType(ctx.m_currentInfo, name)){ case ConfigInfo::CI_INT: @@ -3411,7 +3409,7 @@ saveInConfigValues(InitConfigFileParser::Context & ctx, const char * data){ if(!ctx.m_currentInfo->get(n, &info)) continue; - 
Uint32 id = 0; + id = 0; info->get("Id", &id); if(id == KEY_INTERNAL) diff --git a/storage/ndb/src/mgmsrv/ConfigInfo.hpp b/storage/ndb/src/mgmsrv/ConfigInfo.hpp index 08b12522807..6f9c8ad17b8 100644 --- a/storage/ndb/src/mgmsrv/ConfigInfo.hpp +++ b/storage/ndb/src/mgmsrv/ConfigInfo.hpp @@ -26,8 +26,11 @@ * A MANDATORY parameters must be specified in the config file * An UNDEFINED parameter may or may not be specified in the config file */ -static const char* MANDATORY = (char*)~(UintPtr)0;// Default value for mandatory params. -static const char* UNDEFINED = 0; // Default value for undefined params. + +// Default value for mandatory params. +#define MANDATORY ((char*)~(UintPtr)0) +// Default value for undefined params. +#define UNDEFINED ((char*) 0) /** * @class ConfigInfo diff --git a/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp b/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp index ca0471f97b4..b159c90605e 100644 --- a/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp +++ b/storage/ndb/src/mgmsrv/InitConfigFileParser.cpp @@ -701,34 +701,35 @@ load_defaults(Vector<struct my_option>& options, const char* groups[]) BaseString extra_file; BaseString group_suffix; - const char *save_file = defaults_file; - char *save_extra_file = defaults_extra_file; - const char *save_group_suffix = defaults_group_suffix; + const char *save_file = my_defaults_file; + char *save_extra_file = my_defaults_extra_file; + const char *save_group_suffix = my_defaults_group_suffix; - if (defaults_file) + if (my_defaults_file) { - file.assfmt("--defaults-file=%s", defaults_file); + file.assfmt("--defaults-file=%s", my_defaults_file); argv[argc++] = file.c_str(); } - if (defaults_extra_file) + if (my_defaults_extra_file) { - extra_file.assfmt("--defaults-extra-file=%s", defaults_extra_file); + extra_file.assfmt("--defaults-extra-file=%s", my_defaults_extra_file); argv[argc++] = extra_file.c_str(); } - if (defaults_group_suffix) + if (my_defaults_group_suffix) { - group_suffix.assfmt("--defaults-group-suffix=%s", defaults_group_suffix); + group_suffix.assfmt("--defaults-group-suffix=%s", + my_defaults_group_suffix); argv[argc++] = group_suffix.c_str(); } char ** tmp = (char**)argv; int ret = load_defaults("my", groups, &argc, &tmp); - defaults_file = save_file; - defaults_extra_file = save_extra_file; - defaults_group_suffix = save_group_suffix; + my_defaults_file = save_file; + my_defaults_extra_file = save_extra_file; + my_defaults_group_suffix = save_group_suffix; if (ret == 0) { diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp index b0d9818dfff..5560259a957 100644 --- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -100,6 +100,7 @@ MgmtSrvr::logLevelThread_C(void* m) extern EventLogger g_eventLogger; +#ifdef NOT_USED static NdbOut& operator<<(NdbOut& out, const LogLevel & ll) { @@ -109,6 +110,7 @@ operator<<(NdbOut& out, const LogLevel & ll) out << "]"; return out; } +#endif void MgmtSrvr::logLevelThreadRun() @@ -1127,7 +1129,9 @@ int MgmtSrvr::sendSTOP_REQ(const Vector<NodeId> &node_ids, break; } case GSN_STOP_CONF:{ +#ifdef NOT_USED const StopConf * const ref = CAST_CONSTPTR(StopConf, signal->getDataPtr()); +#endif const NodeId nodeId = refToNode(signal->header.theSendersBlockRef); #ifdef VM_TRACE ndbout_c("Node %d single user mode", nodeId); @@ -1160,7 +1164,6 @@ int MgmtSrvr::sendSTOP_REQ(const Vector<NodeId> &node_ids, const NodeFailRep * const rep = CAST_CONSTPTR(NodeFailRep, signal->getDataPtr()); NdbNodeBitmask mask; - char buf[100]; 
mask.assign(NdbNodeBitmask::Size, rep->theNodes); mask.bitAND(notstarted); nodes.bitANDC(mask); @@ -1354,7 +1357,7 @@ int MgmtSrvr::restartNodes(const Vector<NodeId> &node_ids, for (unsigned i = 0; i < node_ids.size(); i++) { - int result = start(node_ids[i]); + (void) start(node_ids[i]); } return 0; } @@ -2064,8 +2067,10 @@ MgmtSrvr::alloc_node_id_req(NodeId free_node_id, enum ndb_mgm_node_type type) switch (gsn) { case GSN_ALLOC_NODEID_CONF: { +#ifdef NOT_USED const AllocNodeIdConf * const conf = CAST_CONSTPTR(AllocNodeIdConf, signal->getDataPtr()); +#endif return 0; } case GSN_ALLOC_NODEID_REF: diff --git a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp index d73dd4561e9..66e2fde0d40 100644 --- a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp +++ b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp @@ -49,6 +49,7 @@ class Ndb_mgmd_event_service : public EventLoggerBase friend class MgmtSrvr; public: struct Event_listener : public EventLoggerBase { + Event_listener() {} NDB_SOCKET_TYPE m_socket; Uint32 m_parsable; }; diff --git a/storage/ndb/src/mgmsrv/Services.cpp b/storage/ndb/src/mgmsrv/Services.cpp index d76ae4d6be5..1cde5242a1d 100644 --- a/storage/ndb/src/mgmsrv/Services.cpp +++ b/storage/ndb/src/mgmsrv/Services.cpp @@ -669,14 +669,13 @@ MgmApiSession::getConfig_common(Parser_t::Context &, NdbMutex_Lock(m_mgmsrv.m_configMutex); const ConfigValues * cfg = &conf->m_configValues->m_config; - const Uint32 size = cfg->getPackedSize(); UtilBuffer src; cfg->pack(src); NdbMutex_Unlock(m_mgmsrv.m_configMutex); char *tmp_str = (char *) malloc(base64_needed_encoded_length(src.length())); - int res = base64_encode(src.get_data(), src.length(), tmp_str); + (void) base64_encode(src.get_data(), src.length(), tmp_str); m_output->println("get config reply"); m_output->println("result: Ok"); @@ -850,8 +849,6 @@ MgmApiSession::setClusterLogLevel(Parser<MgmApiSession>::Context &, const char *reply= "set cluster loglevel reply"; Uint32 node, level, cat; BaseString errorString; - SetLogLevelOrd logLevel; - int result; DBUG_ENTER("MgmApiSession::setClusterLogLevel"); args.get("node", &node); args.get("category", &cat); @@ -898,7 +895,6 @@ MgmApiSession::setLogLevel(Parser<MgmApiSession>::Context &, Uint32 node = 0, level = 0, cat; BaseString errorString; SetLogLevelOrd logLevel; - int result; logLevel.clear(); args.get("node", &node); args.get("category", &cat); @@ -1327,6 +1323,8 @@ MgmApiSession::setLogFilter(Parser_t::Context &ctx, m_output->println(""); } +#ifdef NOT_USED + static NdbOut& operator<<(NdbOut& out, const LogLevel & ll) { @@ -1336,6 +1334,7 @@ operator<<(NdbOut& out, const LogLevel & ll) out << "]"; return out; } +#endif void Ndb_mgmd_event_service::log(int eventType, const Uint32* theData, NodeId nodeId){ diff --git a/storage/ndb/src/mgmsrv/main.cpp b/storage/ndb/src/mgmsrv/main.cpp index 1c52bf7c518..53c8299216f 100644 --- a/storage/ndb/src/mgmsrv/main.cpp +++ b/storage/ndb/src/mgmsrv/main.cpp @@ -190,7 +190,6 @@ static void usage() */ int main(int argc, char** argv) { - int mgm_connect_result; NDB_INIT(argv[0]); diff --git a/storage/ndb/src/ndbapi/Ndb.cpp b/storage/ndb/src/ndbapi/Ndb.cpp index 0db20f723ea..20c2ea711d2 100644 --- a/storage/ndb/src/ndbapi/Ndb.cpp +++ b/storage/ndb/src/ndbapi/Ndb.cpp @@ -255,8 +255,6 @@ Ndb::waitUntilReady(int timeout) DBUG_ENTER("Ndb::waitUntilReady"); int secondsCounter = 0; int milliCounter = 0; - int noChecksSinceFirstAliveFound = 0; - int id; if (theInitState != Initialised) { // Ndb::init is not called diff --git 
a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp index ba2329888d2..1f4a9838c91 100644 --- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -2904,7 +2904,6 @@ int NdbDictionaryImpl::dropTableGlobal(NdbTableImpl & impl) { int res; - const char * name = impl.getName(); DBUG_ENTER("NdbDictionaryImpl::dropTableGlobal"); DBUG_ASSERT(impl.m_status != NdbDictionary::Object::New); DBUG_ASSERT(impl.m_indexType == NdbDictionary::Object::TypeUndefined); @@ -4277,8 +4276,6 @@ void NdbDictInterface::execWAIT_GCP_CONF(NdbApiSignal* signal, LinearSectionPtr ptr[3]) { - const WaitGCPConf * const conf= - CAST_CONSTPTR(WaitGCPConf, signal->getDataPtr()); m_waiter.signal(NO_WAIT); } diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp index 828ba51bc21..a425819df6b 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp @@ -49,7 +49,7 @@ static Gci_container_pod g_empty_gci_container; static const Uint32 ACTIVE_GCI_DIRECTORY_SIZE = 4; static const Uint32 ACTIVE_GCI_MASK = ACTIVE_GCI_DIRECTORY_SIZE - 1; -#ifdef VM_TRACE +#if defined(VM_TRACE) && defined(NOT_USED) static void print_std(const SubTableData * sdata, LinearSectionPtr ptr[3]) { @@ -731,8 +731,6 @@ NdbEventOperationImpl::receive_event() { // Parse the new table definition and // create a table object - NdbDictionary::Dictionary *myDict = m_ndb->getDictionary(); - NdbDictionaryImpl *dict = & NdbDictionaryImpl::getImpl(*myDict); NdbError error; NdbDictInterface dif(error); NdbTableImpl *at; diff --git a/storage/ndb/src/ndbapi/NdbIndexOperation.cpp b/storage/ndb/src/ndbapi/NdbIndexOperation.cpp index 0965338b325..88e9253880f 100644 --- a/storage/ndb/src/ndbapi/NdbIndexOperation.cpp +++ b/storage/ndb/src/ndbapi/NdbIndexOperation.cpp @@ -247,8 +247,6 @@ NdbIndexOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransactionId) // Simple state is set if start and commit is set and it is // a read request. Otherwise it is set to zero. //------------------------------------------------------------- - Uint8 tReadInd = (theOperationType == ReadRequest); - Uint8 tSimpleState = tReadInd & tSimpleAlt; //theNdbCon->theSimpleState = tSimpleState; tcKeyReq->transId1 = tTransId1; diff --git a/storage/ndb/src/ndbapi/NdbIndexStat.cpp b/storage/ndb/src/ndbapi/NdbIndexStat.cpp index f95dcfb994c..773c302e0cd 100644 --- a/storage/ndb/src/ndbapi/NdbIndexStat.cpp +++ b/storage/ndb/src/ndbapi/NdbIndexStat.cpp @@ -240,7 +240,6 @@ NdbIndexStat::stat_oldest(const Area& a) m = ~(Uint32)0; // shut up incorrect CC warning for (i = 0; i < a.m_entries; i++) { Pointer& p = a.get_pointer(i); - Entry& e = a.get_entry(i); Uint32 m2 = m_seq >= p.m_seq ? m_seq - p.m_seq : p.m_seq - m_seq; if (! 
found || m < m2) { m = m2; diff --git a/storage/ndb/src/ndbapi/NdbOperationDefine.cpp b/storage/ndb/src/ndbapi/NdbOperationDefine.cpp index ba26831749d..757d39a75ce 100644 --- a/storage/ndb/src/ndbapi/NdbOperationDefine.cpp +++ b/storage/ndb/src/ndbapi/NdbOperationDefine.cpp @@ -530,11 +530,9 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo, }//if }//if - // Including bits in last word - const Uint32 totalSizeInWords = (sizeInBytes + 3)/4; // Excluding bits in last word const Uint32 sizeInWords = sizeInBytes / 4; - AttributeHeader& ah = AttributeHeader::init(&ahValue, tAttrId, sizeInBytes); + (void) AttributeHeader::init(&ahValue, tAttrId, sizeInBytes); insertATTRINFO( ahValue ); /*********************************************************************** @@ -560,10 +558,6 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo, }//if theErrorLine++; DBUG_RETURN(0); - -error: - setErrorCodeAbort(tReturnCode); - DBUG_RETURN(-1); }//NdbOperation::setValue() NdbBlob* diff --git a/storage/ndb/src/ndbapi/NdbOperationExec.cpp b/storage/ndb/src/ndbapi/NdbOperationExec.cpp index 80f388605b0..486c772de4d 100644 --- a/storage/ndb/src/ndbapi/NdbOperationExec.cpp +++ b/storage/ndb/src/ndbapi/NdbOperationExec.cpp @@ -177,8 +177,6 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId) // Simple state is set if start and commit is set and it is // a read request. Otherwise it is set to zero. //------------------------------------------------------------- - Uint8 tReadInd = (theOperationType == ReadRequest); - Uint8 tSimpleState = tReadInd & tSimpleIndicator; tcKeyReq->transId1 = tTransId1; tcKeyReq->transId2 = tTransId2; diff --git a/storage/ndb/src/ndbapi/NdbOperationSearch.cpp b/storage/ndb/src/ndbapi/NdbOperationSearch.cpp index dd995989799..605c66d9859 100644 --- a/storage/ndb/src/ndbapi/NdbOperationSearch.cpp +++ b/storage/ndb/src/ndbapi/NdbOperationSearch.cpp @@ -60,7 +60,6 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, tAttrInfo->m_name.c_str(), theOperationType, (long) aValuePassed)); - Uint32 tData; const char* aValue = aValuePassed; Uint64 tempData[512]; @@ -310,10 +309,6 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, equal_error2: setErrorCodeAbort(4206); DBUG_RETURN(-1); - - equal_error3: - setErrorCodeAbort(4209); - DBUG_RETURN(-1); } /****************************************************************************** @@ -343,7 +338,6 @@ NdbOperation::insertKEYINFO(const char* aValue, Uint32 tEndPos; Uint32 tPos; Uint32 signalCounter; - Uint32 tData; /***************************************************************************** * Calculate the end position of the attribute in the key information. 
* @@ -543,7 +537,6 @@ NdbOperation::handle_distribution_key(const Uint64* value, Uint32 len) * Copy distribution key to linear memory */ NdbColumnImpl* const * cols = m_accessTable->m_columns.getBase(); - Uint32 len = 0; Uint64 tmp[1000]; Uint32 chunk = 8; diff --git a/storage/ndb/src/ndbapi/NdbRecAttr.cpp b/storage/ndb/src/ndbapi/NdbRecAttr.cpp index c46219af7d0..b633b14465c 100644 --- a/storage/ndb/src/ndbapi/NdbRecAttr.cpp +++ b/storage/ndb/src/ndbapi/NdbRecAttr.cpp @@ -120,8 +120,8 @@ NdbRecAttr::clone() const { } bool -NdbRecAttr::receive_data(const Uint32 * data, Uint32 sz){ - const Uint32 n = m_size_in_bytes; +NdbRecAttr::receive_data(const Uint32 * data, Uint32 sz) +{ if(sz) { if(!copyoutRequired()) diff --git a/storage/ndb/src/ndbapi/NdbScanFilter.cpp b/storage/ndb/src/ndbapi/NdbScanFilter.cpp index 2e9e338d5aa..6ff7485416b 100644 --- a/storage/ndb/src/ndbapi/NdbScanFilter.cpp +++ b/storage/ndb/src/ndbapi/NdbScanFilter.cpp @@ -31,6 +31,7 @@ class NdbScanFilterImpl { public: + NdbScanFilterImpl() {} struct State { NdbScanFilter::Group m_group; Uint32 m_popCount; diff --git a/storage/ndb/src/ndbapi/NdbScanOperation.cpp b/storage/ndb/src/ndbapi/NdbScanOperation.cpp index 6e867df47a2..fe650827347 100644 --- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp +++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp @@ -852,7 +852,6 @@ NdbScanOperation::doSendScan(int aProcessorId) tSignal = theSCAN_TABREQ; Uint32 tupKeyLen = theTupKeyLen; - Uint32 len = theTotalNrOfKeyWordInSignal; Uint32 aTC_ConnectPtr = theNdbCon->theTCConPtr; Uint64 transId = theNdbCon->theTransactionId; @@ -1361,8 +1360,6 @@ NdbIndexScanOperation::fix_get_values(){ Uint32 cnt = m_accessTable->getNoOfColumns() - 1; assert(cnt < NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY); - const NdbIndexImpl * idx = m_accessTable->m_index; - const NdbTableImpl * tab = m_currentTable; for(Uint32 i = 0; i<cnt; i++){ Uint32 val = theTupleKeyDefined[i][0]; switch(val){ diff --git a/storage/ndb/src/ndbapi/SignalSender.cpp b/storage/ndb/src/ndbapi/SignalSender.cpp index 7f1958e9b1f..0c0a9bd0e1f 100644 --- a/storage/ndb/src/ndbapi/SignalSender.cpp +++ b/storage/ndb/src/ndbapi/SignalSender.cpp @@ -19,6 +19,7 @@ #include <signaldata/NFCompleteRep.hpp> #include <signaldata/NodeFailRep.hpp> +#ifdef NOT_USED static void require(bool x) @@ -26,6 +27,7 @@ require(bool x) if (!x) abort(); } +#endif SimpleSignal::SimpleSignal(bool dealloc){ memset(this, 0, sizeof(* this)); @@ -172,6 +174,7 @@ SignalSender::waitFor(Uint32 timeOutMillis, T & t) class WaitForAny { public: + WaitForAny() {} SimpleSignal * check(Vector<SimpleSignal*> & m_jobBuffer){ if(m_jobBuffer.size() > 0){ SimpleSignal * s = m_jobBuffer[0]; @@ -191,6 +194,7 @@ SignalSender::waitFor(Uint32 timeOutMillis){ class WaitForNode { public: + WaitForNode() {} Uint32 m_nodeId; SimpleSignal * check(Vector<SimpleSignal*> & m_jobBuffer){ Uint32 len = m_jobBuffer.size(); diff --git a/storage/ndb/src/ndbapi/TransporterFacade.cpp b/storage/ndb/src/ndbapi/TransporterFacade.cpp index 24bf6dbbc6a..80d7a7ee2d5 100644 --- a/storage/ndb/src/ndbapi/TransporterFacade.cpp +++ b/storage/ndb/src/ndbapi/TransporterFacade.cpp @@ -1379,7 +1379,7 @@ int PollGuard::wait_scan(int wait_time, NodeId nodeId, bool forceSend) int PollGuard::wait_for_input_in_loop(int wait_time, bool forceSend) { - int ret_val, response_time; + int ret_val; if (forceSend) m_tp->forceSend(m_block_no); else @@ -1441,7 +1441,7 @@ void PollGuard::wait_for_input(int wait_time) queue if it hasn't happened already. 
It is usually already out of the queue but at time-out it could be that the object is still there. */ - Uint32 cond_wait_index= m_tp->put_in_cond_wait_queue(m_waiter); + (void) m_tp->put_in_cond_wait_queue(m_waiter); m_waiter->wait(wait_time); if (m_waiter->get_cond_wait_index() != TransporterFacade::MAX_NO_THREADS) { diff --git a/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp b/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp index 0e8a5f54c6e..24ac05caf07 100644 --- a/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp +++ b/storage/ndb/src/ndbapi/ndb_cluster_connection.cpp @@ -39,6 +39,8 @@ EventLogger g_eventLogger; NdbMutex *ndb_print_state_mutex= NULL; #endif +static int g_ndb_connection_count = 0; + /* * Ndb_cluster_connection */ @@ -572,7 +574,6 @@ int Ndb_cluster_connection::connect(int no_retries, int retry_delay_in_seconds, struct ndb_mgm_reply mgm_reply; DBUG_ENTER("Ndb_cluster_connection::connect"); - const char* error = 0; do { if (m_impl.m_config_retriever == 0) DBUG_RETURN(-1); diff --git a/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp b/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp index 33346a2a1d7..a8b774ec2b8 100644 --- a/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp +++ b/storage/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp @@ -22,7 +22,6 @@ #include <NdbMutex.h> extern NdbMutex *g_ndb_connection_mutex; -static int g_ndb_connection_count = 0; class TransporterFacade; class ConfigRetriever; diff --git a/storage/ndb/tools/delete_all.cpp b/storage/ndb/tools/delete_all.cpp index e032709856e..c925e22d3ec 100644 --- a/storage/ndb/tools/delete_all.cpp +++ b/storage/ndb/tools/delete_all.cpp @@ -51,9 +51,11 @@ static struct my_option my_long_options[] = }; static void usage() { +#ifdef NOT_USED char desc[] = "tabname\n"\ "This program will delete all records in the specified table using scan delete.\n"; +#endif ndb_std_print_version(); print_defaults(MYSQL_CONFIG_NAME,load_default_groups); puts(""); diff --git a/storage/ndb/tools/desc.cpp b/storage/ndb/tools/desc.cpp index 02ab9d5f25f..4d9d6dff72a 100644 --- a/storage/ndb/tools/desc.cpp +++ b/storage/ndb/tools/desc.cpp @@ -54,10 +54,12 @@ static struct my_option my_long_options[] = }; static void usage() { +#ifdef NOT_USED char desc[] = "tabname\n"\ "This program list all properties of table(s) in NDB Cluster.\n"\ " ex: desc T1 T2 T4\n"; +#endif ndb_std_print_version(); print_defaults(MYSQL_CONFIG_NAME,load_default_groups); puts(""); @@ -96,7 +98,6 @@ int main(int argc, char** argv){ return NDBT_ProgramExit(NDBT_FAILED); } - NdbDictionary::Dictionary * dict= MyNdb.getDictionary(); for(int i= 0; i<argc;i++) { if(desc_table(&MyNdb,argv[i])) diff --git a/storage/ndb/tools/drop_index.cpp b/storage/ndb/tools/drop_index.cpp index 7cc791dcdb7..23ebfff6cf4 100644 --- a/storage/ndb/tools/drop_index.cpp +++ b/storage/ndb/tools/drop_index.cpp @@ -36,9 +36,11 @@ static struct my_option my_long_options[] = }; static void usage() { +#ifdef NOT_USED char desc[] = "[<table> <index>]+\n"\ "This program will drop index(es) in Ndb\n"; +#endif ndb_std_print_version(); print_defaults(MYSQL_CONFIG_NAME,load_default_groups); puts(""); diff --git a/storage/ndb/tools/drop_tab.cpp b/storage/ndb/tools/drop_tab.cpp index efbbba73d4b..d965be29f31 100644 --- a/storage/ndb/tools/drop_tab.cpp +++ b/storage/ndb/tools/drop_tab.cpp @@ -36,9 +36,11 @@ static struct my_option my_long_options[] = }; static void usage() { +#ifdef NOT_USED char desc[] = "tabname\n"\ "This program will drop one table in Ndb\n"; +#endif 
ndb_std_print_version(); print_defaults(MYSQL_CONFIG_NAME,load_default_groups); puts(""); diff --git a/storage/ndb/tools/listTables.cpp b/storage/ndb/tools/listTables.cpp index 6c78f93d285..359e308dcb8 100644 --- a/storage/ndb/tools/listTables.cpp +++ b/storage/ndb/tools/listTables.cpp @@ -277,6 +277,7 @@ static struct my_option my_long_options[] = }; static void usage() { +#ifdef NOT_USED char desc[] = "tabname\n"\ "This program list all system objects in NDB Cluster.\n"\ @@ -284,6 +285,7 @@ static void usage() " ex: ndb_show_tables -t 2 would show all UserTables\n"\ "To show all indexes for a table write table name as final argument\n"\ " ex: ndb_show_tables T1\n"; +#endif ndb_std_print_version(); print_defaults(MYSQL_CONFIG_NAME,load_default_groups); puts(""); diff --git a/storage/ndb/tools/ndb_condig.cpp b/storage/ndb/tools/ndb_condig.cpp index 5c842076873..31fc59a8b83 100644 --- a/storage/ndb/tools/ndb_condig.cpp +++ b/storage/ndb/tools/ndb_condig.cpp @@ -97,6 +97,7 @@ static void usage() { char desc[] = "This program will retreive config options for a ndb cluster\n"; + puts(desc); ndb_std_print_version(); print_defaults(MYSQL_CONFIG_NAME,load_default_groups); puts(""); @@ -111,12 +112,14 @@ struct Match { int m_key; BaseString m_value; + Match() {} virtual int eval(const Iter&); virtual ~Match() {} }; struct HostMatch : public Match { + HostMatch() {} virtual int eval(const Iter&); }; @@ -131,11 +134,13 @@ struct Apply struct NodeTypeApply : public Apply { + NodeTypeApply() {} virtual int apply(const Iter&); }; struct ConnectionTypeApply : public Apply { + ConnectionTypeApply() {} virtual int apply(const Iter&); }; @@ -294,10 +299,10 @@ parse_where(Vector<Match*>& where, int &argc, char**& argv) Match m; if(g_host) { - HostMatch *m = new HostMatch; - m->m_key = CFG_NODE_HOST; - m->m_value.assfmt("%s", g_host); - where.push_back(m); + HostMatch *tmp = new HostMatch; + tmp->m_key = CFG_NODE_HOST; + tmp->m_value.assfmt("%s", g_host); + where.push_back(tmp); } if(g_type) diff --git a/storage/ndb/tools/restore/Restore.hpp b/storage/ndb/tools/restore/Restore.hpp index 0517b8006ee..74834fe0abc 100644 --- a/storage/ndb/tools/restore/Restore.hpp +++ b/storage/ndb/tools/restore/Restore.hpp @@ -25,7 +25,7 @@ #include <ndb_version.h> #include <version.h> -static const char * delimiter = ";"; // Delimiter in file dump +#define delimiter ";" const int FileNameLenC = 256; const int TableNameLenC = 256; @@ -377,6 +377,7 @@ public: m_values_e.push_back(m_values[i]); m_values.clear(); } + LogEntry() {} ~LogEntry() { Uint32 i; diff --git a/storage/ndb/tools/restore/consumer.hpp b/storage/ndb/tools/restore/consumer.hpp index 36986ea30ba..b05f7e00402 100644 --- a/storage/ndb/tools/restore/consumer.hpp +++ b/storage/ndb/tools/restore/consumer.hpp @@ -24,6 +24,7 @@ extern const char *Ndb_apply_table; class BackupConsumer { public: + BackupConsumer() {} virtual ~BackupConsumer() { } virtual bool init() { return true;} virtual bool object(Uint32 tableType, const void*) { return true;} diff --git a/storage/ndb/tools/restore/restore_main.cpp b/storage/ndb/tools/restore/restore_main.cpp index 1f91c03d2cf..4734e9f609d 100644 --- a/storage/ndb/tools/restore/restore_main.cpp +++ b/storage/ndb/tools/restore/restore_main.cpp @@ -605,8 +605,8 @@ main(int argc, char** argv) while ((tuple = dataIter.getNextTuple(res= 1)) != 0) { if (checkSysTable(tuple->getTable())) - for(Uint32 i= 0; i < g_consumers.size(); i++) - g_consumers[i]->tuple(* tuple, fragmentId); + for(Uint32 j= 0; j < g_consumers.size(); j++) + 
g_consumers[j]->tuple(* tuple, fragmentId); } // while (tuple != NULL); if (res < 0) @@ -649,8 +649,8 @@ main(int argc, char** argv) while ((logEntry = logIter.getNextLogEntry(res= 0)) != 0) { if (checkSysTable(logEntry->m_table)) - for(Uint32 i= 0; i < g_consumers.size(); i++) - g_consumers[i]->logEntry(* logEntry); + for(Uint32 j= 0; j < g_consumers.size(); j++) + g_consumers[j]->logEntry(* logEntry); } if (res < 0) { @@ -691,9 +691,9 @@ main(int argc, char** argv) } } - for(Uint32 i= 0; i < g_consumers.size(); i++) + for(Uint32 j= 0; j < g_consumers.size(); j++) { - if (g_consumers[i]->has_temp_error()) + if (g_consumers[j]->has_temp_error()) { clearConsumers(); ndbout_c("\nRestore successful, but encountered temporary error, " diff --git a/storage/ndb/tools/select_all.cpp b/storage/ndb/tools/select_all.cpp index 84187894fb1..9adde165003 100644 --- a/storage/ndb/tools/select_all.cpp +++ b/storage/ndb/tools/select_all.cpp @@ -39,7 +39,7 @@ NDB_STD_OPTS_VARS; static const char* _dbname = "TEST_DB"; static const char* _delimiter = "\t"; -static int _unqualified, _header, _parallelism, _useHexFormat, _lock, +static int _header, _parallelism, _useHexFormat, _lock, _order, _descending; const char *load_default_groups[]= { "mysql_cluster",0 }; @@ -96,6 +96,7 @@ static struct my_option my_long_options[] = }; static void usage() { +#ifdef NOT_USED char desc[] = "tabname\n"\ "This program reads all records from one table in NDB Cluster\n"\ @@ -103,6 +104,7 @@ static void usage() "(It only print error messages if it encounters a permanent error.)\n"\ "It can also be used to dump the content of a table to file \n"\ " ex: select_all --no-header --delimiter=';' T4 > T4.data\n"; +#endif ndb_std_print_version(); print_defaults(MYSQL_CONFIG_NAME,load_default_groups); puts(""); diff --git a/storage/ndb/tools/select_count.cpp b/storage/ndb/tools/select_count.cpp index a133f7967f8..8933d803f53 100644 --- a/storage/ndb/tools/select_count.cpp +++ b/storage/ndb/tools/select_count.cpp @@ -55,9 +55,11 @@ static struct my_option my_long_options[] = }; static void usage() { +#ifdef NOT_USED char desc[] = "tabname1 ... tabnameN\n"\ "This program will count the number of records in tables\n"; +#endif ndb_std_print_version(); print_defaults(MYSQL_CONFIG_NAME,load_default_groups); puts(""); |
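Editor's note: the hunks above are predominantly compiler-warning cleanup in the NDB code — unused locals and parameters removed, shadowed variables renamed (idx, buf2, split2, nvp2), dead helpers and diagnostics fenced with #ifdef NOT_USED, empty default constructors added to pool/record structs, and LINT_INIT applied to values the compiler cannot see being initialised. One behavioural fix stands out in storage/ndb/src/common/util/SocketClient.cpp, where bind() now returns the errno value captured before NDB_CLOSE_SOCKET() rather than re-reading errno after the close. The sketch below illustrates that save-errno-before-cleanup pattern in isolation; it is not NDB code, and the function and option names (bind_any, SO_REUSEADDR on a throwaway socket) are illustrative assumptions only.

    #include <cerrno>
    #include <cstdio>
    #include <cstring>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    /*
      Illustrative only: report the errno of the first failing call,
      saving it into a local before close(), because close() may
      itself overwrite errno.
    */
    static int bind_any(unsigned short port)
    {
      int fd = socket(AF_INET, SOCK_STREAM, 0);
      if (fd < 0)
        return errno;

      const int on = 1;
      if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) == -1)
      {
        int ret = errno;   /* save first ...                     */
        close(fd);         /* ... cleanup may clobber errno      */
        return ret;        /* return the saved value, not errno  */
      }

      struct sockaddr_in local;
      memset(&local, 0, sizeof(local));
      local.sin_family = AF_INET;
      local.sin_addr.s_addr = htonl(INADDR_ANY);
      local.sin_port = htons(port);

      if (::bind(fd, (struct sockaddr*)&local, sizeof(local)) == -1)
      {
        int ret = errno;
        close(fd);
        return ret;
      }

      close(fd);
      return 0;
    }

    int main()
    {
      int rc = bind_any(0);               /* port 0: kernel picks a free port */
      std::printf("bind_any: %d\n", rc);
      return rc ? 1 : 0;
    }

The point of the original change is only the ordering: cleanup calls such as close() are permitted to modify errno, so the failure code must be copied to a local before any cleanup runs, exactly as the patched SocketClient::bind() does.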