author     unknown <monty@mysql.com/narttu.mysql.fi>   2006-11-30 21:56:03 +0200
committer  unknown <monty@mysql.com/narttu.mysql.fi>   2006-11-30 21:56:03 +0200
commit     25226de9ea52d06667c2c7fbd99f5cd014764241 (patch)
tree       0a9f9390874c91b9e4430fecfbbffa72967e7b69 /sql
parent     7191e775394db0392b78b07e2662c766eeace59c (diff)
Fixed compiler warnings
Don't assert if my_thread_end() is called twice (common case)
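
The second line refers to mysys/my_thr_init.c: per-thread cleanup now tolerates a repeat call instead of failing an assertion. Below is a minimal sketch of that idempotent-cleanup idea using plain pthreads; all names are invented for illustration and this is not the actual mysys code.

/* Sketch only: shows how a my_thread_end()-style function can be made safe
   to call twice.  Not the real mysys implementation. */
#include <pthread.h>
#include <stdlib.h>

struct thread_state_sketch
{
  int init;                          /* 1 while per-thread state is live */
  /* per-thread mutexes, DBUG state, ... would live here */
};

static pthread_key_t state_key;
static pthread_once_t state_key_once= PTHREAD_ONCE_INIT;

static void create_state_key(void) { pthread_key_create(&state_key, NULL); }

void my_thread_init_sketch(void)
{
  struct thread_state_sketch *st;
  pthread_once(&state_key_once, create_state_key);
  if (pthread_getspecific(state_key))
    return;                          /* already initialized for this thread */
  if ((st= calloc(1, sizeof(*st))))
  {
    st->init= 1;
    pthread_setspecific(state_key, st);
  }
}

void my_thread_end_sketch(void)
{
  struct thread_state_sketch *st= pthread_getspecific(state_key);
  if (!st || !st->init)              /* second call: nothing left, just return */
    return;
  /* destroy per-thread mutexes/conditions here */
  st->init= 0;
  pthread_setspecific(state_key, NULL);
  free(st);
}

The only point illustrated is the early return: once the per-thread state is gone or marked uninitialized, a second call becomes a harmless no-op rather than an assertion failure.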
client/mysql.cc:
Removed unused variables
client/mysqldump.c:
Fixed compiler warnings
client/mysqltest.c:
Fixed compiler warnings
cmd-line-utils/readline/bind.c:
Fixed compiler warnings
cmd-line-utils/readline/histfile.c:
Fixed compiler warnings
extra/replace.c:
Fixed compiler warning on Windows
extra/yassl/taocrypt/include/algebra.hpp:
Fixed compiler warnings
heap/hp_write.c:
Fixed compiler warnings
innobase/os/os0file.c:
Fixed compiler warnings
libmysql/libmysql.c:
Call my_end()/my_thread_end() last.
my_end() calls free_charsets(), which allowed me to move the call
myisam/myisampack.c:
Fixed compiler warnings
myisammrg/myrg_rkey.c:
Fixed compiler warnings
mysys/my_thr_init.c:
More comments
Don't assert if my_thread_end() is called twice (common case)
ndb/src/mgmapi/mgmapi.cpp:
Fixed compiler warnings
ndb/src/ndbapi/Ndb.cpp:
Fixed compiler warnings
ndb/src/ndbapi/NdbScanOperation.cpp:
Fixed compiler warnings
ndb/src/ndbapi/NdbTransaction.cpp:
Fixed compiler warnings
ndb/src/ndbapi/Ndblist.cpp:
Fixed compiler warnings
server-tools/instance-manager/guardian.cc:
Removed unused variable
server-tools/instance-manager/portability.h:
Removed duplicated symbol
sql/gen_lex_hash.cc:
Fixed compiler warning
sql/ha_archive.cc:
Fixed compiler warnings
sql/ha_ndbcluster.cc:
Fixed compiler warnings
sql/mysqld.cc:
Fixed compiler warnings
sql/sql_cache.cc:
Fixed compiler warnings
Fixed DBUG_PRINT strings to be consistent with 5.1
sql/tztime.cc:
Fixed compiler warnings
sql/uniques.cc:
Fixed compiler warnings
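
Most of the "Fixed compiler warnings" notes above correspond to hunks of the same shape in the diff below: a printf-style format string (usually in DBUG_PRINT or sql_print_error) did not match the type of its argument, and the fix is to choose a portable specifier and cast the argument to it. A stand-alone sketch of that pattern with plain printf follows; the variables are made up for the example.

#include <stdio.h>

typedef unsigned long long my_rows;   /* stand-in for a 64-bit row counter */

int main(void)
{
  my_rows rows_recorded= 42;
  size_t  path_limit= 107;

  /* Before: "%lld" with an unsigned 64-bit value and "%u" with a size_t
     trigger -Wformat warnings on many compiler/platform combinations:
     printf("recovered %lld rows, limit %u\n", rows_recorded, path_limit); */

  /* After: pick a specifier that exists everywhere and cast the argument
     to match it, the same style used throughout the sql/ hunks below. */
  printf("recovered %lu rows, limit %u\n",
         (unsigned long) rows_recorded, (unsigned int) path_limit);
  return 0;
}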
Diffstat (limited to 'sql')
-rw-r--r--  sql/gen_lex_hash.cc  |  5
-rw-r--r--  sql/ha_archive.cc    | 13
-rw-r--r--  sql/ha_ndbcluster.cc |  4
-rw-r--r--  sql/mysqld.cc        |  2
-rw-r--r--  sql/sql_cache.cc     | 38
-rw-r--r--  sql/tztime.cc        | 10
-rw-r--r--  sql/uniques.cc       |  4
7 files changed, 39 insertions, 37 deletions
diff --git a/sql/gen_lex_hash.cc b/sql/gen_lex_hash.cc
index 5a8bd48d699..2674b2e65f7 100644
--- a/sql/gen_lex_hash.cc
+++ b/sql/gen_lex_hash.cc
@@ -206,9 +206,10 @@ void insert_symbols()
 void insert_sql_functions()
 {
-  size_t i= 0;
+  int i= 0;
   SYMBOL *cur;
-  for (cur= sql_functions; i<array_elements(sql_functions); cur++, i++){
+  for (cur= sql_functions; i < (int) array_elements(sql_functions); cur++, i++)
+  {
     hash_lex_struct *root=
       get_hash_struct_by_len(&root_by_len,cur->length,&max_len);
     insert_into_hash(root,cur->name,0,-i-1,1);
diff --git a/sql/ha_archive.cc b/sql/ha_archive.cc
index 099f44c82f3..e3f979952e0 100644
--- a/sql/ha_archive.cc
+++ b/sql/ha_archive.cc
@@ -327,8 +327,8 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows)
   DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0]));
   DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1]));
-  DBUG_PRINT("ha_archive::read_meta_file", ("Rows %lld", *rows));
-  DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %lld", check_point));
+  DBUG_PRINT("ha_archive::read_meta_file", ("Rows %lu", (ulong) *rows));
+  DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %lu", (ulong) check_point));
   DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)meta_buffer[18]));
   if ((meta_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) ||
@@ -359,8 +359,8 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows, bool dirty)
   *(meta_buffer + 18)= (uchar)dirty;
   DBUG_PRINT("ha_archive::write_meta_file", ("Check %d", (uint)ARCHIVE_CHECK_HEADER));
   DBUG_PRINT("ha_archive::write_meta_file", ("Version %d", (uint)ARCHIVE_VERSION));
-  DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", (ulonglong)rows));
-  DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", check_point));
+  DBUG_PRINT("ha_archive::write_meta_file", ("Rows %lu", (ulong)rows));
+  DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %lu", (ulong) check_point));
   DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));
   VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
@@ -783,7 +783,7 @@ int ha_archive::rnd_init(bool scan)
   if (scan)
   {
     scan_rows= share->rows_recorded;
-    DBUG_PRINT("info", ("archive will retrieve %llu rows", scan_rows));
+    DBUG_PRINT("info", ("archive will retrieve %lu rows", (ulong) scan_rows));
     records= 0;
     /*
@@ -1019,7 +1019,8 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
         share->rows_recorded++;
       }
     }
-    DBUG_PRINT("info", ("recovered %llu archive rows", share->rows_recorded));
+    DBUG_PRINT("info", ("recovered %lu archive rows",
+                        (ulong) share->rows_recorded));
     my_free((char*)buf, MYF(0));
     if (rc && rc != HA_ERR_END_OF_FILE)
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 739fae79565..2ef16ddacbf 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -822,8 +822,8 @@ int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob,
       {
         char *buf= m_blobs_buffer + offset;
         uint32 len= 0xffffffff;  // Max uint32
-        DBUG_PRINT("value", ("read blob ptr=%x len=%u",
-                             (UintPtr)buf, (uint)blob_len));
+        DBUG_PRINT("value", ("read blob ptr: 0x%lx len: %u",
+                             (long)buf, (uint)blob_len));
         if (ndb_blob->readData(buf, len) != 0)
           DBUG_RETURN(-1);
         DBUG_ASSERT(len == blob_len);
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index dd483b0b158..d8d75a28b75 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -1538,7 +1538,7 @@ static void network_init(void)
     if (strlen(mysqld_unix_port) > (sizeof(UNIXaddr.sun_path) - 1))
     {
       sql_print_error("The socket file path is too long (> %u): %s",
-                      sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port);
+                      (uint) sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port);
       unireg_abort(1);
     }
     if ((unix_sock= socket(AF_UNIX, SOCK_STREAM, 0)) < 0)
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 5902374dff0..bd2f5c42695 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -902,7 +902,7 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu",
     if (thd->db_length)
     {
       memcpy(thd->query+thd->query_length+1, thd->db, thd->db_length);
-      DBUG_PRINT("qcache", ("database : %s length %u",
+      DBUG_PRINT("qcache", ("database: %s length: %u",
                             thd->db, thd->db_length));
     }
     else
@@ -1048,7 +1048,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
       (pre-space is removed in dispatch_command)
       First '/' looks like comment before command it is not
-      frequently appeared in real lihe, consequently we can
+      frequently appeared in real life, consequently we can
       check all such queries, too.
     */
     if ((my_toupper(system_charset_info, sql[i]) != 'S' ||
@@ -1077,7 +1077,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
     if (thd->db_length)
     {
       memcpy(sql+query_length+1, thd->db, thd->db_length);
-      DBUG_PRINT("qcache", ("database: '%s' length %u",
+      DBUG_PRINT("qcache", ("database: '%s' length: %u",
                             thd->db, thd->db_length));
     }
     else
@@ -1230,9 +1230,9 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu",
         if (engine_data != table->engine_data())
         {
           DBUG_PRINT("qcache",
-                     ("Handler require invalidation queries of %s.%s %lld-%lld",
-                      table_list.db, table_list.alias,
-                      engine_data, table->engine_data()));
+                     ("Handler require invalidation queries of %s.%s %lu-%lu",
+                      table_list.db, table_list.alias,
+                      (ulong) engine_data, (ulong) table->engine_data()));
           invalidate_table((byte *) table->db(), table->key_length());
         }
         else
@@ -1253,10 +1253,10 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu",
 #ifndef EMBEDDED_LIBRARY
   do
   {
-    DBUG_PRINT("qcache", ("Results (len: %lu used: %lu headers: %u)",
+    DBUG_PRINT("qcache", ("Results (len: %lu used: %lu headers: %lu)",
                           result_block->length, result_block->used,
-                          result_block->headers_len()+
-                          ALIGN_SIZE(sizeof(Query_cache_result))));
+                          (ulong) (result_block->headers_len()+
+                                   ALIGN_SIZE(sizeof(Query_cache_result)))));
     Query_cache_result *result = result_block->result();
     if (net_real_write(&thd->net, result->data(),
@@ -1338,7 +1338,7 @@ void Query_cache::invalidate(CHANGED_TABLE_LIST *tables_used)
   for (; tables_used; tables_used= tables_used->next)
   {
     invalidate_table((byte*) tables_used->key, tables_used->key_length);
-    DBUG_PRINT("qcache", (" db %s, table %s", tables_used->key,
+    DBUG_PRINT("qcache", ("db: %s table: %s", tables_used->key,
                           tables_used->key+
                           strlen(tables_used->key)+1));
   }
@@ -2349,7 +2349,7 @@ Query_cache::register_tables_from_list(TABLE_LIST *tables_used,
     {
       char key[MAX_DBKEY_LENGTH];
       uint key_length;
-      DBUG_PRINT("qcache", ("view %s, db %s",
+      DBUG_PRINT("qcache", ("view: %s db: %s",
                             tables_used->view_name.str,
                             tables_used->view_db.str));
       key_length= (uint) (strmov(strmov(key, tables_used->view_db.str) + 1,
@@ -2470,11 +2470,11 @@ Query_cache::insert_table(uint key_len, char *key,
       table_block->table()->engine_data() != engine_data)
   {
     DBUG_PRINT("qcache",
-               ("Handler require invalidation queries of %s.%s %lld-%lld",
+               ("Handler require invalidation queries of %s.%s %lu-%lu",
                 table_block->table()->db(),
                 table_block->table()->table(),
-                engine_data,
-                table_block->table()->engine_data()));
+                (ulong) engine_data,
+                (ulong) table_block->table()->engine_data()));
     /*
       as far as we delete all queries with this table, table block will be
       deleted, too
@@ -2972,7 +2972,7 @@ static TABLE_COUNTER_TYPE process_and_count_tables(TABLE_LIST *tables_used,
     table_count++;
     if (tables_used->view)
     {
-      DBUG_PRINT("qcache", ("view %s, db %s",
+      DBUG_PRINT("qcache", ("view: %s db: %s",
                             tables_used->view_name.str, tables_used->view_db.str));
       *tables_type|= HA_CACHE_TBL_NONTRANSACT;
@@ -3038,7 +3038,7 @@ Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex,
       lex->safe_to_cache_query)
   {
     DBUG_PRINT("qcache", ("options: %lx %lx type: %u",
-                          OPTION_TO_QUERY_CACHE,
+                          (long) OPTION_TO_QUERY_CACHE,
                           (long) lex->select_lex.options,
                           (int) thd->variables.query_cache_type));
@@ -3058,7 +3058,7 @@ Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex,
     DBUG_PRINT("qcache",
                ("not interesting query: %d or not cacheable, options %lx %lx type: %u",
                 (int) lex->sql_command,
-                OPTION_TO_QUERY_CACHE,
+                (long) OPTION_TO_QUERY_CACHE,
                 (long) lex->select_lex.options,
                 (int) thd->variables.query_cache_type));
     DBUG_RETURN(0);
@@ -3757,8 +3757,8 @@ my_bool Query_cache::check_integrity(bool locked)
       (((long)first_block) % (long)ALIGN_SIZE(1)))
   {
     DBUG_PRINT("error",
-               ("block 0x%lx do not aligned by %d", (ulong) block,
-                ALIGN_SIZE(1)));
+               ("block 0x%lx do not aligned by %d", (long) block,
+                (int) ALIGN_SIZE(1)));
     result = 1;
   }
   // Check memory allocation
diff --git a/sql/tztime.cc b/sql/tztime.cc
index fe23954bbb2..4becf4a9fcc 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -1729,9 +1729,9 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
     tz_leapcnt++;
     DBUG_PRINT("info",
-               ("time_zone_leap_second table: tz_leapcnt=%u tt_time=%lld offset=%ld",
-                tz_leapcnt, (longlong)tz_lsis[tz_leapcnt-1].ls_trans,
-                tz_lsis[tz_leapcnt-1].ls_corr));
+               ("time_zone_leap_second table: tz_leapcnt: %u tt_time: %lu offset=%ld",
+                tz_leapcnt, (ulong) tz_lsis[tz_leapcnt-1].ls_trans,
+                tz_lsis[tz_leapcnt-1].ls_corr));
     res= table->file->index_next(table->record[0]);
   }
@@ -2041,8 +2041,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
     tz_info->timecnt++;
     DBUG_PRINT("info",
-               ("time_zone_transition table: tz_id=%u tt_time=%lld tt_id=%u",
-                tzid, (longlong)ttime, ttid));
+               ("time_zone_transition table: tz_id: %u tt_time: %lu tt_id: %u",
+                tzid, (ulong) ttime, ttid));
     res= table->file->index_next_same(table->record[0],
                                       (byte*)table->field[0]->ptr, 4);
diff --git a/sql/uniques.cc b/sql/uniques.cc
index c0343c0c085..c7bdbdeb207 100644
--- a/sql/uniques.cc
+++ b/sql/uniques.cc
@@ -428,8 +428,8 @@ static bool merge_walk(uchar *merge_buffer, ulong merge_buffer_size,
   BUFFPEK_COMPARE_CONTEXT compare_context = { compare, compare_arg };
   QUEUE queue;
   if (end <= begin ||
-      merge_buffer_size < key_length * (end - begin + 1) ||
-      init_queue(&queue, end - begin, offsetof(BUFFPEK, key), 0,
+      merge_buffer_size < (ulong) (key_length * (end - begin + 1)) ||
+      init_queue(&queue, (uint) (end - begin), offsetof(BUFFPEK, key), 0,
                  buffpek_compare, &compare_context))
     return 1;
   /* we need space for one key when a piece of merge buffer is re-read */