-rw-r--r--  client/mysql.cc                                2
-rw-r--r--  client/mysqldump.c                             2
-rw-r--r--  client/mysqltest.c                            17
-rw-r--r--  cmd-line-utils/readline/bind.c                 4
-rw-r--r--  cmd-line-utils/readline/histfile.c             3
-rw-r--r--  extra/replace.c                                6
-rw-r--r--  extra/yassl/taocrypt/include/algebra.hpp       2
-rw-r--r--  heap/hp_write.c                                2
-rw-r--r--  innobase/os/os0file.c                          2
-rw-r--r--  libmysql/libmysql.c                           10
-rw-r--r--  myisam/myisampack.c                           14
-rw-r--r--  myisammrg/myrg_rkey.c                          2
-rw-r--r--  mysys/my_thr_init.c                           40
-rw-r--r--  ndb/src/mgmapi/mgmapi.cpp                      6
-rw-r--r--  ndb/src/ndbapi/Ndb.cpp                        23
-rw-r--r--  ndb/src/ndbapi/NdbScanOperation.cpp            6
-rw-r--r--  ndb/src/ndbapi/NdbTransaction.cpp              3
-rw-r--r--  ndb/src/ndbapi/Ndblist.cpp                     2
-rw-r--r--  server-tools/instance-manager/guardian.cc      2
-rw-r--r--  server-tools/instance-manager/portability.h    1
-rw-r--r--  sql/gen_lex_hash.cc                            5
-rw-r--r--  sql/ha_archive.cc                             13
-rw-r--r--  sql/ha_ndbcluster.cc                           4
-rw-r--r--  sql/mysqld.cc                                  2
-rw-r--r--  sql/sql_cache.cc                              38
-rw-r--r--  sql/tztime.cc                                 10
-rw-r--r--  sql/uniques.cc                                 4
27 files changed, 129 insertions, 96 deletions
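
Most of the hunks below follow one pattern: an explicit cast is added so that each printf/DBUG_PRINT argument matches its format specifier (for example, a pointer difference cast to int for %d, or a sizeof()/my_off_t value cast to ulong for %lu), silencing compiler warnings on platforms where ptrdiff_t, size_t and off_t are wider than int or long. The %llu-to-%lu rewrites appear to serve the same goal, presumably because %llu was not portable to every compiler these files still supported; the narrowing only affects debug-trace output. The stand-alone sketch below is an illustration of that cast pattern, not code from this changeset, and uses hypothetical buffer names:

#include <stdio.h>

int main(void)
{
  char buf[64];
  char *start= buf;
  char *end= buf + 17;

  /*
    end - start is a ptrdiff_t and sizeof(buf) is a size_t; on LP64
    systems both are 64-bit, while %d/%u expect int/unsigned int.
    The casts keep the arguments in sync with the format string.
  */
  printf("offset: %d\n", (int) (end - start));
  printf("buffer size: %u\n", (unsigned int) sizeof(buf));
  return 0;
}
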
diff --git a/client/mysql.cc b/client/mysql.cc
index d0965588b80..cd24e1d5ce2 100644
--- a/client/mysql.cc
+++ b/client/mysql.cc
@@ -2397,7 +2397,6 @@ print_table_data(MYSQL_RES *result)
const char *buffer;
uint data_length;
uint field_max_length;
- bool right_justified;
uint visible_length;
uint extra_padding;
@@ -3446,7 +3445,6 @@ server_version_string(MYSQL *mysql)
{
char *bufp = buf;
MYSQL_RES *result;
- MYSQL_ROW cur;
bufp = strnmov(buf, mysql_get_server_info(mysql), sizeof buf);
diff --git a/client/mysqldump.c b/client/mysqldump.c
index 5b4983b4a4e..05765ef2a02 100644
--- a/client/mysqldump.c
+++ b/client/mysqldump.c
@@ -1419,7 +1419,7 @@ static uint dump_routines_for_db(char *db)
routine body of other routines that are not the creator of!
*/
DBUG_PRINT("info",("length of body for %s row[2] '%s' is %d",
- routine_name, row[2], strlen(row[2])));
+ routine_name, row[2], (int) strlen(row[2])));
if (strlen(row[2]))
{
char *query_str= NULL;
diff --git a/client/mysqltest.c b/client/mysqltest.c
index 2063d31251d..318e5064ab8 100644
--- a/client/mysqltest.c
+++ b/client/mysqltest.c
@@ -946,8 +946,8 @@ int dyn_string_cmp(DYNAMIC_STRING* ds, const char *fname)
die(NullS);
if (!eval_result && (uint) stat_info.st_size != ds->length)
{
- DBUG_PRINT("info",("Size differs: result size: %u file size: %llu",
- ds->length, stat_info.st_size));
+ DBUG_PRINT("info",("Size differs: result size: %u file size: %lu",
+ ds->length, (ulong) stat_info.st_size));
DBUG_PRINT("info",("result: '%s'", ds->str));
DBUG_RETURN(RESULT_LENGTH_MISMATCH);
}
@@ -3130,14 +3130,15 @@ void do_connect(struct st_command *command)
else if (!strncmp(con_options, "COMPRESS", 8))
con_compress= 1;
else
- die("Illegal option to connect: %.*s", end - con_options, con_options);
+ die("Illegal option to connect: %.*s",
+ (int) (end - con_options), con_options);
/* Process next option */
con_options= end;
}
if (next_con == connections_end)
die("Connection limit exhausted, you can have max %d connections",
- (sizeof(connections)/sizeof(struct st_connection)));
+ (int) (sizeof(connections)/sizeof(struct st_connection)));
if (find_connection_by_name(ds_connection_name.str))
die("Connection %s already exists", ds_connection_name.str);
@@ -3458,10 +3459,10 @@ int read_line(char *buf, int size)
DBUG_RETURN(0);
}
else if ((c == '{' &&
- (!my_strnncoll_simple(charset_info, "while", 5,
- buf, min(5, p - buf), 0) ||
- !my_strnncoll_simple(charset_info, "if", 2,
- buf, min(2, p - buf), 0))))
+ (!my_strnncoll_simple(charset_info, (const uchar*) "while", 5,
+ (uchar*) buf, min(5, p - buf), 0) ||
+ !my_strnncoll_simple(charset_info, (const uchar*) "if", 2,
+ (uchar*) buf, min(2, p - buf), 0))))
{
/* Only if and while commands can be terminated by { */
*p++= c;
diff --git a/cmd-line-utils/readline/bind.c b/cmd-line-utils/readline/bind.c
index ab1136c7da5..568c3e8776a 100644
--- a/cmd-line-utils/readline/bind.c
+++ b/cmd-line-utils/readline/bind.c
@@ -337,6 +337,7 @@ rl_generic_bind (type, keyseq, data, map)
KEYMAP_ENTRY k;
k.function = 0;
+ k.type= 0;
/* If no keys to bind to, exit right away. */
if (!keyseq || !*keyseq)
@@ -735,7 +736,8 @@ _rl_read_file (filename, sizep)
file_size = (size_t)finfo.st_size;
/* check for overflow on very large files */
- if (file_size != finfo.st_size || file_size + 1 < file_size)
+ if ((long long) file_size != (long long) finfo.st_size ||
+ file_size + 1 < file_size)
{
if (file >= 0)
close (file);
diff --git a/cmd-line-utils/readline/histfile.c b/cmd-line-utils/readline/histfile.c
index 7d340b346d4..a7d6a68098e 100644
--- a/cmd-line-utils/readline/histfile.c
+++ b/cmd-line-utils/readline/histfile.c
@@ -184,7 +184,8 @@ read_history_range (filename, from, to)
file_size = (size_t)finfo.st_size;
/* check for overflow on very large files */
- if (file_size != finfo.st_size || file_size + 1 < file_size)
+ if ((long long) file_size != (long long) finfo.st_size ||
+ file_size + 1 < file_size)
{
errno = overflow_errno;
goto error_and_exit;
diff --git a/extra/replace.c b/extra/replace.c
index 9acf1620d49..cad3589819b 100644
--- a/extra/replace.c
+++ b/extra/replace.c
@@ -1052,8 +1052,10 @@ static int convert_file(REPLACE *rep, my_string name)
{
int error;
FILE *in,*out;
- char dir_buff[FN_REFLEN], tempname[FN_REFLEN];
- char link_name[FN_REFLEN], *org_name = name;
+ char dir_buff[FN_REFLEN], tempname[FN_REFLEN], *org_name = name;
+#ifdef HAVE_READLINK
+ char link_name[FN_REFLEN];
+#endif
File temp_file;
DBUG_ENTER("convert_file");
diff --git a/extra/yassl/taocrypt/include/algebra.hpp b/extra/yassl/taocrypt/include/algebra.hpp
index 07fc405f093..535ce2599c4 100644
--- a/extra/yassl/taocrypt/include/algebra.hpp
+++ b/extra/yassl/taocrypt/include/algebra.hpp
@@ -75,7 +75,7 @@ public:
typedef Integer Element;
AbstractRing() : AbstractGroup() {m_mg.m_pRing = this;}
- AbstractRing(const AbstractRing &source) {m_mg.m_pRing = this;}
+ AbstractRing(const AbstractRing &source) :AbstractGroup() {m_mg.m_pRing = this;}
AbstractRing& operator=(const AbstractRing &source) {return *this;}
virtual bool IsUnit(const Element &a) const =0;
diff --git a/heap/hp_write.c b/heap/hp_write.c
index 16f02999c93..c83ae65c966 100644
--- a/heap/hp_write.c
+++ b/heap/hp_write.c
@@ -68,7 +68,7 @@ int heap_write(HP_INFO *info, const byte *record)
DBUG_RETURN(0);
err:
- DBUG_PRINT("info",("Duplicate key: %d", keydef - share->keydef));
+ DBUG_PRINT("info",("Duplicate key: %d", (int) (keydef - share->keydef)));
info->errkey= keydef - share->keydef;
if (keydef->algorithm == HA_KEY_ALG_BTREE)
{
diff --git a/innobase/os/os0file.c b/innobase/os/os0file.c
index 075005c3611..5e5c4b19eb0 100644
--- a/innobase/os/os0file.c
+++ b/innobase/os/os0file.c
@@ -1718,7 +1718,7 @@ os_file_set_size(
}
/* Print about progress for each 100 MB written */
- if ((current_size + n_bytes) / (ib_longlong)(100 * 1024 * 1024)
+ if ((ib_longlong) (current_size + n_bytes) / (ib_longlong)(100 * 1024 * 1024)
!= current_size / (ib_longlong)(100 * 1024 * 1024)) {
fprintf(stderr, " %lu00",
diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c
index 6a592f64e49..8c4eb2279e1 100644
--- a/libmysql/libmysql.c
+++ b/libmysql/libmysql.c
@@ -175,6 +175,9 @@ void STDCALL mysql_server_end()
#ifdef EMBEDDED_LIBRARY
end_embedded_server();
#endif
+ finish_client_errs();
+ vio_end();
+
/* If library called my_init(), free memory allocated by it */
if (!org_my_init_done)
{
@@ -185,10 +188,11 @@ void STDCALL mysql_server_end()
#endif
}
else
+ {
+ free_charsets();
mysql_thread_end();
- finish_client_errs();
- free_charsets();
- vio_end();
+ }
+
mysql_client_init= org_my_init_done= 0;
#ifdef EMBEDDED_SERVER
if (stderror_file)
diff --git a/myisam/myisampack.c b/myisam/myisampack.c
index ab67ab60105..912ed22d278 100644
--- a/myisam/myisampack.c
+++ b/myisam/myisampack.c
@@ -1105,18 +1105,18 @@ static int get_statistic(PACK_MRG_INFO *mrg,HUFF_COUNTS *huff_counts)
my_off_t total_count;
char llbuf[32];
- DBUG_PRINT("info", ("column: %3u", count - huff_counts + 1));
+ DBUG_PRINT("info", ("column: %3u", (uint) (count - huff_counts + 1)));
if (verbose >= 2)
- VOID(printf("column: %3u\n", count - huff_counts + 1));
+ VOID(printf("column: %3u\n", (uint) (count - huff_counts + 1)));
if (count->tree_buff)
{
DBUG_PRINT("info", ("number of distinct values: %u",
- (count->tree_pos - count->tree_buff) /
- count->field_length));
+ (uint) ((count->tree_pos - count->tree_buff) /
+ count->field_length)));
if (verbose >= 2)
VOID(printf("number of distinct values: %u\n",
- (count->tree_pos - count->tree_buff) /
- count->field_length));
+ (uint) ((count->tree_pos - count->tree_buff) /
+ count->field_length)));
}
total_count= 0;
for (idx= 0; idx < 256; idx++)
@@ -2280,7 +2280,7 @@ static my_off_t write_huff_tree(HUFF_TREE *huff_tree, uint trees)
{
VOID(fflush(stdout));
VOID(fprintf(stderr, "error: Huffman code too long: %u/%u\n",
- bits, 8 * sizeof(code)));
+ bits, (uint) (8 * sizeof(code))));
errors++;
break;
}
diff --git a/myisammrg/myrg_rkey.c b/myisammrg/myrg_rkey.c
index f87b264081e..85464374d5d 100644
--- a/myisammrg/myrg_rkey.c
+++ b/myisammrg/myrg_rkey.c
@@ -88,7 +88,7 @@ int myrg_rkey(MYRG_INFO *info,byte *buf,int inx, const byte *key,
mi=(info->current_table=(MYRG_TABLE *)queue_top(&(info->by_key)))->table;
mi->once_flags|= RRND_PRESERVE_LASTINX;
DBUG_PRINT("info", ("using table no: %d",
- info->current_table - info->open_tables + 1));
+ (int) (info->current_table - info->open_tables + 1)));
DBUG_DUMP("result key", (byte*) mi->lastkey, mi->lastkey_length);
DBUG_RETURN(_myrg_mi_read_record(mi,buf));
}
diff --git a/mysys/my_thr_init.c b/mysys/my_thr_init.c
index 0e921bbe8a0..fcae18d4686 100644
--- a/mysys/my_thr_init.c
+++ b/mysys/my_thr_init.c
@@ -98,7 +98,7 @@ my_bool my_thread_global_init(void)
pthread_mutex_init(&THR_LOCK_net,MY_MUTEX_INIT_FAST);
pthread_mutex_init(&THR_LOCK_charset,MY_MUTEX_INIT_FAST);
pthread_mutex_init(&THR_LOCK_threads,MY_MUTEX_INIT_FAST);
- pthread_cond_init (&THR_COND_threads, NULL);
+ pthread_cond_init(&THR_COND_threads, NULL);
#if defined( __WIN__) || defined(OS2)
win_pthread_init();
#endif
@@ -131,7 +131,8 @@ void my_thread_global_end(void)
if (error == ETIMEDOUT || error == ETIME)
{
if (THR_thread_count)
- fprintf(stderr,"Error in my_thread_global_end(): %d threads didn't exit\n",
+ fprintf(stderr,
+ "Error in my_thread_global_end(): %d threads didn't exit\n",
THR_thread_count);
all_threads_killed= 0;
break;
@@ -170,10 +171,23 @@ void my_thread_global_end(void)
static long thread_id=0;
/*
- We can't use mutex_locks here if we are using windows as
- we may have compiled the program with SAFE_MUTEX, in which
- case the checking of mutex_locks will not work until
- the pthread_self thread specific variable is initialized.
+ Allocate thread specific memory for the thread, used by mysys and dbug
+
+ SYNOPSIS
+ my_thread_init()
+
+ NOTES
+ We can't use mutex_locks here if we are using windows as
+ we may have compiled the program with SAFE_MUTEX, in which
+ case the checking of mutex_locks will not work until
+ the pthread_self thread specific variable is initialized.
+
+ This function may be called multiple times for a thread, for example
+ if one uses my_init() followed by mysql_server_init().
+
+ RETURN
+ 0 ok
+ 1 Fatal error; mysys/dbug functions can't be used
*/
my_bool my_thread_init(void)
@@ -225,11 +239,22 @@ end:
}
+/*
+ Deallocate memory used by the thread for book-keeping
+
+ SYNOPSIS
+ my_thread_end()
+
+ NOTE
+ This may be called multiple times for a thread.
+ This happens for example when one calls 'mysql_server_init()'
+ mysql_server_end() and then ends with a mysql_end().
+*/
+
void my_thread_end(void)
{
struct st_my_thread_var *tmp;
tmp= my_pthread_getspecific(struct st_my_thread_var*,THR_KEY_mysys);
- DBUG_ASSERT(tmp);
#ifdef EXTRA_DEBUG_THREADS
fprintf(stderr,"my_thread_end(): tmp: 0x%lx thread_id=%ld\n",
@@ -272,7 +297,6 @@ void my_thread_end(void)
#if (!defined(__WIN__) && !defined(OS2)) || defined(USE_TLS)
pthread_setspecific(THR_KEY_mysys,0);
#endif
-
}
struct st_my_thread_var *_my_thread_var(void)
diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp
index df69684784a..8aa4f7213de 100644
--- a/ndb/src/mgmapi/mgmapi.cpp
+++ b/ndb/src/mgmapi/mgmapi.cpp
@@ -178,7 +178,7 @@ ndb_mgm_create_handle()
h->mgmd_version_minor= -1;
h->mgmd_version_build= -1;
- DBUG_PRINT("info", ("handle=0x%x", (UintPtr)h));
+ DBUG_PRINT("info", ("handle: 0x%lx", (long)h));
DBUG_RETURN(h);
}
@@ -195,7 +195,7 @@ int
ndb_mgm_set_connectstring(NdbMgmHandle handle, const char * mgmsrv)
{
DBUG_ENTER("ndb_mgm_set_connectstring");
- DBUG_PRINT("info", ("handle=0x%x", (UintPtr)handle));
+ DBUG_PRINT("info", ("handle: 0x%lx", (long)handle));
handle->cfg.~LocalConfig();
new (&(handle->cfg)) LocalConfig;
if (!handle->cfg.init(mgmsrv, 0) ||
@@ -237,7 +237,7 @@ ndb_mgm_destroy_handle(NdbMgmHandle * handle)
DBUG_ENTER("ndb_mgm_destroy_handle");
if(!handle)
DBUG_VOID_RETURN;
- DBUG_PRINT("info", ("handle=0x%x", (UintPtr)(* handle)));
+ DBUG_PRINT("info", ("handle: 0x%lx", (long)(* handle)));
/**
* important! only disconnect if connected
* other code relies on this
diff --git a/ndb/src/ndbapi/Ndb.cpp b/ndb/src/ndbapi/Ndb.cpp
index c701fbf77e0..59a204be2a8 100644
--- a/ndb/src/ndbapi/Ndb.cpp
+++ b/ndb/src/ndbapi/Ndb.cpp
@@ -777,7 +777,7 @@ Ndb::getAutoIncrementValue(const char* aTableName,
}
if (getTupleIdFromNdb(info, tupleId, cacheSize) == -1)
DBUG_RETURN(-1);
- DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId));
+ DBUG_PRINT("info", ("value %lu", (ulong) tupleId));
DBUG_RETURN(0);
}
@@ -798,7 +798,7 @@ Ndb::getAutoIncrementValue(const NdbDictionary::Table * aTable,
}
if (getTupleIdFromNdb(info, tupleId, cacheSize) == -1)
DBUG_RETURN(-1);
- DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId));
+ DBUG_PRINT("info", ("value %lu", (ulong)tupleId));
DBUG_RETURN(0);
}
@@ -811,7 +811,7 @@ Ndb::getTupleIdFromNdb(Ndb_local_table_info* info,
{
assert(info->m_first_tuple_id < info->m_last_tuple_id);
tupleId = ++info->m_first_tuple_id;
- DBUG_PRINT("info", ("next cached value %llu", (ulonglong)tupleId));
+ DBUG_PRINT("info", ("next cached value %lu", (ulong)tupleId));
}
else
{
@@ -845,7 +845,7 @@ Ndb::readAutoIncrementValue(const char* aTableName,
}
if (readTupleIdFromNdb(info, tupleId) == -1)
DBUG_RETURN(-1);
- DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId));
+ DBUG_PRINT("info", ("value %lu", (ulong)tupleId));
DBUG_RETURN(0);
}
@@ -866,7 +866,7 @@ Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable,
}
if (readTupleIdFromNdb(info, tupleId) == -1)
DBUG_RETURN(-1);
- DBUG_PRINT("info", ("value %llu", (ulonglong)tupleId));
+ DBUG_PRINT("info", ("value %lu", (ulong)tupleId));
DBUG_RETURN(0);
}
@@ -948,8 +948,8 @@ Ndb::setTupleIdInNdb(Ndb_local_table_info* info,
{
info->m_first_tuple_id = tupleId - 1;
DBUG_PRINT("info",
- ("Setting next auto increment cached value to %llu",
- (ulonglong)tupleId));
+ ("Setting next auto increment cached value to %lu",
+ (ulong)tupleId));
DBUG_RETURN(0);
}
}
@@ -976,7 +976,8 @@ Ndb::opTupleIdOnNdb(Ndb_local_table_info* info, Uint64 & opValue, Uint32 op)
{
DBUG_ENTER("Ndb::opTupleIdOnNdb");
Uint32 aTableId = info->m_table_impl->m_tableId;
- DBUG_PRINT("enter", ("table=%u value=%llu op=%u", aTableId, opValue, op));
+ DBUG_PRINT("enter", ("table: %u value: %lu op: %u",
+ aTableId, (ulong) opValue, op));
NdbTransaction* tConnection;
NdbOperation* tOperation= 0; // Compiler warning if not initialized
@@ -1050,8 +1051,8 @@ Ndb::opTupleIdOnNdb(Ndb_local_table_info* info, Uint64 & opValue, Uint32 op)
else
{
DBUG_PRINT("info",
- ("Setting next auto increment value (db) to %llu",
- (ulonglong)opValue));
+ ("Setting next auto increment value (db) to %lu",
+ (ulong)opValue));
info->m_first_tuple_id = info->m_last_tuple_id = opValue - 1;
}
break;
@@ -1247,7 +1248,7 @@ Ndb::internalize_index_name(const NdbTableImpl * table,
if (!table)
{
DBUG_PRINT("error", ("!table"));
- return ret;
+ DBUG_RETURN(ret);
}
if (fullyQualifiedNames)
diff --git a/ndb/src/ndbapi/NdbScanOperation.cpp b/ndb/src/ndbapi/NdbScanOperation.cpp
index a0322f09256..92a1e476b45 100644
--- a/ndb/src/ndbapi/NdbScanOperation.cpp
+++ b/ndb/src/ndbapi/NdbScanOperation.cpp
@@ -653,9 +653,9 @@ NdbScanOperation::doSend(int ProcessorId)
void NdbScanOperation::close(bool forceSend, bool releaseOp)
{
DBUG_ENTER("NdbScanOperation::close");
- DBUG_PRINT("enter", ("this=%x tcon=%x con=%x force=%d release=%d",
- (UintPtr)this,
- (UintPtr)m_transConnection, (UintPtr)theNdbCon,
+ DBUG_PRINT("enter", ("this: 0x%lx tcon: 0x%lx con: 0x%lx force: %d release: %d",
+ (long)this,
+ (long)m_transConnection, (long)theNdbCon,
forceSend, releaseOp));
if(m_transConnection){
diff --git a/ndb/src/ndbapi/NdbTransaction.cpp b/ndb/src/ndbapi/NdbTransaction.cpp
index 86dc92a86c1..36b1217c101 100644
--- a/ndb/src/ndbapi/NdbTransaction.cpp
+++ b/ndb/src/ndbapi/NdbTransaction.cpp
@@ -1007,7 +1007,8 @@ void
NdbTransaction::releaseExecutedScanOperation(NdbIndexScanOperation* cursorOp)
{
DBUG_ENTER("NdbTransaction::releaseExecutedScanOperation");
- DBUG_PRINT("enter", ("this=0x%x op=0x%x", (UintPtr)this, (UintPtr)cursorOp));
+ DBUG_PRINT("enter", ("this: 0x%lx op=0x%lx",
+ (long)this, (long)cursorOp));
releaseScanOperation(&m_firstExecutedScanOp, 0, cursorOp);
diff --git a/ndb/src/ndbapi/Ndblist.cpp b/ndb/src/ndbapi/Ndblist.cpp
index f82348fc91d..00ab4599fdf 100644
--- a/ndb/src/ndbapi/Ndblist.cpp
+++ b/ndb/src/ndbapi/Ndblist.cpp
@@ -361,7 +361,7 @@ void
Ndb::releaseScanOperation(NdbIndexScanOperation* aScanOperation)
{
DBUG_ENTER("Ndb::releaseScanOperation");
- DBUG_PRINT("enter", ("op=%x", (UintPtr)aScanOperation));
+ DBUG_PRINT("enter", ("op: 0x%lx", (long)aScanOperation));
#ifdef ndb_release_check_dup
{ NdbIndexScanOperation* tOp = theScanOpIdleList;
while (tOp != NULL) {
diff --git a/server-tools/instance-manager/guardian.cc b/server-tools/instance-manager/guardian.cc
index 3587a599160..0a037b5b98a 100644
--- a/server-tools/instance-manager/guardian.cc
+++ b/server-tools/instance-manager/guardian.cc
@@ -222,8 +222,6 @@ void Guardian_thread::run()
while (node != NULL)
{
- struct timespec timeout;
-
GUARD_NODE *current_node= (GUARD_NODE *) node->data;
instance= ((GUARD_NODE *) node->data)->instance;
process_instance(instance, current_node, &guarded_instances, node);
diff --git a/server-tools/instance-manager/portability.h b/server-tools/instance-manager/portability.h
index 23a5a5bd14c..7992a06c336 100644
--- a/server-tools/instance-manager/portability.h
+++ b/server-tools/instance-manager/portability.h
@@ -15,7 +15,6 @@
#define snprintf _snprintf
#define SIGKILL 9
-#define SHUT_RDWR 0x2
/*TODO: fix this */
#define PROTOCOL_VERSION 10
diff --git a/sql/gen_lex_hash.cc b/sql/gen_lex_hash.cc
index 5a8bd48d699..2674b2e65f7 100644
--- a/sql/gen_lex_hash.cc
+++ b/sql/gen_lex_hash.cc
@@ -206,9 +206,10 @@ void insert_symbols()
void insert_sql_functions()
{
- size_t i= 0;
+ int i= 0;
SYMBOL *cur;
- for (cur= sql_functions; i<array_elements(sql_functions); cur++, i++){
+ for (cur= sql_functions; i < (int) array_elements(sql_functions); cur++, i++)
+ {
hash_lex_struct *root=
get_hash_struct_by_len(&root_by_len,cur->length,&max_len);
insert_into_hash(root,cur->name,0,-i-1,1);
diff --git a/sql/ha_archive.cc b/sql/ha_archive.cc
index 099f44c82f3..e3f979952e0 100644
--- a/sql/ha_archive.cc
+++ b/sql/ha_archive.cc
@@ -327,8 +327,8 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows)
DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0]));
DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1]));
- DBUG_PRINT("ha_archive::read_meta_file", ("Rows %lld", *rows));
- DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %lld", check_point));
+ DBUG_PRINT("ha_archive::read_meta_file", ("Rows %lu", (ulong) *rows));
+ DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %lu", (ulong) check_point));
DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)meta_buffer[18]));
if ((meta_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) ||
@@ -359,8 +359,8 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows, bool dirty)
*(meta_buffer + 18)= (uchar)dirty;
DBUG_PRINT("ha_archive::write_meta_file", ("Check %d", (uint)ARCHIVE_CHECK_HEADER));
DBUG_PRINT("ha_archive::write_meta_file", ("Version %d", (uint)ARCHIVE_VERSION));
- DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", (ulonglong)rows));
- DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", check_point));
+ DBUG_PRINT("ha_archive::write_meta_file", ("Rows %lu", (ulong)rows));
+ DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %lu", (ulong) check_point));
DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));
VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
@@ -783,7 +783,7 @@ int ha_archive::rnd_init(bool scan)
if (scan)
{
scan_rows= share->rows_recorded;
- DBUG_PRINT("info", ("archive will retrieve %llu rows", scan_rows));
+ DBUG_PRINT("info", ("archive will retrieve %lu rows", (ulong) scan_rows));
records= 0;
/*
@@ -1019,7 +1019,8 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
share->rows_recorded++;
}
}
- DBUG_PRINT("info", ("recovered %llu archive rows", share->rows_recorded));
+ DBUG_PRINT("info", ("recovered %lu archive rows",
+ (ulong) share->rows_recorded));
my_free((char*)buf, MYF(0));
if (rc && rc != HA_ERR_END_OF_FILE)
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 739fae79565..2ef16ddacbf 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -822,8 +822,8 @@ int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob,
{
char *buf= m_blobs_buffer + offset;
uint32 len= 0xffffffff; // Max uint32
- DBUG_PRINT("value", ("read blob ptr=%x len=%u",
- (UintPtr)buf, (uint)blob_len));
+ DBUG_PRINT("value", ("read blob ptr: 0x%lx len: %u",
+ (long)buf, (uint)blob_len));
if (ndb_blob->readData(buf, len) != 0)
DBUG_RETURN(-1);
DBUG_ASSERT(len == blob_len);
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index dd483b0b158..d8d75a28b75 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -1538,7 +1538,7 @@ static void network_init(void)
if (strlen(mysqld_unix_port) > (sizeof(UNIXaddr.sun_path) - 1))
{
sql_print_error("The socket file path is too long (> %u): %s",
- sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port);
+ (uint) sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port);
unireg_abort(1);
}
if ((unix_sock= socket(AF_UNIX, SOCK_STREAM, 0)) < 0)
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 5902374dff0..bd2f5c42695 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -902,7 +902,7 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu",
if (thd->db_length)
{
memcpy(thd->query+thd->query_length+1, thd->db, thd->db_length);
- DBUG_PRINT("qcache", ("database : %s length %u",
+ DBUG_PRINT("qcache", ("database: %s length: %u",
thd->db, thd->db_length));
}
else
@@ -1048,7 +1048,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
(pre-space is removed in dispatch_command)
First '/' looks like comment before command it is not
- frequently appeared in real lihe, consequently we can
+ frequently appeared in real life, consequently we can
check all such queries, too.
*/
if ((my_toupper(system_charset_info, sql[i]) != 'S' ||
@@ -1077,7 +1077,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
if (thd->db_length)
{
memcpy(sql+query_length+1, thd->db, thd->db_length);
- DBUG_PRINT("qcache", ("database: '%s' length %u",
+ DBUG_PRINT("qcache", ("database: '%s' length: %u",
thd->db, thd->db_length));
}
else
@@ -1230,9 +1230,9 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu",
if (engine_data != table->engine_data())
{
DBUG_PRINT("qcache",
- ("Handler require invalidation queries of %s.%s %lld-%lld",
- table_list.db, table_list.alias,
- engine_data, table->engine_data()));
+ ("Handler require invalidation queries of %s.%s %lu-%lu",
+ table_list.db, table_list.alias,
+ (ulong) engine_data, (ulong) table->engine_data()));
invalidate_table((byte *) table->db(), table->key_length());
}
else
@@ -1253,10 +1253,10 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu",
#ifndef EMBEDDED_LIBRARY
do
{
- DBUG_PRINT("qcache", ("Results (len: %lu used: %lu headers: %u)",
+ DBUG_PRINT("qcache", ("Results (len: %lu used: %lu headers: %lu)",
result_block->length, result_block->used,
- result_block->headers_len()+
- ALIGN_SIZE(sizeof(Query_cache_result))));
+ (ulong) (result_block->headers_len()+
+ ALIGN_SIZE(sizeof(Query_cache_result)))));
Query_cache_result *result = result_block->result();
if (net_real_write(&thd->net, result->data(),
@@ -1338,7 +1338,7 @@ void Query_cache::invalidate(CHANGED_TABLE_LIST *tables_used)
for (; tables_used; tables_used= tables_used->next)
{
invalidate_table((byte*) tables_used->key, tables_used->key_length);
- DBUG_PRINT("qcache", (" db %s, table %s", tables_used->key,
+ DBUG_PRINT("qcache", ("db: %s table: %s", tables_used->key,
tables_used->key+
strlen(tables_used->key)+1));
}
@@ -2349,7 +2349,7 @@ Query_cache::register_tables_from_list(TABLE_LIST *tables_used,
{
char key[MAX_DBKEY_LENGTH];
uint key_length;
- DBUG_PRINT("qcache", ("view %s, db %s",
+ DBUG_PRINT("qcache", ("view: %s db: %s",
tables_used->view_name.str,
tables_used->view_db.str));
key_length= (uint) (strmov(strmov(key, tables_used->view_db.str) + 1,
@@ -2470,11 +2470,11 @@ Query_cache::insert_table(uint key_len, char *key,
table_block->table()->engine_data() != engine_data)
{
DBUG_PRINT("qcache",
- ("Handler require invalidation queries of %s.%s %lld-%lld",
+ ("Handler require invalidation queries of %s.%s %lu-%lu",
table_block->table()->db(),
table_block->table()->table(),
- engine_data,
- table_block->table()->engine_data()));
+ (ulong) engine_data,
+ (ulong) table_block->table()->engine_data()));
/*
as far as we delete all queries with this table, table block will be
deleted, too
@@ -2972,7 +2972,7 @@ static TABLE_COUNTER_TYPE process_and_count_tables(TABLE_LIST *tables_used,
table_count++;
if (tables_used->view)
{
- DBUG_PRINT("qcache", ("view %s, db %s",
+ DBUG_PRINT("qcache", ("view: %s db: %s",
tables_used->view_name.str,
tables_used->view_db.str));
*tables_type|= HA_CACHE_TBL_NONTRANSACT;
@@ -3038,7 +3038,7 @@ Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex,
lex->safe_to_cache_query)
{
DBUG_PRINT("qcache", ("options: %lx %lx type: %u",
- OPTION_TO_QUERY_CACHE,
+ (long) OPTION_TO_QUERY_CACHE,
(long) lex->select_lex.options,
(int) thd->variables.query_cache_type));
@@ -3058,7 +3058,7 @@ Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex,
DBUG_PRINT("qcache",
("not interesting query: %d or not cacheable, options %lx %lx type: %u",
(int) lex->sql_command,
- OPTION_TO_QUERY_CACHE,
+ (long) OPTION_TO_QUERY_CACHE,
(long) lex->select_lex.options,
(int) thd->variables.query_cache_type));
DBUG_RETURN(0);
@@ -3757,8 +3757,8 @@ my_bool Query_cache::check_integrity(bool locked)
(((long)first_block) % (long)ALIGN_SIZE(1)))
{
DBUG_PRINT("error",
- ("block 0x%lx do not aligned by %d", (ulong) block,
- ALIGN_SIZE(1)));
+ ("block 0x%lx do not aligned by %d", (long) block,
+ (int) ALIGN_SIZE(1)));
result = 1;
}
// Check memory allocation
diff --git a/sql/tztime.cc b/sql/tztime.cc
index fe23954bbb2..4becf4a9fcc 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -1729,9 +1729,9 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
tz_leapcnt++;
DBUG_PRINT("info",
- ("time_zone_leap_second table: tz_leapcnt=%u tt_time=%lld offset=%ld",
- tz_leapcnt, (longlong)tz_lsis[tz_leapcnt-1].ls_trans,
- tz_lsis[tz_leapcnt-1].ls_corr));
+ ("time_zone_leap_second table: tz_leapcnt: %u tt_time: %lu offset=%ld",
+ tz_leapcnt, (ulong) tz_lsis[tz_leapcnt-1].ls_trans,
+ tz_lsis[tz_leapcnt-1].ls_corr));
res= table->file->index_next(table->record[0]);
}
@@ -2041,8 +2041,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
tz_info->timecnt++;
DBUG_PRINT("info",
- ("time_zone_transition table: tz_id=%u tt_time=%lld tt_id=%u",
- tzid, (longlong)ttime, ttid));
+ ("time_zone_transition table: tz_id: %u tt_time: %lu tt_id: %u",
+ tzid, (ulong) ttime, ttid));
res= table->file->index_next_same(table->record[0],
(byte*)table->field[0]->ptr, 4);
diff --git a/sql/uniques.cc b/sql/uniques.cc
index c0343c0c085..c7bdbdeb207 100644
--- a/sql/uniques.cc
+++ b/sql/uniques.cc
@@ -428,8 +428,8 @@ static bool merge_walk(uchar *merge_buffer, ulong merge_buffer_size,
BUFFPEK_COMPARE_CONTEXT compare_context = { compare, compare_arg };
QUEUE queue;
if (end <= begin ||
- merge_buffer_size < key_length * (end - begin + 1) ||
- init_queue(&queue, end - begin, offsetof(BUFFPEK, key), 0,
+ merge_buffer_size < (ulong) (key_length * (end - begin + 1)) ||
+ init_queue(&queue, (uint) (end - begin), offsetof(BUFFPEK, key), 0,
buffpek_compare, &compare_context))
return 1;
/* we need space for one key when a piece of merge buffer is re-read */