summary refs log tree commit diff
path: root/sql
diff options
context:
space:
mode:
author: unknown <monty@mysql.com/nosik.monty.fi> 2006-11-27 01:47:38 +0200
committer: unknown <monty@mysql.com/nosik.monty.fi> 2006-11-27 01:47:38 +0200
commit: 788ad30f081bc55ff97ceed78ec7ff545e25ed99 (patch)
tree: b29f99476834178f395c188e1c4d352dd106fc01 /sql
parent: b8fe9fb47f6b30aa72c16642ca51d84650169817 (diff)
download: mariadb-git-788ad30f081bc55ff97ceed78ec7ff545e25ed99.tar.gz
Fixed a LOT of compiler warnings
Added missing DBUG_RETURN statements (in mysqldump.c) Added missing enums Fixed a lot of wrong DBUG_PRINT() statements, some of which could cause crashes Removed usage of %lld and %p in printf strings as these are not portable or produces different results on different systems. client/mysqldump.c: Fixed some compiler warnings Added some missing DBUG_RETURN Remove copying of 'cluster' database client/mysqlslap.c: Fixed compiler warnings client/mysqltest.c: After merge fix extra/yassl/taocrypt/include/algebra.hpp: Removed compiler warning mysql-test/include/im_check_env.inc: Fixed race condition (mysqld1 could report 'starting' or 'online' mysql-test/mysql-test-run.pl: After merge fixes Added missing directory to LD_LIBRARY_PATH mysql-test/r/ctype_cp1250_ch.result: After merge fix mysql-test/r/im_cmd_line.result: Fixed race condition mysql-test/r/im_daemon_life_cycle.result: Fixed race condition mysql-test/r/im_instance_conf.result: Fixed race condition mysql-test/r/im_life_cycle.result: Fixed race condition mysql-test/r/im_utils.result: Fixed race condition mysql-test/r/log_tables.result: Fixed wrong result mysql-test/t/disabled.def: Disabled ndb_restore_partion, as ndb_restore_compate caused it to fail, becasue of table 'cluster/def/schema' which is stored in ndb_backup50 mysys/my_compress.c: Removed compiler warnings mysys/my_getopt.c: Ensure we always have at least one space between option name and value plugin/fulltext/plugin_example.c: Removed compiler warnings server-tools/instance-manager/mysql_connection.cc: After merge fix sql/event_data_objects.cc: Fixed compiler warnings Fixed platform compatibility issues (%lld is not portable) sql/event_data_objects.h: Fixed compiler warnings sql/event_db_repository.cc: Fixed compiler warnings sql/event_queue.cc: Fixed compiler warnings sql/event_scheduler.cc: Fixed compiler warnings sql/events.cc: Fixed compiler warnings sql/field.cc: Fixed compiler warnings sql/ha_ndbcluster.cc: Fixed compiler warnings 
sql/ha_ndbcluster_binlog.cc: Fixed compiler warnings sql/ha_partition.cc: Fixed compiler warnings sql/handler.cc: Fixed compiler warnings sql/item_cmpfunc.cc: Fixed DBUG_PRINT style sql/item_func.cc: Fixed compiler warnings sql/log.cc: Fixed compiler warnings sql/log_event.cc: Fixed compiler warnings sql/mysqld.cc: Fixed compiler warnings sql/opt_range.cc: Fixed compiler warnings sql/repl_failsafe.cc: Indentation fixes sql/rpl_rli.cc: Fixed compiler warnings sql/rpl_tblmap.cc: Fixed compiler warnings sql/set_var.cc: Fixed compiler warnings sql/slave.cc: Fixed compiler warnings sql/sp_head.cc: Fixed compiler warnings sql/sql_base.cc: Fixed compiler warnings Fixed indentation sql/sql_binlog.cc: Fixed compiler warnings sql/sql_cache.cc: Fixed compiler warnings sql/sql_class.cc: Fixed compiler warnings sql/sql_handler.cc: Fixed compiler warnings sql/sql_lex.cc: Fixed compiler warnings sql/sql_parse.cc: Fixed compiler warnings sql/sql_partition.cc: Fixed compiler warnings sql/sql_prepare.cc: Fixed compiler warnings sql/sql_table.cc: Fixed compiler warnings sql/sql_test.cc: Fixed DBUG_PRINT style sql/sql_trigger.cc: Fixed DBUG_PRINT style sql/table.cc: Fixed compiler warnings storage/federated/ha_federated.cc: Fixed compiler warnings storage/myisam/mi_rsamepos.c: Fixed compiler warnings storage/ndb/include/ndb_global.h.in: After merge fix storage/ndb/include/util/NdbOut.hpp: Inform gcc that ndbout_c takes a printf() string as argument storage/ndb/include/util/SimpleProperties.hpp: After merge fixes storage/ndb/src/kernel/blocks/backup/Backup.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp: Fixed compiler warnings Fixed usage of uninitialized value (Got help from Jonas with patch) 
storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/lgman.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/pgman.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/restore.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/dbtup/DbtupDiskAlloc.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/dbtup/DbtupFixAlloc.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp: Fixed compiler warnings storage/ndb/src/kernel/blocks/suma/Suma.cpp: Fixed compiler warnings Added missing enum's to switch storage/ndb/src/kernel/vm/Configuration.cpp: Fixed compiler warnings storage/ndb/src/kernel/vm/DLHashTable.hpp: Fixed compiler warnings storage/ndb/src/kernel/vm/RWPool.hpp: Fixed compiler warnings storage/ndb/src/kernel/vm/SimulatedBlock.cpp: Fixed compiler warnings storage/ndb/src/kernel/vm/WOPool.hpp: Fixed compiler warnings storage/ndb/src/kernel/vm/ndbd_malloc_impl.cpp: Fixed compiler warnings storage/ndb/src/mgmclient/CommandInterpreter.cpp: Fixed compiler warnings storage/ndb/src/mgmsrv/MgmtSrvr.cpp: Fixed compiler warnings storage/ndb/src/ndbapi/DictCache.cpp: Fixed compiler warnings storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp: Fixed compiler warnings storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp: Fixed compiler warnings storage/ndb/src/ndbapi/NdbIndexOperation.cpp: Fixed compiler warnings storage/ndb/src/ndbapi/NdbIndexStat.cpp: Initialize possible uninitialized variable storage/ndb/src/ndbapi/NdbOperationInt.cpp: Fixed compiler warnings storage/ndb/src/ndbapi/NdbRecAttr.cpp: Added missing enum's (To avoid compiler warnings) storage/ndb/src/ndbapi/NdbScanOperation.cpp: Fixed compiler warnings storage/ndb/src/ndbapi/ObjectMap.hpp: Fixed 
compiler warnings storage/ndb/tools/desc.cpp: Fixed compiler warnings storage/ndb/tools/restore/Restore.cpp: Fixed compiler warnings storage/ndb/tools/restore/consumer_restore.cpp: Fixed compiler warnings unittest/mytap/t/basic-t.c: Fixed compiler warnings unittest/mytap/tap.c: Fixed compiler warnings
Diffstat (limited to 'sql')
-rw-r--r--  sql/event_data_objects.cc    67
-rw-r--r--  sql/event_data_objects.h      4
-rw-r--r--  sql/event_db_repository.cc    2
-rw-r--r--  sql/event_queue.cc           46
-rw-r--r--  sql/event_scheduler.cc       21
-rw-r--r--  sql/events.cc                 2
-rw-r--r--  sql/field.cc                  4
-rw-r--r--  sql/ha_ndbcluster.cc        108
-rw-r--r--  sql/ha_ndbcluster_binlog.cc  98
-rw-r--r--  sql/ha_partition.cc           6
-rw-r--r--  sql/handler.cc               14
-rw-r--r--  sql/item_cmpfunc.cc           2
-rw-r--r--  sql/item_func.cc              2
-rw-r--r--  sql/log.cc                   41
-rw-r--r--  sql/log_event.cc             32
-rw-r--r--  sql/mysqld.cc                 2
-rw-r--r--  sql/opt_range.cc              2
-rw-r--r--  sql/repl_failsafe.cc          4
-rw-r--r--  sql/rpl_rli.cc                4
-rw-r--r--  sql/rpl_tblmap.cc            16
-rw-r--r--  sql/set_var.cc                2
-rw-r--r--  sql/slave.cc                 14
-rw-r--r--  sql/sp_head.cc                2
-rw-r--r--  sql/sql_base.cc              25
-rw-r--r--  sql/sql_binlog.cc            16
-rw-r--r--  sql/sql_cache.cc              6
-rw-r--r--  sql/sql_class.cc              6
-rw-r--r--  sql/sql_handler.cc            7
-rw-r--r--  sql/sql_lex.cc                2
-rw-r--r--  sql/sql_parse.cc              4
-rw-r--r--  sql/sql_partition.cc          2
-rw-r--r--  sql/sql_prepare.cc           15
-rw-r--r--  sql/sql_table.cc              2
-rw-r--r--  sql/sql_test.cc               9
-rw-r--r--  sql/sql_trigger.cc            4
-rw-r--r--  sql/table.cc                  6
36 files changed, 316 insertions, 283 deletions
diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc
index afd10350bb5..397688d3bff 100644
--- a/sql/event_data_objects.cc
+++ b/sql/event_data_objects.cc
@@ -124,8 +124,8 @@ void
Event_parse_data::init_body(THD *thd)
{
DBUG_ENTER("Event_parse_data::init_body");
- DBUG_PRINT("info", ("body=[%s] body_begin=0x%lx end=0x%lx", body_begin,
- body_begin, thd->lex->ptr));
+ DBUG_PRINT("info", ("body: '%s' body_begin: 0x%lx end: 0x%lx", body_begin,
+ (long) body_begin, (long) thd->lex->ptr));
body.length= thd->lex->ptr - body_begin;
const uchar *body_end= body_begin + body.length - 1;
@@ -399,8 +399,9 @@ Event_parse_data::init_starts(THD *thd)
thd->variables.time_zone->gmt_sec_to_TIME(&time_tmp,
(my_time_t) thd->query_start());
- DBUG_PRINT("info",("now =%lld", TIME_to_ulonglong_datetime(&time_tmp)));
- DBUG_PRINT("info",("starts=%lld", TIME_to_ulonglong_datetime(&ltime)));
+ DBUG_PRINT("info",("now: %ld starts: %ld",
+ (long) TIME_to_ulonglong_datetime(&time_tmp),
+ (long) TIME_to_ulonglong_datetime(&ltime)));
if (TIME_to_ulonglong_datetime(&ltime) <
TIME_to_ulonglong_datetime(&time_tmp))
goto wrong_value;
@@ -536,8 +537,9 @@ Event_parse_data::check_parse_data(THD *thd)
{
bool ret;
DBUG_ENTER("Event_parse_data::check_parse_data");
- DBUG_PRINT("info", ("execute_at=0x%lx expr=0x%lx starts=0x%lx ends=0x%lx",
- item_execute_at, item_expression, item_starts, item_ends));
+ DBUG_PRINT("info", ("execute_at: 0x%lx expr=0x%lx starts=0x%lx ends=0x%lx",
+ (long) item_execute_at, (long) item_expression,
+ (long) item_starts, (long) item_ends));
init_name(thd, identifier);
@@ -564,9 +566,9 @@ Event_parse_data::init_definer(THD *thd)
int definer_host_len;
DBUG_ENTER("Event_parse_data::init_definer");
- DBUG_PRINT("info",("init definer_user thd->mem_root=0x%lx "
- "thd->sec_ctx->priv_user=0x%lx", thd->mem_root,
- thd->security_ctx->priv_user));
+ DBUG_PRINT("info",("init definer_user thd->mem_root: 0x%lx "
+ "thd->sec_ctx->priv_user: 0x%lx", (long) thd->mem_root,
+ (long) thd->security_ctx->priv_user));
definer_user_len= strlen(thd->security_ctx->priv_user);
definer_host_len= strlen(thd->security_ctx->priv_host);
@@ -1032,8 +1034,9 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec,
TIME tmp;
longlong months=0, seconds=0;
DBUG_ENTER("get_next_time");
- DBUG_PRINT("enter", ("start=%llu now=%llu", TIME_to_ulonglong_datetime(start),
- TIME_to_ulonglong_datetime(time_now)));
+ DBUG_PRINT("enter", ("start: %lu now: %lu",
+ (long) TIME_to_ulonglong_datetime(start),
+ (long) TIME_to_ulonglong_datetime(time_now)));
bzero(&interval, sizeof(interval));
@@ -1081,7 +1084,7 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec,
case INTERVAL_LAST:
DBUG_ASSERT(0);
}
- DBUG_PRINT("info", ("seconds=%ld months=%ld", seconds, months));
+ DBUG_PRINT("info", ("seconds: %ld months: %ld", (long) seconds, (long) months));
if (seconds)
{
longlong seconds_diff;
@@ -1099,14 +1102,14 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec,
event two times for the same time
get the next exec if the modulus is not
*/
- DBUG_PRINT("info", ("multiplier=%d", multiplier));
+ DBUG_PRINT("info", ("multiplier: %d", multiplier));
if (seconds_diff % seconds || (!seconds_diff && last_exec->year) ||
TIME_to_ulonglong_datetime(time_now) ==
TIME_to_ulonglong_datetime(last_exec))
++multiplier;
interval.second= seconds * multiplier;
- DBUG_PRINT("info", ("multiplier=%u interval.second=%u", multiplier,
- interval.second));
+ DBUG_PRINT("info", ("multiplier: %lu interval.second: %lu", (ulong) multiplier,
+ (ulong) interval.second));
tmp= *start;
if (!(ret= date_add_interval(&tmp, INTERVAL_SECOND, interval)))
*next= tmp;
@@ -1158,7 +1161,7 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec,
}
done:
- DBUG_PRINT("info", ("next=%llu", TIME_to_ulonglong_datetime(next)));
+ DBUG_PRINT("info", ("next: %lu", (long) TIME_to_ulonglong_datetime(next)));
DBUG_RETURN(ret);
}
@@ -1183,17 +1186,17 @@ Event_queue_element::compute_next_execution_time()
{
TIME time_now;
int tmp;
-
DBUG_ENTER("Event_queue_element::compute_next_execution_time");
- DBUG_PRINT("enter", ("starts=%llu ends=%llu last_executed=%llu this=0x%lx",
- TIME_to_ulonglong_datetime(&starts),
- TIME_to_ulonglong_datetime(&ends),
- TIME_to_ulonglong_datetime(&last_executed), this));
+ DBUG_PRINT("enter", ("starts: %lu ends: %lu last_executed: %lu this: 0x%lx",
+ (long) TIME_to_ulonglong_datetime(&starts),
+ (long) TIME_to_ulonglong_datetime(&ends),
+ (long) TIME_to_ulonglong_datetime(&last_executed),
+ (long) this));
if (status == Event_queue_element::DISABLED)
{
DBUG_PRINT("compute_next_execution_time",
- ("Event %s is DISABLED", name.str));
+ ("Event %s is DISABLED", name.str));
goto ret;
}
/* If one-time, no need to do computation */
@@ -1203,9 +1206,9 @@ Event_queue_element::compute_next_execution_time()
if (last_executed.year)
{
DBUG_PRINT("info",("One-time event %s.%s of was already executed",
- dbname.str, name.str, definer.str));
+ dbname.str, name.str));
dropped= (on_completion == Event_queue_element::ON_COMPLETION_DROP);
- DBUG_PRINT("info",("One-time event will be dropped=%d.", dropped));
+ DBUG_PRINT("info",("One-time event will be dropped: %d.", dropped));
status= Event_queue_element::DISABLED;
status_changed= TRUE;
@@ -1226,7 +1229,7 @@ Event_queue_element::compute_next_execution_time()
execute_at_null= TRUE;
if (on_completion == Event_queue_element::ON_COMPLETION_DROP)
dropped= TRUE;
- DBUG_PRINT("info", ("Dropped=%d", dropped));
+ DBUG_PRINT("info", ("Dropped: %d", dropped));
status= Event_queue_element::DISABLED;
status_changed= TRUE;
@@ -1400,8 +1403,8 @@ Event_queue_element::compute_next_execution_time()
goto ret;
}
ret:
- DBUG_PRINT("info", ("ret=0 execute_at=%llu",
- TIME_to_ulonglong_datetime(&execute_at)));
+ DBUG_PRINT("info", ("ret: 0 execute_at: %lu",
+ (long) TIME_to_ulonglong_datetime(&execute_at)));
DBUG_RETURN(FALSE);
err:
DBUG_PRINT("info", ("ret=1"));
@@ -1688,7 +1691,7 @@ done:
thd->end_statement();
thd->cleanup_after_query();
- DBUG_PRINT("info", ("EXECUTED %s.%s ret=%d", dbname.str, name.str, ret));
+ DBUG_PRINT("info", ("EXECUTED %s.%s ret: %d", dbname.str, name.str, ret));
DBUG_RETURN(ret);
}
@@ -1752,7 +1755,7 @@ Event_job_data::compile(THD *thd, MEM_ROOT *mem_root)
thd->update_charset();
- DBUG_PRINT("info",("old_sql_mode=%d new_sql_mode=%d",old_sql_mode, sql_mode));
+ DBUG_PRINT("info",("old_sql_mode: %lu new_sql_mode: %lu",old_sql_mode, sql_mode));
thd->variables.sql_mode= this->sql_mode;
/* Change the memory root for the execution time */
if (mem_root)
@@ -1769,7 +1772,7 @@ Event_job_data::compile(THD *thd, MEM_ROOT *mem_root)
thd->query= show_create.c_ptr_safe();
thd->query_length= show_create.length();
- DBUG_PRINT("info", ("query:%s",thd->query));
+ DBUG_PRINT("info", ("query: %s",thd->query));
event_change_security_context(thd, definer_user, definer_host, dbname,
&save_ctx);
@@ -1777,14 +1780,14 @@ Event_job_data::compile(THD *thd, MEM_ROOT *mem_root)
mysql_init_query(thd, (uchar*) thd->query, thd->query_length);
if (MYSQLparse((void *)thd) || thd->is_fatal_error)
{
- DBUG_PRINT("error", ("error during compile or thd->is_fatal_error=%d",
+ DBUG_PRINT("error", ("error during compile or thd->is_fatal_error: %d",
thd->is_fatal_error));
/*
Free lex associated resources
QQ: Do we really need all this stuff here?
*/
sql_print_error("SCHEDULER: Error during compilation of %s.%s or "
- "thd->is_fatal_error=%d",
+ "thd->is_fatal_error: %d",
dbname.str, name.str, thd->is_fatal_error);
lex.unit.cleanup();
diff --git a/sql/event_data_objects.h b/sql/event_data_objects.h
index e7e96d299fb..2da39c2158b 100644
--- a/sql/event_data_objects.h
+++ b/sql/event_data_objects.h
@@ -111,14 +111,14 @@ public:
void *p;
DBUG_ENTER("Event_queue_element::new(size)");
p= my_malloc(size, MYF(0));
- DBUG_PRINT("info", ("alloc_ptr=0x%lx", p));
+ DBUG_PRINT("info", ("alloc_ptr: 0x%lx", (long) p));
DBUG_RETURN(p);
}
static void operator delete(void *ptr, size_t size)
{
DBUG_ENTER("Event_queue_element::delete(ptr,size)");
- DBUG_PRINT("enter", ("free_ptr=0x%lx", ptr));
+ DBUG_PRINT("enter", ("free_ptr: 0x%lx", (long) ptr));
TRASH(ptr, size);
my_free((gptr) ptr, MYF(0));
DBUG_VOID_RETURN;
diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc
index 3d30aff669b..367c5bae579 100644
--- a/sql/event_db_repository.cc
+++ b/sql/event_db_repository.cc
@@ -958,7 +958,7 @@ Event_db_repository::load_named_event(THD *thd, LEX_STRING dbname,
Open_tables_state backup;
DBUG_ENTER("Event_db_repository::load_named_event");
- DBUG_PRINT("enter",("thd=0x%lx name:%*s",thd, name.length, name.str));
+ DBUG_PRINT("enter",("thd: 0x%lx name: %*s", (long) thd, name.length, name.str));
thd->reset_n_backup_open_tables_state(&backup);
diff --git a/sql/event_queue.cc b/sql/event_queue.cc
index 527a59018a8..7ec665fcd5f 100644
--- a/sql/event_queue.cc
+++ b/sql/event_queue.cc
@@ -143,7 +143,7 @@ Event_queue::init_queue(THD *thd, Event_db_repository *db_repo)
struct event_queue_param *event_queue_param_value= NULL;
DBUG_ENTER("Event_queue::init_queue");
- DBUG_PRINT("enter", ("this=0x%lx", this));
+ DBUG_PRINT("enter", ("this: 0x%lx", (long) this));
LOCK_QUEUE_DATA();
db_repository= db_repo;
@@ -218,7 +218,7 @@ Event_queue::create_event(THD *thd, LEX_STRING dbname, LEX_STRING name)
int res;
Event_queue_element *new_element;
DBUG_ENTER("Event_queue::create_event");
- DBUG_PRINT("enter", ("thd=0x%lx et=%s.%s",thd, dbname.str, name.str));
+ DBUG_PRINT("enter", ("thd: 0x%lx et=%s.%s", (long) thd, dbname.str, name.str));
new_element= new Event_queue_element();
res= db_repository->load_named_event(thd, dbname, name, new_element);
@@ -229,7 +229,7 @@ Event_queue::create_event(THD *thd, LEX_STRING dbname, LEX_STRING name)
new_element->compute_next_execution_time();
LOCK_QUEUE_DATA();
- DBUG_PRINT("info", ("new event in the queue 0x%lx", new_element));
+ DBUG_PRINT("info", ("new event in the queue: 0x%lx", (long) new_element));
queue_insert_safe(&queue, (byte *) new_element);
dbug_dump_queue(thd->query_start());
pthread_cond_broadcast(&COND_queue_state);
@@ -264,7 +264,7 @@ Event_queue::update_event(THD *thd, LEX_STRING dbname, LEX_STRING name,
Event_queue_element *new_element;
DBUG_ENTER("Event_queue::update_event");
- DBUG_PRINT("enter", ("thd=0x%lx et=[%s.%s]", thd, dbname.str, name.str));
+ DBUG_PRINT("enter", ("thd: 0x%lx et=[%s.%s]", (long) thd, dbname.str, name.str));
new_element= new Event_queue_element();
@@ -294,7 +294,7 @@ Event_queue::update_event(THD *thd, LEX_STRING dbname, LEX_STRING name,
/* If not disabled event */
if (new_element)
{
- DBUG_PRINT("info", ("new event in the Q 0x%lx", new_element));
+ DBUG_PRINT("info", ("new event in the queue: 0x%lx", (long) new_element));
queue_insert_safe(&queue, (byte *) new_element);
pthread_cond_broadcast(&COND_queue_state);
}
@@ -322,7 +322,8 @@ void
Event_queue::drop_event(THD *thd, LEX_STRING dbname, LEX_STRING name)
{
DBUG_ENTER("Event_queue::drop_event");
- DBUG_PRINT("enter", ("thd=0x%lx db=%s name=%s", thd, dbname.str, name.str));
+ DBUG_PRINT("enter", ("thd: 0x%lx db :%s name: %s", (long) thd,
+ dbname.str, name.str));
LOCK_QUEUE_DATA();
find_n_remove_event(dbname, name);
@@ -484,7 +485,7 @@ Event_queue::load_events_from_db(THD *thd)
bool clean_the_queue= TRUE;
DBUG_ENTER("Event_queue::load_events_from_db");
- DBUG_PRINT("enter", ("thd=0x%lx", thd));
+ DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd));
if ((ret= db_repository->open_event_table(thd, TL_READ, &table)))
{
@@ -555,7 +556,6 @@ Event_queue::load_events_from_db(THD *thd)
goto end;
}
- DBUG_PRINT("load_events_from_db", ("Adding 0x%lx to the exec list."));
queue_insert_safe(&queue, (byte *) et);
count++;
}
@@ -663,16 +663,20 @@ Event_queue::dbug_dump_queue(time_t now)
for (i = 0; i < queue.elements; i++)
{
et= ((Event_queue_element*)queue_element(&queue, i));
- DBUG_PRINT("info",("et=0x%lx db=%s name=%s",et, et->dbname.str, et->name.str));
- DBUG_PRINT("info", ("exec_at=%llu starts=%llu ends=%llu execs_so_far=%u"
- " expr=%lld et.exec_at=%d now=%d (et.exec_at - now)=%d if=%d",
- TIME_to_ulonglong_datetime(&et->execute_at),
- TIME_to_ulonglong_datetime(&et->starts),
- TIME_to_ulonglong_datetime(&et->ends),
- et->execution_count,
- et->expression, sec_since_epoch_TIME(&et->execute_at), now,
- (int)(sec_since_epoch_TIME(&et->execute_at) - now),
- sec_since_epoch_TIME(&et->execute_at) <= now));
+ DBUG_PRINT("info", ("et: 0x%lx name: %s.%s", (long) et,
+ et->dbname.str, et->name.str));
+ DBUG_PRINT("info", ("exec_at: %lu starts: %lu ends: %lu execs_so_far: %u "
+ "expr: %ld et.exec_at: %ld now: %ld "
+ "(et.exec_at - now): %d if: %d",
+ (long) TIME_to_ulonglong_datetime(&et->execute_at),
+ (long) TIME_to_ulonglong_datetime(&et->starts),
+ (long) TIME_to_ulonglong_datetime(&et->ends),
+ et->execution_count,
+ (long) et->expression,
+ (long) (sec_since_epoch_TIME(&et->execute_at)),
+ (long) now,
+ (int) (sec_since_epoch_TIME(&et->execute_at) - now),
+ sec_since_epoch_TIME(&et->execute_at) <= now));
}
DBUG_VOID_RETURN;
#endif
@@ -812,11 +816,11 @@ end:
if (to_free)
delete top;
- DBUG_PRINT("info", ("returning %d. et_new=0x%lx abstime.tv_sec=%d ",
- ret, *job_data, abstime? abstime->tv_sec:0));
+ DBUG_PRINT("info", ("returning %d et_new: 0x%lx abstime.tv_sec: %ld ",
+ ret, (long) *job_data, abstime ? abstime->tv_sec : 0));
if (*job_data)
- DBUG_PRINT("info", ("db=%s name=%s definer=%s", (*job_data)->dbname.str,
+ DBUG_PRINT("info", ("db: %s name: %s definer=%s", (*job_data)->dbname.str,
(*job_data)->name.str, (*job_data)->definer.str));
DBUG_RETURN(ret);
diff --git a/sql/event_scheduler.cc b/sql/event_scheduler.cc
index 6f9f6887c12..b1a82477c3c 100644
--- a/sql/event_scheduler.cc
+++ b/sql/event_scheduler.cc
@@ -264,8 +264,9 @@ event_worker_thread(void *arg)
if (!post_init_event_thread(thd))
{
- DBUG_PRINT("info", ("Baikonur, time is %d, BURAN reporting and operational."
- "THD=0x%lx", time(NULL), thd));
+ DBUG_PRINT("info", ("Baikonur, time is %ld, BURAN reporting and operational."
+ "THD: 0x%lx",
+ (long) time(NULL), (long) thd));
sql_print_information("SCHEDULER: [%s.%s of %s] executing in thread %lu. "
"Execution %u",
@@ -378,7 +379,7 @@ Event_scheduler::start()
DBUG_ENTER("Event_scheduler::start");
LOCK_DATA();
- DBUG_PRINT("info", ("state before action %s", scheduler_states_names[state]));
+ DBUG_PRINT("info", ("state before action %s", scheduler_states_names[state].str));
if (state > INITIALIZED)
goto end;
@@ -400,7 +401,7 @@ Event_scheduler::start()
scheduler_thd= new_thd;
DBUG_PRINT("info", ("Setting state go RUNNING"));
state= RUNNING;
- DBUG_PRINT("info", ("Forking new thread for scheduduler. THD=0x%lx", new_thd));
+ DBUG_PRINT("info", ("Forking new thread for scheduduler. THD: 0x%lx", (long) new_thd));
if (pthread_create(&th, &connection_attrib, event_scheduler_thread,
(void*)scheduler_param_value))
{
@@ -463,7 +464,7 @@ Event_scheduler::run(THD *thd)
break;
}
- DBUG_PRINT("info", ("get_top returned job_data=0x%lx", job_data));
+ DBUG_PRINT("info", ("get_top returned job_data: 0x%lx", (long) job_data));
if (job_data)
{
if ((res= execute_top(thd, job_data)))
@@ -522,11 +523,11 @@ Event_scheduler::execute_top(THD *thd, Event_job_data *job_data)
++started_events;
- DBUG_PRINT("info", ("Launch succeeded. BURAN is in THD=0x%lx", new_thd));
+ DBUG_PRINT("info", ("Launch succeeded. BURAN is in THD: 0x%lx", (long) new_thd));
DBUG_RETURN(FALSE);
error:
- DBUG_PRINT("error", ("Baikonur, we have a problem! res=%d", res));
+ DBUG_PRINT("error", ("Baikonur, we have a problem! res: %d", res));
if (new_thd)
{
new_thd->proc_info= "Clearing";
@@ -581,10 +582,10 @@ Event_scheduler::stop()
{
THD *thd= current_thd;
DBUG_ENTER("Event_scheduler::stop");
- DBUG_PRINT("enter", ("thd=0x%lx", current_thd));
+ DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd));
LOCK_DATA();
- DBUG_PRINT("info", ("state before action %s", scheduler_states_names[state]));
+ DBUG_PRINT("info", ("state before action %s", scheduler_states_names[state].str));
if (state != RUNNING)
goto end;
@@ -605,7 +606,7 @@ Event_scheduler::stop()
*/
state= STOPPING;
- DBUG_PRINT("info", ("Manager thread has id %d", scheduler_thd->thread_id));
+ DBUG_PRINT("info", ("Manager thread has id %lu", scheduler_thd->thread_id));
/* Lock from delete */
pthread_mutex_lock(&scheduler_thd->LOCK_delete);
/* This will wake up the thread if it waits on Queue's conditional */
diff --git a/sql/events.cc b/sql/events.cc
index 10a8be948ef..3dbc6fd27e1 100644
--- a/sql/events.cc
+++ b/sql/events.cc
@@ -858,7 +858,7 @@ Events::check_system_tables(THD *thd)
bool ret= FALSE;
DBUG_ENTER("Events::check_system_tables");
- DBUG_PRINT("enter", ("thd=0x%lx", thd));
+ DBUG_PRINT("enter", ("thd: 0x%lx", (long) thd));
thd->reset_n_backup_open_tables_state(&backup);
diff --git a/sql/field.cc b/sql/field.cc
index d01cc00c711..1551b78bd72 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -8180,8 +8180,8 @@ Field_bit::do_last_null_byte() const
bits. On systems with CHAR_BIT > 8 (not very common), the storage
will lose the extra bits.
*/
- DBUG_PRINT("debug", ("bit_ofs=%d, bit_len=%d, bit_ptr=%p",
- bit_ofs, bit_len, bit_ptr));
+ DBUG_PRINT("test", ("bit_ofs: %d, bit_len: %d bit_ptr: 0x%lx",
+ bit_ofs, bit_len, (long) bit_ptr));
uchar *result;
if (bit_len == 0)
result= null_ptr;
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 7700aebe367..0703e18b5f7 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -413,7 +413,8 @@ Thd_ndb::get_open_table(THD *thd, const void *key)
thd_ndb_share->stat.no_uncommitted_rows_count= 0;
thd_ndb_share->stat.records= ~(ha_rows)0;
}
- DBUG_PRINT("exit", ("thd_ndb_share: 0x%x key: 0x%x", thd_ndb_share, key));
+ DBUG_PRINT("exit", ("thd_ndb_share: 0x%lx key: 0x%lx",
+ (long) thd_ndb_share, (long) key));
DBUG_RETURN(thd_ndb_share);
}
@@ -761,8 +762,8 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field,
blob_ptr= (char*)"";
}
- DBUG_PRINT("value", ("set blob ptr=%p len=%u",
- blob_ptr, blob_len));
+ DBUG_PRINT("value", ("set blob ptr: 0x%lx len: %u",
+ (long) blob_ptr, blob_len));
DBUG_DUMP("value", (char*)blob_ptr, min(blob_len, 26));
if (set_blob_value)
@@ -847,8 +848,8 @@ int get_ndb_blobs_value(TABLE* table, NdbValue* value_array,
uint32 len= 0xffffffff; // Max uint32
if (ndb_blob->readData(buf, len) != 0)
ERR_RETURN(ndb_blob->getNdbError());
- DBUG_PRINT("info", ("[%u] offset=%u buf=%p len=%u [ptrdiff=%d]",
- i, offset, buf, len, (int)ptrdiff));
+ DBUG_PRINT("info", ("[%u] offset: %u buf: 0x%lx len=%u [ptrdiff=%d]",
+ i, offset, (long) buf, len, (int)ptrdiff));
DBUG_ASSERT(len == len64);
// Ugly hack assumes only ptr needs to be changed
field_blob->ptr+= ptrdiff;
@@ -1171,8 +1172,8 @@ int ha_ndbcluster::add_index_handle(THD *thd, NDBDICT *dict, KEY *key_info,
index= dict->getIndexGlobal(index_name, *m_table);
if (!index)
ERR_RETURN(dict->getNdbError());
- DBUG_PRINT("info", ("index: 0x%x id: %d version: %d.%d status: %d",
- index,
+ DBUG_PRINT("info", ("index: 0x%lx id: %d version: %d.%d status: %d",
+ (long) index,
index->getObjectId(),
index->getObjectVersion() & 0xFFFFFF,
index->getObjectVersion() >> 24,
@@ -1215,8 +1216,8 @@ int ha_ndbcluster::add_index_handle(THD *thd, NDBDICT *dict, KEY *key_info,
index= dict->getIndexGlobal(unique_index_name, *m_table);
if (!index)
ERR_RETURN(dict->getNdbError());
- DBUG_PRINT("info", ("index: 0x%x id: %d version: %d.%d status: %d",
- index,
+ DBUG_PRINT("info", ("index: 0x%lx id: %d version: %d.%d status: %d",
+ (long) index,
index->getObjectId(),
index->getObjectVersion() & 0xFFFFFF,
index->getObjectVersion() >> 24,
@@ -2305,7 +2306,7 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
// Set bound if not done with this key
if (p.key != NULL)
{
- DBUG_PRINT("info", ("key %d:%d offset=%d length=%d last=%d bound=%d",
+ DBUG_PRINT("info", ("key %d:%d offset: %d length: %d last: %d bound: %d",
j, i, tot_len, part_len, p.part_last, p.bound_type));
DBUG_DUMP("info", (const char*)p.part_ptr, part_store_len);
@@ -2462,7 +2463,7 @@ int ha_ndbcluster::full_table_scan(byte *buf)
part_spec.start_part= 0;
part_spec.end_part= m_part_info->get_tot_partitions() - 1;
prune_partition_set(table, &part_spec);
- DBUG_PRINT("info", ("part_spec.start_part = %u, part_spec.end_part = %u",
+ DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u",
part_spec.start_part, part_spec.end_part));
/*
If partition pruning has found no partition in set
@@ -2658,7 +2659,7 @@ int ha_ndbcluster::write_row(byte *record)
{
// Send rows to NDB
DBUG_PRINT("info", ("Sending inserts to NDB, "\
- "rows_inserted:%d, bulk_insert_rows: %d",
+ "rows_inserted: %d bulk_insert_rows: %d",
(int)m_rows_inserted, (int)m_bulk_insert_rows));
m_bulk_insert_not_flushed= FALSE;
@@ -3108,7 +3109,8 @@ void ndb_unpack_record(TABLE *table, NdbValue *value,
char* ptr;
field_blob->get_ptr(&ptr, row_offset);
uint32 len= field_blob->get_length(row_offset);
- DBUG_PRINT("info",("[%u] SET ptr=%p len=%u", col_no, ptr, len));
+ DBUG_PRINT("info",("[%u] SET ptr: 0x%lx len: %u",
+ col_no, (long) ptr, len));
#endif
}
}
@@ -3350,7 +3352,7 @@ int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key,
if (m_use_partition_function)
{
get_partition_set(table, buf, active_index, start_key, &part_spec);
- DBUG_PRINT("info", ("part_spec.start_part = %u, part_spec.end_part = %u",
+ DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u",
part_spec.start_part, part_spec.end_part));
/*
If partition pruning has found no partition in set
@@ -3876,7 +3878,7 @@ int ha_ndbcluster::end_bulk_insert()
NdbTransaction *trans= m_active_trans;
// Send rows to NDB
DBUG_PRINT("info", ("Sending inserts to NDB, "\
- "rows_inserted:%d, bulk_insert_rows: %d",
+ "rows_inserted: %d bulk_insert_rows: %d",
(int) m_rows_inserted, (int) m_bulk_insert_rows));
m_bulk_insert_not_flushed= FALSE;
if (m_transaction_on)
@@ -5101,13 +5103,12 @@ void ha_ndbcluster::prepare_for_alter()
int ha_ndbcluster::add_index(TABLE *table_arg,
KEY *key_info, uint num_of_keys)
{
- DBUG_ENTER("ha_ndbcluster::add_index");
- DBUG_PRINT("info", ("ha_ndbcluster::add_index to table %s",
- table_arg->s->table_name));
int error= 0;
uint idx;
-
+ DBUG_ENTER("ha_ndbcluster::add_index");
+ DBUG_PRINT("enter", ("table %s", table_arg->s->table_name.str));
DBUG_ASSERT(m_share->state == NSS_ALTERED);
+
for (idx= 0; idx < num_of_keys; idx++)
{
KEY *key= key_info + idx;
@@ -6662,7 +6663,7 @@ static int ndbcluster_end(handlerton *hton, ha_panic_function type)
void ha_ndbcluster::print_error(int error, myf errflag)
{
DBUG_ENTER("ha_ndbcluster::print_error");
- DBUG_PRINT("enter", ("error = %d", error));
+ DBUG_PRINT("enter", ("error: %d", error));
if (error == HA_ERR_NO_PARTITION_FOUND)
m_part_info->print_no_partition_found(table);
@@ -7168,16 +7169,16 @@ static void dbug_print_open_tables()
for (uint i= 0; i < ndbcluster_open_tables.records; i++)
{
NDB_SHARE *share= (NDB_SHARE*) hash_element(&ndbcluster_open_tables, i);
- DBUG_PRINT("share",
- ("[%d] 0x%lx key: %s key_length: %d",
- i, share, share->key, share->key_length));
- DBUG_PRINT("share",
- ("db.tablename: %s.%s use_count: %d commit_count: %d",
+ DBUG_PRINT("loop",
+ ("[%d] 0x%lx key: %s key_length: %d",
+ i, (long) share, share->key, share->key_length));
+ DBUG_PRINT("loop",
+ ("db.tablename: %s.%s use_count: %d commit_count: %lu",
share->db, share->table_name,
- share->use_count, share->commit_count));
+ share->use_count, (ulong) share->commit_count));
#ifdef HAVE_NDB_BINLOG
if (share->table)
- DBUG_PRINT("share",
+ DBUG_PRINT("loop",
("table->s->db.table_name: %s.%s",
share->table->s->db.str, share->table->s->table_name.str));
#endif
@@ -7330,13 +7331,13 @@ static int rename_share(NDB_SHARE *share, const char *new_key)
share->table_name= share->db + strlen(share->db) + 1;
ha_ndbcluster::set_tabname(new_key, share->table_name);
- DBUG_PRINT("rename_share",
- ("0x%lx key: %s key_length: %d",
- share, share->key, share->key_length));
- DBUG_PRINT("rename_share",
- ("db.tablename: %s.%s use_count: %d commit_count: %d",
+ DBUG_PRINT("info",
+ ("share: 0x%lx key: %s key_length: %d",
+ (long) share, share->key, share->key_length));
+ DBUG_PRINT("info",
+ ("db.tablename: %s.%s use_count: %d commit_count: %lu",
share->db, share->table_name,
- share->use_count, share->commit_count));
+ share->use_count, (ulong) share->commit_count));
if (share->table)
{
DBUG_PRINT("rename_share",
@@ -7371,13 +7372,13 @@ NDB_SHARE *ndbcluster_get_share(NDB_SHARE *share)
dbug_print_open_tables();
- DBUG_PRINT("get_share",
- ("0x%lx key: %s key_length: %d",
- share, share->key, share->key_length));
- DBUG_PRINT("get_share",
- ("db.tablename: %s.%s use_count: %d commit_count: %d",
+ DBUG_PRINT("info",
+ ("share: 0x%lx key: %s key_length: %d",
+ (long) share, share->key, share->key_length));
+ DBUG_PRINT("info",
+ ("db.tablename: %s.%s use_count: %d commit_count: %lu",
share->db, share->table_name,
- share->use_count, share->commit_count));
+ share->use_count, (ulong) share->commit_count));
pthread_mutex_unlock(&ndbcluster_mutex);
return share;
}
@@ -7485,13 +7486,12 @@ NDB_SHARE *ndbcluster_get_share(const char *key, TABLE *table,
void ndbcluster_real_free_share(NDB_SHARE **share)
{
DBUG_ENTER("ndbcluster_real_free_share");
- DBUG_PRINT("real_free_share",
- ("0x%lx key: %s key_length: %d",
- (*share), (*share)->key, (*share)->key_length));
- DBUG_PRINT("real_free_share",
- ("db.tablename: %s.%s use_count: %d commit_count: %d",
+ DBUG_PRINT("enter",
+ ("share: 0x%lx key: %s key_length: %d "
+ "db.tablename: %s.%s use_count: %d commit_count: %lu",
+ (long) (*share), (*share)->key, (*share)->key_length,
(*share)->db, (*share)->table_name,
- (*share)->use_count, (*share)->commit_count));
+ (*share)->use_count, (ulong) (*share)->commit_count));
hash_delete(&ndbcluster_open_tables, (byte*) *share);
thr_lock_delete(&(*share)->lock);
@@ -7539,13 +7539,13 @@ void ndbcluster_free_share(NDB_SHARE **share, bool have_lock)
else
{
dbug_print_open_tables();
- DBUG_PRINT("free_share",
- ("0x%lx key: %s key_length: %d",
- *share, (*share)->key, (*share)->key_length));
- DBUG_PRINT("free_share",
- ("db.tablename: %s.%s use_count: %d commit_count: %d",
+ DBUG_PRINT("info",
+ ("share: 0x%lx key: %s key_length: %d",
+ (long) *share, (*share)->key, (*share)->key_length));
+ DBUG_PRINT("info",
+ ("db.tablename: %s.%s use_count: %d commit_count: %lu",
(*share)->db, (*share)->table_name,
- (*share)->use_count, (*share)->commit_count));
+ (*share)->use_count, (ulong) (*share)->commit_count));
}
if (!have_lock)
pthread_mutex_unlock(&ndbcluster_mutex);
@@ -7815,7 +7815,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p,
get_partition_set(table, curr, active_index,
&multi_range_curr->start_key,
&part_spec);
- DBUG_PRINT("info", ("part_spec.start_part = %u, part_spec.end_part = %u",
+ DBUG_PRINT("info", ("part_spec.start_part: %u part_spec.end_part: %u",
part_spec.start_part, part_spec.end_part));
/*
If partition pruning has found no partition in set
@@ -8347,8 +8347,8 @@ pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused)))
ndb_get_table_statistics(NULL, false, ndb, ndbtab_g.get_table(), &stat) == 0)
{
char buff[22], buff2[22];
- DBUG_PRINT("ndb_util_thread",
- ("Table: %s, commit_count: %llu, rows: %llu",
+ DBUG_PRINT("info",
+ ("Table: %s commit_count: %s rows: %s",
share->key,
llstr(stat.commit_count, buff),
llstr(stat.row_count, buff2)));
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index e0b7502a40a..cb2ac56e828 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -161,16 +161,16 @@ static void dbug_print_table(const char *info, TABLE *table)
}
DBUG_PRINT("info",
("%s: %s.%s s->fields: %d "
- "reclength: %d rec_buff_length: %d record[0]: %lx "
- "record[1]: %lx",
+ "reclength: %lu rec_buff_length: %u record[0]: 0x%lx "
+ "record[1]: 0x%lx",
info,
table->s->db.str,
table->s->table_name.str,
table->s->fields,
table->s->reclength,
table->s->rec_buff_length,
- table->record[0],
- table->record[1]));
+ (long) table->record[0],
+ (long) table->record[1]));
for (unsigned int i= 0; i < table->s->fields; i++)
{
@@ -180,7 +180,7 @@ static void dbug_print_table(const char *info, TABLE *table)
"ptr: 0x%lx[+%d] null_bit: %u null_ptr: 0x%lx[+%d]",
i,
f->field_name,
- f->flags,
+ (long) f->flags,
(f->flags & PRI_KEY_FLAG) ? "pri" : "attr",
(f->flags & NOT_NULL_FLAG) ? "" : ",nullable",
(f->flags & UNSIGNED_FLAG) ? ",unsigned" : ",signed",
@@ -189,16 +189,18 @@ static void dbug_print_table(const char *info, TABLE *table)
(f->flags & BINARY_FLAG) ? ",binary" : "",
f->real_type(),
f->pack_length(),
- f->ptr, f->ptr - table->record[0],
+ (long) f->ptr, (int) (f->ptr - table->record[0]),
f->null_bit,
- f->null_ptr, (byte*) f->null_ptr - table->record[0]));
+ (long) f->null_ptr,
+ (int) ((byte*) f->null_ptr - table->record[0])));
if (f->type() == MYSQL_TYPE_BIT)
{
Field_bit *g= (Field_bit*) f;
DBUG_PRINT("MYSQL_TYPE_BIT",("field_length: %d bit_ptr: 0x%lx[+%d] "
- "bit_ofs: %u bit_len: %u",
- g->field_length, g->bit_ptr,
- (byte*) g->bit_ptr-table->record[0],
+ "bit_ofs: %d bit_len: %u",
+ g->field_length, (long) g->bit_ptr,
+ (int) ((byte*) g->bit_ptr -
+ table->record[0]),
g->bit_ofs, g->bit_len));
}
}
@@ -605,11 +607,11 @@ static int ndbcluster_binlog_end(THD *thd)
{
DBUG_PRINT("share",
("[%d] 0x%lx key: %s key_length: %d",
- i, share, share->key, share->key_length));
+ i, (long) share, share->key, share->key_length));
DBUG_PRINT("share",
- ("db.tablename: %s.%s use_count: %d commit_count: %d",
+ ("db.tablename: %s.%s use_count: %d commit_count: %lu",
share->db, share->table_name,
- share->use_count, share->commit_count));
+ share->use_count, (long) share->commit_count));
}
}
pthread_mutex_unlock(&ndbcluster_mutex);
@@ -685,8 +687,8 @@ static NDB_SHARE *ndbcluster_check_apply_status_share()
void *share= hash_search(&ndbcluster_open_tables,
NDB_APPLY_TABLE_FILE,
sizeof(NDB_APPLY_TABLE_FILE) - 1);
- DBUG_PRINT("info",("ndbcluster_check_apply_status_share %s %p",
- NDB_APPLY_TABLE_FILE, share));
+ DBUG_PRINT("info",("ndbcluster_check_apply_status_share %s 0x%lx",
+ NDB_APPLY_TABLE_FILE, (long) share));
pthread_mutex_unlock(&ndbcluster_mutex);
return (NDB_SHARE*) share;
}
@@ -703,8 +705,8 @@ static NDB_SHARE *ndbcluster_check_schema_share()
void *share= hash_search(&ndbcluster_open_tables,
NDB_SCHEMA_TABLE_FILE,
sizeof(NDB_SCHEMA_TABLE_FILE) - 1);
- DBUG_PRINT("info",("ndbcluster_check_schema_share %s %p",
- NDB_SCHEMA_TABLE_FILE, share));
+ DBUG_PRINT("info",("ndbcluster_check_schema_share %s 0x%lx",
+ NDB_SCHEMA_TABLE_FILE, (long) share));
pthread_mutex_unlock(&ndbcluster_mutex);
return (NDB_SHARE*) share;
}
@@ -2721,10 +2723,9 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
if (share->flags & NSF_BLOB_FLAG)
op->mergeEvents(TRUE); // currently not inherited from event
- DBUG_PRINT("info", ("share->ndb_value[0]: 0x%x",
- share->ndb_value[0]));
- DBUG_PRINT("info", ("share->ndb_value[1]: 0x%x",
- share->ndb_value[1]));
+ DBUG_PRINT("info", ("share->ndb_value[0]: 0x%lx share->ndb_value[1]: 0x%lx",
+ (long) share->ndb_value[0],
+ (long) share->ndb_value[1]));
int n_columns= ndbtab->getNoOfColumns();
int n_fields= table ? table->s->fields : 0; // XXX ???
for (int j= 0; j < n_columns; j++)
@@ -2778,12 +2779,14 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
}
share->ndb_value[0][j].ptr= attr0.ptr;
share->ndb_value[1][j].ptr= attr1.ptr;
- DBUG_PRINT("info", ("&share->ndb_value[0][%d]: 0x%x "
- "share->ndb_value[0][%d]: 0x%x",
- j, &share->ndb_value[0][j], j, attr0.ptr));
- DBUG_PRINT("info", ("&share->ndb_value[1][%d]: 0x%x "
- "share->ndb_value[1][%d]: 0x%x",
- j, &share->ndb_value[0][j], j, attr1.ptr));
+ DBUG_PRINT("info", ("&share->ndb_value[0][%d]: 0x%lx "
+ "share->ndb_value[0][%d]: 0x%lx",
+ j, (long) &share->ndb_value[0][j],
+ j, (long) attr0.ptr));
+ DBUG_PRINT("info", ("&share->ndb_value[1][%d]: 0x%lx "
+ "share->ndb_value[1][%d]: 0x%lx",
+ j, (long) &share->ndb_value[0][j],
+ j, (long) attr1.ptr));
}
op->setCustomData((void *) share); // set before execute
share->op= op; // assign op in NDB_SHARE
@@ -2826,8 +2829,8 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
(void) pthread_cond_signal(&injector_cond);
}
- DBUG_PRINT("info",("%s share->op: 0x%lx, share->use_count: %u",
- share->key, share->op, share->use_count));
+ DBUG_PRINT("info",("%s share->op: 0x%lx share->use_count: %u",
+ share->key, (long) share->op, share->use_count));
if (ndb_extra_logging)
sql_print_information("NDB Binlog: logging %s", share->key);
@@ -3012,10 +3015,11 @@ ndb_binlog_thread_handle_non_data_event(THD *thd, Ndb *ndb,
free_share(&apply_status_share);
apply_status_share= 0;
}
- DBUG_PRINT("info", ("CLUSTER FAILURE EVENT: "
- "%s received share: 0x%lx op: %lx share op: %lx "
- "op_old: %lx",
- share->key, share, pOp, share->op, share->op_old));
+ DBUG_PRINT("error", ("CLUSTER FAILURE EVENT: "
+ "%s received share: 0x%lx op: 0x%lx share op: 0x%lx "
+ "op_old: 0x%lx",
+ share->key, (long) share, (long) pOp,
+ (long) share->op, (long) share->op_old));
break;
case NDBEVENT::TE_DROP:
if (apply_status_share == share)
@@ -3033,10 +3037,11 @@ ndb_binlog_thread_handle_non_data_event(THD *thd, Ndb *ndb,
// fall through
case NDBEVENT::TE_ALTER:
row.n_schemaops++;
- DBUG_PRINT("info", ("TABLE %s EVENT: %s received share: 0x%lx op: %lx "
- "share op: %lx op_old: %lx",
- type == NDBEVENT::TE_DROP ? "DROP" : "ALTER",
- share->key, share, pOp, share->op, share->op_old));
+ DBUG_PRINT("info", ("TABLE %s EVENT: %s received share: 0x%lx op: 0x%lx "
+ "share op: 0x%lx op_old: 0x%lx",
+ type == NDBEVENT::TE_DROP ? "DROP" : "ALTER",
+ share->key, (long) share, (long) pOp,
+ (long) share->op, (long) share->op_old));
break;
case NDBEVENT::TE_NODE_FAILURE:
/* fall through */
@@ -3513,7 +3518,8 @@ restart:
}
}
// now check that we have epochs consistant with what we had before the restart
- DBUG_PRINT("info", ("schema_res: %d schema_gci: %d", schema_res, schema_gci));
+ DBUG_PRINT("info", ("schema_res: %d schema_gci: %lu", schema_res,
+ (long) schema_gci));
{
i_ndb->flushIncompleteEvents(schema_gci);
s_ndb->flushIncompleteEvents(schema_gci);
@@ -3697,8 +3703,8 @@ restart:
!= NULL)
{
NDB_SHARE *share= (NDB_SHARE*)gci_op->getCustomData();
- DBUG_PRINT("info", ("per gci_op: %p share: %p event_types: 0x%x",
- gci_op, share, event_types));
+ DBUG_PRINT("info", ("per gci_op: 0x%lx share: 0x%lx event_types: 0x%x",
+ (long) gci_op, (long) share, event_types));
// workaround for interface returning TE_STOP events
// which are normally filtered out below in the nextEvent loop
if ((event_types & ~NdbDictionary::Event::TE_STOP) == 0)
@@ -3784,11 +3790,13 @@ restart:
{
NDB_SHARE *share= (NDB_SHARE*) pOp->getCustomData();
DBUG_PRINT("info",
- ("EVENT TYPE: %d GCI: %lld last applied: %lld "
- "share: 0x%lx (%s.%s)", pOp->getEventType(), gci,
- ndb_latest_applied_binlog_epoch, share,
- share ? share->db : "share == NULL",
- share ? share->table_name : ""));
+ ("EVENT TYPE: %d GCI: %ld last applied: %ld "
+ "share: 0x%lx (%s.%s)", pOp->getEventType(),
+ (long) gci,
+ (long) ndb_latest_applied_binlog_epoch,
+ (long) share,
+ share ? share->db : "'NULL'",
+ share ? share->table_name : "'NULL'"));
DBUG_ASSERT(share != 0);
}
// assert that there is consistancy between gci op list
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 3edd3923779..82b43ce578f 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -2027,7 +2027,7 @@ bool ha_partition::create_handlers(MEM_ROOT *mem_root)
if (!(m_file[i]= get_new_handler(table_share, mem_root,
m_engine_array[i])))
DBUG_RETURN(TRUE);
- DBUG_PRINT("info", ("engine_type: %u", m_engine_array[i]));
+ DBUG_PRINT("info", ("engine_type: %u", m_engine_array[i]->db_type));
}
/* For the moment we only support partition over the same table engine */
if (m_engine_array[0] == myisam_hton)
@@ -2939,8 +2939,8 @@ int ha_partition::rnd_init(bool scan)
include_partition_fields_in_used_fields();
/* Now we see what the index of our first important partition is */
- DBUG_PRINT("info", ("m_part_info->used_partitions 0x%x",
- m_part_info->used_partitions.bitmap));
+ DBUG_PRINT("info", ("m_part_info->used_partitions: 0x%lx",
+ (long) m_part_info->used_partitions.bitmap));
part_id= bitmap_get_first_set(&(m_part_info->used_partitions));
DBUG_PRINT("info", ("m_part_spec.start_part %d", part_id));
diff --git a/sql/handler.cc b/sql/handler.cc
index ae679826dbf..f874100e634 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -1513,7 +1513,7 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
DBUG_ENTER("handler::ha_open");
DBUG_PRINT("enter",
("name: %s db_type: %d db_stat: %d mode: %d lock_test: %d",
- name, table_share->db_type, table_arg->db_stat, mode,
+ name, ht->db_type, table_arg->db_stat, mode,
test_if_locked));
table= table_arg;
@@ -1927,8 +1927,8 @@ int handler::update_auto_increment()
void handler::column_bitmaps_signal()
{
DBUG_ENTER("column_bitmaps_signal");
- DBUG_PRINT("info", ("read_set: 0x%lx write_set: 0x%lx", table->read_set,
- table->write_set));
+ DBUG_PRINT("info", ("read_set: 0x%lx write_set: 0x%lx", (long) table->read_set,
+ (long) table->write_set));
DBUG_VOID_RETURN;
}
@@ -3507,8 +3507,10 @@ namespace
int write_locked_table_maps(THD *thd)
{
DBUG_ENTER("write_locked_table_maps");
- DBUG_PRINT("enter", ("thd=%p, thd->lock=%p, thd->locked_tables=%p, thd->extra_lock",
- thd, thd->lock, thd->locked_tables, thd->extra_lock));
+ DBUG_PRINT("enter", ("thd: 0x%lx thd->lock: 0x%lx thd->locked_tables: 0x%lx "
+ "thd->extra_lock: 0x%lx",
+ (long) thd, (long) thd->lock,
+ (long) thd->locked_tables, (long) thd->extra_lock));
if (thd->get_binlog_table_maps() == 0)
{
@@ -3528,7 +3530,7 @@ namespace
++table_ptr)
{
TABLE *const table= *table_ptr;
- DBUG_PRINT("info", ("Checking table %s", table->s->table_name));
+ DBUG_PRINT("info", ("Checking table %s", table->s->table_name.str));
if (table->current_lock == F_WRLCK &&
check_table_binlog_row_based(thd, table))
{
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 1d3048af54c..b252144ae2e 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -3061,7 +3061,7 @@ longlong Item_is_not_null_test::val_int()
if (!used_tables_cache)
{
owner->was_null|= (!cached_value);
- DBUG_PRINT("info", ("cached :%ld", (long) cached_value));
+ DBUG_PRINT("info", ("cached: %ld", (long) cached_value));
DBUG_RETURN(cached_value);
}
if (args[0]->is_null())
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 407ab4a66f7..574a8055ac3 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -5044,7 +5044,7 @@ Item_func_sp::result_type() const
{
Field *field;
DBUG_ENTER("Item_func_sp::result_type");
- DBUG_PRINT("info", ("m_sp = %p", m_sp));
+ DBUG_PRINT("info", ("m_sp: 0x%lx", (long) m_sp));
if (result_field)
DBUG_RETURN(result_field->result_type());
diff --git a/sql/log.cc b/sql/log.cc
index 620445aecfa..b12eca9bb07 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -1344,7 +1344,7 @@ binlog_trans_log_savepos(THD *thd, my_off_t *pos)
(binlog_trx_data*) thd->ha_data[binlog_hton->slot];
DBUG_ASSERT(mysql_bin_log.is_open());
*pos= trx_data->position();
- DBUG_PRINT("return", ("*pos=%u", *pos));
+ DBUG_PRINT("return", ("*pos: %lu", (ulong) *pos));
DBUG_VOID_RETURN;
}
@@ -1368,7 +1368,7 @@ static void
binlog_trans_log_truncate(THD *thd, my_off_t pos)
{
DBUG_ENTER("binlog_trans_log_truncate");
- DBUG_PRINT("enter", ("pos=%u", pos));
+ DBUG_PRINT("enter", ("pos: %lu", (ulong) pos));
DBUG_ASSERT(thd->ha_data[binlog_hton->slot] != NULL);
/* Only true if binlog_trans_log_savepos() wasn't called before */
@@ -1444,8 +1444,8 @@ binlog_end_trans(THD *thd, binlog_trx_data *trx_data,
DBUG_ENTER("binlog_end_trans");
int error=0;
IO_CACHE *trans_log= &trx_data->trans_log;
- DBUG_PRINT("enter", ("transaction: %s, end_ev=%p",
- all ? "all" : "stmt", end_ev));
+ DBUG_PRINT("enter", ("transaction: %s end_ev: 0x%lx",
+ all ? "all" : "stmt", (long) end_ev));
DBUG_PRINT("info", ("thd->options={ %s%s}",
FLAGSTR(thd->options, OPTION_NOT_AUTOCOMMIT),
FLAGSTR(thd->options, OPTION_BEGIN)));
@@ -3417,12 +3417,13 @@ int THD::binlog_setup_trx_data()
void
THD::binlog_start_trans_and_stmt()
{
- DBUG_ENTER("binlog_start_trans_and_stmt");
binlog_trx_data *trx_data= (binlog_trx_data*) ha_data[binlog_hton->slot];
- DBUG_PRINT("enter", ("trx_data=0x%lu", trx_data));
- if (trx_data)
- DBUG_PRINT("enter", ("trx_data->before_stmt_pos=%u",
- trx_data->before_stmt_pos));
+ DBUG_ENTER("binlog_start_trans_and_stmt");
+ DBUG_PRINT("enter", ("trx_data: 0x%lx trx_data->before_stmt_pos: %lu",
+ (long) trx_data,
+ (trx_data ? (ulong) trx_data->before_stmt_pos :
+ (ulong) 0)));
+
if (trx_data == NULL ||
trx_data->before_stmt_pos == MY_OFF_T_UNDEF)
{
@@ -3453,8 +3454,8 @@ int THD::binlog_write_table_map(TABLE *table, bool is_trans)
{
int error;
DBUG_ENTER("THD::binlog_write_table_map");
- DBUG_PRINT("enter", ("table: %0xlx (%s: #%u)",
- (long) table, table->s->table_name,
+ DBUG_PRINT("enter", ("table: 0x%lx (%s: #%lu)",
+ (long) table, table->s->table_name.str,
table->s->table_map_id));
/* Pre-conditions */
@@ -3517,7 +3518,7 @@ MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd,
{
DBUG_ENTER("MYSQL_BIN_LOG::flush_and_set_pending_rows_event(event)");
DBUG_ASSERT(mysql_bin_log.is_open());
- DBUG_PRINT("enter", ("event=%p", event));
+ DBUG_PRINT("enter", ("event: 0x%lx", (long) event));
int error= 0;
@@ -3526,7 +3527,7 @@ MYSQL_BIN_LOG::flush_and_set_pending_rows_event(THD *thd,
DBUG_ASSERT(trx_data);
- DBUG_PRINT("info", ("trx_data->pending()=%p", trx_data->pending()));
+ DBUG_PRINT("info", ("trx_data->pending(): 0x%lx", (long) trx_data->pending()));
if (Rows_log_event* pending= trx_data->pending())
{
@@ -3681,9 +3682,9 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
my_off_t trans_log_pos= my_b_tell(trans_log);
if (event_info->get_cache_stmt() || trans_log_pos != 0)
{
- DBUG_PRINT("info", ("Using trans_log: cache=%d, trans_log_pos=%u",
+ DBUG_PRINT("info", ("Using trans_log: cache: %d, trans_log_pos: %lu",
event_info->get_cache_stmt(),
- trans_log_pos));
+ (ulong) trans_log_pos));
if (trans_log_pos == 0)
thd->binlog_start_trans_and_stmt();
file= trans_log;
@@ -3725,15 +3726,17 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
}
if (thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements() > 0)
{
- DBUG_PRINT("info",("number of auto_inc intervals: %lu",
- thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements()));
+ DBUG_PRINT("info",("number of auto_inc intervals: %u",
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog.
+ nb_elements()));
/*
If the auto_increment was second in a table's index (possible with
MyISAM or BDB) (table->next_number_key_offset != 0), such event is
in fact not necessary. We could avoid logging it.
*/
- Intvar_log_event e(thd,(uchar) INSERT_ID_EVENT,
- thd->auto_inc_intervals_in_cur_stmt_for_binlog.minimum());
+ Intvar_log_event e(thd, (uchar) INSERT_ID_EVENT,
+ thd->auto_inc_intervals_in_cur_stmt_for_binlog.
+ minimum());
if (e.write(file))
goto err;
}
diff --git a/sql/log_event.cc b/sql/log_event.cc
index e170194bc37..79e3a35cbe8 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -5345,8 +5345,8 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
uint8 const common_header_len= description_event->common_header_len;
uint8 const post_header_len= description_event->post_header_len[event_type-1];
- DBUG_PRINT("enter",("event_len=%ld, common_header_len=%d, "
- "post_header_len=%d",
+ DBUG_PRINT("enter",("event_len: %u common_header_len: %d "
+ "post_header_len: %d",
event_len, common_header_len,
post_header_len));
@@ -5376,7 +5376,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
const byte* const ptr_rows_data= var_start + byte_count + 1;
my_size_t const data_size= event_len - (ptr_rows_data - (const byte *) buf);
- DBUG_PRINT("info",("m_table_id=%lu, m_flags=%d, m_width=%u, data_size=%lu",
+ DBUG_PRINT("info",("m_table_id: %lu m_flags: %d m_width: %lu data_size: %u",
m_table_id, m_flags, m_width, data_size));
m_rows_buf= (byte*)my_malloc(data_size, MYF(MY_WME));
@@ -5416,7 +5416,7 @@ int Rows_log_event::do_add_row_data(byte *const row_data,
would save binlog space. TODO
*/
DBUG_ENTER("Rows_log_event::do_add_row_data");
- DBUG_PRINT("enter", ("row_data: 0x%lx length: %lu", (ulong) row_data,
+ DBUG_PRINT("enter", ("row_data: 0x%lx length: %u", (ulong) row_data,
length));
/*
Don't print debug messages when running valgrind since they can
@@ -5513,7 +5513,7 @@ unpack_row(RELAY_LOG_INFO *rli,
{
DBUG_ENTER("unpack_row");
DBUG_ASSERT(record && row);
- DBUG_PRINT("enter", ("row=0x%lx; record=0x%lx", row, record));
+ DBUG_PRINT("enter", ("row: 0x%lx record: 0x%lx", (long) row, (long) record));
my_ptrdiff_t const offset= record - (byte*) table->record[0];
my_size_t master_null_bytes= table->s->null_bytes;
@@ -5555,10 +5555,12 @@ unpack_row(RELAY_LOG_INFO *rli,
if (bitmap_is_set(cols, field_ptr - begin_ptr))
{
DBUG_ASSERT(table->record[0] <= f->ptr);
- DBUG_ASSERT(f->ptr < table->record[0] + table->s->reclength + (f->pack_length_in_rec() == 0));
+ DBUG_ASSERT(f->ptr < (table->record[0] + table->s->reclength +
+ (f->pack_length_in_rec() == 0)));
f->move_field_offset(offset);
- DBUG_PRINT("info", ("unpacking column '%s' to 0x%lx", f->field_name, f->ptr));
+ DBUG_PRINT("info", ("unpacking column '%s' to 0x%lx", f->field_name,
+ (long) f->ptr));
ptr= f->unpack(f->ptr, ptr);
f->move_field_offset(-offset);
/* Field...::unpack() cannot return 0 */
@@ -6068,7 +6070,7 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len,
uint8 common_header_len= description_event->common_header_len;
uint8 post_header_len= description_event->post_header_len[TABLE_MAP_EVENT-1];
- DBUG_PRINT("info",("event_len=%ld, common_header_len=%d, post_header_len=%d",
+ DBUG_PRINT("info",("event_len: %u common_header_len: %d post_header_len: %d",
event_len, common_header_len, post_header_len));
/*
@@ -6116,10 +6118,10 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len,
uchar *ptr_after_colcnt= (uchar*) ptr_colcnt;
m_colcnt= net_field_length(&ptr_after_colcnt);
- DBUG_PRINT("info",("m_dblen=%d off=%d m_tbllen=%d off=%d m_colcnt=%d off=%d",
- m_dblen, ptr_dblen-(const byte*)vpart,
- m_tbllen, ptr_tbllen-(const byte*)vpart,
- m_colcnt, ptr_colcnt-(const byte*)vpart));
+ DBUG_PRINT("info",("m_dblen: %d off: %ld m_tbllen: %d off: %ld m_colcnt: %lu off: %ld",
+ m_dblen, (long) (ptr_dblen-(const byte*)vpart),
+ m_tbllen, (long) (ptr_tbllen-(const byte*)vpart),
+ m_colcnt, (long) (ptr_colcnt-(const byte*)vpart)));
/* Allocate mem for all fields in one go. If fails, catched in is_valid() */
m_memory= my_multi_malloc(MYF(MY_WME),
@@ -6523,10 +6525,10 @@ copy_extra_record_fields(TABLE *table,
my_size_t master_reclength,
my_ptrdiff_t master_fields)
{
- DBUG_PRINT("info", ("Copying to %p "
+ DBUG_PRINT("info", ("Copying to 0x%lx "
"from field %ld at offset %u "
- "to field %d at offset %u",
- table->record[0],
+ "to field %d at offset %lu",
+ (long) table->record[0],
master_fields, master_reclength,
table->s->fields, table->s->reclength));
/*
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index bf803fad360..423dfc19fdf 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -2121,7 +2121,7 @@ the thread stack. Please read http://www.mysql.com/doc/en/Linux.html\n\n",
#ifdef HAVE_STACKTRACE
if (!(test_flags & TEST_NO_STACKTRACE))
{
- fprintf(stderr,"thd=%p\n",thd);
+ fprintf(stderr,"thd: 0x%lx\n",(long) thd);
print_stacktrace(thd ? (gptr) thd->thread_stack : (gptr) 0,
thread_stack);
}
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 64300c16ee7..1d6b384df35 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -10814,7 +10814,7 @@ static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map,
if (!tmp.length())
tmp.append(STRING_WITH_LEN("(empty)"));
- DBUG_PRINT("info", ("SEL_TREE %p (%s) scans:%s", tree, msg, tmp.ptr()));
+ DBUG_PRINT("info", ("SEL_TREE: 0x%lx (%s) scans: %s", (long) tree, msg, tmp.ptr()));
DBUG_VOID_RETURN;
}
diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc
index 2b034d50d6a..762fcfb7a6a 100644
--- a/sql/repl_failsafe.cc
+++ b/sql/repl_failsafe.cc
@@ -564,8 +564,8 @@ err:
mysql_free_result(res);
if (error)
{
- sql_print_error("While trying to obtain the list of slaves from the master \
-'%s:%d', user '%s' got the following error: '%s'",
+ sql_print_error("While trying to obtain the list of slaves from the master "
+ "'%s:%d', user '%s' got the following error: '%s'",
mi->host, mi->port, mi->user, error);
DBUG_RETURN(1);
}
diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc
index f01fc5d1c9e..a2edb9dc8a8 100644
--- a/sql/rpl_rli.cc
+++ b/sql/rpl_rli.cc
@@ -402,7 +402,7 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,
bool look_for_description_event)
{
DBUG_ENTER("init_relay_log_pos");
- DBUG_PRINT("info", ("pos=%lu", pos));
+ DBUG_PRINT("info", ("pos: %lu", (ulong) pos));
*errmsg=0;
pthread_mutex_t *log_lock=rli->relay_log.get_log_lock();
@@ -855,7 +855,7 @@ void st_relay_log_info::close_temporary_tables()
Don't ask for disk deletion. For now, anyway they will be deleted when
slave restarts, but it is a better intention to not delete them.
*/
- DBUG_PRINT("info", ("table: %p", table));
+ DBUG_PRINT("info", ("table: 0x%lx", (long) table));
close_temporary(table, 1, 0);
}
save_temporary_tables= 0;
diff --git a/sql/rpl_tblmap.cc b/sql/rpl_tblmap.cc
index a0272b23ee8..97f0066233c 100644
--- a/sql/rpl_tblmap.cc
+++ b/sql/rpl_tblmap.cc
@@ -50,17 +50,17 @@ table_mapping::~table_mapping()
st_table* table_mapping::get_table(ulong table_id)
{
DBUG_ENTER("table_mapping::get_table(ulong)");
- DBUG_PRINT("enter", ("table_id=%d", table_id));
+ DBUG_PRINT("enter", ("table_id: %lu", table_id));
entry *e= find_entry(table_id);
if (e)
{
- DBUG_PRINT("info", ("tid %d -> table %p (%s)",
- table_id, e->table,
+ DBUG_PRINT("info", ("tid %lu -> table 0x%lx (%s)",
+ table_id, (long) e->table,
MAYBE_TABLE_NAME(e->table)));
DBUG_RETURN(e->table);
}
- DBUG_PRINT("info", ("tid %d is not mapped!", table_id));
+ DBUG_PRINT("info", ("tid %lu is not mapped!", table_id));
DBUG_RETURN(NULL);
}
@@ -93,9 +93,9 @@ int table_mapping::expand()
int table_mapping::set_table(ulong table_id, TABLE* table)
{
DBUG_ENTER("table_mapping::set_table(ulong,TABLE*)");
- DBUG_PRINT("enter", ("table_id=%d, table=%p (%s)",
+ DBUG_PRINT("enter", ("table_id: %lu table: 0x%lx (%s)",
table_id,
- table, MAYBE_TABLE_NAME(table)));
+ (long) table, MAYBE_TABLE_NAME(table)));
entry *e= find_entry(table_id);
if (e == 0)
{
@@ -111,8 +111,8 @@ int table_mapping::set_table(ulong table_id, TABLE* table)
e->table= table;
my_hash_insert(&m_table_ids,(byte *)e);
- DBUG_PRINT("info", ("tid %d -> table %p (%s)",
- table_id, e->table,
+ DBUG_PRINT("info", ("tid %lu -> table 0x%lx (%s)",
+ table_id, (long) e->table,
MAYBE_TABLE_NAME(e->table)));
DBUG_RETURN(0); // All OK
}
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 5590e71c810..dc78eb2f509 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -3943,7 +3943,7 @@ sys_var_event_scheduler::update(THD *thd, set_var *var)
DBUG_RETURN(TRUE);
}
- DBUG_PRINT("new_value", ("%lu", (bool)var->save_result.ulong_value));
+ DBUG_PRINT("info", ("new_value: %d", (int) var->save_result.ulong_value));
Item_result var_type= var->value->result_type();
diff --git a/sql/slave.cc b/sql/slave.cc
index d06b405b06b..4c5f0fc4764 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -1609,7 +1609,7 @@ static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings)
DBUG_RETURN(packet_error);
}
- DBUG_PRINT("info",( "len=%u, net->read_pos[4] = %d\n",
+ DBUG_PRINT("exit", ("len: %lu net->read_pos[4]: %d",
len, mysql->net.read_pos[4]));
DBUG_RETURN(len - 1);
}
@@ -1800,7 +1800,7 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli)
ev->when = time(NULL);
ev->thd = thd; // because up to this point, ev->thd == 0
exec_res = ev->exec_event(rli);
- DBUG_PRINT("info", ("exec_event result = %d", exec_res));
+ DBUG_PRINT("info", ("exec_event result: %d", exec_res));
DBUG_ASSERT(rli->sql_thd==thd);
/*
Format_description_log_event should not be deleted because it will be
@@ -1951,9 +1951,9 @@ pthread_handler_t handle_slave_io(void *arg)
// we can get killed during safe_connect
if (!safe_connect(thd, mysql, mi))
{
- sql_print_information("Slave I/O thread: connected to master '%s@%s:%d',\
- replication started in log '%s' at position %s", mi->user,
- mi->host, mi->port,
+ sql_print_information("Slave I/O thread: connected to master '%s@%s:%d',"
+ "replication started in log '%s' at position %s",
+ mi->user, mi->host, mi->port,
IO_RPL_LOG_NAME,
llstr(mi->master_log_pos,llbuff));
/*
@@ -3107,8 +3107,8 @@ static int connect_to_master(THD* thd, MYSQL* mysql, MASTER_INFO* mi,
{
last_errno=mysql_errno(mysql);
suppress_warnings= 0;
- sql_print_error("Slave I/O thread: error %s to master \
-'%s@%s:%d': \
+ sql_print_error("Slave I/O thread: error %s to master "
+ "'%s@%s:%d': \
Error: '%s' errno: %d retry-time: %d retries: %lu",
(reconnect ? "reconnecting" : "connecting"),
mi->user, mi->host, mi->port,
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 47a623ec749..622d9efdde0 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -899,7 +899,7 @@ subst_spvars(THD *thd, sp_instr *instr, LEX_STRING *query_str)
break;
val= (*splocal)->this_item();
- DBUG_PRINT("info", ("print %p", val));
+ DBUG_PRINT("info", ("print 0x%lx", (long) val));
str_value= sp_get_item_value(val, &str_value_holder);
if (str_value)
res|= qbuf.append(*str_value);
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 28bc1e9dcbf..f1a685778f9 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -1087,7 +1087,7 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived)
if (!lock_in_use)
VOID(pthread_mutex_lock(&LOCK_open));
- DBUG_PRINT("info", ("thd->open_tables: %p", thd->open_tables));
+ DBUG_PRINT("info", ("thd->open_tables: 0x%lx", (long) thd->open_tables));
found_old_table= 0;
while (thd->open_tables)
@@ -1177,6 +1177,16 @@ static inline uint tmpkeyval(THD *thd, TABLE *table)
void close_temporary_tables(THD *thd)
{
TABLE *table;
+ TABLE *next;
+ /*
+ TODO: 5.1 maintains prev link in temporary_tables
+ double-linked list so we could fix it. But it is not necessary
+ at this time when the list is being destroyed
+ */
+ TABLE *prev_table;
+ /* Assume thd->options has OPTION_QUOTE_SHOW_CREATE */
+ bool was_quote_show= TRUE;
+
if (!thd->temporary_tables)
return;
@@ -1192,12 +1202,7 @@ void close_temporary_tables(THD *thd)
return;
}
- TABLE *next,
- *prev_table /* TODO: 5.1 maintaines prev link in temporary_tables
- double-linked list so we could fix it. But it is not necessary
- at this time when the list is being destroyed */;
- bool was_quote_show= true; /* to assume thd->options has OPTION_QUOTE_SHOW_CREATE */
- // Better add "if exists", in case a RESET MASTER has been done
+ /* Better add "if exists", in case a RESET MASTER has been done */
const char stub[]= "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS ";
uint stub_len= sizeof(stub) - 1;
char buf[256];
@@ -1303,7 +1308,7 @@ void close_temporary_tables(THD *thd)
}
}
if (!was_quote_show)
- thd->options &= ~OPTION_QUOTE_SHOW_CREATE; /* restore option */
+ thd->options&= ~OPTION_QUOTE_SHOW_CREATE; /* restore option */
thd->temporary_tables=0;
}
@@ -2069,7 +2074,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
VOID(pthread_mutex_unlock(&LOCK_open));
DBUG_RETURN(0); // VIEW
}
- DBUG_PRINT("info", ("inserting table %p into the cache", table));
+ DBUG_PRINT("info", ("inserting table 0x%lx into the cache", (long) table));
VOID(my_hash_insert(&open_cache,(byte*) table));
}
@@ -2399,7 +2404,7 @@ bool table_is_used(TABLE *table, bool wait_for_name_lock)
{
DBUG_PRINT("info", ("share: 0x%lx locked_by_logger: %d "
"locked_by_flush: %d locked_by_name: %d "
- "db_stat: %u version: %u",
+ "db_stat: %u version: %lu",
(ulong) search->s, search->locked_by_logger,
search->locked_by_flush, search->locked_by_name,
search->db_stat,
diff --git a/sql/sql_binlog.cc b/sql/sql_binlog.cc
index 23ca5330053..a48c0ac0b31 100644
--- a/sql/sql_binlog.cc
+++ b/sql/sql_binlog.cc
@@ -80,8 +80,9 @@ void mysql_client_binlog_statement(THD* thd)
int bytes_decoded= base64_decode(strptr, coded_len, buf, &endptr);
DBUG_PRINT("info",
- ("bytes_decoded=%d; strptr=0x%lu; endptr=0x%lu ('%c':%d)",
- bytes_decoded, strptr, endptr, *endptr, *endptr));
+ ("bytes_decoded: %d strptr: 0x%lx endptr: 0x%lx ('%c':%d)",
+ bytes_decoded, (long) strptr, (long) endptr, *endptr,
+ *endptr));
if (bytes_decoded < 0)
{
@@ -145,14 +146,15 @@ void mysql_client_binlog_statement(THD* thd)
bufptr += event_len;
DBUG_PRINT("info",("ev->get_type_code()=%d", ev->get_type_code()));
- DBUG_PRINT("info",("bufptr+EVENT_TYPE_OFFSET=0x%lx",
- bufptr+EVENT_TYPE_OFFSET));
- DBUG_PRINT("info", ("bytes_decoded=%d; bufptr=0x%lx; buf[EVENT_LEN_OFFSET]=%u",
- bytes_decoded, bufptr, uint4korr(bufptr+EVENT_LEN_OFFSET)));
+ DBUG_PRINT("info",("bufptr+EVENT_TYPE_OFFSET: 0x%lx",
+ (long) (bufptr+EVENT_TYPE_OFFSET)));
+ DBUG_PRINT("info", ("bytes_decoded: %d bufptr: 0x%lx buf[EVENT_LEN_OFFSET]: %lu",
+ bytes_decoded, (long) bufptr,
+ uint4korr(bufptr+EVENT_LEN_OFFSET)));
ev->thd= thd;
if (int err= ev->exec_event(thd->rli_fake))
{
- DBUG_PRINT("info", ("exec_event() - error=%d", error));
+ DBUG_PRINT("error", ("exec_event() returned: %d", err));
/*
TODO: Maybe a better error message since the BINLOG statement
now contains several events.
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 1d217cbe54c..9fc39685407 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -2981,7 +2981,7 @@ static TABLE_COUNTER_TYPE process_and_count_tables(TABLE_LIST *tables_used,
DBUG_PRINT("qcache", ("table: %s db: %s type: %u",
tables_used->table->s->table_name.str,
tables_used->table->s->db.str,
- tables_used->table->s->db_type));
+ tables_used->table->s->db_type->db_type));
if (tables_used->derived)
{
table_count--;
@@ -3037,7 +3037,7 @@ Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex,
lex->safe_to_cache_query)
{
DBUG_PRINT("qcache", ("options: %lx %lx type: %u",
- OPTION_TO_QUERY_CACHE,
+ (long) OPTION_TO_QUERY_CACHE,
(long) lex->select_lex.options,
(int) thd->variables.query_cache_type));
@@ -3057,7 +3057,7 @@ Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex,
DBUG_PRINT("qcache",
("not interesting query: %d or not cacheable, options %lx %lx type: %u",
(int) lex->sql_command,
- OPTION_TO_QUERY_CACHE,
+ (long) OPTION_TO_QUERY_CACHE,
(long) lex->select_lex.options,
(int) thd->variables.query_cache_type));
DBUG_RETURN(0);
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index ac93200266d..07510c1fbb0 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -551,7 +551,7 @@ void add_diff_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var,
void THD::awake(THD::killed_state state_to_set)
{
DBUG_ENTER("THD::awake");
- DBUG_PRINT("enter", ("this=0x%lx", this));
+ DBUG_PRINT("enter", ("this: 0x%lx", (long) this));
THD_CHECK_SENTRY(this);
safe_mutex_assert_owner(&LOCK_delete);
@@ -2623,9 +2623,9 @@ namespace {
return m_memory != 0;
}
- byte *slot(int const s)
+ byte *slot(uint s)
{
- DBUG_ASSERT(0 <= s && s < sizeof(m_ptr)/sizeof(*m_ptr));
+ DBUG_ASSERT(s < sizeof(m_ptr)/sizeof(*m_ptr));
DBUG_ASSERT(m_ptr[s] != 0);
DBUG_ASSERT(m_alloc_checked == true);
return m_ptr[s];
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index 0d893a6c9be..c448be04ac5 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -367,9 +367,9 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
strlen(tables->alias) + 1)))
{
table= hash_tables->table;
- DBUG_PRINT("info-in-hash",("'%s'.'%s' as '%s' tab %p",
+ DBUG_PRINT("info-in-hash",("'%s'.'%s' as '%s' table: 0x%lx",
hash_tables->db, hash_tables->table_name,
- hash_tables->alias, table));
+ hash_tables->alias, (long) table));
if (!table)
{
/*
@@ -633,7 +633,8 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags,
TABLE **table_ptr;
bool did_lock= FALSE;
DBUG_ENTER("mysql_ha_flush");
- DBUG_PRINT("enter", ("tables: %p mode_flags: 0x%02x", tables, mode_flags));
+ DBUG_PRINT("enter", ("tables: 0x%lx mode_flags: 0x%02x",
+ (long) tables, mode_flags));
if (tables)
{
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index c35ef4079d3..ffd32bea42a 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -1443,7 +1443,7 @@ bool st_select_lex::add_order_to_list(THD *thd, Item *item, bool asc)
bool st_select_lex::add_item_to_list(THD *thd, Item *item)
{
DBUG_ENTER("st_select_lex::add_item_to_list");
- DBUG_PRINT("info", ("Item: %p", item));
+ DBUG_PRINT("info", ("Item: 0x%lx", (long) item));
DBUG_RETURN(item_list.push_back(item));
}
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 8ccf7116d95..2c130a45f77 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -1604,7 +1604,7 @@ bool do_command(THD *thd)
command= COM_END; // Wrong command
DBUG_PRINT("info",("Command on %s = %d (%s)",
vio_description(net->vio), command,
- command_name[command]));
+ command_name[command].str));
}
net->read_timeout=old_timeout; // restore it
/*
@@ -1828,7 +1828,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
char *packet_end= thd->query + thd->query_length;
/* 'b' stands for 'buffer' parameter', special for 'my_snprintf' */
const char *format= "%.*b";
- general_log.write(thd, command, format, thd->query_length, thd->query);
+ general_log_print(thd, command, format, thd->query_length, thd->query);
DBUG_PRINT("query",("%-.4096s",thd->query));
if (!(specialflag & SPECIAL_NO_PRIOR))
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 8df527fd25b..266a5bad34d 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -4480,7 +4480,7 @@ that are reorganised.
{
if (!alt_part_info->use_default_partitions)
{
- DBUG_PRINT("info", ("part_info= %x", tab_part_info));
+ DBUG_PRINT("info", ("part_info: 0x%lx", (long) tab_part_info));
tab_part_info->use_default_partitions= FALSE;
}
tab_part_info->use_default_no_partitions= FALSE;
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index c505f8c0fbc..0c6a5fe5846 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -1918,7 +1918,7 @@ void mysql_stmt_prepare(THD *thd, const char *packet, uint packet_length)
else
{
const char *format= "[%lu] %.*b";
- general_log.write(thd, COM_STMT_PREPARE, format, stmt->id,
+ general_log_print(thd, COM_STMT_PREPARE, format, stmt->id,
stmt->query_length, stmt->query);
}
@@ -2265,7 +2265,7 @@ void mysql_stmt_execute(THD *thd, char *packet_arg, uint packet_length)
DBUG_VOID_RETURN;
DBUG_PRINT("exec_query", ("%s", stmt->query));
- DBUG_PRINT("info",("stmt: %p", stmt));
+ DBUG_PRINT("info",("stmt: 0x%lx", (long) stmt));
sp_cache_flush_obsolete(&thd->sp_proc_cache);
sp_cache_flush_obsolete(&thd->sp_func_cache);
@@ -2305,9 +2305,9 @@ void mysql_stmt_execute(THD *thd, char *packet_arg, uint packet_length)
if (error == 0)
{
const char *format= "[%lu] %.*b";
- general_log.write(thd, COM_STMT_EXECUTE, format, stmt->id,
+ general_log_print(thd, COM_STMT_EXECUTE, format, stmt->id,
thd->query_length, thd->query);
-
+ }
DBUG_VOID_RETURN;
set_params_data_err:
@@ -2360,7 +2360,7 @@ void mysql_sql_stmt_execute(THD *thd)
DBUG_VOID_RETURN;
}
- DBUG_PRINT("info",("stmt: %p", stmt));
+ DBUG_PRINT("info",("stmt: 0x%lx", (long) stmt));
/*
If the free_list is not empty, we'll wrongly free some externally
@@ -2724,7 +2724,8 @@ void Prepared_statement::setup_set_params()
Prepared_statement::~Prepared_statement()
{
DBUG_ENTER("Prepared_statement::~Prepared_statement");
- DBUG_PRINT("enter",("stmt: %p cursor: %p", this, cursor));
+ DBUG_PRINT("enter",("stmt: 0x%lx cursor: 0x%lx",
+ (long) this, (long) cursor));
delete cursor;
/*
We have to call free on the items even if cleanup is called as some items,
@@ -2745,7 +2746,7 @@ Query_arena::Type Prepared_statement::type() const
void Prepared_statement::cleanup_stmt()
{
DBUG_ENTER("Prepared_statement::cleanup_stmt");
- DBUG_PRINT("enter",("stmt: %p", this));
+ DBUG_PRINT("enter",("stmt: 0x%lx", (long) this));
/* The order is important */
lex->unit.cleanup();
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 511d9fa6677..a0149b1a34d 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -3743,7 +3743,7 @@ static void wait_while_table_is_used(THD *thd,TABLE *table,
enum ha_extra_function function)
{
DBUG_ENTER("wait_while_table_is_used");
- DBUG_PRINT("enter", ("table: '%s' share: 0x%lx db_stat: %u version: %u",
+ DBUG_PRINT("enter", ("table: '%s' share: 0x%lx db_stat: %u version: %lu",
table->s->table_name.str, (ulong) table->s,
table->db_stat, table->s->version));
diff --git a/sql/sql_test.cc b/sql/sql_test.cc
index c4c40ea63c8..219ca8260ed 100644
--- a/sql/sql_test.cc
+++ b/sql/sql_test.cc
@@ -248,14 +248,15 @@ print_plan(JOIN* join, uint idx, double record_count, double read_time,
if (join->best_read == DBL_MAX)
{
fprintf(DBUG_FILE,
- "%s; idx:%u, best: DBL_MAX, atime: %g, itime: %g, count: %g\n",
- info, idx, current_read_time, read_time, record_count);
+ "%s; idx: %u best: DBL_MAX atime: %g itime: %g count: %g\n",
+ info, idx, current_read_time, read_time, record_count);
}
else
{
fprintf(DBUG_FILE,
- "%s; idx:%u, best: %g, accumulated: %g, increment: %g, count: %g\n",
- info, idx, join->best_read, current_read_time, read_time, record_count);
+ "%s; idx: %u best: %g accumulated: %g increment: %g count: %g\n",
+ info, idx, join->best_read, current_read_time, read_time,
+ record_count);
}
/* Print the tables in JOIN->positions */
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index fb56b7ae3b0..8baf84585b2 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -1612,7 +1612,7 @@ Handle_old_incorrect_sql_modes_hook::process_unknown_string(char *&unknown_key,
char *end)
{
DBUG_ENTER("Handle_old_incorrect_sql_modes_hook::process_unknown_string");
- DBUG_PRINT("info", ("unknown key:%60s", unknown_key));
+ DBUG_PRINT("info", ("unknown key: %60s", unknown_key));
if (unknown_key + INVALID_SQL_MODES_LENGTH + 1 < end &&
unknown_key[INVALID_SQL_MODES_LENGTH] == '=' &&
@@ -1654,7 +1654,7 @@ process_unknown_string(char *&unknown_key, gptr base, MEM_ROOT *mem_root,
char *end)
{
DBUG_ENTER("Handle_old_incorrect_trigger_table_hook::process_unknown_string");
- DBUG_PRINT("info", ("unknown key:%60s", unknown_key));
+ DBUG_PRINT("info", ("unknown key: %60s", unknown_key));
if (unknown_key + INVALID_TRIGGER_TABLE_LENGTH + 1 < end &&
unknown_key[INVALID_TRIGGER_TABLE_LENGTH] == '=' &&
diff --git a/sql/table.cc b/sql/table.cc
index c2b76a21a8e..0ddaf99810d 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -1339,7 +1339,7 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias,
Field **field_ptr;
DBUG_ENTER("open_table_from_share");
DBUG_PRINT("enter",("name: '%s.%s' form: 0x%lx", share->db.str,
- share->table_name.str, outparam));
+ share->table_name.str, (long) outparam));
error= 1;
bzero((char*) outparam, sizeof(*outparam));
@@ -2401,8 +2401,8 @@ table_check_intact(TABLE *table, const uint table_f_count,
my_bool error= FALSE;
my_bool fields_diff_count;
DBUG_ENTER("table_check_intact");
- DBUG_PRINT("info",("table=%s expected_count=%d",table->alias, table_f_count));
- DBUG_PRINT("info",("last_create_time=%d", *last_create_time));
+ DBUG_PRINT("info",("table: %s expected_count: %d last_create_time: %ld",
+ table->alias, table_f_count, *last_create_time));
if ((fields_diff_count= (table->s->fields != table_f_count)) ||
(*last_create_time != table->file->stats.create_time))