Diffstat (limited to 'storage')
-rw-r--r--  storage/archive/ha_archive.cc | 2
-rw-r--r--  storage/connect/jsonudf.cpp | 2
-rw-r--r--  storage/connect/plugutil.cpp | 2
-rw-r--r--  storage/connect/tabmysql.cpp | 6
-rw-r--r--  storage/federated/ha_federated.cc | 18
-rw-r--r--  storage/federatedx/ha_federatedx.cc | 18
-rw-r--r--  storage/heap/ha_heap.cc | 2
-rw-r--r--  storage/innobase/CMakeLists.txt | 7
-rw-r--r--  storage/innobase/btr/btr0cur.cc | 14
-rw-r--r--  storage/innobase/buf/buf0buf.cc | 2
-rw-r--r--  storage/innobase/buf/buf0rea.cc | 4
-rw-r--r--  storage/innobase/dict/dict0crea.cc | 10
-rw-r--r--  storage/innobase/dict/dict0dict.cc | 27
-rw-r--r--  storage/innobase/dict/dict0load.cc | 2
-rw-r--r--  storage/innobase/fil/fil0crypt.cc | 4
-rw-r--r--  storage/innobase/fil/fil0fil.cc | 58
-rw-r--r--  storage/innobase/fil/fil0pagecompress.cc | 12
-rw-r--r--  storage/innobase/fts/fts0fts.cc | 4
-rw-r--r--  storage/innobase/handler/ha_innodb.cc | 138
-rw-r--r--  storage/innobase/handler/handler0alter.cc | 384
-rw-r--r--  storage/innobase/include/dict0crea.h | 5
-rw-r--r--  storage/innobase/include/dict0dict.h | 7
-rw-r--r--  storage/innobase/include/dict0mem.h | 4
-rw-r--r--  storage/innobase/include/fsp0pagecompress.ic | 2
-rw-r--r--  storage/innobase/include/ha_prototypes.h | 2
-rw-r--r--  storage/innobase/include/lock0lock.h | 15
-rw-r--r--  storage/innobase/include/os0file.h | 42
-rw-r--r--  storage/innobase/include/row0mysql.h | 1
-rw-r--r--  storage/innobase/include/row0upd.h | 3
-rw-r--r--  storage/innobase/include/row0upd.ic | 14
-rw-r--r--  storage/innobase/include/sync0types.h | 10
-rw-r--r--  storage/innobase/include/trx0rec.h | 1
-rw-r--r--  storage/innobase/lock/lock0lock.cc | 5
-rw-r--r--  storage/innobase/lock/lock0wait.cc | 7
-rw-r--r--  storage/innobase/os/os0file.cc | 80
-rw-r--r--  storage/innobase/pars/pars0pars.cc | 2
-rw-r--r--  storage/innobase/row/row0merge.cc | 6
-rw-r--r--  storage/innobase/row/row0mysql.cc | 11
-rw-r--r--  storage/innobase/row/row0purge.cc | 5
-rw-r--r--  storage/innobase/row/row0sel.cc | 6
-rw-r--r--  storage/innobase/row/row0umod.cc | 2
-rw-r--r--  storage/innobase/row/row0upd.cc | 8
-rw-r--r--  storage/innobase/srv/srv0start.cc | 3
-rw-r--r--  storage/innobase/trx/trx0rec.cc | 9
-rw-r--r--  storage/innobase/ut/ut0ut.cc | 13
-rw-r--r--  storage/maria/ha_maria.cc | 16
-rw-r--r--  storage/maria/ma_bitmap.c | 4
-rw-r--r--  storage/maria/ma_blockrec.c | 2
-rw-r--r--  storage/maria/ma_check.c | 14
-rw-r--r--  storage/maria/ma_checkpoint.c | 11
-rw-r--r--  storage/maria/ma_close.c | 4
-rw-r--r--  storage/maria/ma_commit.c | 2
-rw-r--r--  storage/maria/ma_control_file.c | 6
-rw-r--r--  storage/maria/ma_create.c | 59
-rw-r--r--  storage/maria/ma_delete.c | 16
-rw-r--r--  storage/maria/ma_dynrec.c | 4
-rw-r--r--  storage/maria/ma_key.c | 4
-rw-r--r--  storage/maria/ma_keycache.c | 4
-rw-r--r--  storage/maria/ma_loghandler.c | 490
-rw-r--r--  storage/maria/ma_loghandler_lsn.h | 3
-rw-r--r--  storage/maria/ma_open.c | 2
-rw-r--r--  storage/maria/ma_page.c | 4
-rw-r--r--  storage/maria/ma_pagecache.c | 20
-rw-r--r--  storage/maria/ma_recovery.c | 64
-rw-r--r--  storage/maria/ma_rkey.c | 4
-rw-r--r--  storage/maria/ma_rt_index.c | 8
-rw-r--r--  storage/maria/ma_rt_key.c | 2
-rw-r--r--  storage/maria/ma_rt_split.c | 10
-rw-r--r--  storage/maria/ma_search.c | 56
-rw-r--r--  storage/maria/ma_servicethread.c | 6
-rw-r--r--  storage/maria/ma_sort.c | 10
-rw-r--r--  storage/maria/ma_state.c | 8
-rw-r--r--  storage/maria/ma_write.c | 14
-rw-r--r--  storage/maria/maria_chk.c | 4
-rw-r--r--  storage/maria/maria_read_log.c | 8
-rw-r--r--  storage/maria/unittest/ma_test_loghandler-t.c | 30
-rw-r--r--  storage/maria/unittest/ma_test_loghandler_first_lsn-t.c | 6
-rw-r--r--  storage/maria/unittest/ma_test_loghandler_max_lsn-t.c | 6
-rw-r--r--  storage/maria/unittest/ma_test_loghandler_multigroup-t.c | 26
-rw-r--r--  storage/maria/unittest/ma_test_loghandler_multithread-t.c | 6
-rw-r--r--  storage/maria/unittest/ma_test_loghandler_noflush-t.c | 2
-rw-r--r--  storage/myisam/ha_myisam.cc | 4
-rw-r--r--  storage/myisam/mi_check.c | 6
-rw-r--r--  storage/myisam/mi_close.c | 4
-rw-r--r--  storage/myisam/mi_delete.c | 14
-rw-r--r--  storage/myisam/mi_dynrec.c | 4
-rw-r--r--  storage/myisam/mi_preload.c | 6
-rw-r--r--  storage/myisam/mi_rkey.c | 4
-rw-r--r--  storage/myisam/mi_search.c | 50
-rw-r--r--  storage/myisam/mi_write.c | 14
-rw-r--r--  storage/myisam/myisamchk.c | 4
-rw-r--r--  storage/myisammrg/ha_myisammrg.cc | 22
-rw-r--r--  storage/perfschema/pfs.cc | 6
-rw-r--r--  storage/perfschema/pfs_account.cc | 2
-rw-r--r--  storage/perfschema/pfs_engine_table.cc | 2
-rw-r--r--  storage/perfschema/pfs_host.cc | 2
-rw-r--r--  storage/perfschema/pfs_instr.cc | 4
-rw-r--r--  storage/perfschema/pfs_instr_class.cc | 2
-rw-r--r--  storage/perfschema/pfs_setup_actor.cc | 2
-rw-r--r--  storage/perfschema/pfs_setup_object.cc | 2
-rw-r--r--  storage/perfschema/pfs_user.cc | 2
-rw-r--r--  storage/perfschema/table_events_waits.cc | 2
-rw-r--r--  storage/rocksdb/ha_rocksdb.cc | 8
-rw-r--r--  storage/rocksdb/rdb_sst_info.cc | 2
-rw-r--r--  storage/sphinx/ha_sphinx.cc | 54
-rw-r--r--  storage/sphinx/snippets_udf.cc | 4
-rw-r--r--  storage/spider/ha_spider.cc | 2
-rw-r--r--  storage/spider/hs_client/config.cpp | 4
-rw-r--r--  storage/spider/hs_client/hstcpcli.cpp | 2
-rw-r--r--  storage/spider/hs_client/socket.cpp | 6
-rw-r--r--  storage/spider/spd_sys_table.cc | 4
-rw-r--r--  storage/xtradb/btr/btr0cur.cc | 14
-rw-r--r--  storage/xtradb/fil/fil0pagecompress.cc | 12
-rw-r--r--  storage/xtradb/handler/ha_innodb.cc | 2
-rw-r--r--  storage/xtradb/include/lock0lock.h | 2
-rw-r--r--  storage/xtradb/lock/lock0lock.cc | 4
-rw-r--r--  storage/xtradb/lock/lock0wait.cc | 4
-rw-r--r--  storage/xtradb/log/log0crypt.cc | 2
-rw-r--r--  storage/xtradb/row/row0log.cc | 29
-rw-r--r--  storage/xtradb/row/row0merge.cc | 19
-rw-r--r--  storage/xtradb/row/row0upd.cc | 7
121 files changed, 878 insertions, 1375 deletions
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index 7f83905547b..afa39e6a5f7 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -409,7 +409,7 @@ unsigned int ha_archive::pack_row_v1(uchar *record)
pos+= length;
}
}
- DBUG_RETURN(pos - record_buffer->buffer);
+ DBUG_RETURN((int)(pos - record_buffer->buffer));
}
/*
diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp
index 29bccc4afeb..2733479a4e6 100644
--- a/storage/connect/jsonudf.cpp
+++ b/storage/connect/jsonudf.cpp
@@ -1564,7 +1564,7 @@ static PCSZ MakeKey(PGLOBAL g, UDF_ARGS *args, int i)
n = strlen(s);
if (IsJson(args, i))
- j = strchr(s, '_') - s + 1;
+ j = (int)(strchr(s, '_') - s + 1);
if (j && n > j) {
s += j;
diff --git a/storage/connect/plugutil.cpp b/storage/connect/plugutil.cpp
index d63674e2e36..dbf90fb5599 100644
--- a/storage/connect/plugutil.cpp
+++ b/storage/connect/plugutil.cpp
@@ -534,7 +534,7 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size)
if (trace)
htrc("PlugSubAlloc: %s\n", g->Message);
- throw 1234;
+ abort();
} /* endif size OS32 code */
/*********************************************************************/
diff --git a/storage/connect/tabmysql.cpp b/storage/connect/tabmysql.cpp
index d1e2ae69608..b9c97bd2f40 100644
--- a/storage/connect/tabmysql.cpp
+++ b/storage/connect/tabmysql.cpp
@@ -124,8 +124,8 @@ bool MYSQLDEF::GetServerInfo(PGLOBAL g, const char *server_name)
DBUG_RETURN(true);
} // endif server
- DBUG_PRINT("info", ("get_server_by_name returned server at %lx",
- (size_t) server));
+ DBUG_PRINT("info", ("get_server_by_name returned server at %p",
+ server));
// TODO: We need to examine which of these can really be NULL
Hostname = PlugDup(g, server->host);
@@ -681,7 +681,7 @@ bool TDBMYSQL::MakeCommand(PGLOBAL g)
strlwr(strcpy(name, Name)); // Not a keyword
if ((p = strstr(qrystr, name))) {
- Query->Set(Qrystr, p - qrystr);
+ Query->Set(Qrystr, (uint)(p - qrystr));
if (qtd && *(p-1) == ' ') {
Query->Append('`');
diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc
index 059113e2fa5..e6fbceb4af2 100644
--- a/storage/federated/ha_federated.cc
+++ b/storage/federated/ha_federated.cc
@@ -614,8 +614,8 @@ int get_connection(MEM_ROOT *mem_root, FEDERATED_SHARE *share)
error_num=1;
goto error;
}
- DBUG_PRINT("info", ("get_server_by_name returned server at %lx",
- (long unsigned int) server));
+ DBUG_PRINT("info", ("get_server_by_name returned server at %p",
+ server));
/*
Most of these should never be empty strings, error handling will
@@ -716,15 +716,15 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATED_SHARE *share, TABLE *table,
share->port= 0;
share->socket= 0;
- DBUG_PRINT("info", ("share at %lx", (long unsigned int) share));
+ DBUG_PRINT("info", ("share at %p", share));
DBUG_PRINT("info", ("Length: %u", (uint) table->s->connect_string.length));
DBUG_PRINT("info", ("String: '%.*s'", (int) table->s->connect_string.length,
table->s->connect_string.str));
share->connection_string= strmake_root(mem_root, table->s->connect_string.str,
table->s->connect_string.length);
- DBUG_PRINT("info",("parse_url alloced share->connection_string %lx",
- (long unsigned int) share->connection_string));
+ DBUG_PRINT("info",("parse_url alloced share->connection_string %p",
+ share->connection_string));
DBUG_PRINT("info",("share->connection_string %s",share->connection_string));
/*
@@ -737,9 +737,9 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATED_SHARE *share, TABLE *table,
DBUG_PRINT("info",
("share->connection_string %s internal format \
- share->connection_string %lx",
+ share->connection_string %p",
share->connection_string,
- (long unsigned int) share->connection_string));
+ share->connection_string));
/* ok, so we do a little parsing, but not completely! */
share->parsed= FALSE;
@@ -793,8 +793,8 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATED_SHARE *share, TABLE *table,
// Add a null for later termination of table name
share->connection_string[table->s->connect_string.length]= 0;
share->scheme= share->connection_string;
- DBUG_PRINT("info",("parse_url alloced share->scheme %lx",
- (long unsigned int) share->scheme));
+ DBUG_PRINT("info",("parse_url alloced share->scheme %p",
+ share->scheme));
/*
remove addition of null terminator and store length
diff --git a/storage/federatedx/ha_federatedx.cc b/storage/federatedx/ha_federatedx.cc
index 63290e5feda..a761c635305 100644
--- a/storage/federatedx/ha_federatedx.cc
+++ b/storage/federatedx/ha_federatedx.cc
@@ -553,8 +553,8 @@ int get_connection(MEM_ROOT *mem_root, FEDERATEDX_SHARE *share)
error_num=1;
goto error;
}
- DBUG_PRINT("info", ("get_server_by_name returned server at %lx",
- (long unsigned int) server));
+ DBUG_PRINT("info", ("get_server_by_name returned server at %p",
+ server));
/*
Most of these should never be empty strings, error handling will
@@ -655,15 +655,15 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATEDX_SHARE *share,
share->port= 0;
share->socket= 0;
- DBUG_PRINT("info", ("share at %lx", (long unsigned int) share));
+ DBUG_PRINT("info", ("share at %p", share));
DBUG_PRINT("info", ("Length: %u", (uint) table_s->connect_string.length));
DBUG_PRINT("info", ("String: '%.*s'", (int) table_s->connect_string.length,
table_s->connect_string.str));
share->connection_string= strmake_root(mem_root, table_s->connect_string.str,
table_s->connect_string.length);
- DBUG_PRINT("info",("parse_url alloced share->connection_string %lx",
- (long unsigned int) share->connection_string));
+ DBUG_PRINT("info",("parse_url alloced share->connection_string %p",
+ share->connection_string));
DBUG_PRINT("info",("share->connection_string: %s",share->connection_string));
/*
@@ -676,9 +676,9 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATEDX_SHARE *share,
DBUG_PRINT("info",
("share->connection_string: %s internal format "
- "share->connection_string: %lx",
+ "share->connection_string: %p",
share->connection_string,
- (ulong) share->connection_string));
+ share->connection_string));
/* ok, so we do a little parsing, but not completely! */
share->parsed= FALSE;
@@ -731,8 +731,8 @@ static int parse_url(MEM_ROOT *mem_root, FEDERATEDX_SHARE *share,
// Add a null for later termination of table name
share->connection_string[table_s->connect_string.length]= 0;
share->scheme= share->connection_string;
- DBUG_PRINT("info",("parse_url alloced share->scheme: %lx",
- (ulong) share->scheme));
+ DBUG_PRINT("info",("parse_url alloced share->scheme: %p",
+ share->scheme));
/*
Remove addition of null terminator and store length
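
The DBUG_PRINT changes in ha_federated.cc and ha_federatedx.cc above (and in connect/tabmysql.cpp) replace "%lx" plus a cast with "%p" because on LLP64 targets such as 64-bit Windows `unsigned long` is only 32 bits wide, so the old cast truncated the pointer value. A minimal standalone illustration of the difference (hypothetical program, not part of the patch):

    #include <cstdio>

    int main()
    {
            void* p = &p;
            // On LLP64 targets "unsigned long" is 32 bits, so this cast can
            // silently drop the upper half of the pointer value.
            std::printf("truncated: %lx\n", (unsigned long) p);
            // "%p" prints the full pointer portably, which is what the
            // DBUG_PRINT calls above were changed to use.
            std::printf("full:      %p\n", p);
            return 0;
    }
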
diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc
index ef6530e98e1..f370b282b07 100644
--- a/storage/heap/ha_heap.cc
+++ b/storage/heap/ha_heap.cc
@@ -216,7 +216,7 @@ void ha_heap::update_key_stats()
else
{
ha_rows hash_buckets= file->s->keydef[i].hash_buckets;
- ha_rows no_records= hash_buckets ? (file->s->records/hash_buckets) : 2;
+ ulong no_records= hash_buckets ? (ulong)(file->s->records/hash_buckets) : 2;
if (no_records < 2)
no_records= 2;
key->rec_per_key[key->user_defined_key_parts-1]= no_records;
diff --git a/storage/innobase/CMakeLists.txt b/storage/innobase/CMakeLists.txt
index 061ff1ce2ac..d291f6f7c37 100644
--- a/storage/innobase/CMakeLists.txt
+++ b/storage/innobase/CMakeLists.txt
@@ -163,6 +163,7 @@ IF(NOT TARGET innobase)
RETURN()
ENDIF()
+INCLUDE(${MYSQL_CMAKE_SCRIPT_DIR}/compile_flags.cmake)
# A GCC bug causes crash when compiling these files on ARM64 with -O1+
# Compile them with -O0 as a workaround.
IF(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
@@ -170,7 +171,6 @@ IF(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
EXECUTE_PROCESS(COMMAND ${CMAKE_C_COMPILER} -dumpversion
OUTPUT_VARIABLE GCC_VERSION)
IF(GCC_VERSION VERSION_LESS 5.2)
- INCLUDE(${MYSQL_CMAKE_SCRIPT_DIR}/compile_flags.cmake)
ADD_COMPILE_FLAGS(
btr/btr0btr.cc
btr/btr0cur.cc
@@ -180,5 +180,10 @@ IF(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
)
ENDIF()
ENDIF()
+IF(MSVC)
+  # Silence "switch statement contains 'default' but no 'case' labels"
+  # warning on a generated file.
+ TARGET_COMPILE_OPTIONS(innobase PRIVATE "/wd4065")
+ENDIF()
ADD_SUBDIRECTORY(${CMAKE_SOURCE_DIR}/extra/mariabackup ${CMAKE_BINARY_DIR}/extra/mariabackup)
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 25498a2530c..bb13b6a71d3 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -5055,7 +5055,6 @@ btr_cur_pessimistic_delete(
ulint n_reserved = 0;
bool success;
ibool ret = FALSE;
- ulint level;
mem_heap_t* heap;
ulint* offsets;
#ifdef UNIV_DEBUG
@@ -5113,6 +5112,10 @@ btr_cur_pessimistic_delete(
#endif /* UNIV_ZIP_DEBUG */
}
+ if (flags == 0) {
+ lock_update_delete(block, rec);
+ }
+
if (UNIV_UNLIKELY(page_get_n_recs(page) < 2)
&& UNIV_UNLIKELY(dict_index_get_page(index)
!= block->page.id.page_no())) {
@@ -5127,13 +5130,7 @@ btr_cur_pessimistic_delete(
goto return_after_reservations;
}
- if (flags == 0) {
- lock_update_delete(block, rec);
- }
-
- level = btr_page_get_level(page, mtr);
-
- if (level == 0) {
+ if (page_is_leaf(page)) {
btr_search_update_hash_on_delete(cursor);
} else if (UNIV_UNLIKELY(page_rec_is_first(rec, page))) {
rec_t* next_rec = page_rec_get_next(rec);
@@ -5188,6 +5185,7 @@ btr_cur_pessimistic_delete(
on a page, we have to change the parent node pointer
so that it is equal to the new leftmost node pointer
on the page */
+ ulint level = btr_page_get_level(page, mtr);
btr_node_ptr_delete(index, block, mtr);
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index eac8cf7c924..5ce8b1d584d 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -790,7 +790,7 @@ buf_page_is_checksum_valid_none(
&& srv_checksum_algorithm == SRV_CHECKSUM_ALGORITHM_STRICT_NONE) {
fprintf(log_file,
"page::%llu; none checksum: calculated"
- " = " ULINTPF "; recorded checksum_field1 = " ULINTPF
+ " = %lu; recorded checksum_field1 = " ULINTPF
" recorded checksum_field2 = " ULINTPF "\n",
cur_page_num, BUF_NO_CHECKSUM_MAGIC,
checksum_field1, checksum_field2);
diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc
index 3c2b697b0f4..f7ea768f5c1 100644
--- a/storage/innobase/buf/buf0rea.cc
+++ b/storage/innobase/buf/buf0rea.cc
@@ -292,8 +292,8 @@ buf_read_ahead_random(
node != NULL;
node = UT_LIST_GET_NEXT(chain, node)) {
- size += os_file_get_size(node->handle)
- / page_size.physical();
+ size += ulint(os_file_get_size(node->handle)
+ / page_size.physical());
}
ut_ad(size == space->size);
diff --git a/storage/innobase/dict/dict0crea.cc b/storage/innobase/dict/dict0crea.cc
index 1c28a39a62e..8a06cd1db2c 100644
--- a/storage/innobase/dict/dict0crea.cc
+++ b/storage/innobase/dict/dict0crea.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -2551,7 +2552,6 @@ replacing what was there previously.
@param[in] flags Tablespace flags
@param[in] path Tablespace path
@param[in] trx Transaction
-@param[in] commit If true, commit the transaction
@return error code or DB_SUCCESS */
dberr_t
dict_replace_tablespace_in_dictionary(
@@ -2559,8 +2559,7 @@ dict_replace_tablespace_in_dictionary(
const char* name,
ulint flags,
const char* path,
- trx_t* trx,
- bool commit)
+ trx_t* trx)
{
if (!srv_sys_tablespaces_open) {
/* Startup procedure is not yet ready for updates. */
@@ -2609,11 +2608,6 @@ dict_replace_tablespace_in_dictionary(
return(error);
}
- if (commit) {
- trx->op_info = "committing tablespace and datafile definition";
- trx_commit(trx);
- }
-
trx->op_info = "";
return(error);
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index ecd8839c36e..d533cfad47a 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -1275,31 +1275,6 @@ dict_table_add_system_columns(
#endif
}
-/** Mark if table has big rows.
-@param[in,out] table table handler */
-void
-dict_table_set_big_rows(
- dict_table_t* table)
-{
- ulint row_len = 0;
- for (ulint i = 0; i < table->n_def; i++) {
- ulint col_len = dict_col_get_max_size(
- dict_table_get_nth_col(table, i));
-
- row_len += col_len;
-
- /* If we have a single unbounded field, or several gigantic
- fields, mark the maximum row size as BIG_ROW_SIZE. */
- if (row_len >= BIG_ROW_SIZE || col_len >= BIG_ROW_SIZE) {
- row_len = BIG_ROW_SIZE;
-
- break;
- }
- }
-
- table->big_rows = (row_len >= BIG_ROW_SIZE) ? TRUE : FALSE;
-}
-
/**********************************************************************//**
Adds a table object to the dictionary cache. */
void
@@ -1322,8 +1297,6 @@ dict_table_add_to_cache(
fold = ut_fold_string(table->name.m_name);
id_fold = ut_fold_ull(table->id);
- dict_table_set_big_rows(table);
-
/* Look for a table with the same name: error if such exists */
{
dict_table_t* table2;
diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc
index 6193a8f66f5..47cc4811272 100644
--- a/storage/innobase/dict/dict0load.cc
+++ b/storage/innobase/dict/dict0load.cc
@@ -990,7 +990,7 @@ dict_replace_tablespace_and_filepath(
SYS_DATAFILES. Assume the record is also missing in
SYS_TABLESPACES. Insert records into them both. */
err = dict_replace_tablespace_in_dictionary(
- space_id, name, fsp_flags, filepath, trx, false);
+ space_id, name, fsp_flags, filepath, trx);
trx_commit_for_mysql(trx);
trx->dict_operation_lock_mode = 0;
diff --git a/storage/innobase/fil/fil0crypt.cc b/storage/innobase/fil/fil0crypt.cc
index 5e7e5a2f21b..08a832a4cd5 100644
--- a/storage/innobase/fil/fil0crypt.cc
+++ b/storage/innobase/fil/fil0crypt.cc
@@ -1324,7 +1324,7 @@ fil_crypt_realloc_iops(
if (10 * state->cnt_waited > state->batch) {
/* if we waited more than 10% re-estimate max_iops */
ulint avg_wait_time_us =
- state->sum_waited_us / state->cnt_waited;
+ ulint(state->sum_waited_us / state->cnt_waited);
if (avg_wait_time_us == 0) {
avg_wait_time_us = 1; // prevent division by zero
@@ -1669,7 +1669,7 @@ fil_crypt_get_page_throttle_func(
/* average page load */
ulint add_sleeptime_ms = 0;
- ulint avg_wait_time_us = state->sum_waited_us / state->cnt_waited;
+	ulint avg_wait_time_us = ulint(state->sum_waited_us / state->cnt_waited);
ulint alloc_wait_us = 1000000 / state->allocated_iops;
if (avg_wait_time_us < alloc_wait_us) {
diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc
index f34619663ad..e408b6d88bb 100644
--- a/storage/innobase/fil/fil0fil.cc
+++ b/storage/innobase/fil/fil0fil.cc
@@ -1062,32 +1062,23 @@ fil_space_extend_must_retry(
const ulint page_size = pageSize.physical();
#ifdef _WIN32
- /* Logically or physically extend the file with zero bytes,
- depending on whether it is sparse. */
-
- /* FIXME: Call DeviceIoControl(node->handle, FSCTL_SET_SPARSE, ...)
- when opening a file when FSP_FLAGS_HAS_PAGE_COMPRESSION(). */
- {
- FILE_END_OF_FILE_INFO feof;
- /* fil_read_first_page() expects UNIV_PAGE_SIZE bytes.
- fil_node_open_file() expects at least 4 * UNIV_PAGE_SIZE bytes.
- Do not shrink short ROW_FORMAT=COMPRESSED files. */
- feof.EndOfFile.QuadPart = std::max(
+ os_offset_t new_file_size =
+ std::max(
os_offset_t(size - file_start_page_no) * page_size,
- os_offset_t(FIL_IBD_FILE_INITIAL_SIZE
- * UNIV_PAGE_SIZE));
- *success = SetFileInformationByHandle(node->handle,
- FileEndOfFileInfo,
- &feof, sizeof feof);
- if (!*success) {
- ib::error() << "extending file '" << node->name
- << "' from "
- << os_offset_t(node->size) * page_size
- << " to " << feof.EndOfFile.QuadPart
- << " bytes failed with " << GetLastError();
- } else {
- last_page_no = size;
- }
+ os_offset_t(FIL_IBD_FILE_INITIAL_SIZE * UNIV_PAGE_SIZE));
+
+ /* os_file_change_size_win32() handles both compressed(sparse)
+ and normal files correctly.
+ It allocates physical storage for normal files and "virtual"
+ storage for sparse ones.*/
+ *success = os_file_change_size_win32(node->name,
+ node->handle, new_file_size);
+
+ if (*success) {
+ last_page_no = size;
+ } else {
+ ib::error() << "extending file '" << node->name
+			<< "' to size " << new_file_size << " failed";
}
#else
/* We will logically extend the file with ftruncate() if
@@ -3843,7 +3834,19 @@ fil_ibd_create(
return(DB_ERROR);
}
- success= false;
+ bool punch_hole = false;
+
+#ifdef _WIN32
+
+ if (FSP_FLAGS_HAS_PAGE_COMPRESSION(flags)) {
+ punch_hole = os_file_set_sparse_win32(file);
+ }
+
+ success = os_file_change_size_win32(path, file, size * UNIV_PAGE_SIZE);
+
+#else
+
+ success= false;
#ifdef HAVE_POSIX_FALLOCATE
/*
Extend the file using posix_fallocate(). This is required by
@@ -3882,7 +3885,7 @@ fil_ibd_create(
be lost after this call, if it succeeds. In this case the file
should be full of NULs. */
- bool punch_hole = os_is_sparse_file_supported(path, file);
+ punch_hole = os_is_sparse_file_supported(file);
if (punch_hole) {
@@ -3894,6 +3897,7 @@ fil_ibd_create(
punch_hole = false;
}
}
+#endif
ulint block_size = os_file_get_block_size(file, path);
diff --git a/storage/innobase/fil/fil0pagecompress.cc b/storage/innobase/fil/fil0pagecompress.cc
index 58fbb4cd9ef..cddeb6a2d28 100644
--- a/storage/innobase/fil/fil0pagecompress.cc
+++ b/storage/innobase/fil/fil0pagecompress.cc
@@ -100,6 +100,9 @@ fil_compress_page(
int comp_level = int(level);
ulint header_len = FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE;
ulint write_size = 0;
+#if HAVE_LZO
+ lzo_uint write_size_lzo = write_size;
+#endif
/* Cache to avoid change during function execution */
ulint comp_method = innodb_compression_algorithm;
bool allocated = false;
@@ -182,7 +185,9 @@ fil_compress_page(
#ifdef HAVE_LZO
case PAGE_LZO_ALGORITHM:
err = lzo1x_1_15_compress(
- buf, len, out_buf+header_len, &write_size, out_buf+UNIV_PAGE_SIZE);
+ buf, len, out_buf+header_len, &write_size_lzo, out_buf+UNIV_PAGE_SIZE);
+
+ write_size = write_size_lzo;
if (err != LZO_E_OK || write_size > UNIV_PAGE_SIZE-header_len) {
goto err_exit;
@@ -523,8 +528,11 @@ fil_decompress_page(
#ifdef HAVE_LZO
case PAGE_LZO_ALGORITHM: {
ulint olen = 0;
+ lzo_uint olen_lzo = olen;
err = lzo1x_decompress((const unsigned char *)buf+header_len,
- actual_size,(unsigned char *)in_buf, &olen, NULL);
+ actual_size,(unsigned char *)in_buf, &olen_lzo, NULL);
+
+ olen = olen_lzo;
if (err != LZO_E_OK || (olen == 0 || olen > UNIV_PAGE_SIZE)) {
len = olen;
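
The LZO hunks above route the length through a separate `lzo_uint` temporary because `lzo1x_1_15_compress()` and `lzo1x_decompress()` take `lzo_uint*`, whose width need not match `ulint` on every platform (again, notably 64-bit Windows). A reduced sketch of the pattern; everything other than the LZO call is a placeholder:

    #include <lzo/lzo1x.h>

    // Compress src into dst. The caller's length variable has a different
    // integer type than LZO expects, so a correctly typed temporary is used
    // instead of casting the pointer.
    static int compress_block(const unsigned char* src, lzo_uint src_len,
                              unsigned char* dst, unsigned long* out_len,
                              void* wrkmem)
    {
            lzo_uint written = 0;
            int err = lzo1x_1_15_compress(src, src_len, dst, &written, wrkmem);
            *out_len = static_cast<unsigned long>(written);
            return err;
    }
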
diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc
index dd8de511b21..09012ad4101 100644
--- a/storage/innobase/fts/fts0fts.cc
+++ b/storage/innobase/fts/fts0fts.cc
@@ -1784,7 +1784,7 @@ fts_create_one_common_table(
FTS_CONFIG_TABLE_VALUE_COL_LEN);
}
- error = row_create_table_for_mysql(new_table, trx, false,
+ error = row_create_table_for_mysql(new_table, trx,
FIL_ENCRYPTION_DEFAULT, FIL_DEFAULT_ENCRYPTION_KEY);
if (error == DB_SUCCESS) {
@@ -2001,7 +2001,7 @@ fts_create_one_index_table(
(DATA_MTYPE_MAX << 16) | DATA_UNSIGNED | DATA_NOT_NULL,
FTS_INDEX_ILIST_LEN);
- error = row_create_table_for_mysql(new_table, trx, false,
+ error = row_create_table_for_mysql(new_table, trx,
FIL_ENCRYPTION_DEFAULT, FIL_DEFAULT_ENCRYPTION_KEY);
if (error == DB_SUCCESS) {
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 1421a562ca7..383f84d1dd1 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -49,9 +49,6 @@ this program; if not, write to the Free Software Foundation, Inc.,
#include <sql_class.h>
#include <sql_show.h>
#include <sql_table.h>
-#include <sql_tablespace.h>
-// MySQL 5.7 Header */
-// #include <sql_thd_internal_api.h>
#include <table_cache.h>
#include <my_check_opt.h>
#include <my_bitmap.h>
@@ -152,12 +149,6 @@ innodb_check_deprecated(void);
#include <string>
#include <sstream>
-/* for ha_innopart, Native InnoDB Partitioning. */
-/* JAN: TODO: MySQL 5.7 Native InnoDB Partitioning */
-#ifdef HAVE_HA_INNOPART_H
-#include "ha_innopart.h"
-#endif
-
#include <mysql/plugin.h>
#include <mysql/service_wsrep.h>
@@ -1360,16 +1351,6 @@ innobase_release_savepoint(
static void innobase_checkpoint_request(handlerton *hton, void *cookie);
-/************************************************************************//**
-Function for constructing an InnoDB table handler instance. */
-static
-handler*
-innobase_create_handler(
-/*====================*/
- handlerton* hton, /*!< in/out: handlerton for InnoDB */
- TABLE_SHARE* table,
- MEM_ROOT* mem_root);
-
/** @brief Initialize the default value of innodb_commit_concurrency.
Once InnoDB is running, the innodb_commit_concurrency must not change
@@ -1636,26 +1617,6 @@ innobase_create_handler(
TABLE_SHARE* table,
MEM_ROOT* mem_root)
{
-#ifdef MYSQL_INNODB_PARTITIONING
- /* If the table:
- 1) have type InnoDB (not the generic partition handlerton)
- 2) have partitioning defined
- Then return the native partitioning handler ha_innopart
- else return normal ha_innobase handler. */
- if (table
- && table->db_type() == innodb_hton_ptr // 1)
- && table->partition_info_str // 2)
- && table->partition_info_str_len) { // 2)
- ha_innopart* file = new (mem_root) ha_innopart(hton, table);
- if (file && file->init_partitioning(mem_root))
- {
- delete file;
- return(NULL);
- }
- return(file);
- }
-#endif
-
return(new (mem_root) ha_innobase(hton, table));
}
@@ -2181,7 +2142,7 @@ convert_error_code_to_mysql(
bool comp = !!(flags & DICT_TF_COMPACT);
ulint free_space = page_get_free_space_of_empty(comp) / 2;
- if (free_space >= (comp ? COMPRESSED_REC_MAX_DATA_SIZE :
+ if (free_space >= ulint(comp ? COMPRESSED_REC_MAX_DATA_SIZE :
REDUNDANT_REC_MAX_DATA_SIZE)) {
free_space = (comp ? COMPRESSED_REC_MAX_DATA_SIZE :
REDUNDANT_REC_MAX_DATA_SIZE) - 1;
@@ -2935,48 +2896,6 @@ check_trx_exists(
return(trx);
}
-#ifdef MYSQL_REPLACE_TRX_IN_THD
-/** InnoDB transaction object that is currently associated with THD is
-replaced with that of the 2nd argument. The previous value is
-returned through the 3rd argument's buffer, unless it's NULL. When
-the buffer is not provided (value NULL) that should mean the caller
-restores previously saved association so the current trx has to be
-additionally freed from all association with MYSQL.
-
-@param[in,out] thd MySQL thread handle
-@param[in] new_trx_arg replacement trx_t
-@param[in,out] ptr_trx_arg pointer to a buffer to store old trx_t */
-static
-void
-innodb_replace_trx_in_thd(
- THD* thd,
- void* new_trx_arg,
- void** ptr_trx_arg)
-{
- trx_t*& trx = thd_to_trx(thd);
-
- ut_ad(new_trx_arg == NULL
- || (((trx_t*) new_trx_arg)->mysql_thd == thd
- && !((trx_t*) new_trx_arg)->is_recovered));
-
- if (ptr_trx_arg) {
- *ptr_trx_arg = trx;
-
- ut_ad(trx == NULL
- || (trx->mysql_thd == thd && !trx->is_recovered));
-
- } else if (trx->state == TRX_STATE_NOT_STARTED) {
- ut_ad(thd == trx->mysql_thd);
- trx_free_for_mysql(trx);
- } else {
- ut_ad(thd == trx->mysql_thd);
- ut_ad(trx_state_eq(trx, TRX_STATE_PREPARED));
- trx_disconnect_prepared(trx);
- }
- trx = static_cast<trx_t*>(new_trx_arg);
-}
-#endif /* MYSQL_REPLACE_TRX_IN_THD */
-
/*************************************************************************
Gets current trx. */
trx_t*
@@ -3880,11 +3799,6 @@ innobase_init(
innobase_hton->flags =
HTON_SUPPORTS_EXTENDED_KEYS | HTON_SUPPORTS_FOREIGN_KEYS;
-#ifdef MYSQL_REPLACE_TRX_IN_THD
- innobase_hton->replace_native_transaction_in_thd =
- innodb_replace_trx_in_thd;
-#endif
-
#ifdef WITH_WSREP
innobase_hton->abort_transaction=wsrep_abort_transaction;
innobase_hton->set_checkpoint=innobase_wsrep_set_checkpoint;
@@ -4613,7 +4527,7 @@ innobase_commit_low(
}
trx->will_lock = 0;
#ifdef WITH_WSREP
- if (wsrep_on(thd)) { thd_proc_info(thd, tmp); }
+ if (thd && wsrep_on(thd)) { thd_proc_info(thd, tmp); }
#endif /* WITH_WSREP */
}
@@ -5504,19 +5418,6 @@ ha_innobase::table_flags() const
THD* thd = ha_thd();
handler::Table_flags flags = m_int_table_flags;
- /* If querying the table flags when no table_share is given,
- then we must check if the table to be created/checked is partitioned.
- */
- if (table_share == NULL) {
- /* JAN: TODO: MySQL 5.7 Partitioning && thd_get_work_part_info(thd) != NULL) { */
- /* Currently ha_innopart does not support
- all InnoDB features such as GEOMETRY, FULLTEXT etc. */
- /* JAN: TODO: MySQL 5.7
- flags &= ~(HA_INNOPART_DISABLED_TABLE_FLAGS);
- }
- */
- }
-
/* Need to use tx_isolation here since table flags is (also)
called before prebuilt is inited. */
@@ -11851,9 +11752,9 @@ err_col:
} else {
if (err == DB_SUCCESS) {
err = row_create_table_for_mysql(
- table, m_trx, false,
+ table, m_trx,
(fil_encryption_t)options->encryption,
- options->encryption_key_id);
+ (uint32_t)options->encryption_key_id);
}
DBUG_EXECUTE_IF("ib_crash_during_create_for_encryption",
@@ -15483,7 +15384,7 @@ ha_innobase::update_table_comment(
{
uint length = (uint) strlen(comment);
char* str=0;
- long flen;
+ size_t flen;
std::string fk_str;
/* We do not know if MySQL can call this function before calling
@@ -15511,9 +15412,7 @@ ha_innobase::update_table_comment(
flen = fk_str.length();
- if (flen < 0) {
- flen = 0;
- } else if (length + flen + 3 > 64000) {
+ if (length + flen + 3 > 64000) {
flen = 64000 - 3 - length;
}
/* allocate buffer for the full string */
@@ -15602,7 +15501,7 @@ get_foreign_key_info(
ptr = dict_remove_db_name(foreign->id);
f_key_info.foreign_id = thd_make_lex_string(
- thd, 0, ptr, (uint) strlen(ptr), 1);
+ thd, 0, ptr, strlen(ptr), 1);
/* Name format: database name, '/', table name, '\0' */
@@ -15614,13 +15513,13 @@ get_foreign_key_info(
len = filename_to_tablename(tmp_buff, name_buff, sizeof(name_buff));
f_key_info.referenced_db = thd_make_lex_string(
- thd, 0, name_buff, static_cast<unsigned int>(len), 1);
+ thd, 0, name_buff, len, 1);
/* Referenced (parent) table name */
ptr = dict_remove_db_name(foreign->referenced_table_name);
len = filename_to_tablename(ptr, name_buff, sizeof(name_buff));
f_key_info.referenced_table = thd_make_lex_string(
- thd, 0, name_buff, static_cast<unsigned int>(len), 1);
+ thd, 0, name_buff, len, 1);
/* Dependent (child) database name */
len = dict_get_db_name_len(foreign->foreign_table_name);
@@ -15630,22 +15529,22 @@ get_foreign_key_info(
len = filename_to_tablename(tmp_buff, name_buff, sizeof(name_buff));
f_key_info.foreign_db = thd_make_lex_string(
- thd, 0, name_buff, static_cast<unsigned int>(len), 1);
+ thd, 0, name_buff, len, 1);
/* Dependent (child) table name */
ptr = dict_remove_db_name(foreign->foreign_table_name);
len = filename_to_tablename(ptr, name_buff, sizeof(name_buff));
f_key_info.foreign_table = thd_make_lex_string(
- thd, 0, name_buff, static_cast<unsigned int>(len), 1);
+ thd, 0, name_buff, len, 1);
do {
ptr = foreign->foreign_col_names[i];
name = thd_make_lex_string(thd, name, ptr,
- (uint) strlen(ptr), 1);
+ strlen(ptr), 1);
f_key_info.foreign_fields.push_back(name);
ptr = foreign->referenced_col_names[i];
name = thd_make_lex_string(thd, name, ptr,
- (uint) strlen(ptr), 1);
+ strlen(ptr), 1);
f_key_info.referenced_fields.push_back(name);
} while (++i < foreign->n_fields);
@@ -15702,7 +15601,7 @@ get_foreign_key_info(
thd,
f_key_info.referenced_key_name,
foreign->referenced_index->name,
- (uint) strlen(foreign->referenced_index->name),
+ strlen(foreign->referenced_index->name),
1);
} else {
referenced_key_name = NULL;
@@ -22155,8 +22054,8 @@ innobase_find_mysql_table_for_vc(
char dbname[MAX_DATABASE_NAME_LEN + 1];
char tbname[MAX_TABLE_NAME_LEN + 1];
char* name = table->name.m_name;
- uint dbnamelen = dict_get_db_name_len(name);
- uint tbnamelen = strlen(name) - dbnamelen - 1;
+ uint dbnamelen = (uint) dict_get_db_name_len(name);
+ uint tbnamelen = (uint) strlen(name) - dbnamelen - 1;
char t_dbname[MAX_DATABASE_NAME_LEN + 1];
char t_tbname[MAX_TABLE_NAME_LEN + 1];
@@ -22171,7 +22070,6 @@ innobase_find_mysql_table_for_vc(
if (is_part != NULL) {
*is_part = '\0';
- tbnamelen = is_part - tbname;
}
dbnamelen = filename_to_tablename(dbname, t_dbname,
@@ -22681,7 +22579,7 @@ innobase_convert_to_filename_charset(
return(static_cast<uint>(strconvert(
cs_from, from, strlen(from),
- cs_to, to, static_cast<size_t>(len), &errors)));
+ cs_to, to, static_cast<uint>(len), &errors)));
}
/**********************************************************************
@@ -22700,7 +22598,7 @@ innobase_convert_to_system_charset(
return(static_cast<uint>(strconvert(
cs1, from, strlen(from),
- cs2, to, static_cast<size_t>(len), errors)));
+ cs2, to, static_cast<uint>(len), errors)));
}
/**********************************************************************
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index ce42d7ed3fe..da8fc3ae672 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -63,11 +63,6 @@ static const char *MSG_UNSUPPORTED_ALTER_ONLINE_ON_VIRTUAL_COLUMN=
"INPLACE ADD or DROP of virtual columns cannot be "
"combined with other ALTER TABLE actions";
-/* For supporting Native InnoDB Partitioning. */
-/* JAN: TODO: MySQL 5.7
-#include "partition_info.h"
-#include "ha_innopart.h"
-*/
/** Operations for creating secondary indexes (no rebuild needed) */
static const Alter_inplace_info::HA_ALTER_FLAGS INNOBASE_ONLINE_CREATE
= Alter_inplace_info::ADD_INDEX
@@ -326,7 +321,7 @@ my_error_innodb(
bool comp = !!(flags & DICT_TF_COMPACT);
ulint free_space = page_get_free_space_of_empty(comp) / 2;
- if (free_space >= (comp ? COMPRESSED_REC_MAX_DATA_SIZE :
+ if (free_space >= ulint(comp ? COMPRESSED_REC_MAX_DATA_SIZE :
REDUNDANT_REC_MAX_DATA_SIZE)) {
free_space = (comp ? COMPRESSED_REC_MAX_DATA_SIZE :
REDUNDANT_REC_MAX_DATA_SIZE) - 1;
@@ -414,12 +409,10 @@ innobase_need_rebuild(
== Alter_inplace_info::CHANGE_CREATE_OPTION
&& !(ha_alter_info->create_info->used_fields
& (HA_CREATE_USED_ROW_FORMAT
- | HA_CREATE_USED_KEY_BLOCK_SIZE))) {
- // JAN: TODO: MySQL 5.7
- // | HA_CREATE_USED_TABLESPACE))) {
+ | HA_CREATE_USED_KEY_BLOCK_SIZE))) {
/* Any other CHANGE_CREATE_OPTION than changing
- ROW_FORMAT, KEY_BLOCK_SIZE or TABLESPACE can be done
- without rebuilding the table. */
+ ROW_FORMAT or KEY_BLOCK_SIZE can be done without
+ rebuilding the table. */
return(false);
}
@@ -4716,7 +4709,7 @@ prepare_inplace_alter_table_dict(
}
error = row_create_table_for_mysql(
- ctx->new_table, ctx->trx, false, mode, key_id);
+ ctx->new_table, ctx->trx, mode, key_id);
switch (error) {
dict_table_t* temp_table;
@@ -9084,373 +9077,6 @@ foreign_fail:
DBUG_RETURN(false);
}
-
-/** Helper class for in-place alter, see handler.h */
-class ha_innopart_inplace_ctx : public inplace_alter_handler_ctx
-{
-/* Only used locally in this file, so have everything public for
-conveniance. */
-public:
- /** Total number of partitions. */
- uint m_tot_parts;
- /** Array of inplace contexts for all partitions. */
- inplace_alter_handler_ctx** ctx_array;
- /** Array of prebuilt for all partitions. */
- row_prebuilt_t** prebuilt_array;
-
- ha_innopart_inplace_ctx(THD *thd, uint tot_parts)
- : inplace_alter_handler_ctx(),
- m_tot_parts(tot_parts),
- ctx_array(),
- prebuilt_array()
- {}
-
- ~ha_innopart_inplace_ctx()
- {
- if (ctx_array) {
- for (uint i = 0; i < m_tot_parts; i++) {
- delete ctx_array[i];
- }
- ut_free(ctx_array);
- }
- if (prebuilt_array) {
- /* First entry is the original prebuilt! */
- for (uint i = 1; i < m_tot_parts; i++) {
- /* Don't close the tables. */
- prebuilt_array[i]->table = NULL;
- row_prebuilt_free(prebuilt_array[i], false);
- }
- ut_free(prebuilt_array);
- }
- }
-};
-
-#ifdef MYSQL_INNODB_PARTITIONING
-
-/** Check if supported inplace alter table.
-@param[in] altered_table Altered MySQL table.
-@param[in] ha_alter_info Information about inplace operations to do.
-@return Lock level, not supported or error */
-enum_alter_inplace_result
-ha_innopart::check_if_supported_inplace_alter(
- TABLE* altered_table,
- Alter_inplace_info* ha_alter_info)
-{
- DBUG_ENTER("ha_innopart::check_if_supported_inplace_alter");
- DBUG_ASSERT(ha_alter_info->handler_ctx == NULL);
-
- /* Not supporting these for partitioned tables yet! */
-
- /* FK not yet supported. */
- if (ha_alter_info->handler_flags
- & (Alter_inplace_info::ADD_FOREIGN_KEY
- | Alter_inplace_info::DROP_FOREIGN_KEY)) {
-
- ha_alter_info->unsupported_reason = innobase_get_err_msg(
- ER_FOREIGN_KEY_ON_PARTITIONED);
- DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
- }
- /* FTS not yet supported either. */
- if ((ha_alter_info->handler_flags
- & Alter_inplace_info::ADD_INDEX)) {
-
- for (uint i = 0; i < ha_alter_info->index_add_count; i++) {
- const KEY* key =
- &ha_alter_info->key_info_buffer[
- ha_alter_info->index_add_buffer[i]];
- if (key->flags & HA_FULLTEXT) {
- DBUG_ASSERT(!(key->flags & HA_KEYFLAG_MASK
- & ~(HA_FULLTEXT
- | HA_PACK_KEY
- | HA_GENERATED_KEY
- | HA_BINARY_PACK_KEY)));
- ha_alter_info->unsupported_reason =
- innobase_get_err_msg(
- ER_FULLTEXT_NOT_SUPPORTED_WITH_PARTITIONING);
- DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
- }
- }
- }
- /* We cannot allow INPLACE to change order of KEY partitioning fields! */
- if ((ha_alter_info->handler_flags
- & Alter_inplace_info::ALTER_STORED_COLUMN_ORDER)
- && !m_part_info->same_key_column_order(
- &ha_alter_info->alter_info->create_list)) {
-
- DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
- }
-
- /* Cannot allow INPLACE for drop and create PRIMARY KEY if partition is
- on Primary Key - PARTITION BY KEY() */
- if ((ha_alter_info->handler_flags
- & (Alter_inplace_info::ADD_PK_INDEX
- | Alter_inplace_info::DROP_PK_INDEX))) {
-
- /* Check partition by key(). */
- if ((m_part_info->part_type == HASH_PARTITION)
- && m_part_info->list_of_part_fields
- && m_part_info->part_field_list.is_empty()) {
-
- DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
- }
-
- /* Check sub-partition by key(). */
- if ((m_part_info->subpart_type == HASH_PARTITION)
- && m_part_info->list_of_subpart_fields
- && m_part_info->subpart_field_list.is_empty()) {
-
- DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
- }
- }
-
- /* Check for PK and UNIQUE should already be done when creating the
- new table metadata.
- (fix_partition_info/check_primary_key+check_unique_key) */
-
- set_partition(0);
- DBUG_RETURN(ha_innobase::check_if_supported_inplace_alter(altered_table,
- ha_alter_info));
-}
-
-/** Prepare inplace alter table.
-Allows InnoDB to update internal structures with concurrent
-writes blocked (provided that check_if_supported_inplace_alter()
-did not return HA_ALTER_INPLACE_NO_LOCK).
-This will be invoked before inplace_alter_table().
-@param[in] altered_table TABLE object for new version of table.
-@param[in] ha_alter_info Structure describing changes to be done
-by ALTER TABLE and holding data used during in-place alter.
-@retval true Failure.
-@retval false Success. */
-bool
-ha_innopart::prepare_inplace_alter_table(
- TABLE* altered_table,
- Alter_inplace_info* ha_alter_info)
-{
- THD* thd;
- ha_innopart_inplace_ctx* ctx_parts;
- bool res = true;
- DBUG_ENTER("ha_innopart::prepare_inplace_alter_table");
- DBUG_ASSERT(ha_alter_info->handler_ctx == NULL);
-
- thd = ha_thd();
-
- /* Clean up all ins/upd nodes. */
- clear_ins_upd_nodes();
- /* Based on Sql_alloc class, return NULL for new on failure. */
- ctx_parts = new ha_innopart_inplace_ctx(thd, m_tot_parts);
- if (!ctx_parts) {
- DBUG_RETURN(HA_ALTER_ERROR);
- }
-
- uint ctx_array_size = sizeof(inplace_alter_handler_ctx*)
- * (m_tot_parts + 1);
- ctx_parts->ctx_array =
- static_cast<inplace_alter_handler_ctx**>(
- ut_malloc(ctx_array_size,
- mem_key_partitioning));
- if (!ctx_parts->ctx_array) {
- DBUG_RETURN(HA_ALTER_ERROR);
- }
-
- /* Set all to NULL, including the terminating one. */
- memset(ctx_parts->ctx_array, 0, ctx_array_size);
-
- ctx_parts->prebuilt_array = static_cast<row_prebuilt_t**>(
- ut_malloc(sizeof(row_prebuilt_t*)
- * m_tot_parts,
- mem_key_partitioning));
- if (!ctx_parts->prebuilt_array) {
- DBUG_RETURN(HA_ALTER_ERROR);
- }
- /* For the first partition use the current prebuilt. */
- ctx_parts->prebuilt_array[0] = m_prebuilt;
- /* Create new prebuilt for the rest of the partitions.
- It is needed for the current implementation of
- ha_innobase::commit_inplace_alter_table(). */
- for (uint i = 1; i < m_tot_parts; i++) {
- row_prebuilt_t* tmp_prebuilt;
- tmp_prebuilt = row_create_prebuilt(
- m_part_share->get_table_part(i),
- table_share->reclength);
- /* Use same trx as original prebuilt. */
- tmp_prebuilt->trx = m_prebuilt->trx;
- ctx_parts->prebuilt_array[i] = tmp_prebuilt;
- }
-
- const char* save_tablespace =
- ha_alter_info->create_info->tablespace;
-
- const char* save_data_file_name =
- ha_alter_info->create_info->data_file_name;
-
- for (uint i = 0; i < m_tot_parts; i++) {
- m_prebuilt = ctx_parts->prebuilt_array[i];
- m_prebuilt_ptr = ctx_parts->prebuilt_array + i;
- ha_alter_info->handler_ctx = ctx_parts->ctx_array[i];
- set_partition(i);
-
- /* Set the tablespace and data_file_name value of the
- alter_info to the tablespace value and data_file_name
- value that was existing for the partition originally,
- so that for ALTER TABLE the tablespace clause in create
- option is ignored for existing partitions, and later
- set it back to its old value */
-
- ha_alter_info->create_info->tablespace =
- m_prebuilt->table->tablespace;
- ha_alter_info->create_info->data_file_name =
- m_prebuilt->table->data_dir_path;
-
- res = ha_innobase::prepare_inplace_alter_table(altered_table,
- ha_alter_info);
- update_partition(i);
- ctx_parts->ctx_array[i] = ha_alter_info->handler_ctx;
- if (res) {
- break;
- }
- }
- m_prebuilt = ctx_parts->prebuilt_array[0];
- m_prebuilt_ptr = &m_prebuilt;
- ha_alter_info->handler_ctx = ctx_parts;
- ha_alter_info->group_commit_ctx = ctx_parts->ctx_array;
- ha_alter_info->create_info->tablespace = save_tablespace;
- ha_alter_info->create_info->data_file_name = save_data_file_name;
- DBUG_RETURN(res);
-}
-
-/** Inplace alter table.
-Alter the table structure in-place with operations
-specified using Alter_inplace_info.
-The level of concurrency allowed during this operation depends
-on the return value from check_if_supported_inplace_alter().
-@param[in] altered_table TABLE object for new version of table.
-@param[in] ha_alter_info Structure describing changes to be done
-by ALTER TABLE and holding data used during in-place alter.
-@retval true Failure.
-@retval false Success. */
-bool
-ha_innopart::inplace_alter_table(
- TABLE* altered_table,
- Alter_inplace_info* ha_alter_info)
-{
- bool res = true;
- ha_innopart_inplace_ctx* ctx_parts;
-
- ctx_parts = static_cast<ha_innopart_inplace_ctx*>(
- ha_alter_info->handler_ctx);
- for (uint i = 0; i < m_tot_parts; i++) {
- m_prebuilt = ctx_parts->prebuilt_array[i];
- ha_alter_info->handler_ctx = ctx_parts->ctx_array[i];
- set_partition(i);
- res = ha_innobase::inplace_alter_table(altered_table,
- ha_alter_info);
- ut_ad(ctx_parts->ctx_array[i] == ha_alter_info->handler_ctx);
- ctx_parts->ctx_array[i] = ha_alter_info->handler_ctx;
- if (res) {
- break;
- }
- }
- m_prebuilt = ctx_parts->prebuilt_array[0];
- ha_alter_info->handler_ctx = ctx_parts;
- return(res);
-}
-
-/** Commit or rollback inplace alter table.
-Commit or rollback the changes made during
-prepare_inplace_alter_table() and inplace_alter_table() inside
-the storage engine. Note that the allowed level of concurrency
-during this operation will be the same as for
-inplace_alter_table() and thus might be higher than during
-prepare_inplace_alter_table(). (E.g concurrent writes were
-blocked during prepare, but might not be during commit).
-@param[in] altered_table TABLE object for new version of table.
-@param[in] ha_alter_info Structure describing changes to be done
-by ALTER TABLE and holding data used during in-place alter.
-@param[in] commit true => Commit, false => Rollback.
-@retval true Failure.
-@retval false Success. */
-bool
-ha_innopart::commit_inplace_alter_table(
- TABLE* altered_table,
- Alter_inplace_info* ha_alter_info,
- bool commit)
-{
- bool res = false;
- ha_innopart_inplace_ctx* ctx_parts;
-
- ctx_parts = static_cast<ha_innopart_inplace_ctx*>(
- ha_alter_info->handler_ctx);
- ut_ad(ctx_parts);
- ut_ad(ctx_parts->prebuilt_array);
- ut_ad(ctx_parts->prebuilt_array[0] == m_prebuilt);
- if (commit) {
- /* Commit is done through first partition (group commit). */
- ut_ad(ha_alter_info->group_commit_ctx == ctx_parts->ctx_array);
- ha_alter_info->handler_ctx = ctx_parts->ctx_array[0];
- set_partition(0);
- res = ha_innobase::commit_inplace_alter_table(altered_table,
- ha_alter_info,
- commit);
- ut_ad(res || !ha_alter_info->group_commit_ctx);
- goto end;
- }
- /* Rollback is done for each partition. */
- for (uint i = 0; i < m_tot_parts; i++) {
- m_prebuilt = ctx_parts->prebuilt_array[i];
- ha_alter_info->handler_ctx = ctx_parts->ctx_array[i];
- set_partition(i);
- if (ha_innobase::commit_inplace_alter_table(altered_table,
- ha_alter_info, commit)) {
- res = true;
- }
- ut_ad(ctx_parts->ctx_array[i] == ha_alter_info->handler_ctx);
- ctx_parts->ctx_array[i] = ha_alter_info->handler_ctx;
- }
-end:
- /* Move the ownership of the new tables back to
- the m_part_share. */
- ha_innobase_inplace_ctx* ctx;
- for (uint i = 0; i < m_tot_parts; i++) {
- /* TODO: Fix to only use one prebuilt (i.e. make inplace
- alter partition aware instead of using multiple prebuilt
- copies... */
- ctx = static_cast<ha_innobase_inplace_ctx*>(
- ctx_parts->ctx_array[i]);
- if (ctx) {
- m_part_share->set_table_part(i, ctx->prebuilt->table);
- ctx->prebuilt->table = NULL;
- ctx_parts->prebuilt_array[i] = ctx->prebuilt;
- }
- }
- /* The above juggling of prebuilt must be reset here. */
- m_prebuilt = ctx_parts->prebuilt_array[0];
- m_prebuilt->table = m_part_share->get_table_part(0);
- ha_alter_info->handler_ctx = ctx_parts;
- return(res);
-}
-
-/** Notify the storage engine that the table structure (.frm) has
-been updated.
-
-ha_partition allows inplace operations that also upgrades the engine
-if it supports partitioning natively. So if this is the case then
-we will remove the .par file since it is not used with ha_innopart
-(we use the internal data dictionary instead). */
-void
-ha_innopart::notify_table_changed()
-{
- char tmp_par_path[FN_REFLEN + 1];
- strxnmov(tmp_par_path, FN_REFLEN, table->s->normalized_path.str,
- ".par", NullS);
-
- if (my_access(tmp_par_path, W_OK) == 0)
- {
- my_delete(tmp_par_path, MYF(0));
- }
-}
-#endif /* MYSQL_INNODB_PARTITIONING */
-
/**
@param thd the session
@param start_value the lower bound
diff --git a/storage/innobase/include/dict0crea.h b/storage/innobase/include/dict0crea.h
index 76eb48d1e7e..86a4bcf23a3 100644
--- a/storage/innobase/include/dict0crea.h
+++ b/storage/innobase/include/dict0crea.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2017, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -242,7 +243,6 @@ replacing what was there previously.
@param[in] flags Tablespace flags
@param[in] path Tablespace path
@param[in] trx Transaction
-@param[in] commit If true, commit the transaction
@return error code or DB_SUCCESS */
dberr_t
dict_replace_tablespace_in_dictionary(
@@ -250,8 +250,7 @@ dict_replace_tablespace_in_dictionary(
const char* name,
ulint flags,
const char* path,
- trx_t* trx,
- bool commit);
+ trx_t* trx);
/** Delete records from SYS_TABLESPACES and SYS_DATAFILES associated
with a particular tablespace ID.
diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h
index 6617cf8d73b..0c72165e052 100644
--- a/storage/innobase/include/dict0dict.h
+++ b/storage/innobase/include/dict0dict.h
@@ -386,13 +386,6 @@ dict_table_add_system_columns(
dict_table_t* table, /*!< in/out: table */
mem_heap_t* heap) /*!< in: temporary heap */
MY_ATTRIBUTE((nonnull));
-
-/** Mark if table has big rows.
-@param[in,out] table table handler */
-void
-dict_table_set_big_rows(
- dict_table_t* table)
- MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Adds a table object to the dictionary cache. */
void
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index 2ec63c9d3b7..aa96405ddcc 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -1495,10 +1495,6 @@ struct dict_table_t {
/*!< set of foreign key constraints which refer to this table */
dict_foreign_set referenced_set;
- /** TRUE if the maximum length of a single row exceeds BIG_ROW_SIZE.
- Initialized in dict_table_add_to_cache(). */
- unsigned big_rows:1;
-
/** Statistics for query optimization. @{ */
/** Creation state of 'stats_latch'. */
diff --git a/storage/innobase/include/fsp0pagecompress.ic b/storage/innobase/include/fsp0pagecompress.ic
index f4b95162b2a..d1f2ea45fbd 100644
--- a/storage/innobase/include/fsp0pagecompress.ic
+++ b/storage/innobase/include/fsp0pagecompress.ic
@@ -82,7 +82,7 @@ UNIV_INLINE
const char*
fil_get_compression_alg_name(
/*=========================*/
- ulint comp_alg) /*!<in: compression algorithm number */
+ ib_uint64_t comp_alg) /*!<in: compression algorithm number */
{
switch(comp_alg) {
case PAGE_UNCOMPRESSED:
diff --git a/storage/innobase/include/ha_prototypes.h b/storage/innobase/include/ha_prototypes.h
index 459304fc712..5b97b4b3a88 100644
--- a/storage/innobase/include/ha_prototypes.h
+++ b/storage/innobase/include/ha_prototypes.h
@@ -39,10 +39,8 @@ class THD;
// JAN: TODO missing features:
#undef MYSQL_FT_INIT_EXT
-#undef MYSQL_INNODB_PARTITIONING
#undef MYSQL_PFS
#undef MYSQL_RENAME_INDEX
-#undef MYSQL_REPLACE_TRX_IN_THD
#undef MYSQL_STORE_FTS_DOC_ID
/*******************************************************************//**
diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h
index b3de1bf27f2..60b07f2fe72 100644
--- a/storage/innobase/include/lock0lock.h
+++ b/storage/innobase/include/lock0lock.h
@@ -190,7 +190,7 @@ lock_update_merge_left(
const buf_block_t* right_block); /*!< in: merged index page
which will be discarded */
/*************************************************************//**
-Updates the lock table when a page is splited and merged to
+Updates the lock table when a page is split and merged to
two pages. */
UNIV_INTERN
void
@@ -1073,16 +1073,9 @@ std::string
lock_get_info(
const lock_t*);
-/*************************************************************//**
-Updates the lock table when a page is split and merged to
-two pages. */
-UNIV_INTERN
-void
-lock_update_split_and_merge(
- const buf_block_t* left_block, /*!< in: left page to which merged */
- const rec_t* orig_pred, /*!< in: original predecessor of
- supremum on the left page before merge*/
- const buf_block_t* right_block);/*!< in: right page from which merged */
+/*******************************************************************//**
+@return whether wsrep_on is true on trx->mysql_thd*/
+#define wsrep_on_trx(trx) ((trx)->mysql_thd && wsrep_on((trx)->mysql_thd))
#endif /* WITH_WSREP */
diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h
index b366267d6d2..1b90ea8d7e7 100644
--- a/storage/innobase/include/os0file.h
+++ b/storage/innobase/include/os0file.h
@@ -638,6 +638,9 @@ os_file_create_simple_no_error_handling_func(
bool* success)
MY_ATTRIBUTE((warn_unused_result));
+#ifdef _WIN32
+#define os_file_set_nocache(fd, file_name, operation_name) do{}while(0)
+#else
/** Tries to disable OS caching on an opened file descriptor.
@param[in] fd file descriptor to alter
@param[in] file_name file name, used in the diagnostic message
@@ -646,9 +649,10 @@ os_file_create_simple_no_error_handling_func(
void
os_file_set_nocache(
/*================*/
- os_file_t fd, /*!< in: file descriptor to alter */
+ int fd, /*!< in: file descriptor to alter */
const char* file_name,
const char* operation_name);
+#endif
/** NOTE! Use the corresponding macro os_file_create(), not directly
this function!
@@ -1565,20 +1569,48 @@ innobase_mysql_tmpfile(
void
os_file_set_umask(ulint umask);
+#ifdef _WIN32
+
+/**
+Make file sparse, on Windows.
+
+@param[in] file file handle
+@return true on success, false on error */
+bool os_file_set_sparse_win32(os_file_t file);
+
+/**
+Changes file size on Windows
+
+If the file is extended, the bytes between the old
+and new EOF are zeros.
+
+If the file is sparse, a "virtual" block is added at the end
+of the allocated area.
+
+If the file is a normal file, the file system allocates storage.
+
+@param[in] pathname file path
+@param[in] file file handle
+@param[in] size size to preserve in bytes
+@return true if success */
+bool
+os_file_change_size_win32(
+ const char* pathname,
+ os_file_t file,
+ os_offset_t size);
+
+#endif /*_WIN32 */
+
/** Check if the file system supports sparse files.
Warning: On POSIX systems we try and punch a hole from offset 0 to
the system configured page size. This should only be called on an empty
file.
-Note: On Windows we use the name and on Unices we use the file handle.
-
-@param[in] name File name
@param[in] fh File handle for the file - if opened
@return true if the file system supports sparse files */
bool
os_is_sparse_file_supported(
- const char* path,
os_file_t fh)
MY_ATTRIBUTE((warn_unused_result));
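
`os_file_change_size_win32()` and `os_file_set_sparse_win32()` are only declared here; their implementation is not shown in this excerpt. The removed fil0fil.cc code earlier in this diff shows the underlying Win32 call; a minimal sketch of that end-of-file resize, assuming a valid `HANDLE`, looks like this (illustrative only, not the actual function body):

    #include <windows.h>

    // Set the end of file of an open handle to new_size bytes; returns true
    // on success. For a sparse file this only extends the "virtual" size,
    // while a normal file gets backing storage from the filesystem.
    static bool set_eof_win32(HANDLE file, LONGLONG new_size)
    {
            FILE_END_OF_FILE_INFO feof;
            feof.EndOfFile.QuadPart = new_size;
            return SetFileInformationByHandle(file, FileEndOfFileInfo,
                                              &feof, sizeof feof) != 0;
    }
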
diff --git a/storage/innobase/include/row0mysql.h b/storage/innobase/include/row0mysql.h
index 2b000603929..8d3752974a6 100644
--- a/storage/innobase/include/row0mysql.h
+++ b/storage/innobase/include/row0mysql.h
@@ -352,7 +352,6 @@ row_create_table_for_mysql(
(will be freed, or on DB_SUCCESS
added to the data dictionary cache) */
trx_t* trx, /*!< in/out: transaction */
- bool commit, /*!< in: if true, commit the transaction */
fil_encryption_t mode, /*!< in: encryption mode */
uint32_t key_id) /*!< in: encryption key_id */
MY_ATTRIBUTE((warn_unused_result));
diff --git a/storage/innobase/include/row0upd.h b/storage/innobase/include/row0upd.h
index 359d243a0cd..ec7995dd096 100644
--- a/storage/innobase/include/row0upd.h
+++ b/storage/innobase/include/row0upd.h
@@ -87,8 +87,7 @@ upd_field_set_field_no(
upd_field_t* upd_field, /*!< in: update vector field */
ulint field_no, /*!< in: field number in a clustered
index */
- dict_index_t* index, /*!< in: index */
- trx_t* trx); /*!< in: transaction */
+ dict_index_t* index);
/** set field number to a update vector field, marks this field is updated
@param[in,out] upd_field update vector field
diff --git a/storage/innobase/include/row0upd.ic b/storage/innobase/include/row0upd.ic
index 18c72309930..11271d6e9af 100644
--- a/storage/innobase/include/row0upd.ic
+++ b/storage/innobase/include/row0upd.ic
@@ -95,22 +95,10 @@ upd_field_set_field_no(
upd_field_t* upd_field, /*!< in: update vector field */
ulint field_no, /*!< in: field number in a clustered
index */
- dict_index_t* index, /*!< in: index */
- trx_t* trx) /*!< in: transaction */
+ dict_index_t* index) /*!< in: index */
{
upd_field->field_no = unsigned(field_no);
upd_field->orig_len = 0;
-
- if (UNIV_UNLIKELY(field_no >= dict_index_get_n_fields(index))) {
- ib::error()
- << " trying to access field " << field_no
- << " in " << index->name
- << " of table " << index->table->name
- << " which contains only " << index->n_fields
- << " fields";
- ut_ad(0);
- }
-
dict_col_copy_type(dict_index_get_nth_col(index, field_no),
dfield_get_type(&upd_field->new_val));
}
diff --git a/storage/innobase/include/sync0types.h b/storage/innobase/include/sync0types.h
index 8d08416cccd..d7583f87f3b 100644
--- a/storage/innobase/include/sync0types.h
+++ b/storage/innobase/include/sync0types.h
@@ -1192,10 +1192,16 @@ struct MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) simple_counter
{
compile_time_assert(!atomic || sizeof(Type) == sizeof(lint));
if (atomic) {
- /* Silence MSVS warnings when instantiating
- this template with atomic=false. */
+#ifdef _MSC_VER
+// Suppress the type conversion / possible loss of data warning (C4244)
+#pragma warning (push)
+#pragma warning (disable : 4244)
+#endif
return Type(my_atomic_addlint(reinterpret_cast<lint*>
(&m_counter), i));
+#ifdef _MSC_VER
+#pragma warning (pop)
+#endif
} else {
return m_counter += i;
}
diff --git a/storage/innobase/include/trx0rec.h b/storage/innobase/include/trx0rec.h
index 3b4a195735b..61398d3540d 100644
--- a/storage/innobase/include/trx0rec.h
+++ b/storage/innobase/include/trx0rec.h
@@ -152,7 +152,6 @@ trx_undo_update_rec_get_update(
trx_id_t trx_id, /*!< in: transaction id from this undorecord */
roll_ptr_t roll_ptr,/*!< in: roll pointer from this undo record */
ulint info_bits,/*!< in: info bits from this undo record */
- trx_t* trx, /*!< in: transaction */
mem_heap_t* heap, /*!< in: memory heap from which the memory
needed is allocated */
upd_t** upd); /*!< out, own: update vector */
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index b2f0d90583e..0141d8105ed 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -1455,7 +1455,7 @@ lock_rec_other_has_conflicting(
if (lock_rec_has_to_wait(true, trx, mode, lock, is_supremum)) {
#ifdef WITH_WSREP
- if (wsrep_on(trx->mysql_thd)) {
+ if (wsrep_on_trx(trx)) {
trx_mutex_enter(lock->trx);
wsrep_kill_victim((trx_t *)trx, (lock_t *)lock);
trx_mutex_exit(lock->trx);
@@ -1985,8 +1985,7 @@ RecLock::create(
}
#ifdef WITH_WSREP
- if (c_lock &&
- wsrep_on(trx->mysql_thd) &&
+ if (c_lock && wsrep_on_trx(trx) &&
wsrep_thd_is_BF(trx->mysql_thd, FALSE)) {
lock_t *hash = (lock_t *)c_lock->hash;
lock_t *prev = NULL;
diff --git a/storage/innobase/lock/lock0wait.cc b/storage/innobase/lock/lock0wait.cc
index 0954ad9430a..0ed55558dc4 100644
--- a/storage/innobase/lock/lock0wait.cc
+++ b/storage/innobase/lock/lock0wait.cc
@@ -189,8 +189,7 @@ wsrep_is_BF_lock_timeout(
/*====================*/
trx_t* trx) /* in: trx to check for lock priority */
{
- if (wsrep_on(trx->mysql_thd) &&
- wsrep_thd_is_BF(trx->mysql_thd, FALSE)) {
+ if (wsrep_on_trx(trx) && wsrep_thd_is_BF(trx->mysql_thd, FALSE)) {
fprintf(stderr, "WSREP: BF lock wait long\n");
srv_print_innodb_monitor = TRUE;
srv_print_innodb_lock_monitor = TRUE;
@@ -198,7 +197,7 @@ wsrep_is_BF_lock_timeout(
return TRUE;
}
return FALSE;
- }
+}
#endif /* WITH_WSREP */
/***************************************************************//**
@@ -399,7 +398,7 @@ lock_wait_suspend_thread(
if (lock_wait_timeout < 100000000
&& wait_time > (double) lock_wait_timeout
#ifdef WITH_WSREP
- && (!wsrep_on(trx->mysql_thd) ||
+ && (!wsrep_on_trx(trx) ||
(!wsrep_is_BF_lock_timeout(trx) && trx->error_state != DB_DEADLOCK))
#endif /* WITH_WSREP */
&& !trx_is_high_priority(trx)) {
diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc
index 837e60882e6..c894a3c15ab 100644
--- a/storage/innobase/os/os0file.cc
+++ b/storage/innobase/os/os0file.cc
@@ -858,7 +858,8 @@ os_file_get_block_size(
&tmp);
if (!result) {
- if (GetLastError() == ERROR_INVALID_FUNCTION) {
+ DWORD err = GetLastError();
+ if (err == ERROR_INVALID_FUNCTION || err == ERROR_NOT_SUPPORTED) {
			// Don't report the error; it is the driver's fault, not ours or the user's.
			// We handle this with a fallback. Report with an info message, just once.
static bool write_info = true;
@@ -3779,6 +3780,7 @@ os_file_get_last_error_low(
return(OS_FILE_ERROR_MAX + err);
}
+
/** NOTE! Use the corresponding macro os_file_create_simple(), not directly
this function!
A simple function to open or create a file.
@@ -3897,15 +3899,6 @@ os_file_create_simple_func(
retry = false;
*success = true;
-
- DWORD temp;
-
- /* This is a best effort use case, if it fails then
- we will find out when we try and punch the hole. */
-
- os_win32_device_io_control(
- file, FSCTL_SET_SPARSE, NULL, 0, NULL, 0,
- &temp);
}
} while (retry);
@@ -4298,13 +4291,6 @@ os_file_create_func(
/* Bind the file handle to completion port */
ut_a(CreateIoCompletionPort(file, completion_port, 0, 0));
}
- DWORD temp;
-
- /* This is a best effort use case, if it fails then
- we will find out when we try and punch the hole. */
- os_win32_device_io_control(
- file, FSCTL_SET_SPARSE, NULL, 0, NULL, 0,
- &temp);
}
} while (retry);
@@ -4752,16 +4738,36 @@ os_file_get_status_win32(
return(DB_SUCCESS);
}
-/** Truncates a file to a specified size in bytes.
-Do nothing if the size to preserve is greater or equal to the current
-size of the file.
+/**
+Sets the sparse flag on a file, on Windows.
+@param[in] file file handle
+@return true on success, false on error
+*/
+bool os_file_set_sparse_win32(os_file_t file)
+{
+	DWORD temp;
+	return os_win32_device_io_control(file, FSCTL_SET_SPARSE, 0, 0, 0, 0, &temp);
+}
+
+
+/**
+Change file size on Windows.
+
+If the file is extended, the bytes between the old and new EOF
+are zeros.
+
+If the file is sparse, a "virtual" block is added at the end of
+the allocated area.
+
+If the file is a normal file, the file system allocates storage.
+
@param[in] pathname file path
-@param[in] file file to be truncated
+@param[in] file file handle
@param[in] size size to preserve in bytes
@return true if success */
-static
bool
-os_file_truncate_win32(
+os_file_change_size_win32(
const char* pathname,
os_file_t file,
os_offset_t size)
@@ -5254,6 +5260,7 @@ os_file_handle_error_no_exit(
name, operation, false, on_error_silent));
}
+#ifndef _WIN32
/** Tries to disable OS caching on an opened file descriptor.
@param[in] fd file descriptor to alter
@param[in] file_name file name, used in the diagnostic message
@@ -5261,7 +5268,7 @@ os_file_handle_error_no_exit(
message */
void
os_file_set_nocache(
- os_file_t fd MY_ATTRIBUTE((unused)),
+ int fd MY_ATTRIBUTE((unused)),
const char* file_name MY_ATTRIBUTE((unused)),
const char* operation_name MY_ATTRIBUTE((unused)))
{
@@ -5310,6 +5317,8 @@ short_warning:
#endif /* defined(UNIV_SOLARIS) && defined(DIRECTIO_ON) */
}
+#endif /* _WIN32 */
+
/** Write the specified number of zeros to a newly created file.
@param[in] name name of the file or path as a null-terminated
string
@@ -5324,6 +5333,9 @@ os_file_set_size(
os_offset_t size,
bool read_only)
{
+#ifdef _WIN32
+ return os_file_change_size_win32(name, file, size);
+#endif
/* Write up to 1 megabyte at a time. */
ulint buf_size = ut_min(
static_cast<ulint>(64),
@@ -5413,7 +5425,7 @@ os_file_truncate(
}
#ifdef _WIN32
- return(os_file_truncate_win32(pathname, file, size));
+ return(os_file_change_size_win32(pathname, file, size));
#else /* _WIN32 */
return(os_file_truncate_posix(pathname, file, size));
#endif /* _WIN32 */
@@ -5553,14 +5565,10 @@ IORequest::punch_hole(os_file_t fh, os_offset_t off, ulint len)
Warning: On POSIX systems we try and punch a hole from offset 0 to
the system configured page size. This should only be called on an empty
file.
-
-Note: On Windows we use the name and on Unices we use the file handle.
-
-@param[in] name File name
@param[in] fh File handle for the file - if opened
@return true if the file system supports sparse files */
bool
-os_is_sparse_file_supported(const char* path, os_file_t fh)
+os_is_sparse_file_supported(os_file_t fh)
{
/* In this debugging mode, we act as if punch hole is supported,
then we skip any calls to actually punch a hole. In this way,
@@ -5570,7 +5578,13 @@ os_is_sparse_file_supported(const char* path, os_file_t fh)
);
#ifdef _WIN32
- return(os_is_sparse_file_supported_win32(path));
+ BY_HANDLE_FILE_INFORMATION info;
+ if (GetFileInformationByHandle(fh,&info)) {
+ if (info.dwFileAttributes != INVALID_FILE_ATTRIBUTES) {
+ return (info.dwFileAttributes & FILE_ATTRIBUTE_SPARSE_FILE) != 0;
+ }
+ }
+ return false;
#else
dberr_t err;
@@ -7531,9 +7545,9 @@ AIO::to_file(FILE* file) const
fprintf(file,
"%s IO for %s (offset=" UINT64PF
- ", size=" ULINTPF ")\n",
+ ", size=%lu)\n",
slot.type.is_read() ? "read" : "write",
- slot.name, slot.offset, slot.len);
+ slot.name, slot.offset, (unsigned long)(slot.len));
}
}
diff --git a/storage/innobase/pars/pars0pars.cc b/storage/innobase/pars/pars0pars.cc
index 21325cac12a..56ca037f247 100644
--- a/storage/innobase/pars/pars0pars.cc
+++ b/storage/innobase/pars/pars0pars.cc
@@ -1180,7 +1180,7 @@ pars_process_assign_list(
upd_field_set_field_no(upd_field, dict_index_get_nth_col_pos(
clust_index, col_sym->col_no,
NULL),
- clust_index, NULL);
+ clust_index);
upd_field->exp = assign_node->val;
if (!dict_col_get_fixed_size(
diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc
index cffc40af835..b463eedf8f1 100644
--- a/storage/innobase/row/row0merge.cc
+++ b/storage/innobase/row/row0merge.cc
@@ -1090,8 +1090,8 @@ row_merge_read(
const bool success = os_file_read_no_error_handling_int_fd(
request, fd, buf, ofs, srv_sort_buf_size);
- /* For encrypted tables, decrypt data after reading and copy data */
- if (log_tmp_is_encrypted()) {
+	/* If encryption is enabled, decrypt the buffer */
+ if (success && log_tmp_is_encrypted()) {
if (!log_tmp_block_decrypt(buf, srv_sort_buf_size,
crypt_buf, ofs, space)) {
return (FALSE);
@@ -4016,7 +4016,7 @@ row_merge_file_create(
if (merge_file->fd >= 0) {
if (srv_disable_sort_file_cache) {
- os_file_set_nocache((os_file_t)merge_file->fd,
+ os_file_set_nocache(merge_file->fd,
"row0merge.cc", "sort");
}
}
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index 583acdc482b..eb1c253be1c 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -2370,7 +2370,6 @@ row_create_table_for_mysql(
(will be freed, or on DB_SUCCESS
added to the data dictionary cache) */
trx_t* trx, /*!< in/out: transaction */
- bool commit, /*!< in: if true, commit the transaction */
fil_encryption_t mode, /*!< in: encryption mode */
uint32_t key_id) /*!< in: encryption key_id */
{
@@ -2400,10 +2399,6 @@ err_exit:
#endif /* !DBUG_OFF */
dict_mem_table_free(table);
- if (commit) {
- trx_commit_for_mysql(trx);
- }
-
trx->op_info = "";
return(DB_ERROR);
@@ -2448,7 +2443,7 @@ err_exit:
err = dict_replace_tablespace_in_dictionary(
table->space, table->name.m_name,
fil_space_get_flags(table->space),
- path, trx, commit);
+ path, trx);
ut_free(path);
@@ -2474,10 +2469,6 @@ err_exit:
DICT_ERR_IGNORE_NONE)) {
dict_table_close_and_drop(trx, table);
-
- if (commit) {
- trx_commit_for_mysql(trx);
- }
} else {
dict_mem_table_free(table);
}
diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc
index ed07bcb03af..97fbce3b141 100644
--- a/storage/innobase/row/row0purge.cc
+++ b/storage/innobase/row/row0purge.cc
@@ -819,7 +819,6 @@ row_purge_parse_undo_rec(
{
dict_index_t* clust_index;
byte* ptr;
- trx_t* trx;
undo_no_t undo_no;
table_id_t table_id;
trx_id_t trx_id;
@@ -911,10 +910,8 @@ err_exit:
ptr = trx_undo_rec_get_row_ref(ptr, clust_index, &(node->ref),
node->heap);
- trx = thr_get_trx(thr);
-
ptr = trx_undo_update_rec_get_update(ptr, clust_index, type, trx_id,
- roll_ptr, info_bits, trx,
+ roll_ptr, info_bits,
node->heap, &(node->update));
/* Read to the partial row the fields that occur in indexes */
diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc
index 86ffc631d95..0ad4c846fd6 100644
--- a/storage/innobase/row/row0sel.cc
+++ b/storage/innobase/row/row0sel.cc
@@ -1657,8 +1657,7 @@ table_loop:
#ifdef BTR_CUR_HASH_ADAPT
if (consistent_read && plan->unique_search && !plan->pcur_is_open
- && !plan->must_get_clust
- && !plan->table->big_rows) {
+ && !plan->must_get_clust) {
if (!search_latch_locked) {
btr_search_s_lock(index);
@@ -2085,8 +2084,7 @@ skip_lock:
ut_ad(plan->pcur.latch_mode == BTR_SEARCH_LEAF);
if ((plan->n_rows_fetched <= SEL_PREFETCH_LIMIT)
- || plan->unique_search || plan->no_prefetch
- || plan->table->big_rows) {
+ || plan->unique_search || plan->no_prefetch) {
/* No prefetch in operation: go to the next table */
diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc
index b45444ef064..f201115da26 100644
--- a/storage/innobase/row/row0umod.cc
+++ b/storage/innobase/row/row0umod.cc
@@ -1160,7 +1160,7 @@ close_table:
node->heap);
ptr = trx_undo_update_rec_get_update(ptr, clust_index, type, trx_id,
- roll_ptr, info_bits, node->trx,
+ roll_ptr, info_bits,
node->heap, &(node->update));
node->new_trx_id = trx_id;
node->cmpl_info = cmpl_info;
diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc
index a0314c9f67c..d78c363ff73 100644
--- a/storage/innobase/row/row0upd.cc
+++ b/storage/innobase/row/row0upd.cc
@@ -464,8 +464,8 @@ inline
bool
wsrep_must_process_fk(const upd_node_t* node, const trx_t* trx)
{
- if (que_node_get_type(node->common.parent) != QUE_NODE_UPDATE ||
- !wsrep_on(trx->mysql_thd)) {
+ if (que_node_get_type(node->common.parent) != QUE_NODE_UPDATE
+ || !wsrep_on_trx(trx)) {
return false;
}
@@ -1006,7 +1006,7 @@ row_upd_build_sec_rec_difference_binary(
dfield_copy(&(upd_field->new_val), dfield);
- upd_field_set_field_no(upd_field, i, index, NULL);
+ upd_field_set_field_no(upd_field, i, index);
n_diff++;
}
@@ -1103,7 +1103,7 @@ row_upd_build_difference_binary(
dfield_copy(&(upd_field->new_val), dfield);
- upd_field_set_field_no(upd_field, i, index, trx);
+ upd_field_set_field_no(upd_field, i, index);
n_diff++;
}
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index bd3c5a3d8bc..d7e1e062d7a 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -1898,9 +1898,6 @@ innobase_start_or_create_for_mysql()
return(srv_init_abort(DB_ERROR));
}
- compile_time_assert(ulonglong(ULINT_MAX) * UNIV_PAGE_SIZE_MIN
- >= 512ULL << 30);
-
os_normalize_path(srv_data_home);
/* Check if the data files exist or not. */
diff --git a/storage/innobase/trx/trx0rec.cc b/storage/innobase/trx/trx0rec.cc
index ca715371c44..86eb2542f75 100644
--- a/storage/innobase/trx/trx0rec.cc
+++ b/storage/innobase/trx/trx0rec.cc
@@ -1453,7 +1453,6 @@ trx_undo_update_rec_get_update(
trx_id_t trx_id, /*!< in: transaction id from this undo record */
roll_ptr_t roll_ptr,/*!< in: roll pointer from this undo record */
ulint info_bits,/*!< in: info bits from this undo record */
- trx_t* trx, /*!< in: transaction */
mem_heap_t* heap, /*!< in: memory heap from which the memory
needed is allocated */
upd_t** upd) /*!< out, own: update vector */
@@ -1489,7 +1488,7 @@ trx_undo_update_rec_get_update(
upd_field_set_field_no(upd_field,
dict_index_get_sys_col_pos(index, DATA_TRX_ID),
- index, trx);
+ index);
dfield_set_data(&(upd_field->new_val), buf, DATA_TRX_ID_LEN);
upd_field = upd_get_nth_field(update, n_fields + 1);
@@ -1500,7 +1499,7 @@ trx_undo_update_rec_get_update(
upd_field_set_field_no(
upd_field, dict_index_get_sys_col_pos(index, DATA_ROLL_PTR),
- index, trx);
+ index);
dfield_set_data(&(upd_field->new_val), buf, DATA_ROLL_PTR_LEN);
/* Store then the updated ordinary columns to the update vector */
@@ -1561,7 +1560,7 @@ trx_undo_update_rec_get_update(
upd_field_set_v_field_no(
upd_field, field_no, index);
} else {
- upd_field_set_field_no(upd_field, field_no, index, trx);
+ upd_field_set_field_no(upd_field, field_no, index);
}
ptr = trx_undo_rec_get_col_val(ptr, &field, &len, &orig_len);
@@ -2247,7 +2246,7 @@ trx_undo_prev_version_build(
ptr = trx_undo_update_rec_get_update(ptr, index, type, trx_id,
roll_ptr, info_bits,
- NULL, heap, &update);
+ heap, &update);
ut_a(ptr);
if (row_upd_changes_field_size_or_external(index, offsets, update)) {
diff --git a/storage/innobase/ut/ut0ut.cc b/storage/innobase/ut/ut0ut.cc
index 7393a24b72c..7ad80c3cd1f 100644
--- a/storage/innobase/ut/ut0ut.cc
+++ b/storage/innobase/ut/ut0ut.cc
@@ -834,6 +834,15 @@ error::~error()
sql_print_error("InnoDB: %s", m_oss.str().c_str());
}
+#ifdef _MSC_VER
+/* Disable warning C4722
+   "'ib::fatal::~fatal': destructor never returns, potential memory leak"
+   on Windows.
+*/
+#pragma warning (push)
+#pragma warning (disable : 4722)
+#endif
+
ATTRIBUTE_NORETURN
fatal::~fatal()
{
@@ -841,6 +850,10 @@ fatal::~fatal()
abort();
}
+#ifdef _MSC_VER
+#pragma warning (pop)
+#endif
+
error_or_warn::~error_or_warn()
{
if (m_error) {
diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc
index fe160b6b215..c090ba49091 100644
--- a/storage/maria/ha_maria.cc
+++ b/storage/maria/ha_maria.cc
@@ -405,7 +405,7 @@ static void _ma_check_print_msg(HA_CHECK *param, const char *msg_type,
{
THD *thd= (THD *) param->thd;
Protocol *protocol= thd->protocol;
- uint length, msg_length;
+ size_t length, msg_length;
char msgbuf[MYSQL_ERRMSG_SIZE];
char name[NAME_LEN * 2 + 2];
@@ -442,10 +442,10 @@ static void _ma_check_print_msg(HA_CHECK *param, const char *msg_type,
push_warning).
*/
protocol->prepare_for_resend();
- protocol->store(name, length, system_charset_info);
+ protocol->store(name, (uint)length, system_charset_info);
protocol->store(param->op_name, system_charset_info);
protocol->store(msg_type, system_charset_info);
- protocol->store(msgbuf, msg_length, system_charset_info);
+ protocol->store(msgbuf, (uint)msg_length, system_charset_info);
if (protocol->write())
sql_print_error("Failed on my_net_write, writing to stderr instead: %s.%s: %s\n",
param->db_name, param->table_name, msgbuf);
@@ -620,8 +620,8 @@ static int table2maria(TABLE *table_arg, data_file_type row_type,
}
}
}
- DBUG_PRINT("loop", ("found: 0x%lx recpos: %d minpos: %d length: %d",
- (long) found, recpos, minpos, length));
+ DBUG_PRINT("loop", ("found: %p recpos: %d minpos: %d length: %d",
+ found, recpos, minpos, length));
if (!found)
break;
@@ -3435,7 +3435,7 @@ bool maria_show_status(handlerton *hton,
{
char *file;
const char *status;
- uint length, status_len;
+ size_t length, status_len;
MY_STAT stat_buff, *stat;
const char error[]= "can't stat";
char object[SHOW_MSG_LEN];
@@ -3463,8 +3463,8 @@ bool maria_show_status(handlerton *hton,
status= needed;
status_len= sizeof(needed) - 1;
}
- length= my_snprintf(object, SHOW_MSG_LEN, "Size %12lu ; %s",
- (ulong) stat->st_size, file);
+ length= my_snprintf(object, SHOW_MSG_LEN, "Size %12llu ; %s",
+ (ulonglong) stat->st_size, file);
}
print(thd, engine_name->str, engine_name->length,
diff --git a/storage/maria/ma_bitmap.c b/storage/maria/ma_bitmap.c
index 0abfa34a85f..d50df294b8a 100644
--- a/storage/maria/ma_bitmap.c
+++ b/storage/maria/ma_bitmap.c
@@ -864,8 +864,8 @@ static void _ma_print_bitmap_changes(MARIA_FILE_BITMAP *bitmap)
end= bitmap->map + bitmap->used_size;
DBUG_LOCK_FILE;
- fprintf(DBUG_FILE,"\nBitmap page changes at page: %lu bitmap: 0x%lx\n",
- (ulong) bitmap->page, (long) bitmap->map);
+ fprintf(DBUG_FILE,"\nBitmap page changes at page: %lu bitmap: %p\n",
+ (ulong) bitmap->page, bitmap->map);
page= (ulong) bitmap->page+1;
for (pos= bitmap->map, org_pos= bitmap->map + bitmap->block_size ;
diff --git a/storage/maria/ma_blockrec.c b/storage/maria/ma_blockrec.c
index 57596558208..b965f5db34b 100644
--- a/storage/maria/ma_blockrec.c
+++ b/storage/maria/ma_blockrec.c
@@ -7540,7 +7540,7 @@ void _ma_print_block_info(MARIA_SHARE *share, uchar *buff)
{
LSN lsn= lsn_korr(buff);
- printf("LSN: %lu,0x%lx type: %u dir_entries: %u dir_free: %u empty_space: %u\n",
+ printf("LSN:" LSN_FMT " type: %u dir_entries: %u dir_free: %u empty_space: %u\n",
LSN_IN_PARTS(lsn),
(uint)buff[PAGE_TYPE_OFFSET],
(uint)buff[DIR_COUNT_OFFSET],
diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c
index 2b27207e601..7caec1fd834 100644
--- a/storage/maria/ma_check.c
+++ b/storage/maria/ma_check.c
@@ -772,7 +772,7 @@ static
void maria_collect_stats_nonulls_first(HA_KEYSEG *keyseg, ulonglong *notnull,
const uchar *key)
{
- uint first_null, kp;
+ size_t first_null, kp;
first_null= ha_find_null(keyseg, key) - keyseg;
/*
All prefix tuples that don't include keypart_{first_null} are not-null
@@ -814,7 +814,7 @@ int maria_collect_stats_nonulls_next(HA_KEYSEG *keyseg, ulonglong *notnull,
const uchar *last_key)
{
uint diffs[2];
- uint first_null_seg, kp;
+ size_t first_null_seg, kp;
HA_KEYSEG *seg;
/*
@@ -2519,8 +2519,8 @@ static int maria_drop_all_indexes(HA_CHECK *param, MARIA_HA *info,
DBUG_PRINT("repair", ("creating missing indexes"));
for (i= 0; i < share->base.keys; i++)
{
- DBUG_PRINT("repair", ("index #: %u key_root: 0x%lx active: %d",
- i, (long) state->key_root[i],
+ DBUG_PRINT("repair", ("index #: %u key_root:%lld active: %d",
+ i, state->key_root[i],
maria_is_key_active(state->key_map, i)));
if ((state->key_root[i] != HA_OFFSET_ERROR) &&
!maria_is_key_active(state->key_map, i))
@@ -4477,8 +4477,8 @@ int maria_repair_parallel(HA_CHECK *param, register MARIA_HA *info,
*/
sort_param[i].read_cache= ((rep_quick || !i) ? param->read_cache :
new_data_cache);
- DBUG_PRINT("io_cache_share", ("thread: %u read_cache: 0x%lx",
- i, (long) &sort_param[i].read_cache));
+ DBUG_PRINT("io_cache_share", ("thread: %u read_cache: %p",
+ i, &sort_param[i].read_cache));
/*
two approaches: the same amount of memory for each thread
@@ -5671,7 +5671,7 @@ static int sort_maria_ft_key_write(MARIA_SORT_PARAM *sort_param,
key_block++;
sort_info->key_block=key_block;
sort_param->keyinfo= &share->ft2_keyinfo;
- ft_buf->count=(ft_buf->buf - p)/val_len;
+ ft_buf->count=(uint)(ft_buf->buf - p)/val_len;
/* flushing buffer to second-level tree */
for (error=0; !error && p < ft_buf->buf; p+= val_len)
diff --git a/storage/maria/ma_checkpoint.c b/storage/maria/ma_checkpoint.c
index c00278781ea..84c142ca151 100644
--- a/storage/maria/ma_checkpoint.c
+++ b/storage/maria/ma_checkpoint.c
@@ -170,7 +170,7 @@ static int really_execute_checkpoint(void)
"Horizon" is a lower bound of the LSN of the next log record.
*/
checkpoint_start_log_horizon= translog_get_horizon();
- DBUG_PRINT("info",("checkpoint_start_log_horizon (%lu,0x%lx)",
+ DBUG_PRINT("info",("checkpoint_start_log_horizon " LSN_FMT,
LSN_IN_PARTS(checkpoint_start_log_horizon)));
lsn_store(checkpoint_start_log_horizon_char, checkpoint_start_log_horizon);
@@ -333,10 +333,11 @@ int ma_checkpoint_init(ulong interval)
else if (interval > 0)
{
compile_time_assert(sizeof(void *) >= sizeof(ulong));
+ size_t intv= interval;
if ((res= mysql_thread_create(key_thread_checkpoint,
&checkpoint_control.thread, NULL,
ma_checkpoint_background,
- (void*) interval)))
+ (void*) intv)))
checkpoint_control.killed= TRUE;
}
else
@@ -375,7 +376,7 @@ static void flush_all_tables(int what_to_flush)
MA_STATE_INFO_WRITE_DONT_MOVE_OFFSET|
MA_STATE_INFO_WRITE_LOCK);
DBUG_PRINT("maria_flush_states",
- ("is_of_horizon: LSN (%lu,0x%lx)",
+ ("is_of_horizon: LSN " LSN_FMT,
LSN_IN_PARTS(info->s->state.is_of_horizon)));
break;
case 2:
@@ -546,8 +547,8 @@ pthread_handler_t ma_checkpoint_background(void *arg)
right after "case 0", thus having 'dfile' unset. So the thread cares only
about the interval's value when it started.
*/
- const ulong interval= (ulong)arg;
- uint sleeps, sleep_time;
+ const size_t interval= (size_t)arg;
+ size_t sleeps, sleep_time;
TRANSLOG_ADDRESS log_horizon_at_last_checkpoint=
translog_get_horizon();
ulonglong pagecache_flushes_at_last_checkpoint=
diff --git a/storage/maria/ma_close.c b/storage/maria/ma_close.c
index d89a69f02ab..882e9f585f1 100644
--- a/storage/maria/ma_close.c
+++ b/storage/maria/ma_close.c
@@ -30,9 +30,9 @@ int maria_close(register MARIA_HA *info)
MARIA_SHARE *share= info->s;
my_bool internal_table= share->internal_table;
DBUG_ENTER("maria_close");
- DBUG_PRINT("enter",("name: '%s' base: 0x%lx reopen: %u locks: %u",
+ DBUG_PRINT("enter",("name: '%s' base: %p reopen: %u locks: %u",
share->open_file_name.str,
- (long) info, (uint) share->reopen,
+ info, (uint) share->reopen,
(uint) share->tot_locks));
/* Check that we have unlocked key delete-links properly */
diff --git a/storage/maria/ma_commit.c b/storage/maria/ma_commit.c
index 358f564d3f1..68435a45c0a 100644
--- a/storage/maria/ma_commit.c
+++ b/storage/maria/ma_commit.c
@@ -121,7 +121,7 @@ int maria_begin(MARIA_HA *info)
if (unlikely(!trn))
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
- DBUG_PRINT("info", ("TRN set to 0x%lx", (ulong) trn));
+ DBUG_PRINT("info", ("TRN set to %p", trn));
_ma_set_trn_for_table(info, trn);
}
DBUG_RETURN(0);
diff --git a/storage/maria/ma_control_file.c b/storage/maria/ma_control_file.c
index b46cf7e1765..1ccb67d5698 100644
--- a/storage/maria/ma_control_file.c
+++ b/storage/maria/ma_control_file.c
@@ -146,6 +146,8 @@ static CONTROL_FILE_ERROR create_control_file(const char *name,
{
uint32 sum;
uchar buffer[CF_CREATE_TIME_TOTAL_SIZE];
+ ulong rnd1,rnd2;
+
DBUG_ENTER("maria_create_control_file");
if ((control_file_fd= mysql_file_create(key_file_control, name, 0,
@@ -157,7 +159,9 @@ static CONTROL_FILE_ERROR create_control_file(const char *name,
cf_changeable_size= CF_CHANGEABLE_TOTAL_SIZE;
/* Create unique uuid for the control file */
- my_uuid_init((ulong) &buffer, (ulong) &maria_uuid);
+ my_random_bytes((uchar *)&rnd1, sizeof (rnd1));
+ my_random_bytes((uchar *)&rnd2, sizeof (rnd2));
+ my_uuid_init(rnd1, rnd2);
my_uuid(maria_uuid);
/* Prepare and write the file header */
diff --git a/storage/maria/ma_create.c b/storage/maria/ma_create.c
index affad4b84c4..bf762a0ec0f 100644
--- a/storage/maria/ma_create.c
+++ b/storage/maria/ma_create.c
@@ -33,6 +33,27 @@
static int compare_columns(MARIA_COLUMNDEF **a, MARIA_COLUMNDEF **b);
+
+static ulonglong update_tot_length(ulonglong tot_length, ulonglong max_rows, uint length)
+{
+ ulonglong tot_length_part;
+
+ if (tot_length == ULONGLONG_MAX)
+ return ULONGLONG_MAX;
+
+ tot_length_part= (max_rows/(ulong) ((maria_block_size -
+ MAX_KEYPAGE_HEADER_SIZE - KEYPAGE_CHECKSUM_SIZE)/
+ (length*2)));
+ if (tot_length_part >= ULONGLONG_MAX / maria_block_size)
+ return ULONGLONG_MAX;
+
+ if (tot_length > ULONGLONG_MAX - tot_length_part * maria_block_size)
+ return ULONGLONG_MAX;
+
+ return tot_length + tot_length_part * maria_block_size;
+}
+
+
/*
Old options is used when recreating database, from maria_chk
*/
@@ -57,7 +78,7 @@ int maria_create(const char *name, enum data_file_type datafile_type,
char kfilename[FN_REFLEN], klinkname[FN_REFLEN], *klinkname_ptr;
char dfilename[FN_REFLEN], dlinkname[FN_REFLEN], *dlinkname_ptr= 0;
ulong pack_reclength;
- ulonglong tot_length,max_rows, tmp, tot_length_part;
+ ulonglong tot_length,max_rows, tmp;
enum en_fieldtype type;
enum data_file_type org_datafile_type= datafile_type;
MARIA_SHARE share;
@@ -661,23 +682,7 @@ int maria_create(const char *name, enum data_file_type datafile_type,
if (length > max_key_length)
max_key_length= length;
- if (tot_length == ULLONG_MAX)
- continue;
-
- tot_length_part= (max_rows/(ulong) (((uint) maria_block_size -
- MAX_KEYPAGE_HEADER_SIZE -
- KEYPAGE_CHECKSUM_SIZE)/
- (length*2)));
- if (tot_length_part >= (ULLONG_MAX / maria_block_size +
- ULLONG_MAX % maria_block_size))
- tot_length= ULLONG_MAX;
- else
- {
- if (tot_length > ULLONG_MAX - tot_length_part * maria_block_size)
- tot_length= ULLONG_MAX;
- else
- tot_length+= tot_length_part * maria_block_size;
- }
+ tot_length= update_tot_length(tot_length, max_rows, length);
}
unique_key_parts=0;
@@ -687,23 +692,7 @@ int maria_create(const char *name, enum data_file_type datafile_type,
unique_key_parts+=uniquedef->keysegs;
share.state.key_root[keys+i]= HA_OFFSET_ERROR;
- if (tot_length == ULLONG_MAX)
- continue;
- ulonglong tot_length_part= (max_rows/(ulong) (((uint) maria_block_size -
- MAX_KEYPAGE_HEADER_SIZE -
- KEYPAGE_CHECKSUM_SIZE) /
- ((MARIA_UNIQUE_HASH_LENGTH + pointer)*2)));
-
- if (tot_length_part >= (ULLONG_MAX / maria_block_size +
- ULLONG_MAX % maria_block_size))
- tot_length= ULLONG_MAX;
- else
- {
- if (tot_length > ULLONG_MAX - tot_length_part * maria_block_size)
- tot_length= ULLONG_MAX;
- else
- tot_length+= tot_length_part * maria_block_size;
- }
+ tot_length= update_tot_length(tot_length, max_rows, MARIA_UNIQUE_HASH_LENGTH + pointer);
}
keys+=uniques; /* Each unique has 1 key */
key_segs+=uniques; /* Each unique has 1 key seg */
diff --git a/storage/maria/ma_delete.c b/storage/maria/ma_delete.c
index 7921ab59a8f..c5a2378dc2b 100644
--- a/storage/maria/ma_delete.c
+++ b/storage/maria/ma_delete.c
@@ -559,9 +559,9 @@ static int del(MARIA_HA *info, MARIA_KEY *key,
MARIA_KEY ret_key;
MARIA_PAGE next_page;
DBUG_ENTER("del");
- DBUG_PRINT("enter",("leaf_page: %lu keypos: 0x%lx",
+ DBUG_PRINT("enter",("leaf_page: %lu keypos: %p",
(ulong) (leaf_page->pos / share->block_size),
- (ulong) keypos));
+ keypos));
DBUG_DUMP("leaf_buff", leaf_page->buff, leaf_page->size);
page_flag= leaf_page->flag;
@@ -773,9 +773,9 @@ static int underflow(MARIA_HA *info, MARIA_KEYDEF *keyinfo,
MARIA_KEY tmp_key, anc_key, leaf_key;
MARIA_PAGE next_page;
DBUG_ENTER("underflow");
- DBUG_PRINT("enter",("leaf_page: %lu keypos: 0x%lx",
+ DBUG_PRINT("enter",("leaf_page: %lu keypos: %p",
(ulong) (leaf_page->pos / share->block_size),
- (ulong) keypos));
+ keypos));
DBUG_DUMP("anc_buff", anc_page->buff, anc_page->size);
DBUG_DUMP("leaf_buff", leaf_page->buff, leaf_page->size);
@@ -918,8 +918,8 @@ static int underflow(MARIA_HA *info, MARIA_KEYDEF *keyinfo,
anc_end_pos= anc_buff + new_anc_length;
- DBUG_PRINT("test",("anc_buff: 0x%lx anc_end_pos: 0x%lx",
- (long) anc_buff, (long) anc_end_pos));
+ DBUG_PRINT("test",("anc_buff:%p anc_end_pos:%p",
+ anc_buff, anc_end_pos));
if (!first_key && !_ma_get_last_key(&anc_key, anc_page, keypos))
goto err;
@@ -1308,8 +1308,8 @@ static uint remove_key(MARIA_KEYDEF *keyinfo, uint page_flag, uint nod_flag,
int s_length;
uchar *start;
DBUG_ENTER("remove_key");
- DBUG_PRINT("enter", ("keypos: 0x%lx page_end: 0x%lx",
- (long) keypos, (long) page_end));
+ DBUG_PRINT("enter", ("keypos:%p page_end: %p",
+ keypos, page_end));
start= s_temp->key_pos= keypos;
s_temp->changed_length= 0;
diff --git a/storage/maria/ma_dynrec.c b/storage/maria/ma_dynrec.c
index 7f34b73089f..e5c108a18c6 100644
--- a/storage/maria/ma_dynrec.c
+++ b/storage/maria/ma_dynrec.c
@@ -1343,8 +1343,8 @@ ulong _ma_rec_unpack(register MARIA_HA *info, register uchar *to, uchar *from,
err:
_ma_set_fatal_error(info->s, HA_ERR_WRONG_IN_RECORD);
- DBUG_PRINT("error",("to_end: 0x%lx -> 0x%lx from_end: 0x%lx -> 0x%lx",
- (long) to, (long) to_end, (long) from, (long) from_end));
+ DBUG_PRINT("error",("to_end: %p -> %p from_end: %p -> %p",
+ to, to_end, from, from_end));
DBUG_DUMP("from", info->rec_buff, info->s->base.min_pack_length);
DBUG_RETURN(MY_FILE_ERROR);
} /* _ma_rec_unpack */
diff --git a/storage/maria/ma_key.c b/storage/maria/ma_key.c
index 89693f45dca..6f3e17ed80d 100644
--- a/storage/maria/ma_key.c
+++ b/storage/maria/ma_key.c
@@ -318,7 +318,7 @@ MARIA_KEY *_ma_make_key(MARIA_HA *info, MARIA_KEY *int_key, uint keynr,
key+= length;
}
_ma_dpointer(info->s, key, filepos);
- int_key->data_length= (key - int_key->data);
+ int_key->data_length= (uint)(key - int_key->data);
int_key->ref_length= info->s->rec_reflength;
int_key->flag= 0;
if (_ma_have_versioning(info) && trid)
@@ -449,7 +449,7 @@ MARIA_KEY *_ma_pack_key(register MARIA_HA *info, MARIA_KEY *int_key,
/* set flag to SEARCH_PART_KEY if we are not using all key parts */
int_key->flag= keyseg->type ? SEARCH_PART_KEY : 0;
int_key->ref_length= 0;
- int_key->data_length= (key - int_key->data);
+ int_key->data_length= (uint)(key - int_key->data);
DBUG_PRINT("exit", ("length: %u", int_key->data_length));
DBUG_RETURN(int_key);
diff --git a/storage/maria/ma_keycache.c b/storage/maria/ma_keycache.c
index c3083445aee..39459c486fd 100644
--- a/storage/maria/ma_keycache.c
+++ b/storage/maria/ma_keycache.c
@@ -54,8 +54,8 @@ int maria_assign_to_pagecache(MARIA_HA *info,
MARIA_SHARE* share= info->s;
DBUG_ENTER("maria_assign_to_pagecache");
DBUG_PRINT("enter",
- ("old_pagecache_handle: 0x%lx new_pagecache_handle: 0x%lx",
- (long) share->pagecache, (long) pagecache));
+ ("old_pagecache_handle:%p new_pagecache_handle:%p",
+ share->pagecache, pagecache));
/*
Skip operation if we didn't change key cache. This can happen if we
diff --git a/storage/maria/ma_loghandler.c b/storage/maria/ma_loghandler.c
index defa11ad6b4..ce254c5f631 100644
--- a/storage/maria/ma_loghandler.c
+++ b/storage/maria/ma_loghandler.c
@@ -909,7 +909,7 @@ char *translog_filename_by_fileno(uint32 file_no, char *path)
length= (uint) (int10_to_str(file_no, buff, 10) - buff);
strmov(end - length +1, buff);
- DBUG_PRINT("info", ("Path: '%s' path: 0x%lx", path, (ulong) path));
+ DBUG_PRINT("info", ("Path: '%s' path: %p", path, path));
DBUG_RETURN(path);
}
@@ -1013,8 +1013,8 @@ static TRANSLOG_FILE *get_logfile_by_number(uint32 file_no)
file= *dynamic_element(&log_descriptor.open_files,
log_descriptor.max_file - file_no, TRANSLOG_FILE **);
mysql_rwlock_unlock(&log_descriptor.open_files_lock);
- DBUG_PRINT("info", ("File 0x%lx File no: %lu, File handler: %d",
- (ulong)file, (ulong)file_no,
+ DBUG_PRINT("info", ("File %p File no: %u, File handler: %d",
+ file, file_no,
(file ? file->handler.file : -1)));
DBUG_ASSERT(!file || file->number == file_no);
DBUG_RETURN(file);
@@ -1127,7 +1127,7 @@ static my_bool translog_max_lsn_to_header(File file, LSN lsn)
my_bool rc;
DBUG_ENTER("translog_max_lsn_to_header");
DBUG_PRINT("enter", ("File descriptor: %ld "
- "lsn: (%lu,0x%lx)",
+ "lsn: " LSN_FMT,
(long) file,
LSN_IN_PARTS(lsn)));
@@ -1200,7 +1200,7 @@ my_bool translog_read_file_header(LOGHANDLER_FILE_INFO *desc, File file)
translog_interpret_file_header(desc, page_buff);
DBUG_PRINT("info", ("timestamp: %llu aria ver: %lu mysql ver: %lu "
"server id %lu page size %lu file number %lu "
- "max lsn: (%lu,0x%lx)",
+ "max lsn: " LSN_FMT,
(ulonglong) desc->timestamp,
(ulong) desc->maria_version,
(ulong) desc->mysql_version,
@@ -1229,7 +1229,7 @@ static my_bool translog_set_lsn_for_files(uint32 from_file, uint32 to_file,
{
uint32 file;
DBUG_ENTER("translog_set_lsn_for_files");
- DBUG_PRINT("enter", ("From: %lu to: %lu lsn: (%lu,0x%lx) locked: %d",
+ DBUG_PRINT("enter", ("From: %lu to: %lu lsn: " LSN_FMT " locked: %d",
(ulong) from_file, (ulong) to_file,
LSN_IN_PARTS(lsn),
is_locked));
@@ -1458,7 +1458,7 @@ LSN translog_get_file_max_lsn_stored(uint32 file)
info.max_lsn= LSN_ERROR;
}
- DBUG_PRINT("info", ("Max lsn: (%lu,0x%lx)", LSN_IN_PARTS(info.max_lsn)));
+ DBUG_PRINT("info", ("Max lsn: " LSN_FMT, LSN_IN_PARTS(info.max_lsn)));
DBUG_RETURN(info.max_lsn);
}
}
@@ -1482,8 +1482,8 @@ static my_bool translog_buffer_init(struct st_translog_buffer *buffer, int num)
buffer->pre_force_close_horizon=
buffer->prev_last_lsn= buffer->last_lsn=
LSN_IMPOSSIBLE;
- DBUG_PRINT("info", ("last_lsn and prev_last_lsn set to 0 buffer: 0x%lx",
- (ulong) buffer));
+ DBUG_PRINT("info", ("last_lsn and prev_last_lsn set to 0 buffer: %p",
+ buffer));
buffer->buffer_no= (uint8) num;
/* This Buffer File */
@@ -1684,8 +1684,8 @@ static void translog_buffer_lock(struct st_translog_buffer *buffer)
{
DBUG_ENTER("translog_buffer_lock");
DBUG_PRINT("enter",
- ("Lock buffer #%u: (0x%lx)", (uint) buffer->buffer_no,
- (ulong) buffer));
+ ("Lock buffer #%u: %p", buffer->buffer_no,
+ buffer));
mysql_mutex_lock(&buffer->mutex);
DBUG_VOID_RETURN;
}
@@ -1706,8 +1706,8 @@ static void translog_buffer_lock(struct st_translog_buffer *buffer)
static void translog_buffer_unlock(struct st_translog_buffer *buffer)
{
DBUG_ENTER("translog_buffer_unlock");
- DBUG_PRINT("enter", ("Unlock buffer... #%u (0x%lx)",
- (uint) buffer->buffer_no, (ulong) buffer));
+ DBUG_PRINT("enter", ("Unlock buffer... #%u (%p)",
+ (uint) buffer->buffer_no, buffer));
mysql_mutex_unlock(&buffer->mutex);
DBUG_VOID_RETURN;
@@ -1751,7 +1751,7 @@ static void translog_new_page_header(TRANSLOG_ADDRESS *horizon,
if (log_descriptor.flags & TRANSLOG_PAGE_CRC)
{
#ifndef DBUG_OFF
- DBUG_PRINT("info", ("write 0x11223344 CRC to (%lu,0x%lx)",
+ DBUG_PRINT("info", ("write 0x11223344 CRC to " LSN_FMT,
LSN_IN_PARTS(*horizon)));
/* This will be overwritten by real CRC; This is just for debugging */
int4store(ptr, 0x11223344);
@@ -1770,16 +1770,16 @@ static void translog_new_page_header(TRANSLOG_ADDRESS *horizon,
ptr+= TRANSLOG_PAGE_SIZE / DISK_DRIVE_SECTOR_SIZE;
}
{
- uint len= (ptr - cursor->ptr);
+ size_t len= (ptr - cursor->ptr);
(*horizon)+= len; /* increasing the offset part of the address */
- cursor->current_page_fill= len;
+ cursor->current_page_fill= (uint16)len;
if (!cursor->chaser)
- cursor->buffer->size+= len;
+ cursor->buffer->size+= (translog_size_t)len;
}
cursor->ptr= ptr;
- DBUG_PRINT("info", ("NewP buffer #%u: 0x%lx chaser: %d Size: %lu (%lu) "
- "Horizon: (%lu,0x%lx)",
- (uint) cursor->buffer->buffer_no, (ulong) cursor->buffer,
+ DBUG_PRINT("info", ("NewP buffer #%u: %p chaser: %d Size: %lu (%lu) "
+ "Horizon: " LSN_FMT,
+ (uint) cursor->buffer->buffer_no, cursor->buffer,
cursor->chaser, (ulong) cursor->buffer->size,
(ulong) (cursor->ptr - cursor->buffer->buffer),
LSN_IN_PARTS(*horizon)));
@@ -1882,17 +1882,17 @@ static void translog_finish_page(TRANSLOG_ADDRESS *horizon,
uint16 left= TRANSLOG_PAGE_SIZE - cursor->current_page_fill;
uchar *page= cursor->ptr - cursor->current_page_fill;
DBUG_ENTER("translog_finish_page");
- DBUG_PRINT("enter", ("Buffer: #%u 0x%lx "
- "Buffer addr: (%lu,0x%lx) "
- "Page addr: (%lu,0x%lx) "
- "size:%lu (%lu) Pg:%u left:%u",
- (uint) cursor->buffer_no, (ulong) cursor->buffer,
+ DBUG_PRINT("enter", ("Buffer: #%u %p "
+ "Buffer addr: " LSN_FMT " "
+ "Page addr: " LSN_FMT " "
+ "size:%u (%u) Pg:%u left:%u",
+ (uint) cursor->buffer_no, cursor->buffer,
LSN_IN_PARTS(cursor->buffer->offset),
- (ulong) LSN_FILE_NO(*horizon),
- (ulong) (LSN_OFFSET(*horizon) -
+ (uint)LSN_FILE_NO(*horizon),
+ (uint)(LSN_OFFSET(*horizon) -
cursor->current_page_fill),
- (ulong) cursor->buffer->size,
- (ulong) (cursor->ptr -cursor->buffer->buffer),
+ (uint) cursor->buffer->size,
+ (uint) (cursor->ptr -cursor->buffer->buffer),
(uint) cursor->current_page_fill, (uint) left));
DBUG_ASSERT(LSN_FILE_NO(*horizon) == LSN_FILE_NO(cursor->buffer->offset));
translog_check_cursor(cursor);
@@ -1914,10 +1914,10 @@ static void translog_finish_page(TRANSLOG_ADDRESS *horizon,
cursor->buffer->size+= left;
/* We are finishing the page so reset the counter */
cursor->current_page_fill= 0;
- DBUG_PRINT("info", ("Finish Page buffer #%u: 0x%lx "
+ DBUG_PRINT("info", ("Finish Page buffer #%u: %p "
"chaser: %d Size: %lu (%lu)",
(uint) cursor->buffer->buffer_no,
- (ulong) cursor->buffer, cursor->chaser,
+ cursor->buffer, cursor->chaser,
(ulong) cursor->buffer->size,
(ulong) (cursor->ptr - cursor->buffer->buffer)));
translog_check_cursor(cursor);
@@ -1956,9 +1956,9 @@ static void translog_finish_page(TRANSLOG_ADDRESS *horizon,
static void translog_wait_for_closing(struct st_translog_buffer *buffer)
{
DBUG_ENTER("translog_wait_for_closing");
- DBUG_PRINT("enter", ("Buffer #%u 0x%lx copies in progress: %u "
+ DBUG_PRINT("enter", ("Buffer #%u %p copies in progress: %u "
"is closing %u File: %d size: %lu",
- (uint) buffer->buffer_no, (ulong) buffer,
+ (uint) buffer->buffer_no, buffer,
(uint) buffer->copy_to_buffer_in_progress,
(uint) buffer->is_closing_buffer,
(buffer->file ? buffer->file->handler.file : -1),
@@ -1967,12 +1967,12 @@ static void translog_wait_for_closing(struct st_translog_buffer *buffer)
while (buffer->is_closing_buffer)
{
- DBUG_PRINT("info", ("wait for writers... buffer: #%u 0x%lx",
- (uint) buffer->buffer_no, (ulong) buffer));
+ DBUG_PRINT("info", ("wait for writers... buffer: #%u %p",
+ (uint) buffer->buffer_no, buffer));
DBUG_ASSERT(buffer->file != NULL);
mysql_cond_wait(&buffer->waiting_filling_buffer, &buffer->mutex);
- DBUG_PRINT("info", ("wait for writers done buffer: #%u 0x%lx",
- (uint) buffer->buffer_no, (ulong) buffer));
+ DBUG_PRINT("info", ("wait for writers done buffer: #%u %p",
+ (uint) buffer->buffer_no, buffer));
}
DBUG_VOID_RETURN;
@@ -1988,9 +1988,9 @@ static void translog_wait_for_closing(struct st_translog_buffer *buffer)
static void translog_wait_for_writers(struct st_translog_buffer *buffer)
{
DBUG_ENTER("translog_wait_for_writers");
- DBUG_PRINT("enter", ("Buffer #%u 0x%lx copies in progress: %u "
+ DBUG_PRINT("enter", ("Buffer #%u %p copies in progress: %u "
"is closing %u File: %d size: %lu",
- (uint) buffer->buffer_no, (ulong) buffer,
+ (uint) buffer->buffer_no, buffer,
(uint) buffer->copy_to_buffer_in_progress,
(uint) buffer->is_closing_buffer,
(buffer->file ? buffer->file->handler.file : -1),
@@ -1999,12 +1999,12 @@ static void translog_wait_for_writers(struct st_translog_buffer *buffer)
while (buffer->copy_to_buffer_in_progress)
{
- DBUG_PRINT("info", ("wait for writers... buffer: #%u 0x%lx",
- (uint) buffer->buffer_no, (ulong) buffer));
+ DBUG_PRINT("info", ("wait for writers... buffer: #%u %p",
+ (uint) buffer->buffer_no, buffer));
DBUG_ASSERT(buffer->file != NULL);
mysql_cond_wait(&buffer->waiting_filling_buffer, &buffer->mutex);
- DBUG_PRINT("info", ("wait for writers done buffer: #%u 0x%lx",
- (uint) buffer->buffer_no, (ulong) buffer));
+ DBUG_PRINT("info", ("wait for writers done buffer: #%u %p",
+ (uint) buffer->buffer_no, buffer));
}
DBUG_VOID_RETURN;
@@ -2029,9 +2029,9 @@ static void translog_wait_for_buffer_free(struct st_translog_buffer *buffer)
TRANSLOG_FILE *file= buffer->file;
uint8 ver= buffer->ver;
DBUG_ENTER("translog_wait_for_buffer_free");
- DBUG_PRINT("enter", ("Buffer #%u 0x%lx copies in progress: %u "
+ DBUG_PRINT("enter", ("Buffer #%u %p copies in progress: %u "
"is closing %u File: %d size: %lu",
- (uint) buffer->buffer_no, (ulong) buffer,
+ (uint) buffer->buffer_no, buffer,
(uint) buffer->copy_to_buffer_in_progress,
(uint) buffer->is_closing_buffer,
(buffer->file ? buffer->file->handler.file : -1),
@@ -2044,11 +2044,11 @@ static void translog_wait_for_buffer_free(struct st_translog_buffer *buffer)
while (buffer->file != NULL)
{
- DBUG_PRINT("info", ("wait for writers... buffer: #%u 0x%lx",
- (uint) buffer->buffer_no, (ulong) buffer));
+ DBUG_PRINT("info", ("wait for writers... buffer: #%u %p",
+ (uint) buffer->buffer_no, buffer));
mysql_cond_wait(&buffer->waiting_filling_buffer, &buffer->mutex);
- DBUG_PRINT("info", ("wait for writers done. buffer: #%u 0x%lx",
- (uint) buffer->buffer_no, (ulong) buffer));
+ DBUG_PRINT("info", ("wait for writers done. buffer: #%u %p",
+ (uint) buffer->buffer_no, buffer));
}
DBUG_ASSERT(buffer->copy_to_buffer_in_progress == 0);
DBUG_VOID_RETURN;
@@ -2096,15 +2096,15 @@ static void translog_start_buffer(struct st_translog_buffer *buffer,
{
DBUG_ENTER("translog_start_buffer");
DBUG_PRINT("enter",
- ("Assign buffer: #%u (0x%lx) offset: 0x%lx(%lu)",
- (uint) buffer->buffer_no, (ulong) buffer,
- (ulong) LSN_OFFSET(log_descriptor.horizon),
- (ulong) LSN_OFFSET(log_descriptor.horizon)));
+ ("Assign buffer: #%u (%p) offset: 0x%x(%u)",
+ (uint) buffer->buffer_no, buffer,
+ (uint) LSN_OFFSET(log_descriptor.horizon),
+ (uint) LSN_OFFSET(log_descriptor.horizon)));
DBUG_ASSERT(buffer_no == buffer->buffer_no);
buffer->pre_force_close_horizon=
buffer->prev_last_lsn= buffer->last_lsn= LSN_IMPOSSIBLE;
- DBUG_PRINT("info", ("last_lsn and prev_last_lsn set to 0 buffer: 0x%lx",
- (ulong) buffer));
+ DBUG_PRINT("info", ("last_lsn and prev_last_lsn set to 0 buffer: %p",
+ buffer));
buffer->offset= log_descriptor.horizon;
buffer->next_buffer_offset= LSN_IMPOSSIBLE;
buffer->file= get_current_logfile();
@@ -2112,11 +2112,11 @@ static void translog_start_buffer(struct st_translog_buffer *buffer,
buffer->size= 0;
buffer->skipped_data= 0;
translog_cursor_init(cursor, buffer, buffer_no);
- DBUG_PRINT("info", ("file: #%ld (%d) init cursor #%u: 0x%lx "
+ DBUG_PRINT("info", ("file: #%ld (%d) init cursor #%u: %p "
"chaser: %d Size: %lu (%lu)",
(long) (buffer->file ? buffer->file->number : 0),
(buffer->file ? buffer->file->handler.file : -1),
- (uint) cursor->buffer->buffer_no, (ulong) cursor->buffer,
+ (uint) cursor->buffer->buffer_no, cursor->buffer,
cursor->chaser, (ulong) cursor->buffer->size,
(ulong) (cursor->ptr - cursor->buffer->buffer)));
translog_check_cursor(cursor);
@@ -2153,7 +2153,7 @@ static my_bool translog_buffer_next(TRANSLOG_ADDRESS *horizon,
my_bool chasing= cursor->chaser;
DBUG_ENTER("translog_buffer_next");
- DBUG_PRINT("info", ("horizon: (%lu,0x%lx) chasing: %d",
+ DBUG_PRINT("info", ("horizon: " LSN_FMT " chasing: %d",
LSN_IN_PARTS(log_descriptor.horizon), chasing));
DBUG_ASSERT(cmp_translog_addr(log_descriptor.horizon, *horizon) >= 0);
@@ -2205,9 +2205,9 @@ static my_bool translog_buffer_next(TRANSLOG_ADDRESS *horizon,
BUFFER_MAX_LSN(log_descriptor.buffers + old_buffer_no);
}
log_descriptor.buffers[old_buffer_no].next_buffer_offset= new_buffer->offset;
- DBUG_PRINT("info", ("prev_last_lsn set to (%lu,0x%lx) buffer: 0x%lx",
+ DBUG_PRINT("info", ("prev_last_lsn set to " LSN_FMT " buffer:%p",
LSN_IN_PARTS(new_buffer->prev_last_lsn),
- (ulong) new_buffer));
+ new_buffer));
translog_new_page_header(horizon, cursor);
DBUG_RETURN(0);
}
@@ -2230,9 +2230,9 @@ static void translog_set_sent_to_disk(struct st_translog_buffer *buffer)
DBUG_ENTER("translog_set_sent_to_disk");
mysql_mutex_lock(&log_descriptor.sent_to_disk_lock);
- DBUG_PRINT("enter", ("lsn: (%lu,0x%lx) in_buffers: (%lu,0x%lx) "
- "in_buffers_only: (%lu,0x%lx) start: (%lu,0x%lx) "
- "sent_to_disk: (%lu,0x%lx)",
+ DBUG_PRINT("enter", ("lsn: " LSN_FMT " in_buffers: " LSN_FMT " "
+ "in_buffers_only: " LSN_FMT " start: " LSN_FMT " "
+ "sent_to_disk: " LSN_FMT,
LSN_IN_PARTS(lsn),
LSN_IN_PARTS(in_buffers),
LSN_IN_PARTS(log_descriptor.log_start),
@@ -2270,8 +2270,8 @@ static void translog_set_only_in_buffers(TRANSLOG_ADDRESS in_buffers)
{
DBUG_ENTER("translog_set_only_in_buffers");
mysql_mutex_lock(&log_descriptor.sent_to_disk_lock);
- DBUG_PRINT("enter", ("in_buffers: (%lu,0x%lx) "
- "in_buffers_only: (%lu,0x%lx)",
+ DBUG_PRINT("enter", ("in_buffers: " LSN_FMT " "
+ "in_buffers_only: " LSN_FMT,
LSN_IN_PARTS(in_buffers),
LSN_IN_PARTS(log_descriptor.in_buffers_only)));
/* LSN_IMPOSSIBLE == 0 => it will work for very first time */
@@ -2325,7 +2325,7 @@ static LSN translog_get_sent_to_disk()
DBUG_ENTER("translog_get_sent_to_disk");
mysql_mutex_lock(&log_descriptor.sent_to_disk_lock);
lsn= log_descriptor.sent_to_disk;
- DBUG_PRINT("info", ("sent to disk up to (%lu,0x%lx)", LSN_IN_PARTS(lsn)));
+ DBUG_PRINT("info", ("sent to disk up to " LSN_FMT, LSN_IN_PARTS(lsn)));
mysql_mutex_unlock(&log_descriptor.sent_to_disk_lock);
DBUG_RETURN(lsn);
}
@@ -2534,9 +2534,9 @@ my_bool translog_prev_buffer_flush_wait(struct st_translog_buffer *buffer)
TRANSLOG_FILE *file= buffer->file;
uint8 ver= buffer->ver;
DBUG_ENTER("translog_prev_buffer_flush_wait");
- DBUG_PRINT("enter", ("buffer: 0x%lx #%u offset: (%lu,0x%lx) "
- "prev sent: (%lu,0x%lx) prev offset: (%lu,0x%lx)",
- (ulong) buffer, (uint) buffer->buffer_no,
+ DBUG_PRINT("enter", ("buffer: %p #%u offset: " LSN_FMT " "
+ "prev sent: " LSN_FMT " prev offset: " LSN_FMT,
+ buffer, (uint) buffer->buffer_no,
LSN_IN_PARTS(buffer->offset),
LSN_IN_PARTS(buffer->prev_sent_to_disk),
LSN_IN_PARTS(buffer->prev_buffer_offset)));
@@ -2575,8 +2575,8 @@ static my_bool translog_buffer_flush(struct st_translog_buffer *buffer)
uint skipped_data;
DBUG_ENTER("translog_buffer_flush");
DBUG_PRINT("enter",
- ("Buffer: #%u 0x%lx file: %d offset: (%lu,0x%lx) size: %lu",
- (uint) buffer->buffer_no, (ulong) buffer,
+ ("Buffer: #%u %p file: %d offset: " LSN_FMT " size: %lu",
+ (uint) buffer->buffer_no, buffer,
buffer->file->handler.file,
LSN_IN_PARTS(buffer->offset),
(ulong) buffer->size));
@@ -2615,11 +2615,11 @@ static my_bool translog_buffer_flush(struct st_translog_buffer *buffer)
#ifndef DBUG_OFF
TRANSLOG_ADDRESS addr= (buffer->offset + i);
#endif
- DBUG_PRINT("info", ("send log form %lu till %lu address: (%lu,0x%lx) "
- "page #: %lu buffer size: %lu buffer: 0x%lx",
+ DBUG_PRINT("info", ("send log form %lu till %lu address: " LSN_FMT " "
+ "page #: %lu buffer size: %lu buffer: %p",
(ulong) i, (ulong) (i + TRANSLOG_PAGE_SIZE),
LSN_IN_PARTS(addr), (ulong) pg, (ulong) buffer->size,
- (ulong) buffer));
+ buffer));
DBUG_ASSERT(log_descriptor.pagecache->block_size == TRANSLOG_PAGE_SIZE);
DBUG_ASSERT(i + TRANSLOG_PAGE_SIZE <= buffer->size);
if (translog_status != TRANSLOG_OK && translog_status != TRANSLOG_SHUTDOWN)
@@ -2636,9 +2636,9 @@ static my_bool translog_buffer_flush(struct st_translog_buffer *buffer)
TRANSLOG_PAGE_SIZE - skipped_data))
{
DBUG_PRINT("error",
- ("Can't write page (%lu,0x%lx) to pagecache, error: %d",
- (ulong) buffer->file->number,
- (ulong) (LSN_OFFSET(buffer->offset)+ i),
+ ("Can't write page " LSN_FMT " to pagecache, error: %d",
+ buffer->file->number,
+ (uint)(LSN_OFFSET(buffer->offset)+ i),
my_errno));
translog_stop_writing();
DBUG_RETURN(1);
@@ -2651,10 +2651,10 @@ static my_bool translog_buffer_flush(struct st_translog_buffer *buffer)
LSN_OFFSET(buffer->offset) + buffer->skipped_data,
log_write_flags))
{
- DBUG_PRINT("error", ("Can't write buffer (%lu,0x%lx) size %lu "
+ DBUG_PRINT("error", ("Can't write buffer " LSN_FMT " size %lu "
"to the disk (%d)",
- (ulong) file->handler.file,
- (ulong) LSN_OFFSET(buffer->offset),
+ (uint) file->handler.file,
+ (uint) LSN_OFFSET(buffer->offset),
(ulong) buffer->size, errno));
translog_stop_writing();
DBUG_RETURN(1);
@@ -2874,10 +2874,10 @@ static my_bool translog_page_validator(int res, PAGECACHE_IO_HOOK_ARGS *args)
if ((pgcache_page_no_t) uint3korr(page) != page_no ||
(uint32) uint3korr(page + 3) != data->number)
{
- DBUG_PRINT("error", ("Page (%lu,0x%lx): "
+ DBUG_PRINT("error", ("Page " LSN_FMT ": "
"page address written in the page is incorrect: "
"File %lu instead of %lu or page %lu instead of %lu",
- (ulong) data->number, (ulong) offset,
+ (uint)data->number, (uint)offset,
(ulong) uint3korr(page + 3), (ulong) data->number,
(ulong) uint3korr(page),
(ulong) page_no));
@@ -2888,9 +2888,9 @@ static my_bool translog_page_validator(int res, PAGECACHE_IO_HOOK_ARGS *args)
if (flags & ~(TRANSLOG_PAGE_CRC | TRANSLOG_SECTOR_PROTECTION |
TRANSLOG_RECORD_CRC))
{
- DBUG_PRINT("error", ("Page (%lu,0x%lx): "
+ DBUG_PRINT("error", ("Page " LSN_FMT ": "
"Garbage in the page flags field detected : %x",
- (ulong) data->number, (ulong) offset,
+ (uint) data->number, (uint) offset,
(uint) flags));
DBUG_RETURN(1);
}
@@ -2902,9 +2902,9 @@ static my_bool translog_page_validator(int res, PAGECACHE_IO_HOOK_ARGS *args)
this_page_page_overhead);
if (crc != uint4korr(page_pos))
{
- DBUG_PRINT("error", ("Page (%lu,0x%lx): "
+ DBUG_PRINT("error", ("Page " LSN_FMT ": "
"CRC mismatch: calculated: %lx on the page %lx",
- (ulong) data->number, (ulong) offset,
+ (uint) data->number, (uint) offset,
(ulong) crc, (ulong) uint4korr(page_pos)));
DBUG_RETURN(1);
}
@@ -2985,10 +2985,10 @@ static uchar *translog_get_page(TRANSLOG_VALIDATOR_DATA *data, uchar *buffer,
uint32 file_no= LSN_FILE_NO(addr);
TRANSLOG_FILE *file;
DBUG_ENTER("translog_get_page");
- DBUG_PRINT("enter", ("File: %lu Offset: %lu(0x%lx)",
- (ulong) file_no,
- (ulong) LSN_OFFSET(addr),
- (ulong) LSN_OFFSET(addr)));
+ DBUG_PRINT("enter", ("File: %u Offset: %u(0x%x)",
+ file_no,
+ (uint) LSN_OFFSET(addr),
+ (uint) LSN_OFFSET(addr)));
/* it is really page address */
DBUG_ASSERT(LSN_OFFSET(addr) % TRANSLOG_PAGE_SIZE == 0);
@@ -2998,7 +2998,7 @@ static uchar *translog_get_page(TRANSLOG_VALIDATOR_DATA *data, uchar *buffer,
restart:
in_buffers= translog_only_in_buffers();
- DBUG_PRINT("info", ("in_buffers: (%lu,0x%lx)",
+ DBUG_PRINT("info", ("in_buffers: " LSN_FMT,
LSN_IN_PARTS(in_buffers)));
if (in_buffers != LSN_IMPOSSIBLE &&
cmp_translog_addr(addr, in_buffers) >= 0)
@@ -3168,9 +3168,9 @@ restart:
PAGECACHE_LOCK_READ :
PAGECACHE_LOCK_LEFT_UNLOCKED),
direct_link);
- DBUG_PRINT("info", ("Direct link is assigned to : 0x%lx * 0x%lx",
- (ulong) direct_link,
- (ulong)(direct_link ? *direct_link : NULL)));
+ DBUG_PRINT("info", ("Direct link is assigned to : %p * %p",
+ direct_link,
+ (direct_link ? *direct_link : NULL)));
data->was_recovered= file->was_recovered;
DBUG_RETURN(buffer);
}
@@ -3186,8 +3186,8 @@ restart:
static void translog_free_link(PAGECACHE_BLOCK_LINK *direct_link)
{
DBUG_ENTER("translog_free_link");
- DBUG_PRINT("info", ("Direct link: 0x%lx",
- (ulong) direct_link));
+ DBUG_PRINT("info", ("Direct link: %p",
+ direct_link));
if (direct_link)
pagecache_unlock_by_link(log_descriptor.pagecache, direct_link,
PAGECACHE_LOCK_READ_UNLOCK, PAGECACHE_UNPIN,
@@ -3380,8 +3380,8 @@ static my_bool translog_truncate_log(TRANSLOG_ADDRESS addr)
uchar page_buff[TRANSLOG_PAGE_SIZE];
DBUG_ENTER("translog_truncate_log");
/* TODO: write warning to the client */
- DBUG_PRINT("warning", ("removing all records from (%lu,0x%lx) "
- "till (%lu,0x%lx)",
+ DBUG_PRINT("warning", ("removing all records from " LSN_FMT " "
+ "till " LSN_FMT,
LSN_IN_PARTS(addr),
LSN_IN_PARTS(log_descriptor.horizon)));
DBUG_ASSERT(cmp_translog_addr(addr, log_descriptor.horizon) < 0);
@@ -3680,8 +3680,8 @@ my_bool translog_init_with_table(const char *directory,
{
if (translog_buffer_init(log_descriptor.buffers + i, i))
goto err;
- DBUG_PRINT("info", ("translog_buffer buffer #%u: 0x%lx",
- i, (ulong) log_descriptor.buffers + i));
+ DBUG_PRINT("info", ("translog_buffer buffer #%u:%p",
+ i, log_descriptor.buffers + i));
}
/*
@@ -3929,9 +3929,9 @@ my_bool translog_init_with_table(const char *directory,
log_descriptor.horizon= LSN_REPLACE_OFFSET(log_descriptor.horizon,
(chunk_offset +
LSN_OFFSET(last_valid_page)));
- DBUG_PRINT("info", ("Move Page #%u: 0x%lx chaser: %d Size: %lu (%lu)",
+ DBUG_PRINT("info", ("Move Page #%u: %p chaser: %d Size: %lu (%lu)",
(uint) log_descriptor.bc.buffer_no,
- (ulong) log_descriptor.bc.buffer,
+ log_descriptor.bc.buffer,
log_descriptor.bc.chaser,
(ulong) log_descriptor.bc.buffer->size,
(ulong) (log_descriptor.bc.ptr - log_descriptor.bc.
@@ -4095,7 +4095,7 @@ my_bool translog_init_with_table(const char *directory,
There is no harm in leaving it "as-is".
*/
log_descriptor.previous_flush_horizon= log_descriptor.horizon;
- DBUG_PRINT("info", ("previous_flush_horizon: (%lu,0x%lx)",
+ DBUG_PRINT("info", ("previous_flush_horizon: " LSN_FMT,
LSN_IN_PARTS(log_descriptor.
previous_flush_horizon)));
DBUG_RETURN(0);
@@ -4123,7 +4123,7 @@ my_bool translog_init_with_table(const char *directory,
translog_size_t rec_len;
int len;
uchar buffer[1];
- DBUG_PRINT("info", ("going to check the last found record (%lu,0x%lx)",
+ DBUG_PRINT("info", ("going to check the last found record " LSN_FMT,
LSN_IN_PARTS(last_lsn)));
len=
@@ -4132,7 +4132,7 @@ my_bool translog_init_with_table(const char *directory,
len == RECHEADER_READ_EOF))
{
DBUG_PRINT("error", ("unexpected end of log or record during "
- "reading record header: (%lu,0x%lx) len: %d",
+ "reading record header: " LSN_FMT " len: %d",
LSN_IN_PARTS(last_lsn), len));
if (readonly)
log_descriptor.log_start= log_descriptor.horizon= last_lsn;
@@ -4156,7 +4156,7 @@ my_bool translog_init_with_table(const char *directory,
if (rec_len != 1)
{
DBUG_PRINT("error", ("unexpected end of log or record during "
- "reading record body: (%lu,0x%lx) len: %d",
+ "reading record body: " LSN_FMT " len: %d",
LSN_IN_PARTS(rec.lsn),
len));
if (readonly)
@@ -4174,7 +4174,7 @@ my_bool translog_init_with_table(const char *directory,
}
}
log_descriptor.previous_flush_horizon= log_descriptor.horizon;
- DBUG_PRINT("info", ("previous_flush_horizon: (%lu,0x%lx)",
+ DBUG_PRINT("info", ("previous_flush_horizon: " LSN_FMT,
LSN_IN_PARTS(log_descriptor.previous_flush_horizon)));
DBUG_RETURN(0);
err:
@@ -4193,8 +4193,8 @@ static void translog_buffer_destroy(struct st_translog_buffer *buffer)
{
DBUG_ENTER("translog_buffer_destroy");
DBUG_PRINT("enter",
- ("Buffer #%u: 0x%lx file: %d offset: (%lu,0x%lx) size: %lu",
- (uint) buffer->buffer_no, (ulong) buffer,
+ ("Buffer #%u: %p file: %d offset: " LSN_FMT " size: %lu",
+ (uint) buffer->buffer_no, buffer,
(buffer->file ? buffer->file->handler.file : -1),
LSN_IN_PARTS(buffer->offset),
(ulong) buffer->size));
@@ -4212,7 +4212,7 @@ static void translog_buffer_destroy(struct st_translog_buffer *buffer)
translog_buffer_flush(buffer);
translog_buffer_unlock(buffer);
}
- DBUG_PRINT("info", ("Destroy mutex: 0x%lx", (ulong) &buffer->mutex));
+ DBUG_PRINT("info", ("Destroy mutex: %p", &buffer->mutex));
mysql_mutex_destroy(&buffer->mutex);
mysql_cond_destroy(&buffer->waiting_filling_buffer);
DBUG_VOID_RETURN;
@@ -4322,15 +4322,15 @@ static my_bool translog_page_next(TRANSLOG_ADDRESS *horizon,
TRANSLOG_PAGE_SIZE)))
DBUG_RETURN(1);
*prev_buffer= buffer;
- DBUG_PRINT("info", ("Buffer #%u (0x%lu): have to be flushed",
- (uint) buffer->buffer_no, (ulong) buffer));
+ DBUG_PRINT("info", ("Buffer #%u (%p): have to be flushed",
+ (uint) buffer->buffer_no, buffer));
}
else
{
- DBUG_PRINT("info", ("Use the same buffer #%u (0x%lu): "
+ DBUG_PRINT("info", ("Use the same buffer #%u (%p): "
"Buffer Size: %lu (%lu)",
(uint) buffer->buffer_no,
- (ulong) buffer,
+ buffer,
(ulong) cursor->buffer->size,
(ulong) (cursor->ptr - cursor->buffer->buffer)));
translog_finish_page(horizon, cursor);
@@ -4374,9 +4374,9 @@ static my_bool translog_write_data_on_page(TRANSLOG_ADDRESS *horizon,
cursor->current_page_fill+= length;
if (!cursor->chaser)
cursor->buffer->size+= length;
- DBUG_PRINT("info", ("Write data buffer #%u: 0x%lx "
+ DBUG_PRINT("info", ("Write data buffer #%u: %p "
"chaser: %d Size: %lu (%lu)",
- (uint) cursor->buffer->buffer_no, (ulong) cursor->buffer,
+ (uint) cursor->buffer->buffer_no, cursor->buffer,
cursor->chaser, (ulong) cursor->buffer->size,
(ulong) (cursor->ptr - cursor->buffer->buffer)));
translog_check_cursor(cursor);
@@ -4429,9 +4429,9 @@ static my_bool translog_write_parts_on_page(TRANSLOG_ADDRESS *horizon,
DBUG_ASSERT(cur < parts->elements);
part= parts->parts + cur;
buff= part->str;
- DBUG_PRINT("info", ("Part: %u Length: %lu left: %lu buff: 0x%lx",
+ DBUG_PRINT("info", ("Part: %u Length: %lu left: %lu buff: %p",
(uint) (cur + 1), (ulong) part->length, (ulong) left,
- (ulong) buff));
+ buff));
if (part->length > left)
{
@@ -4448,8 +4448,8 @@ static my_bool translog_write_parts_on_page(TRANSLOG_ADDRESS *horizon,
cur++;
DBUG_PRINT("info", ("moved to next part (len: %lu)", (ulong) len));
}
- DBUG_PRINT("info", ("copy: 0x%lx <- 0x%lx %u",
- (ulong) cursor->ptr, (ulong)buff, (uint)len));
+ DBUG_PRINT("info", ("copy: %p <- %p %u",
+ cursor->ptr, buff, len));
if (likely(len))
{
memcpy(cursor->ptr, buff, len);
@@ -4458,9 +4458,9 @@ static my_bool translog_write_parts_on_page(TRANSLOG_ADDRESS *horizon,
}
} while (left);
- DBUG_PRINT("info", ("Horizon: (%lu,0x%lx) Length %lu(0x%lx)",
+ DBUG_PRINT("info", ("Horizon: " LSN_FMT " Length %u(0x%x)",
LSN_IN_PARTS(*horizon),
- (ulong) length, (ulong) length));
+ length, length));
parts->current= cur;
(*horizon)+= length; /* offset increasing */
cursor->current_page_fill+= length;
@@ -4470,14 +4470,14 @@ static my_bool translog_write_parts_on_page(TRANSLOG_ADDRESS *horizon,
We are not updating parts->total_record_length here because it is
needed only before writing the record, to know its total length
*/
- DBUG_PRINT("info", ("Write parts buffer #%u: 0x%lx "
+ DBUG_PRINT("info", ("Write parts buffer #%u: %p "
"chaser: %d Size: %lu (%lu) "
- "Horizon: (%lu,0x%lx) buff offset: 0x%lx",
- (uint) cursor->buffer->buffer_no, (ulong) cursor->buffer,
+ "Horizon: " LSN_FMT " buff offset: 0x%x",
+ (uint) cursor->buffer->buffer_no, cursor->buffer,
cursor->chaser, (ulong) cursor->buffer->size,
(ulong) (cursor->ptr - cursor->buffer->buffer),
LSN_IN_PARTS(*horizon),
- (ulong) (LSN_OFFSET(cursor->buffer->offset) +
+ (uint) (LSN_OFFSET(cursor->buffer->offset) +
cursor->buffer->size)));
translog_check_cursor(cursor);
@@ -4535,8 +4535,8 @@ translog_buffer_increase_writers(struct st_translog_buffer *buffer)
DBUG_ENTER("translog_buffer_increase_writers");
translog_buffer_lock_assert_owner(buffer);
buffer->copy_to_buffer_in_progress++;
- DBUG_PRINT("info", ("copy_to_buffer_in_progress. Buffer #%u 0x%lx progress: %d",
- (uint) buffer->buffer_no, (ulong) buffer,
+ DBUG_PRINT("info", ("copy_to_buffer_in_progress. Buffer #%u %p progress: %d",
+ (uint) buffer->buffer_no, buffer,
buffer->copy_to_buffer_in_progress));
DBUG_VOID_RETURN;
}
@@ -4556,8 +4556,8 @@ static void translog_buffer_decrease_writers(struct st_translog_buffer *buffer)
translog_buffer_lock_assert_owner(buffer);
buffer->copy_to_buffer_in_progress--;
DBUG_PRINT("info",
- ("copy_to_buffer_in_progress. Buffer #%u 0x%lx progress: %d",
- (uint) buffer->buffer_no, (ulong) buffer,
+ ("copy_to_buffer_in_progress. Buffer #%u %p progress: %d",
+ (uint) buffer->buffer_no, buffer,
buffer->copy_to_buffer_in_progress));
if (buffer->copy_to_buffer_in_progress == 0)
mysql_cond_broadcast(&buffer->waiting_filling_buffer);
@@ -4701,7 +4701,7 @@ static my_bool translog_advance_pointer(int pages, uint16 last_page_data)
pages * TRANSLOG_PAGE_SIZE + last_page_offset);
translog_size_t buffer_end_offset, file_end_offset, min_offset;
DBUG_ENTER("translog_advance_pointer");
- DBUG_PRINT("enter", ("Pointer: (%lu, 0x%lx) + %u + %u pages + %u + %u",
+ DBUG_PRINT("enter", ("Pointer: " LSN_FMT " + %u + %u pages + %u + %u",
LSN_IN_PARTS(log_descriptor.horizon),
(uint) (TRANSLOG_PAGE_SIZE -
log_descriptor.bc.current_page_fill),
@@ -4751,20 +4751,20 @@ static my_bool translog_advance_pointer(int pages, uint16 last_page_data)
file_end_offset= (TRANSLOG_PAGE_SIZE -
log_descriptor.bc.current_page_fill);
}
- DBUG_PRINT("info", ("offset: %lu buffer_end_offs: %lu, "
- "file_end_offs: %lu",
- (ulong) offset, (ulong) buffer_end_offset,
- (ulong) file_end_offset));
- DBUG_PRINT("info", ("Buff #%u %u (0x%lx) offset 0x%lx + size 0x%lx = "
- "0x%lx (0x%lx)",
- (uint) log_descriptor.bc.buffer->buffer_no,
- (uint) log_descriptor.bc.buffer_no,
- (ulong) log_descriptor.bc.buffer,
- (ulong) LSN_OFFSET(log_descriptor.bc.buffer->offset),
- (ulong) log_descriptor.bc.buffer->size,
- (ulong) (LSN_OFFSET(log_descriptor.bc.buffer->offset) +
+ DBUG_PRINT("info", ("offset: %u buffer_end_offs: %u, "
+ "file_end_offs: %u",
+ offset, buffer_end_offset,
+ file_end_offset));
+ DBUG_PRINT("info", ("Buff #%u %u (%p) offset 0x%x + size 0x%x = "
+ "0x%x (0x%x)",
+ log_descriptor.bc.buffer->buffer_no,
+ log_descriptor.bc.buffer_no,
+ log_descriptor.bc.buffer,
+ (uint) LSN_OFFSET(log_descriptor.bc.buffer->offset),
+ log_descriptor.bc.buffer->size,
+ (uint) (LSN_OFFSET(log_descriptor.bc.buffer->offset) +
log_descriptor.bc.buffer->size),
- (ulong) LSN_OFFSET(log_descriptor.horizon)));
+ (uint) LSN_OFFSET(log_descriptor.horizon)));
DBUG_ASSERT(LSN_OFFSET(log_descriptor.bc.buffer->offset) +
log_descriptor.bc.buffer->size ==
LSN_OFFSET(log_descriptor.horizon));
@@ -4795,9 +4795,9 @@ static my_bool translog_advance_pointer(int pages, uint16 last_page_data)
/* TODO: check whether ptr or size is enough */
log_descriptor.bc.buffer->size+= min_offset;
log_descriptor.bc.ptr+= min_offset;
- DBUG_PRINT("info", ("NewP buffer #%u: 0x%lx chaser: %d Size: %lu (%lu)",
+ DBUG_PRINT("info", ("NewP buffer #%u: %p chaser: %d Size: %lu (%lu)",
(uint) log_descriptor.bc.buffer->buffer_no,
- (ulong) log_descriptor.bc.buffer,
+ log_descriptor.bc.buffer,
log_descriptor.bc.chaser,
(ulong) log_descriptor.bc.buffer->size,
(ulong) (log_descriptor.bc.ptr -log_descriptor.bc.
@@ -4841,10 +4841,10 @@ end:
translog_buffer_increase_writers(log_descriptor.bc.buffer);
log_descriptor.horizon+= offset; /* offset increasing */
log_descriptor.bc.current_page_fill= last_page_offset;
- DBUG_PRINT("info", ("NewP buffer #%u: 0x%lx chaser: %d Size: %lu (%lu) "
+ DBUG_PRINT("info", ("NewP buffer #%u: %p chaser: %d Size: %lu (%lu) "
"offset: %u last page: %u",
(uint) log_descriptor.bc.buffer->buffer_no,
- (ulong) log_descriptor.bc.buffer,
+ log_descriptor.bc.buffer,
log_descriptor.bc.chaser,
(ulong) log_descriptor.bc.buffer->size,
(ulong) (log_descriptor.bc.ptr -
@@ -4852,7 +4852,7 @@ end:
buffer), (uint) offset,
(uint) last_page_offset));
DBUG_PRINT("info",
- ("pointer moved to: (%lu, 0x%lx)",
+ ("pointer moved to: " LSN_FMT,
LSN_IN_PARTS(log_descriptor.horizon)));
translog_check_cursor(&log_descriptor.bc);
log_descriptor.bc.protected= 0;
@@ -4892,7 +4892,7 @@ static uint translog_get_current_page_rest()
static uint translog_get_current_buffer_rest()
{
- return ((log_descriptor.bc.buffer->buffer + TRANSLOG_WRITE_BUFFER -
+ return (uint)((log_descriptor.bc.buffer->buffer + TRANSLOG_WRITE_BUFFER -
log_descriptor.bc.ptr) /
TRANSLOG_PAGE_SIZE);
}
@@ -4939,7 +4939,7 @@ static inline void set_lsn(LSN *lsn, LSN value)
*lsn= value;
/* we generate LSN so something is not flushed in log */
log_descriptor.is_everything_flushed= 0;
- DBUG_PRINT("info", ("new LSN appeared: (%lu,0x%lx)", LSN_IN_PARTS(value)));
+ DBUG_PRINT("info", ("new LSN appeared: " LSN_FMT, LSN_IN_PARTS(value)));
DBUG_VOID_RETURN;
}
@@ -5030,9 +5030,9 @@ translog_write_variable_record_1group(LSN *lsn,
rc|= translog_advance_pointer((int)(full_pages + additional_chunk3_page),
(record_rest ? record_rest + 3 : 0));
log_descriptor.bc.buffer->last_lsn= *lsn;
- DBUG_PRINT("info", ("last_lsn set to (%lu,0x%lx) buffer: 0x%lx",
+ DBUG_PRINT("info", ("last_lsn set to " LSN_FMT " buffer: %p",
LSN_IN_PARTS(log_descriptor.bc.buffer->last_lsn),
- (ulong) log_descriptor.bc.buffer));
+ log_descriptor.bc.buffer));
translog_unlock();
@@ -5055,7 +5055,7 @@ translog_write_variable_record_1group(LSN *lsn,
/* fill the pages */
translog_write_parts_on_page(&horizon, &cursor, first_page, parts);
- DBUG_PRINT("info", ("absolute horizon: (%lu,0x%lx) local: (%lu,0x%lx)",
+ DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT,
LSN_IN_PARTS(log_descriptor.horizon),
LSN_IN_PARTS(horizon)));
@@ -5064,7 +5064,7 @@ translog_write_variable_record_1group(LSN *lsn,
if (translog_write_variable_record_chunk2_page(parts, &horizon, &cursor))
DBUG_RETURN(1);
- DBUG_PRINT("info", ("absolute horizon: (%lu,0x%lx) local: (%lu,0x%lx)",
+ DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT,
LSN_IN_PARTS(log_descriptor.horizon),
LSN_IN_PARTS(horizon)));
}
@@ -5076,7 +5076,7 @@ translog_write_variable_record_1group(LSN *lsn,
page_capacity_chunk_2 - 2,
&horizon, &cursor))
DBUG_RETURN(1);
- DBUG_PRINT("info", ("absolute horizon: (%lu,0x%lx) local: (%lu,0x%lx)",
+ DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT,
LSN_IN_PARTS(log_descriptor.horizon),
LSN_IN_PARTS(horizon)));
DBUG_ASSERT(cursor.current_page_fill == TRANSLOG_PAGE_SIZE);
@@ -5086,11 +5086,11 @@ translog_write_variable_record_1group(LSN *lsn,
record_rest,
&horizon, &cursor))
DBUG_RETURN(1);
- DBUG_PRINT("info", ("absolute horizon: (%lu,0x%lx) local: (%lu,0x%lx)",
- (ulong) LSN_FILE_NO(log_descriptor.horizon),
- (ulong) LSN_OFFSET(log_descriptor.horizon),
- (ulong) LSN_FILE_NO(horizon),
- (ulong) LSN_OFFSET(horizon)));
+ DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT,
+ (uint) LSN_FILE_NO(log_descriptor.horizon),
+ (uint) LSN_OFFSET(log_descriptor.horizon),
+ (uint) LSN_FILE_NO(horizon),
+ (uint) LSN_OFFSET(horizon)));
translog_buffer_lock(cursor.buffer);
translog_buffer_decrease_writers(cursor.buffer);
@@ -5156,9 +5156,9 @@ translog_write_variable_record_1chunk(LSN *lsn,
&log_descriptor.bc,
parts->total_record_length, parts);
log_descriptor.bc.buffer->last_lsn= *lsn;
- DBUG_PRINT("info", ("last_lsn set to (%lu,0x%lx) buffer: 0x%lx",
+ DBUG_PRINT("info", ("last_lsn set to " LSN_FMT " buffer: %p",
LSN_IN_PARTS(log_descriptor.bc.buffer->last_lsn),
- (ulong) log_descriptor.bc.buffer));
+ log_descriptor.bc.buffer));
translog_unlock();
/*
@@ -5202,9 +5202,9 @@ static uchar *translog_put_LSN_diff(LSN base_lsn, LSN lsn, uchar *dst)
{
uint64 diff;
DBUG_ENTER("translog_put_LSN_diff");
- DBUG_PRINT("enter", ("Base: (%lu,0x%lx) val: (%lu,0x%lx) dst: 0x%lx",
+ DBUG_PRINT("enter", ("Base: " LSN_FMT " val: " LSN_FMT " dst:%p",
LSN_IN_PARTS(base_lsn), LSN_IN_PARTS(lsn),
- (ulong) dst));
+ dst));
DBUG_ASSERT(base_lsn > lsn);
diff= base_lsn - lsn;
DBUG_PRINT("info", ("Diff: 0x%llx", (ulonglong) diff));
@@ -5248,7 +5248,7 @@ static uchar *translog_put_LSN_diff(LSN base_lsn, LSN lsn, uchar *dst)
dst[1]= 1;
lsn_store(dst + 2, lsn);
}
- DBUG_PRINT("info", ("new dst: 0x%lx", (ulong) dst));
+ DBUG_PRINT("info", ("new dst:%p", dst));
DBUG_RETURN(dst);
}
@@ -5287,8 +5287,8 @@ static uchar *translog_get_LSN_from_diff(LSN base_lsn, uchar *src, uchar *dst)
uint32 file_no, rec_offset;
uint8 code;
DBUG_ENTER("translog_get_LSN_from_diff");
- DBUG_PRINT("enter", ("Base: (%lu,0x%lx) src: 0x%lx dst 0x%lx",
- LSN_IN_PARTS(base_lsn), (ulong) src, (ulong) dst));
+ DBUG_PRINT("enter", ("Base: " LSN_FMT " src:%p dst %p",
+ LSN_IN_PARTS(base_lsn), src, dst));
first_byte= *((uint8*) src);
code= first_byte >> 6; /* Length is in 2 most significant bits */
first_byte&= 0x3F;
@@ -5305,8 +5305,8 @@ static uchar *translog_get_LSN_from_diff(LSN base_lsn, uchar *src, uchar *dst)
in real life)
*/
memcpy(dst, src + 1, LSN_STORE_SIZE);
- DBUG_PRINT("info", ("Special case of full LSN, new src: 0x%lx",
- (ulong) (src + 1 + LSN_STORE_SIZE)));
+ DBUG_PRINT("info", ("Special case of full LSN, new src:%p",
+ src + 1 + LSN_STORE_SIZE));
DBUG_RETURN(src + 1 + LSN_STORE_SIZE);
}
rec_offset= LSN_OFFSET(base_lsn) - ((first_byte << 8) + *((uint8*)src));
@@ -5341,7 +5341,7 @@ static uchar *translog_get_LSN_from_diff(LSN base_lsn, uchar *src, uchar *dst)
lsn= MAKE_LSN(file_no, rec_offset);
src+= code + 1;
lsn_store(dst, lsn);
- DBUG_PRINT("info", ("new src: 0x%lx", (ulong) src));
+ DBUG_PRINT("info", ("new src:%p", src));
DBUG_RETURN(src);
}
@@ -5375,7 +5375,7 @@ static void translog_relative_LSN_encode(struct st_translog_parts *parts,
{
uint copied= part->length;
LEX_CUSTRING *next_part;
- DBUG_PRINT("info", ("Using buffer: 0x%lx", (ulong) compressed_LSNs));
+ DBUG_PRINT("info", ("Using buffer:%p", compressed_LSNs));
memcpy(buffer, part->str, part->length);
next_part= parts->parts + parts->current + 1;
do
@@ -5592,7 +5592,7 @@ translog_write_variable_record_mgroup(LSN *lsn,
translog_write_data_on_page(&horizon, &cursor, 1, chunk2_header);
translog_write_parts_on_page(&horizon, &cursor, first_page - 1, parts);
- DBUG_PRINT("info", ("absolute horizon: (%lu,0x%lx) local: (%lu,0x%lx) "
+ DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT " "
"Left %lu",
LSN_IN_PARTS(log_descriptor.horizon),
LSN_IN_PARTS(horizon),
@@ -5604,8 +5604,8 @@ translog_write_variable_record_mgroup(LSN *lsn,
if (translog_write_variable_record_chunk2_page(parts, &horizon, &cursor))
goto err;
- DBUG_PRINT("info", ("absolute horizon: (%lu,0x%lx) "
- "local: (%lu,0x%lx) "
+ DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " "
+ "local: " LSN_FMT " "
"Left: %lu",
LSN_IN_PARTS(log_descriptor.horizon),
LSN_IN_PARTS(horizon),
@@ -5763,7 +5763,7 @@ translog_write_variable_record_mgroup(LSN *lsn,
DBUG_PRINT("info", ("chunk 2 to finish first page"));
translog_write_data_on_page(&horizon, &cursor, 1, chunk2_header);
translog_write_parts_on_page(&horizon, &cursor, first_page - 1, parts);
- DBUG_PRINT("info", ("absolute horizon: (%lu,0x%lx) local: (%lu,0x%lx) "
+ DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT " "
"Left: %lu",
LSN_IN_PARTS(log_descriptor.horizon),
LSN_IN_PARTS(horizon),
@@ -5780,7 +5780,7 @@ translog_write_variable_record_mgroup(LSN *lsn,
int2store(chunk3_header + 1, chunk3_size);
translog_write_data_on_page(&horizon, &cursor, 3, chunk3_header);
translog_write_parts_on_page(&horizon, &cursor, chunk3_size, parts);
- DBUG_PRINT("info", ("absolute horizon: (%lu,0x%lx) local: (%lu,0x%lx) "
+ DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT " "
"Left: %lu",
LSN_IN_PARTS(log_descriptor.horizon),
LSN_IN_PARTS(horizon),
@@ -5798,7 +5798,7 @@ translog_write_variable_record_mgroup(LSN *lsn,
if (translog_write_variable_record_chunk2_page(parts, &horizon, &cursor))
goto err;
- DBUG_PRINT("info", ("absolute horizon: (%lu,0x%lx) local: (%lu,0x%lx) "
+ DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT " "
"Left: %lu",
LSN_IN_PARTS(log_descriptor.horizon),
LSN_IN_PARTS(horizon),
@@ -5812,7 +5812,7 @@ translog_write_variable_record_mgroup(LSN *lsn,
chunk3_size,
&horizon, &cursor))
goto err;
- DBUG_PRINT("info", ("absolute horizon: (%lu,0x%lx) local: (%lu,0x%lx)",
+ DBUG_PRINT("info", ("absolute horizon: " LSN_FMT " local: " LSN_FMT,
LSN_IN_PARTS(log_descriptor.horizon),
LSN_IN_PARTS(horizon)));
@@ -5847,9 +5847,9 @@ translog_write_variable_record_mgroup(LSN *lsn,
translog_lock();
set_lsn(lsn, horizon);
buffer_of_last_lsn->last_lsn= *lsn;
- DBUG_PRINT("info", ("last_lsn set to (%lu,0x%lx) buffer: 0x%lx",
+ DBUG_PRINT("info", ("last_lsn set to " LSN_FMT " buffer: %p",
LSN_IN_PARTS(buffer_of_last_lsn->last_lsn),
- (ulong) buffer_of_last_lsn));
+ buffer_of_last_lsn));
if (log_record_type_descriptor[type].inwrite_hook &&
(*log_record_type_descriptor[type].inwrite_hook) (type, trn,
tbl_info,
@@ -5977,7 +5977,7 @@ static my_bool translog_write_variable_record(LSN *lsn,
DBUG_ENTER("translog_write_variable_record");
translog_lock();
- DBUG_PRINT("info", ("horizon: (%lu,0x%lx)",
+ DBUG_PRINT("info", ("horizon: " LSN_FMT,
LSN_IN_PARTS(log_descriptor.horizon)));
page_rest= TRANSLOG_PAGE_SIZE - log_descriptor.bc.current_page_fill;
DBUG_PRINT("info", ("header length: %u page_rest: %u",
@@ -6093,7 +6093,7 @@ static my_bool translog_write_fixed_record(LSN *lsn,
log_record_type_descriptor[type].fixed_length));
translog_lock();
- DBUG_PRINT("info", ("horizon: (%lu,0x%lx)",
+ DBUG_PRINT("info", ("horizon: " LSN_FMT,
LSN_IN_PARTS(log_descriptor.horizon)));
DBUG_ASSERT(log_descriptor.bc.current_page_fill <= TRANSLOG_PAGE_SIZE);
@@ -6157,9 +6157,9 @@ static my_bool translog_write_fixed_record(LSN *lsn,
parts->total_record_length, parts);
log_descriptor.bc.buffer->last_lsn= *lsn;
- DBUG_PRINT("info", ("last_lsn set to (%lu,0x%lx) buffer: 0x%lx",
+ DBUG_PRINT("info", ("last_lsn set to " LSN_FMT " buffer: %p",
LSN_IN_PARTS(log_descriptor.bc.buffer->last_lsn),
- (ulong) log_descriptor.bc.buffer));
+ log_descriptor.bc.buffer));
err:
translog_unlock();
@@ -6349,7 +6349,7 @@ my_bool translog_write_record(LSN *lsn,
}
}
- DBUG_PRINT("info", ("LSN: (%lu,0x%lx)", LSN_IN_PARTS(*lsn)));
+ DBUG_PRINT("info", ("LSN: " LSN_FMT, LSN_IN_PARTS(*lsn)));
DBUG_RETURN(rc);
}
@@ -6564,8 +6564,8 @@ my_bool translog_scanner_init(LSN lsn,
my_bool use_direct)
{
DBUG_ENTER("translog_scanner_init");
- DBUG_PRINT("enter", ("Scanner: 0x%lx LSN: (%lu,0x%lx)",
- (ulong) scanner, LSN_IN_PARTS(lsn)));
+ DBUG_PRINT("enter", ("Scanner: %p LSN: " LSN_FMT,
+ scanner, LSN_IN_PARTS(lsn)));
DBUG_ASSERT(translog_status == TRANSLOG_OK ||
translog_status == TRANSLOG_READONLY);
@@ -6576,7 +6576,7 @@ my_bool translog_scanner_init(LSN lsn,
scanner->direct_link= NULL;
scanner->horizon= translog_get_horizon();
- DBUG_PRINT("info", ("horizon: (%lu,0x%lx)", LSN_IN_PARTS(scanner->horizon)));
+ DBUG_PRINT("info", ("horizon: " LSN_FMT, LSN_IN_PARTS(scanner->horizon)));
/* lsn < horizon */
DBUG_ASSERT(lsn <= scanner->horizon);
@@ -6602,7 +6602,7 @@ my_bool translog_scanner_init(LSN lsn,
void translog_destroy_scanner(TRANSLOG_SCANNER_DATA *scanner)
{
DBUG_ENTER("translog_destroy_scanner");
- DBUG_PRINT("enter", ("Scanner: 0x%lx", (ulong)scanner));
+ DBUG_PRINT("enter", ("Scanner: %p", scanner));
translog_free_link(scanner->direct_link);
DBUG_VOID_RETURN;
}
@@ -6624,11 +6624,11 @@ static my_bool translog_scanner_eol(TRANSLOG_SCANNER_DATA *scanner)
{
DBUG_ENTER("translog_scanner_eol");
DBUG_PRINT("enter",
- ("Horizon: (%lu, 0x%lx) Current: (%lu, 0x%lx+0x%x=0x%lx)",
+ ("Horizon: " LSN_FMT " Current: (%u, 0x%x+0x%x=0x%x)",
LSN_IN_PARTS(scanner->horizon),
LSN_IN_PARTS(scanner->page_addr),
(uint) scanner->page_offset,
- (ulong) (LSN_OFFSET(scanner->page_addr) + scanner->page_offset)));
+ (uint) (LSN_OFFSET(scanner->page_addr) + scanner->page_offset)));
if (scanner->horizon > (scanner->page_addr +
scanner->page_offset))
{
@@ -6733,7 +6733,7 @@ translog_get_next_chunk(TRANSLOG_SCANNER_DATA *scanner)
translog_free_link(scanner->direct_link);
if (translog_scanner_eof(scanner))
{
- DBUG_PRINT("info", ("horizon: (%lu,0x%lx) pageaddr: (%lu,0x%lx)",
+ DBUG_PRINT("info", ("horizon: " LSN_FMT " pageaddr: " LSN_FMT,
LSN_IN_PARTS(scanner->horizon),
LSN_IN_PARTS(scanner->page_addr)));
/* if it is the end of the log, it has to be caught before this point */
@@ -6834,7 +6834,7 @@ translog_variable_length_header(uchar *page, translog_size_t page_offset,
src+= (2 + 2);
page_rest= (uint16) (TRANSLOG_PAGE_SIZE - (src - page));
curr= 0;
- header_to_skip= src - (page + page_offset);
+ header_to_skip= (uint) (src - (page + page_offset));
buff->chunk0_pages= 0;
for (;;)
@@ -6852,7 +6852,7 @@ translog_variable_length_header(uchar *page, translog_size_t page_offset,
DBUG_ASSERT(curr < buff->groups_no);
buff->groups[curr].addr= lsn_korr(src + i * (7 + 1));
buff->groups[curr].num= src[i * (7 + 1) + 7];
- DBUG_PRINT("info", ("group #%u (%lu,0x%lx) chunks: %u",
+ DBUG_PRINT("info", ("group #%u " LSN_FMT " chunks: %u",
curr,
LSN_IN_PARTS(buff->groups[curr].addr),
(uint) buff->groups[curr].num));
@@ -6874,7 +6874,7 @@ translog_variable_length_header(uchar *page, translog_size_t page_offset,
buff->chunk0_data_addr+= (header_to_skip + read_length * (7 + 1));
}
buff->chunk0_data_len= chunk_len - 2 - read_length * (7 + 1);
- DBUG_PRINT("info", ("Data address: (%lu,0x%lx) len: %u",
+ DBUG_PRINT("info", ("Data address: " LSN_FMT " len: %u",
LSN_IN_PARTS(buff->chunk0_data_addr),
buff->chunk0_data_len));
break;
@@ -6997,7 +6997,7 @@ int translog_read_record_header_from_buffer(uchar *page,
translog_status == TRANSLOG_READONLY);
buff->type= (page[page_offset] & TRANSLOG_REC_TYPE);
buff->short_trid= uint2korr(page + page_offset + 1);
- DBUG_PRINT("info", ("Type %u, Short TrID %u, LSN (%lu,0x%lx)",
+ DBUG_PRINT("info", ("Type %u, Short TrID %u, LSN " LSN_FMT,
(uint) buff->type, (uint)buff->short_trid,
LSN_IN_PARTS(buff->lsn)));
/* Read required bytes from the header and call hook */
@@ -7046,7 +7046,7 @@ int translog_read_record_header(LSN lsn, TRANSLOG_HEADER_BUFFER *buff)
TRANSLOG_ADDRESS addr;
TRANSLOG_VALIDATOR_DATA data;
DBUG_ENTER("translog_read_record_header");
- DBUG_PRINT("enter", ("LSN: (%lu,0x%lx)", LSN_IN_PARTS(lsn)));
+ DBUG_PRINT("enter", ("LSN: " LSN_FMT, LSN_IN_PARTS(lsn)));
DBUG_ASSERT(LSN_OFFSET(lsn) % TRANSLOG_PAGE_SIZE != 0);
DBUG_ASSERT(translog_status == TRANSLOG_OK ||
translog_status == TRANSLOG_READONLY);
@@ -7091,8 +7091,8 @@ int translog_read_record_header_scan(TRANSLOG_SCANNER_DATA *scanner,
{
translog_size_t res;
DBUG_ENTER("translog_read_record_header_scan");
- DBUG_PRINT("enter", ("Scanner: Cur: (%lu,0x%lx) Hrz: (%lu,0x%lx) "
- "Lst: (%lu,0x%lx) Offset: %u(%x) fixed %d",
+ DBUG_PRINT("enter", ("Scanner: Cur: " LSN_FMT " Hrz: " LSN_FMT " "
+ "Lst: " LSN_FMT " Offset: %u(%x) fixed %d",
LSN_IN_PARTS(scanner->page_addr),
LSN_IN_PARTS(scanner->horizon),
LSN_IN_PARTS(scanner->last_file_page),
@@ -7137,9 +7137,9 @@ int translog_read_next_record_header(TRANSLOG_SCANNER_DATA *scanner,
DBUG_ENTER("translog_read_next_record_header");
buff->groups_no= 0; /* to be sure that we will free it right */
- DBUG_PRINT("enter", ("scanner: 0x%lx", (ulong) scanner));
- DBUG_PRINT("info", ("Scanner: Cur: (%lu,0x%lx) Hrz: (%lu,0x%lx) "
- "Lst: (%lu,0x%lx) Offset: %u(%x) fixed: %d",
+ DBUG_PRINT("enter", ("scanner: %p", scanner));
+ DBUG_PRINT("info", ("Scanner: Cur: " LSN_FMT " Hrz: " LSN_FMT " "
+ "Lst: " LSN_FMT " Offset: %u(%x) fixed: %d",
LSN_IN_PARTS(scanner->page_addr),
LSN_IN_PARTS(scanner->horizon),
LSN_IN_PARTS(scanner->last_file_page),
@@ -7159,7 +7159,7 @@ int translog_read_next_record_header(TRANSLOG_SCANNER_DATA *scanner,
buff->lsn= LSN_IMPOSSIBLE;
DBUG_RETURN(RECHEADER_READ_EOF);
}
- DBUG_PRINT("info", ("Page: (%lu,0x%lx) offset: %lu byte: %x",
+ DBUG_PRINT("info", ("Page: " LSN_FMT " offset: %lu byte: %x",
LSN_IN_PARTS(scanner->page_addr),
(ulong) scanner->page_offset,
(uint) scanner->page[scanner->page_offset]));
@@ -7368,8 +7368,8 @@ translog_size_t translog_read_record(LSN lsn,
DBUG_RETURN(0);
}
DBUG_PRINT("info", ("Offset: %lu length: %lu "
- "Scanner: Cur: (%lu,0x%lx) Hrz: (%lu,0x%lx) "
- "Lst: (%lu,0x%lx) Offset: %u(%x) fixed: %d",
+ "Scanner: Cur: " LSN_FMT " Hrz: " LSN_FMT " "
+ "Lst: " LSN_FMT " Offset: %u(%x) fixed: %d",
(ulong) offset, (ulong) length,
LSN_IN_PARTS(data->scanner.page_addr),
LSN_IN_PARTS(data->scanner.horizon),
@@ -7458,15 +7458,15 @@ static void translog_force_current_buffer_to_finish()
uint16 UNINIT_VAR(current_page_fill), write_counter, previous_offset;
DBUG_ENTER("translog_force_current_buffer_to_finish");
- DBUG_PRINT("enter", ("Buffer #%u 0x%lx "
- "Buffer addr: (%lu,0x%lx) "
- "Page addr: (%lu,0x%lx) "
+ DBUG_PRINT("enter", ("Buffer #%u %p "
+ "Buffer addr: " LSN_FMT " "
+ "Page addr: " LSN_FMT " "
"size: %lu (%lu) Pg: %u left: %u in progress %u",
(uint) old_buffer_no,
- (ulong) old_buffer,
+ old_buffer,
LSN_IN_PARTS(old_buffer->offset),
- (ulong) LSN_FILE_NO(log_descriptor.horizon),
- (ulong) (LSN_OFFSET(log_descriptor.horizon) -
+ LSN_FILE_NO(log_descriptor.horizon),
+ (uint)(LSN_OFFSET(log_descriptor.horizon) -
log_descriptor.bc.current_page_fill),
(ulong) old_buffer->size,
(ulong) (log_descriptor.bc.ptr -log_descriptor.bc.
@@ -7500,10 +7500,10 @@ static void translog_force_current_buffer_to_finish()
memset(log_descriptor.bc.ptr, TRANSLOG_FILLER, left);
old_buffer->size+= left;
- DBUG_PRINT("info", ("Finish Page buffer #%u: 0x%lx "
+ DBUG_PRINT("info", ("Finish Page buffer #%u: %p "
"Size: %lu",
(uint) old_buffer->buffer_no,
- (ulong) old_buffer,
+ old_buffer,
(ulong) old_buffer->size));
DBUG_ASSERT(old_buffer->buffer_no ==
log_descriptor.bc.buffer_no);
@@ -7537,9 +7537,9 @@ static void translog_force_current_buffer_to_finish()
log_descriptor.bc.write_counter= write_counter;
log_descriptor.bc.previous_offset= previous_offset;
new_buffer->prev_last_lsn= BUFFER_MAX_LSN(old_buffer);
- DBUG_PRINT("info", ("prev_last_lsn set to (%lu,0x%lx) buffer: 0x%lx",
+ DBUG_PRINT("info", ("prev_last_lsn set to " LSN_FMT " buffer: %p",
LSN_IN_PARTS(new_buffer->prev_last_lsn),
- (ulong) new_buffer));
+ new_buffer));
/*
Advances this log pointer, increases writers and let other threads to
@@ -7575,8 +7575,8 @@ static void translog_force_current_buffer_to_finish()
*/
DBUG_ASSERT(!old_buffer->is_closing_buffer);
old_buffer->is_closing_buffer= 1; /* Other flushes will wait */
- DBUG_PRINT("enter", ("Buffer #%u 0x%lx is_closing_buffer set",
- (uint) old_buffer->buffer_no, (ulong) old_buffer));
+ DBUG_PRINT("enter", ("Buffer #%u %p is_closing_buffer set",
+ (uint) old_buffer->buffer_no, old_buffer));
translog_wait_for_writers(old_buffer);
#ifndef DBUG_OFF
/* We blocked flushing this buffer so the buffer should not changed */
@@ -7606,12 +7606,12 @@ static void translog_force_current_buffer_to_finish()
uint32 crc= translog_crc(data + log_descriptor.page_overhead,
TRANSLOG_PAGE_SIZE -
log_descriptor.page_overhead);
- DBUG_PRINT("info", ("CRC: 0x%lx", (ulong) crc));
+ DBUG_PRINT("info", ("CRC: 0x%x", crc));
int4store(data + 3 + 3 + 1, crc);
}
old_buffer->is_closing_buffer= 0;
- DBUG_PRINT("enter", ("Buffer #%u 0x%lx is_closing_buffer cleared",
- (uint) old_buffer->buffer_no, (ulong) old_buffer));
+ DBUG_PRINT("enter", ("Buffer #%u %p is_closing_buffer cleared",
+ (uint) old_buffer->buffer_no, old_buffer));
mysql_cond_broadcast(&old_buffer->waiting_filling_buffer);
if (left)
@@ -7650,7 +7650,7 @@ static void translog_force_current_buffer_to_finish()
void translog_flush_wait_for_end(LSN lsn)
{
DBUG_ENTER("translog_flush_wait_for_end");
- DBUG_PRINT("enter", ("LSN: (%lu,0x%lx)", LSN_IN_PARTS(lsn)));
+ DBUG_PRINT("enter", ("LSN: " LSN_FMT, LSN_IN_PARTS(lsn)));
mysql_mutex_assert_owner(&log_descriptor.log_flush_lock);
while (cmp_translog_addr(log_descriptor.flushed, lsn) < 0)
mysql_cond_wait(&log_descriptor.log_flush_cond,
@@ -7670,7 +7670,7 @@ void translog_flush_set_new_goal_and_wait(TRANSLOG_ADDRESS lsn)
{
int flush_no= log_descriptor.flush_no;
DBUG_ENTER("translog_flush_set_new_goal_and_wait");
- DBUG_PRINT("enter", ("LSN: (%lu,0x%lx)", LSN_IN_PARTS(lsn)));
+ DBUG_PRINT("enter", ("LSN: " LSN_FMT, LSN_IN_PARTS(lsn)));
mysql_mutex_assert_owner(&log_descriptor.log_flush_lock);
if (cmp_translog_addr(lsn, log_descriptor.next_pass_max_lsn) > 0)
{
@@ -7776,7 +7776,7 @@ void translog_flush_buffers(TRANSLOG_ADDRESS *lsn,
start_buffer_no= i;
DBUG_PRINT("info",
- ("start from: %u current: %u prev last lsn: (%lu,0x%lx)",
+ ("start from: %u current: %u prev last lsn: " LSN_FMT,
(uint) start_buffer_no, (uint) log_descriptor.bc.buffer_no,
LSN_IN_PARTS(log_descriptor.bc.buffer->prev_last_lsn)));
@@ -7790,7 +7790,7 @@ void translog_flush_buffers(TRANSLOG_ADDRESS *lsn,
{
struct st_translog_buffer *buffer= log_descriptor.bc.buffer;
*lsn= log_descriptor.bc.buffer->last_lsn; /* fix lsn if it was horizon */
- DBUG_PRINT("info", ("LSN to flush fixed to last lsn: (%lu,0x%lx)",
+ DBUG_PRINT("info", ("LSN to flush fixed to last lsn: " LSN_FMT,
LSN_IN_PARTS(*lsn)));
last_buffer_no= log_descriptor.bc.buffer_no;
log_descriptor.is_everything_flushed= 1;
@@ -7817,7 +7817,7 @@ void translog_flush_buffers(TRANSLOG_ADDRESS *lsn,
DBUG_ASSERT(log_descriptor.bc.buffer->prev_last_lsn != LSN_IMPOSSIBLE);
/* fix lsn if it was horizon */
*lsn= log_descriptor.bc.buffer->prev_last_lsn;
- DBUG_PRINT("info", ("LSN to flush fixed to prev last lsn: (%lu,0x%lx)",
+ DBUG_PRINT("info", ("LSN to flush fixed to prev last lsn: " LSN_FMT,
LSN_IN_PARTS(*lsn)));
last_buffer_no= ((log_descriptor.bc.buffer_no + TRANSLOG_BUFFERS_NO -1) %
TRANSLOG_BUFFERS_NO);
@@ -7836,10 +7836,10 @@ void translog_flush_buffers(TRANSLOG_ADDRESS *lsn,
{
struct st_translog_buffer *buffer= log_descriptor.buffers + i;
translog_buffer_lock(buffer);
- DBUG_PRINT("info", ("Check buffer: 0x%lx #: %u "
- "prev last LSN: (%lu,0x%lx) "
- "last LSN: (%lu,0x%lx) status: %s",
- (ulong)(buffer),
+ DBUG_PRINT("info", ("Check buffer:%p #: %u "
+ "prev last LSN: " LSN_FMT " "
+ "last LSN: " LSN_FMT " status: %s",
+ buffer,
(uint) i,
LSN_IN_PARTS(buffer->prev_last_lsn),
LSN_IN_PARTS(buffer->last_lsn),
@@ -7853,7 +7853,7 @@ void translog_flush_buffers(TRANSLOG_ADDRESS *lsn,
buffer->pre_force_close_horizon :
buffer->offset + buffer->size);
/* pre_force_close_horizon is reset during new buffer start */
- DBUG_PRINT("info", ("flush_horizon: (%lu,0x%lx)",
+ DBUG_PRINT("info", ("flush_horizon: " LSN_FMT,
LSN_IN_PARTS(*flush_horizon)));
DBUG_ASSERT(*flush_horizon <= log_descriptor.horizon);
@@ -7922,12 +7922,12 @@ my_bool translog_flush(TRANSLOG_ADDRESS lsn)
my_bool rc= 0;
my_bool hgroup_commit_at_start;
DBUG_ENTER("translog_flush");
- DBUG_PRINT("enter", ("Flush up to LSN: (%lu,0x%lx)", LSN_IN_PARTS(lsn)));
+ DBUG_PRINT("enter", ("Flush up to LSN: " LSN_FMT, LSN_IN_PARTS(lsn)));
DBUG_ASSERT(translog_status == TRANSLOG_OK ||
translog_status == TRANSLOG_READONLY);
mysql_mutex_lock(&log_descriptor.log_flush_lock);
- DBUG_PRINT("info", ("Everything is flushed up to (%lu,0x%lx)",
+ DBUG_PRINT("info", ("Everything is flushed up to " LSN_FMT,
LSN_IN_PARTS(log_descriptor.flushed)));
if (cmp_translog_addr(log_descriptor.flushed, lsn) >= 0)
{
@@ -7956,7 +7956,7 @@ my_bool translog_flush(TRANSLOG_ADDRESS lsn)
}
log_descriptor.flush_in_progress= 1;
flush_horizon= log_descriptor.previous_flush_horizon;
- DBUG_PRINT("info", ("flush_in_progress is set, flush_horizon: (%lu,0x%lx)",
+ DBUG_PRINT("info", ("flush_in_progress is set, flush_horizon: " LSN_FMT,
LSN_IN_PARTS(flush_horizon)));
mysql_mutex_unlock(&log_descriptor.log_flush_lock);
@@ -8019,7 +8019,7 @@ retest:
log_descriptor.next_pass_max_lsn= LSN_IMPOSSIBLE;
/* prevent other thread from continue */
log_descriptor.max_lsn_requester= pthread_self();
- DBUG_PRINT("info", ("flush took next goal: (%lu,0x%lx)",
+ DBUG_PRINT("info", ("flush took next goal: " LSN_FMT,
LSN_IN_PARTS(lsn)));
mysql_mutex_unlock(&log_descriptor.log_flush_lock);
@@ -8129,7 +8129,7 @@ int translog_assign_id_to_share(MARIA_HA *tbl_info, TRN *trn)
}
i= 1; /* scan the whole array */
} while (id == 0);
- DBUG_PRINT("info", ("id_to_share: 0x%lx -> %u", (ulong)share, id));
+ DBUG_PRINT("info", ("id_to_share: %p -> %u", share, id));
fileid_store(log_data, id);
log_array[TRANSLOG_INTERNAL_PARTS + 0].str= log_data;
log_array[TRANSLOG_INTERNAL_PARTS + 0].length= sizeof(log_data);
@@ -8180,8 +8180,8 @@ int translog_assign_id_to_share(MARIA_HA *tbl_info, TRN *trn)
void translog_deassign_id_from_share(MARIA_SHARE *share)
{
- DBUG_PRINT("info", ("id_to_share: 0x%lx id %u -> 0",
- (ulong)share, share->id));
+ DBUG_PRINT("info", ("id_to_share: %p id %u -> 0",
+ share, share->id));
/*
We don't need any mutex as we are called only when closing the last
instance of the table or at the end of REPAIR: no writes can be
@@ -8376,7 +8376,7 @@ LSN translog_first_lsn_in_log()
uint16 chunk_offset;
uchar *page;
DBUG_ENTER("translog_first_lsn_in_log");
- DBUG_PRINT("info", ("Horizon: (%lu,0x%lx)", LSN_IN_PARTS(horizon)));
+ DBUG_PRINT("info", ("Horizon: " LSN_FMT, LSN_IN_PARTS(horizon)));
DBUG_ASSERT(translog_status == TRANSLOG_OK ||
translog_status == TRANSLOG_READONLY);
@@ -8415,7 +8415,7 @@ LSN translog_first_theoretical_lsn()
uchar *page;
TRANSLOG_VALIDATOR_DATA data;
DBUG_ENTER("translog_first_theoretical_lsn");
- DBUG_PRINT("info", ("Horizon: (%lu,0x%lx)", LSN_IN_PARTS(addr)));
+ DBUG_PRINT("info", ("Horizon: " LSN_FMT, LSN_IN_PARTS(addr)));
DBUG_ASSERT(translog_status == TRANSLOG_OK ||
translog_status == TRANSLOG_READONLY);
@@ -8455,7 +8455,7 @@ my_bool translog_purge(TRANSLOG_ADDRESS low)
TRANSLOG_ADDRESS horizon= translog_get_horizon();
int rc= 0;
DBUG_ENTER("translog_purge");
- DBUG_PRINT("enter", ("low: (%lu,0x%lx)", LSN_IN_PARTS(low)));
+ DBUG_PRINT("enter", ("low: " LSN_FMT, LSN_IN_PARTS(low)));
DBUG_ASSERT(translog_status == TRANSLOG_OK ||
translog_status == TRANSLOG_READONLY);
@@ -8880,7 +8880,7 @@ static void dump_header_page(uchar *buff)
printf(" WARNING: page size is not equal compiled in one %lu!!!\n",
(ulong) TRANSLOG_PAGE_SIZE);
printf(" File number %lu\n"
- " Max lsn: (%lu,0x%lx)\n",
+ " Max lsn: " LSN_FMT "\n",
desc.file_number,
LSN_IN_PARTS(desc.max_lsn));
}
@@ -8976,7 +8976,7 @@ static uchar *dump_chunk(uchar *buffer, uchar *ptr)
{
TRANSLOG_ADDRESS gpr_addr= lsn_korr(hdr_ptr);
uint pages= hdr_ptr[LSN_STORE_SIZE];
- printf (" Group +#%u: (%lu,0x%lx) pages: %u\n",
+ printf (" Group +#%u: " LSN_FMT " pages: %u\n",
(uint) i, LSN_IN_PARTS(gpr_addr), pages);
}
}
@@ -9110,8 +9110,8 @@ static void dump_datapage(uchar *buffer, File handler)
ptr= buffer + header_len;
while (ptr && ptr < buffer + TRANSLOG_PAGE_SIZE)
{
- printf(" Chunk (%lu,0x%lx):\n",
- (ulong)file, (ulong) offset + (ptr - buffer));
+ printf(" Chunk %d %lld:\n",
+ file,((longlong) (ptr - buffer)+ offset));
ptr= dump_chunk(buffer, ptr);
}
}
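
Editor's note: the recurring change in ma_loghandler.c above is replacing a "(ulong)" pointer
cast printed with "0x%lx" by the standard "%p" conversion. One reason such casts are fragile:
on LLP64 platforms (notably 64-bit Windows) long is only 32 bits, so casting a pointer to
ulong truncates it, while "%p" always matches the pointer width. A minimal standalone sketch
of the two forms, using plain printf instead of DBUG_PRINT and a hypothetical buffer variable:

  /* portable_ptr.c -- standalone sketch, not part of the patch */
  #include <stdio.h>

  int main(void)
  {
    char buffer[16];                 /* stand-in for a translog buffer */
    char *ptr= buffer;

    /* Old form: casting a pointer to unsigned long truncates it on LLP64
       platforms, where long is only 32 bits wide. */
    printf("old: 0x%lx\n", (unsigned long) ptr);

    /* New form: %p prints the full pointer value on every platform;
       casting to void * keeps the call well-defined for any pointer type. */
    printf("new: %p\n", (void *) ptr);
    return 0;
  }
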
diff --git a/storage/maria/ma_loghandler_lsn.h b/storage/maria/ma_loghandler_lsn.h
index 69481761e80..113b57914c5 100644
--- a/storage/maria/ma_loghandler_lsn.h
+++ b/storage/maria/ma_loghandler_lsn.h
@@ -47,7 +47,8 @@ typedef TRANSLOG_ADDRESS LSN;
#define LSN_FILE_NO_PART(L) ((L) & ((int64)0xFFFFFF00000000LL))
/* Parts of LSN for printing */
-#define LSN_IN_PARTS(L) (ulong)LSN_FILE_NO(L),(ulong)LSN_OFFSET(L)
+#define LSN_IN_PARTS(L) (uint)LSN_FILE_NO(L),(uint)LSN_OFFSET(L)
+#define LSN_FMT "(%u,0x%x)"
/* Gets record offset of a LSN/log address */
#define LSN_OFFSET(L) (ulong) ((L) & 0xFFFFFFFFL)
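
Editor's note: the new LSN_FMT macro above pairs with the re-cast LSN_IN_PARTS so that call
sites can splice the two into a single format string, as the converted DBUG_PRINT calls in
ma_loghandler.c do. A minimal standalone sketch of the pattern, using plain printf and a
simplified 64-bit LSN layout (file number in the upper bits, byte offset in the lower 32 bits)
assumed only for illustration:

  #include <stdio.h>
  #include <stdint.h>

  typedef uint64_t LSN;

  /* simplified stand-ins for the macros in ma_loghandler_lsn.h */
  #define LSN_FILE_NO(L)   ((uint32_t) ((L) >> 32))
  #define LSN_OFFSET(L)    ((uint32_t) ((L) & 0xFFFFFFFFUL))
  #define LSN_IN_PARTS(L)  (unsigned int) LSN_FILE_NO(L), (unsigned int) LSN_OFFSET(L)
  #define LSN_FMT          "(%u,0x%x)"

  int main(void)
  {
    LSN lsn= ((LSN) 3 << 32) | 0x2A10;   /* file 3, offset 0x2a10 */
    /* LSN_IN_PARTS expands to the two arguments that LSN_FMT consumes */
    printf("horizon: " LSN_FMT "\n", LSN_IN_PARTS(lsn));
    return 0;
  }
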
diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c
index c1fdd52357f..5a0c81d3e3e 100644
--- a/storage/maria/ma_open.c
+++ b/storage/maria/ma_open.c
@@ -1383,7 +1383,7 @@ uint _ma_state_info_write(MARIA_SHARE *share, uint pWrite)
is too new). Recovery does it by itself.
*/
share->state.is_of_horizon= translog_get_horizon();
- DBUG_PRINT("info", ("is_of_horizon set to LSN (%lu,0x%lx)",
+ DBUG_PRINT("info", ("is_of_horizon set to LSN " LSN_FMT,
LSN_IN_PARTS(share->state.is_of_horizon)));
}
res= _ma_state_info_write_sub(share->kfile.file, &share->state, pWrite);
diff --git a/storage/maria/ma_page.c b/storage/maria/ma_page.c
index 4021fb8e161..c8901e6d736 100644
--- a/storage/maria/ma_page.c
+++ b/storage/maria/ma_page.c
@@ -544,8 +544,8 @@ my_bool _ma_compact_keypage(MARIA_PAGE *ma_page, TrID min_read_from)
{
if (!(page= (*ma_page->keyinfo->skip_key)(&key, 0, 0, page)))
{
- DBUG_PRINT("error",("Couldn't find last key: page_pos: 0x%lx",
- (long) page));
+ DBUG_PRINT("error",("Couldn't find last key: page_pos: %p",
+ page));
_ma_set_fatal_error(share, HA_ERR_CRASHED);
DBUG_RETURN(1);
}
diff --git a/storage/maria/ma_pagecache.c b/storage/maria/ma_pagecache.c
index a1c407967b7..1c818e6df1e 100644
--- a/storage/maria/ma_pagecache.c
+++ b/storage/maria/ma_pagecache.c
@@ -97,10 +97,10 @@
#define PCBLOCK_INFO(B) \
DBUG_PRINT("info", \
- ("block: 0x%lx fd: %lu page: %lu status: 0x%x " \
- "hshL: 0x%lx requests: %u/%u wrlocks: %u rdlocks: %u " \
+ ("block: %p fd: %lu page: %lu status: 0x%x " \
+ "hshL: %p requests: %u/%u wrlocks: %u rdlocks: %u " \
"rdlocks_q: %u pins: %u type: %s", \
- (ulong)(B), \
+ (B), \
(ulong)((B)->hash_link ? \
(B)->hash_link->file.file : \
0), \
@@ -108,7 +108,7 @@
(B)->hash_link->pageno : \
0), \
(uint) (B)->status, \
- (ulong)(B)->hash_link, \
+ (B)->hash_link, \
(uint) (B)->requests, \
(uint)((B)->hash_link ? \
(B)->hash_link->requests : \
@@ -659,9 +659,9 @@ static my_bool pagecache_fwrite(PAGECACHE *pagecache,
/* Todo: Integrate this with write_callback so we have only one callback */
if ((*filedesc->flush_log_callback)(&args))
DBUG_RETURN(1);
- DBUG_PRINT("info", ("pre_write_hook: 0x%lx data: 0x%lx",
- (ulong) filedesc->pre_write_hook,
- (ulong) filedesc->callback_data));
+ DBUG_PRINT("info", ("pre_write_hook:%p data: %p",
+ filedesc->pre_write_hook,
+ filedesc->callback_data));
if ((*filedesc->pre_write_hook)(&args))
{
DBUG_PRINT("error", ("write callback problem"));
@@ -2789,7 +2789,7 @@ static void check_and_set_lsn(PAGECACHE *pagecache,
*/
DBUG_ASSERT((block->type == PAGECACHE_LSN_PAGE) || maria_in_recovery);
old= lsn_korr(block->buffer);
- DBUG_PRINT("info", ("old lsn: (%lu, 0x%lx) new lsn: (%lu, 0x%lx)",
+ DBUG_PRINT("info", ("old lsn: " LSN_FMT " new lsn: " LSN_FMT,
LSN_IN_PARTS(old), LSN_IN_PARTS(lsn)));
if (cmp_translog_addr(lsn, old) > 0)
{
@@ -3832,8 +3832,8 @@ restart:
block= page_link->block;
if (block->status & (PCBLOCK_REASSIGNED | PCBLOCK_IN_SWITCH))
{
- DBUG_PRINT("info", ("Block 0x%0lx already is %s",
- (ulong) block,
+ DBUG_PRINT("info", ("Block %p already is %s",
+ block,
((block->status & PCBLOCK_REASSIGNED) ?
"reassigned" : "in switch")));
PCBLOCK_INFO(block);
diff --git a/storage/maria/ma_recovery.c b/storage/maria/ma_recovery.c
index b1d2378870f..256cb2b45b2 100644
--- a/storage/maria/ma_recovery.c
+++ b/storage/maria/ma_recovery.c
@@ -558,7 +558,7 @@ static void display_record_position(const LOG_DESC *log_desc,
form a group, so we indent below the group's end record
*/
tprint(tracef,
- "%sRec#%u LSN (%lu,0x%lx) short_trid %u %s(num_type:%u) len %lu\n",
+ "%sRec#%u LSN " LSN_FMT " short_trid %u %s(num_type:%u) len %lu\n",
number ? "" : " ", number, LSN_IN_PARTS(rec->lsn),
rec->short_trid, log_desc->name, rec->type,
(ulong)rec->record_length);
@@ -617,7 +617,7 @@ prototype_redo_exec_hook(LONG_TRANSACTION_ID)
llstr(long_trid, llbuf);
eprint(tracef, "Found an old transaction long_trid %s short_trid %u"
" with same short id as this new transaction, and has neither"
- " committed nor rollback (undo_lsn: (%lu,0x%lx))",
+ " committed nor rollback (undo_lsn: " LSN_FMT ")",
llbuf, sid, LSN_IN_PARTS(ulsn));
goto err;
}
@@ -640,7 +640,7 @@ static void new_transaction(uint16 sid, TrID long_id, LSN undo_lsn,
all_active_trans[sid].long_trid= long_id;
llstr(long_id, llbuf);
tprint(tracef, "Transaction long_trid %s short_trid %u starts,"
- " undo_lsn (%lu,0x%lx) first_undo_lsn (%lu,0x%lx)\n",
+ " undo_lsn " LSN_FMT " first_undo_lsn " LSN_FMT "\n",
llbuf, sid, LSN_IN_PARTS(undo_lsn), LSN_IN_PARTS(first_undo_lsn));
all_active_trans[sid].undo_lsn= undo_lsn;
all_active_trans[sid].first_undo_lsn= first_undo_lsn;
@@ -833,7 +833,7 @@ prototype_redo_exec_hook(REDO_CREATE_TABLE)
}
if (cmp_translog_addr(share->state.create_rename_lsn, rec->lsn) >= 0)
{
- tprint(tracef, "Table '%s' has create_rename_lsn (%lu,0x%lx) more "
+ tprint(tracef, "Table '%s' has create_rename_lsn " LSN_FMT " more "
"recent than record, ignoring creation",
name, LSN_IN_PARTS(share->state.create_rename_lsn));
error= 0;
@@ -1009,7 +1009,7 @@ prototype_redo_exec_hook(REDO_RENAME_TABLE)
}
if (cmp_translog_addr(share->state.create_rename_lsn, rec->lsn) >= 0)
{
- tprint(tracef, ", has create_rename_lsn (%lu,0x%lx) more recent than"
+ tprint(tracef, ", has create_rename_lsn " LSN_FMT " more recent than"
" record, ignoring renaming",
LSN_IN_PARTS(share->state.create_rename_lsn));
error= 0;
@@ -1064,7 +1064,7 @@ prototype_redo_exec_hook(REDO_RENAME_TABLE)
}
if (cmp_translog_addr(share->state.create_rename_lsn, rec->lsn) >= 0)
{
- tprint(tracef, ", has create_rename_lsn (%lu,0x%lx) more recent than"
+ tprint(tracef, ", has create_rename_lsn " LSN_FMT " more recent than"
" record, ignoring renaming",
LSN_IN_PARTS(share->state.create_rename_lsn));
/*
@@ -1233,7 +1233,7 @@ prototype_redo_exec_hook(REDO_DROP_TABLE)
}
if (cmp_translog_addr(share->state.create_rename_lsn, rec->lsn) >= 0)
{
- tprint(tracef, ", has create_rename_lsn (%lu,0x%lx) more recent than"
+ tprint(tracef, ", has create_rename_lsn " LSN_FMT " more recent than"
" record, ignoring removal",
LSN_IN_PARTS(share->state.create_rename_lsn));
error= 0;
@@ -1403,8 +1403,8 @@ static int new_table(uint16 sid, const char *name, LSN lsn_of_file_id)
}
if (cmp_translog_addr(lsn_of_file_id, share->state.create_rename_lsn) <= 0)
{
- tprint(tracef, ", has create_rename_lsn (%lu,0x%lx) more recent than"
- " LOGREC_FILE_ID's LSN (%lu,0x%lx), ignoring open request",
+ tprint(tracef, ", has create_rename_lsn " LSN_FMT " more recent than"
+ " LOGREC_FILE_ID's LSN " LSN_FMT ", ignoring open request",
LSN_IN_PARTS(share->state.create_rename_lsn),
LSN_IN_PARTS(lsn_of_file_id));
recovery_warnings++;
@@ -1873,7 +1873,7 @@ prototype_redo_exec_hook(UNDO_ROW_INSERT)
share= info->s;
if (cmp_translog_addr(rec->lsn, share->state.is_of_horizon) >= 0)
{
- tprint(tracef, " state has LSN (%lu,0x%lx) older than record, updating"
+ tprint(tracef, " state has LSN " LSN_FMT " older than record, updating"
" rows' count\n", LSN_IN_PARTS(share->state.is_of_horizon));
share->state.state.records++;
if (share->calc_checksum)
@@ -2136,7 +2136,7 @@ prototype_redo_exec_hook(CLR_END)
if (info == NULL)
DBUG_RETURN(0);
share= info->s;
- tprint(tracef, " CLR_END was about %s, undo_lsn now LSN (%lu,0x%lx)\n",
+ tprint(tracef, " CLR_END was about %s, undo_lsn now LSN " LSN_FMT "\n",
log_desc->name, LSN_IN_PARTS(previous_undo_lsn));
enlarge_buffer(rec);
@@ -2296,7 +2296,7 @@ prototype_undo_exec_hook(UNDO_ROW_INSERT)
info->trn= 0;
/* trn->undo_lsn is updated in an inwrite_hook when writing the CLR_END */
tprint(tracef, " rows' count %lu\n", (ulong)info->s->state.state.records);
- tprint(tracef, " undo_lsn now LSN (%lu,0x%lx)\n",
+ tprint(tracef, " undo_lsn now LSN " LSN_FMT "\n",
LSN_IN_PARTS(trn->undo_lsn));
return error;
}
@@ -2335,7 +2335,7 @@ prototype_undo_exec_hook(UNDO_ROW_DELETE)
rec->record_length -
(LSN_STORE_SIZE + FILEID_STORE_SIZE));
info->trn= 0;
- tprint(tracef, " rows' count %lu\n undo_lsn now LSN (%lu,0x%lx)\n",
+ tprint(tracef, " rows' count %lu\n undo_lsn now LSN " LSN_FMT "\n",
(ulong)share->state.state.records, LSN_IN_PARTS(trn->undo_lsn));
return error;
}
@@ -2374,7 +2374,7 @@ prototype_undo_exec_hook(UNDO_ROW_UPDATE)
rec->record_length -
(LSN_STORE_SIZE + FILEID_STORE_SIZE));
info->trn= 0;
- tprint(tracef, " undo_lsn now LSN (%lu,0x%lx)\n",
+ tprint(tracef, " undo_lsn now LSN " LSN_FMT "\n",
LSN_IN_PARTS(trn->undo_lsn));
return error;
}
@@ -2415,7 +2415,7 @@ prototype_undo_exec_hook(UNDO_KEY_INSERT)
FILEID_STORE_SIZE);
info->trn= 0;
/* trn->undo_lsn is updated in an inwrite_hook when writing the CLR_END */
- tprint(tracef, " undo_lsn now LSN (%lu,0x%lx)\n",
+ tprint(tracef, " undo_lsn now LSN " LSN_FMT "\n",
LSN_IN_PARTS(trn->undo_lsn));
return error;
}
@@ -2456,7 +2456,7 @@ prototype_undo_exec_hook(UNDO_KEY_DELETE)
FILEID_STORE_SIZE, FALSE);
info->trn= 0;
/* trn->undo_lsn is updated in an inwrite_hook when writing the CLR_END */
- tprint(tracef, " undo_lsn now LSN (%lu,0x%lx)\n",
+ tprint(tracef, " undo_lsn now LSN " LSN_FMT "\n",
LSN_IN_PARTS(trn->undo_lsn));
return error;
}
@@ -2497,7 +2497,7 @@ prototype_undo_exec_hook(UNDO_KEY_DELETE_WITH_ROOT)
FILEID_STORE_SIZE, TRUE);
info->trn= 0;
/* trn->undo_lsn is updated in an inwrite_hook when writing the CLR_END */
- tprint(tracef, " undo_lsn now LSN (%lu,0x%lx)\n",
+ tprint(tracef, " undo_lsn now LSN " LSN_FMT "\n",
LSN_IN_PARTS(trn->undo_lsn));
return error;
}
@@ -2525,7 +2525,7 @@ prototype_undo_exec_hook(UNDO_BULK_INSERT)
error= _ma_apply_undo_bulk_insert(info, previous_undo_lsn);
info->trn= 0;
/* trn->undo_lsn is updated in an inwrite_hook when writing the CLR_END */
- tprint(tracef, " undo_lsn now LSN (%lu,0x%lx)\n",
+ tprint(tracef, " undo_lsn now LSN " LSN_FMT "\n",
LSN_IN_PARTS(trn->undo_lsn));
return error;
}
@@ -2663,7 +2663,7 @@ static int run_redo_phase(LSN lsn, LSN lsn_end, enum maria_apply_log_way apply)
if (lsn_end != LSN_IMPOSSIBLE && rec2.lsn >= lsn_end)
{
tprint(tracef,
- "lsn_end reached at (%lu,0x%lx). "
+ "lsn_end reached at " LSN_FMT ". "
"Skipping rest of redo entries",
LSN_IN_PARTS(rec2.lsn));
translog_destroy_scanner(&scanner);
@@ -2818,7 +2818,7 @@ static uint end_of_redo_phase(my_bool prepare_for_undo_phase)
TRN *trn;
if (gslsn != LSN_IMPOSSIBLE)
{
- tprint(tracef, "Group at LSN (%lu,0x%lx) short_trid %u incomplete\n",
+ tprint(tracef, "Group at LSN " LSN_FMT " short_trid %u incomplete\n",
LSN_IN_PARTS(gslsn), sid);
all_active_trans[sid].group_start_lsn= LSN_IMPOSSIBLE;
}
@@ -3109,7 +3109,7 @@ static MARIA_HA *get_MARIA_HA_from_REDO_record(const
table was).
*/
DBUG_ASSERT(cmp_translog_addr(rec->lsn, checkpoint_start) < 0);
- tprint(tracef, ", table's LOGREC_FILE_ID has LSN (%lu,0x%lx) more recent"
+ tprint(tracef, ", table's LOGREC_FILE_ID has LSN " LSN_FMT " more recent"
" than record, skipping record",
LSN_IN_PARTS(share->lsn_of_file_id));
return NULL;
@@ -3117,7 +3117,7 @@ static MARIA_HA *get_MARIA_HA_from_REDO_record(const
if (cmp_translog_addr(rec->lsn, share->state.skip_redo_lsn) <= 0)
{
/* probably a bulk insert repair */
- tprint(tracef, ", has skip_redo_lsn (%lu,0x%lx) more recent than"
+ tprint(tracef, ", has skip_redo_lsn " LSN_FMT " more recent than"
" record, skipping record\n",
LSN_IN_PARTS(share->state.skip_redo_lsn));
return NULL;
@@ -3176,7 +3176,7 @@ static MARIA_HA *get_MARIA_HA_from_UNDO_record(const
if (cmp_translog_addr(rec->lsn, share->lsn_of_file_id) <= 0)
{
- tprint(tracef, ", table's LOGREC_FILE_ID has LSN (%lu,0x%lx) more recent"
+ tprint(tracef, ", table's LOGREC_FILE_ID has LSN " LSN_FMT " more recent"
" than record, skipping record",
LSN_IN_PARTS(share->lsn_of_file_id));
return NULL;
@@ -3185,7 +3185,7 @@ static MARIA_HA *get_MARIA_HA_from_UNDO_record(const
cmp_translog_addr(rec->lsn, share->state.skip_redo_lsn) <= 0)
{
/* probably a bulk insert repair */
- tprint(tracef, ", has skip_redo_lsn (%lu,0x%lx) more recent than"
+ tprint(tracef, ", has skip_redo_lsn " LSN_FMT " more recent than"
" record, skipping record\n",
LSN_IN_PARTS(share->state.skip_redo_lsn));
return NULL;
@@ -3220,12 +3220,12 @@ static LSN parse_checkpoint_record(LSN lsn)
LSN minimum_rec_lsn_of_active_transactions, minimum_rec_lsn_of_dirty_pages;
struct st_dirty_page *next_dirty_page_in_pool;
- tprint(tracef, "Loading data from checkpoint record at LSN (%lu,0x%lx)\n",
+ tprint(tracef, "Loading data from checkpoint record at LSN " LSN_FMT "\n",
LSN_IN_PARTS(lsn));
if ((len= translog_read_record_header(lsn, &rec)) == RECHEADER_READ_ERROR ||
rec.type != LOGREC_CHECKPOINT)
{
- eprint(tracef, "Cannot find checkpoint record at LSN (%lu,0x%lx)",
+ eprint(tracef, "Cannot find checkpoint record at LSN " LSN_FMT,
LSN_IN_PARTS(lsn));
return LSN_ERROR;
}
@@ -3243,7 +3243,7 @@ static LSN parse_checkpoint_record(LSN lsn)
ptr= log_record_buffer.str;
start_address= lsn_korr(ptr);
ptr+= LSN_STORE_SIZE;
- tprint(tracef, "Checkpoint record has start_horizon at (%lu,0x%lx)\n",
+ tprint(tracef, "Checkpoint record has start_horizon at " LSN_FMT "\n",
LSN_IN_PARTS(start_address));
/* transactions */
@@ -3261,7 +3261,7 @@ static LSN parse_checkpoint_record(LSN lsn)
takes to write one or a few rows, roughly).
*/
tprint(tracef, "Checkpoint record has min_rec_lsn of active transactions"
- " at (%lu,0x%lx)\n",
+ " at " LSN_FMT "\n",
LSN_IN_PARTS(minimum_rec_lsn_of_active_transactions));
set_if_smaller(start_address, minimum_rec_lsn_of_active_transactions);
@@ -3349,7 +3349,7 @@ static LSN parse_checkpoint_record(LSN lsn)
page_id, rec_lsn, next_dirty_page_in_pool++))
return LSN_ERROR;
if (maria_recovery_verbose)
- tprint(tracef, "%8u %8u %12lu %lu,0x%lx\n", (uint) table_id,
+ tprint(tracef, "%8u %8u %12lu " LSN_FMT "\n", (uint) table_id,
(uint) is_index, (ulong) page_id, LSN_IN_PARTS(rec_lsn));
set_if_smaller(minimum_rec_lsn_of_dirty_pages, rec_lsn);
}
@@ -3372,7 +3372,7 @@ static LSN parse_checkpoint_record(LSN lsn)
start_address= checkpoint_start=
translog_next_LSN(start_address, LSN_IMPOSSIBLE);
tprint(tracef, "Checkpoint record start_horizon now adjusted to"
- " LSN (%lu,0x%lx)\n", LSN_IN_PARTS(start_address));
+ " LSN " LSN_FMT "\n", LSN_IN_PARTS(start_address));
if (checkpoint_start == LSN_IMPOSSIBLE)
{
/*
@@ -3383,10 +3383,10 @@ static LSN parse_checkpoint_record(LSN lsn)
}
/* now, where the REDO phase should start reading log: */
tprint(tracef, "Checkpoint has min_rec_lsn of dirty pages at"
- " LSN (%lu,0x%lx)\n", LSN_IN_PARTS(minimum_rec_lsn_of_dirty_pages));
+ " LSN " LSN_FMT "\n", LSN_IN_PARTS(minimum_rec_lsn_of_dirty_pages));
set_if_smaller(start_address, minimum_rec_lsn_of_dirty_pages);
DBUG_PRINT("info",
- ("checkpoint_start: (%lu,0x%lx) start_address: (%lu,0x%lx)",
+ ("checkpoint_start: " LSN_FMT " start_address: " LSN_FMT,
LSN_IN_PARTS(checkpoint_start), LSN_IN_PARTS(start_address)));
return start_address;
}
diff --git a/storage/maria/ma_rkey.c b/storage/maria/ma_rkey.c
index 58e47089ce9..c98cdfe15ef 100644
--- a/storage/maria/ma_rkey.c
+++ b/storage/maria/ma_rkey.c
@@ -36,8 +36,8 @@ int maria_rkey(MARIA_HA *info, uchar *buf, int inx, const uchar *key_data,
MARIA_KEY key;
ICP_RESULT icp_res= ICP_MATCH;
DBUG_ENTER("maria_rkey");
- DBUG_PRINT("enter", ("base: 0x%lx buf: 0x%lx inx: %d search_flag: %d",
- (long) info, (long) buf, inx, search_flag));
+ DBUG_PRINT("enter", ("base:%p buf:%p inx: %d search_flag: %d",
+ info, buf, inx, search_flag));
if ((inx = _ma_check_index(info,inx)) < 0)
DBUG_RETURN(my_errno);
diff --git a/storage/maria/ma_rt_index.c b/storage/maria/ma_rt_index.c
index c92045eb245..4c18bff7c70 100644
--- a/storage/maria/ma_rt_index.c
+++ b/storage/maria/ma_rt_index.c
@@ -105,7 +105,7 @@ static int maria_rtree_find_req(MARIA_HA *info, MARIA_KEYDEF *keyinfo,
level + 1)))
{
case 0: /* found - exit from recursion */
- *saved_key= k - page_buf;
+ *saved_key= (uint) (k - page_buf);
goto ok;
case 1: /* not found - continue searching */
info->maria_rtree_recursion_depth= level;
@@ -140,7 +140,7 @@ static int maria_rtree_find_req(MARIA_HA *info, MARIA_KEYDEF *keyinfo,
memcpy(info->last_key.data, k,
info->last_key.data_length + info->last_key.ref_length);
info->maria_rtree_recursion_depth= level;
- *saved_key= last - page_buf;
+ *saved_key= (uint) (last - page_buf);
if (after_key < last)
{
@@ -366,7 +366,7 @@ static int maria_rtree_get_req(MARIA_HA *info, MARIA_KEYDEF *keyinfo,
_ma_kpos(nod_flag, k), level + 1)))
{
case 0: /* found - exit from recursion */
- *saved_key= k - page.buff;
+ *saved_key= (uint) (k - page.buff);
goto ok;
case 1: /* not found - continue searching */
info->maria_rtree_recursion_depth= level;
@@ -398,7 +398,7 @@ static int maria_rtree_get_req(MARIA_HA *info, MARIA_KEYDEF *keyinfo,
info->last_key.data_length + info->last_key.ref_length);
info->maria_rtree_recursion_depth= level;
- *saved_key= k - page.buff;
+ *saved_key= (uint) (k - page.buff);
if (after_key < last)
{
diff --git a/storage/maria/ma_rt_key.c b/storage/maria/ma_rt_key.c
index 488137ff159..500e484cf02 100644
--- a/storage/maria/ma_rt_key.c
+++ b/storage/maria/ma_rt_key.c
@@ -58,7 +58,7 @@ int maria_rtree_add_key(const MARIA_KEY *key, MARIA_PAGE *page,
page->size+= tot_key_length;
page_store_size(share, page);
if (share->now_transactional &&
- _ma_log_add(page, key_pos - page->buff,
+ _ma_log_add(page, (uint)(key_pos - page->buff),
key_pos, tot_key_length, tot_key_length, 0,
KEY_OP_DEBUG_LOG_ADD_1))
DBUG_RETURN(-1);
diff --git a/storage/maria/ma_rt_split.c b/storage/maria/ma_rt_split.c
index c26c0277e4f..898cb574f63 100644
--- a/storage/maria/ma_rt_split.c
+++ b/storage/maria/ma_rt_split.c
@@ -308,7 +308,7 @@ static my_bool _ma_log_rt_split(MARIA_PAGE *page,
uint translog_parts, extra_length= 0;
my_off_t page_pos;
DBUG_ENTER("_ma_log_rt_split");
- DBUG_PRINT("enter", ("page: %lu", (ulong) page));
+ DBUG_PRINT("enter", ("page: %p", page));
DBUG_ASSERT(share->now_transactional);
page_pos= page->pos / share->block_size;
@@ -477,11 +477,11 @@ int maria_rtree_split_page(const MARIA_KEY *key, MARIA_PAGE *page,
memcpy(to_with_nod_flag, cur_key_with_nod_flag, full_length);
if (log_this_change)
{
- uint to_with_nod_flag_offs= to_with_nod_flag - page->buff;
+ size_t to_with_nod_flag_offs= to_with_nod_flag - page->buff;
if (likely(cur_key != key->data))
{
/* this memcpy() is internal to the page (source in the page) */
- uint cur_key_with_nod_flag_offs= cur_key_with_nod_flag - page->buff;
+ size_t cur_key_with_nod_flag_offs= cur_key_with_nod_flag - page->buff;
int2store(log_internal_copy_ptr, to_with_nod_flag_offs);
log_internal_copy_ptr+= 2;
int2store(log_internal_copy_ptr, cur_key_with_nod_flag_offs);
@@ -526,8 +526,8 @@ int maria_rtree_split_page(const MARIA_KEY *key, MARIA_PAGE *page,
( /* log change to split page */
_ma_log_rt_split(page, key->data - nod_flag,
full_length, log_internal_copy,
- log_internal_copy_ptr - log_internal_copy,
- log_key_copy, org_length - page->size) ||
+ (uint)(log_internal_copy_ptr - log_internal_copy),
+ log_key_copy, (uint)(org_length - page->size)) ||
/* and to new page */
_ma_log_new(&new_page, 0)))
err_code= -1;
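
Editor's note: several of the rtree hunks above add explicit (uint) or size_t conversions on
pointer subtractions such as k - page_buf. The underlying C rule: subtracting two pointers
yields a ptrdiff_t (64 bits on common 64-bit builds), so assigning the result to a 32-bit uint
typically draws implicit-conversion warnings unless the narrowing is spelled out, while keeping
the offset in size_t avoids the narrowing entirely. A short standalone sketch with hypothetical
names:

  #include <stddef.h>
  #include <stdio.h>

  int main(void)
  {
    char page_buf[8192];                 /* stand-in for an index page   */
    char *k= page_buf + 100;             /* stand-in for a key position  */

    ptrdiff_t diff= k - page_buf;        /* what pointer subtraction yields */
    size_t    off = (size_t) diff;       /* lossless for any page size      */
    unsigned  saved_key= (unsigned) (k - page_buf);  /* explicit narrowing,
                                            as the casts in the patch do    */

    printf("diff=%td off=%zu saved_key=%u\n", diff, off, saved_key);
    return 0;
  }
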
diff --git a/storage/maria/ma_search.c b/storage/maria/ma_search.c
index 951850b16a4..089e3fabdb2 100644
--- a/storage/maria/ma_search.c
+++ b/storage/maria/ma_search.c
@@ -380,8 +380,8 @@ int _ma_seq_search(const MARIA_KEY *key, const MARIA_PAGE *ma_page,
{
_ma_set_fatal_error(share, HA_ERR_CRASHED);
DBUG_PRINT("error",
- ("Found wrong key: length: %u page: 0x%lx end: 0x%lx",
- length, (long) page, (long) end));
+ ("Found wrong key: length: %u page: %p end: %p",
+ length, page, end));
DBUG_RETURN(MARIA_FOUND_WRONG_KEY);
}
if ((flag= ha_key_cmp(keyinfo->seg, t_buff, key->data,
@@ -389,15 +389,15 @@ int _ma_seq_search(const MARIA_KEY *key, const MARIA_PAGE *ma_page,
comp_flag | tmp_key.flag,
not_used)) >= 0)
break;
- DBUG_PRINT("loop_extra",("page: 0x%lx key: '%s' flag: %d",
- (long) page, t_buff, flag));
+ DBUG_PRINT("loop_extra",("page:%p key: '%s' flag: %d",
+ page, t_buff, flag));
memcpy(buff,t_buff,length);
*ret_pos=page;
}
if (flag == 0)
memcpy(buff,t_buff,length); /* Result is first key */
*last_key= page == end;
- DBUG_PRINT("exit",("flag: %d ret_pos: 0x%lx", flag, (long) *ret_pos));
+ DBUG_PRINT("exit",("flag: %d ret_pos: %p", flag, *ret_pos));
DBUG_RETURN(flag);
} /* _ma_seq_search */
@@ -555,8 +555,8 @@ int _ma_prefix_search(const MARIA_KEY *key, const MARIA_PAGE *ma_page,
{
_ma_set_fatal_error(share, HA_ERR_CRASHED);
DBUG_PRINT("error",
- ("Found wrong key: length: %u page: 0x%lx end: %lx",
- length, (long) page, (long) end));
+ ("Found wrong key: length: %u page: %p end: %p",
+ length, page, end));
DBUG_RETURN(MARIA_FOUND_WRONG_KEY);
}
@@ -692,7 +692,7 @@ int _ma_prefix_search(const MARIA_KEY *key, const MARIA_PAGE *ma_page,
*last_key= page == end;
- DBUG_PRINT("exit",("flag: %d ret_pos: 0x%lx", flag, (long) *ret_pos));
+ DBUG_PRINT("exit",("flag: %d ret_pos: %p", flag, *ret_pos));
DBUG_RETURN(flag);
} /* _ma_prefix_search */
@@ -1047,8 +1047,8 @@ uint _ma_get_pack_key(MARIA_KEY *int_key, uint page_flag,
if (length > keyseg->length)
{
DBUG_PRINT("error",
- ("Found too long null packed key: %u of %u at 0x%lx",
- length, keyseg->length, (long) *page_pos));
+ ("Found too long null packed key: %u of %u at %p",
+ length, keyseg->length, *page_pos));
DBUG_DUMP("key", *page_pos, 16);
_ma_set_fatal_error(keyinfo->share, HA_ERR_CRASHED);
return 0;
@@ -1104,8 +1104,8 @@ uint _ma_get_pack_key(MARIA_KEY *int_key, uint page_flag,
}
if (length > (uint) keyseg->length)
{
- DBUG_PRINT("error",("Found too long packed key: %u of %u at 0x%lx",
- length, keyseg->length, (long) *page_pos));
+ DBUG_PRINT("error",("Found too long packed key: %u of %u at %p",
+ length, keyseg->length, *page_pos));
DBUG_DUMP("key", *page_pos, 16);
_ma_set_fatal_error(keyinfo->share, HA_ERR_CRASHED);
return 0; /* Error */
@@ -1134,7 +1134,7 @@ uint _ma_get_pack_key(MARIA_KEY *int_key, uint page_flag,
page+=length;
}
- int_key->data_length= (key - int_key->data);
+ int_key->data_length= (uint)(key - int_key->data);
int_key->flag= 0;
length= keyseg->length;
if (page_flag & KEYPAGE_FLAG_HAS_TRANSID)
@@ -1263,8 +1263,8 @@ uint _ma_get_binary_pack_key(MARIA_KEY *int_key, uint page_flag, uint nod_flag,
if (length > keyinfo->maxlength)
{
DBUG_PRINT("error",
- ("Found too long binary packed key: %u of %u at 0x%lx",
- length, keyinfo->maxlength, (long) *page_pos));
+ ("Found too long binary packed key: %u of %u at %p",
+ length, keyinfo->maxlength, *page_pos));
DBUG_DUMP("key", *page_pos, 16);
_ma_set_fatal_error(keyinfo->share, HA_ERR_CRASHED);
DBUG_RETURN(0); /* Wrong key */
@@ -1325,8 +1325,8 @@ uint _ma_get_binary_pack_key(MARIA_KEY *int_key, uint page_flag, uint nod_flag,
from=page; from_end=page_end;
}
DBUG_ASSERT((int) length >= 0);
- DBUG_PRINT("info",("key: 0x%lx from: 0x%lx length: %u",
- (long) key, (long) from, length));
+ DBUG_PRINT("info",("key: %p from: %p length: %u",
+ key, from, length));
memmove(key, from, (size_t) length);
key+=length;
from+=length;
@@ -1336,7 +1336,7 @@ uint _ma_get_binary_pack_key(MARIA_KEY *int_key, uint page_flag, uint nod_flag,
If we have mixed key blocks with data pointer and key block pointer,
we have to copy both.
*/
- int_key->data_length= (key - int_key->data);
+ int_key->data_length= (uint)(key - int_key->data);
int_key->ref_length= length= keyseg->length;
int_key->flag= 0;
if ((tmp=(uint) (from_end-from)) <= length)
@@ -1452,7 +1452,7 @@ uchar *_ma_get_key(MARIA_KEY *key, MARIA_PAGE *ma_page, uchar *keypos)
}
}
}
- DBUG_PRINT("exit",("page: 0x%lx length: %u", (long) page,
+ DBUG_PRINT("exit",("page: %p length: %u", page,
key->data_length + key->ref_length));
DBUG_RETURN(page);
} /* _ma_get_key */
@@ -1522,8 +1522,8 @@ uchar *_ma_get_last_key(MARIA_KEY *key, MARIA_PAGE *ma_page, uchar *endpos)
uchar *lastpos, *page;
MARIA_KEYDEF *keyinfo= key->keyinfo;
DBUG_ENTER("_ma_get_last_key");
- DBUG_PRINT("enter",("page: 0x%lx endpos: 0x%lx", (long) ma_page->buff,
- (long) endpos));
+ DBUG_PRINT("enter",("page: %p endpos: %p", ma_page->buff,
+ endpos));
page_flag= ma_page->flag;
nod_flag= ma_page->node;
@@ -1548,14 +1548,14 @@ uchar *_ma_get_last_key(MARIA_KEY *key, MARIA_PAGE *ma_page, uchar *endpos)
lastpos= page;
if (!(*keyinfo->get_key)(key, page_flag, nod_flag, &page))
{
- DBUG_PRINT("error",("Couldn't find last key: page: 0x%lx",
- (long) page));
+ DBUG_PRINT("error",("Couldn't find last key: page: %p",
+ page));
_ma_set_fatal_error(keyinfo->share, HA_ERR_CRASHED);
DBUG_RETURN(0);
}
}
}
- DBUG_PRINT("exit",("lastpos: 0x%lx length: %u", (ulong) lastpos,
+ DBUG_PRINT("exit",("lastpos: %p length: %u", lastpos,
key->data_length + key->ref_length));
DBUG_RETURN(lastpos);
} /* _ma_get_last_key */
@@ -1654,9 +1654,9 @@ int _ma_search_next(register MARIA_HA *info, MARIA_KEY *key,
MARIA_KEY tmp_key;
MARIA_PAGE page;
DBUG_ENTER("_ma_search_next");
- DBUG_PRINT("enter",("nextflag: %u lastpos: %lu int_keypos: 0x%lx page_changed %d keyread_buff_used: %d",
+ DBUG_PRINT("enter",("nextflag: %u lastpos: %lu int_keypos:%p page_changed %d keyread_buff_used: %d",
nextflag, (ulong) info->cur_row.lastpos,
- (ulong) info->int_keypos,
+ info->int_keypos,
info->page_changed, info->keyread_buff_used));
DBUG_EXECUTE("key", _ma_print_key(DBUG_FILE, key););
@@ -2142,8 +2142,8 @@ _ma_calc_var_pack_key_length(const MARIA_KEY *int_key, uint nod_flag,
ref_length=0;
next_length_pack=0;
}
- DBUG_PRINT("test",("length: %d next_key: 0x%lx", length,
- (long) next_key));
+ DBUG_PRINT("test",("length: %d next_key: %p", length,
+ next_key));
{
uint tmp_length;
diff --git a/storage/maria/ma_servicethread.c b/storage/maria/ma_servicethread.c
index e495b15eef2..99ae36689de 100644
--- a/storage/maria/ma_servicethread.c
+++ b/storage/maria/ma_servicethread.c
@@ -31,7 +31,7 @@ int ma_service_thread_control_init(MA_SERVICE_THREAD_CONTROL *control)
{
int res= 0;
DBUG_ENTER("ma_service_thread_control_init");
- DBUG_PRINT("init", ("control 0x%lx", (ulong) control));
+ DBUG_PRINT("init", ("control %p", control));
control->inited= TRUE;
control->killed= FALSE;
res= (mysql_mutex_init(key_SERVICE_THREAD_CONTROL_lock,
@@ -57,7 +57,7 @@ int ma_service_thread_control_init(MA_SERVICE_THREAD_CONTROL *control)
void ma_service_thread_control_end(MA_SERVICE_THREAD_CONTROL *control)
{
DBUG_ENTER("ma_service_thread_control_end");
- DBUG_PRINT("init", ("control 0x%lx", (ulong) control));
+ DBUG_PRINT("init", ("control %p", control));
DBUG_ASSERT(control->inited);
mysql_mutex_lock(control->LOCK_control);
if (!control->killed)
@@ -95,7 +95,7 @@ my_bool my_service_thread_sleep(MA_SERVICE_THREAD_CONTROL *control,
struct timespec abstime;
my_bool res= FALSE;
DBUG_ENTER("my_service_thread_sleep");
- DBUG_PRINT("init", ("control 0x%lx", (ulong) control));
+ DBUG_PRINT("init", ("control %p", control));
mysql_mutex_lock(control->LOCK_control);
if (control->killed)
{
diff --git a/storage/maria/ma_sort.c b/storage/maria/ma_sort.c
index 6e106976b70..024b72fff2e 100644
--- a/storage/maria/ma_sort.c
+++ b/storage/maria/ma_sort.c
@@ -500,10 +500,10 @@ static my_bool _ma_thr_find_all_keys_exec(MARIA_SORT_PARAM* sort_param)
(BUFFPEK *) alloc_dynamic(&sort_param->buffpek),
&sort_param->tempfile))
goto err;
- sort_param->keys= (sort_param->buffpek.elements - 1) * (keys - 1) + idx;
+ sort_param->keys= (uint)((sort_param->buffpek.elements - 1) * (keys - 1) + idx);
}
else
- sort_param->keys= idx;
+ sort_param->keys= (uint)idx;
DBUG_RETURN(FALSE);
@@ -627,7 +627,7 @@ int _ma_thr_write_keys(MARIA_SORT_PARAM *sort_param)
uint maxbuffer=sinfo->buffpek.elements-1;
if (!mergebuf)
{
- length=param->sort_buffer_length;
+ length=(size_t)param->sort_buffer_length;
while (length >= MIN_SORT_MEMORY)
{
if ((mergebuf= my_malloc((size_t) length, MYF(0))))
@@ -919,13 +919,13 @@ static my_off_t read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek,
uint sort_length)
{
register ha_keys count;
- my_off_t length;
+ size_t length;
if ((count= (ha_keys) MY_MIN((ha_rows) buffpek->max_keys,
(ha_rows) buffpek->count)))
{
if (my_b_pread(fromfile, (uchar*) buffpek->base,
- (length= sort_length * count), buffpek->file_pos))
+ (length= sort_length * (size_t)count), buffpek->file_pos))
return(HA_OFFSET_ERROR); /* purecov: inspected */
buffpek->key=buffpek->base;
buffpek->file_pos+= length; /* New filepos */
diff --git a/storage/maria/ma_state.c b/storage/maria/ma_state.c
index 15cc48ad468..05bc8a8acb8 100644
--- a/storage/maria/ma_state.c
+++ b/storage/maria/ma_state.c
@@ -86,8 +86,8 @@ my_bool _ma_setup_live_state(MARIA_HA *info)
mysql_mutex_lock(&share->intern_lock);
share->in_trans++;
- DBUG_PRINT("info", ("share: 0x%lx in_trans: %d",
- (ulong) share, share->in_trans));
+ DBUG_PRINT("info", ("share: %p in_trans: %d",
+ share, share->in_trans));
history= share->state_history;
@@ -524,8 +524,8 @@ my_bool _ma_trnman_end_trans_hook(TRN *trn, my_bool commit,
/* Remove not visible states */
share->state_history= _ma_remove_not_visible_states(history, 0, 1);
}
- DBUG_PRINT("info", ("share: 0x%lx in_trans: %d",
- (ulong) share, share->in_trans));
+ DBUG_PRINT("info", ("share: %p in_trans: %d",
+ share, share->in_trans));
}
}
share->in_trans--;
diff --git a/storage/maria/ma_write.c b/storage/maria/ma_write.c
index 06aa2da7ae2..ffebb8c0ac9 100644
--- a/storage/maria/ma_write.c
+++ b/storage/maria/ma_write.c
@@ -787,7 +787,7 @@ int _ma_insert(register MARIA_HA *info, MARIA_KEY *key,
MARIA_SHARE *share= info->s;
MARIA_KEYDEF *keyinfo= key->keyinfo;
DBUG_ENTER("_ma_insert");
- DBUG_PRINT("enter",("key_pos: 0x%lx", (ulong) key_pos));
+ DBUG_PRINT("enter",("key_pos:%p", key_pos));
DBUG_EXECUTE("key", _ma_print_key(DBUG_FILE, key););
/*
@@ -813,8 +813,8 @@ int _ma_insert(register MARIA_HA *info, MARIA_KEY *key,
{
DBUG_PRINT("test",("t_length: %d ref_len: %d",
t_length,s_temp.ref_length));
- DBUG_PRINT("test",("n_ref_len: %d n_length: %d key_pos: 0x%lx",
- s_temp.n_ref_length, s_temp.n_length, (long) s_temp.key));
+ DBUG_PRINT("test",("n_ref_len: %d n_length: %d key_pos: %p",
+ s_temp.n_ref_length, s_temp.n_length, s_temp.key));
}
#endif
if (t_length > 0)
@@ -1128,8 +1128,8 @@ uchar *_ma_find_half_pos(MARIA_KEY *key, MARIA_PAGE *ma_page,
DBUG_RETURN(0);
} while (page < end);
*after_key= page;
- DBUG_PRINT("exit",("returns: 0x%lx page: 0x%lx half: 0x%lx",
- (long) lastpos, (long) page, (long) end));
+ DBUG_PRINT("exit",("returns: %p page: %p half: %p",
+ lastpos, page, end));
DBUG_RETURN(lastpos);
} /* _ma_find_half_pos */
@@ -1211,8 +1211,8 @@ static uchar *_ma_find_last_pos(MARIA_KEY *int_key, MARIA_PAGE *ma_page,
} while (page < end);
*after_key=lastpos;
- DBUG_PRINT("exit",("returns: 0x%lx page: 0x%lx end: 0x%lx",
- (long) prevpos,(long) page,(long) end));
+ DBUG_PRINT("exit",("returns: %p page: %p end: %p",
+ prevpos,page,end));
DBUG_RETURN(prevpos);
} /* _ma_find_last_pos */
diff --git a/storage/maria/maria_chk.c b/storage/maria/maria_chk.c
index fc3d3492252..cb8b374691e 100644
--- a/storage/maria/maria_chk.c
+++ b/storage/maria/maria_chk.c
@@ -1526,8 +1526,8 @@ static void descript(HA_CHECK *param, register MARIA_HA *info, char *name)
}
if (share->base.born_transactional)
{
- printf("LSNs: create_rename (%lu,0x%lx),"
- " state_horizon (%lu,0x%lx), skip_redo (%lu,0x%lx)\n",
+ printf("LSNs: create_rename " LSN_FMT ","
+ " state_horizon " LSN_FMT ", skip_redo " LSN_FMT "\n",
LSN_IN_PARTS(share->state.create_rename_lsn),
LSN_IN_PARTS(share->state.is_of_horizon),
LSN_IN_PARTS(share->state.skip_redo_lsn));
diff --git a/storage/maria/maria_read_log.c b/storage/maria/maria_read_log.c
index a0724b2199b..2c24c125f36 100644
--- a/storage/maria/maria_read_log.c
+++ b/storage/maria/maria_read_log.c
@@ -70,7 +70,7 @@ int main(int argc, char **argv)
fprintf(stderr, "Can't find any log\n");
goto err;
}
- if (init_pagecache(maria_pagecache, opt_page_buffer_size, 0, 0,
+ if (init_pagecache(maria_pagecache, (size_t)opt_page_buffer_size, 0, 0,
maria_block_size, 0, MY_WME) == 0)
{
fprintf(stderr, "Got error in init_pagecache() (errno: %d)\n", errno);
@@ -109,11 +109,11 @@ int main(int argc, char **argv)
last_checkpoint_lsn != LSN_IMPOSSIBLE)
{
lsn= LSN_IMPOSSIBLE; /* LSN set in maria_apply_log() */
- fprintf(stdout, "Starting from checkpoint (%lu,0x%lx)\n",
+ fprintf(stdout, "Starting from checkpoint " LSN_FMT "\n",
LSN_IN_PARTS(last_checkpoint_lsn));
}
else
- fprintf(stdout, "The transaction log starts from lsn (%lu,0x%lx)\n",
+ fprintf(stdout, "The transaction log starts from lsn " LSN_FMT "\n",
LSN_IN_PARTS(lsn));
if (opt_start_from_lsn)
@@ -125,7 +125,7 @@ int main(int argc, char **argv)
goto err;
}
lsn= (LSN) opt_start_from_lsn;
- fprintf(stdout, "Starting reading log from lsn (%lu,0x%lx)\n",
+ fprintf(stdout, "Starting reading log from lsn " LSN_FMT "\n",
LSN_IN_PARTS(lsn));
}
diff --git a/storage/maria/unittest/ma_test_loghandler-t.c b/storage/maria/unittest/ma_test_loghandler-t.c
index aa8615e9b77..02922ed9331 100644
--- a/storage/maria/unittest/ma_test_loghandler-t.c
+++ b/storage/maria/unittest/ma_test_loghandler-t.c
@@ -114,7 +114,7 @@ static my_bool check_content(uchar *ptr, ulong length)
void read_ok(TRANSLOG_HEADER_BUFFER *rec)
{
- ok(1, "read record type: %u LSN: (%lu,0x%lx)",
+ ok(1, "read record type: %u LSN: " LSN_FMT,
rec->type, LSN_IN_PARTS(rec->lsn));
}
@@ -399,7 +399,7 @@ int main(int argc __attribute__((unused)), char *argv[])
fprintf(stderr, "Incorrect LOGREC_FIXED_RECORD_0LSN_EXAMPLE "
"data read(0)\n"
"type %u, strid %u, len %u, i: %u, 4: %u 5: %u, "
- "lsn(%lu,0x%lx)\n",
+ "lsn" LSN_FMT "\n",
(uint) rec.type, (uint) rec.short_trid, (uint) rec.record_length,
(uint) uint4korr(rec.header), (uint) rec.header[4],
(uint) rec.header[5],
@@ -444,8 +444,8 @@ int main(int argc __attribute__((unused)), char *argv[])
fprintf(stderr, "Incorrect LOGREC_FIXED_RECORD_1LSN_EXAMPLE "
"data read(%d) "
"type: %u strid: %u len: %u"
- "ref: (%lu,0x%lx) (%lu,0x%lx) "
- "lsn(%lu,0x%lx)\n",
+ "ref: " LSN_FMT " " LSN_FMT " "
+ "lsn" LSN_FMT "\n",
i, (uint) rec.type, (uint) rec.short_trid,
(uint) rec.record_length,
LSN_IN_PARTS(ref), LSN_IN_PARTS(lsn),
@@ -475,9 +475,9 @@ int main(int argc __attribute__((unused)), char *argv[])
{
fprintf(stderr, "Incorrect LOGREC_FIXED_RECORD_2LSN_EXAMPLE "
"data read(%d) "
- "type %u, strid %u, len %u, ref1(%lu,0x%lx), "
- "ref2(%lu,0x%lx) %x%x%x%x%x%x%x%x%x "
- "lsn(%lu,0x%lx)\n",
+ "type %u, strid %u, len %u, ref1" LSN_FMT ", "
+ "ref2" LSN_FMT " %x%x%x%x%x%x%x%x%x "
+ "lsn" LSN_FMT "\n",
i, (uint) rec.type, (uint) rec.short_trid,
(uint) rec.record_length,
LSN_IN_PARTS(ref1), LSN_IN_PARTS(ref2),
@@ -522,7 +522,7 @@ int main(int argc __attribute__((unused)), char *argv[])
"data read(%d)"
"type %u (%d), strid %u (%d), len %lu, %lu + 7 (%d), "
"hdr len: %u (%d), "
- "ref(%lu,0x%lx), lsn(%lu,0x%lx) (%d), content: %d\n",
+ "ref" LSN_FMT ", lsn" LSN_FMT " (%d), content: %d\n",
i, (uint) rec.type,
rec.type != LOGREC_VARIABLE_RECORD_1LSN_EXAMPLE,
(uint) rec.short_trid,
@@ -541,7 +541,7 @@ int main(int argc __attribute__((unused)), char *argv[])
{
fprintf(stderr,
"Incorrect LOGREC_VARIABLE_RECORD_1LSN_EXAMPLE "
- "in whole rec read lsn(%lu,0x%lx)\n",
+ "in whole rec read lsn" LSN_FMT "\n",
LSN_IN_PARTS(rec.lsn));
goto err;
}
@@ -565,8 +565,8 @@ int main(int argc __attribute__((unused)), char *argv[])
fprintf(stderr, "Incorrect LOGREC_VARIABLE_RECORD_2LSN_EXAMPLE "
"data read(%d) "
"type %u, strid %u, len %lu != %lu + 14, hdr len: %d, "
- "ref1(%lu,0x%lx), ref2(%lu,0x%lx), "
- "lsn(%lu,0x%lx)\n",
+ "ref1" LSN_FMT ", ref2" LSN_FMT ", "
+ "lsn" LSN_FMT "\n",
i, (uint) rec.type, (uint) rec.short_trid,
(ulong) rec.record_length, (ulong) rec_len,
len, LSN_IN_PARTS(ref1), LSN_IN_PARTS(ref2),
@@ -577,7 +577,7 @@ int main(int argc __attribute__((unused)), char *argv[])
{
fprintf(stderr,
"Incorrect LOGREC_VARIABLE_RECORD_2LSN_EXAMPLE "
- "in whole rec read lsn(%lu,0x%lx)\n",
+ "in whole rec read lsn" LSN_FMT "\n",
LSN_IN_PARTS(rec.lsn));
goto err;
}
@@ -606,7 +606,7 @@ int main(int argc __attribute__((unused)), char *argv[])
fprintf(stderr, "Incorrect LOGREC_FIXED_RECORD_0LSN_EXAMPLE "
"data read(%d)\n"
"type %u, strid %u, len %u, i: %u, 4: %u 5: %u "
- "lsn(%lu,0x%lx)\n",
+ "lsn" LSN_FMT "\n",
i, (uint) rec.type, (uint) rec.short_trid,
(uint) rec.record_length,
(uint) uint4korr(rec.header), (uint) rec.header[4],
@@ -629,7 +629,7 @@ int main(int argc __attribute__((unused)), char *argv[])
fprintf(stderr, "Incorrect LOGREC_VARIABLE_RECORD_0LSN_EXAMPLE "
"data read(%d) "
"type %u, strid %u, len %lu != %lu, hdr len: %d, "
- "lsn(%lu,0x%lx)\n",
+ "lsn" LSN_FMT "\n",
i, (uint) rec.type, (uint) rec.short_trid,
(ulong) rec.record_length, (ulong) rec_len,
len, LSN_IN_PARTS(rec.lsn));
@@ -639,7 +639,7 @@ int main(int argc __attribute__((unused)), char *argv[])
{
fprintf(stderr,
"Incorrect LOGREC_VARIABLE_RECORD_2LSN_EXAMPLE "
- "in whole rec read lsn(%lu,0x%lx)\n",
+ "in whole rec read lsn" LSN_FMT "\n",
LSN_IN_PARTS(rec.lsn));
goto err;
}
diff --git a/storage/maria/unittest/ma_test_loghandler_first_lsn-t.c b/storage/maria/unittest/ma_test_loghandler_first_lsn-t.c
index 9306be3958e..87f0cf6c72c 100644
--- a/storage/maria/unittest/ma_test_loghandler_first_lsn-t.c
+++ b/storage/maria/unittest/ma_test_loghandler_first_lsn-t.c
@@ -102,7 +102,7 @@ int main(int argc __attribute__((unused)), char *argv[])
first_lsn= translog_first_lsn_in_log();
if (first_lsn != LSN_IMPOSSIBLE)
{
- fprintf(stderr, "Incorrect first lsn response (%lu,0x%lx).",
+ fprintf(stderr, "Incorrect first lsn response " LSN_FMT ".",
LSN_IN_PARTS(first_lsn));
translog_destroy();
exit(1);
@@ -140,8 +140,8 @@ int main(int argc __attribute__((unused)), char *argv[])
first_lsn= translog_first_lsn_in_log();
if (first_lsn != theor_lsn)
{
- fprintf(stderr, "Incorrect first lsn: (%lu,0x%lx) "
- " theoretical first: (%lu,0x%lx)\n",
+ fprintf(stderr, "Incorrect first lsn: " LSN_FMT " "
+ " theoretical first: " LSN_FMT "\n",
LSN_IN_PARTS(first_lsn), LSN_IN_PARTS(theor_lsn));
translog_destroy();
exit(1);
diff --git a/storage/maria/unittest/ma_test_loghandler_max_lsn-t.c b/storage/maria/unittest/ma_test_loghandler_max_lsn-t.c
index 9ff391b3814..6cb2fcb55e5 100644
--- a/storage/maria/unittest/ma_test_loghandler_max_lsn-t.c
+++ b/storage/maria/unittest/ma_test_loghandler_max_lsn-t.c
@@ -94,7 +94,7 @@ int main(int argc __attribute__((unused)), char *argv[])
}
if (max_lsn != LSN_IMPOSSIBLE)
{
- fprintf(stderr, "Incorrect first lsn response (%lu,0x%lx).",
+ fprintf(stderr, "Incorrect first lsn response " LSN_FMT ".",
LSN_IN_PARTS(max_lsn));
translog_destroy();
exit(1);
@@ -138,8 +138,8 @@ int main(int argc __attribute__((unused)), char *argv[])
}
if (max_lsn != last_lsn)
{
- fprintf(stderr, "Incorrect max lsn: (%lu,0x%lx) "
- " last lsn on first file: (%lu,0x%lx)\n",
+ fprintf(stderr, "Incorrect max lsn: " LSN_FMT " "
+ " last lsn on first file: " LSN_FMT "\n",
LSN_IN_PARTS(max_lsn), LSN_IN_PARTS(last_lsn));
translog_destroy();
exit(1);
diff --git a/storage/maria/unittest/ma_test_loghandler_multigroup-t.c b/storage/maria/unittest/ma_test_loghandler_multigroup-t.c
index 39cff18a3ab..69bc2f70f8c 100644
--- a/storage/maria/unittest/ma_test_loghandler_multigroup-t.c
+++ b/storage/maria/unittest/ma_test_loghandler_multigroup-t.c
@@ -485,7 +485,7 @@ int main(int argc __attribute__((unused)), char *argv[])
fprintf(stderr, "Incorrect LOGREC_FIXED_RECORD_0LSN_EXAMPLE "
"data read(0)\n"
"type %u, strid %u, len %u, i: %u, 4: %u 5: %u, "
- "lsn(0x%lu,0x%lx)\n",
+ "lsn" LSN_FMT "\n",
(uint) rec.type, (uint) rec.short_trid, (uint) rec.record_length,
(uint)uint4korr(rec.header), (uint) rec.header[4],
(uint) rec.header[5],
@@ -533,7 +533,7 @@ int main(int argc __attribute__((unused)), char *argv[])
{
fprintf(stderr, "Incorrect LOGREC_FIXED_RECORD_1LSN_EXAMPLE "
"data read(%d)"
- "type %u, strid %u, len %u, ref(%lu,0x%lx), lsn(%lu,0x%lx)\n",
+ "type %u, strid %u, len %u, ref" LSN_FMT ", lsn" LSN_FMT "\n",
i, (uint) rec.type, (uint) rec.short_trid,
(uint) rec.record_length,
LSN_IN_PARTS(ref), LSN_IN_PARTS(rec.lsn));
@@ -563,9 +563,9 @@ int main(int argc __attribute__((unused)), char *argv[])
{
fprintf(stderr, "Incorrect LOGREC_FIXED_RECORD_2LSN_EXAMPLE "
"data read(%d) "
- "type %u, strid %u, len %u, ref1(%lu,0x%lx), "
- "ref2(%lu,0x%lx) %x%x%x%x%x%x%x%x%x "
- "lsn(%lu,0x%lx)\n",
+ "type %u, strid %u, len %u, ref1" LSN_FMT ", "
+ "ref2" LSN_FMT " %x%x%x%x%x%x%x%x%x "
+ "lsn" LSN_FMT "\n",
i, (uint) rec.type, (uint) rec.short_trid,
(uint) rec.record_length,
LSN_IN_PARTS(ref1), LSN_IN_PARTS(ref2),
@@ -611,7 +611,7 @@ int main(int argc __attribute__((unused)), char *argv[])
"data read(%d)"
"type %u (%d), strid %u (%d), len %lu, %lu + 7 (%d), "
"hdr len: %d (%d), "
- "ref(%lu,0x%lx), lsn(%lu,0x%lx) (%d), content: %d\n",
+ "ref" LSN_FMT ", lsn" LSN_FMT " (%d), content: %d\n",
i, (uint) rec.type,
rec.type !=LOGREC_VARIABLE_RECORD_1LSN_EXAMPLE,
(uint) rec.short_trid,
@@ -631,7 +631,7 @@ int main(int argc __attribute__((unused)), char *argv[])
{
fprintf(stderr,
"Incorrect LOGREC_VARIABLE_RECORD_1LSN_EXAMPLE "
- "in whole rec read lsn(%lu,0x%lx)\n",
+ "in whole rec read lsn" LSN_FMT "\n",
LSN_IN_PARTS(rec.lsn));
translog_free_record_header(&rec);
goto err;
@@ -655,8 +655,8 @@ int main(int argc __attribute__((unused)), char *argv[])
fprintf(stderr, "Incorrect LOGREC_VARIABLE_RECORD_2LSN_EXAMPLE "
" data read(%d) "
"type %u, strid %u, len %lu != %lu + 14, hdr len: %d, "
- "ref1(%lu,0x%lx), ref2(%lu,0x%lx), "
- "lsn(%lu,0x%lx)\n",
+ "ref1" LSN_FMT ", ref2" LSN_FMT ", "
+ "lsn" LSN_FMT "\n",
i, (uint) rec.type, (uint) rec.short_trid,
(ulong) rec.record_length, (ulong) rec_len,
len,
@@ -669,7 +669,7 @@ int main(int argc __attribute__((unused)), char *argv[])
{
fprintf(stderr,
"Incorrect LOGREC_VARIABLE_RECORD_2LSN_EXAMPLE "
- "in whole rec read lsn(%lu,0x%lx)\n",
+ "in whole rec read lsn" LSN_FMT "\n",
LSN_IN_PARTS(rec.lsn));
translog_free_record_header(&rec);
goto err;
@@ -701,7 +701,7 @@ int main(int argc __attribute__((unused)), char *argv[])
fprintf(stderr, "Incorrect LOGREC_FIXED_RECORD_0LSN_EXAMPLE "
"data read(%d)\n"
"type %u, strid %u, len %u, i: %u, 4: %u 5: %u "
- "lsn(%lu,0x%lx)\n",
+ "lsn" LSN_FMT "\n",
i, (uint) rec.type, (uint) rec.short_trid,
(uint) rec.record_length,
(uint)uint4korr(rec.header), (uint) rec.header[4],
@@ -725,7 +725,7 @@ int main(int argc __attribute__((unused)), char *argv[])
fprintf(stderr, "Incorrect LOGREC_VARIABLE_RECORD_0LSN_EXAMPLE "
"data read(%d) "
"type %u, strid %u, len %lu != %lu, hdr len: %d, "
- "lsn(%lu,0x%lx)\n",
+ "lsn" LSN_FMT "\n",
i, (uint) rec.type, (uint) rec.short_trid,
(ulong) rec.record_length, (ulong) rec_len,
len, LSN_IN_PARTS(rec.lsn));
@@ -736,7 +736,7 @@ int main(int argc __attribute__((unused)), char *argv[])
{
fprintf(stderr,
"Incorrect LOGREC_VARIABLE_RECORD_2LSN_EXAMPLE "
- "in whole rec read lsn(%lu,0x%lx)\n",
+ "in whole rec read lsn" LSN_FMT "\n",
LSN_IN_PARTS(rec.lsn));
translog_free_record_header(&rec);
goto err;
diff --git a/storage/maria/unittest/ma_test_loghandler_multithread-t.c b/storage/maria/unittest/ma_test_loghandler_multithread-t.c
index ff843937fcd..8b834527dd0 100644
--- a/storage/maria/unittest/ma_test_loghandler_multithread-t.c
+++ b/storage/maria/unittest/ma_test_loghandler_multithread-t.c
@@ -486,7 +486,7 @@ int main(int argc __attribute__((unused)),
fprintf(stderr, "Incorrect LOGREC_FIXED_RECORD_0LSN_EXAMPLE "
"data read(%d)\n"
"type %u, strid %u %u, len %u, i: %u %u, "
- "lsn(%lu,0x%lx) (%lu,0x%lx)\n",
+ "lsn" LSN_FMT " " LSN_FMT "\n",
i, (uint) rec.type,
(uint) rec.short_trid, (uint) uint2korr(rec.header),
(uint) rec.record_length,
@@ -510,7 +510,7 @@ int main(int argc __attribute__((unused)),
"data read(%d) "
"thread: %d, iteration %d, stage %d\n"
"type %u (%d), len %d, length %lu %lu (%d) "
- "lsn(%lu,0x%lx) (%lu,0x%lx)\n",
+ "lsn" LSN_FMT " " LSN_FMT "\n",
i, (uint) rec.short_trid, index, stage,
(uint) rec.type, (rec.type !=
LOGREC_VARIABLE_RECORD_0LSN_EXAMPLE),
@@ -526,7 +526,7 @@ int main(int argc __attribute__((unused)),
{
fprintf(stderr,
"Incorrect LOGREC_VARIABLE_RECORD_0LSN_EXAMPLE "
- "in whole rec read lsn(%lu,0x%lx)\n",
+ "in whole rec read lsn" LSN_FMT "\n",
LSN_IN_PARTS(rec.lsn));
translog_free_record_header(&rec);
goto err;
diff --git a/storage/maria/unittest/ma_test_loghandler_noflush-t.c b/storage/maria/unittest/ma_test_loghandler_noflush-t.c
index 9555cc0842f..2be6dc1da3c 100644
--- a/storage/maria/unittest/ma_test_loghandler_noflush-t.c
+++ b/storage/maria/unittest/ma_test_loghandler_noflush-t.c
@@ -116,7 +116,7 @@ int main(int argc __attribute__((unused)), char *argv[])
"data read(0)\n"
"type: %u (%d) strid: %u (%d) len: %u (%d) i: %u (%d), "
"4: %u (%d) 5: %u (%d) "
- "lsn(%lu,0x%lx) (%d)\n",
+ "lsn" LSN_FMT " (%d)\n",
(uint) rec.type, (rec.type !=LOGREC_FIXED_RECORD_0LSN_EXAMPLE),
(uint) rec.short_trid, (rec.short_trid != 0),
(uint) rec.record_length, (rec.record_length != 6),
diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc
index 55b16a286fc..4d272fc2ee6 100644
--- a/storage/myisam/ha_myisam.cc
+++ b/storage/myisam/ha_myisam.cc
@@ -340,8 +340,8 @@ int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out,
}
}
}
- DBUG_PRINT("loop", ("found: 0x%lx recpos: %d minpos: %d length: %d",
- (long) found, recpos, minpos, length));
+ DBUG_PRINT("loop", ("found: %p recpos: %d minpos: %d length: %d",
+ found, recpos, minpos, length));
if (recpos != minpos)
{
/* reserve space for null bits */
diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c
index 71ab372f2aa..917e142af91 100644
--- a/storage/myisam/mi_check.c
+++ b/storage/myisam/mi_check.c
@@ -2916,8 +2916,8 @@ int mi_repair_parallel(HA_CHECK *param, register MI_INFO *info,
*/
sort_param[i].read_cache= ((rep_quick || !i) ? param->read_cache :
new_data_cache);
- DBUG_PRINT("io_cache_share", ("thread: %u read_cache: 0x%lx",
- i, (long) &sort_param[i].read_cache));
+ DBUG_PRINT("io_cache_share", ("thread: %u read_cache: %p",
+ i, &sort_param[i].read_cache));
/*
two approaches: the same amount of memory for each thread
@@ -3968,7 +3968,7 @@ static int sort_ft_key_write(MI_SORT_PARAM *sort_param, const void *a)
key_block++;
sort_info->key_block=key_block;
sort_param->keyinfo=& sort_info->info->s->ft2_keyinfo;
- ft_buf->count=((uchar*) ft_buf->buf - p)/val_len;
+ ft_buf->count=(int)((uchar*) ft_buf->buf - p)/val_len;
/* flushing buffer to second-level tree */
for (error=0; !error && p < (uchar*) ft_buf->buf; p+= val_len)
diff --git a/storage/myisam/mi_close.c b/storage/myisam/mi_close.c
index f0a82bcef04..ad1d3074a27 100644
--- a/storage/myisam/mi_close.c
+++ b/storage/myisam/mi_close.c
@@ -27,8 +27,8 @@ int mi_close(register MI_INFO *info)
int error=0,flag;
MYISAM_SHARE *share=info->s;
DBUG_ENTER("mi_close");
- DBUG_PRINT("enter",("base: 0x%lx reopen: %u locks: %u",
- (long) info, (uint) share->reopen,
+ DBUG_PRINT("enter",("base: %p reopen: %u locks: %u",
+ info, (uint) share->reopen,
(uint) share->tot_locks));
if (info->open_list.data)
diff --git a/storage/myisam/mi_delete.c b/storage/myisam/mi_delete.c
index 99185844b72..92b368d92dc 100644
--- a/storage/myisam/mi_delete.c
+++ b/storage/myisam/mi_delete.c
@@ -410,8 +410,8 @@ static int del(register MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *key,
MYISAM_SHARE *share=info->s;
MI_KEY_PARAM s_temp;
DBUG_ENTER("del");
- DBUG_PRINT("enter",("leaf_page: %ld keypos: 0x%lx", (long) leaf_page,
- (ulong) keypos));
+ DBUG_PRINT("enter",("leaf_page: %lld keypos: %p", leaf_page,
+ keypos));
DBUG_DUMP("leaf_buff",(uchar*) leaf_buff,mi_getint(leaf_buff));
endpos=leaf_buff+mi_getint(leaf_buff);
@@ -516,8 +516,8 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo,
MI_KEY_PARAM s_temp;
MYISAM_SHARE *share=info->s;
DBUG_ENTER("underflow");
- DBUG_PRINT("enter",("leaf_page: %ld keypos: 0x%lx",(long) leaf_page,
- (ulong) keypos));
+ DBUG_PRINT("enter",("leaf_page: %lld keypos: %p",leaf_page,
+ keypos));
DBUG_DUMP("anc_buff",(uchar*) anc_buff,mi_getint(anc_buff));
DBUG_DUMP("leaf_buff",(uchar*) leaf_buff,mi_getint(leaf_buff));
@@ -597,8 +597,8 @@ static int underflow(register MI_INFO *info, register MI_KEYDEF *keyinfo,
else
{ /* Page is full */
endpos=anc_buff+anc_length;
- DBUG_PRINT("test",("anc_buff: 0x%lx endpos: 0x%lx",
- (long) anc_buff, (long) endpos));
+ DBUG_PRINT("test",("anc_buff: %p endpos: %p",
+ anc_buff, endpos));
if (keypos != anc_buff+2+key_reflength &&
!_mi_get_last_key(info,keyinfo,anc_buff,anc_key,keypos,&length))
goto err;
@@ -776,7 +776,7 @@ static uint remove_key(MI_KEYDEF *keyinfo, uint nod_flag,
int s_length;
uchar *start;
DBUG_ENTER("remove_key");
- DBUG_PRINT("enter",("keypos: 0x%lx page_end: 0x%lx",(long) keypos, (long) page_end));
+ DBUG_PRINT("enter",("keypos: %p page_end: %p",keypos, page_end));
start=keypos;
if (!(keyinfo->flag &
diff --git a/storage/myisam/mi_dynrec.c b/storage/myisam/mi_dynrec.c
index 5241d72014c..c4baca3d89f 100644
--- a/storage/myisam/mi_dynrec.c
+++ b/storage/myisam/mi_dynrec.c
@@ -1344,8 +1344,8 @@ ulong _mi_rec_unpack(register MI_INFO *info, register uchar *to, uchar *from,
err:
my_errno= HA_ERR_WRONG_IN_RECORD;
- DBUG_PRINT("error",("to_end: 0x%lx -> 0x%lx from_end: 0x%lx -> 0x%lx",
- (long) to, (long) to_end, (long) from, (long) from_end));
+ DBUG_PRINT("error",("to_end: %p -> %p from_end: %p -> %p",
+ to, to_end, from, from_end));
DBUG_DUMP("from",(uchar*) info->rec_buff,info->s->base.min_pack_length);
DBUG_RETURN(MY_FILE_ERROR);
} /* _mi_rec_unpack */
diff --git a/storage/myisam/mi_preload.c b/storage/myisam/mi_preload.c
index 1a2d5aac94f..e0d23e0fca0 100644
--- a/storage/myisam/mi_preload.c
+++ b/storage/myisam/mi_preload.c
@@ -41,7 +41,7 @@
int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves)
{
uint i;
- ulong length, block_length= 0;
+ size_t length, block_length= 0;
uchar *buff= NULL;
MYISAM_SHARE* share= info->s;
uint keys= share->state.header.keys;
@@ -68,7 +68,7 @@ int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves)
}
}
else
- block_length= share->key_cache->param_block_size;
+ block_length= (size_t)share->key_cache->param_block_size;
length= info->preload_buff_size/block_length * block_length;
set_if_bigger(length, block_length);
@@ -84,7 +84,7 @@ int mi_preload(MI_INFO *info, ulonglong key_map, my_bool ignore_leaves)
{
/* Read the next block of index file into the preload buffer */
if ((my_off_t) length > (key_file_length-pos))
- length= (ulong) (key_file_length-pos);
+ length= (size_t) (key_file_length-pos);
if (mysql_file_pread(share->kfile, (uchar*) buff, length, pos,
MYF(MY_FAE|MY_FNABP)))
goto err;
diff --git a/storage/myisam/mi_rkey.c b/storage/myisam/mi_rkey.c
index b70922b0505..1dddb8b49ad 100644
--- a/storage/myisam/mi_rkey.c
+++ b/storage/myisam/mi_rkey.c
@@ -32,8 +32,8 @@ int mi_rkey(MI_INFO *info, uchar *buf, int inx, const uchar *key,
uint pack_key_length, use_key_length, nextflag;
ICP_RESULT res= ICP_NO_MATCH;
DBUG_ENTER("mi_rkey");
- DBUG_PRINT("enter", ("base: 0x%lx buf: 0x%lx inx: %d search_flag: %d",
- (long) info, (long) buf, inx, search_flag));
+ DBUG_PRINT("enter", ("base: %p buf: %p inx: %d search_flag: %d",
+ info, buf, inx, search_flag));
if ((inx = _mi_check_index(info,inx)) < 0)
DBUG_RETURN(my_errno);
diff --git a/storage/myisam/mi_search.c b/storage/myisam/mi_search.c
index 60a34c641ad..79ed846ce7e 100644
--- a/storage/myisam/mi_search.c
+++ b/storage/myisam/mi_search.c
@@ -267,8 +267,8 @@ int _mi_seq_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page,
mi_print_error(info->s, HA_ERR_CRASHED);
my_errno=HA_ERR_CRASHED;
DBUG_PRINT("error",
- ("Found wrong key: length: %u page: 0x%lx end: 0x%lx",
- length, (long) page, (long) end));
+ ("Found wrong key: length: %u page: %p end: %p",
+ length, page, end));
DBUG_RETURN(MI_FOUND_WRONG_KEY);
}
if ((flag=ha_key_cmp(keyinfo->seg,t_buff,key,key_len,comp_flag,
@@ -284,7 +284,7 @@ int _mi_seq_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page,
if (flag == 0)
memcpy(buff,t_buff,length); /* Result is first key */
*last_key= page == end;
- DBUG_PRINT("exit",("flag: %d ret_pos: 0x%lx", flag, (long) *ret_pos));
+ DBUG_PRINT("exit",("flag: %d ret_pos: %p", flag, *ret_pos));
DBUG_RETURN(flag);
} /* _mi_seq_search */
@@ -419,8 +419,8 @@ int _mi_prefix_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page,
mi_print_error(info->s, HA_ERR_CRASHED);
my_errno=HA_ERR_CRASHED;
DBUG_PRINT("error",
- ("Found wrong key: length: %u page: 0x%lx end: %lx",
- length, (long) page, (long) end));
+ ("Found wrong key: length: %u page: %p end: %p",
+ length, page, end));
DBUG_RETURN(MI_FOUND_WRONG_KEY);
}
@@ -554,7 +554,7 @@ int _mi_prefix_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page,
*last_key= page == end;
- DBUG_PRINT("exit",("flag: %d ret_pos: 0x%lx", flag, (long) *ret_pos));
+ DBUG_PRINT("exit",("flag: %d ret_pos: %p", flag, *ret_pos));
DBUG_RETURN(flag);
} /* _mi_prefix_search */
@@ -816,8 +816,8 @@ uint _mi_get_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag,
if (length > keyseg->length)
{
DBUG_PRINT("error",
- ("Found too long null packed key: %u of %u at 0x%lx",
- length, keyseg->length, (long) *page_pos));
+ ("Found too long null packed key: %u of %u at %p",
+ length, keyseg->length, *page_pos));
DBUG_DUMP("key", *page_pos, 16);
mi_print_error(keyinfo->share, HA_ERR_CRASHED);
my_errno=HA_ERR_CRASHED;
@@ -873,8 +873,8 @@ uint _mi_get_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag,
}
if (length > (uint) keyseg->length)
{
- DBUG_PRINT("error",("Found too long packed key: %u of %u at 0x%lx",
- length, keyseg->length, (long) *page_pos));
+ DBUG_PRINT("error",("Found too long packed key: %u of %u at %p",
+ length, keyseg->length, *page_pos));
DBUG_DUMP("key", *page_pos, 16);
mi_print_error(keyinfo->share, HA_ERR_CRASHED);
my_errno=HA_ERR_CRASHED;
@@ -945,8 +945,8 @@ uint _mi_get_binary_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag,
if (length > keyinfo->maxlength)
{
DBUG_PRINT("error",
- ("Found too long binary packed key: %u of %u at 0x%lx",
- length, keyinfo->maxlength, (long) *page_pos));
+ ("Found too long binary packed key: %u of %u at %p",
+ length, keyinfo->maxlength, *page_pos));
DBUG_DUMP("key", *page_pos, 16);
goto crashed; /* Wrong key */
}
@@ -1003,8 +1003,8 @@ uint _mi_get_binary_pack_key(register MI_KEYDEF *keyinfo, uint nod_flag,
length-=tmp;
from=page; from_end=page_end;
}
- DBUG_PRINT("info",("key: 0x%lx from: 0x%lx length: %u",
- (long) key, (long) from, length));
+ DBUG_PRINT("info",("key: %p from: %p length: %u",
+ key, from, length));
memmove((uchar*) key, (uchar*) from, (size_t) length);
key+=length;
from+=length;
@@ -1077,7 +1077,7 @@ uchar *_mi_get_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page,
}
}
}
- DBUG_PRINT("exit",("page: 0x%lx length: %u", (long) page,
+ DBUG_PRINT("exit",("page: %p length: %u", page,
*return_key_length));
DBUG_RETURN(page);
} /* _mi_get_key */
@@ -1130,8 +1130,8 @@ uchar *_mi_get_last_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page,
uint nod_flag;
uchar *lastpos;
DBUG_ENTER("_mi_get_last_key");
- DBUG_PRINT("enter",("page: 0x%lx endpos: 0x%lx", (long) page,
- (long) endpos));
+ DBUG_PRINT("enter",("page:%p endpos: %p", page,
+ endpos));
nod_flag=mi_test_if_nod(page);
if (! (keyinfo->flag & (HA_VAR_LENGTH_KEY | HA_BINARY_PACK_KEY)))
@@ -1151,15 +1151,15 @@ uchar *_mi_get_last_key(MI_INFO *info, MI_KEYDEF *keyinfo, uchar *page,
*return_key_length=(*keyinfo->get_key)(keyinfo,nod_flag,&page,lastkey);
if (*return_key_length == 0)
{
- DBUG_PRINT("error",("Couldn't find last key: page: 0x%lx",
- (long) page));
+ DBUG_PRINT("error",("Couldn't find last key: page: %p",
+ page));
mi_print_error(info->s, HA_ERR_CRASHED);
my_errno=HA_ERR_CRASHED;
DBUG_RETURN(0);
}
}
}
- DBUG_PRINT("exit",("lastpos: 0x%lx length: %u", (long) lastpos,
+ DBUG_PRINT("exit",("lastpos: %p length: %u", lastpos,
*return_key_length));
DBUG_RETURN(lastpos);
} /* _mi_get_last_key */
@@ -1245,9 +1245,9 @@ int _mi_search_next(register MI_INFO *info, register MI_KEYDEF *keyinfo,
uint nod_flag;
uchar lastkey[HA_MAX_KEY_BUFF];
DBUG_ENTER("_mi_search_next");
- DBUG_PRINT("enter",("nextflag: %u lastpos: %lu int_keypos: %lu",
- nextflag, (ulong) info->lastpos,
- (ulong) info->int_keypos));
+ DBUG_PRINT("enter",("nextflag: %u lastpos: %llu int_keypos: %p",
+ nextflag, info->lastpos,
+ info->int_keypos));
DBUG_EXECUTE("key",_mi_print_key(DBUG_FILE,keyinfo->seg,key,key_length););
/* Force full read if we are at last key or if we are not on a leaf
@@ -1697,8 +1697,8 @@ _mi_calc_var_pack_key_length(MI_KEYDEF *keyinfo,uint nod_flag,uchar *next_key,
ref_length=0;
next_length_pack=0;
}
- DBUG_PRINT("test",("length: %d next_key: 0x%lx", length,
- (long) next_key));
+ DBUG_PRINT("test",("length: %d next_key: %p", length,
+ next_key));
{
uint tmp_length;
diff --git a/storage/myisam/mi_write.c b/storage/myisam/mi_write.c
index ff12f335af1..3ba25d84e62 100644
--- a/storage/myisam/mi_write.c
+++ b/storage/myisam/mi_write.c
@@ -471,7 +471,7 @@ int _mi_insert(register MI_INFO *info, register MI_KEYDEF *keyinfo,
uchar *endpos, *prev_key;
MI_KEY_PARAM s_temp;
DBUG_ENTER("_mi_insert");
- DBUG_PRINT("enter",("key_pos: 0x%lx", (long) key_pos));
+ DBUG_PRINT("enter",("key_pos: %p", key_pos));
DBUG_EXECUTE("key",_mi_print_key(DBUG_FILE,keyinfo->seg,key,USE_WHOLE_KEY););
nod_flag=mi_test_if_nod(anc_buff);
@@ -492,8 +492,8 @@ int _mi_insert(register MI_INFO *info, register MI_KEYDEF *keyinfo,
{
DBUG_PRINT("test",("t_length: %d ref_len: %d",
t_length,s_temp.ref_length));
- DBUG_PRINT("test",("n_ref_len: %d n_length: %d key_pos: 0x%lx",
- s_temp.n_ref_length,s_temp.n_length, (long) s_temp.key));
+ DBUG_PRINT("test",("n_ref_len: %d n_length: %d key_pos: %p",
+ s_temp.n_ref_length,s_temp.n_length, s_temp.key));
}
#endif
if (t_length > 0)
@@ -693,8 +693,8 @@ uchar *_mi_find_half_pos(uint nod_flag, MI_KEYDEF *keyinfo, uchar *page,
} while (page < end);
*return_key_length=length;
*after_key=page;
- DBUG_PRINT("exit",("returns: 0x%lx page: 0x%lx half: 0x%lx",
- (long) lastpos, (long) page, (long) end));
+ DBUG_PRINT("exit",("returns: %p page: %p half: %p",
+ lastpos, page, end));
DBUG_RETURN(lastpos);
} /* _mi_find_half_pos */
@@ -750,8 +750,8 @@ static uchar *_mi_find_last_pos(MI_KEYDEF *keyinfo, uchar *page,
*return_key_length=last_length;
*after_key=lastpos;
- DBUG_PRINT("exit",("returns: 0x%lx page: 0x%lx end: 0x%lx",
- (long) prevpos,(long) page,(long) end));
+ DBUG_PRINT("exit",("returns: %p page: %p end: %p",
+ prevpos, page, end));
DBUG_RETURN(prevpos);
} /* _mi_find_last_pos */
diff --git a/storage/myisam/myisamchk.c b/storage/myisam/myisamchk.c
index c31ac8cd0d4..2e36c364453 100644
--- a/storage/myisam/myisamchk.c
+++ b/storage/myisam/myisamchk.c
@@ -28,7 +28,7 @@
static uint decode_bits;
static char **default_argv;
static const char *load_default_groups[]= { "myisamchk", 0 };
-static const char *set_collation_name, *opt_tmpdir;
+static char *set_collation_name, *opt_tmpdir;
static CHARSET_INFO *set_collation;
static long opt_myisam_block_size;
static long opt_key_cache_block_size;
@@ -1116,7 +1116,7 @@ static int myisamchk(HA_CHECK *param, char * filename)
{
if (param->testflag & (T_EXTEND | T_MEDIUM))
(void) init_key_cache(dflt_key_cache,opt_key_cache_block_size,
- param->use_buffers, 0, 0, 0, 0);
+ (size_t)param->use_buffers, 0, 0, 0, 0);
(void) init_io_cache(&param->read_cache,datafile,
(uint) param->read_buffer_length,
READ_CACHE,
diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc
index dbcd7a0cb3b..0cf8305463b 100644
--- a/storage/myisammrg/ha_myisammrg.cc
+++ b/storage/myisammrg/ha_myisammrg.cc
@@ -359,7 +359,7 @@ int ha_myisammrg::open(const char *name, int mode __attribute__((unused)),
uint test_if_locked_arg)
{
DBUG_ENTER("ha_myisammrg::open");
- DBUG_PRINT("myrg", ("name: '%s' table: 0x%lx", name, (long) table));
+ DBUG_PRINT("myrg", ("name: '%s' table: %p", name, table));
DBUG_PRINT("myrg", ("test_if_locked_arg: %u", test_if_locked_arg));
/* Must not be used when table is open. */
@@ -413,8 +413,8 @@ int ha_myisammrg::open(const char *name, int mode __attribute__((unused)),
DBUG_RETURN(my_errno ? my_errno : -1);
/* purecov: end */
}
- DBUG_PRINT("myrg", ("MYRG_INFO: 0x%lx child tables: %u",
- (long) file, file->tables));
+ DBUG_PRINT("myrg", ("MYRG_INFO: %p child tables: %u",
+ file, file->tables));
DBUG_RETURN(0);
}
@@ -440,8 +440,8 @@ int ha_myisammrg::add_children_list(void)
List_iterator_fast<Mrg_child_def> it(child_def_list);
Mrg_child_def *mrg_child_def;
DBUG_ENTER("ha_myisammrg::add_children_list");
- DBUG_PRINT("myrg", ("table: '%s'.'%s' 0x%lx", this->table->s->db.str,
- this->table->s->table_name.str, (long) this->table));
+ DBUG_PRINT("myrg", ("table: '%s'.'%s' %p", this->table->s->db.str,
+ this->table->s->table_name.str, this->table));
/* Must call this with open table. */
DBUG_ASSERT(this->file);
@@ -699,12 +699,12 @@ extern "C" MI_INFO *myisammrg_attach_children_callback(void *callback_param)
if ((child->file->ht->db_type != DB_TYPE_MYISAM) ||
!(myisam= ((ha_myisam*) child->file)->file_ptr()))
{
- DBUG_PRINT("error", ("no MyISAM handle for child table: '%s'.'%s' 0x%lx",
+ DBUG_PRINT("error", ("no MyISAM handle for child table: '%s'.'%s' %p",
child->s->db.str, child->s->table_name.str,
- (long) child));
+ child));
}
- DBUG_PRINT("myrg", ("MyISAM handle: 0x%lx", (long) myisam));
+ DBUG_PRINT("myrg", ("MyISAM handle: %p", myisam));
end:
@@ -810,8 +810,8 @@ int ha_myisammrg::attach_children(void)
int error;
Mrg_attach_children_callback_param param(parent_l, this->children_l, child_def_list);
DBUG_ENTER("ha_myisammrg::attach_children");
- DBUG_PRINT("myrg", ("table: '%s'.'%s' 0x%lx", table->s->db.str,
- table->s->table_name.str, (long) table));
+ DBUG_PRINT("myrg", ("table: '%s'.'%s' %p", table->s->db.str,
+ table->s->table_name.str, table));
DBUG_PRINT("myrg", ("test_if_locked: %u", this->test_if_locked));
/* Must call this with open table. */
@@ -1684,7 +1684,7 @@ uint ha_myisammrg::count_query_cache_dependant_tables(uint8 *tables_type)
(*tables_type)|= HA_CACHE_TBL_NONTRANSACT;
but it has no effect because HA_CACHE_TBL_NONTRANSACT is 0
*/
- return (file->end_table - file->open_tables);
+ return (uint)(file->end_table - file->open_tables);
}
diff --git a/storage/perfschema/pfs.cc b/storage/perfschema/pfs.cc
index c411025d148..7440df6a487 100644
--- a/storage/perfschema/pfs.cc
+++ b/storage/perfschema/pfs.cc
@@ -1265,7 +1265,7 @@ static int build_prefix(const LEX_CSTRING *prefix, const char *category,
out_ptr+= len;
*out_ptr= '/';
out_ptr++;
- *output_length= out_ptr - output;
+ *output_length= (int)(out_ptr - output);
return 0;
}
@@ -1942,7 +1942,7 @@ static void set_thread_id_v1(PSI_thread *thread, ulonglong processlist_id)
PFS_thread *pfs= reinterpret_cast<PFS_thread*> (thread);
if (unlikely(pfs == NULL))
return;
- pfs->m_processlist_id= processlist_id;
+ pfs->m_processlist_id= (ulong)processlist_id;
}
/**
@@ -5121,7 +5121,7 @@ static void set_socket_info_v1(PSI_socket *socket,
/** Set socket descriptor */
if (fd != NULL)
- pfs->m_fd= *fd;
+ pfs->m_fd= (uint)*fd;
/** Set raw socket address and length */
if (likely(addr != NULL && addr_len > 0))
diff --git a/storage/perfschema/pfs_account.cc b/storage/perfschema/pfs_account.cc
index 4e3a6d8d1d3..60467a764e3 100644
--- a/storage/perfschema/pfs_account.cc
+++ b/storage/perfschema/pfs_account.cc
@@ -202,7 +202,7 @@ static void set_account_key(PFS_account_key *key,
}
ptr[0]= 0;
ptr++;
- key->m_key_length= ptr - &key->m_hash_key[0];
+ key->m_key_length= (uint)(ptr - &key->m_hash_key[0]);
}
PFS_account *
diff --git a/storage/perfschema/pfs_engine_table.cc b/storage/perfschema/pfs_engine_table.cc
index f3c6edcef59..2dfad937e87 100644
--- a/storage/perfschema/pfs_engine_table.cc
+++ b/storage/perfschema/pfs_engine_table.cc
@@ -1359,7 +1359,7 @@ bool pfs_show_status(handlerton *hton, THD *thd,
break;
}
- buflen= longlong10_to_str(size, buf, 10) - buf;
+ buflen= (uint)(longlong10_to_str(size, buf, 10) - buf);
if (print(thd,
PERFORMANCE_SCHEMA_str.str, PERFORMANCE_SCHEMA_str.length,
name, strlen(name),
diff --git a/storage/perfschema/pfs_host.cc b/storage/perfschema/pfs_host.cc
index 7da34a6d5f6..15db3e80a94 100644
--- a/storage/perfschema/pfs_host.cc
+++ b/storage/perfschema/pfs_host.cc
@@ -190,7 +190,7 @@ static void set_host_key(PFS_host_key *key,
}
ptr[0]= 0;
ptr++;
- key->m_key_length= ptr - &key->m_hash_key[0];
+ key->m_key_length= (uint)(ptr - &key->m_hash_key[0]);
}
PFS_host *find_or_create_host(PFS_thread *thread,
diff --git a/storage/perfschema/pfs_instr.cc b/storage/perfschema/pfs_instr.cc
index a9ea6faf212..9cb2c68dbaf 100644
--- a/storage/perfschema/pfs_instr.cc
+++ b/storage/perfschema/pfs_instr.cc
@@ -952,7 +952,7 @@ PFS_thread* create_thread(PFS_thread_class *klass, const void *identity,
pfs->m_thread_internal_id=
PFS_atomic::add_u64(&thread_internal_id_counter, 1);
pfs->m_parent_thread_internal_id= 0;
- pfs->m_processlist_id= processlist_id;
+ pfs->m_processlist_id= (ulong)processlist_id;
pfs->m_event_id= 1;
pfs->m_stmt_lock.set_allocated();
pfs->m_session_lock.set_allocated();
@@ -1604,7 +1604,7 @@ PFS_socket* create_socket(PFS_socket_class *klass, const my_socket *fd,
uint addr_len_used= addr_len;
if (fd != NULL)
- fd_used= *fd;
+ fd_used= (int)*fd;
if (addr_len_used > sizeof(sockaddr_storage))
addr_len_used= sizeof(sockaddr_storage);
diff --git a/storage/perfschema/pfs_instr_class.cc b/storage/perfschema/pfs_instr_class.cc
index c67c120283d..ed22c36f0d2 100644
--- a/storage/perfschema/pfs_instr_class.cc
+++ b/storage/perfschema/pfs_instr_class.cc
@@ -449,7 +449,7 @@ static void set_table_share_key(PFS_table_share_key *key,
ptr+= table_name_length;
ptr[0]= 0;
ptr++;
- key->m_key_length= ptr - &key->m_hash_key[0];
+ key->m_key_length= (uint)(ptr - &key->m_hash_key[0]);
if (lower_case_table_names)
{
diff --git a/storage/perfschema/pfs_setup_actor.cc b/storage/perfschema/pfs_setup_actor.cc
index f12d70840c1..40534d65da0 100644
--- a/storage/perfschema/pfs_setup_actor.cc
+++ b/storage/perfschema/pfs_setup_actor.cc
@@ -152,7 +152,7 @@ static void set_setup_actor_key(PFS_setup_actor_key *key,
ptr+= role_length;
ptr[0]= 0;
ptr++;
- key->m_key_length= ptr - &key->m_hash_key[0];
+ key->m_key_length= (uint)(ptr - &key->m_hash_key[0]);
}
int insert_setup_actor(const String *user, const String *host, const String *role)
diff --git a/storage/perfschema/pfs_setup_object.cc b/storage/perfschema/pfs_setup_object.cc
index 809fe8edd24..9fca9a6b945 100644
--- a/storage/perfschema/pfs_setup_object.cc
+++ b/storage/perfschema/pfs_setup_object.cc
@@ -145,7 +145,7 @@ static void set_setup_object_key(PFS_setup_object_key *key,
ptr+= object_length;
ptr[0]= 0;
ptr++;
- key->m_key_length= ptr - &key->m_hash_key[0];
+ key->m_key_length= (uint)(ptr - &key->m_hash_key[0]);
}
int insert_setup_object(enum_object_type object_type, const String *schema,
diff --git a/storage/perfschema/pfs_user.cc b/storage/perfschema/pfs_user.cc
index 528457fe017..d2266e1f7ef 100644
--- a/storage/perfschema/pfs_user.cc
+++ b/storage/perfschema/pfs_user.cc
@@ -190,7 +190,7 @@ static void set_user_key(PFS_user_key *key,
}
ptr[0]= 0;
ptr++;
- key->m_key_length= ptr - &key->m_hash_key[0];
+ key->m_key_length= (uint)(ptr - &key->m_hash_key[0]);
}
PFS_user *
diff --git a/storage/perfschema/table_events_waits.cc b/storage/perfschema/table_events_waits.cc
index cd32d81519e..01f8cd5e6b5 100644
--- a/storage/perfschema/table_events_waits.cc
+++ b/storage/perfschema/table_events_waits.cc
@@ -279,7 +279,7 @@ int table_events_waits_common::make_socket_object_columns(volatile PFS_events_wa
safe_socket->m_addr_len);
/* Convert port number to a string (length includes ':') */
- int port_len= int10_to_str(port, (port_str+1), 10) - port_str + 1;
+ int port_len= (int)(int10_to_str(port, (port_str+1), 10) - port_str + 1);
/* OBJECT NAME */
m_row.m_object_name_length= ip_length + port_len;
diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc
index 9845e77e041..4915617e3aa 100644
--- a/storage/rocksdb/ha_rocksdb.cc
+++ b/storage/rocksdb/ha_rocksdb.cc
@@ -3239,14 +3239,14 @@ static bool rocksdb_show_status(handlerton *const hton, THD *const thd,
// NB! We're replacing hyphens with underscores in output to better match
// the existing naming convention.
if (rdb->GetIntProperty("rocksdb.is-write-stopped", &v)) {
- snprintf(buf, sizeof(buf), "rocksdb.is_write_stopped COUNT : %lu\n", v);
+ snprintf(buf, sizeof(buf), "rocksdb.is_write_stopped COUNT : %llu\n", (ulonglong)v);
str.append(buf);
}
if (rdb->GetIntProperty("rocksdb.actual-delayed-write-rate", &v)) {
snprintf(buf, sizeof(buf), "rocksdb.actual_delayed_write_rate "
- "COUNT : %lu\n",
- v);
+ "COUNT : %llu\n",
+ (ulonglong)v);
str.append(buf);
}
@@ -4178,7 +4178,7 @@ std::vector<std::string> rdb_get_open_table_names(void) {
}
std::vector<std::string> Rdb_open_tables_map::get_table_names(void) const {
- ulong i;
+ size_t i;
const Rdb_table_handler *table_handler;
std::vector<std::string> names;
diff --git a/storage/rocksdb/rdb_sst_info.cc b/storage/rocksdb/rdb_sst_info.cc
index 8ca00bb30f5..72abfab5d6b 100644
--- a/storage/rocksdb/rdb_sst_info.cc
+++ b/storage/rocksdb/rdb_sst_info.cc
@@ -104,9 +104,11 @@ Rdb_sst_file_ordered::Rdb_sst_file::put(const rocksdb::Slice &key,
const rocksdb::Slice &value) {
DBUG_ASSERT(m_sst_file_writer != nullptr);
+#ifdef __GNUC__
// Add the specified key/value to the sst file writer
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif
return m_sst_file_writer->Add(key, value);
}
diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc
index be57c04806b..0d162d78030 100644
--- a/storage/sphinx/ha_sphinx.cc
+++ b/storage/sphinx/ha_sphinx.cc
@@ -1735,7 +1735,7 @@ bool CSphSEQuery::ParseField ( char * sField )
}
} else if ( !strcmp ( sName, "override" ) ) // name,type,id:value,id:value,...
{
- char * sName = NULL;
+ sName = NULL;
int iType = 0;
CSphSEQuery::Override_t * pOverride = NULL;
@@ -1790,7 +1790,7 @@ bool CSphSEQuery::ParseField ( char * sField )
if (!( sRest = strchr ( sRest, ':' ) )) break; *sRest++ = '\0';
if (!( sRest - sId )) break;
- char * sValue = sRest;
+ sValue = sRest;
if ( ( sRest = strchr ( sRest, ',' ) )!=NULL )
*sRest++ = '\0';
if ( !*sValue )
@@ -2209,7 +2209,7 @@ int ha_sphinx::Connect ( const char * sHost, ushort uPort )
}
char sError[512];
- int iSocket = socket ( iDomain, SOCK_STREAM, 0 );
+ int iSocket = (int) socket ( iDomain, SOCK_STREAM, 0 );
if ( iSocket<0 )
{
@@ -2670,7 +2670,7 @@ bool ha_sphinx::UnpackStats ( CSphSEStats * pStats )
assert ( pStats );
char * pCurSave = m_pCur;
- for ( uint i=0; i<m_iMatchesTotal && m_pCur<m_pResponseEnd-sizeof(uint32); i++ ) // NOLINT
+ for ( uint m=0; m<m_iMatchesTotal && m_pCur<m_pResponseEnd-sizeof(uint32); m++ ) // NOLINT
{
m_pCur += m_bId64 ? 12 : 8; // skip id+weight
for ( uint32 i=0; i<m_iAttrs && m_pCur<m_pResponseEnd-sizeof(uint32); i++ ) // NOLINT
@@ -3155,7 +3155,7 @@ int ha_sphinx::get_rec ( byte * buf, const byte *, uint )
}
}
- af->store ( sBuf, pCur-sBuf, &my_charset_bin );
+ af->store ( sBuf, uint(pCur-sBuf), &my_charset_bin );
}
break;
@@ -3382,39 +3382,39 @@ ha_rows ha_sphinx::records_in_range ( uint, key_range *, key_range * )
// currently provided for doing that.
//
// Called from handle.cc by ha_create_table().
-int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * )
+int ha_sphinx::create ( const char * name, TABLE * table_arg, HA_CREATE_INFO * )
{
SPH_ENTER_METHOD();
char sError[256];
CSphSEShare tInfo;
- if ( !ParseUrl ( &tInfo, table, true ) )
+ if ( !ParseUrl ( &tInfo, table_arg, true ) )
SPH_RET(-1);
// check SphinxAPI table
for ( ; !tInfo.m_bSphinxQL; )
{
// check system fields (count and types)
- if ( table->s->fields<SPHINXSE_SYSTEM_COLUMNS )
+ if ( table_arg->s->fields<SPHINXSE_SYSTEM_COLUMNS )
{
my_snprintf ( sError, sizeof(sError), "%s: there MUST be at least %d columns",
name, SPHINXSE_SYSTEM_COLUMNS );
break;
}
- if ( !IsIDField ( table->field[0] ) )
+ if ( !IsIDField ( table_arg->field[0] ) )
{
my_snprintf ( sError, sizeof(sError), "%s: 1st column (docid) MUST be unsigned integer or bigint", name );
break;
}
- if ( !IsIntegerFieldType ( table->field[1]->type() ) )
+ if ( !IsIntegerFieldType ( table_arg->field[1]->type() ) )
{
my_snprintf ( sError, sizeof(sError), "%s: 2nd column (weight) MUST be integer or bigint", name );
break;
}
- enum_field_types f2 = table->field[2]->type();
+ enum_field_types f2 = table_arg->field[2]->type();
if ( f2!=MYSQL_TYPE_VARCHAR
&& f2!=MYSQL_TYPE_BLOB && f2!=MYSQL_TYPE_MEDIUM_BLOB && f2!=MYSQL_TYPE_LONG_BLOB && f2!=MYSQL_TYPE_TINY_BLOB )
{
@@ -3424,25 +3424,25 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * )
// check attributes
int i;
- for ( i=3; i<(int)table->s->fields; i++ )
+ for ( i=3; i<(int)table_arg->s->fields; i++ )
{
- enum_field_types eType = table->field[i]->type();
+ enum_field_types eType = table_arg->field[i]->type();
if ( eType!=MYSQL_TYPE_TIMESTAMP && !IsIntegerFieldType(eType) && eType!=MYSQL_TYPE_VARCHAR && eType!=MYSQL_TYPE_FLOAT )
{
my_snprintf ( sError, sizeof(sError), "%s: %dth column (attribute %s) MUST be integer, bigint, timestamp, varchar, or float",
- name, i+1, table->field[i]->field_name.str );
+ name, i+1, table_arg->field[i]->field_name.str );
break;
}
}
- if ( i!=(int)table->s->fields )
+ if ( i!=(int)table_arg->s->fields )
break;
// check index
if (
- table->s->keys!=1 ||
- table->key_info[0].user_defined_key_parts!=1 ||
- strcasecmp ( table->key_info[0].key_part[0].field->field_name.str, table->field[2]->field_name.str ) )
+ table_arg->s->keys!=1 ||
+ table_arg->key_info[0].user_defined_key_parts!=1 ||
+ strcasecmp ( table_arg->key_info[0].key_part[0].field->field_name.str, table->field[2]->field_name.str ) )
{
my_snprintf ( sError, sizeof(sError), "%s: there must be an index on '%s' column",
name, table->field[2]->field_name.str );
@@ -3460,13 +3460,13 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * )
sError[0] = '\0';
// check that 1st column is id, is of int type, and has an index
- if ( strcmp ( table->field[0]->field_name.str, "id" ) )
+ if ( strcmp ( table_arg->field[0]->field_name.str, "id" ) )
{
my_snprintf ( sError, sizeof(sError), "%s: 1st column must be called 'id'", name );
break;
}
- if ( !IsIDField ( table->field[0] ) )
+ if ( !IsIDField ( table_arg->field[0] ) )
{
my_snprintf ( sError, sizeof(sError), "%s: 'id' column must be INT UNSIGNED or BIGINT", name );
break;
@@ -3474,22 +3474,22 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * )
// check index
if (
- table->s->keys!=1 ||
- table->key_info[0].user_defined_key_parts!=1 ||
- strcasecmp ( table->key_info[0].key_part[0].field->field_name.str, "id" ) )
+ table_arg->s->keys!=1 ||
+ table_arg->key_info[0].user_defined_key_parts!=1 ||
+ strcasecmp ( table_arg->key_info[0].key_part[0].field->field_name.str, "id" ) )
{
my_snprintf ( sError, sizeof(sError), "%s: 'id' column must be indexed", name );
break;
}
// check column types
- for ( int i=1; i<(int)table->s->fields; i++ )
+ for ( int i=1; i<(int)table_arg->s->fields; i++ )
{
- enum_field_types eType = table->field[i]->type();
+ enum_field_types eType = table_arg->field[i]->type();
if ( eType!=MYSQL_TYPE_TIMESTAMP && !IsIntegerFieldType(eType) && eType!=MYSQL_TYPE_VARCHAR && eType!=MYSQL_TYPE_FLOAT )
{
my_snprintf ( sError, sizeof(sError), "%s: column %d(%s) is of unsupported type (use int/bigint/timestamp/varchar/float)",
- name, i+1, table->field[i]->field_name.str );
+ name, i+1, table_arg->field[i]->field_name.str );
break;
}
}
@@ -3504,7 +3504,7 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * )
if ( sError[0] )
{
my_error ( ER_CANT_CREATE_TABLE, MYF(0),
- table->s->db.str, table->s->table_name, sError );
+ table_arg->s->db.str, table_arg->s->table_name, sError );
SPH_RET(-1);
}
diff --git a/storage/sphinx/snippets_udf.cc b/storage/sphinx/snippets_udf.cc
index 2a290bd8de9..1eab4b4ed3d 100644
--- a/storage/sphinx/snippets_udf.cc
+++ b/storage/sphinx/snippets_udf.cc
@@ -450,7 +450,7 @@ int CSphUrl::Connect()
char * pError = NULL;
do
{
- iSocket = socket ( iDomain, SOCK_STREAM, 0 );
+ iSocket = (int)socket ( iDomain, SOCK_STREAM, 0 );
if ( iSocket==-1 )
{
pError = "Failed to create client socket";
@@ -642,7 +642,7 @@ struct CSphSnippets
}
#define STRING CHECK_TYPE(STRING_RESULT)
-#define INT CHECK_TYPE(INT_RESULT); int iValue = *(long long *)pArgs->args[i]
+#define INT CHECK_TYPE(INT_RESULT); int iValue = (int)*(long long *)pArgs->args[i]
my_bool sphinx_snippets_init ( UDF_INIT * pUDF, UDF_ARGS * pArgs, char * sMessage )
{
diff --git a/storage/spider/ha_spider.cc b/storage/spider/ha_spider.cc
index 7ed8661cc74..8d7064a16f9 100644
--- a/storage/spider/ha_spider.cc
+++ b/storage/spider/ha_spider.cc
@@ -7793,7 +7793,7 @@ int ha_spider::cmp_ref(
*field;
field++
) {
- if ((ret = (*field)->cmp_binary_offset(ptr_diff)))
+ if ((ret = (*field)->cmp_binary_offset((uint)ptr_diff)))
{
DBUG_PRINT("info",("spider different at %s",
(*field)->field_name.str));
diff --git a/storage/spider/hs_client/config.cpp b/storage/spider/hs_client/config.cpp
index b546230ca03..3bf0f3e5bdf 100644
--- a/storage/spider/hs_client/config.cpp
+++ b/storage/spider/hs_client/config.cpp
@@ -263,8 +263,8 @@ parse_args(int argc, char **argv, config& conf)
}
if (!(param = new conf_param()))
continue;
- uint32 key_len = eq - arg;
- uint32 val_len = strlen(eq + 1);
+ uint32 key_len = (uint32)(eq - arg);
+ uint32 val_len = (uint32)(strlen(eq + 1));
if (
param->key.reserve(key_len + 1) ||
param->val.reserve(val_len + 1)
diff --git a/storage/spider/hs_client/hstcpcli.cpp b/storage/spider/hs_client/hstcpcli.cpp
index fed87803f9c..60da87b9f20 100644
--- a/storage/spider/hs_client/hstcpcli.cpp
+++ b/storage/spider/hs_client/hstcpcli.cpp
@@ -497,7 +497,7 @@ hstcpcli::response_recv(size_t& num_flds_r)
char *const err_begin = start;
read_token(start, finish);
char *const err_end = start;
- String e = String(err_begin, err_end - err_begin, &my_charset_bin);
+ String e = String(err_begin, (uint32)(err_end - err_begin), &my_charset_bin);
if (!e.length()) {
e = String("unknown_error", &my_charset_bin);
}
diff --git a/storage/spider/hs_client/socket.cpp b/storage/spider/hs_client/socket.cpp
index c61b39d140f..0717acf0da1 100644
--- a/storage/spider/hs_client/socket.cpp
+++ b/storage/spider/hs_client/socket.cpp
@@ -223,7 +223,7 @@ socket_set_options(auto_file& fd, const socket_args& args, String& err_r)
int
socket_open(auto_file& fd, const socket_args& args, String& err_r)
{
- fd.reset(socket(args.family, args.socktype, args.protocol));
+ fd.reset((int)socket(args.family, args.socktype, args.protocol));
if (fd.get() < 0) {
return errno_string("socket", errno, err_r);
}
@@ -253,7 +253,7 @@ socket_connect(auto_file& fd, const socket_args& args, String& err_r)
int
socket_bind(auto_file& fd, const socket_args& args, String& err_r)
{
- fd.reset(socket(args.family, args.socktype, args.protocol));
+ fd.reset((int)socket(args.family, args.socktype, args.protocol));
if (fd.get() < 0) {
return errno_string("socket", errno, err_r);
}
@@ -300,7 +300,7 @@ int
socket_accept(int listen_fd, auto_file& fd, const socket_args& args,
sockaddr_storage& addr_r, socklen_t& addrlen_r, String& err_r)
{
- fd.reset(accept(listen_fd, reinterpret_cast<sockaddr *>(&addr_r),
+ fd.reset((int)accept(listen_fd, reinterpret_cast<sockaddr *>(&addr_r),
&addrlen_r));
if (fd.get() < 0) {
return errno_string("accept", errno, err_r);
diff --git a/storage/spider/spd_sys_table.cc b/storage/spider/spd_sys_table.cc
index a140fba7915..f0d24f37d6d 100644
--- a/storage/spider/spd_sys_table.cc
+++ b/storage/spider/spd_sys_table.cc
@@ -677,13 +677,13 @@ void spider_store_tables_name(
}
table->field[0]->store(
ptr_db,
- ptr_diff_table - 1,
+ (uint)(ptr_diff_table - 1),
system_charset_info);
DBUG_PRINT("info",("spider field[0]->null_bit = %d",
table->field[0]->null_bit));
table->field[1]->store(
ptr_table,
- name_length - ptr_diff_db - ptr_diff_table,
+ (uint)(name_length - ptr_diff_db - ptr_diff_table),
system_charset_info);
DBUG_PRINT("info",("spider field[1]->null_bit = %d",
table->field[1]->null_bit));
diff --git a/storage/xtradb/btr/btr0cur.cc b/storage/xtradb/btr/btr0cur.cc
index 59470b95c35..f22871deb32 100644
--- a/storage/xtradb/btr/btr0cur.cc
+++ b/storage/xtradb/btr/btr0cur.cc
@@ -3624,7 +3624,6 @@ btr_cur_pessimistic_delete(
ulint n_reserved = 0;
ibool success;
ibool ret = FALSE;
- ulint level;
mem_heap_t* heap;
ulint* offsets;
@@ -3677,6 +3676,10 @@ btr_cur_pessimistic_delete(
#endif /* UNIV_ZIP_DEBUG */
}
+ if (flags == 0) {
+ lock_update_delete(block, rec);
+ }
+
if (UNIV_UNLIKELY(page_get_n_recs(page) < 2)
&& UNIV_UNLIKELY(dict_index_get_page(index)
!= buf_block_get_page_no(block))) {
@@ -3691,13 +3694,7 @@ btr_cur_pessimistic_delete(
goto return_after_reservations;
}
- if (flags == 0) {
- lock_update_delete(block, rec);
- }
-
- level = btr_page_get_level(page, mtr);
-
- if (level > 0
+ if (!page_is_leaf(page)
&& UNIV_UNLIKELY(rec == page_rec_get_next(
page_get_infimum_rec(page)))) {
@@ -3720,6 +3717,7 @@ btr_cur_pessimistic_delete(
on a page, we have to change the father node pointer
so that it is equal to the new leftmost node pointer
on the page */
+ ulint level = btr_page_get_level(page, mtr);
btr_node_ptr_delete(index, block, mtr);
diff --git a/storage/xtradb/fil/fil0pagecompress.cc b/storage/xtradb/fil/fil0pagecompress.cc
index 7a61f17836b..edc932f36f5 100644
--- a/storage/xtradb/fil/fil0pagecompress.cc
+++ b/storage/xtradb/fil/fil0pagecompress.cc
@@ -106,6 +106,9 @@ fil_compress_page(
int comp_level = level;
ulint header_len = FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE;
ulint write_size = 0;
+#if HAVE_LZO
+ lzo_uint write_size_lzo = write_size;
+#endif
/* Cache to avoid change during function execution */
ulint comp_method = innodb_compression_algorithm;
bool allocated = false;
@@ -207,7 +210,9 @@ fil_compress_page(
#ifdef HAVE_LZO
case PAGE_LZO_ALGORITHM:
err = lzo1x_1_15_compress(
- buf, len, out_buf+header_len, &write_size, out_buf+UNIV_PAGE_SIZE);
+ buf, len, out_buf+header_len, &write_size_lzo, out_buf+UNIV_PAGE_SIZE);
+
+ write_size = write_size_lzo;
if (err != LZO_E_OK || write_size > UNIV_PAGE_SIZE-header_len) {
if (space && !space->printed_compression_failure) {
@@ -604,8 +609,11 @@ fil_decompress_page(
#ifdef HAVE_LZO
case PAGE_LZO_ALGORITHM: {
ulint olen = 0;
+ lzo_uint olen_lzo = olen;
err = lzo1x_decompress((const unsigned char *)buf+header_len,
- actual_size,(unsigned char *)in_buf, &olen, NULL);
+ actual_size,(unsigned char *)in_buf, &olen_lzo, NULL);
+
+ olen = olen_lzo;
if (err != LZO_E_OK || (olen == 0 || olen > UNIV_PAGE_SIZE)) {
ib_logf(IB_LOG_LEVEL_ERROR,
diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc
index 64b93a07bd7..062b8d14a27 100644
--- a/storage/xtradb/handler/ha_innodb.cc
+++ b/storage/xtradb/handler/ha_innodb.cc
@@ -4602,7 +4602,7 @@ innobase_commit_low(
trx_commit_for_mysql(trx);
}
#ifdef WITH_WSREP
- if (wsrep_on(thd)) { thd_proc_info(thd, tmp); }
+ if (thd && wsrep_on(thd)) { thd_proc_info(thd, tmp); }
#endif /* WITH_WSREP */
}
diff --git a/storage/xtradb/include/lock0lock.h b/storage/xtradb/include/lock0lock.h
index 923c463aa22..5aff67db0ee 100644
--- a/storage/xtradb/include/lock0lock.h
+++ b/storage/xtradb/include/lock0lock.h
@@ -1029,6 +1029,8 @@ std::string
lock_get_info(
const lock_t*);
+#define wsrep_on_trx(trx) ((trx)->mysql_thd && wsrep_on((trx)->mysql_thd))
+
#ifndef UNIV_NONINL
#include "lock0lock.ic"
#endif
diff --git a/storage/xtradb/lock/lock0lock.cc b/storage/xtradb/lock/lock0lock.cc
index 40ab9d9403c..ce51effbab0 100644
--- a/storage/xtradb/lock/lock0lock.cc
+++ b/storage/xtradb/lock/lock0lock.cc
@@ -1835,7 +1835,7 @@ lock_rec_other_has_conflicting(
#ifdef WITH_WSREP
if (lock_rec_has_to_wait(TRUE, trx, mode, lock, is_supremum)) {
- if (wsrep_on(trx->mysql_thd)) {
+ if (wsrep_on_trx(trx)) {
trx_mutex_enter(lock->trx);
wsrep_kill_victim(trx, lock);
trx_mutex_exit(lock->trx);
@@ -2290,7 +2290,7 @@ lock_rec_create(
#ifdef WITH_WSREP
if (c_lock &&
- wsrep_on(trx->mysql_thd) &&
+ wsrep_on_trx(trx) &&
wsrep_thd_is_BF(trx->mysql_thd, FALSE)) {
lock_t *hash = (lock_t *)c_lock->hash;
lock_t *prev = NULL;
diff --git a/storage/xtradb/lock/lock0wait.cc b/storage/xtradb/lock/lock0wait.cc
index a447027e336..ca9d05a4829 100644
--- a/storage/xtradb/lock/lock0wait.cc
+++ b/storage/xtradb/lock/lock0wait.cc
@@ -197,7 +197,7 @@ wsrep_is_BF_lock_timeout(
/*====================*/
trx_t* trx) /* in: trx to check for lock priority */
{
- if (wsrep_on(trx->mysql_thd) &&
+ if (wsrep_on_trx(trx) &&
wsrep_thd_is_BF(trx->mysql_thd, FALSE)) {
fprintf(stderr, "WSREP: BF lock wait long\n");
srv_print_innodb_monitor = TRUE;
@@ -402,7 +402,7 @@ lock_wait_suspend_thread(
if (lock_wait_timeout < 100000000
&& wait_time > (double) lock_wait_timeout) {
#ifdef WITH_WSREP
- if (!wsrep_on(trx->mysql_thd) ||
+ if (!wsrep_on_trx(trx) ||
(!wsrep_is_BF_lock_timeout(trx) &&
trx->error_state != DB_DEADLOCK)) {
#endif /* WITH_WSREP */
diff --git a/storage/xtradb/log/log0crypt.cc b/storage/xtradb/log/log0crypt.cc
index 9509797dde3..a5fbbab17ef 100644
--- a/storage/xtradb/log/log0crypt.cc
+++ b/storage/xtradb/log/log0crypt.cc
@@ -245,7 +245,7 @@ next:
ENCRYPTION_FLAG_DECRYPT
@param[in] offs offset to block
@param[in] space_id tablespace id
-@return true if successfull, false in case of failure
+@return true if successful, false in case of failure
*/
static
bool
diff --git a/storage/xtradb/row/row0log.cc b/storage/xtradb/row/row0log.cc
index f49ff2f1530..e15a4f4f5d8 100644
--- a/storage/xtradb/row/row0log.cc
+++ b/storage/xtradb/row/row0log.cc
@@ -182,7 +182,6 @@ struct row_log_t {
dict_table_t* table; /*!< table that is being rebuilt,
or NULL when this is a secondary
index that is being created online */
- dict_index_t* index; /*!< index to be build */
bool same_pk;/*!< whether the definition of the PRIMARY KEY
has remained the same */
const dtuple_t* add_cols;
@@ -385,7 +384,7 @@ row_log_online_op(
byte_offset,
index->table->space)) {
log->error = DB_DECRYPTION_FAILED;
- goto err_exit;
+ goto write_failed;
}
srv_stats.n_rowlog_blocks_encrypted.inc();
@@ -479,13 +478,15 @@ static MY_ATTRIBUTE((nonnull))
void
row_log_table_close_func(
/*=====================*/
- row_log_t* log, /*!< in/out: online rebuild log */
+ dict_index_t* index, /*!< in/out: online rebuilt index */
#ifdef UNIV_DEBUG
const byte* b, /*!< in: end of log record */
#endif /* UNIV_DEBUG */
ulint size, /*!< in: size of log record */
ulint avail) /*!< in: available size for log record */
{
+ row_log_t* log = index->online_log;
+
ut_ad(mutex_own(&log->mutex));
if (size >= avail) {
@@ -520,7 +521,7 @@ row_log_table_close_func(
srv_sort_buf_size,
log->crypt_tail,
byte_offset,
- log->index->table->space)) {
+ index->table->space)) {
log->error = DB_DECRYPTION_FAILED;
goto err_exit;
}
@@ -559,11 +560,11 @@ err_exit:
}
#ifdef UNIV_DEBUG
-# define row_log_table_close(log, b, size, avail) \
- row_log_table_close_func(log, b, size, avail)
+# define row_log_table_close(index, b, size, avail) \
+ row_log_table_close_func(index, b, size, avail)
#else /* UNIV_DEBUG */
# define row_log_table_close(log, b, size, avail) \
- row_log_table_close_func(log, size, avail)
+ row_log_table_close_func(index, size, avail)
#endif /* UNIV_DEBUG */
/******************************************************//**
@@ -735,8 +736,7 @@ row_log_table_delete(
b += ext_size;
}
- row_log_table_close(
- index->online_log, b, mrec_size, avail_size);
+ row_log_table_close(index, b, mrec_size, avail_size);
}
func_exit:
@@ -859,8 +859,7 @@ row_log_table_low_redundant(
b + extra_size, index, tuple->fields, tuple->n_fields);
b += size;
- row_log_table_close(
- index->online_log, b, mrec_size, avail_size);
+ row_log_table_close(index, b, mrec_size, avail_size);
}
mem_heap_free(heap);
@@ -969,8 +968,7 @@ row_log_table_low(
memcpy(b, rec, rec_offs_data_size(offsets));
b += rec_offs_data_size(offsets);
- row_log_table_close(
- index->online_log, b, mrec_size, avail_size);
+ row_log_table_close(index, b, mrec_size, avail_size);
}
}
@@ -2675,7 +2673,7 @@ all_done:
/* If encryption is enabled decrypt buffer after reading it
from file system. */
- if (log_tmp_is_encrypted()) {
+ if (success && log_tmp_is_encrypted()) {
if (!log_tmp_block_decrypt(buf,
srv_sort_buf_size,
index->online_log->crypt_head,
@@ -2996,7 +2994,6 @@ row_log_allocate(
log->head.total = 0;
log->path = path;
log->crypt_tail = log->crypt_head = NULL;
- log->index = index;
dict_index_set_online_status(index, ONLINE_INDEX_CREATION);
index->online_log = log;
@@ -3542,7 +3539,7 @@ all_done:
/* If encryption is enabled decrypt buffer after reading it
from file system. */
- if (log_tmp_is_encrypted()) {
+ if (success && log_tmp_is_encrypted()) {
if (!log_tmp_block_decrypt(buf,
srv_sort_buf_size,
index->online_log->crypt_head,
diff --git a/storage/xtradb/row/row0merge.cc b/storage/xtradb/row/row0merge.cc
index 9d299bf6288..4eb6534161d 100644
--- a/storage/xtradb/row/row0merge.cc
+++ b/storage/xtradb/row/row0merge.cc
@@ -887,8 +887,8 @@ row_merge_read(
success = os_file_read_no_error_handling_int_fd(fd, buf,
ofs, srv_sort_buf_size);
- /* For encrypted tables, decrypt data after reading and copy data */
- if (log_tmp_is_encrypted()) {
+ /* If encryption is enabled decrypt buffer */
+ if (success && log_tmp_is_encrypted()) {
if (!log_tmp_block_decrypt(buf, srv_sort_buf_size,
crypt_buf, ofs, space)) {
return (FALSE);
@@ -3918,22 +3918,13 @@ row_merge_build_indexes(
DBUG_RETURN(DB_OUT_OF_MEMORY);
}
- /* Get crypt data from tablespace if present. We should be protected
- from concurrent DDL (e.g. drop table) by MDL-locks. */
- fil_space_t* space = fil_space_acquire(new_table->space);
-
- if (!space) {
- DBUG_RETURN(DB_TABLESPACE_NOT_FOUND);
- }
-
- /* If temporal log file is encrypted allocate memory for
+ /* If temporary log file is encrypted allocate memory for
encryption/decryption. */
if (log_tmp_is_encrypted()) {
crypt_block = static_cast<row_merge_block_t*>(
os_mem_alloc_large(&block_size));
if (crypt_block == NULL) {
- fil_space_release(space);
DBUG_RETURN(DB_OUT_OF_MEMORY);
}
}
@@ -4313,9 +4304,5 @@ func_exit:
}
}
- if (space) {
- fil_space_release(space);
- }
-
DBUG_RETURN(error);
}
diff --git a/storage/xtradb/row/row0upd.cc b/storage/xtradb/row/row0upd.cc
index b09021722be..f7aee8643ef 100644
--- a/storage/xtradb/row/row0upd.cc
+++ b/storage/xtradb/row/row0upd.cc
@@ -1989,7 +1989,7 @@ row_upd_sec_index_entry(
}
#ifdef WITH_WSREP
if (err == DB_SUCCESS && !referenced &&
- wsrep_on(trx->mysql_thd) &&
+ wsrep_on_trx(trx) &&
!wsrep_thd_is_BF(trx->mysql_thd, FALSE) &&
!(parent && que_node_get_type(parent) ==
QUE_NODE_UPDATE &&
@@ -2279,7 +2279,7 @@ err_exit:
}
}
#ifdef WITH_WSREP
- if (!referenced && wsrep_on(trx->mysql_thd) &&
+ if (!referenced && wsrep_on_trx(trx) &&
!(parent && que_node_get_type(parent) == QUE_NODE_UPDATE &&
((upd_node_t*)parent)->cascade_node == node) &&
foreign
@@ -2548,8 +2548,7 @@ row_upd_del_mark_clust_rec(
}
#ifdef WITH_WSREP
trx_t* trx = thr_get_trx(thr) ;
-
- if (err == DB_SUCCESS && !referenced && trx && wsrep_on(trx->mysql_thd) &&
+ if (err == DB_SUCCESS && !referenced && wsrep_on_trx(trx) &&
!(parent && que_node_get_type(parent) == QUE_NODE_UPDATE &&
((upd_node_t*)parent)->cascade_node == node) &&
foreign