Diffstat (limited to 'storage/innobase/page/page0zip.cc')
-rw-r--r--  storage/innobase/page/page0zip.cc | 126
1 file changed, 64 insertions(+), 62 deletions(-)
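
The hunks below replace InnoDB's UNIV_MEM_* assertion macros with the server-wide MEM_* wrappers and swap the compile-time UNIV_PAGE_SIZE for the runtime srv_page_size. For orientation only, here is a minimal sketch of how such wrappers could be layered on Valgrind's memcheck client requests; this is an assumption for illustration, not the tree's actual my_valgrind.h definitions, which may also cover MSAN/ASAN builds:

/* Hypothetical mapping of the MEM_* wrappers onto Valgrind memcheck
   client requests, with no-op fallbacks when Valgrind support is not
   compiled in (HAVE_valgrind is the guard the patch itself uses). */
#ifdef HAVE_valgrind
# include <valgrind/memcheck.h>
# define MEM_UNDEFINED(a, len)          VALGRIND_MAKE_MEM_UNDEFINED(a, len)
# define MEM_MAKE_DEFINED(a, len)       VALGRIND_MAKE_MEM_DEFINED(a, len)
# define MEM_CHECK_ADDRESSABLE(a, len)  VALGRIND_CHECK_MEM_IS_ADDRESSABLE(a, len)
# define MEM_CHECK_DEFINED(a, len)      VALGRIND_CHECK_MEM_IS_DEFINED(a, len)
#else
# define MEM_UNDEFINED(a, len)          ((void) 0)
# define MEM_MAKE_DEFINED(a, len)       ((void) 0)
# define MEM_CHECK_ADDRESSABLE(a, len)  ((void) 0)
# define MEM_CHECK_DEFINED(a, len)      ((void) 0)
#endif

Under this reading, MEM_CHECK_DEFINED asserts that a range is both addressable and initialized, MEM_CHECK_ADDRESSABLE only that it is writable, and MEM_MAKE_DEFINED/MEM_UNDEFINED adjust the definedness state, which is why the zlib output buffer is explicitly marked defined in the HAVE_valgrind-guarded hunk below.
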
diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc
index b507945f076..c722cdd619a 100644
--- a/storage/innobase/page/page0zip.cc
+++ b/storage/innobase/page/page0zip.cc
@@ -888,9 +888,9 @@ page_zip_compress_node_ptrs(
/* Only leaf nodes may contain externally stored columns. */
ut_ad(!rec_offs_any_extern(offsets));
- UNIV_MEM_ASSERT_RW(rec, rec_offs_data_size(offsets));
- UNIV_MEM_ASSERT_RW(rec - rec_offs_extra_size(offsets),
- rec_offs_extra_size(offsets));
+ MEM_CHECK_DEFINED(rec, rec_offs_data_size(offsets));
+ MEM_CHECK_DEFINED(rec - rec_offs_extra_size(offsets),
+ rec_offs_extra_size(offsets));
/* Compress the extra bytes. */
c_stream->avail_in = static_cast<uInt>(
@@ -953,8 +953,8 @@ page_zip_compress_sec(
- c_stream->next_in);
if (UNIV_LIKELY(c_stream->avail_in != 0)) {
- UNIV_MEM_ASSERT_RW(c_stream->next_in,
- c_stream->avail_in);
+ MEM_CHECK_DEFINED(c_stream->next_in,
+ c_stream->avail_in);
err = deflate(c_stream, Z_NO_FLUSH);
if (UNIV_UNLIKELY(err != Z_OK)) {
break;
@@ -996,9 +996,9 @@ page_zip_compress_clust_ext(
int err;
ulint i;
- UNIV_MEM_ASSERT_RW(rec, rec_offs_data_size(offsets));
- UNIV_MEM_ASSERT_RW(rec - rec_offs_extra_size(offsets),
- rec_offs_extra_size(offsets));
+ MEM_CHECK_DEFINED(rec, rec_offs_data_size(offsets));
+ MEM_CHECK_DEFINED(rec - rec_offs_extra_size(offsets),
+ rec_offs_extra_size(offsets));
for (i = 0; i < rec_offs_n_fields(offsets); i++) {
ulint len;
@@ -1136,9 +1136,9 @@ page_zip_compress_clust(
ULINT_UNDEFINED, &heap);
ut_ad(rec_offs_n_fields(offsets)
== dict_index_get_n_fields(index));
- UNIV_MEM_ASSERT_RW(rec, rec_offs_data_size(offsets));
- UNIV_MEM_ASSERT_RW(rec - rec_offs_extra_size(offsets),
- rec_offs_extra_size(offsets));
+ MEM_CHECK_DEFINED(rec, rec_offs_data_size(offsets));
+ MEM_CHECK_DEFINED(rec - rec_offs_extra_size(offsets),
+ rec_offs_extra_size(offsets));
/* Compress the extra bytes. */
c_stream->avail_in = static_cast<uInt>(
@@ -1185,9 +1185,9 @@ page_zip_compress_clust(
== rec_get_nth_field(rec, offsets,
trx_id_col + 1, &len));
ut_ad(len == DATA_ROLL_PTR_LEN);
- UNIV_MEM_ASSERT_RW(rec, rec_offs_data_size(offsets));
- UNIV_MEM_ASSERT_RW(rec - rec_offs_extra_size(offsets),
- rec_offs_extra_size(offsets));
+ MEM_CHECK_DEFINED(rec, rec_offs_data_size(offsets));
+ MEM_CHECK_DEFINED(rec - rec_offs_extra_size(offsets),
+ rec_offs_extra_size(offsets));
/* Compress any preceding bytes. */
c_stream->avail_in = static_cast<uInt>(
@@ -1293,7 +1293,7 @@ page_zip_compress(
&& dict_table_is_comp(index->table)
&& !dict_index_is_ibuf(index)));
- UNIV_MEM_ASSERT_RW(page, UNIV_PAGE_SIZE);
+ MEM_CHECK_DEFINED(page, srv_page_size);
/* Check the data that will be omitted. */
ut_a(!memcmp(page + (PAGE_NEW_INFIMUM - REC_N_NEW_EXTRA_BYTES),
@@ -1490,7 +1490,7 @@ page_zip_compress(
trx_id_col = ULINT_UNDEFINED;
}
- UNIV_MEM_ASSERT_RW(c_stream.next_in, c_stream.avail_in);
+ MEM_CHECK_DEFINED(c_stream.next_in, c_stream.avail_in);
err = deflate(&c_stream, Z_FULL_FLUSH);
if (err != Z_OK) {
goto zlib_error;
@@ -1544,7 +1544,7 @@ page_zip_compress(
- (c_stream.next_in - page));
ut_a(c_stream.avail_in <= UNIV_PAGE_SIZE - PAGE_ZIP_START - PAGE_DIR);
- UNIV_MEM_ASSERT_RW(c_stream.next_in, c_stream.avail_in);
+ MEM_CHECK_DEFINED(c_stream.next_in, c_stream.avail_in);
err = deflate(&c_stream, Z_FINISH);
if (UNIV_UNLIKELY(err != Z_STREAM_END)) {
@@ -1579,9 +1579,11 @@ err_exit:
ut_ad(buf + c_stream.total_out == c_stream.next_out);
ut_ad((ulint) (storage - c_stream.next_out) >= c_stream.avail_out);
+#ifdef HAVE_valgrind
/* Valgrind believes that zlib does not initialize some bits
in the last 7 or 8 bytes of the stream. Make Valgrind happy. */
- UNIV_MEM_VALID(buf, c_stream.total_out);
+ MEM_MAKE_DEFINED(buf, c_stream.total_out);
+#endif /* HAVE_valgrind */
/* Zero out the area reserved for the modification log.
Space for the end marker of the modification log is not
@@ -1613,7 +1615,7 @@ err_exit:
page_zip_compress_write_log(page_zip, page, index, mtr);
}
- UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
+ MEM_CHECK_DEFINED(page_zip->data, page_zip_get_size(page_zip));
#ifdef PAGE_ZIP_COMPRESS_DBG
if (logfile) {
@@ -3045,8 +3047,8 @@ page_zip_decompress_low(
rec_offs* offsets;
ut_ad(page_zip_simple_validate(page_zip));
- UNIV_MEM_ASSERT_W(page, UNIV_PAGE_SIZE);
- UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
+ MEM_CHECK_ADDRESSABLE(page, srv_page_size);
+ MEM_CHECK_DEFINED(page_zip->data, page_zip_get_size(page_zip));
/* The dense directory excludes the infimum and supremum records. */
n_dense = page_dir_get_n_heap(page_zip->data) - PAGE_HEAP_NO_USER_LOW;
@@ -3090,9 +3092,9 @@ page_zip_decompress_low(
#ifdef UNIV_ZIP_DEBUG
/* Clear the uncompressed page, except the header. */
- memset(PAGE_DATA + page, 0x55, UNIV_PAGE_SIZE - PAGE_DATA);
+ memset(PAGE_DATA + page, 0x55, srv_page_size - PAGE_DATA);
#endif /* UNIV_ZIP_DEBUG */
- UNIV_MEM_INVALID(PAGE_DATA + page, UNIV_PAGE_SIZE - PAGE_DATA);
+ MEM_UNDEFINED(PAGE_DATA + page, srv_page_size - PAGE_DATA);
/* Copy the page directory. */
if (UNIV_UNLIKELY(!page_zip_dir_decode(page_zip, page, recs,
@@ -3222,7 +3224,7 @@ err_exit:
}
ut_a(page_is_comp(page));
- UNIV_MEM_ASSERT_RW(page, UNIV_PAGE_SIZE);
+ MEM_CHECK_DEFINED(page, srv_page_size);
page_zip_fields_free(index);
mem_heap_free(heap);
@@ -3365,8 +3367,8 @@ page_zip_validate_low(
temp_page_buf = static_cast<byte*>(ut_malloc_nokey(2 * UNIV_PAGE_SIZE));
temp_page = static_cast<byte*>(ut_align(temp_page_buf, UNIV_PAGE_SIZE));
- UNIV_MEM_ASSERT_RW(page, UNIV_PAGE_SIZE);
- UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
+ MEM_CHECK_DEFINED(page, srv_page_size);
+ MEM_CHECK_DEFINED(page_zip->data, page_zip_get_size(page_zip));
temp_page_zip = *page_zip;
valid = page_zip_decompress_low(&temp_page_zip, temp_page, TRUE);
@@ -3587,9 +3589,9 @@ page_zip_write_rec_ext(
ulint n_ext = rec_offs_n_extern(offsets);
ut_ad(rec_offs_validate(rec, index, offsets));
- UNIV_MEM_ASSERT_RW(rec, rec_offs_data_size(offsets));
- UNIV_MEM_ASSERT_RW(rec - rec_offs_extra_size(offsets),
- rec_offs_extra_size(offsets));
+ MEM_CHECK_DEFINED(rec, rec_offs_data_size(offsets));
+ MEM_CHECK_DEFINED(rec - rec_offs_extra_size(offsets),
+ rec_offs_extra_size(offsets));
externs -= (DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN)
* (page_dir_get_n_heap(page) - PAGE_HEAP_NO_USER_LOW);
@@ -3714,10 +3716,10 @@ page_zip_write_rec(
ut_ad(page_zip_header_cmp(page_zip, page));
ut_ad(page_simple_validate_new((page_t*) page));
- UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
- UNIV_MEM_ASSERT_RW(rec, rec_offs_data_size(offsets));
- UNIV_MEM_ASSERT_RW(rec - rec_offs_extra_size(offsets),
- rec_offs_extra_size(offsets));
+ MEM_CHECK_DEFINED(page_zip->data, page_zip_get_size(page_zip));
+ MEM_CHECK_DEFINED(rec, rec_offs_data_size(offsets));
+ MEM_CHECK_DEFINED(rec - rec_offs_extra_size(offsets),
+ rec_offs_extra_size(offsets));
slot = page_zip_dir_find(page_zip, page_offset(rec));
ut_a(slot);
@@ -3972,10 +3974,10 @@ page_zip_write_blob_ptr(
ut_ad(page_is_leaf(page));
ut_ad(dict_index_is_clust(index));
- UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
- UNIV_MEM_ASSERT_RW(rec, rec_offs_data_size(offsets));
- UNIV_MEM_ASSERT_RW(rec - rec_offs_extra_size(offsets),
- rec_offs_extra_size(offsets));
+ MEM_CHECK_DEFINED(page_zip->data, page_zip_get_size(page_zip));
+ MEM_CHECK_DEFINED(rec, rec_offs_data_size(offsets));
+ MEM_CHECK_DEFINED(rec - rec_offs_extra_size(offsets),
+ rec_offs_extra_size(offsets));
blob_no = page_zip_get_n_prev_extern(page_zip, rec, index)
+ rec_get_n_extern_new(rec, index, n);
@@ -4118,8 +4120,8 @@ page_zip_write_node_ptr(
ut_ad(!page_is_leaf(page));
- UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
- UNIV_MEM_ASSERT_RW(rec, size);
+ MEM_CHECK_DEFINED(page_zip->data, page_zip_get_size(page_zip));
+ MEM_CHECK_DEFINED(rec, size);
storage = page_zip_dir_start(page_zip)
- (rec_get_heap_no_new(rec) - 1) * REC_NODE_PTR_SIZE;
@@ -4184,7 +4186,7 @@ page_zip_write_trx_id_and_roll_ptr(
ut_ad(page_is_leaf(page));
- UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
+ MEM_CHECK_DEFINED(page_zip->data, page_zip_get_size(page_zip));
storage = page_zip_dir_start(page_zip)
- (rec_get_heap_no_new(rec) - 1)
@@ -4211,10 +4213,10 @@ page_zip_write_trx_id_and_roll_ptr(
mach_write_to_7(field + DATA_TRX_ID_LEN, roll_ptr);
memcpy(storage, field, DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN);
- UNIV_MEM_ASSERT_RW(rec, rec_offs_data_size(offsets));
- UNIV_MEM_ASSERT_RW(rec - rec_offs_extra_size(offsets),
- rec_offs_extra_size(offsets));
- UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
+ MEM_CHECK_DEFINED(rec, rec_offs_data_size(offsets));
+ MEM_CHECK_DEFINED(rec - rec_offs_extra_size(offsets),
+ rec_offs_extra_size(offsets));
+ MEM_CHECK_DEFINED(page_zip->data, page_zip_get_size(page_zip));
}
/**********************************************************************//**
@@ -4244,10 +4246,10 @@ page_zip_clear_rec(
heap_no = rec_get_heap_no_new(rec);
ut_ad(heap_no >= PAGE_HEAP_NO_USER_LOW);
- UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
- UNIV_MEM_ASSERT_RW(rec, rec_offs_data_size(offsets));
- UNIV_MEM_ASSERT_RW(rec - rec_offs_extra_size(offsets),
- rec_offs_extra_size(offsets));
+ MEM_CHECK_DEFINED(page_zip->data, page_zip_get_size(page_zip));
+ MEM_CHECK_DEFINED(rec, rec_offs_data_size(offsets));
+ MEM_CHECK_DEFINED(rec - rec_offs_extra_size(offsets),
+ rec_offs_extra_size(offsets));
if (!page_is_leaf(page)) {
/* Clear node_ptr. On the compressed page,
@@ -4316,7 +4318,7 @@ page_zip_rec_set_deleted(
{
byte* slot = page_zip_dir_find(page_zip, page_offset(rec));
ut_a(slot);
- UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
+ MEM_CHECK_DEFINED(page_zip->data, page_zip_get_size(page_zip));
if (flag) {
*slot |= (PAGE_ZIP_DIR_SLOT_DEL >> 8);
} else {
@@ -4339,7 +4341,7 @@ page_zip_rec_set_owned(
{
byte* slot = page_zip_dir_find(page_zip, page_offset(rec));
ut_a(slot);
- UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
+ MEM_CHECK_DEFINED(page_zip->data, page_zip_get_size(page_zip));
if (flag) {
*slot |= (PAGE_ZIP_DIR_SLOT_OWNED >> 8);
} else {
@@ -4366,7 +4368,7 @@ page_zip_dir_insert(
ut_ad(page_rec_get_next((rec_t*) prev_rec) == rec);
ut_ad(page_zip_simple_validate(page_zip));
- UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
+ MEM_CHECK_DEFINED(page_zip->data, page_zip_get_size(page_zip));
if (page_rec_is_infimum(prev_rec)) {
/* Use the first slot. */
@@ -4445,10 +4447,10 @@ page_zip_dir_delete(
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(rec_offs_comp(offsets));
- UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
- UNIV_MEM_ASSERT_RW(rec, rec_offs_data_size(offsets));
- UNIV_MEM_ASSERT_RW(rec - rec_offs_extra_size(offsets),
- rec_offs_extra_size(offsets));
+ MEM_CHECK_DEFINED(page_zip->data, page_zip_get_size(page_zip));
+ MEM_CHECK_DEFINED(rec, rec_offs_data_size(offsets));
+ MEM_CHECK_DEFINED(rec - rec_offs_extra_size(offsets),
+ rec_offs_extra_size(offsets));
slot_rec = page_zip_dir_find(page_zip, page_offset(rec));
@@ -4537,7 +4539,7 @@ page_zip_dir_add_slot(
byte* stored;
ut_ad(page_is_comp(page_zip->data));
- UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
+ MEM_CHECK_DEFINED(page_zip->data, page_zip_get_size(page_zip));
/* Read the old n_dense (n_heap has already been incremented). */
n_dense = page_dir_get_n_heap(page_zip->data)
@@ -4699,8 +4701,8 @@ page_zip_reorganize(
ut_ad(!dict_index_is_ibuf(index));
ut_ad(!dict_table_is_temporary(index->table));
/* Note that page_zip_validate(page_zip, page, index) may fail here. */
- UNIV_MEM_ASSERT_RW(page, UNIV_PAGE_SIZE);
- UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
+ MEM_CHECK_DEFINED(page, srv_page_size);
+ MEM_CHECK_DEFINED(page_zip->data, page_zip_get_size(page_zip));
/* Disable logging */
mtr_log_t log_mode = mtr_set_log_mode(mtr, MTR_LOG_NONE);
@@ -4787,10 +4789,10 @@ page_zip_copy_recs(
ut_a(dict_index_is_clust(index));
}
- UNIV_MEM_ASSERT_W(page, UNIV_PAGE_SIZE);
- UNIV_MEM_ASSERT_W(page_zip->data, page_zip_get_size(page_zip));
- UNIV_MEM_ASSERT_RW(src, UNIV_PAGE_SIZE);
- UNIV_MEM_ASSERT_RW(src_zip->data, page_zip_get_size(page_zip));
+ MEM_CHECK_ADDRESSABLE(page, srv_page_size);
+ MEM_CHECK_ADDRESSABLE(page_zip->data, page_zip_get_size(page_zip));
+ MEM_CHECK_DEFINED(src, srv_page_size);
+ MEM_CHECK_DEFINED(src_zip->data, page_zip_get_size(page_zip));
/* Copy those B-tree page header fields that are related to
the records stored in the page. Also copy the field