Diffstat (limited to 'storage/innobase/page/page0zip.cc')
-rw-r--r--  storage/innobase/page/page0zip.cc  160
1 file changed, 89 insertions(+), 71 deletions(-)
diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc
index 52dcbf64183..ab7a19795a3 100644
--- a/storage/innobase/page/page0zip.cc
+++ b/storage/innobase/page/page0zip.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2005, 2013, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2005, 2014, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
This program is free software; you can redistribute it and/or modify it under
@@ -24,6 +24,9 @@ Compressed page interface
Created June 2005 by Marko Makela
*******************************************************/
+// First include (the generated) my_config.h, to get correct platform defines.
+#include "my_config.h"
+
#include <map>
using namespace std;
@@ -168,7 +171,7 @@ page_zip_empty_size(
+ 1/* end of modification log */
- REC_N_NEW_EXTRA_BYTES/* omitted bytes */)
/* subtract the space for page_zip_fields_encode() */
- - compressBound(2 * (n_fields + 1));
+ - compressBound(static_cast<uLong>(2 * (n_fields + 1)));
return(size > 0 ? (ulint) size : 0);
}
#endif /* !UNIV_HOTBACKUP */
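
The hunk above wraps the compressBound() argument in static_cast<uLong>: zlib declares compressBound(uLong sourceLen), while InnoDB does this arithmetic in ulint, which can be wider. A minimal standalone sketch of the same call, assuming only <zlib.h> (the variable n_fields here is illustrative, not the InnoDB one):

    #include <zlib.h>
    #include <cstdio>

    int main() {
        unsigned long n_fields = 10;   /* stand-in for the index field count */
        /* compressBound() takes a uLong; make the conversion explicit so it
           stays warning-free whether the source type is 32 or 64 bits wide. */
        uLong bound = compressBound(static_cast<uLong>(2 * (n_fields + 1)));
        std::printf("worst-case compressed size: %lu bytes\n",
                    static_cast<unsigned long>(bound));
        return 0;
    }
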
@@ -849,8 +852,8 @@ page_zip_compress_node_ptrs(
rec_offs_extra_size(offsets));
/* Compress the extra bytes. */
- c_stream->avail_in = rec - REC_N_NEW_EXTRA_BYTES
- - c_stream->next_in;
+ c_stream->avail_in = static_cast<uInt>(
+ rec - REC_N_NEW_EXTRA_BYTES - c_stream->next_in);
if (c_stream->avail_in) {
err = deflate(c_stream, Z_NO_FLUSH);
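
Most of the remaining hunks follow one pattern: z_stream::avail_in and avail_out are zlib uInt, whereas a pointer difference such as rec - REC_N_NEW_EXTRA_BYTES - c_stream->next_in is ptrdiff_t, so each assignment gains an explicit static_cast<uInt>. A self-contained sketch of that pattern on plain buffers, assuming only zlib (the buffer names are illustrative):

    #include <zlib.h>
    #include <cstring>
    #include <cassert>

    int main() {
        unsigned char in[256], out[512];
        std::memset(in, 'x', sizeof in);

        z_stream c{};                          /* zalloc/zfree/opaque = Z_NULL */
        int err = deflateInit(&c, Z_DEFAULT_COMPRESSION);
        assert(err == Z_OK);

        c.next_in   = in;
        c.next_out  = out;
        c.avail_out = static_cast<uInt>(sizeof out);

        const unsigned char* stop = in + 100;  /* compress everything up to here */
        /* stop - c.next_in is ptrdiff_t; avail_in is uInt, hence the cast. */
        c.avail_in = static_cast<uInt>(stop - c.next_in);
        if (c.avail_in) {
            err = deflate(&c, Z_NO_FLUSH);
            assert(err == Z_OK);
        }

        deflateEnd(&c);
        return 0;
    }
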
@@ -862,8 +865,8 @@ page_zip_compress_node_ptrs(
/* Compress the data bytes, except node_ptr. */
c_stream->next_in = (byte*) rec;
- c_stream->avail_in = rec_offs_data_size(offsets)
- - REC_NODE_PTR_SIZE;
+ c_stream->avail_in = static_cast<uInt>(
+ rec_offs_data_size(offsets) - REC_NODE_PTR_SIZE);
if (c_stream->avail_in) {
err = deflate(c_stream, Z_NO_FLUSH);
@@ -904,8 +907,9 @@ page_zip_compress_sec(
const rec_t* rec = *recs++;
/* Compress everything up to this record. */
- c_stream->avail_in = rec - REC_N_NEW_EXTRA_BYTES
- - c_stream->next_in;
+ c_stream->avail_in = static_cast<uInt>(
+ rec - REC_N_NEW_EXTRA_BYTES
+ - c_stream->next_in);
if (UNIV_LIKELY(c_stream->avail_in)) {
UNIV_MEM_ASSERT_RW(c_stream->next_in,
@@ -970,8 +974,8 @@ page_zip_compress_clust_ext(
ut_ad(len == DATA_ROLL_PTR_LEN);
/* Compress any preceding bytes. */
- c_stream->avail_in
- = src - c_stream->next_in;
+ c_stream->avail_in = static_cast<uInt>(
+ src - c_stream->next_in);
if (c_stream->avail_in) {
err = deflate(c_stream, Z_NO_FLUSH);
@@ -1000,8 +1004,8 @@ page_zip_compress_clust_ext(
ut_ad(len >= BTR_EXTERN_FIELD_REF_SIZE);
src += len - BTR_EXTERN_FIELD_REF_SIZE;
- c_stream->avail_in = src
- - c_stream->next_in;
+ c_stream->avail_in = static_cast<uInt>(
+ src - c_stream->next_in);
if (UNIV_LIKELY(c_stream->avail_in)) {
err = deflate(c_stream, Z_NO_FLUSH);
if (UNIV_UNLIKELY(err != Z_OK)) {
@@ -1096,8 +1100,9 @@ page_zip_compress_clust(
rec_offs_extra_size(offsets));
/* Compress the extra bytes. */
- c_stream->avail_in = rec - REC_N_NEW_EXTRA_BYTES
- - c_stream->next_in;
+ c_stream->avail_in = static_cast<uInt>(
+ rec - REC_N_NEW_EXTRA_BYTES
+ - c_stream->next_in);
if (c_stream->avail_in) {
err = deflate(c_stream, Z_NO_FLUSH);
@@ -1144,7 +1149,8 @@ page_zip_compress_clust(
rec_offs_extra_size(offsets));
/* Compress any preceding bytes. */
- c_stream->avail_in = src - c_stream->next_in;
+ c_stream->avail_in = static_cast<uInt>(
+ src - c_stream->next_in);
if (c_stream->avail_in) {
err = deflate(c_stream, Z_NO_FLUSH);
@@ -1171,8 +1177,8 @@ page_zip_compress_clust(
}
/* Compress the last bytes of the record. */
- c_stream->avail_in = rec + rec_offs_data_size(offsets)
- - c_stream->next_in;
+ c_stream->avail_in = static_cast<uInt>(
+ rec + rec_offs_data_size(offsets) - c_stream->next_in);
if (c_stream->avail_in) {
err = deflate(c_stream, Z_NO_FLUSH);
@@ -1324,7 +1330,7 @@ page_zip_compress(
/* Compress the data payload. */
page_zip_set_alloc(&c_stream, heap);
- err = deflateInit2(&c_stream, level,
+ err = deflateInit2(&c_stream, static_cast<int>(level),
Z_DEFLATED, UNIV_PAGE_SIZE_SHIFT,
MAX_MEM_LEVEL, Z_DEFAULT_STRATEGY);
ut_a(err == Z_OK);
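
deflateInit2() takes the compression level as an int, hence the static_cast<int>(level) above. A sketch of the same call shape; the window-bits value 14 is only a stand-in for UNIV_PAGE_SIZE_SHIFT:

    #include <zlib.h>
    #include <cassert>

    int main() {
        unsigned long level = 6;               /* stand-in for the ulint level */
        z_stream c{};
        int err = deflateInit2(&c, static_cast<int>(level), Z_DEFLATED,
                               14 /* window bits, stand-in for UNIV_PAGE_SIZE_SHIFT */,
                               MAX_MEM_LEVEL, Z_DEFAULT_STRATEGY);
        assert(err == Z_OK);
        deflateEnd(&c);
        return 0;
    }
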
@@ -1332,7 +1338,8 @@ page_zip_compress(
c_stream.next_out = buf;
/* Subtract the space reserved for uncompressed data. */
/* Page header and the end marker of the modification log */
- c_stream.avail_out = buf_end - buf - 1;
+ c_stream.avail_out = static_cast<uInt>(buf_end - buf - 1);
+
/* Dense page directory and uncompressed columns, if any */
if (page_is_leaf(page)) {
if (dict_index_is_clust(index)) {
@@ -1361,9 +1368,9 @@ page_zip_compress(
goto zlib_error;
}
- c_stream.avail_out -= n_dense * slot_size;
- c_stream.avail_in = page_zip_fields_encode(n_fields, index,
- trx_id_col, fields);
+ c_stream.avail_out -= static_cast<uInt>(n_dense * slot_size);
+ c_stream.avail_in = static_cast<uInt>(
+ page_zip_fields_encode(n_fields, index, trx_id_col, fields));
c_stream.next_in = fields;
if (UNIV_LIKELY(!trx_id_col)) {
trx_id_col = ULINT_UNDEFINED;
@@ -1418,9 +1425,9 @@ page_zip_compress(
/* Compress any trailing garbage, in case the last record was
allocated from an originally longer space on the free list,
or the data of the last record from page_zip_compress_sec(). */
- c_stream.avail_in
- = page_header_get_field(page, PAGE_HEAP_TOP)
- - (c_stream.next_in - page);
+ c_stream.avail_in = static_cast<uInt>(
+ page_header_get_field(page, PAGE_HEAP_TOP)
+ - (c_stream.next_in - page));
ut_a(c_stream.avail_in <= UNIV_PAGE_SIZE - PAGE_ZIP_START - PAGE_DIR);
UNIV_MEM_ASSERT_RW(c_stream.next_in, c_stream.avail_in);
@@ -1567,9 +1574,8 @@ page_zip_fields_free(
dict_table_t* table = index->table;
os_fast_mutex_free(&index->zip_pad.mutex);
mem_heap_free(index->heap);
- mutex_free(&(table->autoinc_mutex));
- ut_free(table->name);
- mem_heap_free(table->heap);
+
+ dict_mem_table_free(table);
}
}
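
Here the patch stops freeing the dummy table's autoinc mutex, name, and heap one by one and instead hands the whole object to dict_mem_table_free(). A generic sketch of that consolidation, with hypothetical names (fake_table is not an InnoDB type):

    #include <cstdlib>
    #include <cstring>

    struct fake_table {                        /* illustrative, not dict_table_t */
        char* name;
        void* heap;                            /* stands in for the memory heap */
    };

    fake_table* fake_table_create(const char* name) {
        fake_table* t = static_cast<fake_table*>(std::malloc(sizeof(fake_table)));
        std::size_t n = std::strlen(name) + 1;
        t->name = static_cast<char*>(std::malloc(n));
        std::memcpy(t->name, name, n);
        t->heap = std::malloc(64);
        return t;
    }

    /* Single teardown path; callers no longer free name and heap piecemeal. */
    void fake_table_free(fake_table* t) {
        std::free(t->heap);
        std::free(t->name);
        std::free(t);
    }
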
@@ -2221,15 +2227,15 @@ page_zip_decompress_node_ptrs(
const byte* storage;
/* Subtract the space reserved for uncompressed data. */
- d_stream->avail_in -= n_dense
- * (PAGE_ZIP_DIR_SLOT_SIZE + REC_NODE_PTR_SIZE);
+ d_stream->avail_in -= static_cast<uInt>(
+ n_dense * (PAGE_ZIP_DIR_SLOT_SIZE + REC_NODE_PTR_SIZE));
/* Decompress the records in heap_no order. */
for (slot = 0; slot < n_dense; slot++) {
rec_t* rec = recs[slot];
- d_stream->avail_out = rec - REC_N_NEW_EXTRA_BYTES
- - d_stream->next_out;
+ d_stream->avail_out = static_cast<uInt>(
+ rec - REC_N_NEW_EXTRA_BYTES - d_stream->next_out);
ut_ad(d_stream->avail_out < UNIV_PAGE_SIZE
- PAGE_ZIP_START - PAGE_DIR);
@@ -2265,8 +2271,8 @@ page_zip_decompress_node_ptrs(
ut_ad(!rec_offs_any_extern(offsets));
/* Decompress the data bytes, except node_ptr. */
- d_stream->avail_out = rec_offs_data_size(offsets)
- - REC_NODE_PTR_SIZE;
+ d_stream->avail_out = static_cast<uInt>(
+ rec_offs_data_size(offsets) - REC_NODE_PTR_SIZE);
switch (inflate(d_stream, Z_SYNC_FLUSH)) {
case Z_STREAM_END:
@@ -2295,9 +2301,9 @@ page_zip_decompress_node_ptrs(
/* Decompress any trailing garbage, in case the last record was
allocated from an originally longer space on the free list. */
- d_stream->avail_out = page_header_get_field(page_zip->data,
- PAGE_HEAP_TOP)
- - page_offset(d_stream->next_out);
+ d_stream->avail_out = static_cast<uInt>(
+ page_header_get_field(page_zip->data, PAGE_HEAP_TOP)
+ - page_offset(d_stream->next_out));
if (UNIV_UNLIKELY(d_stream->avail_out > UNIV_PAGE_SIZE
- PAGE_ZIP_START - PAGE_DIR)) {
@@ -2410,14 +2416,15 @@ page_zip_decompress_sec(
ut_a(!dict_index_is_clust(index));
/* Subtract the space reserved for uncompressed data. */
- d_stream->avail_in -= n_dense * PAGE_ZIP_DIR_SLOT_SIZE;
+ d_stream->avail_in -= static_cast<uInt>(
+ n_dense * PAGE_ZIP_DIR_SLOT_SIZE);
for (slot = 0; slot < n_dense; slot++) {
rec_t* rec = recs[slot];
/* Decompress everything up to this record. */
- d_stream->avail_out = rec - REC_N_NEW_EXTRA_BYTES
- - d_stream->next_out;
+ d_stream->avail_out = static_cast<uInt>(
+ rec - REC_N_NEW_EXTRA_BYTES - d_stream->next_out);
if (UNIV_LIKELY(d_stream->avail_out)) {
switch (inflate(d_stream, Z_SYNC_FLUSH)) {
@@ -2448,9 +2455,9 @@ page_zip_decompress_sec(
/* Decompress the data of the last record and any trailing garbage,
in case the last record was allocated from an originally longer space
on the free list. */
- d_stream->avail_out = page_header_get_field(page_zip->data,
- PAGE_HEAP_TOP)
- - page_offset(d_stream->next_out);
+ d_stream->avail_out = static_cast<uInt>(
+ page_header_get_field(page_zip->data, PAGE_HEAP_TOP)
+ - page_offset(d_stream->next_out));
if (UNIV_UNLIKELY(d_stream->avail_out > UNIV_PAGE_SIZE
- PAGE_ZIP_START - PAGE_DIR)) {
@@ -2563,7 +2570,8 @@ page_zip_decompress_clust_ext(
return(FALSE);
}
- d_stream->avail_out = dst - d_stream->next_out;
+ d_stream->avail_out = static_cast<uInt>(
+ dst - d_stream->next_out);
switch (inflate(d_stream, Z_SYNC_FLUSH)) {
case Z_STREAM_END:
@@ -2594,7 +2602,8 @@ page_zip_decompress_clust_ext(
ut_ad(len >= BTR_EXTERN_FIELD_REF_SIZE);
dst += len - BTR_EXTERN_FIELD_REF_SIZE;
- d_stream->avail_out = dst - d_stream->next_out;
+ d_stream->avail_out = static_cast<uInt>(
+ dst - d_stream->next_out);
switch (inflate(d_stream, Z_SYNC_FLUSH)) {
case Z_STREAM_END:
case Z_OK:
@@ -2660,16 +2669,17 @@ page_zip_decompress_clust(
ut_a(dict_index_is_clust(index));
/* Subtract the space reserved for uncompressed data. */
- d_stream->avail_in -= n_dense * (PAGE_ZIP_DIR_SLOT_SIZE
- + DATA_TRX_ID_LEN
- + DATA_ROLL_PTR_LEN);
+ d_stream->avail_in -= static_cast<uInt>(n_dense)
+ * (PAGE_ZIP_DIR_SLOT_SIZE
+ + DATA_TRX_ID_LEN
+ + DATA_ROLL_PTR_LEN);
/* Decompress the records in heap_no order. */
for (slot = 0; slot < n_dense; slot++) {
rec_t* rec = recs[slot];
- d_stream->avail_out = rec - REC_N_NEW_EXTRA_BYTES
- - d_stream->next_out;
+ d_stream->avail_out = static_cast<uInt>(
+ rec - REC_N_NEW_EXTRA_BYTES - d_stream->next_out);
ut_ad(d_stream->avail_out < UNIV_PAGE_SIZE
- PAGE_ZIP_START - PAGE_DIR);
@@ -2727,7 +2737,8 @@ page_zip_decompress_clust(
goto zlib_error;
}
- d_stream->avail_out = dst - d_stream->next_out;
+ d_stream->avail_out = static_cast<uInt>(
+ dst - d_stream->next_out);
switch (inflate(d_stream, Z_SYNC_FLUSH)) {
case Z_STREAM_END:
@@ -2756,8 +2767,8 @@ page_zip_decompress_clust(
}
/* Decompress the last bytes of the record. */
- d_stream->avail_out = rec_get_end(rec, offsets)
- - d_stream->next_out;
+ d_stream->avail_out = static_cast<uInt>(
+ rec_get_end(rec, offsets) - d_stream->next_out);
switch (inflate(d_stream, Z_SYNC_FLUSH)) {
case Z_STREAM_END:
@@ -2777,9 +2788,9 @@ page_zip_decompress_clust(
/* Decompress any trailing garbage, in case the last record was
allocated from an originally longer space on the free list. */
- d_stream->avail_out = page_header_get_field(page_zip->data,
- PAGE_HEAP_TOP)
- - page_offset(d_stream->next_out);
+ d_stream->avail_out = static_cast<uInt>(
+ page_header_get_field(page_zip->data, PAGE_HEAP_TOP)
+ - page_offset(d_stream->next_out));
if (UNIV_UNLIKELY(d_stream->avail_out > UNIV_PAGE_SIZE
- PAGE_ZIP_START - PAGE_DIR)) {
@@ -3035,7 +3046,8 @@ zlib_error:
d_stream.next_in = page_zip->data + PAGE_DATA;
/* Subtract the space reserved for
the page header and the end marker of the modification log. */
- d_stream.avail_in = page_zip_get_size(page_zip) - (PAGE_DATA + 1);
+ d_stream.avail_in = static_cast<uInt>(
+ page_zip_get_size(page_zip) - (PAGE_DATA + 1));
d_stream.next_out = page + PAGE_ZIP_START;
d_stream.avail_out = UNIV_PAGE_SIZE - PAGE_ZIP_START;
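
On the decompression side the same sizing applies: avail_in spans the compressed payload after the page header and avail_out the uncompressed area, again with explicit casts because the sizes are ulint. A small compress-then-inflate round trip on a plain buffer, assuming only zlib:

    #include <zlib.h>
    #include <cstring>
    #include <cassert>

    int main() {
        unsigned char src[200], comp[512], back[200];
        std::memset(src, 7, sizeof src);

        uLongf comp_len = sizeof comp;
        int err = compress(comp, &comp_len, src, static_cast<uLong>(sizeof src));
        assert(err == Z_OK);

        z_stream d{};
        err = inflateInit(&d);
        assert(err == Z_OK);

        d.next_in   = comp;
        d.avail_in  = static_cast<uInt>(comp_len);      /* uLongf -> uInt */
        d.next_out  = back;
        d.avail_out = static_cast<uInt>(sizeof back);

        err = inflate(&d, Z_FINISH);
        assert(err == Z_STREAM_END);
        assert(std::memcmp(src, back, sizeof src) == 0);

        inflateEnd(&d);
        return 0;
    }
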
@@ -3494,7 +3506,7 @@ page_zip_write_rec_ext(
externs -= blob_no * BTR_EXTERN_FIELD_REF_SIZE;
if (create) {
- page_zip->n_blobs += n_ext;
+ page_zip->n_blobs += static_cast<unsigned>(n_ext);
ASSERT_ZERO_BLOB(ext_end - n_ext
* BTR_EXTERN_FIELD_REF_SIZE);
memmove(ext_end - n_ext
@@ -4402,7 +4414,7 @@ page_zip_dir_delete(
* BTR_EXTERN_FIELD_REF_SIZE;
externs -= blob_no * BTR_EXTERN_FIELD_REF_SIZE;
- page_zip->n_blobs -= n_ext;
+ page_zip->n_blobs -= static_cast<unsigned>(n_ext);
/* Shift and zero fill the array. */
memmove(ext_end + n_ext * BTR_EXTERN_FIELD_REF_SIZE, ext_end,
(page_zip->n_blobs - blob_no)
@@ -4857,8 +4869,10 @@ page_zip_calc_checksum(
adler = adler32(0L, s + FIL_PAGE_OFFSET,
FIL_PAGE_LSN - FIL_PAGE_OFFSET);
adler = adler32(adler, s + FIL_PAGE_TYPE, 2);
- adler = adler32(adler, s + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID,
- size - FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
+ adler = adler32(
+ adler, s + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID,
+ static_cast<uInt>(size)
+ - FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
return((ulint) adler);
case SRV_CHECKSUM_ALGORITHM_NONE:
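
The innodb checksum of a compressed page chains adler32() over selected byte ranges, skipping the checksum field itself; the hunk only makes the length conversion to uInt explicit. A standalone sketch of chaining adler32() over parts of a buffer (the offsets are illustrative, not the FIL_PAGE_* constants):

    #include <zlib.h>
    #include <cstdio>
    #include <cstring>

    int main() {
        unsigned char page[1024];
        std::memset(page, 0xab, sizeof page);

        const std::size_t off_a = 4, len_a = 12;   /* illustrative ranges only */
        const std::size_t off_b = 24, len_b = 2;
        const std::size_t off_c = 34;

        uLong adler = adler32(0L, page + off_a, static_cast<uInt>(len_a));
        adler = adler32(adler, page + off_b, static_cast<uInt>(len_b));
        adler = adler32(adler, page + off_c,
                        static_cast<uInt>(sizeof page - off_c));

        std::printf("checksum: %08lx\n", adler);
        return 0;
    }
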
@@ -4888,21 +4902,25 @@ page_zip_verify_checksum(
ib_uint32_t crc32 = 0 /* silence bogus warning */;
ib_uint32_t innodb = 0 /* silence bogus warning */;
- stored = mach_read_from_4(
- (const unsigned char*) data + FIL_PAGE_SPACE_OR_CHKSUM);
+ stored = static_cast<ib_uint32_t>(mach_read_from_4(
+ static_cast<const unsigned char*>(data) + FIL_PAGE_SPACE_OR_CHKSUM));
/* declare empty pages non-corrupted */
if (stored == 0) {
/* make sure that the page is really empty */
- ut_d(ulint i; for (i = 0; i < size; i++) {
- ut_a(*((const char*) data + i) == 0); });
+ ulint i;
+ for (i = 0; i < size; i++) {
+ if (*((const char*) data + i) != 0) {
+ return(FALSE);
+ }
+ }
return(TRUE);
}
- calc = page_zip_calc_checksum(
+ calc = static_cast<ib_uint32_t>(page_zip_calc_checksum(
data, size, static_cast<srv_checksum_algorithm_t>(
- srv_checksum_algorithm));
+ srv_checksum_algorithm)));
if (stored == calc) {
return(TRUE);
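
This hunk turns the empty-page check from a debug-only ut_d() assertion into a runtime loop: a stored checksum of zero is accepted only if every byte of the page really is zero, otherwise verification fails. A sketch of that check with a hypothetical helper name:

    #include <cstddef>

    /* Hypothetical helper; returns true only for an all-zero page image. */
    bool page_is_really_empty(const unsigned char* data, std::size_t size) {
        for (std::size_t i = 0; i < size; i++) {
            if (data[i] != 0) {
                return false;   /* stored checksum 0 but non-zero bytes: corrupt */
            }
        }
        return true;
    }
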
@@ -4918,15 +4936,15 @@ page_zip_verify_checksum(
return(TRUE);
}
crc32 = calc;
- innodb = page_zip_calc_checksum(
- data, size, SRV_CHECKSUM_ALGORITHM_INNODB);
+ innodb = static_cast<ib_uint32_t>(page_zip_calc_checksum(
+ data, size, SRV_CHECKSUM_ALGORITHM_INNODB));
break;
case SRV_CHECKSUM_ALGORITHM_INNODB:
if (stored == BUF_NO_CHECKSUM_MAGIC) {
return(TRUE);
}
- crc32 = page_zip_calc_checksum(
- data, size, SRV_CHECKSUM_ALGORITHM_CRC32);
+ crc32 = static_cast<ib_uint32_t>(page_zip_calc_checksum(
+ data, size, SRV_CHECKSUM_ALGORITHM_CRC32));
innodb = calc;
break;
case SRV_CHECKSUM_ALGORITHM_NONE:
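
The remainder of page_zip_verify_checksum() (partly visible above) accepts a page when the stored value matches either the configured algorithm or the legacy alternative: under strict CRC32 it falls back to the innodb checksum, and vice versa. A compressed sketch of that acceptance rule; the names and the crc32/adler32 stand-in below are assumptions, not the InnoDB implementation:

    #include <zlib.h>
    #include <cstdint>
    #include <cstddef>

    enum class Algo { CRC32, INNODB };

    /* Stand-in for page_zip_calc_checksum(): crc32 vs adler32 over the buffer. */
    uint32_t calc_checksum(const unsigned char* data, std::size_t size, Algo a) {
        uInt len = static_cast<uInt>(size);
        return static_cast<uint32_t>(
            a == Algo::CRC32 ? crc32(0L, data, len) : adler32(0L, data, len));
    }

    bool verify(const unsigned char* data, std::size_t size,
                Algo selected, uint32_t stored) {
        uint32_t calc = calc_checksum(data, size, selected);
        if (stored == calc) {
            return true;                        /* matches the selected algorithm */
        }
        /* Fall back to the other algorithm, as the switch in the hunk does. */
        Algo other = (selected == Algo::CRC32) ? Algo::INNODB : Algo::CRC32;
        return stored == calc_checksum(data, size, other);
    }
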