author | Jan Lindström <jplindst@mariadb.org> | 2014-05-24 21:37:21 +0300 |
---|---|---|
committer | Jan Lindström <jplindst@mariadb.org> | 2014-05-24 21:37:21 +0300 |
commit | 1016ee9d77e8c9cd6e9bd114b808fff66f398255 (patch) | |
tree | e14ea38c4e7a8cfe794bdb00780a6d0c21cb0c20 /storage/xtradb/page | |
parent | 105060e3077a30950bcefb7a05141176bdd25872 (diff) | |
parent | edf1fbd25b81ecb0d5636566b35fa2f7fb09b0b8 (diff) | |
download | mariadb-git-1016ee9d77e8c9cd6e9bd114b808fff66f398255.tar.gz | |
Merge 10.0 -> 10.1
Diffstat (limited to 'storage/xtradb/page')
-rw-r--r-- | storage/xtradb/page/page0zip.cc | 142 |
1 file changed, 77 insertions, 65 deletions
diff --git a/storage/xtradb/page/page0zip.cc b/storage/xtradb/page/page0zip.cc
index f8d2f856881..245eb6198a0 100644
--- a/storage/xtradb/page/page0zip.cc
+++ b/storage/xtradb/page/page0zip.cc
@@ -171,7 +171,7 @@ page_zip_empty_size(
 		   + 1/* end of modification log */
 		   - REC_N_NEW_EXTRA_BYTES/* omitted bytes */)
 		/* subtract the space for page_zip_fields_encode() */
-		- compressBound(2 * (n_fields + 1));
+		- compressBound(static_cast<uLong>(2 * (n_fields + 1)));
 	return(size > 0 ? (ulint) size : 0);
 }
 #endif /* !UNIV_HOTBACKUP */
@@ -852,8 +852,8 @@ page_zip_compress_node_ptrs(
 				   rec_offs_extra_size(offsets));
 
 		/* Compress the extra bytes. */
-		c_stream->avail_in = rec - REC_N_NEW_EXTRA_BYTES
-			- c_stream->next_in;
+		c_stream->avail_in = static_cast<uInt>(
+			rec - REC_N_NEW_EXTRA_BYTES - c_stream->next_in);
 
 		if (c_stream->avail_in) {
 			err = deflate(c_stream, Z_NO_FLUSH);
@@ -865,8 +865,8 @@ page_zip_compress_node_ptrs(
 
 		/* Compress the data bytes, except node_ptr. */
 		c_stream->next_in = (byte*) rec;
-		c_stream->avail_in = rec_offs_data_size(offsets)
-			- REC_NODE_PTR_SIZE;
+		c_stream->avail_in = static_cast<uInt>(
+			rec_offs_data_size(offsets) - REC_NODE_PTR_SIZE);
 
 		if (c_stream->avail_in) {
 			err = deflate(c_stream, Z_NO_FLUSH);
@@ -907,8 +907,9 @@ page_zip_compress_sec(
 		const rec_t*	rec = *recs++;
 
 		/* Compress everything up to this record. */
-		c_stream->avail_in = rec - REC_N_NEW_EXTRA_BYTES
-			- c_stream->next_in;
+		c_stream->avail_in = static_cast<uInt>(
+			rec - REC_N_NEW_EXTRA_BYTES
+			- c_stream->next_in);
 
 		if (UNIV_LIKELY(c_stream->avail_in)) {
 			UNIV_MEM_ASSERT_RW(c_stream->next_in,
@@ -973,8 +974,8 @@ page_zip_compress_clust_ext(
 			ut_ad(len == DATA_ROLL_PTR_LEN);
 
 			/* Compress any preceding bytes. */
-			c_stream->avail_in
-				= src - c_stream->next_in;
+			c_stream->avail_in = static_cast<uInt>(
+				src - c_stream->next_in);
 
 			if (c_stream->avail_in) {
 				err = deflate(c_stream, Z_NO_FLUSH);
@@ -1003,8 +1004,8 @@ page_zip_compress_clust_ext(
 			ut_ad(len >= BTR_EXTERN_FIELD_REF_SIZE);
 			src += len - BTR_EXTERN_FIELD_REF_SIZE;
 
-			c_stream->avail_in = src
-				- c_stream->next_in;
+			c_stream->avail_in = static_cast<uInt>(
+				src - c_stream->next_in);
 			if (UNIV_LIKELY(c_stream->avail_in)) {
 				err = deflate(c_stream, Z_NO_FLUSH);
 				if (UNIV_UNLIKELY(err != Z_OK)) {
@@ -1099,8 +1100,9 @@ page_zip_compress_clust(
 				   rec_offs_extra_size(offsets));
 
 		/* Compress the extra bytes. */
-		c_stream->avail_in = rec - REC_N_NEW_EXTRA_BYTES
-			- c_stream->next_in;
+		c_stream->avail_in = static_cast<uInt>(
+			rec - REC_N_NEW_EXTRA_BYTES
+			- c_stream->next_in);
 
 		if (c_stream->avail_in) {
 			err = deflate(c_stream, Z_NO_FLUSH);
@@ -1147,7 +1149,8 @@ page_zip_compress_clust(
 					   rec_offs_extra_size(offsets));
 
 			/* Compress any preceding bytes. */
-			c_stream->avail_in = src - c_stream->next_in;
+			c_stream->avail_in = static_cast<uInt>(
+				src - c_stream->next_in);
 
 			if (c_stream->avail_in) {
 				err = deflate(c_stream, Z_NO_FLUSH);
@@ -1174,8 +1177,8 @@ page_zip_compress_clust(
 		}
 
 		/* Compress the last bytes of the record. */
-		c_stream->avail_in = rec + rec_offs_data_size(offsets)
-			- c_stream->next_in;
+		c_stream->avail_in = static_cast<uInt>(
+			rec + rec_offs_data_size(offsets) - c_stream->next_in);
 
 		if (c_stream->avail_in) {
 			err = deflate(c_stream, Z_NO_FLUSH);
@@ -1331,7 +1334,7 @@ page_zip_compress(
 	/* Compress the data payload. */
 	page_zip_set_alloc(&c_stream, heap);
 
-	err = deflateInit2(&c_stream, level,
+	err = deflateInit2(&c_stream, static_cast<int>(level),
 			   Z_DEFLATED, UNIV_PAGE_SIZE_SHIFT,
 			   MAX_MEM_LEVEL, Z_DEFAULT_STRATEGY);
 	ut_a(err == Z_OK);
@@ -1339,7 +1342,8 @@ page_zip_compress(
 	c_stream.next_out = buf;
 	/* Subtract the space reserved for uncompressed data. */
 	/* Page header and the end marker of the modification log */
-	c_stream.avail_out = buf_end - buf - 1;
+	c_stream.avail_out = static_cast<uInt>(buf_end - buf - 1);
+
 	/* Dense page directory and uncompressed columns, if any */
 	if (page_is_leaf(page)) {
 		if (dict_index_is_clust(index)) {
@@ -1368,9 +1372,9 @@ page_zip_compress(
 		goto zlib_error;
 	}
 
-	c_stream.avail_out -= n_dense * slot_size;
-	c_stream.avail_in = page_zip_fields_encode(n_fields, index,
-						   trx_id_col, fields);
+	c_stream.avail_out -= static_cast<uInt>(n_dense * slot_size);
+	c_stream.avail_in = static_cast<uInt>(
+		page_zip_fields_encode(n_fields, index, trx_id_col, fields));
 	c_stream.next_in = fields;
 
 	if (UNIV_LIKELY(!trx_id_col)) {
 		trx_id_col = ULINT_UNDEFINED;
 	}
@@ -1425,9 +1429,9 @@ page_zip_compress(
 	/* Compress any trailing garbage, in case the last record was
 	allocated from an originally longer space on the free list,
 	or the data of the last record from page_zip_compress_sec(). */
-	c_stream.avail_in
-		= page_header_get_field(page, PAGE_HEAP_TOP)
-		- (c_stream.next_in - page);
+	c_stream.avail_in = static_cast<uInt>(
+		page_header_get_field(page, PAGE_HEAP_TOP)
+		- (c_stream.next_in - page));
 	ut_a(c_stream.avail_in <= UNIV_PAGE_SIZE - PAGE_ZIP_START - PAGE_DIR);
 
 	UNIV_MEM_ASSERT_RW(c_stream.next_in, c_stream.avail_in);
@@ -2227,15 +2231,15 @@ page_zip_decompress_node_ptrs(
 	const byte*	storage;
 
 	/* Subtract the space reserved for uncompressed data. */
-	d_stream->avail_in -= n_dense
-		* (PAGE_ZIP_DIR_SLOT_SIZE + REC_NODE_PTR_SIZE);
+	d_stream->avail_in -= static_cast<uInt>(
+		n_dense * (PAGE_ZIP_DIR_SLOT_SIZE + REC_NODE_PTR_SIZE));
 
 	/* Decompress the records in heap_no order. */
 	for (slot = 0; slot < n_dense; slot++) {
 		rec_t*	rec = recs[slot];
 
-		d_stream->avail_out = rec - REC_N_NEW_EXTRA_BYTES
-			- d_stream->next_out;
+		d_stream->avail_out = static_cast<uInt>(
+			rec - REC_N_NEW_EXTRA_BYTES - d_stream->next_out);
 
 		ut_ad(d_stream->avail_out < UNIV_PAGE_SIZE
 		      - PAGE_ZIP_START - PAGE_DIR);
@@ -2271,8 +2275,8 @@ page_zip_decompress_node_ptrs(
 		ut_ad(!rec_offs_any_extern(offsets));
 
 		/* Decompress the data bytes, except node_ptr. */
-		d_stream->avail_out = rec_offs_data_size(offsets)
-			- REC_NODE_PTR_SIZE;
+		d_stream->avail_out =static_cast<uInt>(
+			rec_offs_data_size(offsets) - REC_NODE_PTR_SIZE);
 
 		switch (inflate(d_stream, Z_SYNC_FLUSH)) {
 		case Z_STREAM_END:
@@ -2301,9 +2305,9 @@ page_zip_decompress_node_ptrs(
 
 	/* Decompress any trailing garbage, in case the last record was
 	allocated from an originally longer space on the free list. */
-	d_stream->avail_out = page_header_get_field(page_zip->data,
-						    PAGE_HEAP_TOP)
-		- page_offset(d_stream->next_out);
+	d_stream->avail_out = static_cast<uInt>(
+		page_header_get_field(page_zip->data, PAGE_HEAP_TOP)
+		- page_offset(d_stream->next_out));
 	if (UNIV_UNLIKELY(d_stream->avail_out > UNIV_PAGE_SIZE
 			  - PAGE_ZIP_START - PAGE_DIR)) {
@@ -2416,14 +2420,15 @@ page_zip_decompress_sec(
 	ut_a(!dict_index_is_clust(index));
 
 	/* Subtract the space reserved for uncompressed data. */
-	d_stream->avail_in -= n_dense * PAGE_ZIP_DIR_SLOT_SIZE;
+	d_stream->avail_in -= static_cast<uint>(
+		n_dense * PAGE_ZIP_DIR_SLOT_SIZE);
 
 	for (slot = 0; slot < n_dense; slot++) {
 		rec_t*	rec = recs[slot];
 
 		/* Decompress everything up to this record. */
-		d_stream->avail_out = rec - REC_N_NEW_EXTRA_BYTES
-			- d_stream->next_out;
+		d_stream->avail_out = static_cast<uint>(
+			rec - REC_N_NEW_EXTRA_BYTES - d_stream->next_out);
 
 		if (UNIV_LIKELY(d_stream->avail_out)) {
 			switch (inflate(d_stream, Z_SYNC_FLUSH)) {
@@ -2454,9 +2459,9 @@ page_zip_decompress_sec(
 	/* Decompress the data of the last record and any trailing garbage,
 	in case the last record was allocated from an originally longer space
 	on the free list. */
-	d_stream->avail_out = page_header_get_field(page_zip->data,
-						    PAGE_HEAP_TOP)
-		- page_offset(d_stream->next_out);
+	d_stream->avail_out = static_cast<uInt>(
+		page_header_get_field(page_zip->data, PAGE_HEAP_TOP)
+		- page_offset(d_stream->next_out));
 	if (UNIV_UNLIKELY(d_stream->avail_out > UNIV_PAGE_SIZE
 			  - PAGE_ZIP_START - PAGE_DIR)) {
@@ -2569,7 +2574,8 @@ page_zip_decompress_clust_ext(
 				return(FALSE);
 			}
 
-			d_stream->avail_out = dst - d_stream->next_out;
+			d_stream->avail_out = static_cast<uInt>(
+				dst - d_stream->next_out);
 
 			switch (inflate(d_stream, Z_SYNC_FLUSH)) {
 			case Z_STREAM_END:
@@ -2600,7 +2606,8 @@ page_zip_decompress_clust_ext(
 			ut_ad(len >= BTR_EXTERN_FIELD_REF_SIZE);
 			dst += len - BTR_EXTERN_FIELD_REF_SIZE;
 
-			d_stream->avail_out = dst - d_stream->next_out;
+			d_stream->avail_out = static_cast<uInt>(
+				dst - d_stream->next_out);
 			switch (inflate(d_stream, Z_SYNC_FLUSH)) {
 			case Z_STREAM_END:
 			case Z_OK:
@@ -2666,16 +2673,17 @@ page_zip_decompress_clust(
 	ut_a(dict_index_is_clust(index));
 
 	/* Subtract the space reserved for uncompressed data. */
-	d_stream->avail_in -= n_dense * (PAGE_ZIP_DIR_SLOT_SIZE
-					 + DATA_TRX_ID_LEN
-					 + DATA_ROLL_PTR_LEN);
+	d_stream->avail_in -= static_cast<uInt>(n_dense)
+		* (PAGE_ZIP_DIR_SLOT_SIZE
+		   + DATA_TRX_ID_LEN
+		   + DATA_ROLL_PTR_LEN);
 
 	/* Decompress the records in heap_no order. */
 	for (slot = 0; slot < n_dense; slot++) {
 		rec_t*	rec = recs[slot];
 
-		d_stream->avail_out = rec - REC_N_NEW_EXTRA_BYTES
-			- d_stream->next_out;
+		d_stream->avail_out =static_cast<uInt>(
+			rec - REC_N_NEW_EXTRA_BYTES - d_stream->next_out);
 
 		ut_ad(d_stream->avail_out < UNIV_PAGE_SIZE
 		      - PAGE_ZIP_START - PAGE_DIR);
@@ -2733,7 +2741,8 @@ page_zip_decompress_clust(
 				goto zlib_error;
 			}
 
-			d_stream->avail_out = dst - d_stream->next_out;
+			d_stream->avail_out = static_cast<uInt>(
+				dst - d_stream->next_out);
 
 			switch (inflate(d_stream, Z_SYNC_FLUSH)) {
 			case Z_STREAM_END:
@@ -2762,8 +2771,8 @@ page_zip_decompress_clust(
 		}
 
 		/* Decompress the last bytes of the record. */
-		d_stream->avail_out = rec_get_end(rec, offsets)
-			- d_stream->next_out;
+		d_stream->avail_out = static_cast<uInt>(
+			rec_get_end(rec, offsets) - d_stream->next_out);
 
 		switch (inflate(d_stream, Z_SYNC_FLUSH)) {
 		case Z_STREAM_END:
@@ -2783,9 +2792,9 @@ page_zip_decompress_clust(
 
 	/* Decompress any trailing garbage, in case the last record was
 	allocated from an originally longer space on the free list. */
-	d_stream->avail_out = page_header_get_field(page_zip->data,
-						    PAGE_HEAP_TOP)
-		- page_offset(d_stream->next_out);
+	d_stream->avail_out = static_cast<uInt>(
+		page_header_get_field(page_zip->data, PAGE_HEAP_TOP)
+		- page_offset(d_stream->next_out));
 	if (UNIV_UNLIKELY(d_stream->avail_out > UNIV_PAGE_SIZE
 			  - PAGE_ZIP_START - PAGE_DIR)) {
@@ -3041,7 +3050,8 @@ zlib_error:
 	d_stream.next_in = page_zip->data + PAGE_DATA;
 	/* Subtract the space reserved for
 	the page header and the end marker of the modification log. */
-	d_stream.avail_in = page_zip_get_size(page_zip) - (PAGE_DATA + 1);
+	d_stream.avail_in = static_cast<uInt>(
+		page_zip_get_size(page_zip) - (PAGE_DATA + 1));
 	d_stream.next_out = page + PAGE_ZIP_START;
 	d_stream.avail_out = UNIV_PAGE_SIZE - PAGE_ZIP_START;
@@ -3500,7 +3510,7 @@ page_zip_write_rec_ext(
 	externs -= blob_no * BTR_EXTERN_FIELD_REF_SIZE;
 
 	if (create) {
-		page_zip->n_blobs += n_ext;
+		page_zip->n_blobs += static_cast<unsigned>(n_ext);
 		ASSERT_ZERO_BLOB(ext_end - n_ext
 				 * BTR_EXTERN_FIELD_REF_SIZE);
 		memmove(ext_end - n_ext
@@ -4408,7 +4418,7 @@ page_zip_dir_delete(
 			* BTR_EXTERN_FIELD_REF_SIZE;
 		externs -= blob_no * BTR_EXTERN_FIELD_REF_SIZE;
 
-		page_zip->n_blobs -= n_ext;
+		page_zip->n_blobs -= static_cast<unsigned>(n_ext);
 		/* Shift and zero fill the array. */
 		memmove(ext_end + n_ext * BTR_EXTERN_FIELD_REF_SIZE, ext_end,
 			(page_zip->n_blobs - blob_no)
@@ -4863,8 +4873,10 @@ page_zip_calc_checksum(
 		adler = adler32(0L, s + FIL_PAGE_OFFSET,
 				FIL_PAGE_LSN - FIL_PAGE_OFFSET);
 		adler = adler32(adler, s + FIL_PAGE_TYPE, 2);
-		adler = adler32(adler, s + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID,
-				size - FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
+		adler = adler32(
+			adler, s + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID,
+			static_cast<uInt>(size)
+			- FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
 
 		return((ulint) adler);
 	case SRV_CHECKSUM_ALGORITHM_NONE:
@@ -4894,8 +4906,8 @@ page_zip_verify_checksum(
 	ib_uint32_t	crc32 = 0 /* silence bogus warning */;
 	ib_uint32_t	innodb = 0 /* silence bogus warning */;
 
-	stored = mach_read_from_4(
-		(const unsigned char*) data + FIL_PAGE_SPACE_OR_CHKSUM);
+	stored = static_cast<ib_uint32_t>(mach_read_from_4(
+		static_cast<const unsigned char*>(data) + FIL_PAGE_SPACE_OR_CHKSUM));
 
 	/* declare empty pages non-corrupted */
 	if (stored == 0) {
@@ -4910,9 +4922,9 @@ page_zip_verify_checksum(
 		return(TRUE);
 	}
 
-	calc = page_zip_calc_checksum(
+	calc = static_cast<ib_uint32_t>(page_zip_calc_checksum(
 		data, size, static_cast<srv_checksum_algorithm_t>(
-			srv_checksum_algorithm));
+			srv_checksum_algorithm)));
 
 	if (stored == calc) {
 		return(TRUE);
@@ -4928,15 +4940,15 @@ page_zip_verify_checksum(
 			return(TRUE);
 		}
 		crc32 = calc;
-		innodb = page_zip_calc_checksum(
-			data, size, SRV_CHECKSUM_ALGORITHM_INNODB);
+		innodb = static_cast<ib_uint32_t>(page_zip_calc_checksum(
+			data, size, SRV_CHECKSUM_ALGORITHM_INNODB));
 		break;
 	case SRV_CHECKSUM_ALGORITHM_INNODB:
 		if (stored == BUF_NO_CHECKSUM_MAGIC) {
 			return(TRUE);
 		}
-		crc32 = page_zip_calc_checksum(
-			data, size, SRV_CHECKSUM_ALGORITHM_CRC32);
+		crc32 = static_cast<ib_uint32_t>(page_zip_calc_checksum(
+			data, size, SRV_CHECKSUM_ALGORITHM_CRC32));
 		innodb = calc;
 		break;
 	case SRV_CHECKSUM_ALGORITHM_NONE:
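The change applies one pattern throughout page0zip.cc: ulint expressions and pointer differences (ptrdiff_t) are narrowed to zlib's uInt/uLong parameter and field types through an explicit static_cast, presumably so 64-bit builds stop emitting implicit-conversion warnings. The following is a minimal stand-alone sketch of that pattern, not part of the commit; the buffer and end pointer are made-up stand-ins for the page data and record pointers used in the real code.

#include <zlib.h>	/* z_stream, uInt */
#include <cstdio>

int main()
{
	unsigned char	buf[256] = {0};
	z_stream	strm = {};	/* zero-initialized zlib stream */

	strm.next_in = buf;

	/* Hypothetical end-of-input pointer, standing in for "rec" etc. */
	const unsigned char*	end = buf + sizeof(buf);

	/* The pointer difference is ptrdiff_t (64-bit on common 64-bit ABIs),
	while z_stream::avail_in is uInt; the commit makes this narrowing
	conversion explicit rather than leaving it implicit. */
	strm.avail_in = static_cast<uInt>(end - strm.next_in);

	std::printf("avail_in = %u\n", strm.avail_in);
	return 0;
}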