Diffstat (limited to 'storage/innobase/row')
-rw-r--r--  storage/innobase/row/row0log.cc     23
-rw-r--r--  storage/innobase/row/row0merge.cc  149
-rw-r--r--  storage/innobase/row/row0mysql.cc   71
3 files changed, 193 insertions(+), 50 deletions(-)
diff --git a/storage/innobase/row/row0log.cc b/storage/innobase/row/row0log.cc
index 48b5dd36cb9..f88baa42d58 100644
--- a/storage/innobase/row/row0log.cc
+++ b/storage/innobase/row/row0log.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2011, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2011, 2015, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1381,6 +1381,27 @@ blob_done:
dfield_set_data(dfield, data, len);
}
+ if (len != UNIV_SQL_NULL && col->mtype == DATA_MYSQL
+ && col->len != len && !dict_table_is_comp(log->table)) {
+
+ ut_ad(col->len >= len);
+ if (dict_table_is_comp(index->table)) {
+ byte* buf = (byte*) mem_heap_alloc(heap,
+ col->len);
+ memcpy(buf, dfield->data, len);
+ memset(buf + len, 0x20, col->len - len);
+
+ dfield_set_data(dfield, buf, col->len);
+ } else {
+ /* field length mismatch should not happen
+ when rebuilding the redundant row format
+ table. */
+ ut_ad(0);
+ *error = DB_CORRUPTION;
+ return(NULL);
+ }
+ }
+
/* See if any columns were changed to NULL or NOT NULL. */
const dict_col_t* new_col
= dict_table_get_nth_col(log->table, col_no);
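The row0log.cc hunk above pads a DATA_MYSQL (fixed-length CHAR) value back to its declared byte length when an online rebuild converts a COMPACT table to REDUNDANT format, because COMPACT storage may trim trailing padding of multi-byte CHAR columns. A minimal, self-contained sketch of that padding step (pad_char_to_fixed_len is a hypothetical stand-in, not an InnoDB function; 0x20 is the space byte used as CHAR padding):

#include <cassert>
#include <cstddef>
#include <cstring>
#include <iostream>
#include <vector>

// Pad a trimmed CHAR(N) value back to its full declared byte length,
// as REDUNDANT row format expects, filling the tail with 0x20 (space).
static std::vector<unsigned char> pad_char_to_fixed_len(
    const unsigned char* data, std::size_t stored_len, std::size_t declared_len)
{
    assert(stored_len <= declared_len);
    std::vector<unsigned char> buf(declared_len, 0x20);  // pre-fill with spaces
    std::memcpy(buf.data(), data, stored_len);           // copy the stored bytes
    return buf;
}

int main()
{
    const unsigned char trimmed[] = {'a', 'b', 'c'};      // 3 bytes stored in COMPACT format
    std::vector<unsigned char> full =
        pad_char_to_fixed_len(trimmed, sizeof(trimmed), 12);  // e.g. CHAR(4) utf8 -> 12 bytes
    std::cout << full.size() << " bytes after padding\n";      // prints 12
    return 0;
}

If the source table is already REDUNDANT, a length mismatch cannot legitimately occur, which is why the diff treats that branch as corruption rather than padding.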
diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc
index c4224424fdb..93d57fa8e57 100644
--- a/storage/innobase/row/row0merge.cc
+++ b/storage/innobase/row/row0merge.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2005, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2005, 2015, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -235,22 +235,86 @@ row_merge_buf_free(
mem_heap_free(buf->heap);
}
-/******************************************************//**
-Insert a data tuple into a sort buffer.
-@return number of rows added, 0 if out of space */
+/** Convert the field data from compact to redundant format.
+@param[in] row_field field to copy from
+@param[out] field field to copy to
+@param[in] len length of the field data
+@param[in] zip_size compressed BLOB page size,
+ zero for uncompressed BLOBs
+@param[in,out] heap memory heap where to allocate data when
+ converting to ROW_FORMAT=REDUNDANT, or NULL
+ when not to invoke
+ row_merge_buf_redundant_convert(). */
+static
+void
+row_merge_buf_redundant_convert(
+ const dfield_t* row_field,
+ dfield_t* field,
+ ulint len,
+ ulint zip_size,
+ mem_heap_t* heap)
+{
+ ut_ad(DATA_MBMINLEN(field->type.mbminmaxlen) == 1);
+ ut_ad(DATA_MBMAXLEN(field->type.mbminmaxlen) > 1);
+
+ byte* buf = (byte*) mem_heap_alloc(heap, len);
+ ulint field_len = row_field->len;
+ ut_ad(field_len <= len);
+
+ if (row_field->ext) {
+ const byte* field_data = static_cast<byte*>(
+ dfield_get_data(row_field));
+ ulint ext_len;
+
+ ut_a(field_len >= BTR_EXTERN_FIELD_REF_SIZE);
+ ut_a(memcmp(field_data + field_len - BTR_EXTERN_FIELD_REF_SIZE,
+ field_ref_zero, BTR_EXTERN_FIELD_REF_SIZE));
+
+ byte* data = btr_copy_externally_stored_field(
+ &ext_len, field_data, zip_size, field_len, heap);
+
+ ut_ad(ext_len < len);
+
+ memcpy(buf, data, ext_len);
+ field_len = ext_len;
+ } else {
+ memcpy(buf, row_field->data, field_len);
+ }
+
+ memset(buf + field_len, 0x20, len - field_len);
+
+ dfield_set_data(field, buf, len);
+}
+
+/** Insert a data tuple into a sort buffer.
+@param[in,out] buf sort buffer
+@param[in] fts_index fts index to be created
+@param[in] old_table original table
+@param[in,out] psort_info parallel sort info
+@param[in] row table row
+@param[in] ext cache of externally stored
+ column prefixes, or NULL
+@param[in,out] doc_id Doc ID if we are creating
+ FTS index
+@param[in,out] conv_heap memory heap where to allocate data when
+ converting to ROW_FORMAT=REDUNDANT, or NULL
+ when not to invoke
+ row_merge_buf_redundant_convert()
+@param[in,out] exceed_page set if the record size exceeds the page size
+ when converting to ROW_FORMAT=REDUNDANT
+@return number of rows added, 0 if out of space */
static
ulint
row_merge_buf_add(
-/*==============*/
- row_merge_buf_t* buf, /*!< in/out: sort buffer */
- dict_index_t* fts_index,/*!< in: fts index to be created */
- const dict_table_t* old_table,/*!< in: original table */
- fts_psort_t* psort_info, /*!< in: parallel sort info */
- const dtuple_t* row, /*!< in: table row */
- const row_ext_t* ext, /*!< in: cache of externally stored
- column prefixes, or NULL */
- doc_id_t* doc_id) /*!< in/out: Doc ID if we are
- creating FTS index */
+ row_merge_buf_t* buf,
+ dict_index_t* fts_index,
+ const dict_table_t* old_table,
+ fts_psort_t* psort_info,
+ const dtuple_t* row,
+ const row_ext_t* ext,
+ doc_id_t* doc_id,
+ mem_heap_t* conv_heap,
+ bool* exceed_page)
{
ulint i;
const dict_index_t* index;
@@ -400,6 +464,23 @@ row_merge_buf_add(
n_row_added = 1;
continue;
}
+
+ if (field->len != UNIV_SQL_NULL
+ && col->mtype == DATA_MYSQL
+ && col->len != field->len) {
+
+ if (conv_heap != NULL) {
+ row_merge_buf_redundant_convert(
+ row_field, field, col->len,
+ dict_table_zip_size(old_table),
+ conv_heap);
+ } else {
+ /* Field length mismatch should not
+ happen when rebuilding redundant row
+ format table. */
+ ut_ad(dict_table_is_comp(index->table));
+ }
+ }
}
len = dfield_get_len(field);
@@ -508,6 +589,14 @@ row_merge_buf_add(
of extra_size. */
data_size += (extra_size + 1) + ((extra_size + 1) >= 0x80);
+ /* Record size can exceed page size while converting to
+ redundant row format. But there is assert
+ ut_ad(size < UNIV_PAGE_SIZE) in rec_offs_data_size().
+ It may hit the assert before attempting to insert the row. */
+ if (conv_heap != NULL && data_size > UNIV_PAGE_SIZE) {
+ *exceed_page = true;
+ }
+
ut_ad(data_size < srv_sort_buf_size);
/* Reserve one byte for the end marker of row_merge_block_t. */
@@ -527,6 +616,10 @@ row_merge_buf_add(
dfield_dup(field++, buf->heap);
} while (--n_fields);
+ if (conv_heap != NULL) {
+ mem_heap_empty(conv_heap);
+ }
+
DBUG_RETURN(n_row_added);
}
@@ -1208,6 +1301,7 @@ row_merge_read_clustered_index(
os_event_t fts_parallel_sort_event = NULL;
ibool fts_pll_sort = FALSE;
ib_int64_t sig_count = 0;
+ mem_heap_t* conv_heap = NULL;
DBUG_ENTER("row_merge_read_clustered_index");
ut_ad((old_table == new_table) == !col_map);
@@ -1303,6 +1397,11 @@ row_merge_read_clustered_index(
row_heap = mem_heap_create(sizeof(mrec_buf_t));
+ if (dict_table_is_comp(old_table)
+ && !dict_table_is_comp(new_table)) {
+ conv_heap = mem_heap_create(sizeof(mrec_buf_t));
+ }
+
/* Scan the clustered index. */
for (;;) {
const rec_t* rec;
@@ -1581,16 +1680,24 @@ write_buffers:
row_merge_buf_t* buf = merge_buf[i];
merge_file_t* file = &files[i];
ulint rows_added = 0;
+ bool exceed_page = false;
if (UNIV_LIKELY
(row && (rows_added = row_merge_buf_add(
buf, fts_index, old_table,
- psort_info, row, ext, &doc_id)))) {
+ psort_info, row, ext, &doc_id,
+ conv_heap, &exceed_page)))) {
/* If we are creating FTS index,
a single row can generate more
records for tokenized word */
file->n_rec += rows_added;
+
+ if (exceed_page) {
+ err = DB_TOO_BIG_RECORD;
+ break;
+ }
+
if (doc_id > max_doc_id) {
max_doc_id = doc_id;
}
@@ -1691,12 +1798,18 @@ write_buffers:
(!(rows_added = row_merge_buf_add(
buf, fts_index, old_table,
psort_info, row, ext,
- &doc_id)))) {
+ &doc_id, conv_heap,
+ &exceed_page)))) {
/* An empty buffer should have enough
room for at least one record. */
ut_error;
}
+ if (exceed_page) {
+ err = DB_TOO_BIG_RECORD;
+ break;
+ }
+
file->n_rec += rows_added;
}
}
@@ -1721,6 +1834,10 @@ func_exit:
}
all_done:
+ if (conv_heap != NULL) {
+ mem_heap_free(conv_heap);
+ }
+
#ifdef FTS_INTERNAL_DIAG_PRINT
DEBUG_FTS_SORT_PRINT("FTS_SORT: Complete Scan Table\n");
#endif
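The row0merge.cc changes add the same CHAR padding during the clustered-index scan and, because padded records can grow past the page size, a guard that reports DB_TOO_BIG_RECORD instead of tripping the ut_ad(size < UNIV_PAGE_SIZE) assertion later in rec_offs_data_size(). A simplified sketch of that size check under assumed names (record_fits_in_page and the 16 KiB kPageSize are illustrative, standing in for the data_size accumulation and UNIV_PAGE_SIZE):

#include <cstddef>
#include <iostream>
#include <numeric>
#include <vector>

constexpr std::size_t kPageSize = 16 * 1024;   // assumed 16 KiB page, like the UNIV_PAGE_SIZE default

// Sum the per-field data lengths plus the record header overhead and
// decide whether the rebuilt REDUNDANT record still fits in one page.
static bool record_fits_in_page(const std::vector<std::size_t>& field_lens,
                                std::size_t extra_size)
{
    std::size_t data_size = std::accumulate(field_lens.begin(),
                                            field_lens.end(), extra_size);
    return data_size <= kPageSize;
}

int main()
{
    std::vector<std::size_t> lens(10, 3000);   // ten CHAR columns padded to 3000 bytes each
    std::cout << (record_fits_in_page(lens, 50) ? "ok" : "too big") << '\n';  // prints "too big"
    return 0;
}

In the patch itself the caller sets *exceed_page, and row_merge_read_clustered_index() turns that flag into err = DB_TOO_BIG_RECORD before the row is ever inserted.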
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index bf17673a036..c53fa530b42 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -3411,13 +3411,11 @@ row_truncate_table_for_mysql(
goto funct_exit;
}
- if (table->space && !table->dir_path_of_temp_table) {
+ if (table->space && !DICT_TF2_FLAG_IS_SET(table, DICT_TF2_TEMPORARY)) {
/* Discard and create the single-table tablespace. */
ulint space = table->space;
ulint flags = fil_space_get_flags(space);
- ut_a(!DICT_TF2_FLAG_IS_SET(table, DICT_TF2_TEMPORARY));
-
dict_get_and_save_data_dir_path(table, true);
if (flags != ULINT_UNDEFINED
@@ -4217,8 +4215,9 @@ row_drop_table_for_mysql(
is_temp = DICT_TF2_FLAG_IS_SET(table, DICT_TF2_TEMPORARY);
/* If there is a temp path then the temp flag is set.
- However, during recovery, we might have a temp flag but
- not know the temp path */
+ However, during recovery or reloading the table object
+ after eviction from data dictionary cache, we might
+ have a temp flag but not know the temp path */
ut_a(table->dir_path_of_temp_table == NULL || is_temp);
if (dict_table_is_discarded(table)
|| table->ibd_file_missing) {
@@ -4786,6 +4785,7 @@ row_rename_table_for_mysql(
ibool old_is_tmp, new_is_tmp;
pars_info_t* info = NULL;
int retry;
+ bool aux_fts_rename = false;
ut_a(old_name != NULL);
ut_a(new_name != NULL);
@@ -5072,34 +5072,8 @@ row_rename_table_for_mysql(
if (dict_table_has_fts_index(table)
&& !dict_tables_have_same_db(old_name, new_name)) {
err = fts_rename_aux_tables(table, new_name, trx);
-
- if (err != DB_SUCCESS && (table->space != 0)) {
- char* orig_name = table->name;
- trx_t* trx_bg = trx_allocate_for_background();
-
- /* If the first fts_rename fails, the trx would
- be rolled back and committed, we can't use it any more,
- so we have to start a new background trx here. */
- ut_a(trx_state_eq(trx, TRX_STATE_NOT_STARTED));
- trx_bg->op_info = "Revert the failing rename "
- "for fts aux tables";
- trx_bg->dict_operation_lock_mode = RW_X_LATCH;
- trx_start_for_ddl(trx_bg, TRX_DICT_OP_TABLE);
-
- /* If rename fails and table has its own tablespace,
- we need to call fts_rename_aux_tables again to
- revert the ibd file rename, which is not under the
- control of trx. Also notice the parent table name
- in cache is not changed yet. If the reverting fails,
- the ibd data may be left in the new database, which
- can be fixed only manually. */
- table->name = const_cast<char*>(new_name);
- fts_rename_aux_tables(table, old_name, trx_bg);
- table->name = orig_name;
-
- trx_bg->dict_operation_lock_mode = 0;
- trx_commit_for_mysql(trx_bg);
- trx_free_for_background(trx_bg);
+ if (err != DB_TABLE_NOT_FOUND) {
+ aux_fts_rename = true;
}
}
@@ -5200,6 +5174,37 @@ end:
}
funct_exit:
+ if (aux_fts_rename && err != DB_SUCCESS
+ && table != NULL && (table->space != 0)) {
+
+ char* orig_name = table->name;
+ trx_t* trx_bg = trx_allocate_for_background();
+
+ /* If the first fts_rename fails, the trx would
+ be rolled back and committed, we can't use it any more,
+ so we have to start a new background trx here. */
+ ut_a(trx_state_eq(trx_bg, TRX_STATE_NOT_STARTED));
+ trx_bg->op_info = "Revert the failing rename "
+ "for fts aux tables";
+ trx_bg->dict_operation_lock_mode = RW_X_LATCH;
+ trx_start_for_ddl(trx_bg, TRX_DICT_OP_TABLE);
+
+ /* If rename fails and table has its own tablespace,
+ we need to call fts_rename_aux_tables again to
+ revert the ibd file rename, which is not under the
+ control of trx. Also notice the parent table name
+ in cache is not changed yet. If the reverting fails,
+ the ibd data may be left in the new database, which
+ can be fixed only manually. */
+ table->name = const_cast<char*>(new_name);
+ fts_rename_aux_tables(table, old_name, trx_bg);
+ table->name = orig_name;
+
+ trx_bg->dict_operation_lock_mode = 0;
+ trx_commit_for_mysql(trx_bg);
+ trx_free_for_background(trx_bg);
+ }
+
if (table != NULL) {
dict_table_close(table, dict_locked, FALSE);
}
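The row0mysql.cc change moves the revert of the FTS auxiliary-table rename out of the rename step itself: the code now only records that the aux rename was attempted (aux_fts_rename) and performs the compensating rename back at funct_exit, and only when the overall operation did not succeed. The patch expresses this with an explicit flag checked at the exit label; a standalone C++ analogue of the same compensate-on-failure idea, under assumed names (rename_guard is hypothetical, not part of InnoDB), could look like this:

#include <functional>
#include <iostream>

// Arm a compensating action when the risky step runs; disarm it only if
// the whole operation succeeds, so the revert fires on every failure path.
struct rename_guard {
    bool armed = false;
    std::function<void()> revert;
    ~rename_guard() { if (armed && revert) revert(); }
    void disarm() { armed = false; }
};

int main()
{
    bool overall_success = false;            // pretend a later step failed
    rename_guard guard;
    guard.revert = [] { std::cout << "reverting aux table rename\n"; };
    guard.armed = true;                      // the aux table rename was attempted

    // ... further rename steps would run here ...

    if (overall_success) {
        guard.disarm();                      // keep the new names
    }
    return 0;                                // on failure, the guard reverts at scope exit
}

Centralising the revert at the common exit path avoids running it when a later step recovers, and keeps the background-transaction bookkeeping (trx_allocate_for_background / trx_commit_for_mysql) in one place.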