summaryrefslogtreecommitdiff
path: root/storage/innobase/row/row0import.cc
diff options
context:
space:
mode:
Diffstat (limited to 'storage/innobase/row/row0import.cc')
-rw-r--r--storage/innobase/row/row0import.cc1505
1 files changed, 744 insertions, 761 deletions
diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc
index f5e882f96fe..ad7f77f6215 100644
--- a/storage/innobase/row/row0import.cc
+++ b/storage/innobase/row/row0import.cc
@@ -25,15 +25,10 @@ Created 2012-02-08 by Sunny Bains.
*******************************************************/
#include "row0import.h"
-
-#ifdef UNIV_NONINL
-#include "row0import.ic"
-#endif
-
#include "btr0pcur.h"
-#include "btr0sea.h"
#include "que0que.h"
#include "dict0boot.h"
+#include "dict0load.h"
#include "ibuf0ibuf.h"
#include "pars0pars.h"
#include "row0upd.h"
@@ -51,9 +46,13 @@ Created 2012-02-08 by Sunny Bains.
#include <vector>
+#ifdef HAVE_MY_AES_H
+#include <my_aes.h>
+#endif
+
/** The size of the buffer to use for IO.
-@param n - page size of the tablespace.
-@retval number of pages */
+@param n physical page size
+@return number of pages */
#define IO_BUFFER_SIZE(n) ((1024 * 1024) / n)
/** For gathering stats on records during phase I */
@@ -116,7 +115,7 @@ struct row_import {
m_hostname(NULL),
m_table_name(NULL),
m_autoinc(0),
- m_page_size(0),
+ m_page_size(0, 0, false),
m_flags(0),
m_n_cols(0),
m_cols(NULL),
@@ -127,45 +126,39 @@ struct row_import {
~row_import() UNIV_NOTHROW;
- /**
- Find the index entry in in the indexes array.
- @param name - index name
+ /** Find the index entry in in the indexes array.
+ @param name index name
@return instance if found else 0. */
row_index_t* get_index(const char* name) const UNIV_NOTHROW;
- /**
- Get the number of rows in the index.
- @param name - index name
+ /** Get the number of rows in the index.
+ @param name index name
@return number of rows (doesn't include delete marked rows). */
ulint get_n_rows(const char* name) const UNIV_NOTHROW;
- /**
- Find the ordinal value of the column name in the cfg table columns.
- @param name - of column to look for.
+ /** Find the ordinal value of the column name in the cfg table columns.
+ @param name of column to look for.
@return ULINT_UNDEFINED if not found. */
ulint find_col(const char* name) const UNIV_NOTHROW;
- /**
- Get the number of rows for which purge failed during the convert phase.
- @param name - index name
+ /** Get the number of rows for which purge failed during the
+ convert phase.
+ @param name index name
@return number of rows for which purge failed. */
- ulint get_n_purge_failed(const char* name) const UNIV_NOTHROW;
+ ulint get_n_purge_failed(const char* name) const UNIV_NOTHROW;
- /**
- Check if the index is clean. ie. no delete-marked records
- @param name - index name
+ /** Check if the index is clean. ie. no delete-marked records
+ @param name index name
@return true if index needs to be purged. */
bool requires_purge(const char* name) const UNIV_NOTHROW
{
return(get_n_purge_failed(name) > 0);
}
- /**
- Set the index root <space, pageno> using the index name */
+ /** Set the index root <space, pageno> using the index name */
void set_root_by_name() UNIV_NOTHROW;
- /**
- Set the index root <space, pageno> using a heuristic
+ /** Set the index root <space, pageno> using a heuristic
@return DB_SUCCESS or error code */
dberr_t set_root_by_heuristic() UNIV_NOTHROW;
@@ -178,18 +171,16 @@ struct row_import {
THD* thd,
const dict_index_t* index) UNIV_NOTHROW;
- /**
- Check if the table schema that was read from the .cfg file matches the
- in memory table definition.
- @param thd - MySQL session variable
+ /** Check if the table schema that was read from the .cfg file
+ matches the in memory table definition.
+ @param thd MySQL session variable
@return DB_SUCCESS or error code. */
dberr_t match_table_columns(
THD* thd) UNIV_NOTHROW;
- /**
- Check if the table (and index) schema that was read from the .cfg file
- matches the in memory table definition.
- @param thd - MySQL session variable
+ /** Check if the table (and index) schema that was read from the
+ .cfg file matches the in memory table definition.
+ @param thd MySQL session variable
@return DB_SUCCESS or error code. */
dberr_t match_schema(
THD* thd) UNIV_NOTHROW;
@@ -205,7 +196,7 @@ struct row_import {
ib_uint64_t m_autoinc; /*!< Next autoinc value */
- ulint m_page_size; /*!< Tablespace page size */
+ page_size_t m_page_size; /*!< Tablespace page size */
ulint m_flags; /*!< Table flags */
@@ -231,15 +222,13 @@ struct row_import {
/** Use the page cursor to iterate over records in a block. */
class RecIterator {
public:
- /**
- Default constructor */
+ /** Default constructor */
RecIterator() UNIV_NOTHROW
{
memset(&m_cur, 0x0, sizeof(m_cur));
}
- /**
- Position the cursor on the first user record. */
+ /** Position the cursor on the first user record. */
void open(buf_block_t* block) UNIV_NOTHROW
{
page_cur_set_before_first(block, &m_cur);
@@ -249,8 +238,7 @@ public:
}
}
- /**
- Move to the next record. */
+ /** Move to the next record. */
void next() UNIV_NOTHROW
{
page_cur_move_to_next(&m_cur);
@@ -276,7 +264,7 @@ public:
bool remove(
const dict_index_t* index,
page_zip_des_t* page_zip,
- ulint* offsets) UNIV_NOTHROW
+ offset_t* offsets) UNIV_NOTHROW
{
/* We can't end up with an empty page unless it is root. */
if (page_get_n_recs(m_cur.block->frame) <= 1) {
@@ -296,9 +284,9 @@ couldn't purge the delete marked reocrds during Phase I. */
class IndexPurge {
public:
/** Constructor
- @param trx - the user transaction covering the import tablespace
- @param index - to be imported
- @param space_id - space id of the tablespace */
+ @param trx the user transaction covering the import tablespace
+ @param index to be imported
+ @param space_id space id of the tablespace */
IndexPurge(
trx_t* trx,
dict_index_t* index) UNIV_NOTHROW
@@ -307,9 +295,8 @@ public:
m_index(index),
m_n_rows(0)
{
- ib_logf(IB_LOG_LEVEL_INFO,
- "Phase II - Purge records from index %s",
- index->name);
+ ib::info() << "Phase II - Purge records from index "
+ << index->name;
}
/** Descructor */
@@ -327,28 +314,23 @@ public:
}
private:
- /**
- Begin import, position the cursor on the first record. */
+ /** Begin import, position the cursor on the first record. */
void open() UNIV_NOTHROW;
- /**
- Close the persistent curosr and commit the mini-transaction. */
+ /** Close the persistent curosr and commit the mini-transaction. */
void close() UNIV_NOTHROW;
- /**
- Position the cursor on the next record.
+ /** Position the cursor on the next record.
@return DB_SUCCESS or error code */
dberr_t next() UNIV_NOTHROW;
- /**
- Store the persistent cursor position and reopen the
+ /** Store the persistent cursor position and reopen the
B-tree cursor in BTR_MODIFY_TREE mode, because the
tree structure may be changed during a pessimistic delete. */
void purge_pessimistic_delete() UNIV_NOTHROW;
- /**
- Purge delete-marked records.
- @param offsets - current row offsets. */
+ /** Purge delete-marked records.
+ @param offsets current row offsets. */
void purge() UNIV_NOTHROW;
protected:
@@ -371,25 +353,25 @@ class AbstractCallback
{
public:
/** Constructor
- @param trx - covering transaction */
+ @param trx covering transaction */
AbstractCallback(trx_t* trx)
:
+ m_page_size(0, 0, false),
m_trx(trx),
m_space(ULINT_UNDEFINED),
m_xdes(),
m_xdes_page_no(ULINT_UNDEFINED),
m_space_flags(ULINT_UNDEFINED) UNIV_NOTHROW { }
- /**
- Free any extent descriptor instance */
+ /** Free any extent descriptor instance */
virtual ~AbstractCallback()
{
- delete [] m_xdes;
+ UT_DELETE_ARRAY(m_xdes);
}
/** Determine the page size to use for traversing the tablespace
- @param file_size - size of the tablespace file in bytes
- @param block - contents of the first page in the tablespace file.
+ @param file_size size of the tablespace file in bytes
+ @param block contents of the first page in the tablespace file.
@retval DB_SUCCESS or error code. */
virtual dberr_t init(
os_offset_t file_size,
@@ -398,33 +380,27 @@ public:
/** @return true if compressed table. */
bool is_compressed_table() const UNIV_NOTHROW
{
- return(get_zip_size() > 0);
+ return(get_page_size().is_compressed());
+ }
+
+ /** @return the tablespace flags */
+ ulint get_space_flags() const
+ {
+ return(m_space_flags);
}
/**
Set the name of the physical file and the file handle that is used
to open it for the file that is being iterated over.
- @param filename - then physical name of the tablespace file.
- @param file - OS file handle */
+ @param filename the physical name of the tablespace file
+ @param file OS file handle */
void set_file(const char* filename, pfs_os_file_t file) UNIV_NOTHROW
{
m_file = file;
m_filepath = filename;
}
- /** The compressed page size
- @return the compressed page size */
- ulint get_zip_size() const
- {
- return(m_zip_size);
- }
-
- /** The compressed page size
- @return the compressed page size */
- ulint get_page_size() const
- {
- return(m_page_size);
- }
+ const page_size_t& get_page_size() const { return m_page_size; }
const char* filename() const { return m_filepath; }
@@ -454,10 +430,9 @@ public:
}
protected:
- /**
- Get the physical offset of the extent descriptor within the page.
- @param page_no - page number of the extent descriptor
- @param page - contents of the page containing the extent descriptor.
+ /** Get the physical offset of the extent descriptor within the page.
+ @param page_no page number of the extent descriptor
+ @param page contents of the page containing the extent descriptor.
@return the start of the xdes array in a page */
const xdes_t* xdes(
ulint page_no,
@@ -465,19 +440,18 @@ protected:
{
ulint offset;
- offset = xdes_calc_descriptor_index(get_zip_size(), page_no);
+ offset = xdes_calc_descriptor_index(get_page_size(), page_no);
return(page + XDES_ARR_OFFSET + XDES_SIZE * offset);
}
- /**
- Set the current page directory (xdes). If the extent descriptor is
+ /** Set the current page directory (xdes). If the extent descriptor is
marked as free then free the current extent descriptor and set it to
0. This implies that all pages that are covered by this extent
descriptor are also freed.
- @param page_no - offset of page within the file
- @param page - page contents
+ @param page_no offset of page within the file
+ @param page page contents
@return DB_SUCCESS or error code. */
dberr_t set_current_xdes(
ulint page_no,
@@ -485,9 +459,8 @@ protected:
{
m_xdes_page_no = page_no;
- delete[] m_xdes;
-
- m_xdes = 0;
+ UT_DELETE_ARRAY(m_xdes);
+ m_xdes = NULL;
ulint state;
const xdes_t* xdesc = page + XDES_ARR_OFFSET;
@@ -496,39 +469,32 @@ protected:
if (state != XDES_FREE) {
- m_xdes = new(std::nothrow) xdes_t[m_page_size];
+ m_xdes = UT_NEW_ARRAY_NOKEY(xdes_t,
+ m_page_size.physical());
/* Trigger OOM */
- DBUG_EXECUTE_IF("ib_import_OOM_13",
- delete [] m_xdes; m_xdes = 0;);
+ DBUG_EXECUTE_IF(
+ "ib_import_OOM_13",
+ UT_DELETE_ARRAY(m_xdes);
+ m_xdes = NULL;
+ );
- if (m_xdes == 0) {
+ if (m_xdes == NULL) {
return(DB_OUT_OF_MEMORY);
}
- memcpy(m_xdes, page, m_page_size);
+ memcpy(m_xdes, page, m_page_size.physical());
}
return(DB_SUCCESS);
}
- /**
- @return true if it is a root page */
- bool is_root_page(const page_t* page) const UNIV_NOTHROW
- {
- ut_ad(fil_page_get_type(page) == FIL_PAGE_INDEX);
-
- return(mach_read_from_4(page + FIL_PAGE_NEXT) == FIL_NULL
- && mach_read_from_4(page + FIL_PAGE_PREV) == FIL_NULL);
- }
-
- /**
- Check if the page is marked as free in the extent descriptor.
- @param page_no - page number to check in the extent descriptor.
+ /** Check if the page is marked as free in the extent descriptor.
+ @param page_no page number to check in the extent descriptor.
@return true if the page is marked as free */
bool is_free(ulint page_no) const UNIV_NOTHROW
{
- ut_a(xdes_calc_descriptor_page(get_zip_size(), page_no)
+ ut_a(xdes_calc_descriptor_page(get_page_size(), page_no)
== m_xdes_page_no);
if (m_xdes != 0) {
@@ -543,11 +509,8 @@ protected:
}
protected:
- /** Compressed table page size */
- ulint m_zip_size;
-
/** The tablespace page size. */
- ulint m_page_size;
+ page_size_t m_page_size;
/** File handle to the tablespace */
pfs_os_file_t m_file;
@@ -583,8 +546,8 @@ protected:
};
/** Determine the page size to use for traversing the tablespace
-@param file_size - size of the tablespace file in bytes
-@param block - contents of the first page in the tablespace file.
+@param file_size size of the tablespace file in bytes
+@param block contents of the first page in the tablespace file.
@retval DB_SUCCESS or error code. */
dberr_t
AbstractCallback::init(
@@ -597,9 +560,8 @@ AbstractCallback::init(
if (!fsp_flags_is_valid(m_space_flags, true)) {
ulint cflags = fsp_flags_convert_from_101(m_space_flags);
if (cflags == ULINT_UNDEFINED) {
- ib_logf(IB_LOG_LEVEL_ERROR,
- "Invalid FSP_SPACE_FLAGS=0x%x",
- int(m_space_flags));
+ ib::error() << "Invalid FSP_SPACE_FLAGS="
+ << ib::hex(m_space_flags);
return(DB_CORRUPTION);
}
m_space_flags = cflags;
@@ -607,39 +569,21 @@ AbstractCallback::init(
/* Clear the DATA_DIR flag, which is basically garbage. */
m_space_flags &= ~(1U << FSP_FLAGS_POS_RESERVED);
+ m_page_size.copy_from(page_size_t(m_space_flags));
- /* Since we don't know whether it is a compressed table
- or not, the data is always read into the block->frame. */
-
- m_zip_size = fsp_header_get_zip_size(page);
-
- if (!ut_is_2pow(m_zip_size) || m_zip_size > UNIV_ZIP_SIZE_MAX) {
- return(DB_CORRUPTION);
- }
-
- /* Set the page size used to traverse the tablespace. */
-
- m_page_size = (is_compressed_table())
- ? get_zip_size() : fsp_flags_get_page_size(m_space_flags);
+ if (!is_compressed_table() && !m_page_size.equals_to(univ_page_size)) {
- if (m_page_size == 0) {
- ib_logf(IB_LOG_LEVEL_ERROR, "Page size is 0");
- return(DB_CORRUPTION);
- } else if (!is_compressed_table() && m_page_size != UNIV_PAGE_SIZE) {
-
- ib_logf(IB_LOG_LEVEL_ERROR,
- "Page size " ULINTPF " of ibd file is not the same "
- "as the server page size " ULINTPF,
- m_page_size, UNIV_PAGE_SIZE);
+ ib::error() << "Page size " << m_page_size.physical()
+ << " of ibd file is not the same as the server page"
+ " size " << univ_page_size.physical();
return(DB_CORRUPTION);
- } else if ((file_size % m_page_size)) {
+ } else if (file_size % m_page_size.physical() != 0) {
- ib_logf(IB_LOG_LEVEL_ERROR,
- "File size " UINT64PF " is not a multiple "
- "of the page size " ULINTPF,
- (ib_uint64_t) file_size, m_page_size);
+ ib::error() << "File size " << file_size << " is not a"
+ " multiple of the page size "
+ << m_page_size.physical();
return(DB_CORRUPTION);
}
@@ -670,11 +614,11 @@ struct FetchIndexRootPages : public AbstractCallback {
ulint m_page_no; /*!< Root page number */
};
- typedef std::vector<Index> Indexes;
+ typedef std::vector<Index, ut_allocator<Index> > Indexes;
/** Constructor
- @param trx - covering (user) transaction
- @param table - table definition in server .*/
+ @param trx covering (user) transaction
+ @param table table definition in server .*/
FetchIndexRootPages(const dict_table_t* table, trx_t* trx)
:
AbstractCallback(trx),
@@ -690,8 +634,7 @@ struct FetchIndexRootPages : public AbstractCallback {
return(m_space);
}
- /**
- Called for each block as it is read from the file.
+ /** Called for each block as it is read from the file.
@param block block to convert, it is not from the buffer pool.
@retval DB_SUCCESS or error code. */
dberr_t operator()(buf_block_t* block) UNIV_NOTHROW;
@@ -707,8 +650,7 @@ struct FetchIndexRootPages : public AbstractCallback {
Indexes m_indexes;
};
-/**
-Called for each block as it is read from the file. Check index pages to
+/** Called for each block as it is read from the file. Check index pages to
determine the exact row format. We can't get that from the tablespace
header flags alone.
@@ -723,15 +665,14 @@ dberr_t FetchIndexRootPages::operator()(buf_block_t* block) UNIV_NOTHROW
ulint page_type = fil_page_get_type(page);
if (page_type == FIL_PAGE_TYPE_XDES) {
- return set_current_xdes(block->page.offset, page);
- } else if (page_type == FIL_PAGE_INDEX
- && !is_free(block->page.offset)
- && is_root_page(page)) {
+ return set_current_xdes(block->page.id.page_no(), page);
+ } else if (fil_page_index_page_check(page)
+ && !is_free(block->page.id.page_no())
+ && !page_has_siblings(page)) {
index_id_t id = btr_page_get_index_id(page);
- ulint page_no = buf_block_get_page_no(block);
- m_indexes.push_back(Index(id, page_no));
+ m_indexes.push_back(Index(id, block->page.id.page_no()));
if (m_indexes.size() == 1) {
/* Check that the tablespace flags match the table flags. */
@@ -760,23 +701,26 @@ FetchIndexRootPages::build_row_import(row_import* cfg) const UNIV_NOTHROW
Indexes::const_iterator end = m_indexes.end();
ut_a(cfg->m_table == m_table);
- cfg->m_page_size = m_page_size;
+ cfg->m_page_size.copy_from(m_page_size);
cfg->m_n_indexes = m_indexes.size();
if (cfg->m_n_indexes == 0) {
- ib_logf(IB_LOG_LEVEL_ERROR, "No B+Tree found in tablespace");
+ ib::error() << "No B+Tree found in tablespace";
return(DB_CORRUPTION);
}
- cfg->m_indexes = new(std::nothrow) row_index_t[cfg->m_n_indexes];
+ cfg->m_indexes = UT_NEW_ARRAY_NOKEY(row_index_t, cfg->m_n_indexes);
/* Trigger OOM */
- DBUG_EXECUTE_IF("ib_import_OOM_11",
- delete [] cfg->m_indexes; cfg->m_indexes = 0;);
+ DBUG_EXECUTE_IF(
+ "ib_import_OOM_11",
+ UT_DELETE_ARRAY(cfg->m_indexes);
+ cfg->m_indexes = NULL;
+ );
- if (cfg->m_indexes == 0) {
+ if (cfg->m_indexes == NULL) {
return(DB_OUT_OF_MEMORY);
}
@@ -790,18 +734,20 @@ FetchIndexRootPages::build_row_import(row_import* cfg) const UNIV_NOTHROW
char name[BUFSIZ];
- ut_snprintf(name, sizeof(name), "index" IB_ID_FMT, it->m_id);
+ snprintf(name, sizeof(name), "index" IB_ID_FMT, it->m_id);
ulint len = strlen(name) + 1;
- cfg_index->m_name = new(std::nothrow) byte[len];
+ cfg_index->m_name = UT_NEW_ARRAY_NOKEY(byte, len);
/* Trigger OOM */
- DBUG_EXECUTE_IF("ib_import_OOM_12",
- delete [] cfg_index->m_name;
- cfg_index->m_name = 0;);
+ DBUG_EXECUTE_IF(
+ "ib_import_OOM_12",
+ UT_DELETE_ARRAY(cfg_index->m_name);
+ cfg_index->m_name = NULL;
+ );
- if (cfg_index->m_name == 0) {
+ if (cfg_index->m_name == NULL) {
return(DB_OUT_OF_MEMORY);
}
@@ -845,8 +791,8 @@ tablespace file.
class PageConverter : public AbstractCallback {
public:
/** Constructor
- * @param cfg - config of table being imported.
- * @param trx - transaction covering the import */
+ @param cfg config of table being imported.
+ @param trx transaction covering the import */
PageConverter(row_import* cfg, trx_t* trx) UNIV_NOTHROW;
virtual ~PageConverter() UNIV_NOTHROW
@@ -863,92 +809,80 @@ public:
return(m_cfg->m_table->space);
}
- /**
- Called for each block as it is read from the file.
+ /** Called for each block as it is read from the file.
@param block block to convert, it is not from the buffer pool.
@retval DB_SUCCESS or error code. */
dberr_t operator()(buf_block_t* block) UNIV_NOTHROW;
private:
- /**
- Update the page, set the space id, max trx id and index id.
- @param block - block read from file
- @param page_type - type of the page
+ /** Update the page, set the space id, max trx id and index id.
+ @param block block read from file
+ @param page_type type of the page
@retval DB_SUCCESS or error code */
dberr_t update_page(
buf_block_t* block,
ulint& page_type) UNIV_NOTHROW;
- /**
- Update the space, index id, trx id.
- @param block - block to convert
+ /** Update the space, index id, trx id.
+ @param block block to convert
@return DB_SUCCESS or error code */
dberr_t update_index_page(buf_block_t* block) UNIV_NOTHROW;
/** Update the BLOB refrences and write UNDO log entries for
rows that can't be purged optimistically.
- @param block - block to update
+ @param block block to update
@retval DB_SUCCESS or error code */
dberr_t update_records(buf_block_t* block) UNIV_NOTHROW;
- /**
- Validate the space flags and update tablespace header page.
- @param block - block read from file, not from the buffer pool.
+ /** Validate the space flags and update tablespace header page.
+ @param block block read from file, not from the buffer pool.
@retval DB_SUCCESS or error code */
dberr_t update_header(buf_block_t* block) UNIV_NOTHROW;
- /**
- Adjust the BLOB reference for a single column that is externally stored
- @param rec - record to update
- @param offsets - column offsets for the record
- @param i - column ordinal value
+ /** Adjust the BLOB reference for a single column that is externally stored
+ @param rec record to update
+ @param offsets column offsets for the record
+ @param i column ordinal value
@return DB_SUCCESS or error code */
dberr_t adjust_cluster_index_blob_column(
rec_t* rec,
- const ulint* offsets,
+ const offset_t* offsets,
ulint i) UNIV_NOTHROW;
- /**
- Adjusts the BLOB reference in the clustered index row for all
+ /** Adjusts the BLOB reference in the clustered index row for all
externally stored columns.
- @param rec - record to update
- @param offsets - column offsets for the record
+ @param rec record to update
+ @param offsets column offsets for the record
@return DB_SUCCESS or error code */
dberr_t adjust_cluster_index_blob_columns(
rec_t* rec,
- const ulint* offsets) UNIV_NOTHROW;
+ const offset_t* offsets) UNIV_NOTHROW;
- /**
- In the clustered index, adjist the BLOB pointers as needed.
+ /** In the clustered index, adjist the BLOB pointers as needed.
Also update the BLOB reference, write the new space id.
- @param rec - record to update
- @param offsets - column offsets for the record
+ @param rec record to update
+ @param offsets column offsets for the record
@return DB_SUCCESS or error code */
dberr_t adjust_cluster_index_blob_ref(
rec_t* rec,
- const ulint* offsets) UNIV_NOTHROW;
+ const offset_t* offsets) UNIV_NOTHROW;
- /**
- Purge delete-marked records, only if it is possible to do
+ /** Purge delete-marked records, only if it is possible to do
so without re-organising the B+tree.
- @param offsets - current row offsets.
+ @param offsets current row offsets.
@retval true if purged */
- bool purge(const ulint* offsets) UNIV_NOTHROW;
+ bool purge(const offset_t* offsets) UNIV_NOTHROW;
- /**
- Adjust the BLOB references and sys fields for the current record.
- @param index - the index being converted
- @param rec - record to update
- @param offsets - column offsets for the record
- @param deleted - true if row is delete marked
+ /** Adjust the BLOB references and sys fields for the current record.
+ @param index the index being converted
+ @param rec record to update
+ @param offsets column offsets for the record
@return DB_SUCCESS or error code. */
dberr_t adjust_cluster_record(
const dict_index_t* index,
rec_t* rec,
- const ulint* offsets,
- bool deleted) UNIV_NOTHROW;
+ const offset_t* offsets) UNIV_NOTHROW;
- /**
- Find an index with the matching id.
+ /** Find an index with the matching id.
@return row_index_t* instance or 0 */
row_index_t* find_index(index_id_t id) UNIV_NOTHROW
{
@@ -980,10 +914,10 @@ private:
RecIterator m_rec_iter;
/** Record offset */
- ulint m_offsets_[REC_OFFS_NORMAL_SIZE];
+ offset_t m_offsets_[REC_OFFS_NORMAL_SIZE];
/** Pointer to m_offsets_ */
- ulint* m_offsets;
+ offset_t* m_offsets;
/** Memory heap for the record offsets */
mem_heap_t* m_heap;
@@ -997,9 +931,9 @@ row_import destructor. */
row_import::~row_import() UNIV_NOTHROW
{
for (ulint i = 0; m_indexes != 0 && i < m_n_indexes; ++i) {
- delete [] m_indexes[i].m_name;
+ UT_DELETE_ARRAY(m_indexes[i].m_name);
- if (m_indexes[i].m_fields == 0) {
+ if (m_indexes[i].m_fields == NULL) {
continue;
}
@@ -1007,26 +941,25 @@ row_import::~row_import() UNIV_NOTHROW
ulint n_fields = m_indexes[i].m_n_fields;
for (ulint j = 0; j < n_fields; ++j) {
- delete [] fields[j].name;
+ UT_DELETE_ARRAY(const_cast<char*>(fields[j].name()));
}
- delete [] fields;
+ UT_DELETE_ARRAY(fields);
}
for (ulint i = 0; m_col_names != 0 && i < m_n_cols; ++i) {
- delete [] m_col_names[i];
+ UT_DELETE_ARRAY(m_col_names[i]);
}
- delete [] m_cols;
- delete [] m_indexes;
- delete [] m_col_names;
- delete [] m_table_name;
- delete [] m_hostname;
+ UT_DELETE_ARRAY(m_cols);
+ UT_DELETE_ARRAY(m_indexes);
+ UT_DELETE_ARRAY(m_col_names);
+ UT_DELETE_ARRAY(m_table_name);
+ UT_DELETE_ARRAY(m_hostname);
}
-/**
-Find the index entry in in the indexes array.
-@param name - index name
+/** Find the index entry in in the indexes array.
+@param name index name
@return instance if found else 0. */
row_index_t*
row_import::get_index(
@@ -1047,9 +980,8 @@ row_import::get_index(
return(0);
}
-/**
-Get the number of rows in the index.
-@param name - index name
+/** Get the number of rows in the index.
+@param name index name
@return number of rows (doesn't include delete marked rows). */
ulint
row_import::get_n_rows(
@@ -1062,9 +994,8 @@ row_import::get_n_rows(
return(index->m_stats.m_n_rows);
}
-/**
-Get the number of rows for which purge failed uding the convert phase.
-@param name - index name
+/** Get the number of rows for which purge failed uding the convert phase.
+@param name index name
@return number of rows for which purge failed. */
ulint
row_import::get_n_purge_failed(
@@ -1077,9 +1008,8 @@ row_import::get_n_purge_failed(
return(index->m_stats.m_n_purge_failed);
}
-/**
-Find the ordinal value of the column name in the cfg table columns.
-@param name - of column to look for.
+/** Find the ordinal value of the column name in the cfg table columns.
+@param name of column to look for.
@return ULINT_UNDEFINED if not found. */
ulint
row_import::find_col(
@@ -1114,9 +1044,9 @@ row_import::match_index_columns(
if (cfg_index == 0) {
ib_errf(thd, IB_LOG_LEVEL_ERROR,
- ER_TABLE_SCHEMA_MISMATCH,
- "Index %s not found in tablespace meta-data file.",
- index->name);
+ ER_TABLE_SCHEMA_MISMATCH,
+ "Index %s not found in tablespace meta-data file.",
+ index->name());
return(DB_ERROR);
}
@@ -1124,10 +1054,10 @@ row_import::match_index_columns(
if (cfg_index->m_n_fields != index->n_fields) {
ib_errf(thd, IB_LOG_LEVEL_ERROR,
- ER_TABLE_SCHEMA_MISMATCH,
- "Index field count %u doesn't match"
- " tablespace metadata file value " ULINTPF,
- index->n_fields, cfg_index->m_n_fields);
+ ER_TABLE_SCHEMA_MISMATCH,
+ "Index field count %u doesn't match"
+ " tablespace metadata file value " ULINTPF,
+ index->n_fields, cfg_index->m_n_fields);
return(DB_ERROR);
}
@@ -1139,13 +1069,13 @@ row_import::match_index_columns(
for (ulint i = 0; i < index->n_fields; ++i, ++field, ++cfg_field) {
- if (strcmp(field->name, cfg_field->name) != 0) {
+ if (strcmp(field->name(), cfg_field->name()) != 0) {
ib_errf(thd, IB_LOG_LEVEL_ERROR,
- ER_TABLE_SCHEMA_MISMATCH,
- "Index field name %s doesn't match"
- " tablespace metadata field name %s"
- " for field position " ULINTPF,
- field->name, cfg_field->name, i);
+ ER_TABLE_SCHEMA_MISMATCH,
+ "Index field name %s doesn't match"
+ " tablespace metadata field name %s"
+ " for field position " ULINTPF,
+ field->name(), cfg_field->name(), i);
err = DB_ERROR;
}
@@ -1155,7 +1085,7 @@ row_import::match_index_columns(
ER_TABLE_SCHEMA_MISMATCH,
"Index %s field %s prefix len %u"
" doesn't match metadata file value %u",
- index->name, field->name,
+ index->name(), field->name(),
field->prefix_len, cfg_field->prefix_len);
err = DB_ERROR;
@@ -1166,7 +1096,7 @@ row_import::match_index_columns(
ER_TABLE_SCHEMA_MISMATCH,
"Index %s field %s fixed len %u"
" doesn't match metadata file value %u",
- index->name, field->name,
+ index->name(), field->name(),
field->fixed_len,
cfg_field->fixed_len);
@@ -1177,10 +1107,9 @@ row_import::match_index_columns(
return(err);
}
-/**
-Check if the table schema that was read from the .cfg file matches the
+/** Check if the table schema that was read from the .cfg file matches the
in memory table definition.
-@param thd - MySQL session variable
+@param thd MySQL session variable
@return DB_SUCCESS or error code. */
dberr_t
row_import::match_table_columns(
@@ -1225,60 +1154,82 @@ row_import::match_table_columns(
if (cfg_col->prtype != col->prtype) {
ib_errf(thd,
- IB_LOG_LEVEL_ERROR,
- ER_TABLE_SCHEMA_MISMATCH,
- "Column %s precise type mismatch.",
- col_name);
+ IB_LOG_LEVEL_ERROR,
+ ER_TABLE_SCHEMA_MISMATCH,
+ "Column %s precise type mismatch,"
+ " it's 0X%X in the table and 0X%X"
+ " in the tablespace meta file",
+ col_name, col->prtype, cfg_col->prtype);
err = DB_ERROR;
}
if (cfg_col->mtype != col->mtype) {
ib_errf(thd,
- IB_LOG_LEVEL_ERROR,
- ER_TABLE_SCHEMA_MISMATCH,
- "Column %s main type mismatch.",
- col_name);
+ IB_LOG_LEVEL_ERROR,
+ ER_TABLE_SCHEMA_MISMATCH,
+ "Column %s main type mismatch,"
+ " it's 0X%X in the table and 0X%X"
+ " in the tablespace meta file",
+ col_name, col->mtype, cfg_col->mtype);
err = DB_ERROR;
}
if (cfg_col->len != col->len) {
ib_errf(thd,
- IB_LOG_LEVEL_ERROR,
- ER_TABLE_SCHEMA_MISMATCH,
- "Column %s length mismatch.",
- col_name);
+ IB_LOG_LEVEL_ERROR,
+ ER_TABLE_SCHEMA_MISMATCH,
+ "Column %s length mismatch,"
+ " it's %u in the table and %u"
+ " in the tablespace meta file",
+ col_name, col->len, cfg_col->len);
err = DB_ERROR;
}
if (cfg_col->mbminlen != col->mbminlen
|| cfg_col->mbmaxlen != col->mbmaxlen) {
ib_errf(thd,
- IB_LOG_LEVEL_ERROR,
- ER_TABLE_SCHEMA_MISMATCH,
- "Column %s multi-byte len mismatch.",
- col_name);
+ IB_LOG_LEVEL_ERROR,
+ ER_TABLE_SCHEMA_MISMATCH,
+ "Column %s multi-byte len mismatch,"
+ " it's %u-%u in the table and %u-%u"
+ " in the tablespace meta file",
+ col_name, col->mbminlen, col->mbmaxlen,
+ cfg_col->mbminlen, cfg_col->mbmaxlen);
err = DB_ERROR;
}
if (cfg_col->ind != col->ind) {
+ ib_errf(thd,
+ IB_LOG_LEVEL_ERROR,
+ ER_TABLE_SCHEMA_MISMATCH,
+ "Column %s position mismatch,"
+ " it's %u in the table and %u"
+ " in the tablespace meta file",
+ col_name, col->ind, cfg_col->ind);
err = DB_ERROR;
}
if (cfg_col->ord_part != col->ord_part) {
ib_errf(thd,
- IB_LOG_LEVEL_ERROR,
- ER_TABLE_SCHEMA_MISMATCH,
- "Column %s ordering mismatch.",
- col_name);
+ IB_LOG_LEVEL_ERROR,
+ ER_TABLE_SCHEMA_MISMATCH,
+ "Column %s ordering mismatch,"
+ " it's %u in the table and %u"
+ " in the tablespace meta file",
+ col_name, col->ord_part,
+ cfg_col->ord_part);
err = DB_ERROR;
}
if (cfg_col->max_prefix != col->max_prefix) {
ib_errf(thd,
- IB_LOG_LEVEL_ERROR,
- ER_TABLE_SCHEMA_MISMATCH,
- "Column %s max prefix mismatch.",
- col_name);
+ IB_LOG_LEVEL_ERROR,
+ ER_TABLE_SCHEMA_MISMATCH,
+ "Column %s max prefix mismatch"
+ " it's %u in the table and %u"
+ " in the tablespace meta file",
+ col_name, col->max_prefix,
+ cfg_col->max_prefix);
err = DB_ERROR;
}
}
@@ -1287,10 +1238,9 @@ row_import::match_table_columns(
return(err);
}
-/**
-Check if the table (and index) schema that was read from the .cfg file
+/** Check if the table (and index) schema that was read from the .cfg file
matches the in memory table definition.
-@param thd - MySQL session variable
+@param thd MySQL session variable
@return DB_SUCCESS or error code. */
dberr_t
row_import::match_schema(
@@ -1346,9 +1296,9 @@ uncompressed:
ib_errf(thd, IB_LOG_LEVEL_ERROR, ER_TABLE_SCHEMA_MISMATCH,
"Table flags don't match, server table has 0x%x"
- " and the meta-data file has 0x%lx;"
+ " and the meta-data file has 0x" ULINTPFx ";"
" .cfg file uses %s",
- m_table->flags, ulong(m_flags), msg);
+ m_table->flags, m_flags, msg);
return(DB_ERROR);
} else if (m_table->n_cols != m_n_cols) {
@@ -1439,17 +1389,9 @@ row_import::set_root_by_heuristic() UNIV_NOTHROW
if (UT_LIST_GET_LEN(m_table->indexes) != m_n_indexes) {
- char table_name[MAX_FULL_NAME_LEN + 1];
-
- innobase_format_name(
- table_name, sizeof(table_name), m_table->name, FALSE);
-
- ib_logf(IB_LOG_LEVEL_WARN,
- "Table %s should have " ULINTPF
- " indexes but the tablespace has " ULINTPF " indexes",
- table_name,
- UT_LIST_GET_LEN(m_table->indexes),
- m_n_indexes);
+ ib::warn() << "Table " << m_table->name << " should have "
+ << UT_LIST_GET_LEN(m_table->indexes) << " indexes but"
+ " the tablespace has " << m_n_indexes << " indexes";
}
dict_mutex_enter_for_mysql();
@@ -1463,22 +1405,23 @@ row_import::set_root_by_heuristic() UNIV_NOTHROW
if (index->type & DICT_FTS) {
index->type |= DICT_CORRUPT;
- ib_logf(IB_LOG_LEVEL_WARN,
- "Skipping FTS index: %s", index->name);
+ ib::warn() << "Skipping FTS index: " << index->name;
} else if (i < m_n_indexes) {
- delete [] cfg_index[i].m_name;
+ UT_DELETE_ARRAY(cfg_index[i].m_name);
ulint len = strlen(index->name) + 1;
- cfg_index[i].m_name = new(std::nothrow) byte[len];
+ cfg_index[i].m_name = UT_NEW_ARRAY_NOKEY(byte, len);
/* Trigger OOM */
- DBUG_EXECUTE_IF("ib_import_OOM_14",
- delete[] cfg_index[i].m_name;
- cfg_index[i].m_name = 0;);
+ DBUG_EXECUTE_IF(
+ "ib_import_OOM_14",
+ UT_DELETE_ARRAY(cfg_index[i].m_name);
+ cfg_index[i].m_name = NULL;
+ );
- if (cfg_index[i].m_name == 0) {
+ if (cfg_index[i].m_name == NULL) {
err = DB_OUT_OF_MEMORY;
break;
}
@@ -1581,13 +1524,70 @@ IndexPurge::next() UNIV_NOTHROW
mtr_set_log_mode(&m_mtr, MTR_LOG_NO_REDO);
btr_pcur_restore_position(BTR_MODIFY_LEAF, &m_pcur, &m_mtr);
+ /* The following is based on btr_pcur_move_to_next_user_rec(). */
+ m_pcur.old_stored = false;
+ ut_ad(m_pcur.latch_mode == BTR_MODIFY_LEAF);
+ do {
+ if (btr_pcur_is_after_last_on_page(&m_pcur)) {
+ if (btr_pcur_is_after_last_in_tree(&m_pcur, &m_mtr)) {
+ return DB_END_OF_INDEX;
+ }
- if (!btr_pcur_move_to_next_user_rec(&m_pcur, &m_mtr)) {
+ buf_block_t* block = btr_pcur_get_block(&m_pcur);
+ uint32_t next_page = btr_page_get_next(block->frame);
- return(DB_END_OF_INDEX);
- }
+ /* MDEV-13542 FIXME: Make these checks part of
+ btr_pcur_move_to_next_page(), and introduce a
+ return status that will be checked in all callers! */
+ switch (next_page) {
+ default:
+ if (next_page != block->page.id.page_no()) {
+ break;
+ }
+ /* MDEV-20931 FIXME: Check that
+ next_page is within the tablespace
+ bounds! Also check that it is not a
+ change buffer bitmap page. */
+ /* fall through */
+ case 0:
+ case 1:
+ case FIL_NULL:
+ return DB_CORRUPTION;
+ }
- return(DB_SUCCESS);
+ dict_index_t* index = m_pcur.btr_cur.index;
+ buf_block_t* next_block = btr_block_get(
+ page_id_t(block->page.id.space(), next_page),
+ block->page.size, BTR_MODIFY_LEAF, index,
+ &m_mtr);
+
+ if (UNIV_UNLIKELY(!next_block
+ || !fil_page_index_page_check(
+ next_block->frame)
+ || !!dict_index_is_spatial(index)
+ != (fil_page_get_type(
+ next_block->frame)
+ == FIL_PAGE_RTREE)
+ || page_is_comp(next_block->frame)
+ != page_is_comp(block->frame)
+ || btr_page_get_prev(
+ next_block->frame)
+ != block->page.id.page_no())) {
+ return DB_CORRUPTION;
+ }
+
+ btr_leaf_page_release(block, BTR_MODIFY_LEAF, &m_mtr);
+
+ page_cur_set_before_first(next_block,
+ &m_pcur.btr_cur.page_cur);
+
+ ut_d(page_check_dir(next_block->frame));
+ } else {
+ btr_pcur_move_to_next_on_page(&m_pcur);
+ }
+ } while (!btr_pcur_is_on_user_rec(&m_pcur));
+
+ return DB_SUCCESS;
}
/**
@@ -1599,14 +1599,15 @@ IndexPurge::purge_pessimistic_delete() UNIV_NOTHROW
{
dberr_t err;
- btr_pcur_restore_position(BTR_MODIFY_TREE, &m_pcur, &m_mtr);
+ btr_pcur_restore_position(BTR_MODIFY_TREE | BTR_LATCH_FOR_DELETE,
+ &m_pcur, &m_mtr);
ut_ad(rec_get_deleted_flag(
btr_pcur_get_rec(&m_pcur),
dict_table_is_comp(m_index->table)));
btr_cur_pessimistic_delete(
- &err, FALSE, btr_pcur_get_btr_cur(&m_pcur), 0, RB_NONE, &m_mtr);
+ &err, FALSE, btr_pcur_get_btr_cur(&m_pcur), 0, false, &m_mtr);
ut_a(err == DB_SUCCESS);
@@ -1630,10 +1631,9 @@ IndexPurge::purge() UNIV_NOTHROW
btr_pcur_restore_position(BTR_MODIFY_LEAF, &m_pcur, &m_mtr);
}
-/**
-Constructor
-* @param cfg - config of table being imported.
-* @param trx - transaction covering the import */
+/** Constructor
+@param cfg config of table being imported.
+@param trx transaction covering the import */
inline
PageConverter::PageConverter(
row_import* cfg,
@@ -1653,17 +1653,16 @@ PageConverter::PageConverter(
rec_offs_init(m_offsets_);
}
-/**
-Adjust the BLOB reference for a single column that is externally stored
-@param rec - record to update
-@param offsets - column offsets for the record
-@param i - column ordinal value
+/** Adjust the BLOB reference for a single column that is externally stored
+@param rec record to update
+@param offsets column offsets for the record
+@param i column ordinal value
@return DB_SUCCESS or error code */
inline
dberr_t
PageConverter::adjust_cluster_index_blob_column(
rec_t* rec,
- const ulint* offsets,
+ const offset_t* offsets,
ulint i) UNIV_NOTHROW
{
ulint len;
@@ -1676,47 +1675,38 @@ PageConverter::adjust_cluster_index_blob_column(
if (len < BTR_EXTERN_FIELD_REF_SIZE) {
- char index_name[MAX_FULL_NAME_LEN + 1];
-
- innobase_format_name(
- index_name, sizeof(index_name),
- m_cluster_index->name, TRUE);
-
ib_errf(m_trx->mysql_thd, IB_LOG_LEVEL_ERROR,
ER_INNODB_INDEX_CORRUPT,
"Externally stored column(" ULINTPF
") has a reference length of " ULINTPF
" in the cluster index %s",
- i, len, index_name);
+ i, len, m_cluster_index->name());
return(DB_CORRUPTION);
}
field += BTR_EXTERN_SPACE_ID - BTR_EXTERN_FIELD_REF_SIZE + len;
- if (is_compressed_table()) {
- mach_write_to_4(field, get_space_id());
+ mach_write_to_4(field, get_space_id());
+ if (m_page_zip_ptr) {
page_zip_write_blob_ptr(
m_page_zip_ptr, rec, m_cluster_index, offsets, i, 0);
- } else {
- mlog_write_ulint(field, get_space_id(), MLOG_4BYTES, 0);
}
return(DB_SUCCESS);
}
-/**
-Adjusts the BLOB reference in the clustered index row for all externally
+/** Adjusts the BLOB reference in the clustered index row for all externally
stored columns.
-@param rec - record to update
-@param offsets - column offsets for the record
+@param rec record to update
+@param offsets column offsets for the record
@return DB_SUCCESS or error code */
inline
dberr_t
PageConverter::adjust_cluster_index_blob_columns(
rec_t* rec,
- const ulint* offsets) UNIV_NOTHROW
+ const offset_t* offsets) UNIV_NOTHROW
{
ut_ad(rec_offs_any_extern(offsets));
@@ -1740,17 +1730,16 @@ PageConverter::adjust_cluster_index_blob_columns(
return(DB_SUCCESS);
}
-/**
-In the clustered index, adjust BLOB pointers as needed. Also update the
+/** In the clustered index, adjust BLOB pointers as needed. Also update the
BLOB reference, write the new space id.
-@param rec - record to update
-@param offsets - column offsets for the record
+@param rec record to update
+@param offsets column offsets for the record
@return DB_SUCCESS or error code */
inline
dberr_t
PageConverter::adjust_cluster_index_blob_ref(
rec_t* rec,
- const ulint* offsets) UNIV_NOTHROW
+ const offset_t* offsets) UNIV_NOTHROW
{
if (rec_offs_any_extern(offsets)) {
dberr_t err;
@@ -1765,14 +1754,13 @@ PageConverter::adjust_cluster_index_blob_ref(
return(DB_SUCCESS);
}
-/**
-Purge delete-marked records, only if it is possible to do so without
+/** Purge delete-marked records, only if it is possible to do so without
re-organising the B+tree.
-@param offsets - current row offsets.
+@param offsets current row offsets.
@return true if purge succeeded */
inline
bool
-PageConverter::purge(const ulint* offsets) UNIV_NOTHROW
+PageConverter::purge(const offset_t* offsets) UNIV_NOTHROW
{
const dict_index_t* index = m_index->m_srv_index;
@@ -1789,19 +1777,16 @@ PageConverter::purge(const ulint* offsets) UNIV_NOTHROW
return(false);
}
-/**
-Adjust the BLOB references and sys fields for the current record.
-@param rec - record to update
-@param offsets - column offsets for the record
-@param deleted - true if row is delete marked
+/** Adjust the BLOB references and sys fields for the current record.
+@param rec record to update
+@param offsets column offsets for the record
@return DB_SUCCESS or error code. */
inline
dberr_t
PageConverter::adjust_cluster_record(
const dict_index_t* index,
rec_t* rec,
- const ulint* offsets,
- bool deleted) UNIV_NOTHROW
+ const offset_t* offsets) UNIV_NOTHROW
{
dberr_t err;
@@ -1813,16 +1798,15 @@ PageConverter::adjust_cluster_record(
row_upd_rec_sys_fields(
rec, m_page_zip_ptr, m_cluster_index, m_offsets,
- m_trx, roll_ptr_t(1) << 55);
+ m_trx, roll_ptr_t(1) << ROLL_PTR_INSERT_FLAG_POS);
}
return(err);
}
-/**
-Update the BLOB refrences and write UNDO log entries for
+/** Update the BLOB refrences and write UNDO log entries for
rows that can't be purged optimistically.
-@param block - block to update
+@param block block to update
@retval DB_SUCCESS or error code */
inline
dberr_t
@@ -1847,15 +1831,14 @@ PageConverter::update_records(
if (deleted || clust_index) {
m_offsets = rec_get_offsets(
- rec, m_index->m_srv_index, m_offsets,
+ rec, m_index->m_srv_index, m_offsets, true,
ULINT_UNDEFINED, &m_heap);
}
if (clust_index) {
dberr_t err = adjust_cluster_record(
- m_index->m_srv_index, rec, m_offsets,
- deleted);
+ m_index->m_srv_index, rec, m_offsets);
if (err != DB_SUCCESS) {
return(err);
@@ -1883,8 +1866,7 @@ PageConverter::update_records(
return(DB_SUCCESS);
}
-/**
-Update the space, index id, trx id.
+/** Update the space, index id, trx id.
@return DB_SUCCESS or error code */
inline
dberr_t
@@ -1894,22 +1876,18 @@ PageConverter::update_index_page(
index_id_t id;
buf_frame_t* page = block->frame;
- if (is_free(buf_block_get_page_no(block))) {
+ if (is_free(block->page.id.page_no())) {
return(DB_SUCCESS);
} else if ((id = btr_page_get_index_id(page)) != m_index->m_id) {
row_index_t* index = find_index(id);
if (index == 0) {
- ib_logf(IB_LOG_LEVEL_ERROR,
- "Page for tablespace " ULINTPF " is "
- " index page with id " IB_ID_FMT " but that"
- " index is not found from configuration file."
- " Current index name %s and id " IB_ID_FMT ".",
- m_space,
- id,
- m_index->m_name,
- m_index->m_id);
+ ib::error() << "Page for tablespace " << m_space
+ << " is index page with id " << id
+ << " but that index is not found from"
+ << " configuration file. Current index name "
+ << m_index->m_name << " and id " << m_index->m_id;
m_index = 0;
return(DB_CORRUPTION);
}
@@ -1924,10 +1902,10 @@ PageConverter::update_index_page(
return(DB_SUCCESS);
}
- if (m_index && block->page.offset == m_index->m_page_no) {
+ if (m_index && block->page.id.page_no() == m_index->m_page_no) {
byte *b = FIL_PAGE_DATA + PAGE_BTR_SEG_LEAF + FSEG_HDR_SPACE
+ page;
- mach_write_to_4(b, block->page.space);
+ mach_write_to_4(b, block->page.id.space());
memcpy(FIL_PAGE_DATA + PAGE_BTR_SEG_TOP + FSEG_HDR_SPACE
+ page, b, 4);
@@ -1951,12 +1929,28 @@ PageConverter::update_index_page(
btr_page_set_index_id(
page, m_page_zip_ptr, m_index->m_srv_index->id, 0);
- page_set_max_trx_id(block, m_page_zip_ptr, m_trx->id, 0);
+ if (dict_index_is_clust(m_index->m_srv_index)) {
+ if (block->page.id.page_no() == m_index->m_srv_index->page) {
+ /* Preserve the PAGE_ROOT_AUTO_INC. */
+ } else {
+ /* Clear PAGE_MAX_TRX_ID so that it can be
+ used for other purposes in the future. IMPORT
+ in MySQL 5.6, 5.7 and MariaDB 10.0 and 10.1
+ would set the field to the transaction ID even
+ on clustered index pages. */
+ page_set_max_trx_id(block, m_page_zip_ptr, 0, NULL);
+ }
+ } else {
+ /* Set PAGE_MAX_TRX_ID on secondary index leaf pages,
+ and clear it on non-leaf pages. */
+ page_set_max_trx_id(block, m_page_zip_ptr,
+ page_is_leaf(page) ? m_trx->id : 0, NULL);
+ }
- if (page_is_empty(block->frame)) {
+ if (page_is_empty(page)) {
/* Only a root page can be empty. */
- if (!is_root_page(block->frame)) {
+ if (page_has_siblings(page)) {
// TODO: We should relax this and skip secondary
// indexes. Mark them as corrupt because they can
// always be rebuilt.
@@ -1969,9 +1963,8 @@ PageConverter::update_index_page(
return page_is_leaf(block->frame) ? update_records(block) : DB_SUCCESS;
}
-/**
-Validate the space flags and update tablespace header page.
-@param block - block read from file, not from the buffer pool.
+/** Validate the space flags and update tablespace header page.
+@param block block read from file, not from the buffer pool.
@retval DB_SUCCESS or error code */
inline
dberr_t
@@ -1979,13 +1972,11 @@ PageConverter::update_header(
buf_block_t* block) UNIV_NOTHROW
{
/* Check for valid header */
- switch(fsp_header_get_space_id(get_frame(block))) {
+ switch (fsp_header_get_space_id(get_frame(block))) {
case 0:
return(DB_CORRUPTION);
case ULINT_UNDEFINED:
- ib_logf(IB_LOG_LEVEL_WARN,
- "Space id check in the header failed "
- "- ignored");
+ ib::warn() << "Space id check in the header failed: ignored";
}
mach_write_to_8(
@@ -2009,9 +2000,8 @@ PageConverter::update_header(
return(DB_SUCCESS);
}
-/**
-Update the page, set the space id, max trx id and index id.
-@param block - block read from file
+/** Update the page, set the space id, max trx id and index id.
+@param block block read from file
@retval DB_SUCCESS or error code */
inline
dberr_t
@@ -2031,11 +2021,12 @@ PageConverter::update_page(
switch (page_type = fil_page_get_type(get_frame(block))) {
case FIL_PAGE_TYPE_FSP_HDR:
+ ut_a(block->page.id.page_no() == 0);
/* Work directly on the uncompressed page headers. */
- ut_a(buf_block_get_page_no(block) == 0);
return(update_header(block));
case FIL_PAGE_INDEX:
+ case FIL_PAGE_RTREE:
/* We need to decompress the contents into block->frame
before we can do any thing with Btree pages. */
@@ -2057,7 +2048,7 @@ PageConverter::update_page(
case FIL_PAGE_TYPE_XDES:
err = set_current_xdes(
- buf_block_get_page_no(block), get_frame(block));
+ block->page.id.page_no(), get_frame(block));
/* fall through */
case FIL_PAGE_INODE:
case FIL_PAGE_TYPE_TRX_SYS:
@@ -2077,14 +2068,12 @@ PageConverter::update_page(
return(err);
}
- ib_logf(IB_LOG_LEVEL_WARN, "Unknown page type (" ULINTPF ")",
- page_type);
+ ib::warn() << "Unknown page type (" << page_type << ")";
return(DB_CORRUPTION);
}
-/**
-Called for every page in the tablespace. If the page was not
+/** Called for every page in the tablespace. If the page was not
updated then its state must be set to BUF_PAGE_NOT_USED.
@param block block read from file, note it is not from the buffer pool
@retval DB_SUCCESS or error code. */
@@ -2093,31 +2082,27 @@ dberr_t PageConverter::operator()(buf_block_t* block) UNIV_NOTHROW
/* If we already had an old page with matching number
in the buffer pool, evict it now, because
we no longer evict the pages on DISCARD TABLESPACE. */
- buf_page_get_gen(get_space_id(), get_zip_size(), block->page.offset,
+ buf_page_get_gen(block->page.id, get_page_size(),
RW_NO_LATCH, NULL, BUF_EVICT_IF_IN_POOL,
- __FILE__, __LINE__, NULL);
+ __FILE__, __LINE__, NULL, NULL);
ulint page_type;
dberr_t err = update_page(block, page_type);
if (err != DB_SUCCESS) return err;
- /* Note: For compressed pages this function will write to the
- zip descriptor and for uncompressed pages it will write to
- page (ie. the block->frame). Therefore the caller should write
- out the descriptor contents and not block->frame for compressed
- pages. */
-
- if (!is_compressed_table() || page_type == FIL_PAGE_INDEX) {
+ if (!block->page.zip.data) {
+ buf_flush_init_for_writing(
+ NULL, block->frame, NULL, m_current_lsn);
+ } else if (fil_page_type_is_index(page_type)) {
buf_flush_init_for_writing(
- get_frame(block),
- block->page.zip.data ? &block->page.zip : NULL,
+ NULL, block->page.zip.data, &block->page.zip,
m_current_lsn);
} else {
- /* Calculate and update the checksum of non-btree
- pages for compressed tables explicitly here. */
+ /* Calculate and update the checksum of non-index
+ pages for ROW_FORMAT=COMPRESSED tables. */
buf_flush_update_zip_checksum(
- get_frame(block), get_zip_size(),
+ block->page.zip.data, get_page_size().physical(),
m_current_lsn);
}
@@ -2142,15 +2127,9 @@ row_import_discard_changes(
prebuilt->trx->error_info = NULL;
- char table_name[MAX_FULL_NAME_LEN + 1];
-
- innobase_format_name(
- table_name, sizeof(table_name),
- prebuilt->table->name, FALSE);
-
- ib_logf(IB_LOG_LEVEL_INFO,
- "Discarding tablespace of table %s: %s",
- table_name, ut_strerr(err));
+ ib::info() << "Discarding tablespace of table "
+ << prebuilt->table->name
+ << ": " << ut_strerr(err);
if (trx->dict_operation_lock_mode != RW_X_LATCH) {
ut_a(trx->dict_operation_lock_mode == 0);
@@ -2207,7 +2186,7 @@ row_import_cleanup(
DBUG_EXECUTE_IF("ib_import_before_checkpoint_crash", DBUG_SUICIDE(););
- log_make_checkpoint_at(LSN_MAX, TRUE);
+ log_make_checkpoint();
return(err);
}
@@ -2227,7 +2206,7 @@ row_import_error(
innobase_format_name(
table_name, sizeof(table_name),
- prebuilt->table->name, FALSE);
+ prebuilt->table->name.m_name);
ib_senderrf(
trx->mysql_thd, IB_LOG_LEVEL_WARN,
@@ -2268,11 +2247,6 @@ row_import_adjust_root_pages_of_secondary_indexes(
/* Adjust the root pages of the secondary indexes only. */
while ((index = dict_table_get_next_index(index)) != NULL) {
- char index_name[MAX_FULL_NAME_LEN + 1];
-
- innobase_format_name(
- index_name, sizeof(index_name), index->name, TRUE);
-
ut_a(!dict_index_is_clust(index));
if (!(index->type & DICT_CORRUPT)
@@ -2284,9 +2258,8 @@ row_import_adjust_root_pages_of_secondary_indexes(
err = btr_root_adjust_on_import(index);
} else {
- ib_logf(IB_LOG_LEVEL_WARN,
- "Skip adjustment of root pages for "
- "index %s.", index->name);
+ ib::warn() << "Skip adjustment of root pages for"
+ " index " << index->name << ".";
err = DB_CORRUPTION;
}
@@ -2300,9 +2273,9 @@ row_import_adjust_root_pages_of_secondary_indexes(
ib_errf(trx->mysql_thd,
IB_LOG_LEVEL_WARN,
ER_INNODB_INDEX_CORRUPT,
- "Index '%s' not found or corrupt, "
- "you should recreate this index.",
- index_name);
+ "Index %s not found or corrupt,"
+ " you should recreate this index.",
+ index->name());
/* Do not bail out, so that the data
can be recovered. */
@@ -2339,7 +2312,7 @@ row_import_adjust_root_pages_of_secondary_indexes(
ER_INNODB_INDEX_CORRUPT,
"Index '%s' contains " ULINTPF " entries, "
"should be " ULINTPF ", you should recreate "
- "this index.", index_name,
+ "this index.", index->name(),
purge.get_n_rows(), n_rows_in_table);
index->type |= DICT_CORRUPT;
@@ -2396,13 +2369,13 @@ row_import_set_sys_max_row_id(
ulint len;
const byte* field;
mem_heap_t* heap = NULL;
- ulint offsets_[1 + REC_OFFS_HEADER_SIZE];
- ulint* offsets;
+ offset_t offsets_[1 + REC_OFFS_HEADER_SIZE];
+ offset_t* offsets;
rec_offs_init(offsets_);
offsets = rec_get_offsets(
- rec, index, offsets_, ULINT_UNDEFINED, &heap);
+ rec, index, offsets_, true, ULINT_UNDEFINED, &heap);
field = rec_get_nth_field(
rec, offsets,
@@ -2431,16 +2404,11 @@ row_import_set_sys_max_row_id(
err = DB_CORRUPTION;);
if (err != DB_SUCCESS) {
- char index_name[MAX_FULL_NAME_LEN + 1];
-
- innobase_format_name(
- index_name, sizeof(index_name), index->name, TRUE);
-
ib_errf(prebuilt->trx->mysql_thd,
IB_LOG_LEVEL_WARN,
ER_INNODB_INDEX_CORRUPT,
- "Index '%s' corruption detected, invalid DB_ROW_ID "
- "in index.", index_name);
+ "Index `%s` corruption detected, invalid DB_ROW_ID"
+ " in index.", index->name());
return(err);
@@ -2519,13 +2487,16 @@ row_import_cfg_read_index_fields(
byte row[sizeof(ib_uint32_t) * 3];
ulint n_fields = index->m_n_fields;
- index->m_fields = new(std::nothrow) dict_field_t[n_fields];
+ index->m_fields = UT_NEW_ARRAY_NOKEY(dict_field_t, n_fields);
/* Trigger OOM */
- DBUG_EXECUTE_IF("ib_import_OOM_4",
- delete [] index->m_fields; index->m_fields = 0;);
+ DBUG_EXECUTE_IF(
+ "ib_import_OOM_4",
+ UT_DELETE_ARRAY(index->m_fields);
+ index->m_fields = NULL;
+ );
- if (index->m_fields == 0) {
+ if (index->m_fields == NULL) {
return(DB_OUT_OF_MEMORY);
}
@@ -2542,7 +2513,7 @@ row_import_cfg_read_index_fields(
ib_senderrf(
thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
- errno, strerror(errno),
+ (ulong) errno, strerror(errno),
"while reading index fields.");
return(DB_IO_ERROR);
@@ -2559,12 +2530,16 @@ row_import_cfg_read_index_fields(
/* Include the NUL byte in the length. */
ulint len = mach_read_from_4(ptr);
- byte* name = new(std::nothrow) byte[len];
+ byte* name = UT_NEW_ARRAY_NOKEY(byte, len);
/* Trigger OOM */
- DBUG_EXECUTE_IF("ib_import_OOM_5", delete [] name; name = 0;);
+ DBUG_EXECUTE_IF(
+ "ib_import_OOM_5",
+ UT_DELETE_ARRAY(name);
+ name = NULL;
+ );
- if (name == 0) {
+ if (name == NULL) {
return(DB_OUT_OF_MEMORY);
}
@@ -2576,7 +2551,7 @@ row_import_cfg_read_index_fields(
ib_senderrf(
thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
- errno, strerror(errno),
+ (ulong) errno, strerror(errno),
"while parsing table name.");
return(err);
@@ -2606,13 +2581,16 @@ row_import_read_index_data(
ut_a(cfg->m_n_indexes > 0);
ut_a(cfg->m_n_indexes < 1024);
- cfg->m_indexes = new(std::nothrow) row_index_t[cfg->m_n_indexes];
+ cfg->m_indexes = UT_NEW_ARRAY_NOKEY(row_index_t, cfg->m_n_indexes);
/* Trigger OOM */
- DBUG_EXECUTE_IF("ib_import_OOM_6",
- delete [] cfg->m_indexes; cfg->m_indexes = 0;);
+ DBUG_EXECUTE_IF(
+ "ib_import_OOM_6",
+ UT_DELETE_ARRAY(cfg->m_indexes);
+ cfg->m_indexes = NULL;
+ );
- if (cfg->m_indexes == 0) {
+ if (cfg->m_indexes == NULL) {
return(DB_OUT_OF_MEMORY);
}
@@ -2635,17 +2613,17 @@ row_import_read_index_data(
if (n_bytes != sizeof(row)) {
char msg[BUFSIZ];
- ut_snprintf(msg, sizeof(msg),
- "while reading index meta-data, expected "
- "to read %lu bytes but read only %lu "
- "bytes",
- (ulong) sizeof(row), (ulong) n_bytes);
+ snprintf(msg, sizeof(msg),
+ "while reading index meta-data, expected "
+ "to read " ULINTPF
+ " bytes but read only " ULINTPF " bytes",
+ sizeof(row), n_bytes);
ib_senderrf(
thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
- errno, strerror(errno), msg);
+ (ulong) errno, strerror(errno), msg);
- ib_logf(IB_LOG_LEVEL_ERROR, "IO Error: %s", msg);
+ ib::error() << "IO Error: " << msg;
return(DB_IO_ERROR);
}
@@ -2697,14 +2675,16 @@ row_import_read_index_data(
return(DB_CORRUPTION);
}
- cfg_index->m_name = new(std::nothrow) byte[len];
+ cfg_index->m_name = UT_NEW_ARRAY_NOKEY(byte, len);
/* Trigger OOM */
- DBUG_EXECUTE_IF("ib_import_OOM_7",
- delete [] cfg_index->m_name;
- cfg_index->m_name = 0;);
+ DBUG_EXECUTE_IF(
+ "ib_import_OOM_7",
+ UT_DELETE_ARRAY(cfg_index->m_name);
+ cfg_index->m_name = NULL;
+ );
- if (cfg_index->m_name == 0) {
+ if (cfg_index->m_name == NULL) {
return(DB_OUT_OF_MEMORY);
}
@@ -2716,7 +2696,7 @@ row_import_read_index_data(
ib_senderrf(
thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
- errno, strerror(errno),
+ (ulong) errno, strerror(errno),
"while parsing index name.");
return(err);
@@ -2755,7 +2735,7 @@ row_import_read_indexes(
if (fread(row, 1, sizeof(row), file) != sizeof(row)) {
ib_senderrf(
thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
- errno, strerror(errno),
+ (ulong) errno, strerror(errno),
"while reading number of indexes.");
return(DB_IO_ERROR);
@@ -2800,23 +2780,29 @@ row_import_read_columns(
ut_a(cfg->m_n_cols > 0);
ut_a(cfg->m_n_cols < 1024);
- cfg->m_cols = new(std::nothrow) dict_col_t[cfg->m_n_cols];
+ cfg->m_cols = UT_NEW_ARRAY_NOKEY(dict_col_t, cfg->m_n_cols);
/* Trigger OOM */
- DBUG_EXECUTE_IF("ib_import_OOM_8",
- delete [] cfg->m_cols; cfg->m_cols = 0;);
+ DBUG_EXECUTE_IF(
+ "ib_import_OOM_8",
+ UT_DELETE_ARRAY(cfg->m_cols);
+ cfg->m_cols = NULL;
+ );
- if (cfg->m_cols == 0) {
+ if (cfg->m_cols == NULL) {
return(DB_OUT_OF_MEMORY);
}
- cfg->m_col_names = new(std::nothrow) byte* [cfg->m_n_cols];
+ cfg->m_col_names = UT_NEW_ARRAY_NOKEY(byte*, cfg->m_n_cols);
/* Trigger OOM */
- DBUG_EXECUTE_IF("ib_import_OOM_9",
- delete [] cfg->m_col_names; cfg->m_col_names = 0;);
+ DBUG_EXECUTE_IF(
+ "ib_import_OOM_9",
+ UT_DELETE_ARRAY(cfg->m_col_names);
+ cfg->m_col_names = NULL;
+ );
- if (cfg->m_col_names == 0) {
+ if (cfg->m_col_names == NULL) {
return(DB_OUT_OF_MEMORY);
}
@@ -2835,7 +2821,7 @@ row_import_read_columns(
if (fread(row, 1, sizeof(row), file) != sizeof(row)) {
ib_senderrf(
thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
- errno, strerror(errno),
+ (ulong) errno, strerror(errno),
"while reading table column meta-data.");
return(DB_IO_ERROR);
@@ -2879,14 +2865,16 @@ row_import_read_columns(
return(DB_CORRUPTION);
}
- cfg->m_col_names[i] = new(std::nothrow) byte[len];
+ cfg->m_col_names[i] = UT_NEW_ARRAY_NOKEY(byte, len);
/* Trigger OOM */
- DBUG_EXECUTE_IF("ib_import_OOM_10",
- delete [] cfg->m_col_names[i];
- cfg->m_col_names[i] = 0;);
+ DBUG_EXECUTE_IF(
+ "ib_import_OOM_10",
+ UT_DELETE_ARRAY(cfg->m_col_names[i]);
+ cfg->m_col_names[i] = NULL;
+ );
- if (cfg->m_col_names[i] == 0) {
+ if (cfg->m_col_names[i] == NULL) {
return(DB_OUT_OF_MEMORY);
}
@@ -2899,7 +2887,7 @@ row_import_read_columns(
ib_senderrf(
thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
- errno, strerror(errno),
+ (ulong) errno, strerror(errno),
"while parsing table column name.");
return(err);
@@ -2930,7 +2918,7 @@ row_import_read_v1(
if (fread(value, 1, sizeof(value), file) != sizeof(value)) {
ib_senderrf(
thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
- errno, strerror(errno),
+ (ulong) errno, strerror(errno),
"while reading meta-data export hostname length.");
return(DB_IO_ERROR);
@@ -2939,13 +2927,16 @@ row_import_read_v1(
ulint len = mach_read_from_4(value);
/* NUL byte is part of name length. */
- cfg->m_hostname = new(std::nothrow) byte[len];
+ cfg->m_hostname = UT_NEW_ARRAY_NOKEY(byte, len);
/* Trigger OOM */
- DBUG_EXECUTE_IF("ib_import_OOM_1",
- delete [] cfg->m_hostname; cfg->m_hostname = 0;);
+ DBUG_EXECUTE_IF(
+ "ib_import_OOM_1",
+ UT_DELETE_ARRAY(cfg->m_hostname);
+ cfg->m_hostname = NULL;
+ );
- if (cfg->m_hostname == 0) {
+ if (cfg->m_hostname == NULL) {
return(DB_OUT_OF_MEMORY);
}
@@ -2955,7 +2946,7 @@ row_import_read_v1(
ib_senderrf(
thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
- errno, strerror(errno),
+ (ulong) errno, strerror(errno),
"while parsing export hostname.");
return(err);
@@ -2969,7 +2960,7 @@ row_import_read_v1(
if (fread(value, 1, sizeof(value), file) != sizeof(value)) {
ib_senderrf(
thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
- errno, strerror(errno),
+ (ulong) errno, strerror(errno),
"while reading meta-data table name length.");
return(DB_IO_ERROR);
@@ -2978,13 +2969,16 @@ row_import_read_v1(
len = mach_read_from_4(value);
/* NUL byte is part of name length. */
- cfg->m_table_name = new(std::nothrow) byte[len];
+ cfg->m_table_name = UT_NEW_ARRAY_NOKEY(byte, len);
/* Trigger OOM */
- DBUG_EXECUTE_IF("ib_import_OOM_2",
- delete [] cfg->m_table_name; cfg->m_table_name = 0;);
+ DBUG_EXECUTE_IF(
+ "ib_import_OOM_2",
+ UT_DELETE_ARRAY(cfg->m_table_name);
+ cfg->m_table_name = NULL;
+ );
- if (cfg->m_table_name == 0) {
+ if (cfg->m_table_name == NULL) {
return(DB_OUT_OF_MEMORY);
}
@@ -2993,15 +2987,14 @@ row_import_read_v1(
if (err != DB_SUCCESS) {
ib_senderrf(
thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
- errno, strerror(errno),
+ (ulong) errno, strerror(errno),
"while parsing table name.");
return(err);
}
- ib_logf(IB_LOG_LEVEL_INFO,
- "Importing tablespace for table '%s' that was exported "
- "from host '%s'", cfg->m_table_name, cfg->m_hostname);
+ ib::info() << "Importing tablespace for table '" << cfg->m_table_name
+ << "' that was exported from host '" << cfg->m_hostname << "'";
byte row[sizeof(ib_uint32_t) * 3];
@@ -3013,7 +3006,7 @@ row_import_read_v1(
if (fread(row, 1, sizeof(ib_uint64_t), file) != sizeof(ib_uint64_t)) {
ib_senderrf(
thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
- errno, strerror(errno),
+ (ulong) errno, strerror(errno),
"while reading autoinc value.");
return(DB_IO_ERROR);
@@ -3029,7 +3022,7 @@ row_import_read_v1(
if (fread(row, 1, sizeof(row), file) != sizeof(row)) {
ib_senderrf(
thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
- errno, strerror(errno),
+ (ulong) errno, strerror(errno),
"while reading meta-data header.");
return(DB_IO_ERROR);
@@ -3037,17 +3030,18 @@ row_import_read_v1(
byte* ptr = row;
- cfg->m_page_size = mach_read_from_4(ptr);
+ const ulint logical_page_size = mach_read_from_4(ptr);
ptr += sizeof(ib_uint32_t);
- if (cfg->m_page_size != UNIV_PAGE_SIZE) {
+ if (logical_page_size != univ_page_size.logical()) {
ib_errf(thd, IB_LOG_LEVEL_ERROR, ER_TABLE_SCHEMA_MISMATCH,
- "Tablespace to be imported has a different "
- "page size than this server. Server page size "
- "is " ULINTPF ", whereas tablespace page size is "
- ULINTPF,
- UNIV_PAGE_SIZE, cfg->m_page_size);
+ "Tablespace to be imported has a different"
+ " page size than this server. Server page size"
+ " is " ULINTPF ", whereas tablespace page size"
+ " is " ULINTPF,
+ univ_page_size.logical(),
+ logical_page_size);
return(DB_ERROR);
}
@@ -3055,24 +3049,26 @@ row_import_read_v1(
cfg->m_flags = mach_read_from_4(ptr);
ptr += sizeof(ib_uint32_t);
+ cfg->m_page_size.copy_from(dict_tf_get_page_size(cfg->m_flags));
+
+ ut_a(logical_page_size == cfg->m_page_size.logical());
+
cfg->m_n_cols = mach_read_from_4(ptr);
if (!dict_tf_is_valid(cfg->m_flags)) {
+ ib_errf(thd, IB_LOG_LEVEL_ERROR,
+ ER_TABLE_SCHEMA_MISMATCH,
+ "Invalid table flags: " ULINTPF, cfg->m_flags);
return(DB_CORRUPTION);
+ }
- } else if ((err = row_import_read_columns(file, thd, cfg))
- != DB_SUCCESS) {
-
- return(err);
-
- } else if ((err = row_import_read_indexes(file, thd, cfg))
- != DB_SUCCESS) {
+ err = row_import_read_columns(file, thd, cfg);
- return(err);
+ if (err == DB_SUCCESS) {
+ err = row_import_read_indexes(file, thd, cfg);
}
- ut_a(err == DB_SUCCESS);
return(err);
}
@@ -3097,7 +3093,7 @@ row_import_read_meta_data(
if (fread(&row, 1, sizeof(row), file) != sizeof(row)) {
ib_senderrf(
thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
- errno, strerror(errno),
+ (ulong) errno, strerror(errno),
"while reading meta-data version.");
return(DB_IO_ERROR);
@@ -3142,13 +3138,13 @@ row_import_read_cfg(
if (file == NULL) {
char msg[BUFSIZ];
- ut_snprintf(msg, sizeof(msg),
- "Error opening '%s', will attempt to import "
- "without schema verification", name);
+ snprintf(msg, sizeof(msg),
+ "Error opening '%s', will attempt to import"
+ " without schema verification", name);
ib_senderrf(
thd, IB_LOG_LEVEL_WARN, ER_IO_READ_ERROR,
- errno, strerror(errno), msg);
+ (ulong) errno, strerror(errno), msg);
cfg.m_missing = true;
@@ -3164,24 +3160,13 @@ row_import_read_cfg(
return(err);
}
-/*****************************************************************//**
-Update the <space, root page> of a table's indexes from the values
-in the data dictionary.
+/** Update the root page numbers and tablespace ID of a table.
+@param[in,out] trx dictionary transaction
+@param[in,out] table persistent table
+@param[in] reset whether to reset the fields to FIL_NULL
@return DB_SUCCESS or error code */
-UNIV_INTERN
dberr_t
-row_import_update_index_root(
-/*=========================*/
- trx_t* trx, /*!< in/out: transaction that
- covers the update */
- const dict_table_t* table, /*!< in: Table for which we want
- to set the root page_no */
- bool reset, /*!< in: if true then set to
- FIL_NUL */
- bool dict_locked) /*!< in: Set to true if the
- caller already owns the
- dict_sys_t:: mutex. */
-
+row_import_update_index_root(trx_t* trx, dict_table_t* table, bool reset)
{
const dict_index_t* index;
que_t* graph = 0;
@@ -3197,9 +3182,7 @@ row_import_update_index_root(
"WHERE TABLE_ID = :table_id AND ID = :index_id;\n"
"END;\n"};
- if (!dict_locked) {
- mutex_enter(&dict_sys->mutex);
- }
+ table->def_trx_id = trx->id;
for (index = dict_table_get_first_index(table);
index != 0;
@@ -3262,17 +3245,11 @@ row_import_update_index_root(
err = trx->error_state;
if (err != DB_SUCCESS) {
- char index_name[MAX_FULL_NAME_LEN + 1];
-
- innobase_format_name(
- index_name, sizeof(index_name),
- index->name, TRUE);
-
ib_errf(trx->mysql_thd, IB_LOG_LEVEL_ERROR,
ER_INTERNAL_ERROR,
- "While updating the <space, root page "
- "number> of index %s - %s",
- index_name, ut_strerr(err));
+ "While updating the <space, root page"
+ " number> of index %s - %s",
+ index->name(), ut_strerr(err));
break;
}
@@ -3280,10 +3257,6 @@ row_import_update_index_root(
que_graph_free(graph);
- if (!dict_locked) {
- mutex_exit(&dict_sys->mutex);
- }
-
return(err);
}
@@ -3336,7 +3309,6 @@ row_import_set_discarded(
/*****************************************************************//**
Update the DICT_TF2_DISCARDED flag in SYS_TABLES.
@return DB_SUCCESS or error code. */
-UNIV_INTERN
dberr_t
row_import_update_discarded_flag(
/*=============================*/
@@ -3358,8 +3330,8 @@ row_import_update_discarded_flag(
"PROCEDURE UPDATE_DISCARDED_FLAG() IS\n"
"DECLARE FUNCTION my_func;\n"
"DECLARE CURSOR c IS\n"
- " SELECT MIX_LEN "
- " FROM SYS_TABLES "
+ " SELECT MIX_LEN"
+ " FROM SYS_TABLES"
" WHERE ID = :table_id FOR UPDATE;"
"\n"
"BEGIN\n"
@@ -3402,7 +3374,6 @@ struct fil_iterator_t {
os_offset_t start; /*!< From where to start */
os_offset_t end; /*!< Where to stop */
os_offset_t file_size; /*!< File size in bytes */
- ulint page_size; /*!< Page size */
ulint n_io_buffers; /*!< Number of pages to use
for IO */
byte* io_buffer; /*!< Buffer to use for IO */
@@ -3431,7 +3402,8 @@ fil_iterate(
AbstractCallback& callback)
{
os_offset_t offset;
- ulint n_bytes = iter.n_io_buffers * iter.page_size;
+ const ulint size = callback.get_page_size().physical();
+ ulint n_bytes = iter.n_io_buffers * size;
const ulint buf_size = srv_page_size
#ifdef HAVE_LZO
@@ -3440,8 +3412,7 @@ fil_iterate(
+ snappy_max_compressed_length(srv_page_size)
#endif
;
- byte* page_compress_buf = static_cast<byte*>(
- ut_malloc_low(buf_size, false));
+ byte* page_compress_buf = static_cast<byte*>(malloc(buf_size));
ut_ad(!srv_read_only_mode);
if (!page_compress_buf) {
@@ -3465,7 +3436,6 @@ fil_iterate(
if (block->page.zip.data) {
/* Zip IO is done in the compressed page buffer. */
io_buffer = block->page.zip.data;
- ut_ad(PAGE_ZIP_MATCH(block->frame, &block->page.zip));
}
/* We have to read the exact number of bytes. Otherwise the
@@ -3475,7 +3445,7 @@ fil_iterate(
iter.end - offset));
ut_ad(n_bytes > 0);
- ut_ad(!(n_bytes % iter.page_size));
+ ut_ad(!(n_bytes % size));
const bool encrypted = iter.crypt_data != NULL
&& iter.crypt_data->should_encrypt();
@@ -3484,23 +3454,28 @@ fil_iterate(
? iter.crypt_io_buffer : io_buffer;
byte* const writeptr = readptr;
- if (!os_file_read_no_error_handling(iter.file, readptr,
- offset, n_bytes)) {
- ib_logf(IB_LOG_LEVEL_ERROR, "os_file_read() failed");
- err = DB_IO_ERROR;
+ IORequest read_request(IORequest::READ);
+ read_request.disable_partial_io_warnings();
+
+ err = os_file_read_no_error_handling(
+ read_request, iter.file, readptr, offset, n_bytes, 0);
+ if (err != DB_SUCCESS) {
+ ib::error() << iter.filepath
+ << ": os_file_read() failed";
goto func_exit;
}
bool updated = false;
- const ulint size = iter.page_size;
- ulint n_pages_read = ulint(n_bytes) / size;
- block->page.offset = offset / size;
+ os_offset_t page_off = offset;
+ ulint n_pages_read = n_bytes / size;
+ block->page.id.set_page_no(ulint(page_off / size));
for (ulint i = 0; i < n_pages_read;
- ++i, block->frame += size, block->page.offset++) {
- byte* src = readptr + (i * size);
+ block->page.id.set_page_no(block->page.id.page_no() + 1),
+ ++i, page_off += size, block->frame += size) {
+ byte* src = readptr + i * size;
const ulint page_no = page_get_page_no(src);
- if (!page_no && block->page.offset) {
+ if (!page_no && block->page.id.page_no()) {
const ulint* b = reinterpret_cast<const ulint*>
(src);
const ulint* const e = b + size / sizeof *b;
@@ -3515,37 +3490,39 @@ fil_iterate(
continue;
}
- if (page_no != block->page.offset) {
+ if (page_no != block->page.id.page_no()) {
page_corrupted:
- ib_logf(IB_LOG_LEVEL_WARN,
- "%s: Page %lu at offset "
- UINT64PF " looks corrupted.",
- callback.filename(),
- ulong(offset / size), offset);
+ ib::warn() << callback.filename()
+ << ": Page " << (offset / size)
+ << " at offset " << offset
+ << " looks corrupted.";
err = DB_CORRUPTION;
goto func_exit;
}
- bool decrypted = false;
- byte* dst = io_buffer + (i * size);
- bool frame_changed = false;
- ulint page_type = mach_read_from_2(src+FIL_PAGE_TYPE);
const bool page_compressed
- = page_type
- == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED
- || page_type == FIL_PAGE_PAGE_COMPRESSED;
+ = fil_page_is_compressed_encrypted(src)
+ || fil_page_is_compressed(src);
if (page_compressed && block->page.zip.data) {
goto page_corrupted;
}
+ bool decrypted = false;
+ byte* dst = io_buffer + i * size;
+ bool frame_changed = false;
+
if (!encrypted) {
} else if (!mach_read_from_4(
FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION
+ src)) {
not_encrypted:
- if (!page_compressed
- && !block->page.zip.data) {
+ if (block->page.id.page_no() == 0
+ && block->page.zip.data) {
+ block->page.zip.data = src;
+ frame_changed = true;
+ } else if (!page_compressed
+ && !block->page.zip.data) {
block->frame = src;
frame_changed = true;
} else {
@@ -3554,13 +3531,13 @@ not_encrypted:
}
} else {
if (!fil_space_verify_crypt_checksum(
- src, callback.get_zip_size())) {
+ src, callback.get_page_size())) {
goto page_corrupted;
}
decrypted = fil_space_decrypt(
iter.crypt_data, dst,
- iter.page_size, src, &err);
+ callback.get_page_size(), src, &err);
if (err != DB_SUCCESS) {
goto func_exit;
@@ -3587,7 +3564,7 @@ not_encrypted:
false,
encrypted && !frame_changed
? dst : src,
- callback.get_zip_size(), NULL)) {
+ callback.get_page_size(), NULL)) {
goto page_corrupted;
}
@@ -3629,48 +3606,46 @@ not_encrypted:
/* When tablespace is encrypted or compressed its
first page (i.e. page 0) is not encrypted or
compressed and there is no need to copy frame. */
- if (encrypted && block->page.offset != 0) {
+ if (encrypted && block->page.id.page_no() != 0) {
byte *local_frame = callback.get_frame(block);
ut_ad((writeptr + (i * size)) != local_frame);
memcpy((writeptr + (i * size)), local_frame, size);
}
if (frame_changed) {
- block->frame = dst;
+ if (block->page.zip.data) {
+ block->page.zip.data = dst;
+ } else {
+ block->frame = dst;
+ }
}
src = io_buffer + (i * size);
if (page_compressed) {
updated = true;
- if (fil_page_compress(
+ if (ulint len = fil_page_compress(
src,
page_compress_buf,
0,/* FIXME: compression level */
512,/* FIXME: proper block size */
encrypted)) {
/* FIXME: remove memcpy() */
- memcpy(src, page_compress_buf,
- srv_page_size);
+ memcpy(src, page_compress_buf, len);
+ memset(src + len, 0,
+ srv_page_size - len);
}
}
- /* If tablespace is encrypted, encrypt page before we
- write it back. Note that we should not encrypt the
- buffer that is in buffer pool. */
- /* NOTE: At this stage of IMPORT the
- buffer pool is not being used at all! */
- if (decrypted && encrypted) {
- byte *dest = writeptr + (i * size);
-
+ /* Encrypt the page if encryption was used. */
+ if (encrypted && decrypted) {
+ byte *dest = writeptr + i * size;
byte* tmp = fil_encrypt_buf(
iter.crypt_data,
- callback.get_space_id(),
- block->page.offset,
+ block->page.id.space(),
+ block->page.id.page_no(),
mach_read_from_8(src + FIL_PAGE_LSN),
- src,
- callback.get_zip_size(),
- dest);
+ src, callback.get_page_size(), dest);
if (tmp == src) {
/* TODO: remove unnecessary memcpy's */
@@ -3683,19 +3658,21 @@ not_encrypted:
}
/* A page was updated in the set, write back to disk. */
- if (updated
- && !os_file_write(
- iter.filepath, iter.file, writeptr,
- offset, (ulint) n_bytes)) {
+ if (updated) {
+ IORequest write_request(IORequest::WRITE);
- ib_logf(IB_LOG_LEVEL_ERROR, "os_file_write() failed");
- err = DB_IO_ERROR;
- goto func_exit;
+ err = os_file_write(write_request,
+ iter.filepath, iter.file,
+ writeptr, offset, n_bytes);
+
+ if (err != DB_SUCCESS) {
+ goto func_exit;
+ }
}
}
func_exit:
- ut_free(page_compress_buf);
+ free(page_compress_buf);
return err;
}
@@ -3723,46 +3700,36 @@ fil_tablespace_iterate(
DBUG_EXECUTE_IF("ib_import_trigger_corruption_1",
return(DB_CORRUPTION););
+ /* Make sure the data_dir_path is set. */
+ dict_get_and_save_data_dir_path(table, false);
+
if (DICT_TF_HAS_DATA_DIR(table->flags)) {
- dict_get_and_save_data_dir_path(table, false);
ut_a(table->data_dir_path);
- filepath = os_file_make_remote_pathname(
- table->data_dir_path, table->name, "ibd");
+ filepath = fil_make_filepath(
+ table->data_dir_path, table->name.m_name, IBD, true);
} else {
- filepath = fil_make_ibd_name(table->name, false);
+ filepath = fil_make_filepath(
+ NULL, table->name.m_name, IBD, false);
}
- {
- ibool success;
+ if (!filepath) {
+ return(DB_OUT_OF_MEMORY);
+ } else {
+ bool success;
file = os_file_create_simple_no_error_handling(
- innodb_file_data_key, filepath,
- OS_FILE_OPEN, OS_FILE_READ_WRITE, &success, FALSE);
-
- DBUG_EXECUTE_IF("fil_tablespace_iterate_failure",
- {
- static bool once;
-
- if (!once || ut_rnd_interval(0, 10) == 5) {
- once = true;
- success = FALSE;
- os_file_close(file);
- }
- });
+ innodb_data_file_key, filepath,
+ OS_FILE_OPEN, OS_FILE_READ_WRITE, false, &success);
if (!success) {
/* The following call prints an error message */
os_file_get_last_error(true);
-
- ib_logf(IB_LOG_LEVEL_ERROR,
- "Trying to import a tablespace, but could not "
- "open the tablespace file %s", filepath);
-
- mem_free(filepath);
-
- return(DB_TABLESPACE_NOT_FOUND);
-
+ ib::error() << "Trying to import a tablespace,"
+ " but could not open the tablespace file "
+ << filepath;
+ ut_free(filepath);
+ return DB_TABLESPACE_NOT_FOUND;
} else {
err = DB_SUCCESS;
}
@@ -3778,28 +3745,35 @@ fil_tablespace_iterate(
We allocate an extra page in case it is a compressed table. One
page is to ensure alignement. */
- void* page_ptr = mem_alloc(3 * UNIV_PAGE_SIZE);
+ void* page_ptr = ut_malloc_nokey(3 * UNIV_PAGE_SIZE);
byte* page = static_cast<byte*>(ut_align(page_ptr, UNIV_PAGE_SIZE));
- /* The block we will use for every physical page */
- buf_block_t block;
-
- memset(&block, 0, sizeof block);
- block.frame = page;
- block.page.space = callback.get_space_id();
- block.page.io_fix = BUF_IO_NONE;
- block.page.buf_fix_count = 1;
- block.page.state = BUF_BLOCK_FILE_PAGE;
+ buf_block_t* block = reinterpret_cast<buf_block_t*>
+ (ut_zalloc_nokey(sizeof *block));
+ block->frame = page;
+ block->page.id = page_id_t(0, 0);
+ block->page.io_fix = BUF_IO_NONE;
+ block->page.buf_fix_count = 1;
+ block->page.state = BUF_BLOCK_FILE_PAGE;
/* Read the first page and determine the page and zip size. */
- if (!os_file_read_no_error_handling(file, page, 0, UNIV_PAGE_SIZE)) {
+ IORequest request(IORequest::READ);
+ request.disable_partial_io_warnings();
- err = DB_IO_ERROR;
+ err = os_file_read_no_error_handling(request, file, page, 0,
+ UNIV_PAGE_SIZE, 0);
- } else if ((err = callback.init(file_size, &block)) == DB_SUCCESS) {
- if (const ulint zip_size = callback.get_zip_size()) {
- page_zip_set_size(&block.page.zip, zip_size);
+ if (err == DB_SUCCESS) {
+ err = callback.init(file_size, block);
+ }
+
+ if (err == DB_SUCCESS) {
+ block->page.id = page_id_t(callback.get_space_id(), 0);
+ block->page.size.copy_from(callback.get_page_size());
+ if (block->page.size.is_compressed()) {
+ page_zip_set_size(&block->page.zip,
+ callback.get_page_size().physical());
/* ROW_FORMAT=COMPRESSED is not optimised for block IO
for now. We do the IMPORT page by page. */
n_io_buffers = 1;
@@ -3807,82 +3781,71 @@ fil_tablespace_iterate(
fil_iterator_t iter;
- iter.file = file;
- iter.start = 0;
- iter.end = file_size;
- iter.filepath = filepath;
- iter.file_size = file_size;
- iter.n_io_buffers = n_io_buffers;
- iter.page_size = callback.get_page_size();
-
- /* In MariaDB/MySQL 5.6 tablespace does not exist
- during import, therefore we can't use space directly
- here. */
- ulint crypt_data_offset = fsp_header_get_crypt_offset(
- callback.get_zip_size());
-
/* read (optional) crypt data */
iter.crypt_data = fil_space_read_crypt_data(
- 0, page, crypt_data_offset);
+ callback.get_page_size(), page);
- /** If tablespace is encrypted, it needs extra buffers */
- if (iter.crypt_data != NULL) {
+ /* If tablespace is encrypted, it needs extra buffers */
+ if (iter.crypt_data && n_io_buffers > 1) {
/* decrease io buffers so that memory
- * consumption doesnt double
- * note: the +1 is to avoid n_io_buffers getting down to 0 */
- iter.n_io_buffers = (iter.n_io_buffers + 1) / 2;
+ consumption will not double */
+ n_io_buffers /= 2;
}
- /** Add an extra page for compressed page scratch area. */
+ iter.file = file;
+ iter.start = 0;
+ iter.end = file_size;
+ iter.filepath = filepath;
+ iter.file_size = file_size;
+ iter.n_io_buffers = n_io_buffers;
- void* io_buffer = mem_alloc(
+ /* Add an extra page for compressed page scratch area. */
+ void* io_buffer = ut_malloc_nokey(
(2 + iter.n_io_buffers) * UNIV_PAGE_SIZE);
iter.io_buffer = static_cast<byte*>(
ut_align(io_buffer, UNIV_PAGE_SIZE));
void* crypt_io_buffer = NULL;
- if (iter.crypt_data != NULL) {
- crypt_io_buffer = mem_alloc(
+ if (iter.crypt_data) {
+ crypt_io_buffer = ut_malloc_nokey(
(2 + iter.n_io_buffers) * UNIV_PAGE_SIZE);
iter.crypt_io_buffer = static_cast<byte*>(
ut_align(crypt_io_buffer, UNIV_PAGE_SIZE));
}
- if (block.page.zip.ssize) {
+ if (block->page.zip.ssize) {
ut_ad(iter.n_io_buffers == 1);
- block.frame = iter.io_buffer;
- block.page.zip.data = block.frame + UNIV_PAGE_SIZE;
- ut_d(block.page.zip.m_external = true);
+ block->frame = iter.io_buffer;
+ block->page.zip.data = block->frame + UNIV_PAGE_SIZE;
}
- err = fil_iterate(iter, &block, callback);
+ err = fil_iterate(iter, block, callback);
- mem_free(io_buffer);
-
- if (crypt_io_buffer != NULL) {
- mem_free(crypt_io_buffer);
- iter.crypt_io_buffer = NULL;
+ if (iter.crypt_data) {
fil_space_destroy_crypt_data(&iter.crypt_data);
}
+
+ ut_free(crypt_io_buffer);
+ ut_free(io_buffer);
}
if (err == DB_SUCCESS) {
-
- ib_logf(IB_LOG_LEVEL_INFO, "Sync to disk");
+ ib::info() << "Sync to disk";
if (!os_file_flush(file)) {
- ib_logf(IB_LOG_LEVEL_INFO, "os_file_flush() failed!");
+ ib::info() << "os_file_flush() failed!";
err = DB_IO_ERROR;
} else {
- ib_logf(IB_LOG_LEVEL_INFO, "Sync to disk - done!");
+ ib::info() << "Sync to disk - done!";
}
}
os_file_close(file);
- mem_free(page_ptr);
- mem_free(filepath);
+ ut_free(page_ptr);
+ ut_free(filepath);
+ ut_free(block);
return(err);
}
@@ -3890,8 +3853,7 @@ fil_tablespace_iterate(
/*****************************************************************//**
Imports a tablespace. The space id in the .ibd file must match the space id
of the table in the data dictionary.
-@return error code or DB_SUCCESS */
-UNIV_INTERN
+@return error code or DB_SUCCESS */
dberr_t
row_import_for_mysql(
/*=================*/
@@ -3901,26 +3863,28 @@ row_import_for_mysql(
dberr_t err;
trx_t* trx;
ib_uint64_t autoinc = 0;
- char table_name[MAX_FULL_NAME_LEN + 1];
char* filepath = NULL;
+ ulint space_flags MY_ATTRIBUTE((unused));
+ /* The caller assured that this is not read_only_mode and that no
+	temporary tablespace is being imported. */
ut_ad(!srv_read_only_mode);
-
- innobase_format_name(
- table_name, sizeof(table_name), table->name, FALSE);
+ ut_ad(!dict_table_is_temporary(table));
ut_a(table->space);
ut_ad(prebuilt->trx);
- ut_a(table->file_unreadable);
+ ut_a(!table->is_readable());
- trx_start_if_not_started(prebuilt->trx);
+ ibuf_delete_for_discarded_space(table->space);
+
+ trx_start_if_not_started(prebuilt->trx, true);
trx = trx_allocate_for_mysql();
/* So that the table is not DROPped during recovery. */
trx_set_dict_operation(trx, TRX_DICT_OP_INDEX);
- trx_start_if_not_started(trx);
+ trx_start_if_not_started(trx, true);
/* So that we can send error messages to the user. */
trx->mysql_thd = prebuilt->trx->mysql_thd;
@@ -3935,7 +3899,10 @@ row_import_for_mysql(
mutex_enter(&trx->undo_mutex);
- err = trx_undo_assign_undo(trx, TRX_UNDO_UPDATE);
+ /* TODO: Do not write any undo log for the IMPORT cleanup. */
+ trx_undo_t** pundo = &trx->rsegs.m_redo.update_undo;
+ err = trx_undo_assign_undo(trx, trx->rsegs.m_redo.rseg, pundo,
+ TRX_UNDO_UPDATE);
mutex_exit(&trx->undo_mutex);
@@ -3946,7 +3913,7 @@ row_import_for_mysql(
return(row_import_cleanup(prebuilt, trx, err));
- } else if (trx->update_undo == 0) {
+ } else if (trx->rsegs.m_redo.update_undo == 0) {
err = DB_TOO_MANY_CONCURRENT_TRXS;
return(row_import_cleanup(prebuilt, trx, err));
@@ -3967,7 +3934,7 @@ row_import_for_mysql(
if (err == DB_SUCCESS) {
- /* We have a schema file, try and match it with the our
+ /* We have a schema file, try and match it with our
data dictionary. */
err = cfg.match_schema(trx->mysql_thd);
@@ -3996,12 +3963,12 @@ row_import_for_mysql(
ut_a(err == DB_FAIL);
- cfg.m_page_size = UNIV_PAGE_SIZE;
+ cfg.m_page_size.copy_from(univ_page_size);
FetchIndexRootPages fetchIndexRootPages(table, trx);
err = fil_tablespace_iterate(
- table, IO_BUFFER_SIZE(cfg.m_page_size),
+ table, IO_BUFFER_SIZE(cfg.m_page_size.physical()),
fetchIndexRootPages);
if (err == DB_SUCCESS) {
@@ -4017,6 +3984,8 @@ row_import_for_mysql(
}
}
+ space_flags = fetchIndexRootPages.get_space_flags();
+
} else {
rw_lock_s_unlock_gen(&dict_operation_lock, 0);
}
@@ -4027,7 +3996,7 @@ row_import_for_mysql(
prebuilt->trx->op_info = "importing tablespace";
- ib_logf(IB_LOG_LEVEL_INFO, "Phase I - Update all pages");
+ ib::info() << "Phase I - Update all pages";
/* Iterate over all the pages and do the sanity checking and
the conversion required to import the tablespace. */
@@ -4037,11 +4006,11 @@ row_import_for_mysql(
/* Set the IO buffer size in pages. */
err = fil_tablespace_iterate(
- table, IO_BUFFER_SIZE(cfg.m_page_size), converter);
+ table, IO_BUFFER_SIZE(cfg.m_page_size.physical()), converter);
DBUG_EXECUTE_IF("ib_import_reset_space_and_lsn_failure",
err = DB_TOO_MANY_CONCURRENT_TRXS;);
-
+#ifdef BTR_CUR_HASH_ADAPT
/* On DISCARD TABLESPACE, we did not drop any adaptive hash
index entries. If we replaced the discarded tablespace with a
smaller one here, there could still be some adaptive hash
@@ -4058,18 +4027,20 @@ row_import_for_mysql(
break;
}
}
+#endif /* BTR_CUR_HASH_ADAPT */
if (err != DB_SUCCESS) {
char table_name[MAX_FULL_NAME_LEN + 1];
innobase_format_name(
- table_name, sizeof(table_name), table->name, FALSE);
+ table_name, sizeof(table_name),
+ table->name.m_name);
if (err != DB_DECRYPTION_FAILED) {
ib_errf(trx->mysql_thd, IB_LOG_LEVEL_ERROR,
ER_INTERNAL_ERROR,
- "Cannot reset LSNs in table '%s' : %s",
+ "Cannot reset LSNs in table %s : %s",
table_name, ut_strerr(err));
}
@@ -4081,25 +4052,41 @@ row_import_for_mysql(
/* If the table is stored in a remote tablespace, we need to
determine that filepath from the link file and system tables.
Find the space ID in SYS_TABLES since this is an ALTER TABLE. */
+ dict_get_and_save_data_dir_path(table, true);
+
if (DICT_TF_HAS_DATA_DIR(table->flags)) {
- dict_get_and_save_data_dir_path(table, true);
ut_a(table->data_dir_path);
- filepath = os_file_make_remote_pathname(
- table->data_dir_path, table->name, "ibd");
+ filepath = fil_make_filepath(
+ table->data_dir_path, table->name.m_name, IBD, true);
} else {
- filepath = fil_make_ibd_name(table->name, false);
+ filepath = fil_make_filepath(
+ NULL, table->name.m_name, IBD, false);
+ }
+
+ DBUG_EXECUTE_IF(
+ "ib_import_OOM_15",
+ ut_free(filepath);
+ filepath = NULL;
+ );
+
+ if (filepath == NULL) {
+ row_mysql_unlock_data_dictionary(trx);
+ return(row_import_cleanup(prebuilt, trx, DB_OUT_OF_MEMORY));
}
- ut_a(filepath);
/* Open the tablespace so that we can access via the buffer pool.
We set the 2nd param (fix_dict = true) here because we already
- have an x-lock on dict_operation_lock and dict_sys->mutex. */
+ have an x-lock on dict_operation_lock and dict_sys->mutex.
+ The tablespace is initially opened as a temporary one, because
+ we will not be writing any redo log for it before we have invoked
+ fil_space_set_imported() to declare it a persistent tablespace. */
- err = fil_open_single_table_tablespace(
- true, true, table->space,
- dict_tf_to_fsp_flags(table->flags),
- table->name, filepath);
+ ulint fsp_flags = dict_tf_to_fsp_flags(table->flags);
+
+ err = fil_ibd_open(
+ true, true, FIL_TYPE_IMPORT, table->space,
+ fsp_flags, table->name.m_name, filepath);
DBUG_EXECUTE_IF("ib_import_open_tablespace_failure",
err = DB_TABLESPACE_NOT_FOUND;);
@@ -4111,14 +4098,14 @@ row_import_for_mysql(
ER_GET_ERRMSG,
err, ut_strerr(err), filepath);
- mem_free(filepath);
+ ut_free(filepath);
return(row_import_cleanup(prebuilt, trx, err));
}
row_mysql_unlock_data_dictionary(trx);
- mem_free(filepath);
+ ut_free(filepath);
err = ibuf_check_bitmap_on_import(trx, table->space);
@@ -4146,10 +4133,6 @@ row_import_for_mysql(
if (err != DB_SUCCESS) {
return(row_import_error(prebuilt, trx, err));
- }
-
- if (err != DB_SUCCESS) {
- return(row_import_error(prebuilt, trx, err));
} else if (cfg.requires_purge(index->name)) {
/* Purge any delete-marked records that couldn't be
@@ -4196,28 +4179,34 @@ row_import_for_mysql(
}
}
- ib_logf(IB_LOG_LEVEL_INFO, "Phase III - Flush changes to disk");
+ ib::info() << "Phase III - Flush changes to disk";
/* Ensure that all pages dirtied during the IMPORT make it to disk.
The only dirty pages generated should be from the pessimistic purge
of delete marked records that couldn't be purged in Phase I. */
- buf_LRU_flush_or_remove_pages(prebuilt->table->space, trx);
-
- if (trx_is_interrupted(trx)) {
- ib_logf(IB_LOG_LEVEL_INFO, "Phase III - Flush interrupted");
- return(row_import_error(prebuilt, trx, DB_INTERRUPTED));
- } else {
- ib_logf(IB_LOG_LEVEL_INFO, "Phase IV - Flush complete");
+ {
+ FlushObserver observer(prebuilt->table->space, trx, NULL);
+ buf_LRU_flush_or_remove_pages(prebuilt->table->space,
+ &observer);
+
+ if (observer.is_interrupted()) {
+ ib::info() << "Phase III - Flush interrupted";
+ return(row_import_error(prebuilt, trx,
+ DB_INTERRUPTED));
+ }
}
+ ib::info() << "Phase IV - Flush complete";
+ fil_space_set_imported(prebuilt->table->space);
+
/* The dictionary latches will be released in in row_import_cleanup()
after the transaction commit, for both success and error. */
row_mysql_lock_data_dictionary(trx);
/* Update the root pages of the table's indexes. */
- err = row_import_update_index_root(trx, table, false, true);
+ err = row_import_update_index_root(trx, table, false);
if (err != DB_SUCCESS) {
return(row_import_error(prebuilt, trx, err));
@@ -4233,21 +4222,15 @@ row_import_for_mysql(
table->file_unreadable = false;
table->flags2 &= ~DICT_TF2_DISCARDED;
- if (autoinc != 0) {
- char table_name[MAX_FULL_NAME_LEN + 1];
-
- innobase_format_name(
- table_name, sizeof(table_name), table->name, FALSE);
+ /* Set autoinc value read from .cfg file, if one was specified.
+ Otherwise, keep the PAGE_ROOT_AUTO_INC as is. */
+ if (autoinc) {
+ ib::info() << table->name << " autoinc value set to "
+ << autoinc;
- ib_logf(IB_LOG_LEVEL_INFO, "%s autoinc value set to " IB_ID_FMT,
- table_name, autoinc);
-
- dict_table_autoinc_lock(table);
- dict_table_autoinc_initialize(table, autoinc);
- dict_table_autoinc_unlock(table);
+ table->autoinc = autoinc--;
+ btr_write_autoinc(dict_table_get_first_index(table), autoinc);
}
- ut_a(err == DB_SUCCESS);
-
return(row_import_cleanup(prebuilt, trx, err));
}