summary | refs | log | tree | commit | diff
path: root/storage/archive
diff options
context:
space:
mode:
author: Sergei Golubchik <sergii@pisem.net> 2013-07-21 16:39:19 +0200
committer: Sergei Golubchik <sergii@pisem.net> 2013-07-21 16:39:19 +0200
commit: b7b5f6f1ab49948b0e15b762266d4640b3d6b7fb (patch)
tree: 7c302c2025184dbd053aa6135f0ff28c8ce6f359 /storage/archive
parent: 5f6380adde2dac3f32b40339b9b702c0135eb7d6 (diff)
parent: c1d6a2d7e194225ccc19a68ea5d0f368632620d0 (diff)
download: mariadb-git-b7b5f6f1ab49948b0e15b762266d4640b3d6b7fb.tar.gz
10.0-monty merge
includes: * remove some remnants of "Bug#14521864: MYSQL 5.1 TO 5.5 BUGS PARTITIONING" * introduce LOCK_share, now LOCK_ha_data is strictly for engines * rea_create_table() always creates .par file (even in "frm-only" mode) * fix a 5.6 bug, temp file leak on dummy ALTER TABLE
Diffstat (limited to 'storage/archive')
-rw-r--r--  storage/archive/ha_archive.cc  403
-rw-r--r--  storage/archive/ha_archive.h   44
2 files changed, 261 insertions, 186 deletions
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index 1d1d6b7b743..4122e76bbef 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -99,13 +99,19 @@
-Brian
*/
-/* Variables for archive share methods */
-mysql_mutex_t archive_mutex;
-static HASH archive_open_tables;
/* The file extension */
#define ARZ ".ARZ" // The data file
#define ARN ".ARN" // Files used during an optimize call
+#define ARM ".ARM" // Meta file (deprecated)
+
+/* 5.0 compatibility */
+#define META_V1_OFFSET_CHECK_HEADER 0
+#define META_V1_OFFSET_VERSION 1
+#define META_V1_OFFSET_ROWS_RECORDED 2
+#define META_V1_OFFSET_CHECK_POINT 10
+#define META_V1_OFFSET_CRASHED 18
+#define META_V1_LENGTH 19
/*
uchar + uchar
@@ -140,23 +146,12 @@ static handler *archive_create_handler(handlerton *hton,
return new (mem_root) ha_archive(hton, table);
}
-/*
- Used for hash table that tracks open tables.
-*/
-static uchar* archive_get_key(ARCHIVE_SHARE *share, size_t *length,
- my_bool not_used __attribute__((unused)))
-{
- *length=share->table_name_length;
- return (uchar*) share->table_name;
-}
-
#ifdef HAVE_PSI_INTERFACE
-PSI_mutex_key az_key_mutex_archive_mutex, az_key_mutex_ARCHIVE_SHARE_mutex;
+PSI_mutex_key az_key_mutex_Archive_share_mutex;
static PSI_mutex_info all_archive_mutexes[]=
{
- { &az_key_mutex_archive_mutex, "archive_mutex", PSI_FLAG_GLOBAL},
- { &az_key_mutex_ARCHIVE_SHARE_mutex, "ARCHIVE_SHARE::mutex", 0}
+ { &az_key_mutex_Archive_share_mutex, "Archive_share::mutex", 0}
};
PSI_file_key arch_key_file_metadata, arch_key_file_data;
@@ -220,39 +215,24 @@ int archive_db_init(void *p)
archive_hton->discover_table= archive_discover;
archive_hton->tablefile_extensions= ha_archive_exts;
- if (mysql_mutex_init(az_key_mutex_archive_mutex,
- &archive_mutex, MY_MUTEX_INIT_FAST))
- goto error;
- if (my_hash_init(&archive_open_tables, table_alias_charset, 32, 0, 0,
- (my_hash_get_key) archive_get_key, 0, 0))
- {
- mysql_mutex_destroy(&archive_mutex);
- }
- else
- {
- DBUG_RETURN(FALSE);
- }
-error:
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(0);
}
-/*
- Release the archive handler.
-
- SYNOPSIS
- archive_db_done()
- void
-
- RETURN
- FALSE OK
-*/
-int archive_db_done(void *p)
+Archive_share::Archive_share()
{
- my_hash_free(&archive_open_tables);
- mysql_mutex_destroy(&archive_mutex);
-
- return 0;
+ crashed= false;
+ in_optimize= false;
+ archive_write_open= false;
+ dirty= false;
+ DBUG_PRINT("ha_archive", ("Archive_share: %p",
+ this));
+ thr_lock_init(&lock);
+ /*
+ We will use this lock for rows.
+ */
+ mysql_mutex_init(az_key_mutex_Archive_share_mutex,
+ &mutex, MY_MUTEX_INIT_FAST);
}
@@ -309,6 +289,103 @@ ret:
DBUG_RETURN(my_errno);
}
+/**
+ @brief Read version 1 meta file (5.0 compatibility routine).
+
+ @return Completion status
+ @retval 0 Success
+ @retval !0 Failure
+*/
+
+int Archive_share::read_v1_metafile()
+{
+ char file_name[FN_REFLEN];
+ uchar buf[META_V1_LENGTH];
+ File fd;
+ DBUG_ENTER("Archive_share::read_v1_metafile");
+
+ fn_format(file_name, data_file_name, "", ARM, MY_REPLACE_EXT);
+ if ((fd= mysql_file_open(arch_key_file_metadata, file_name, O_RDONLY, MYF(0))) == -1)
+ DBUG_RETURN(-1);
+
+ if (mysql_file_read(fd, buf, sizeof(buf), MYF(0)) != sizeof(buf))
+ {
+ mysql_file_close(fd, MYF(0));
+ DBUG_RETURN(-1);
+ }
+
+ rows_recorded= uint8korr(buf + META_V1_OFFSET_ROWS_RECORDED);
+ crashed= buf[META_V1_OFFSET_CRASHED];
+ mysql_file_close(fd, MYF(0));
+ DBUG_RETURN(0);
+}
+
+
+/**
+ @brief Write version 1 meta file (5.0 compatibility routine).
+
+ @return Completion status
+ @retval 0 Success
+ @retval !0 Failure
+*/
+
+int Archive_share::write_v1_metafile()
+{
+ char file_name[FN_REFLEN];
+ uchar buf[META_V1_LENGTH];
+ File fd;
+ DBUG_ENTER("Archive_share::write_v1_metafile");
+
+ buf[META_V1_OFFSET_CHECK_HEADER]= ARCHIVE_CHECK_HEADER;
+ buf[META_V1_OFFSET_VERSION]= 1;
+ int8store(buf + META_V1_OFFSET_ROWS_RECORDED, rows_recorded);
+ int8store(buf + META_V1_OFFSET_CHECK_POINT, (ulonglong) 0);
+ buf[META_V1_OFFSET_CRASHED]= crashed;
+
+ fn_format(file_name, data_file_name, "", ARM, MY_REPLACE_EXT);
+ if ((fd= mysql_file_open(arch_key_file_metadata, file_name, O_WRONLY, MYF(0))) == -1)
+ DBUG_RETURN(-1);
+
+ if (mysql_file_write(fd, buf, sizeof(buf), MYF(0)) != sizeof(buf))
+ {
+ mysql_file_close(fd, MYF(0));
+ DBUG_RETURN(-1);
+ }
+
+ mysql_file_close(fd, MYF(0));
+ DBUG_RETURN(0);
+}
+
+/**
+ @brief Pack version 1 row (5.0 compatibility routine).
+
+ @param[in] record the record to pack
+
+ @return Length of packed row
+*/
+
+unsigned int ha_archive::pack_row_v1(uchar *record)
+{
+ uint *blob, *end;
+ uchar *pos;
+ DBUG_ENTER("pack_row_v1");
+ memcpy(record_buffer->buffer, record, table->s->reclength);
+ pos= record_buffer->buffer + table->s->reclength;
+ for (blob= table->s->blob_field, end= blob + table->s->blob_fields;
+ blob != end; blob++)
+ {
+ uint32 length= ((Field_blob *) table->field[*blob])->get_length();
+ if (length)
+ {
+ uchar *data_ptr;
+ ((Field_blob *) table->field[*blob])->get_ptr(&data_ptr);
+ memcpy(pos, data_ptr, length);
+ pos+= length;
+ }
+ }
+ DBUG_RETURN(pos - record_buffer->buffer);
+}
+
/*
This method reads the header of a datafile and returns whether or not it was successful.
*/
@@ -361,159 +438,102 @@ int ha_archive::read_data_header(azio_stream *file_to_read)
See ha_example.cc for a longer description.
*/
-ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, int *rc)
+Archive_share *ha_archive::get_share(const char *table_name, int *rc)
{
- uint length;
- DBUG_ENTER("ha_archive::get_share");
+ Archive_share *tmp_share;
- mysql_mutex_lock(&archive_mutex);
- length=(uint) strlen(table_name);
+ DBUG_ENTER("ha_archive::get_share");
- if (!(share=(ARCHIVE_SHARE*) my_hash_search(&archive_open_tables,
- (uchar*) table_name,
- length)))
+ lock_shared_ha_data();
+ if (!(tmp_share= static_cast<Archive_share*>(get_ha_share_ptr())))
{
- char *tmp_name;
azio_stream archive_tmp;
- if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
- &share, sizeof(*share),
- &tmp_name, length+1,
- NullS))
+ tmp_share= new Archive_share;
+
+ if (!tmp_share)
{
- mysql_mutex_unlock(&archive_mutex);
*rc= HA_ERR_OUT_OF_MEM;
- DBUG_RETURN(NULL);
+ goto err;
}
+ DBUG_PRINT("ha_archive", ("new Archive_share: %p",
+ tmp_share));
- share->use_count= 0;
- share->table_name_length= length;
- share->table_name= tmp_name;
- share->crashed= FALSE;
- share->archive_write_open= FALSE;
- fn_format(share->data_file_name, table_name, "",
+ fn_format(tmp_share->data_file_name, table_name, "",
ARZ, MY_REPLACE_EXT | MY_UNPACK_FILENAME);
- strmov(share->table_name, table_name);
- DBUG_PRINT("ha_archive", ("Data File %s",
- share->data_file_name));
- /*
- We will use this lock for rows.
- */
- mysql_mutex_init(az_key_mutex_ARCHIVE_SHARE_mutex,
- &share->mutex, MY_MUTEX_INIT_FAST);
-
+ strmov(tmp_share->table_name, table_name);
+ DBUG_PRINT("ha_archive", ("Data File %s",
+ tmp_share->data_file_name));
+
/*
We read the meta file, but do not mark it dirty. Since we are not
doing a write we won't mark it dirty (and we won't open it for
anything but reading... open it for write and we will generate null
compression writes).
*/
- if (!(azopen(&archive_tmp, share->data_file_name, O_RDONLY|O_BINARY)))
- {
- *rc= my_errno ? my_errno : -1;
- mysql_mutex_unlock(&archive_mutex);
- mysql_mutex_destroy(&share->mutex);
- my_free(share);
- DBUG_RETURN(NULL);
- }
- share->version= archive_tmp.version;
- if (archive_tmp.version == ARCHIVE_VERSION)
- {
- stats.auto_increment_value= archive_tmp.auto_increment + 1;
- share->rows_recorded= (ha_rows)archive_tmp.rows;
- share->crashed= archive_tmp.dirty;
- }
- else
+ if (!(azopen(&archive_tmp, tmp_share->data_file_name, O_RDONLY|O_BINARY)))
{
- /* Used by repair */
- share->rows_recorded= ~(ha_rows) 0;
- stats.auto_increment_value= 0;
+ delete tmp_share;
+ *rc= my_errno ? my_errno : HA_ERR_CRASHED;
+ tmp_share= NULL;
+ goto err;
}
- /*
- If archive version is less than 3, It should be upgraded before
- use.
- */
- if (archive_tmp.version < ARCHIVE_VERSION)
- *rc= HA_ERR_TABLE_NEEDS_UPGRADE;
- else if (frm_compare(&archive_tmp))
- *rc= HA_ERR_TABLE_DEF_CHANGED;
-
+ stats.auto_increment_value= archive_tmp.auto_increment + 1;
+ tmp_share->rows_recorded= (ha_rows)archive_tmp.rows;
+ tmp_share->crashed= archive_tmp.dirty;
+ share= tmp_share;
+ if (archive_tmp.version == 1)
+ share->read_v1_metafile();
azclose(&archive_tmp);
- (void) my_hash_insert(&archive_open_tables, (uchar*) share);
- thr_lock_init(&share->lock);
+ set_ha_share_ptr(static_cast<Handler_share*>(tmp_share));
}
- share->use_count++;
- DBUG_PRINT("ha_archive", ("archive table %.*s has %d open handles now",
- share->table_name_length, share->table_name,
- share->use_count));
- if (share->crashed)
+ if (tmp_share->crashed)
*rc= HA_ERR_CRASHED_ON_USAGE;
- mysql_mutex_unlock(&archive_mutex);
-
- DBUG_RETURN(share);
-}
+err:
+ unlock_shared_ha_data();
+ DBUG_ASSERT(tmp_share || *rc);
-/*
- Free the share.
- See ha_example.cc for a description.
-*/
-int ha_archive::free_share()
-{
- int rc= 0;
- DBUG_ENTER("ha_archive::free_share");
- DBUG_PRINT("ha_archive",
- ("archive table %.*s has %d open handles on entrance",
- share->table_name_length, share->table_name,
- share->use_count));
-
- mysql_mutex_lock(&archive_mutex);
- if (!--share->use_count)
- {
- my_hash_delete(&archive_open_tables, (uchar*) share);
- thr_lock_delete(&share->lock);
- mysql_mutex_destroy(&share->mutex);
- /*
- We need to make sure we don't reset the crashed state.
- If we open a crashed file, wee need to close it as crashed unless
- it has been repaired.
- Since we will close the data down after this, we go on and count
- the flush on close;
- */
- if (share->archive_write_open)
- {
- if (azclose(&(share->archive_write)))
- rc= 1;
- }
- my_free(share);
- }
- mysql_mutex_unlock(&archive_mutex);
-
- DBUG_RETURN(rc);
+ DBUG_RETURN(tmp_share);
}
-int ha_archive::init_archive_writer()
+
+int Archive_share::init_archive_writer()
{
- DBUG_ENTER("ha_archive::init_archive_writer");
- /*
+ DBUG_ENTER("Archive_share::init_archive_writer");
+ /*
It is expensive to open and close the data files and since you can't have
a gzip file that can be both read and written we keep a writer open
that is shared amoung all open tables.
*/
- if (!(azopen(&(share->archive_write), share->data_file_name,
+ if (!(azopen(&archive_write, data_file_name,
O_RDWR|O_BINARY)))
{
DBUG_PRINT("ha_archive", ("Could not open archive write file"));
- share->crashed= TRUE;
+ crashed= true;
DBUG_RETURN(1);
}
- share->archive_write_open= TRUE;
+ archive_write_open= true;
DBUG_RETURN(0);
}
+void Archive_share::close_archive_writer()
+{
+ mysql_mutex_assert_owner(&mutex);
+ if (archive_write_open)
+ {
+ if (archive_write.version == 1)
+ (void) write_v1_metafile();
+ azclose(&archive_write);
+ archive_write_open= false;
+ dirty= false;
+ }
+}
+
+
/*
No locks are required because it is associated with just one handler instance
*/
@@ -523,7 +543,8 @@ int ha_archive::init_archive_reader()
/*
It is expensive to open and close the data files and since you can't have
a gzip file that can be both read and written we keep a writer open
- that is shared amoung all open tables.
+ that is shared amoung all open tables, but have one reader open for
+ each handler instance.
*/
if (!archive_reader_open)
{
@@ -554,6 +575,8 @@ int ha_archive::open(const char *name, int mode, uint open_options)
DBUG_PRINT("ha_archive", ("archive table was opened for crash: %s",
(open_options & HA_OPEN_FOR_REPAIR) ? "yes" : "no"));
share= get_share(name, &rc);
+ if (!share)
+ DBUG_RETURN(rc);
/*
Allow open on crashed table in repair mode only.
@@ -574,7 +597,6 @@ int ha_archive::open(const char *name, int mode, uint open_options)
rc= 0;
break;
}
- free_share();
/* fall through */
default:
DBUG_RETURN(rc);
@@ -586,13 +608,17 @@ int ha_archive::open(const char *name, int mode, uint open_options)
ARCHIVE_ROW_HEADER_SIZE);
if (!record_buffer)
- {
- free_share();
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
- }
thr_lock_data_init(&share->lock, &lock, NULL);
+ DBUG_PRINT("ha_archive", ("archive table was crashed %s",
+ rc == HA_ERR_CRASHED_ON_USAGE ? "yes" : "no"));
+ if (rc == HA_ERR_CRASHED_ON_USAGE && open_options & HA_OPEN_FOR_REPAIR)
+ {
+ DBUG_RETURN(0);
+ }
+
DBUG_RETURN(rc);
}
@@ -627,8 +653,6 @@ int ha_archive::close(void)
if (azclose(&archive))
rc= 1;
}
- /* then also close share */
- rc|= free_share();
DBUG_RETURN(rc);
}
@@ -709,7 +733,7 @@ int ha_archive::create(const char *name, TABLE *table_arg,
{
KEY *pos= table_arg->key_info+key;
KEY_PART_INFO *key_part= pos->key_part;
- KEY_PART_INFO *key_part_end= key_part + pos->key_parts;
+ KEY_PART_INFO *key_part_end= key_part + pos->user_defined_key_parts;
for (; key_part != key_part_end; key_part++)
{
@@ -806,7 +830,7 @@ int ha_archive::real_write_row(uchar *buf, azio_stream *writer)
DBUG_ENTER("ha_archive::real_write_row");
/* We pack the row for writing */
- r_pack_length= pack_row(buf);
+ r_pack_length= pack_row(buf, writer);
written= azwrite(writer, record_buffer->buffer, r_pack_length);
if (written != r_pack_length)
@@ -847,7 +871,7 @@ uint32 ha_archive::max_row_length(const uchar *buf)
}
-unsigned int ha_archive::pack_row(uchar *record)
+unsigned int ha_archive::pack_row(uchar *record, azio_stream *writer)
{
uchar *ptr;
@@ -857,6 +881,9 @@ unsigned int ha_archive::pack_row(uchar *record)
if (fix_rec_buff(max_row_length(record)))
DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* purecov: inspected */
+ if (writer->version == 1)
+ DBUG_RETURN(pack_row_v1(record));
+
/* Copy null bits */
memcpy(record_buffer->buffer+ARCHIVE_ROW_HEADER_SIZE,
record, table->s->null_bytes);
@@ -900,7 +927,7 @@ int ha_archive::write_row(uchar *buf)
mysql_mutex_lock(&share->mutex);
- if (!share->archive_write_open && init_archive_writer())
+ if (!share->archive_write_open && share->init_archive_writer())
{
rc= errno;
goto error;
@@ -1368,11 +1395,26 @@ end:
DBUG_RETURN(rc);
}
+
+/**
+ @brief Check for upgrade
+
+ @param[in] check_opt check options
+
+ @return Completion status
+ @retval HA_ADMIN_OK No upgrade required
+ @retval HA_ADMIN_CORRUPT Cannot read meta-data
+ @retval HA_ADMIN_NEEDS_UPGRADE Upgrade required
+*/
+
int ha_archive::check_for_upgrade(HA_CHECK_OPT *check_opt)
{
- if (share->version < ARCHIVE_VERSION)
- return HA_ADMIN_NEEDS_ALTER;
- return 0;
+ DBUG_ENTER("ha_archive::check_for_upgrade");
+ if (init_archive_reader())
+ DBUG_RETURN(HA_ADMIN_CORRUPT);
+ if (archive.version < ARCHIVE_VERSION)
+ DBUG_RETURN(HA_ADMIN_NEEDS_UPGRADE);
+ DBUG_RETURN(HA_ADMIN_OK);
}
@@ -1571,6 +1613,7 @@ THR_LOCK_DATA **ha_archive::store_lock(THD *thd,
void ha_archive::update_create_info(HA_CREATE_INFO *create_info)
{
+ char tmp_real_path[FN_REFLEN];
DBUG_ENTER("ha_archive::update_create_info");
ha_archive::info(HA_STATUS_AUTO);
@@ -1579,8 +1622,8 @@ void ha_archive::update_create_info(HA_CREATE_INFO *create_info)
create_info->auto_increment_value= stats.auto_increment_value;
}
- if (!(my_readlink(share->real_path, share->data_file_name, MYF(0))))
- create_info->data_file_name= share->real_path;
+ if (!(my_readlink(tmp_real_path, share->data_file_name, MYF(0))))
+ create_info->data_file_name= sql_strdup(tmp_real_path);
DBUG_VOID_RETURN;
}
@@ -1804,6 +1847,20 @@ void ha_archive::destroy_record_buffer(archive_record_buffer *r)
DBUG_VOID_RETURN;
}
+bool ha_archive::check_if_incompatible_data(HA_CREATE_INFO *info,
+ uint table_changes)
+{
+ if (info->auto_increment_value != stats.auto_increment_value ||
+ (info->used_fields & HA_CREATE_USED_DATADIR) ||
+ info->data_file_name ||
+ (info->used_fields & HA_CREATE_USED_COMMENT) ||
+ table_changes != IS_EQUAL_YES)
+ return COMPATIBLE_DATA_NO;
+
+ return COMPATIBLE_DATA_YES;
+}
+
+
struct st_mysql_storage_engine archive_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
@@ -1816,7 +1873,7 @@ maria_declare_plugin(archive)
"Archive storage engine",
PLUGIN_LICENSE_GPL,
archive_db_init, /* Plugin Init */
- archive_db_done, /* Plugin Deinit */
+ NULL, /* Plugin Deinit */
0x0300 /* 3.0 */,
NULL, /* status variables */
NULL, /* system variables */
diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h
index 7e8d5cee47b..47ee89198e6 100644
--- a/storage/archive/ha_archive.h
+++ b/storage/archive/ha_archive.h
@@ -32,20 +32,38 @@ typedef struct st_archive_record_buffer {
} archive_record_buffer;
-typedef struct st_archive_share {
- char *table_name;
- char data_file_name[FN_REFLEN];
- uint table_name_length,use_count, version;
+class Archive_share : public Handler_share
+{
+public:
mysql_mutex_t mutex;
THR_LOCK lock;
azio_stream archive_write; /* Archive file we are working with */
+ ha_rows rows_recorded; /* Number of rows in tables */
+ char table_name[FN_REFLEN];
+ char data_file_name[FN_REFLEN];
+ bool in_optimize;
bool archive_write_open;
bool dirty; /* Flag for if a flush should occur */
bool crashed; /* Meta file is crashed */
- ha_rows rows_recorded; /* Number of rows in tables */
- ulonglong mean_rec_length;
- char real_path[FN_REFLEN];
-} ARCHIVE_SHARE;
+ Archive_share();
+ ~Archive_share()
+ {
+ DBUG_PRINT("ha_archive", ("~Archive_share: %p",
+ this));
+ if (archive_write_open)
+ {
+ mysql_mutex_lock(&mutex);
+ (void) close_archive_writer();
+ mysql_mutex_unlock(&mutex);
+ }
+ thr_lock_delete(&lock);
+ mysql_mutex_destroy(&mutex);
+ }
+ int init_archive_writer();
+ void close_archive_writer();
+ int write_v1_metafile();
+ int read_v1_metafile();
+};
/*
Version for file format.
@@ -58,7 +76,7 @@ typedef struct st_archive_share {
class ha_archive: public handler
{
THR_LOCK_DATA lock; /* MySQL lock */
- ARCHIVE_SHARE *share; /* Shared lock info */
+ Archive_share *share; /* Shared lock info */
azio_stream archive; /* Archive file we are working with */
my_off_t current_position; /* The position of the row we just read */
@@ -77,6 +95,7 @@ class ha_archive: public handler
void destroy_record_buffer(archive_record_buffer *r);
int frm_copy(azio_stream *src, azio_stream *dst);
int frm_compare(azio_stream *src);
+ unsigned int pack_row_v1(uchar *record);
public:
ha_archive(handlerton *hton, TABLE_SHARE *table_arg);
@@ -121,9 +140,7 @@ public:
int get_row(azio_stream *file_to_read, uchar *buf);
int get_row_version2(azio_stream *file_to_read, uchar *buf);
int get_row_version3(azio_stream *file_to_read, uchar *buf);
- ARCHIVE_SHARE *get_share(const char *table_name, int *rc);
- int free_share();
- int init_archive_writer();
+ Archive_share *get_share(const char *table_name, int *rc);
int init_archive_reader();
// Always try auto_repair in case of HA_ERR_CRASHED_ON_USAGE
bool auto_repair(int error) const
@@ -150,6 +167,7 @@ public:
uint32 max_row_length(const uchar *buf);
bool fix_rec_buff(unsigned int length);
int unpack_row(azio_stream *file_to_read, uchar *record);
- unsigned int pack_row(uchar *record);
+ unsigned int pack_row(uchar *record, azio_stream *writer);
+ bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
};