summaryrefslogtreecommitdiff
path: root/storage/archive
diff options
context:
space:
mode:
authorSergey Vojtovich <svoj@mariadb.org>2013-07-29 18:08:49 +0400
committerSergey Vojtovich <svoj@mariadb.org>2013-07-29 18:08:49 +0400
commit0f985c6407f331d7be9fd98af2104ca14ec6adf8 (patch)
treeba062c6c851e9c5da5f1e6d1ba92ddf4efb70a86 /storage/archive
parent5f8effe1db11c55c17df96b14993a73fe1f045ec (diff)
downloadmariadb-git-0f985c6407f331d7be9fd98af2104ca14ec6adf8.tar.gz
MDEV-4786 - merge 10.0-monty - 10.0
Fixed archive.archive failure. Applied remnants of two revisions, which were partially merged. Rev. 3225.1.1 (5.0 compatibility): BUG#11756687 - 48633: ARCHIVE TABLES ARE NOT UPGRADEABLE Archive tables created by 5.0 were not accessible. This patch adds various fixes so that 5.0 archive tables are readable and writable. Though it is strongly recommended to avoid binary upgrade of archive tables whenever possible. Rev. 3710 (due to valgrind warnings): Bug#13907676: HA_ARCHIVE::INFO In the WL#4305 refactoring of the archive writer, it could flush the writer when it was not yet open. This happened because if bulk insert was used but no rows were actually inserted (write_row was never called), the writer was marked dirty even if it was not open. The fix was to only mark it as dirty if it was opened. mysql-test/std_data/bug48633.ARM: A test case for BUG#11756687: archive table created by 5.0.95. mysql-test/std_data/bug48633.ARZ: A test case for BUG#11756687: archive table created by 5.0.95. mysql-test/std_data/bug48633.frm: A test case for BUG#11756687: archive table created by 5.0.95. mysql-test/suite/archive/archive.result: Modified a test case for BUG#47012 according to the fix for BUG#11756687. Added a test case for BUG#11756687. mysql-test/suite/archive/archive.test: Modified a test case for BUG#47012 according to the fix for BUG#11756687. Added a test case for BUG#11756687. No need to remove .ARM files anymore: DROP TABLE will take care of them. storage/archive/azio.c: Do not write an AZIO (v.3) header to a GZIO file (v.1). Added initialization of various azio_stream members to read_header() so it can proceed with the v.1 format. Update the data start position only when reading the first GZIO header, that is, only on azopen() and never on azread(). storage/archive/ha_archive.cc: Removed guards that rejected opening v.1 archive tables. Reload the .frm when repairing v.1 tables - they didn't have storage for a .frm. Do not flush the write stream when it is not open. 
Let DROP TABLE remove 5.0 .ARM files.
Diffstat (limited to 'storage/archive')
-rw-r--r--storage/archive/azio.c17
-rw-r--r--storage/archive/ha_archive.cc48
2 files changed, 41 insertions, 24 deletions
diff --git a/storage/archive/azio.c b/storage/archive/azio.c
index 4519d15cefc..c1efe19e91f 100644
--- a/storage/archive/azio.c
+++ b/storage/archive/azio.c
@@ -77,6 +77,7 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd)
s->version = (unsigned char)az_magic[1]; /* this needs to be a define to version */
s->minor_version= (unsigned char) az_magic[2]; /* minor version */
s->dirty= AZ_STATE_CLEAN;
+ s->start= 0;
/*
We do our own version of append by nature.
@@ -186,6 +187,9 @@ int write_header(azio_stream *s)
char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE];
char *ptr= buffer;
+ if (s->version == 1)
+ return 0;
+
s->block_size= AZ_BUFSIZE_WRITE;
s->version = (unsigned char)az_magic[1];
s->minor_version = (unsigned char)az_magic[2];
@@ -308,9 +312,9 @@ void check_header(azio_stream *s)
/* Peek ahead to check the gzip magic header */
if ( s->stream.next_in[0] == gz_magic[0] && s->stream.next_in[1] == gz_magic[1])
{
+ read_header(s, s->stream.next_in);
s->stream.avail_in -= 2;
s->stream.next_in += 2;
- s->version= (unsigned char)2;
/* Check the rest of the gzip header */
method = get_byte(s);
@@ -339,7 +343,8 @@ void check_header(azio_stream *s)
for (len = 0; len < 2; len++) (void)get_byte(s);
}
s->z_err = s->z_eof ? Z_DATA_ERROR : Z_OK;
- s->start = my_tell(s->file, MYF(0)) - s->stream.avail_in;
+ if (!s->start)
+ s->start= my_tell(s->file, MYF(0)) - s->stream.avail_in;
}
else if ( s->stream.next_in[0] == az_magic[0] && s->stream.next_in[1] == az_magic[1])
{
@@ -401,9 +406,13 @@ void read_header(azio_stream *s, unsigned char *buffer)
else if (buffer[0] == gz_magic[0] && buffer[1] == gz_magic[1])
{
/*
- Set version number to previous version (2).
+ Set version number to previous version (1).
*/
- s->version= (unsigned char) 2;
+ s->version= 1;
+ s->auto_increment= 0;
+ s->frm_length= 0;
+ s->longest_row= 0;
+ s->shortest_row= 0;
} else {
/*
Unknown version.
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index bb66d329de0..82f2513ad59 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -97,6 +97,11 @@
inserts a lot faster, but would mean highly arbitrary reads.
-Brian
+
+ Archive file format versions:
+ <5.1.5 - v.1
+ 5.1.5-5.1.15 - v.2
+ >5.1.15 - v.3
*/
@@ -192,9 +197,11 @@ static void init_archive_psi_keys(void)
/*
We just implement one additional file extension.
+ ARM is here just to properly drop 5.0 tables.
*/
static const char *ha_archive_exts[] = {
ARZ,
+ ARM,
NullS
};
@@ -581,20 +588,13 @@ int ha_archive::open(const char *name, int mode, uint open_options)
if (!share)
DBUG_RETURN(rc);
- /*
- Allow open on crashed table in repair mode only.
- Block open on 5.0 ARCHIVE table. Though we have almost all
- routines to access these tables, they were not well tested.
- For now we have to refuse to open such table to avoid
- potential data loss.
- */
+ /* Allow open on crashed table in repair mode only. */
switch (rc)
{
case 0:
break;
case HA_ERR_TABLE_DEF_CHANGED:
case HA_ERR_CRASHED_ON_USAGE:
- case HA_ERR_TABLE_NEEDS_UPGRADE:
if (open_options & HA_OPEN_FOR_REPAIR)
{
rc= 0;
@@ -675,6 +675,17 @@ int ha_archive::frm_copy(azio_stream *src, azio_stream *dst)
int rc= 0;
uchar *frm_ptr;
+ if (!src->frm_length)
+ {
+ size_t frm_len;
+ if (!table_share->read_frm_image((const uchar**) &frm_ptr, &frm_len))
+ {
+ azwrite_frm(dst, frm_ptr, frm_len);
+ table_share->free_frm_image(frm_ptr);
+ }
+ return 0;
+ }
+
if (!(frm_ptr= (uchar *) my_malloc(src->frm_length,
MYF(MY_THREAD_SPECIFIC | MY_WME))))
return HA_ERR_OUT_OF_MEM;
@@ -1639,19 +1650,13 @@ int ha_archive::info(uint flag)
{
DBUG_ENTER("ha_archive::info");
- /*
- If dirty, we lock, and then reset/flush the data.
- I found that just calling azflush() doesn't always work.
- */
mysql_mutex_lock(&share->mutex);
- if (share->dirty == TRUE)
+ if (share->dirty)
{
- if (share->dirty == TRUE)
- {
- DBUG_PRINT("ha_archive", ("archive flushing out rows for scan"));
- azflush(&(share->archive_write), Z_SYNC_FLUSH);
- share->dirty= FALSE;
- }
+ DBUG_PRINT("ha_archive", ("archive flushing out rows for scan"));
+ DBUG_ASSERT(share->archive_write_open);
+ azflush(&(share->archive_write), Z_SYNC_FLUSH);
+ share->dirty= FALSE;
}
/*
@@ -1727,7 +1732,10 @@ int ha_archive::end_bulk_insert()
{
DBUG_ENTER("ha_archive::end_bulk_insert");
bulk_insert= FALSE;
- share->dirty= TRUE;
+ mysql_mutex_lock(&share->mutex);
+ if (share->archive_write_open)
+ share->dirty= true;
+ mysql_mutex_unlock(&share->mutex);
DBUG_RETURN(0);
}