summaryrefslogtreecommitdiff
path: root/sql/examples
diff options
context:
space:
mode:
author: unknown <brian@brian-akers-computer.local> 2005-07-10 21:17:03 -0700
committer: unknown <brian@brian-akers-computer.local> 2005-07-10 21:17:03 -0700
commit a339be0fb3a16c386fdcf8297ef814e96ceab8ba (patch)
tree 77e84470e55fa3f3e75bcfd2ca140f4be436e6cb /sql/examples
parent 46f0327e6b67eba7656f55874308451aebab5dbd (diff)
download mariadb-git-a339be0fb3a16c386fdcf8297ef814e96ceab8ba.tar.gz
Fixed 32bit issue, reworked error logic for open tables, and redid the repair table code so that it uses the extended optimize table code.
sql/examples/ha_archive.cc: Fixed issue with 32bit systems giving warnings on bit shift (this is due to the fix by Jim to change to ha_rows). The error logic for opening a table was reworked after studying up on a reported issue. It has been reworked to create a share in all situations. The repair table will just have to figure everything out or toss its own error. The read only filesystem and permission denied problems were solved. Repair table code now rebuilds with the new optimize table extended code (so it no longer does the work itself).
Diffstat (limited to 'sql/examples')
-rw-r--r--sql/examples/ha_archive.cc101
1 file changed, 25 insertions(+), 76 deletions(-)
diff --git a/sql/examples/ha_archive.cc b/sql/examples/ha_archive.cc
index 41759885838..5ce249e9c61 100644
--- a/sql/examples/ha_archive.cc
+++ b/sql/examples/ha_archive.cc
@@ -305,12 +305,12 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows, bool dirty)
meta_buffer[0]= (uchar)ARCHIVE_CHECK_HEADER;
meta_buffer[1]= (uchar)ARCHIVE_VERSION;
- int8store(meta_buffer + 2, rows);
+ int8store(meta_buffer + 2, (ulonglong)rows);
int8store(meta_buffer + 10, check_point);
*(meta_buffer + 18)= (uchar)dirty;
DBUG_PRINT("ha_archive::write_meta_file", ("Check %d", (uint)ARCHIVE_CHECK_HEADER));
DBUG_PRINT("ha_archive::write_meta_file", ("Version %d", (uint)ARCHIVE_VERSION));
- DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", rows));
+ DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", (ulonglong)rows));
DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", check_point));
DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));
@@ -326,6 +326,9 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows, bool dirty)
/*
We create the shared memory space that we will use for the open table.
+ No matter what we try to get or create a share. This is so that a repair
+ table operation can occur.
+
See ha_example.cc for a longer description.
*/
ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
@@ -363,7 +366,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
*/
VOID(pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST));
if ((share->meta_file= my_open(meta_file_name, O_RDWR, MYF(0))) == -1)
- goto error;
+ share->crashed= TRUE;
/*
After we read, we set the file to dirty. When we close, we will do the
@@ -381,27 +384,14 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
that is shared amoung all open tables.
*/
if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
- goto error2;
- if (my_hash_insert(&archive_open_tables, (byte*) share))
- goto error3;
+ share->crashed= TRUE;
+ VOID(my_hash_insert(&archive_open_tables, (byte*) share));
thr_lock_init(&share->lock);
}
share->use_count++;
pthread_mutex_unlock(&archive_mutex);
return share;
-
-error3:
- /* We close, but ignore errors since we already have errors */
- (void)gzclose(share->archive_write);
-error2:
- my_close(share->meta_file,MYF(0));
-error:
- pthread_mutex_unlock(&archive_mutex);
- VOID(pthread_mutex_destroy(&share->mutex));
- my_free((gptr) share, MYF(0));
-
- return NULL;
}
@@ -458,13 +448,14 @@ int ha_archive::open(const char *name, int mode, uint test_if_locked)
DBUG_ENTER("ha_archive::open");
if (!(share= get_share(name, table)))
- DBUG_RETURN(-1);
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM); // Not handled well by calling code!
thr_lock_data_init(&share->lock,&lock,NULL);
if ((archive= gzopen(share->data_file_name, "rb")) == NULL)
{
- (void)free_share(share); //We void since we already have an error
- DBUG_RETURN(errno ? errno : -1);
+ if (errno == EROFS || errno == EACCES)
+ DBUG_RETURN(my_errno= errno);
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
}
DBUG_RETURN(0);
@@ -803,68 +794,20 @@ int ha_archive::rnd_pos(byte * buf, byte *pos)
/*
This method repairs the meta file. It does this by walking the datafile and
- rewriting the meta file.
+ rewriting the meta file. Currently it does this by calling optimize with
+ the extended flag.
*/
int ha_archive::repair(THD* thd, HA_CHECK_OPT* check_opt)
{
- int rc;
- byte *buf;
- ha_rows rows_recorded= 0;
- gzFile rebuild_file; // Archive file we are working with
- File meta_file; // Meta file we use
- char data_file_name[FN_REFLEN];
DBUG_ENTER("ha_archive::repair");
+ check_opt->flags= T_EXTEND;
+ int rc= optimize(thd, check_opt);
- /*
- Open up the meta file to recreate it.
- */
- fn_format(data_file_name, share->table_name, "", ARZ,
- MY_REPLACE_EXT|MY_UNPACK_FILENAME);
- if ((rebuild_file= gzopen(data_file_name, "rb")) == NULL)
- DBUG_RETURN(errno ? errno : -1);
-
- if ((rc= read_data_header(rebuild_file)))
- goto error;
-
- /*
- We malloc up the buffer we will use for counting the rows.
- I know, this malloc'ing memory but this should be a very
- rare event.
- */
- if (!(buf= (byte*) my_malloc(table->s->rec_buff_length > sizeof(ulonglong) +1 ?
- table->s->rec_buff_length : sizeof(ulonglong) +1 ,
- MYF(MY_WME))))
- {
- rc= HA_ERR_CRASHED_ON_USAGE;
- goto error;
- }
-
- while (!(rc= get_row(rebuild_file, buf)))
- rows_recorded++;
-
- /*
- Only if we reach the end of the file do we assume we can rewrite.
- At this point we reset rc to a non-message state.
- */
- if (rc == HA_ERR_END_OF_FILE)
- {
- fn_format(data_file_name,share->table_name,"",ARM,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
- if ((meta_file= my_open(data_file_name, O_RDWR, MYF(0))) == -1)
- {
- rc= HA_ERR_CRASHED_ON_USAGE;
- goto error;
- }
- (void)write_meta_file(meta_file, rows_recorded, TRUE);
- my_close(meta_file,MYF(0));
- rc= 0;
- }
+ if (rc)
+ DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR);
- my_free((gptr) buf, MYF(0));
share->crashed= FALSE;
-error:
- gzclose(rebuild_file);
-
- DBUG_RETURN(rc);
+ DBUG_RETURN(0);
}
/*
@@ -925,8 +868,14 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
insert it into the new archive file.
*/
if (!rc)
+ {
+ share->rows_recorded= 0;
while (!(rc= get_row(archive, buf)))
+ {
real_write_row(buf, writer);
+ share->rows_recorded++;
+ }
+ }
my_free(buf, MYF(0));
if (rc && rc != HA_ERR_END_OF_FILE)