Diffstat (limited to 'sql/examples')
-rw-r--r--  sql/examples/ha_archive.cc   543
-rw-r--r--  sql/examples/ha_archive.h     66
-rw-r--r--  sql/examples/ha_example.cc    35
-rw-r--r--  sql/examples/ha_example.h      4
-rw-r--r--  sql/examples/ha_tina.cc       66
-rw-r--r--  sql/examples/ha_tina.h        14
6 files changed, 398 insertions, 330 deletions
diff --git a/sql/examples/ha_archive.cc b/sql/examples/ha_archive.cc
index b754c429dda..d5cf713aa44 100644
--- a/sql/examples/ha_archive.cc
+++ b/sql/examples/ha_archive.cc
@@ -22,6 +22,7 @@
#ifdef HAVE_ARCHIVE_DB
#include "ha_archive.h"
+#include <my_dir.h>
/*
First, if you want to understand storage engines you should look at
@@ -134,6 +135,28 @@ static HASH archive_open_tables;
#define DATA_BUFFER_SIZE 2 // Size of the data used in the data file
#define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption
+/* dummy handlerton - only to have something to return from archive_db_init */
+static handlerton archive_hton = {
+ "archive",
+ 0, /* slot */
+ 0, /* savepoint size. */
+ NULL, /* close_connection */
+ NULL, /* savepoint */
+ NULL, /* rollback to savepoint */
+ NULL, /* release savepoint */
+ NULL, /* commit */
+ NULL, /* rollback */
+ NULL, /* prepare */
+ NULL, /* recover */
+ NULL, /* commit_by_xid */
+ NULL, /* rollback_by_xid */
+ NULL, /* create_cursor_read_view */
+ NULL, /* set_cursor_read_view */
+ NULL, /* close_cursor_read_view */
+ HTON_NO_FLAGS
+};
+
+
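The dummy handlerton above exists only so archive_db_init() has a non-NULL token to hand back; the server keys the engine off that pointer, and the new ha_archive constructor later passes it to the handler base class. A reduced, self-contained sketch of that registration pattern — the struct and names here are hypothetical stand-ins, not the real handlerton declaration:

  struct demo_hton { const char *name; int flags; };
  static struct demo_hton demo_hton_instance= { "demo", 0 };

  /* Mirrors the new init contract below: return the engine's handlerton
     on success, 0 on failure (all names hypothetical). */
  static struct demo_hton *demo_db_init(void)
  {
    return &demo_hton_instance;
  }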
/*
Used for hash table that tracks open tables.
*/
@@ -153,19 +176,20 @@ static byte* archive_get_key(ARCHIVE_SHARE *share,uint *length,
void
RETURN
- FALSE OK
- TRUE Error
+ &archive_hton OK
+ 0 Error
*/
-bool archive_db_init()
+handlerton *archive_db_init()
{
archive_inited= 1;
VOID(pthread_mutex_init(&archive_mutex, MY_MUTEX_INIT_FAST));
- return (hash_init(&archive_open_tables, system_charset_info, 32, 0, 0,
- (hash_get_key) archive_get_key, 0, 0));
+ if (hash_init(&archive_open_tables, system_charset_info, 32, 0, 0,
+ (hash_get_key) archive_get_key, 0, 0))
+ return 0;
+ return &archive_hton;
}
-
/*
Release the archive handler.
@@ -188,6 +212,15 @@ bool archive_db_end()
return FALSE;
}
+ha_archive::ha_archive(TABLE *table_arg)
+ :handler(&archive_hton, table_arg), delayed_insert(0), bulk_insert(0)
+{
+ /* Set our original buffer from pre-allocated memory */
+ buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info);
+
+ /* The size of the offset value we will use for position() */
+ ref_length = sizeof(z_off_t);
+}
/*
This method reads the header of a datafile and returns whether or not it was successful.
@@ -239,7 +272,7 @@ error:
This method reads the header of a meta file and returns whether or not it was successful.
*rows will contain the current number of rows in the data file upon success.
*/
-int ha_archive::read_meta_file(File meta_file, ulonglong *rows)
+int ha_archive::read_meta_file(File meta_file, ha_rows *rows)
{
uchar meta_buffer[META_BUFFER_SIZE];
ulonglong check_point;
@@ -253,7 +286,7 @@ int ha_archive::read_meta_file(File meta_file, ulonglong *rows)
/*
Parse out the meta data, we ignore version at the moment
*/
- *rows= uint8korr(meta_buffer + 2);
+ *rows= (ha_rows)uint8korr(meta_buffer + 2);
check_point= uint8korr(meta_buffer + 10);
DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0]));
@@ -274,10 +307,9 @@ int ha_archive::read_meta_file(File meta_file, ulonglong *rows)
/*
This method writes out the header of a meta file and returns whether or not it was successful.
By setting dirty you say whether or not the file represents the actual state of the data file.
- Upon ::open() we set to dirty, and upon ::close() we set to clean. If we determine during
- a read that the file was dirty we will force a rebuild of this file.
+ Upon ::open() we set to dirty, and upon ::close() we set to clean.
*/
-int ha_archive::write_meta_file(File meta_file, ulonglong rows, bool dirty)
+int ha_archive::write_meta_file(File meta_file, ha_rows rows, bool dirty)
{
uchar meta_buffer[META_BUFFER_SIZE];
ulonglong check_point= 0; //Reserved for the future
@@ -286,12 +318,12 @@ int ha_archive::write_meta_file(File meta_file, ulonglong rows, bool dirty)
meta_buffer[0]= (uchar)ARCHIVE_CHECK_HEADER;
meta_buffer[1]= (uchar)ARCHIVE_VERSION;
- int8store(meta_buffer + 2, rows);
+ int8store(meta_buffer + 2, (ulonglong)rows);
int8store(meta_buffer + 10, check_point);
*(meta_buffer + 18)= (uchar)dirty;
DBUG_PRINT("ha_archive::write_meta_file", ("Check %d", (uint)ARCHIVE_CHECK_HEADER));
DBUG_PRINT("ha_archive::write_meta_file", ("Version %d", (uint)ARCHIVE_VERSION));
- DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", rows));
+ DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", (ulonglong)rows));
DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", check_point));
DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));
@@ -307,6 +339,9 @@ int ha_archive::write_meta_file(File meta_file, ulonglong rows, bool dirty)
/*
We create the shared memory space that we will use for the open table.
+ No matter what, we try to get or create a share. This is so that a repair
+ table operation can occur.
+
See ha_example.cc for a longer description.
*/
ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
@@ -335,6 +370,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
share->use_count= 0;
share->table_name_length= length;
share->table_name= tmp_name;
+ share->crashed= FALSE;
fn_format(share->data_file_name,table_name,"",ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
fn_format(meta_file_name,table_name,"",ARM,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
strmov(share->table_name,table_name);
@@ -343,26 +379,17 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
*/
VOID(pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST));
if ((share->meta_file= my_open(meta_file_name, O_RDWR, MYF(0))) == -1)
- goto error;
+ share->crashed= TRUE;
- if (read_meta_file(share->meta_file, &share->rows_recorded))
- {
- /*
- The problem here is that for some reason, probably a crash, the meta
- file has been corrupted. So what do we do? Well we try to rebuild it
- ourself. Once that happens, we reread it, but if that fails we just
- call it quits and return an error.
- */
- if (rebuild_meta_file(share->table_name, share->meta_file))
- goto error;
- if (read_meta_file(share->meta_file, &share->rows_recorded))
- goto error;
- }
/*
After we read, we set the file to dirty. When we close, we will do the
- opposite.
+ opposite. If the meta file will not open, we assume it is crashed and
+ leave it up to the user to fix.
*/
- (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
+ if (read_meta_file(share->meta_file, &share->rows_recorded))
+ share->crashed= TRUE;
+ else
+ (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
/*
It is expensive to open and close the data files and since you can't have
@@ -370,27 +397,14 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
that is shared among all open tables.
*/
if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
- goto error2;
- if (my_hash_insert(&archive_open_tables, (byte*) share))
- goto error3;
+ share->crashed= TRUE;
+ VOID(my_hash_insert(&archive_open_tables, (byte*) share));
thr_lock_init(&share->lock);
}
share->use_count++;
pthread_mutex_unlock(&archive_mutex);
return share;
-
-error3:
- /* We close, but ignore errors since we already have errors */
- (void)gzclose(share->archive_write);
-error2:
- my_close(share->meta_file,MYF(0));
-error:
- pthread_mutex_unlock(&archive_mutex);
- VOID(pthread_mutex_destroy(&share->mutex));
- my_free((gptr) share, MYF(0));
-
- return NULL;
}
@@ -420,11 +434,20 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
}
-/*
+/*
We just implement one additional file extension.
*/
+static const char *ha_archive_exts[] = {
+ ARZ,
+ ARN,
+ ARM,
+ NullS
+};
+
const char **ha_archive::bas_ext() const
-{ static const char *ext[]= { ARZ, ARN, ARM, NullS }; return ext; }
+{
+ return ha_archive_exts;
+}
/*
@@ -438,13 +461,14 @@ int ha_archive::open(const char *name, int mode, uint test_if_locked)
DBUG_ENTER("ha_archive::open");
if (!(share= get_share(name, table)))
- DBUG_RETURN(1);
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM); // Not handled well by calling code!
thr_lock_data_init(&share->lock,&lock,NULL);
if ((archive= gzopen(share->data_file_name, "rb")) == NULL)
{
- (void)free_share(share); //We void since we already have an error
- DBUG_RETURN(errno ? errno : -1);
+ if (errno == EROFS || errno == EACCES)
+ DBUG_RETURN(my_errno= errno);
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
}
DBUG_RETURN(0);
@@ -552,6 +576,44 @@ error:
DBUG_RETURN(error ? error : -1);
}
+/*
+ This is where the actual row is written out.
+*/
+int ha_archive::real_write_row(byte *buf, gzFile writer)
+{
+ z_off_t written;
+ uint *ptr, *end;
+ DBUG_ENTER("ha_archive::real_write_row");
+
+ written= gzwrite(writer, buf, table->s->reclength);
+ DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %d", written, table->s->reclength));
+ if (!delayed_insert || !bulk_insert)
+ share->dirty= TRUE;
+
+ if (written != (z_off_t)table->s->reclength)
+ DBUG_RETURN(errno ? errno : -1);
+ /*
+ We should probably mark the table as damaged if the record is written
+ but the blob fails.
+ */
+ for (ptr= table->s->blob_field, end= ptr + table->s->blob_fields ;
+ ptr != end ;
+ ptr++)
+ {
+ char *data_ptr;
+ uint32 size= ((Field_blob*) table->field[*ptr])->get_length();
+
+ if (size)
+ {
+ ((Field_blob*) table->field[*ptr])->get_ptr(&data_ptr);
+ written= gzwrite(writer, data_ptr, (unsigned)size);
+ if (written != (z_off_t)size)
+ DBUG_RETURN(errno ? errno : -1);
+ }
+ }
+ DBUG_RETURN(0);
+}
+
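real_write_row() fixes the on-disk row format: the fixed-length record image, followed immediately by the raw bytes of each non-empty blob, with no per-blob length header (the lengths live inside the record image and are recovered on read). A minimal zlib sketch of the same framing, simplified to a single optional blob:

  #include <zlib.h>

  static int write_framed_row(gzFile out, const char *rec, unsigned reclen,
                              const char *blob, unsigned bloblen)
  {
    if (gzwrite(out, rec, reclen) != (int)reclen)
      return -1;                      /* fixed record image first */
    if (bloblen && gzwrite(out, blob, bloblen) != (int)bloblen)
      return -1;                      /* then raw blob bytes, back to back */
    return 0;
  }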
/*
Look at ha_archive::open() for an explanation of the row format.
@@ -562,48 +624,25 @@ error:
for implementing start_bulk_insert() is that we could skip
setting dirty to true each time.
*/
-int ha_archive::write_row(byte * buf)
+int ha_archive::write_row(byte *buf)
{
- z_off_t written;
- Field_blob **field;
+ int rc;
DBUG_ENTER("ha_archive::write_row");
- statistic_increment(ha_write_count,&LOCK_status);
+ if (share->crashed)
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+
+ statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
pthread_mutex_lock(&share->mutex);
- written= gzwrite(share->archive_write, buf, table->reclength);
- DBUG_PRINT("ha_archive::get_row", ("Wrote %d bytes expected %d", written, table->reclength));
- share->dirty= TRUE;
- if (written != (z_off_t)table->reclength)
- goto error;
- /*
- We should probably mark the table as damagaged if the record is written
- but the blob fails.
- */
- for (field= table->blob_field ; *field ; field++)
- {
- char *ptr;
- uint32 size= (*field)->get_length();
-
- if (size)
- {
- (*field)->get_ptr(&ptr);
- written= gzwrite(share->archive_write, ptr, (unsigned)size);
- if (written != (z_off_t)size)
- goto error;
- }
- }
share->rows_recorded++;
+ rc= real_write_row(buf, share->archive_write);
pthread_mutex_unlock(&share->mutex);
- DBUG_RETURN(0);
-error:
- pthread_mutex_unlock(&share->mutex);
- DBUG_RETURN(errno ? errno : -1);
+ DBUG_RETURN(rc);
}
-
/*
All calls that need to scan the table start with this method. If we are told
that it is a table scan we rewind the file to the beginning, otherwise
@@ -613,6 +652,9 @@ error:
int ha_archive::rnd_init(bool scan)
{
DBUG_ENTER("ha_archive::rnd_init");
+
+ if (share->crashed)
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
/* We rewind the file so that we can read from the beginning if scan */
if (scan)
@@ -650,13 +692,13 @@ int ha_archive::rnd_init(bool scan)
int ha_archive::get_row(gzFile file_to_read, byte *buf)
{
int read; // Bytes read, gzread() returns int
+ uint *ptr, *end;
char *last;
size_t total_blob_length= 0;
- Field_blob **field;
DBUG_ENTER("ha_archive::get_row");
- read= gzread(file_to_read, buf, table->reclength);
- DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %d", read, table->reclength));
+ read= gzread(file_to_read, buf, table->s->reclength);
+ DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %d", read, table->s->reclength));
if (read == Z_STREAM_ERROR)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
@@ -665,28 +707,35 @@ int ha_archive::get_row(gzFile file_to_read, byte *buf)
if (read == 0)
DBUG_RETURN(HA_ERR_END_OF_FILE);
- /* If the record is the wrong size, the file is probably damaged */
- if ((ulong) read != table->reclength)
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+ /*
+ If the record is the wrong size, the file is probably damaged, unless
+ we are dealing with a delayed insert or a bulk insert.
+ */
+ if ((ulong) read != table->s->reclength)
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
/* Calculate blob length, we use this for our buffer */
- for (field=table->blob_field; *field ; field++)
- total_blob_length += (*field)->get_length();
+ for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ;
+ ptr != end ;
+ ptr++)
+ total_blob_length += ((Field_blob*) table->field[*ptr])->get_length();
/* Adjust our row buffer if we need be */
buffer.alloc(total_blob_length);
last= (char *)buffer.ptr();
/* Loop through our blobs and read them */
- for (field=table->blob_field; *field ; field++)
+ for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ;
+ ptr != end ;
+ ptr++)
{
- size_t size= (*field)->get_length();
+ size_t size= ((Field_blob*) table->field[*ptr])->get_length();
if (size)
{
read= gzread(file_to_read, last, size);
if ((size_t) read != size)
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
- (*field)->set_ptr(size, last);
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+ ((Field_blob*) table->field[*ptr])->set_ptr(size, last);
last += size;
}
}
@@ -704,11 +753,15 @@ int ha_archive::rnd_next(byte *buf)
int rc;
DBUG_ENTER("ha_archive::rnd_next");
+ if (share->crashed)
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+
if (!scan_rows)
DBUG_RETURN(HA_ERR_END_OF_FILE);
scan_rows--;
- statistic_increment(ha_read_rnd_next_count,&LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
+ &LOCK_status);
current_position= gztell(archive);
rc= get_row(archive, buf);
@@ -720,7 +773,7 @@ int ha_archive::rnd_next(byte *buf)
}
-/*
+/*
Thanks to the table flag HA_REC_NOT_IN_SEQ this will be called after
each call to ha_archive::rnd_next() if an ordering of the rows is
needed.
@@ -729,7 +782,7 @@ int ha_archive::rnd_next(byte *buf)
void ha_archive::position(const byte *record)
{
DBUG_ENTER("ha_archive::position");
- ha_store_ptr(ref, ref_length, current_position);
+ my_store_ptr(ref, ref_length, current_position);
DBUG_VOID_RETURN;
}
@@ -744,68 +797,30 @@ void ha_archive::position(const byte *record)
int ha_archive::rnd_pos(byte * buf, byte *pos)
{
DBUG_ENTER("ha_archive::rnd_pos");
- statistic_increment(ha_read_rnd_count,&LOCK_status);
- current_position= ha_get_ptr(pos, ref_length);
+ statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
+ &LOCK_status);
+ current_position= (z_off_t)my_get_ptr(pos, ref_length);
(void)gzseek(archive, current_position, SEEK_SET);
DBUG_RETURN(get_row(archive, buf));
}
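Since the constructor sets ref_length to sizeof(z_off_t), position() and rnd_pos() simply round-trip a gztell() offset through the ref byte array. A sketch of that round trip, with memcpy standing in for my_store_ptr/my_get_ptr:

  #include <string.h>
  #include <zlib.h>

  static void save_pos(unsigned char *ref, z_off_t pos)
  {
    memcpy(ref, &pos, sizeof(pos));   /* pack the offset into ref[] */
  }

  static z_off_t load_pos(const unsigned char *ref)
  {
    z_off_t pos;
    memcpy(&pos, ref, sizeof(pos));   /* unpack before gzseek() */
    return pos;
  }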
/*
- This method rebuilds the meta file. It does this by walking the datafile and
- rewriting the meta file.
+ This method repairs the meta file. It does this by walking the datafile and
+ rewriting the meta file. Currently it does this by calling optimize with
+ the extended flag.
*/
-int ha_archive::rebuild_meta_file(char *table_name, File meta_file)
+int ha_archive::repair(THD* thd, HA_CHECK_OPT* check_opt)
{
- int rc;
- byte *buf;
- ulonglong rows_recorded= 0;
- gzFile rebuild_file; /* Archive file we are working with */
- char data_file_name[FN_REFLEN];
- DBUG_ENTER("ha_archive::rebuild_meta_file");
+ DBUG_ENTER("ha_archive::repair");
+ check_opt->flags= T_EXTEND;
+ int rc= optimize(thd, check_opt);
- /*
- Open up the meta file to recreate it.
- */
- fn_format(data_file_name, table_name, "", ARZ,
- MY_REPLACE_EXT|MY_UNPACK_FILENAME);
- if ((rebuild_file= gzopen(data_file_name, "rb")) == NULL)
- DBUG_RETURN(errno ? errno : -1);
+ if (rc)
+ DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR);
- if ((rc= read_data_header(rebuild_file)))
- goto error;
-
- /*
- We malloc up the buffer we will use for counting the rows.
- I know, this malloc'ing memory but this should be a very
- rare event.
- */
- if (!(buf= (byte*) my_malloc(table->rec_buff_length > sizeof(ulonglong) +1 ?
- table->rec_buff_length : sizeof(ulonglong) +1 ,
- MYF(MY_WME))))
- {
- rc= HA_ERR_CRASHED_ON_USAGE;
- goto error;
- }
-
- while (!(rc= get_row(rebuild_file, buf)))
- rows_recorded++;
-
- /*
- Only if we reach the end of the file do we assume we can rewrite.
- At this point we reset rc to a non-message state.
- */
- if (rc == HA_ERR_END_OF_FILE)
- {
- (void)write_meta_file(meta_file, rows_recorded, FALSE);
- rc= 0;
- }
-
- my_free((gptr) buf, MYF(0));
-error:
- gzclose(rebuild_file);
-
- DBUG_RETURN(rc);
+ share->crashed= FALSE;
+ DBUG_RETURN(0);
}
/*
@@ -815,56 +830,99 @@ error:
int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
{
DBUG_ENTER("ha_archive::optimize");
- int read; // Bytes read, gzread() returns int
- gzFile reader, writer;
- char block[IO_SIZE];
+ int rc;
+ gzFile writer;
char writer_filename[FN_REFLEN];
+ /* Flush any waiting data */
+ gzflush(share->archive_write, Z_SYNC_FLUSH);
+
/* Lets create a file to contain the new data */
fn_format(writer_filename, share->table_name, "", ARN,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
- /* Closing will cause all data waiting to be flushed, to be flushed */
- gzclose(share->archive_write);
+ if ((writer= gzopen(writer_filename, "wb")) == NULL)
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
- if ((reader= gzopen(share->data_file_name, "rb")) == NULL)
- DBUG_RETURN(-1);
+ /*
+ An extended rebuild is a lot more effort. We open up each row and re-record it.
+ Any dead rows are removed (aka rows that may have been partially recorded).
+ */
- if ((writer= gzopen(writer_filename, "wb")) == NULL)
+ if (check_opt->flags == T_EXTEND)
{
- gzclose(reader);
- DBUG_RETURN(-1);
- }
+ byte *buf;
- while ((read= gzread(reader, block, IO_SIZE)))
- gzwrite(writer, block, read);
+ /*
+ First we create a buffer that we can use for reading rows, and can pass
+ to get_row().
+ */
+ if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
+ {
+ rc= HA_ERR_OUT_OF_MEM;
+ goto error;
+ }
- gzclose(reader);
- gzclose(writer);
+ /*
+ Now we will rewind the archive file so that we are positioned at the
+ start of the file.
+ */
+ rc= read_data_header(archive);
+
+ /*
+ Assuming now error from rewinding the archive file, we now write out the
+ new header for out data file.
+ */
+ if (!rc)
+ rc= write_data_header(writer);
- my_rename(writer_filename,share->data_file_name,MYF(0));
+ /*
+ On success of writing out the new header, we now fetch each row and
+ insert it into the new archive file.
+ */
+ if (!rc)
+ {
+ share->rows_recorded= 0;
+ while (!(rc= get_row(archive, buf)))
+ {
+ real_write_row(buf, writer);
+ share->rows_recorded++;
+ }
+ }
- /*
- We reopen the file in case some IO is waiting to go through.
- In theory the table is closed right after this operation,
- but it is possible for IO to still happen.
- I may be being a bit too paranoid right here.
- */
- if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
- DBUG_RETURN(errno ? errno : -1);
- share->dirty= FALSE;
+ my_free((char*)buf, MYF(0));
+ if (rc && rc != HA_ERR_END_OF_FILE)
+ goto error;
+ }
+ else
+ {
+ /*
+ The quick method is to just read the data raw, and then compress it directly.
+ */
+ int read; // Bytes read, gzread() returns int
+ char block[IO_SIZE];
+ if (gzrewind(archive) == -1)
+ {
+ rc= HA_ERR_CRASHED_ON_USAGE;
+ goto error;
+ }
+
+ while ((read= gzread(archive, block, IO_SIZE)))
+ gzwrite(writer, block, read);
+ }
+
+ gzflush(writer, Z_SYNC_FLUSH);
+ gzclose(share->archive_write);
+ share->archive_write= writer;
+
+ my_rename(writer_filename,share->data_file_name,MYF(0));
DBUG_RETURN(0);
-}
+error:
+ gzclose(writer);
-/*
- No transactions yet, so this is pretty dull.
-*/
-int ha_archive::external_lock(THD *thd, int lock_type)
-{
- DBUG_ENTER("ha_archive::external_lock");
- DBUG_RETURN(0);
+ DBUG_RETURN(rc);
}
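optimize() now has two paths: with T_EXTEND it re-reads every row through get_row() and re-records it (dropping partially written rows and rebuilding the row count), while the quick path is a raw block copy through the compressor. A standalone sketch of the quick path:

  #include <zlib.h>

  static int recompress(gzFile src, gzFile dst)
  {
    char block[4096];                 /* IO_SIZE in the real code */
    int n;
    while ((n= gzread(src, block, sizeof(block))) > 0)
      if (gzwrite(dst, block, n) != n)
        return -1;                    /* short write */
    return n < 0 ? -1 : 0;            /* gzread() returns -1 on error */
  }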
/*
@@ -874,6 +932,11 @@ THR_LOCK_DATA **ha_archive::store_lock(THD *thd,
THR_LOCK_DATA **to,
enum thr_lock_type lock_type)
{
+ if (lock_type == TL_WRITE_DELAYED)
+ delayed_insert= TRUE;
+ else
+ delayed_insert= FALSE;
+
if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
{
/*
@@ -908,98 +971,62 @@ THR_LOCK_DATA **ha_archive::store_lock(THD *thd,
}
-/******************************************************************************
-
- Everything below here is default, please look at ha_example.cc for
- descriptions.
-
- ******************************************************************************/
-
-int ha_archive::update_row(const byte * old_data, byte * new_data)
-{
-
- DBUG_ENTER("ha_archive::update_row");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-int ha_archive::delete_row(const byte * buf)
-{
- DBUG_ENTER("ha_archive::delete_row");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-int ha_archive::index_read(byte * buf, const byte * key,
- uint key_len __attribute__((unused)),
- enum ha_rkey_function find_flag
- __attribute__((unused)))
-{
- DBUG_ENTER("ha_archive::index_read");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-int ha_archive::index_read_idx(byte * buf, uint index, const byte * key,
- uint key_len __attribute__((unused)),
- enum ha_rkey_function find_flag
- __attribute__((unused)))
-{
- DBUG_ENTER("ha_archive::index_read_idx");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-
-int ha_archive::index_next(byte * buf)
-{
- DBUG_ENTER("ha_archive::index_next");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-int ha_archive::index_prev(byte * buf)
-{
- DBUG_ENTER("ha_archive::index_prev");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-int ha_archive::index_first(byte * buf)
-{
- DBUG_ENTER("ha_archive::index_first");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-int ha_archive::index_last(byte * buf)
-{
- DBUG_ENTER("ha_archive::index_last");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-
+/*
+ Hints for the optimizer, see ha_tina for more information
+*/
void ha_archive::info(uint flag)
{
DBUG_ENTER("ha_archive::info");
-
- /* This is a lie, but you don't want the optimizer to see zero or 1 */
+ /*
+ This should be an accurate number now, though bulk and delayed inserts can
+ leave it temporarily out of date.
+ */
records= share->rows_recorded;
deleted= 0;
+ /* Costs quite a bit more to get all information */
+ if (flag & HA_STATUS_TIME)
+ {
+ MY_STAT file_stat; // Stat information for the data file
+
+ VOID(my_stat(share->data_file_name, &file_stat, MYF(MY_WME)));
+
+ mean_rec_length= table->s->reclength + buffer.alloced_length();
+ data_file_length= file_stat.st_size;
+ create_time= file_stat.st_ctime;
+ update_time= file_stat.st_mtime;
+ max_data_file_length= share->rows_recorded * mean_rec_length;
+ }
+ delete_length= 0;
+ index_file_length=0;
DBUG_VOID_RETURN;
}
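With HA_STATUS_TIME, info() now derives its statistics from a stat of the data file (my_stat is MySQL's portable wrapper around the system call). The same idea with the plain POSIX call:

  #include <sys/stat.h>

  static long long data_file_bytes(const char *path)
  {
    struct stat st;
    /* data_file_length, create_time and update_time all come from here */
    return stat(path, &st) == 0 ? (long long)st.st_size : -1;
  }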
-int ha_archive::extra(enum ha_extra_function operation)
-{
- DBUG_ENTER("ha_archive::extra");
- DBUG_RETURN(0);
-}
-int ha_archive::reset(void)
+/*
+ This method tells us that a bulk insert operation is about to occur. We set
+ a flag which will keep write_row from saying that its data is dirty. This in
+ turn will keep selects from causing a sync to occur.
+ Basically, yet another optimization to keep compression working well.
+*/
+void ha_archive::start_bulk_insert(ha_rows rows)
{
- DBUG_ENTER("ha_archive::reset");
- DBUG_RETURN(0);
+ DBUG_ENTER("ha_archive::start_bulk_insert");
+ bulk_insert= TRUE;
+ DBUG_VOID_RETURN;
}
-ha_rows ha_archive::records_in_range(uint inx, key_range *min_key,
- key_range *max_key)
+
+/*
+ The other side of start_bulk_insert is end_bulk_insert. Here we turn off the
+ bulk insert flag and set the share dirty so that the next select will call sync for us.
+*/
+int ha_archive::end_bulk_insert()
{
- DBUG_ENTER("ha_archive::records_in_range ");
- DBUG_RETURN(records); // HA_ERR_WRONG_COMMAND
+ DBUG_ENTER("ha_archive::end_bulk_insert");
+ bulk_insert= FALSE;
+ share->dirty= TRUE;
+ DBUG_RETURN(0);
}
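Together the two hooks implement a deferred-dirty pattern: per-row dirty marking is suppressed during the batch, and a single dirty flag is set at the end, so concurrent selects trigger at most one flush. A sketch reduced to a single flag (the real code also tracks delayed inserts):

  struct bulk_state { int bulk_insert; int dirty; };

  static void start_bulk(struct bulk_state *s) { s->bulk_insert= 1; }

  static void write_one_row(struct bulk_state *s)
  {
    if (!s->bulk_insert)
      s->dirty= 1;          /* flush hint only outside bulk loads */
  }

  static void end_bulk(struct bulk_state *s)
  {
    s->bulk_insert= 0;
    s->dirty= 1;            /* one sync for the whole batch */
  }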
/*
diff --git a/sql/examples/ha_archive.h b/sql/examples/ha_archive.h
index 6ceb660e951..e2d8aa49add 100644
--- a/sql/examples/ha_archive.h
+++ b/sql/examples/ha_archive.h
@@ -32,10 +32,11 @@ typedef struct st_archive_share {
uint table_name_length,use_count;
pthread_mutex_t mutex;
THR_LOCK lock;
- File meta_file; /* Meta file we use */
- gzFile archive_write; /* Archive file we are working with */
- bool dirty; /* Flag for if a flush should occur */
- ulonglong rows_recorded; /* Number of rows in tables */
+ File meta_file; /* Meta file we use */
+ gzFile archive_write; /* Archive file we are working with */
+ bool dirty; /* Flag for if a flush should occur */
+ bool crashed; /* Meta file is crashed */
+ ha_rows rows_recorded; /* Number of rows in tables */
} ARCHIVE_SHARE;
/*
@@ -52,17 +53,12 @@ class ha_archive: public handler
z_off_t current_position; /* The position of the row we just read */
byte byte_buffer[IO_SIZE]; /* Initial buffer for our string */
String buffer; /* Buffer used for blob storage */
- ulonglong scan_rows; /* Number of rows left in scan */
+ ha_rows scan_rows; /* Number of rows left in scan */
+ bool delayed_insert; /* If the insert is delayed */
+ bool bulk_insert; /* If we are performing a bulk insert */
public:
- ha_archive(TABLE *table): handler(table)
- {
- /* Set our original buffer from pre-allocated memory */
- buffer.set((char*)byte_buffer, IO_SIZE, system_charset_info);
-
- /* The size of the offset value we will use for position() */
- ref_length = sizeof(z_off_t);
- }
+ ha_archive(TABLE *table_arg);
~ha_archive()
{
}
@@ -72,61 +68,43 @@ public:
ulong table_flags() const
{
return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT | HA_NO_AUTO_INCREMENT |
- HA_FILE_BASED);
+ HA_FILE_BASED | HA_CAN_INSERT_DELAYED);
}
ulong index_flags(uint idx, uint part, bool all_parts) const
{
return 0;
}
- /*
- Have to put something here, there is no real limit as far as
- archive is concerned.
- */
- uint max_supported_record_length() const { return UINT_MAX; }
- /*
- Called in test_quick_select to determine if indexes should be used.
- */
- virtual double scan_time() { return (double) (records) / 20.0+10; }
- /* The next method will never be called */
- virtual double read_time(uint index, uint ranges, ha_rows rows)
- { return (double) rows / 20.0+1; }
int open(const char *name, int mode, uint test_if_locked);
int close(void);
int write_row(byte * buf);
- int update_row(const byte * old_data, byte * new_data);
- int delete_row(const byte * buf);
+ int real_write_row(byte *buf, gzFile writer);
int delete_all_rows();
- int index_read(byte * buf, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_read_idx(byte * buf, uint idx, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_next(byte * buf);
- int index_prev(byte * buf);
- int index_first(byte * buf);
- int index_last(byte * buf);
int rnd_init(bool scan=1);
int rnd_next(byte *buf);
int rnd_pos(byte * buf, byte *pos);
int get_row(gzFile file_to_read, byte *buf);
- int read_meta_file(File meta_file, ulonglong *rows);
- int write_meta_file(File meta_file, ulonglong rows, bool dirty);
+ int read_meta_file(File meta_file, ha_rows *rows);
+ int write_meta_file(File meta_file, ha_rows rows, bool dirty);
ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table);
int free_share(ARCHIVE_SHARE *share);
- int rebuild_meta_file(char *table_name, File meta_file);
+ bool auto_repair() const { return 1; } // For the moment we just do this
int read_data_header(gzFile file_to_read);
int write_data_header(gzFile file_to_write);
void position(const byte *record);
void info(uint);
- int extra(enum ha_extra_function operation);
- int reset(void);
- int external_lock(THD *thd, int lock_type);
- ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
int optimize(THD* thd, HA_CHECK_OPT* check_opt);
+ int repair(THD* thd, HA_CHECK_OPT* check_opt);
+ void start_bulk_insert(ha_rows rows);
+ int end_bulk_insert();
+ enum row_type get_row_type() const
+ {
+ return ROW_TYPE_COMPRESSED;
+ }
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
};
-bool archive_db_init(void);
+handlerton *archive_db_init(void);
bool archive_db_end(void);
diff --git a/sql/examples/ha_example.cc b/sql/examples/ha_example.cc
index b3edce5ba4a..dfc2fa7a260 100644
--- a/sql/examples/ha_example.cc
+++ b/sql/examples/ha_example.cc
@@ -72,6 +72,27 @@
#ifdef HAVE_EXAMPLE_DB
#include "ha_example.h"
+
+static handlerton example_hton= {
+ "CSV",
+ 0, /* slot */
+ 0, /* savepoint size. */
+ NULL, /* close_connection */
+ NULL, /* savepoint */
+ NULL, /* rollback to savepoint */
+ NULL, /* release savepoint */
+ NULL, /* commit */
+ NULL, /* rollback */
+ NULL, /* prepare */
+ NULL, /* recover */
+ NULL, /* commit_by_xid */
+ NULL, /* rollback_by_xid */
+ NULL, /* create_cursor_read_view */
+ NULL, /* set_cursor_read_view */
+ NULL, /* close_cursor_read_view */
+ HTON_NO_FLAGS
+};
+
/* Variables for example share methods */
static HASH example_open_tables; // Hash used to track open tables
pthread_mutex_t example_mutex; // This is the mutex we use to init the hash
@@ -179,13 +200,23 @@ static int free_share(EXAMPLE_SHARE *share)
}
+ha_example::ha_example(TABLE *table_arg)
+ :handler(&example_hton, table_arg)
+{}
+
/*
If frm_error() is called then we will use this to find out what file extensions
exist for the storage engine. This is also used by the default rename_table and
delete_table method in handler.cc.
*/
+static const char *ha_example_exts[] = {
+ NullS
+};
+
const char **ha_example::bas_ext() const
-{ static const char *ext[]= { NullS }; return ext; }
+{
+ return ha_example_exts;
+}
/*
@@ -412,7 +443,7 @@ int ha_example::rnd_next(byte *buf)
position() is called after each call to rnd_next() if the data needs
to be ordered. You can do something like the following to store
the position:
- ha_store_ptr(ref, ref_length, current_position);
+ my_store_ptr(ref, ref_length, current_position);
The server uses ref to store data. ref_length in the above case is
the size needed to store current_position. ref is just a byte array
diff --git a/sql/examples/ha_example.h b/sql/examples/ha_example.h
index ae72e5bb275..37f38fe5210 100644
--- a/sql/examples/ha_example.h
+++ b/sql/examples/ha_example.h
@@ -45,9 +45,7 @@ class ha_example: public handler
EXAMPLE_SHARE *share; /* Shared lock info */
public:
- ha_example(TABLE *table): handler(table)
- {
- }
+ ha_example(TABLE *table_arg);
~ha_example()
{
}
diff --git a/sql/examples/ha_tina.cc b/sql/examples/ha_tina.cc
index bbcdfb0dafb..74ff3457cd2 100644
--- a/sql/examples/ha_tina.cc
+++ b/sql/examples/ha_tina.cc
@@ -54,6 +54,26 @@ pthread_mutex_t tina_mutex;
static HASH tina_open_tables;
static int tina_init= 0;
+static handlerton tina_hton= {
+ "CSV",
+ 0, /* slot */
+ 0, /* savepoint size. */
+ NULL, /* close_connection */
+ NULL, /* savepoint */
+ NULL, /* rollback to savepoint */
+ NULL, /* release savepoint */
+ NULL, /* commit */
+ NULL, /* rollback */
+ NULL, /* prepare */
+ NULL, /* recover */
+ NULL, /* commit_by_xid */
+ NULL, /* rollback_by_xid */
+ NULL, /* create_cursor_read_view */
+ NULL, /* set_cursor_read_view */
+ NULL, /* close_cursor_read_view */
+ HTON_NO_FLAGS
+};
+
/*****************************************************************************
** TINA tables
*****************************************************************************/
@@ -228,6 +248,20 @@ byte * find_eoln(byte *data, off_t begin, off_t end)
return 0;
}
+
+ha_tina::ha_tina(TABLE *table_arg)
+ :handler(&tina_hton, table_arg),
+ /*
+ These definitions are found in handler.h
+ These are probably not completely right.
+ */
+ current_position(0), next_position(0), chain_alloced(0), chain_size(DEFAULT_CHAIN_LENGTH)
+{
+ /* Set our original buffers from pre-allocated memory */
+ buffer.set(byte_buffer, IO_SIZE, system_charset_info);
+ chain= chain_buffer;
+}
+
/*
Encode a buffer into the quoted format.
*/
@@ -374,7 +408,7 @@ int ha_tina::find_current_row(byte *buf)
}
next_position= (end_ptr - share->mapped_file)+1;
/* Maybe use \N for null? */
- memset(buf, 0, table->null_bytes); /* We do not implement nulls! */
+ memset(buf, 0, table->s->null_bytes); /* We do not implement nulls! */
DBUG_RETURN(0);
}
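find_current_row() walks a mmap'ed copy of the CSV file, so advancing to the next row is just a newline search in the mapped region (what find_eoln() above does). A sketch of that scan using memchr:

  #include <string.h>

  /* Returns the offset one past the next '\n', or -1 at end of region. */
  static long next_row_offset(const char *mapped, long pos, long end)
  {
    const char *eol= (const char *)memchr(mapped + pos, '\n', end - pos);
    return eol ? (eol - mapped) + 1 : -1;
  }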
@@ -383,8 +417,15 @@ int ha_tina::find_current_row(byte *buf)
If frm_error() is called in table.cc this is called to find out what file
extensions exist for this handler.
*/
+static const char *ha_tina_exts[] = {
+ ".CSV",
+ NullS
+};
+
const char **ha_tina::bas_ext() const
-{ static const char *ext[]= { ".CSV", NullS }; return ext; }
+{
+ return ha_tina_exts;
+}
/*
@@ -425,7 +466,7 @@ int ha_tina::write_row(byte * buf)
int size;
DBUG_ENTER("ha_tina::write_row");
- statistic_increment(ha_write_count,&LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
@@ -461,7 +502,8 @@ int ha_tina::update_row(const byte * old_data, byte * new_data)
int size;
DBUG_ENTER("ha_tina::update_row");
- statistic_increment(ha_update_count,&LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
+ &LOCK_status);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
table->timestamp_field->set_time();
@@ -488,7 +530,7 @@ int ha_tina::update_row(const byte * old_data, byte * new_data)
int ha_tina::delete_row(const byte * buf)
{
DBUG_ENTER("ha_tina::delete_row");
- statistic_increment(ha_delete_count,&LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status);
if (chain_append())
DBUG_RETURN(-1);
@@ -630,7 +672,8 @@ int ha_tina::rnd_next(byte *buf)
{
DBUG_ENTER("ha_tina::rnd_next");
- statistic_increment(ha_read_rnd_next_count,&LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
+ &LOCK_status);
current_position= next_position;
if (!share->mapped_file)
@@ -646,7 +689,7 @@ int ha_tina::rnd_next(byte *buf)
In the case of an order by rows will need to be sorted.
::position() is called after each call to ::rnd_next(),
the data it stores is to a byte array. You can store this
- data via ha_store_ptr(). ref_length is a variable defined to the
+ data via my_store_ptr(). ref_length is a variable defined to the
class that is the sizeof() of position being stored. In our case
its just a position. Look at the bdb code if you want to see a case
where something other then a number is stored.
@@ -654,21 +697,22 @@ int ha_tina::rnd_next(byte *buf)
void ha_tina::position(const byte *record)
{
DBUG_ENTER("ha_tina::position");
- ha_store_ptr(ref, ref_length, current_position);
+ my_store_ptr(ref, ref_length, current_position);
DBUG_VOID_RETURN;
}
/*
Used to fetch a row from a position stored with ::position().
- ha_get_ptr() retrieves the data for you.
+ my_get_ptr() retrieves the data for you.
*/
int ha_tina::rnd_pos(byte * buf, byte *pos)
{
DBUG_ENTER("ha_tina::rnd_pos");
- statistic_increment(ha_read_rnd_count,&LOCK_status);
- current_position= ha_get_ptr(pos,ref_length);
+ statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
+ &LOCK_status);
+ current_position= my_get_ptr(pos,ref_length);
DBUG_RETURN(find_current_row(buf));
}
diff --git a/sql/examples/ha_tina.h b/sql/examples/ha_tina.h
index 22193c01013..5679d77a4dc 100644
--- a/sql/examples/ha_tina.h
+++ b/sql/examples/ha_tina.h
@@ -49,18 +49,8 @@ class ha_tina: public handler
byte chain_alloced;
uint32 chain_size;
- public:
- ha_tina(TABLE *table): handler(table),
- /*
- These definitions are found in hanler.h
- Theses are not probably completely right.
- */
- current_position(0), next_position(0), chain_alloced(0), chain_size(DEFAULT_CHAIN_LENGTH)
- {
- /* Set our original buffers from pre-allocated memory */
- buffer.set(byte_buffer, IO_SIZE, system_charset_info);
- chain = chain_buffer;
- }
+public:
+ ha_tina(TABLE *table_arg);
~ha_tina()
{
if (chain_alloced)