Diffstat (limited to 'sql/examples')
-rw-r--r--  sql/examples/ha_archive.cc | 328
-rw-r--r--  sql/examples/ha_archive.h  |  50
-rw-r--r--  sql/examples/ha_example.cc |  10
-rw-r--r--  sql/examples/ha_tina.cc    |  32
4 files changed, 209 insertions, 211 deletions
diff --git a/sql/examples/ha_archive.cc b/sql/examples/ha_archive.cc
index e58996d3c01..4f0cfb91d20 100644
--- a/sql/examples/ha_archive.cc
+++ b/sql/examples/ha_archive.cc
@@ -20,10 +20,11 @@
#pragma implementation // gcc: Class implementation
#endif
-#include <mysql_priv.h>
+#include "../mysql_priv.h"
#ifdef HAVE_ARCHIVE_DB
#include "ha_archive.h"
+#include <my_dir.h>
/*
First, if you want to understand storage engines you should look at
@@ -136,6 +137,24 @@ static HASH archive_open_tables;
#define DATA_BUFFER_SIZE 2 // Size of the data used in the data file
#define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption
+/* dummy handlerton - only to have something to return from archive_db_init */
+static handlerton archive_hton = {
+ "archive",
+ 0, /* slot */
+ 0, /* savepoint size. */
+ 0, /* close_connection */
+ 0, /* savepoint */
+ 0, /* rollback to savepoint */
+ 0, /* release savepoint */
+ 0, /* commit */
+ 0, /* rollback */
+ 0, /* prepare */
+ 0, /* recover */
+ 0, /* commit_by_xid */
+ 0 /* rollback_by_xid */
+};
+
+
/*
Used for hash table that tracks open tables.
*/
@@ -155,19 +174,20 @@ static byte* archive_get_key(ARCHIVE_SHARE *share,uint *length,
void
RETURN
- FALSE OK
- TRUE Error
+ &archive_hton OK
+ 0 Error
*/
-bool archive_db_init()
+handlerton *archive_db_init()
{
archive_inited= 1;
VOID(pthread_mutex_init(&archive_mutex, MY_MUTEX_INIT_FAST));
- return (hash_init(&archive_open_tables, system_charset_info, 32, 0, 0,
- (hash_get_key) archive_get_key, 0, 0));
+ if (hash_init(&archive_open_tables, system_charset_info, 32, 0, 0,
+ (hash_get_key) archive_get_key, 0, 0))
+ return 0;
+ return &archive_hton;
}
-
/*
Release the archive handler.
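Since archive_db_init() now hands back a handlerton pointer instead of a bool, callers are expected to treat NULL as failure. A minimal sketch of a hypothetical call site (the variable name and error text are assumptions, not part of this diff):

  /* Hypothetical caller: a NULL handlerton now signals init failure. */
  handlerton *archive_hton_ptr= archive_db_init();
  if (archive_hton_ptr == NULL)
    sql_print_error("archive_db_init() failed, ARCHIVE engine disabled");
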
@@ -276,8 +296,7 @@ int ha_archive::read_meta_file(File meta_file, ulonglong *rows)
/*
This method writes out the header of a meta file and returns whether or not it was successful.
By setting dirty you say whether or not the file represents the actual state of the data file.
- Upon ::open() we set to dirty, and upon ::close() we set to clean. If we determine during
- a read that the file was dirty we will force a rebuild of this file.
+ Upon ::open() we set to dirty, and upon ::close() we set to clean.
*/
int ha_archive::write_meta_file(File meta_file, ulonglong rows, bool dirty)
{
@@ -337,6 +356,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
share->use_count= 0;
share->table_name_length= length;
share->table_name= tmp_name;
+ share->crashed= FALSE;
fn_format(share->data_file_name,table_name,"",ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
fn_format(meta_file_name,table_name,"",ARM,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
strmov(share->table_name,table_name);
@@ -347,24 +367,15 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table)
if ((share->meta_file= my_open(meta_file_name, O_RDWR, MYF(0))) == -1)
goto error;
- if (read_meta_file(share->meta_file, &share->rows_recorded))
- {
- /*
- The problem here is that for some reason, probably a crash, the meta
- file has been corrupted. So what do we do? Well we try to rebuild it
- ourself. Once that happens, we reread it, but if that fails we just
- call it quits and return an error.
- */
- if (rebuild_meta_file(share->table_name, share->meta_file))
- goto error;
- if (read_meta_file(share->meta_file, &share->rows_recorded))
- goto error;
- }
/*
After we read, we set the file to dirty. When we close, we will do the
- opposite.
+ opposite. If the meta file cannot be read we assume it is crashed and
+ leave it up to the user to fix.
*/
- (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
+ if (read_meta_file(share->meta_file, &share->rows_recorded))
+ share->crashed= TRUE;
+ else
+ (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE);
/*
It is expensive to open and close the data files and since you can't have
@@ -422,11 +433,20 @@ int ha_archive::free_share(ARCHIVE_SHARE *share)
}
-/*
+/*
We just implement our additional file extensions.
*/
+static const char *ha_archive_exts[] = {
+ ARZ,
+ ARN,
+ ARM,
+ NullS
+};
+
const char **ha_archive::bas_ext() const
-{ static const char *ext[]= { ARZ, ARN, ARM, NullS }; return ext; }
+{
+ return ha_archive_exts;
+}
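For context, the extension list returned by bas_ext() feeds the generic file management in handler.cc; the default delete_table(), for example, removes one file per listed extension. A hedged sketch of that pattern (simplified, assuming the usual fn_format()/my_delete() helpers; not part of this diff):

  /* Sketch: remove every file the engine owns for table 'name'. */
  for (const char **ext= bas_ext(); *ext != NullS; ext++)
  {
    char name_buff[FN_REFLEN];
    fn_format(name_buff, name, "", *ext,
              MY_REPLACE_EXT | MY_UNPACK_FILENAME);
    my_delete(name_buff, MYF(0));
  }
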
/*
@@ -440,7 +460,7 @@ int ha_archive::open(const char *name, int mode, uint test_if_locked)
DBUG_ENTER("ha_archive::open");
if (!(share= get_share(name, table)))
- DBUG_RETURN(1);
+ DBUG_RETURN(-1);
thr_lock_data_init(&share->lock,&lock,NULL);
if ((archive= gzopen(share->data_file_name, "rb")) == NULL)
@@ -567,31 +587,38 @@ error:
int ha_archive::write_row(byte * buf)
{
z_off_t written;
- Field_blob **field;
+ uint *ptr, *end;
DBUG_ENTER("ha_archive::write_row");
- statistic_increment(ha_write_count,&LOCK_status);
+ if (share->crashed)
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+
+ statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
pthread_mutex_lock(&share->mutex);
- written= gzwrite(share->archive_write, buf, table->reclength);
- DBUG_PRINT("ha_archive::get_row", ("Wrote %d bytes expected %d", written, table->reclength));
- share->dirty= TRUE;
- if (written != (z_off_t)table->reclength)
+ written= gzwrite(share->archive_write, buf, table->s->reclength);
+ DBUG_PRINT("ha_archive::write_row", ("Wrote %d bytes expected %d", written, table->s->reclength));
+ if (!delayed_insert && !bulk_insert)
+ share->dirty= TRUE;
+
+ if (written != (z_off_t)table->s->reclength)
goto error;
/*
We should probably mark the table as damaged if the record is written
but the blob fails.
*/
- for (field= table->blob_field ; *field ; field++)
+ for (ptr= table->s->blob_field, end= ptr + table->s->blob_fields ;
+ ptr != end ;
+ ptr++)
{
- char *ptr;
- uint32 size= (*field)->get_length();
+ char *data_ptr;
+ uint32 size= ((Field_blob*) table->field[*ptr])->get_length();
if (size)
{
- (*field)->get_ptr(&ptr);
- written= gzwrite(share->archive_write, ptr, (unsigned)size);
+ ((Field_blob*) table->field[*ptr])->get_ptr(&data_ptr);
+ written= gzwrite(share->archive_write, data_ptr, (unsigned)size);
if (written != (z_off_t)size)
goto error;
}
@@ -615,6 +642,9 @@ error:
int ha_archive::rnd_init(bool scan)
{
DBUG_ENTER("ha_archive::rnd_init");
+
+ if (share->crashed)
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
/* We rewind the file so that we can read from the beginning if scan */
if (scan)
@@ -652,13 +682,13 @@ int ha_archive::rnd_init(bool scan)
int ha_archive::get_row(gzFile file_to_read, byte *buf)
{
int read; // Bytes read, gzread() returns int
+ uint *ptr, *end;
char *last;
size_t total_blob_length= 0;
- Field_blob **field;
DBUG_ENTER("ha_archive::get_row");
- read= gzread(file_to_read, buf, table->reclength);
- DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %d", read, table->reclength));
+ read= gzread(file_to_read, buf, table->s->reclength);
+ DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %d", read, table->s->reclength));
if (read == Z_STREAM_ERROR)
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
@@ -667,28 +697,35 @@ int ha_archive::get_row(gzFile file_to_read, byte *buf)
if (read == 0)
DBUG_RETURN(HA_ERR_END_OF_FILE);
- /* If the record is the wrong size, the file is probably damaged */
- if ((ulong) read != table->reclength)
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+ /*
+ If the record is the wrong size, the file is probably damaged, unless
+ we are dealing with a delayed insert or a bulk insert.
+ */
+ if ((ulong) read != table->s->reclength)
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
/* Calculate blob length, we use this for our buffer */
- for (field=table->blob_field; *field ; field++)
- total_blob_length += (*field)->get_length();
+ for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ;
+ ptr != end ;
+ ptr++)
+ total_blob_length += ((Field_blob*) table->field[*ptr])->get_length();
/* Adjust our row buffer if we need be */
buffer.alloc(total_blob_length);
last= (char *)buffer.ptr();
/* Loop through our blobs and read them */
- for (field=table->blob_field; *field ; field++)
+ for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ;
+ ptr != end ;
+ ptr++)
{
- size_t size= (*field)->get_length();
+ size_t size= ((Field_blob*) table->field[*ptr])->get_length();
if (size)
{
read= gzread(file_to_read, last, size);
if ((size_t) read != size)
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
- (*field)->set_ptr(size, last);
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+ ((Field_blob*) table->field[*ptr])->set_ptr(size, last);
last += size;
}
}
@@ -706,11 +743,15 @@ int ha_archive::rnd_next(byte *buf)
int rc;
DBUG_ENTER("ha_archive::rnd_next");
+ if (share->crashed)
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+
if (!scan_rows)
DBUG_RETURN(HA_ERR_END_OF_FILE);
scan_rows--;
- statistic_increment(ha_read_rnd_next_count,&LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
+ &LOCK_status);
current_position= gztell(archive);
rc= get_row(archive, buf);
@@ -722,7 +763,7 @@ int ha_archive::rnd_next(byte *buf)
}
-/*
+/*
Thanks to the table flag HA_REC_NOT_IN_SEQ this will be called after
each call to ha_archive::rnd_next() if an ordering of the rows is
needed.
@@ -731,7 +772,7 @@ int ha_archive::rnd_next(byte *buf)
void ha_archive::position(const byte *record)
{
DBUG_ENTER("ha_archive::position");
- ha_store_ptr(ref, ref_length, current_position);
+ my_store_ptr(ref, ref_length, current_position);
DBUG_VOID_RETURN;
}
@@ -746,30 +787,32 @@ void ha_archive::position(const byte *record)
int ha_archive::rnd_pos(byte * buf, byte *pos)
{
DBUG_ENTER("ha_archive::rnd_pos");
- statistic_increment(ha_read_rnd_count,&LOCK_status);
- current_position= ha_get_ptr(pos, ref_length);
+ statistic_increment(table->in_use->status_var.ha_read_rnd_count,
+ &LOCK_status);
+ current_position= my_get_ptr(pos, ref_length);
(void)gzseek(archive, current_position, SEEK_SET);
DBUG_RETURN(get_row(archive, buf));
}
/*
- This method rebuilds the meta file. It does this by walking the datafile and
+ This method repairs the meta file. It does this by walking the datafile and
rewriting the meta file.
*/
-int ha_archive::rebuild_meta_file(char *table_name, File meta_file)
+int ha_archive::repair(THD* thd, HA_CHECK_OPT* check_opt)
{
int rc;
byte *buf;
ulonglong rows_recorded= 0;
- gzFile rebuild_file; /* Archive file we are working with */
+ gzFile rebuild_file; // Archive file we are working with
+ File meta_file; // Meta file we use
char data_file_name[FN_REFLEN];
- DBUG_ENTER("ha_archive::rebuild_meta_file");
+ DBUG_ENTER("ha_archive::repair");
/*
Open up the meta file to recreate it.
*/
- fn_format(data_file_name, table_name, "", ARZ,
+ fn_format(data_file_name, share->table_name, "", ARZ,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
if ((rebuild_file= gzopen(data_file_name, "rb")) == NULL)
DBUG_RETURN(errno ? errno : -1);
@@ -782,8 +825,8 @@ int ha_archive::rebuild_meta_file(char *table_name, File meta_file)
I know, this is malloc'ing memory, but this should be a very
rare event.
*/
- if (!(buf= (byte*) my_malloc(table->rec_buff_length > sizeof(ulonglong) +1 ?
- table->rec_buff_length : sizeof(ulonglong) +1 ,
+ if (!(buf= (byte*) my_malloc(table->s->rec_buff_length > sizeof(ulonglong) +1 ?
+ table->s->rec_buff_length : sizeof(ulonglong) +1 ,
MYF(MY_WME))))
{
rc= HA_ERR_CRASHED_ON_USAGE;
@@ -799,11 +842,19 @@ int ha_archive::rebuild_meta_file(char *table_name, File meta_file)
*/
if (rc == HA_ERR_END_OF_FILE)
{
- (void)write_meta_file(meta_file, rows_recorded, FALSE);
+ fn_format(data_file_name,share->table_name,"",ARM,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
+ if ((meta_file= my_open(data_file_name, O_RDWR, MYF(0))) == -1)
+ {
+ rc= HA_ERR_CRASHED_ON_USAGE;
+ goto error;
+ }
+ (void)write_meta_file(meta_file, rows_recorded, TRUE);
+ my_close(meta_file,MYF(0));
rc= 0;
}
my_free((gptr) buf, MYF(0));
+ share->crashed= FALSE;
error:
gzclose(rebuild_file);
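Taken together with the new share->crashed flag and the auto_repair() override added in ha_archive.h, the recovery flow works like this: a failed meta-file read in get_share() marks the share crashed, the row-level entry points then refuse to run, and the server can route the error back through repair(). The guard that write_row(), rnd_init() and rnd_next() now share looks like this (restated here from the hunks above):

  /* Common guard in the row-level entry points: */
  if (share->crashed)
    DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); /* with auto_repair() returning
                                             true, the server may retry via
                                             ha_archive::repair() */
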
@@ -822,13 +873,14 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
char block[IO_SIZE];
char writer_filename[FN_REFLEN];
+ /* Closing will cause all pending data to be flushed */
+ gzclose(share->archive_write);
+ share->archive_write= NULL;
+
/* Lets create a file to contain the new data */
fn_format(writer_filename, share->table_name, "", ARN,
MY_REPLACE_EXT|MY_UNPACK_FILENAME);
- /* Closing will cause all data waiting to be flushed, to be flushed */
- gzclose(share->archive_write);
-
if ((reader= gzopen(share->data_file_name, "rb")) == NULL)
DBUG_RETURN(-1);
@@ -846,29 +898,10 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
my_rename(writer_filename,share->data_file_name,MYF(0));
- /*
- We reopen the file in case some IO is waiting to go through.
- In theory the table is closed right after this operation,
- but it is possible for IO to still happen.
- I may be being a bit too paranoid right here.
- */
- if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
- DBUG_RETURN(errno ? errno : -1);
- share->dirty= FALSE;
-
DBUG_RETURN(0);
}
-/*
- No transactions yet, so this is pretty dull.
-*/
-int ha_archive::external_lock(THD *thd, int lock_type)
-{
- DBUG_ENTER("ha_archive::external_lock");
- DBUG_RETURN(0);
-}
-
/*
Below is an example of how to setup row level locking.
*/
@@ -876,6 +909,11 @@ THR_LOCK_DATA **ha_archive::store_lock(THD *thd,
THR_LOCK_DATA **to,
enum thr_lock_type lock_type)
{
+ if (lock_type == TL_WRITE_DELAYED)
+ delayed_insert= TRUE;
+ else
+ delayed_insert= FALSE;
+
if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
{
/*
@@ -910,97 +948,61 @@ THR_LOCK_DATA **ha_archive::store_lock(THD *thd,
}
-/******************************************************************************
-
- Everything below here is default, please look at ha_example.cc for
- descriptions.
-
- ******************************************************************************/
-
-int ha_archive::update_row(const byte * old_data, byte * new_data)
-{
-
- DBUG_ENTER("ha_archive::update_row");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-int ha_archive::delete_row(const byte * buf)
-{
- DBUG_ENTER("ha_archive::delete_row");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-int ha_archive::index_read(byte * buf, const byte * key,
- uint key_len __attribute__((unused)),
- enum ha_rkey_function find_flag
- __attribute__((unused)))
-{
- DBUG_ENTER("ha_archive::index_read");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-int ha_archive::index_read_idx(byte * buf, uint index, const byte * key,
- uint key_len __attribute__((unused)),
- enum ha_rkey_function find_flag
- __attribute__((unused)))
-{
- DBUG_ENTER("ha_archive::index_read_idx");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-
-int ha_archive::index_next(byte * buf)
-{
- DBUG_ENTER("ha_archive::index_next");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-int ha_archive::index_prev(byte * buf)
-{
- DBUG_ENTER("ha_archive::index_prev");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-int ha_archive::index_first(byte * buf)
-{
- DBUG_ENTER("ha_archive::index_first");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-int ha_archive::index_last(byte * buf)
-{
- DBUG_ENTER("ha_archive::index_last");
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
-}
-
-
+/*
+ Hints for optimizer, see ha_tina for more information
+*/
void ha_archive::info(uint flag)
{
DBUG_ENTER("ha_archive::info");
-
- /* This is a lie, but you don't want the optimizer to see zero or 1 */
+ /*
+ This should be an accurate number now, though bulk and delayed inserts can
+ make it temporarily inaccurate.
+ */
records= share->rows_recorded;
deleted= 0;
+ /* Costs quite a bit more to get all information */
+ if (flag & HA_STATUS_TIME)
+ {
+ MY_STAT file_stat; // Stat information for the data file
+
+ VOID(my_stat(share->data_file_name, &file_stat, MYF(MY_WME)));
+
+ mean_rec_length= table->s->reclength + buffer.alloced_length();
+ data_file_length= file_stat.st_size;
+ create_time= file_stat.st_ctime;
+ update_time= file_stat.st_mtime;
+ max_data_file_length= share->rows_recorded * mean_rec_length;
+ }
+ delete_length= 0;
+ index_file_length=0;
DBUG_VOID_RETURN;
}
-int ha_archive::extra(enum ha_extra_function operation)
-{
- DBUG_ENTER("ha_archive::extra");
- DBUG_RETURN(0);
-}
-int ha_archive::reset(void)
+/*
+ This method tells us that a bulk insert operation is about to occur. We set
+ a flag which will keep write_row from marking the share as dirty. This in
+ turn will keep selects from causing a sync to occur.
+ Basically, yet another optimization to keep compression working well.
+*/
+void ha_archive::start_bulk_insert(ha_rows rows)
{
- DBUG_ENTER("ha_archive::reset");
- DBUG_RETURN(0);
+ DBUG_ENTER("ha_archive::start_bulk_insert");
+ bulk_insert= TRUE;
+ DBUG_VOID_RETURN;
}
-ha_rows ha_archive::records_in_range(uint inx, key_range *min_key,
- key_range *max_key)
+
+/*
+ The other side of start_bulk_insert is end_bulk_insert. Here we turn off the bulk insert
+ flag, and set the share dirty so that the next select will call sync for us.
+*/
+int ha_archive::end_bulk_insert()
{
- DBUG_ENTER("ha_archive::records_in_range ");
- DBUG_RETURN(records); // HA_ERR_WRONG_COMMAND
+ DBUG_ENTER("ha_archive::end_bulk_insert");
+ bulk_insert= FALSE;
+ share->dirty= TRUE;
+ DBUG_RETURN(0);
}
#endif /* HAVE_ARCHIVE_DB */
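To make the dirty-flag handling concrete, the server-side sequence these two methods optimize for looks roughly as follows (a sketch; the handler pointer and loop are illustrative assumptions, not part of this diff):

  /* Hypothetical multi-row INSERT / LOAD DATA sequence: */
  archive_handler->start_bulk_insert(estimated_rows); /* bulk_insert= TRUE */
  while (more_rows)
    archive_handler->write_row(record);   /* skips share->dirty= TRUE      */
  archive_handler->end_bulk_insert();     /* bulk_insert= FALSE, dirty set */
  /* The next SELECT sees share->dirty and syncs the gzip stream once,
     instead of once per row. */
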
diff --git a/sql/examples/ha_archive.h b/sql/examples/ha_archive.h
index 7ab463b6661..2f310d8c69b 100644
--- a/sql/examples/ha_archive.h
+++ b/sql/examples/ha_archive.h
@@ -32,10 +32,11 @@ typedef struct st_archive_share {
uint table_name_length,use_count;
pthread_mutex_t mutex;
THR_LOCK lock;
- File meta_file; /* Meta file we use */
- gzFile archive_write; /* Archive file we are working with */
- bool dirty; /* Flag for if a flush should occur */
- ulonglong rows_recorded; /* Number of rows in tables */
+ File meta_file; /* Meta file we use */
+ gzFile archive_write; /* Archive file we are working with */
+ bool dirty; /* Flag for if a flush should occur */
+ bool crashed; /* Meta file is crashed */
+ ulonglong rows_recorded; /* Number of rows in tables */
} ARCHIVE_SHARE;
/*
@@ -53,12 +54,14 @@ class ha_archive: public handler
byte byte_buffer[IO_SIZE]; /* Initial buffer for our string */
String buffer; /* Buffer used for blob storage */
ulonglong scan_rows; /* Number of rows left in scan */
+ bool delayed_insert; /* If the insert is delayed */
+ bool bulk_insert; /* If we are performing a bulk insert */
public:
- ha_archive(TABLE *table): handler(table)
+ ha_archive(TABLE *table): handler(table), delayed_insert(0), bulk_insert(0)
{
/* Set our original buffer from pre-allocated memory */
- buffer.set(byte_buffer, IO_SIZE, system_charset_info);
+ buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info);
/* The size of the offset value we will use for position() */
ref_length = sizeof(z_off_t);
@@ -72,37 +75,15 @@ public:
ulong table_flags() const
{
return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT | HA_NO_AUTO_INCREMENT |
- HA_FILE_BASED);
+ HA_FILE_BASED | HA_CAN_INSERT_DELAYED);
}
ulong index_flags(uint idx, uint part, bool all_parts) const
{
return 0;
}
- /*
- Have to put something here, there is no real limit as far as
- archive is concerned.
- */
- uint max_supported_record_length() const { return UINT_MAX; }
- /*
- Called in test_quick_select to determine if indexes should be used.
- */
- virtual double scan_time() { return (double) (records) / 20.0+10; }
- /* The next method will never be called */
- virtual double read_time(uint index, uint ranges, ha_rows rows)
- { return (double) rows / 20.0+1; }
int open(const char *name, int mode, uint test_if_locked);
int close(void);
int write_row(byte * buf);
- int update_row(const byte * old_data, byte * new_data);
- int delete_row(const byte * buf);
- int index_read(byte * buf, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_read_idx(byte * buf, uint idx, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_next(byte * buf);
- int index_prev(byte * buf);
- int index_first(byte * buf);
- int index_last(byte * buf);
int rnd_init(bool scan=1);
int rnd_next(byte *buf);
int rnd_pos(byte * buf, byte *pos);
@@ -111,21 +92,20 @@ public:
int write_meta_file(File meta_file, ulonglong rows, bool dirty);
ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table);
int free_share(ARCHIVE_SHARE *share);
- int rebuild_meta_file(char *table_name, File meta_file);
+ bool auto_repair() const { return 1; } // For the moment we just do this
int read_data_header(gzFile file_to_read);
int write_data_header(gzFile file_to_write);
void position(const byte *record);
void info(uint);
- int extra(enum ha_extra_function operation);
- int reset(void);
- int external_lock(THD *thd, int lock_type);
- ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
int optimize(THD* thd, HA_CHECK_OPT* check_opt);
+ int repair(THD* thd, HA_CHECK_OPT* check_opt);
+ void start_bulk_insert(ha_rows rows);
+ int end_bulk_insert();
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
};
-bool archive_db_init(void);
+handlerton *archive_db_init(void);
bool archive_db_end(void);
diff --git a/sql/examples/ha_example.cc b/sql/examples/ha_example.cc
index 2592d307e37..66d1a801333 100644
--- a/sql/examples/ha_example.cc
+++ b/sql/examples/ha_example.cc
@@ -186,8 +186,14 @@ static int free_share(EXAMPLE_SHARE *share)
exist for the storage engine. This is also used by the default rename_table and
delete_table method in handler.cc.
*/
+static const char *ha_example_exts[] = {
+ NullS
+};
+
const char **ha_example::bas_ext() const
-{ static const char *ext[]= { NullS }; return ext; }
+{
+ return ha_example_exts;
+}
/*
@@ -414,7 +420,7 @@ int ha_example::rnd_next(byte *buf)
position() is called after each call to rnd_next() if the data needs
to be ordered. You can do something like the following to store
the position:
- ha_store_ptr(ref, ref_length, current_position);
+ my_store_ptr(ref, ref_length, current_position);
The server uses ref to store data. ref_length in the above case is
the size needed to store current_position. ref is just a byte array
diff --git a/sql/examples/ha_tina.cc b/sql/examples/ha_tina.cc
index 16e28e5ae85..6ca7f67ef66 100644
--- a/sql/examples/ha_tina.cc
+++ b/sql/examples/ha_tina.cc
@@ -376,7 +376,7 @@ int ha_tina::find_current_row(byte *buf)
}
next_position= (end_ptr - share->mapped_file)+1;
/* Maybe use \N for null? */
- memset(buf, 0, table->null_bytes); /* We do not implement nulls! */
+ memset(buf, 0, table->s->null_bytes); /* We do not implement nulls! */
DBUG_RETURN(0);
}
@@ -385,8 +385,15 @@ int ha_tina::find_current_row(byte *buf)
If frm_error() is called in table.cc this is called to find out what file
extensions exist for this handler.
*/
+static const char *ha_tina_exts[] = {
+ ".CSV",
+ NullS
+};
+
const char **ha_tina::bas_ext() const
-{ static const char *ext[]= { ".CSV", NullS }; return ext; }
+{
+ return ha_tina_exts;
+}
/*
@@ -427,7 +434,7 @@ int ha_tina::write_row(byte * buf)
int size;
DBUG_ENTER("ha_tina::write_row");
- statistic_increment(ha_write_count,&LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
table->timestamp_field->set_time();
@@ -463,7 +470,8 @@ int ha_tina::update_row(const byte * old_data, byte * new_data)
int size;
DBUG_ENTER("ha_tina::update_row");
- statistic_increment(ha_update_count,&LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_update_count,
+ &LOCK_status);
if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
table->timestamp_field->set_time();
@@ -490,7 +498,7 @@ int ha_tina::update_row(const byte * old_data, byte * new_data)
int ha_tina::delete_row(const byte * buf)
{
DBUG_ENTER("ha_tina::delete_row");
- statistic_increment(ha_delete_count,&LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status);
if (chain_append())
DBUG_RETURN(-1);
@@ -630,7 +638,8 @@ int ha_tina::rnd_next(byte *buf)
{
DBUG_ENTER("ha_tina::rnd_next");
- statistic_increment(ha_read_rnd_next_count,&LOCK_status);
+ statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
+ &LOCK_status);
current_position= next_position;
if (!share->mapped_file)
@@ -646,7 +655,7 @@ int ha_tina::rnd_next(byte *buf)
In the case of an ORDER BY, rows will need to be sorted.
::position() is called after each call to ::rnd_next(),
the data it stores is to a byte array. You can store this
- data via ha_store_ptr(). ref_length is a variable defined to the
+ data via my_store_ptr(). ref_length is a variable defined to the
class that is the sizeof() of position being stored. In our case
it's just a position. Look at the bdb code if you want to see a case
where something other than a number is stored.
@@ -654,21 +663,22 @@ int ha_tina::rnd_next(byte *buf)
void ha_tina::position(const byte *record)
{
DBUG_ENTER("ha_tina::position");
- ha_store_ptr(ref, ref_length, current_position);
+ my_store_ptr(ref, ref_length, current_position);
DBUG_VOID_RETURN;
}
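Since ref_length was set to the size of a file offset in the constructor, position() and rnd_pos() form a symmetric pack/unpack pair. A minimal round-trip sketch (variable names are illustrative):

  /* position() packs the current offset into 'ref'... */
  my_store_ptr(ref, ref_length, current_position);
  /* ...and rnd_pos() later unpacks the same value from 'pos'. */
  my_off_t restored= my_get_ptr(pos, ref_length);
  /* restored == current_position */
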
/*
Used to fetch a row from a position stored with ::position().
- ha_get_ptr() retrieves the data for you.
+ my_get_ptr() retrieves the data for you.
*/
int ha_tina::rnd_pos(byte * buf, byte *pos)
{
DBUG_ENTER("ha_tina::rnd_pos");
- statistic_increment(ha_read_rnd_count,&LOCK_status);
- current_position= ha_get_ptr(pos,ref_length);
+ statistic_increment(table->in_use->status_var.ha_read_rnd_count,
+ &LOCK_status);
+ current_position= my_get_ptr(pos,ref_length);
DBUG_RETURN(find_current_row(buf));
}