-rw-r--r--  BitKeeper/etc/logging_ok   |   1
-rw-r--r--  acconfig.h                 |   3
-rw-r--r--  acinclude.m4               |  30
-rw-r--r--  configure.in               |   1
-rw-r--r--  sql/Makefile.am            |   4
-rw-r--r--  sql/examples/ha_archive.cc | 546
-rw-r--r--  sql/examples/ha_archive.h  | 121
-rw-r--r--  sql/handler.cc             |   9
-rw-r--r--  sql/handler.h              |   2
-rw-r--r--  sql/mysql_priv.h           |   3
-rw-r--r--  sql/mysqld.cc              |   7
11 files changed, 722 insertions(+), 5 deletions(-)
diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok
index d844c855de3..870b7e06eba 100644
--- a/BitKeeper/etc/logging_ok
+++ b/BitKeeper/etc/logging_ok
@@ -24,6 +24,7 @@ bell@laptop.sanja.is.com.ua
bell@sanja.is.com.ua
bk@admin.bk
bk@mysql.r18.ru
+brian@avenger.(none)
brian@brian-akers-computer.local
carsten@tsort.bitbybit.dk
davida@isil.mysql.com
diff --git a/acconfig.h b/acconfig.h
index 67e9d1759c6..1f9fa081294 100644
--- a/acconfig.h
+++ b/acconfig.h
@@ -115,6 +115,9 @@
/* Builds Example DB */
#undef HAVE_EXAMPLE_DB
+/* Builds Archive Storage Engine */
+#undef HAVE_ARCHIVE_DB
+
/* fp_except from ieeefp.h */
#undef HAVE_FP_EXCEPT
diff --git a/acinclude.m4 b/acinclude.m4
index 677c3cc9e99..3edaad0e2bb 100644
--- a/acinclude.m4
+++ b/acinclude.m4
@@ -1333,6 +1333,36 @@ dnl END OF MYSQL_CHECK_EXAMPLE SECTION
dnl ---------------------------------------------------------------------------
dnl ---------------------------------------------------------------------------
+dnl Macro: MYSQL_CHECK_ARCHIVEDB
+dnl Sets HAVE_ARCHIVE_DB if --with-archive-storage-engine is used
+dnl ---------------------------------------------------------------------------
+AC_DEFUN([MYSQL_CHECK_ARCHIVEDB], [
+ AC_ARG_WITH([archive-storage-engine],
+ [
+ --with-archive-storage-engine
+                          Enable the Archive Storage Engine],
+ [archivedb="$withval"],
+ [archivedb=no])
+ AC_MSG_CHECKING([for archive storage engine])
+
+ case "$archivedb" in
+ yes )
+ AC_DEFINE(HAVE_ARCHIVE_DB)
+ AC_MSG_RESULT([yes])
+ [archivedb=yes]
+ ;;
+ * )
+ AC_MSG_RESULT([no])
+ [archivedb=no]
+ ;;
+ esac
+
+])
+dnl ---------------------------------------------------------------------------
+dnl END OF MYSQL_CHECK_ARCHIVE SECTION
+dnl ---------------------------------------------------------------------------
+
+dnl ---------------------------------------------------------------------------
dnl Macro: MYSQL_CHECK_NDBCLUSTER
dnl Sets HAVE_NDBCLUSTER_DB if --with-ndbcluster is used
dnl ---------------------------------------------------------------------------
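The new switch only defines HAVE_ARCHIVE_DB; everything else in this patch keys off that define at compile time, as the handler.cc hunk further down shows. A minimal sketch of the consuming side (the define name and header path are the patch's own; the demo function is illustrative):

    /* Sketch: how the configure-time define gates Archive code in the
       sources. Built without --with-archive-storage-engine, none of the
       Archive code is compiled at all. */
    #ifdef HAVE_ARCHIVE_DB
    #include "examples/ha_archive.h"   /* declares class ha_archive */
    #endif

    int demo_archive_enabled()
    {
    #ifdef HAVE_ARCHIVE_DB
      return 1;                        /* engine compiled in */
    #else
      return 0;                        /* engine compiled out entirely */
    #endif
    }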
diff --git a/configure.in b/configure.in
index 94ad5a47991..546b5a10e26 100644
--- a/configure.in
+++ b/configure.in
@@ -2619,6 +2619,7 @@ MYSQL_CHECK_ISAM
MYSQL_CHECK_BDB
MYSQL_CHECK_INNODB
MYSQL_CHECK_EXAMPLEDB
+MYSQL_CHECK_ARCHIVEDB
MYSQL_CHECK_NDBCLUSTER
# If we have threads generate some library functions and test programs
diff --git a/sql/Makefile.am b/sql/Makefile.am
index bacf3bc58d1..f56ab646c09 100644
--- a/sql/Makefile.am
+++ b/sql/Makefile.am
@@ -58,7 +58,7 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
log_event.h sql_repl.h slave.h \
stacktrace.h sql_sort.h sql_cache.h set_var.h \
spatial.h gstream.h client_settings.h \
- examples/ha_example.h
+ examples/ha_example.h examples/ha_archive.h
mysqld_SOURCES = sql_lex.cc sql_handler.cc \
item.cc item_sum.cc item_buff.cc item_func.cc \
item_cmpfunc.cc item_strfunc.cc item_timefunc.cc \
@@ -88,7 +88,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \
client.c sql_client.cc mini_client_errors.c pack.c\
stacktrace.c repl_failsafe.h repl_failsafe.cc sql_olap.cc\
gstream.cc spatial.cc sql_help.cc protocol_cursor.cc \
- examples/ha_example.cc
+ examples/ha_example.cc examples/ha_archive.cc
gen_lex_hash_SOURCES = gen_lex_hash.cc
gen_lex_hash_LDADD = $(LDADD) $(CXXLDFLAGS)
diff --git a/sql/examples/ha_archive.cc b/sql/examples/ha_archive.cc
new file mode 100644
index 00000000000..21a5c398a20
--- /dev/null
+++ b/sql/examples/ha_archive.cc
@@ -0,0 +1,546 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifdef __GNUC__
+#pragma implementation // gcc: Class implementation
+#endif
+
+#include <mysql_priv.h>
+
+#ifdef HAVE_ARCHIVE_DB
+#include "ha_archive.h"
+
+/*
+ First, if you want to understand storage engines you should look at
+ ha_example.cc and ha_example.h.
+ This example was written as a test case for a customer who needed
+ a storage engine without indexes that could compress data very well.
+  So, welcome to a completely compressed storage engine. This storage
+  engine only does inserts; no replaces or updates. All reads are
+  complete table scans. Compression is done through gzip (bzip2 compresses
+  better, but only marginally; if someone asks I could add support for
+  it too, but be aware that it costs a lot more in CPU time than gzip).
+
+  We keep a file pointer open for each instance of ha_archive for each read,
+  but for writes we keep one open file handle just for that. We flush it
+  only when a read occurs. gzip handles compressing lots of records
+  at once much better than lots of little records between writes.
+  It would be possible not to lock on writes, but then we couldn't
+  handle bulk inserts as well (that is, if someone were trying to read at
+  the same time, we would want to flush).
+
+ No attempts at durability are made. You can corrupt your data.
+
+  As far as table scans go, it is quite fast. I don't have
+  good numbers, but locally it has outperformed both InnoDB and MyISAM. For
+  InnoDB the question will be whether the table can fit into the buffer
+  pool. For MyISAM it's a question of how much of the MyISAM file the
+  filesystem caches. With enough free memory MyISAM is faster. It's only
+  when the OS doesn't have enough memory to cache the entire table that
+  archive turns out to be any faster. For writes it is always a bit slower
+  than MyISAM. It has no internal limits on row length, though.
+
+ TODO:
+  Add optional bzip2 support.
+ Allow users to set compression level.
+ Add truncate table command.
+ Implement versioning, should be easy.
+ Implement optimize so we can fix broken tables.
+ Allow for errors, find a way to mark bad rows.
+ See if during an optimize you can make the table smaller.
+ Talk to the gzip guys, come up with a writable format so that updates are doable
+ without switching to a block method.
+
+ -Brian
+*/
+
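The comment above is the heart of the design: one append-mode gzip handle shared by all writers, per-scan read handles, and a flush that only happens when a reader shows up. A standalone sketch of that zlib pattern, assuming only <zlib.h>; the demo_* names are illustrative, not part of the patch:

    #include <zlib.h>

    /* Append a row through a write handle, then make it visible to a
       reader by closing and reopening (plain gzflush() proved unreliable
       here, per ha_archive::rnd_init() below). */
    static int demo_roundtrip(const char *path)
    {
      gzFile wr= gzopen(path, "ab");             /* shared write handle */
      if (!wr)
        return -1;
      const char row[32]= "a fixed-length row image";
      if (gzwrite(wr, row, sizeof(row)) != (int) sizeof(row))
      {
        gzclose(wr);
        return -1;
      }
      gzclose(wr);                               /* the reliable "flush" */

      gzFile rd= gzopen(path, "rb");             /* per-scan read handle */
      if (!rd)
        return -1;
      char buf[sizeof(row)];
      int rc= gzread(rd, buf, sizeof(buf)) == (int) sizeof(buf) ? 0 : -1;
      gzclose(rd);
      return rc;
    }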
+/* Variables for archive share methods */
+pthread_mutex_t archive_mutex;
+static HASH archive_open_tables;
+static int archive_init= 0;
+
+/* The file extension */
+#define ARZ ".ARZ"
+
+/*
+ Used for hash table that tracks open tables.
+*/
+static byte* archive_get_key(ARCHIVE_SHARE *share,uint *length,
+ my_bool not_used __attribute__((unused)))
+{
+ *length=share->table_name_length;
+ return (byte*) share->table_name;
+}
+
+
+/*
+ Example of simple lock controls.
+ See ha_example.cc for a description.
+*/
+static ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table)
+{
+ ARCHIVE_SHARE *share;
+ uint length;
+ char *tmp_name;
+
+ if (!archive_init)
+ {
+ /* Hijack a mutex for init'ing the storage engine */
+ pthread_mutex_lock(&LOCK_mysql_create_db);
+ if (!archive_init)
+ {
+ archive_init++;
+ VOID(pthread_mutex_init(&archive_mutex,MY_MUTEX_INIT_FAST));
+ (void) hash_init(&archive_open_tables,system_charset_info,32,0,0,
+ (hash_get_key) archive_get_key,0,0);
+ }
+ pthread_mutex_unlock(&LOCK_mysql_create_db);
+ }
+ pthread_mutex_lock(&archive_mutex);
+ length=(uint) strlen(table_name);
+
+ if (!(share=(ARCHIVE_SHARE*) hash_search(&archive_open_tables,
+ (byte*) table_name,
+ length)))
+ {
+ if (!(share=(ARCHIVE_SHARE *)
+ my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
+ &share, sizeof(*share),
+ &tmp_name, length+1,
+ NullS)))
+ {
+ pthread_mutex_unlock(&archive_mutex);
+ return NULL;
+ }
+
+ share->use_count=0;
+ share->table_name_length=length;
+ share->table_name=tmp_name;
+ fn_format(share->data_file_name,table_name,"",ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
+ strmov(share->table_name,table_name);
+ if (my_hash_insert(&archive_open_tables, (byte*) share))
+ goto error;
+ /*
+      It is expensive to open and close the data files, and since you can't
+      have a gzip file that is both read from and written to, we keep one
+      write handle open and share it among all open tables.
+ */
+ if ((share->archive_write = gzopen(share->data_file_name, "ab")) == NULL)
+ goto error;
+ thr_lock_init(&share->lock);
+ if (pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST))
+ goto error2;
+ }
+ share->use_count++;
+ pthread_mutex_unlock(&archive_mutex);
+
+ return share;
+
+error2:
+ thr_lock_delete(&share->lock);
+ /* We close, but ignore errors since we already have errors */
+ (void)gzclose(share->archive_write);
+error:
+ pthread_mutex_unlock(&archive_mutex);
+ my_free((gptr) share, MYF(0));
+
+ return NULL;
+}
+
+
+/*
+ Free lock controls.
+ See ha_example.cc for a description.
+*/
+static int free_share(ARCHIVE_SHARE *share)
+{
+ int rc= 0;
+ pthread_mutex_lock(&archive_mutex);
+ if (!--share->use_count)
+ {
+ hash_delete(&archive_open_tables, (byte*) share);
+ thr_lock_delete(&share->lock);
+ pthread_mutex_destroy(&share->mutex);
+    if (gzclose(share->archive_write) == Z_ERRNO)
+      rc= -1;
+    my_free((gptr) share, MYF(0));
+ }
+ pthread_mutex_unlock(&archive_mutex);
+
+ return rc;
+}
+
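get_share() and free_share() are the usual shared-structure pattern: one ARCHIVE_SHARE per table name, found through a hash and reference-counted under a global mutex so that the last ha_archive to close tears it down. The same shape in a standalone sketch, with std::map and std::mutex standing in for the server's HASH and pthread primitives (all demo_* names are illustrative):

    #include <map>
    #include <mutex>
    #include <string>

    struct DemoShare
    {
      unsigned use_count= 0;             /* how many handlers hold us */
      /* per-table state (write handle, locks, ...) would live here */
    };

    static std::mutex demo_mutex;                       /* plays archive_mutex */
    static std::map<std::string, DemoShare*> demo_open; /* plays archive_open_tables */

    DemoShare *demo_get_share(const std::string &name)
    {
      std::lock_guard<std::mutex> guard(demo_mutex);
      DemoShare *&share= demo_open[name];    /* find the slot or create it */
      if (!share)
        share= new DemoShare;
      share->use_count++;
      return share;
    }

    void demo_free_share(const std::string &name, DemoShare *share)
    {
      std::lock_guard<std::mutex> guard(demo_mutex);
      if (--share->use_count == 0)           /* last user frees the share */
      {
        demo_open.erase(name);
        delete share;
      }
    }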
+
+/*
+ We just implement one additional file extension.
+*/
+const char **ha_archive::bas_ext() const
+{ static const char *ext[]= { ARZ, NullS }; return ext; }
+
+
+/*
+ When opening a file we:
+ Create/get our shared structure.
+  Init our lock.
+ We open the file we will read from.
+ Set the size of ref_length.
+*/
+int ha_archive::open(const char *name, int mode, uint test_if_locked)
+{
+ DBUG_ENTER("ha_archive::open");
+
+ if (!(share = get_share(name, table)))
+ DBUG_RETURN(1);
+ thr_lock_data_init(&share->lock,&lock,NULL);
+
+ if ((archive = gzopen(share->data_file_name, "rb")) == NULL)
+ DBUG_RETURN(-1);
+
+ DBUG_RETURN(0);
+}
+
+
+/*
+  Closes the file. We first close this storage engine's file handle to the
+  archive and then remove our reference count to the table (and possibly
+ free it as well).
+ */
+int ha_archive::close(void)
+{
+ DBUG_ENTER("ha_archive::close");
+ int rc= 0;
+ if (gzclose(archive) == Z_ERRNO)
+    rc= -1;
+ rc |= free_share(share);
+  DBUG_RETURN(rc);
+}
+
+
+/*
+ We create our data file here. The format is pretty simple. The first bytes in
+ any file are the version number. Currently we do nothing with this, but in
+ the future this gives us the ability to figure out version if we change the
+  format at all. After the version we start writing our rows. Unlike other
+ storage engines we do not "pack" our data. Since we are about to do a general
+ compression, packing would just be a waste of CPU time. If the table has blobs
+ they are written after the row in the order of creation.
+ So to read a row we:
+ Read the version
+ Read the record and copy it into buf
+ Loop through any blobs and read them
+ */
+int ha_archive::create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info)
+{
+ File create_file;
+ char name_buff[FN_REFLEN];
+ size_t written;
+ DBUG_ENTER("ha_archive::create");
+
+ if ((create_file = my_create(fn_format(name_buff,name,"",ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
+ O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
+ DBUG_RETURN(-1);
+ if ((archive = gzdopen(create_file, "ab")) == NULL)
+ DBUG_RETURN(-1);
+ version = ARCHIVE_VERSION;
+ written = gzwrite(archive, &version, sizeof(version));
+ if (written == 0 || written != sizeof(version))
+ DBUG_RETURN(-1);
+ gzclose(archive);
+ (void)my_close(create_file,MYF(0));
+
+ DBUG_RETURN(0);
+}
+
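Given the layout create() writes (a version header followed by raw row images), any reader must consume the header before the first row, which is exactly what rnd_init() does below. A minimal standalone check of that header, assuming <zlib.h> and the ARCHIVE_VERSION value of 1 from ha_archive.h; the demo name is illustrative:

    #include <zlib.h>

    static int demo_check_header(const char *path)
    {
      unsigned int version= 0;           /* same type as ha_archive::version */
      gzFile f= gzopen(path, "rb");
      if (!f)
        return -1;
      int read= gzread(f, &version, sizeof(version));
      gzclose(f);
      /* ARCHIVE_VERSION is 1 in this patch; anything else means a foreign
         or damaged file */
      return (read == (int) sizeof(version) && version == 1) ? 0 : -1;
    }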
+/*
+  Look at ha_archive::create() for an explanation of the row format.
+ Here we just write out the row.
+*/
+int ha_archive::write_row(byte * buf)
+{
+ char *pos;
+ z_off_t written;
+ DBUG_ENTER("ha_archive::write_row");
+
+ statistic_increment(ha_write_count,&LOCK_status);
+ if (table->timestamp_default_now)
+    update_timestamp(buf+table->timestamp_default_now-1);
+ written = gzwrite(share->archive_write, buf, table->reclength);
+ share->dirty= true;
+ if (written == 0 || written != table->reclength)
+ DBUG_RETURN(-1);
+
+ for (Field_blob **field=table->blob_field ; *field ; field++)
+ {
+ char *ptr;
+ uint32 size= (*field)->get_length();
+
+ (*field)->get_ptr(&ptr);
+ written = gzwrite(share->archive_write, ptr, (unsigned)size);
+ if (written == 0 || written != size)
+ DBUG_RETURN(-1);
+ }
+
+ DBUG_RETURN(0);
+}
+
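Note the blob convention: each blob's length is part of the fixed-size row image (that is what Field_blob::get_length() returns), so a reader always knows how many trailing bytes belong to the row before reading them, as read_row() below relies on. A standalone sketch of writing in that convention; the demo name and the 64-byte row size are illustrative:

    #include <zlib.h>
    #include <cstring>

    static int demo_write_blob_row(gzFile wr, const char *blob, unsigned blob_len)
    {
      char row[64];                      /* stands in for table->reclength */
      std::memset(row, 0, sizeof(row));
      /* the blob's length travels inside the row image itself */
      std::memcpy(row, &blob_len, sizeof(blob_len));
      if (gzwrite(wr, row, sizeof(row)) != (int) sizeof(row))
        return -1;
      if (gzwrite(wr, blob, blob_len) != (int) blob_len)
        return -1;                       /* blob bytes follow the row */
      return 0;
    }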
+
+/*
+ All calls that need to scan the table start with this method. If we are told
+ that it is a table scan we rewind the file to the beginning, otherwise
+ we assume the position will be set.
+*/
+int ha_archive::rnd_init(bool scan)
+{
+ DBUG_ENTER("ha_archive::rnd_init");
+ int read; // gzread() returns int, and we use this to check the header
+ /* We rewind the file so that we can read from the beginning if scan */
+  if (scan)
+ if (gzrewind(archive))
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+ /*
+ If dirty, we lock, and then reset/flush the data.
+ I found that just calling gzflush() doesn't always work.
+ */
+ if (share->dirty == true)
+ {
+ pthread_mutex_lock(&share->mutex);
+ if (share->dirty == true)
+ {
+ gzclose(share->archive_write);
+ if ((share->archive_write = gzopen(share->data_file_name, "ab")) == NULL)
+ {
+ pthread_mutex_unlock(&share->mutex);
+ DBUG_RETURN(-1);
+ }
+ share->dirty= false;
+ }
+ pthread_mutex_unlock(&share->mutex);
+ }
+
+ /*
+ At the moment we just check the size of version to make sure the header is
+ intact.
+ */
+ read= gzread(archive, &version, sizeof(version));
+  if (read == 0 || read != sizeof(version))
+ DBUG_RETURN(-1);
+ records = 0;
+ DBUG_RETURN(0);
+}
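The dirty-flag dance above is double-checked locking: readers peek at share->dirty without the mutex on the common clean path, and only a reader that sees it set takes the mutex, re-checks, and pays for the close-and-reopen flush. The same idiom in a standalone sketch mirroring the patch's approach (like the original, the unlocked read is benign in practice but not formally race-free; std::mutex stands in for share->mutex and the demo names are illustrative):

    #include <mutex>

    static std::mutex demo_flush_mutex;  /* plays share->mutex */
    static bool demo_dirty= false;       /* plays share->dirty */

    void demo_flush_if_dirty()
    {
      if (demo_dirty)                    /* cheap unlocked peek */
      {
        std::lock_guard<std::mutex> guard(demo_flush_mutex);
        if (demo_dirty)                  /* re-check under the lock */
        {
          /* close and reopen the shared write handle here */
          demo_dirty= false;
        }
      }
    }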
+
+
+/*
+ This is the method that is used to read a row. It assumes that the row is
+ positioned where you want it.
+*/
+int ha_archive::read_row(byte *buf)
+{
+ int read; // Bytes read, gzread() returns int
+ char *last;
+ size_t total_blob_length= 0;
+ DBUG_ENTER("ha_archive::read_row");
+
+ read = gzread(archive, buf, table->reclength);
+
+ /* If we read nothing we are at the end of the file */
+ if (read == 0)
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+
+ /* If the record is the wrong size, the file is probably damaged */
+ if (read != table->reclength)
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+
+ /* Calculate blob length, we use this for our buffer */
+ for (Field_blob **field=table->blob_field; *field ; field++)
+ total_blob_length += (*field)->get_length();
+
+  /* Adjust our row buffer if need be */
+ buffer.alloc(total_blob_length);
+ last = (char *)buffer.ptr();
+
+  /* Loop through our blobs and read them */
+ for (Field_blob **field=table->blob_field; *field ; field++)
+ {
+    /* Need to set up the buffer so that it is used to contain all blobs */
+ size_t size= (*field)->get_length();
+ read = gzread(archive, last, size);
+ if (read == 0 || read != size)
+ DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
+ (*field)->set_ptr(size, last);
+ last += size;
+ }
+ DBUG_RETURN(0);
+}
+
+/*
+ Called during ORDER BY. Its position is either from being called sequentially
+ or by having had ha_archive::rnd_pos() called before it is called.
+*/
+int ha_archive::rnd_next(byte *buf)
+{
+ DBUG_ENTER("ha_archive::rnd_next");
+ int rc;
+
+ statistic_increment(ha_read_rnd_next_count,&LOCK_status);
+ current_position = gztell(archive);
+ rc = read_row(buf);
+  if (rc != HA_ERR_END_OF_FILE)
+ records++;
+
+ DBUG_RETURN(rc);
+}
+
+
+/*
+ Thanks to the table flag HA_REC_NOT_IN_SEQ this will be called after
+ each call to ha_archive::rnd_next() if an ordering of the rows is
+ needed.
+*/
+void ha_archive::position(const byte *record)
+{
+ DBUG_ENTER("ha_archive::position");
+ ha_store_ptr(ref, ref_length, current_position);
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ This is called after a table scan for each row if the results of the scan need
+ to be ordered. It will take *pos and use it to move the cursor in the file so
+ that the next row that is called is the correctly ordered row.
+*/
+int ha_archive::rnd_pos(byte * buf, byte *pos)
+{
+ DBUG_ENTER("ha_archive::rnd_pos");
+ statistic_increment(ha_read_rnd_count,&LOCK_status);
+ current_position = ha_get_ptr(pos, ref_length);
+  (void)gzseek(archive, current_position, SEEK_SET);
+
+ DBUG_RETURN(read_row(buf));
+}
+
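position() and rnd_pos() above round-trip a plain gzip stream offset: rnd_next() records gztell() before each read, position() stashes that offset in ref, and rnd_pos() hands it back to gzseek(). The zlib half of that contract in a standalone sketch; the demo name is illustrative:

    #include <zlib.h>

    /* Read a row, then prove we can come back to it by offset alone. */
    static int demo_reread(gzFile f, char *buf, unsigned len)
    {
      z_off_t pos= gztell(f);                /* what position() would store */
      if (gzread(f, buf, len) != (int) len)  /* the sequential read */
        return -1;
      if (gzseek(f, pos, SEEK_SET) < 0)      /* what rnd_pos() does */
        return -1;
      return gzread(f, buf, len) == (int) len ? 0 : -1; /* same row again */
    }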
+/******************************************************************************
+
+ Everything below here is default, please look at ha_example.cc for
+ descriptions.
+
+ ******************************************************************************/
+
+int ha_archive::update_row(const byte * old_data, byte * new_data)
+{
+
+ DBUG_ENTER("ha_archive::update_row");
+ DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+}
+
+int ha_archive::delete_row(const byte * buf)
+{
+ DBUG_ENTER("ha_archive::delete_row");
+ DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+}
+
+int ha_archive::index_read(byte * buf, const byte * key,
+ uint key_len __attribute__((unused)),
+ enum ha_rkey_function find_flag
+ __attribute__((unused)))
+{
+ DBUG_ENTER("ha_archive::index_read");
+ DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+}
+
+int ha_archive::index_read_idx(byte * buf, uint index, const byte * key,
+ uint key_len __attribute__((unused)),
+ enum ha_rkey_function find_flag
+ __attribute__((unused)))
+{
+ DBUG_ENTER("ha_archive::index_read_idx");
+ DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+}
+
+
+int ha_archive::index_next(byte * buf)
+{
+ DBUG_ENTER("ha_archive::index_next");
+ DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+}
+
+int ha_archive::index_prev(byte * buf)
+{
+ DBUG_ENTER("ha_archive::index_prev");
+ DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+}
+
+int ha_archive::index_first(byte * buf)
+{
+ DBUG_ENTER("ha_archive::index_first");
+ DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+}
+
+int ha_archive::index_last(byte * buf)
+{
+ DBUG_ENTER("ha_archive::index_last");
+ DBUG_RETURN(HA_ERR_NOT_IMPLEMENTED);
+}
+
+
+void ha_archive::info(uint flag)
+{
+ DBUG_ENTER("ha_archive::info");
+ /* This is a lie, but you don't want the optimizer to see zero or 1 */
+ if (records < 2)
+ records = 2;
+ DBUG_VOID_RETURN;
+}
+
+int ha_archive::extra(enum ha_extra_function operation)
+{
+ DBUG_ENTER("ha_archive::extra");
+ DBUG_RETURN(0);
+}
+
+int ha_archive::reset(void)
+{
+ DBUG_ENTER("ha_archive::reset");
+ DBUG_RETURN(0);
+}
+
+
+int ha_archive::external_lock(THD *thd, int lock_type)
+{
+ DBUG_ENTER("ha_archive::external_lock");
+ DBUG_RETURN(0);
+}
+
+THR_LOCK_DATA **ha_archive::store_lock(THD *thd,
+ THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type)
+{
+ if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
+ lock.type=lock_type;
+ *to++= &lock;
+ return to;
+}
+
+ha_rows ha_archive::records_in_range(int inx,
+ const byte *start_key,uint start_key_len,
+ enum ha_rkey_function start_search_flag,
+ const byte *end_key,uint end_key_len,
+ enum ha_rkey_function end_search_flag)
+{
+ DBUG_ENTER("ha_archive::records_in_range ");
+ DBUG_RETURN(records); // HA_ERR_NOT_IMPLEMENTED
+}
+#endif /* HAVE_ARCHIVE_DB */
diff --git a/sql/examples/ha_archive.h b/sql/examples/ha_archive.h
new file mode 100644
index 00000000000..b1909d98b99
--- /dev/null
+++ b/sql/examples/ha_archive.h
@@ -0,0 +1,121 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <zlib.h>
+
+/*
+ Please read ha_archive.cc first. If you are looking for more general
+ answers on how storage engines work, look at ha_example.cc and
+ ha_example.h.
+*/
+
+typedef struct st_archive_share {
+ char *table_name;
+ char data_file_name[FN_REFLEN];
+ uint table_name_length,use_count;
+ pthread_mutex_t mutex;
+ THR_LOCK lock;
+ gzFile archive_write; /* Archive file we are working with */
+ bool dirty; /* Flag for if a flush should occur */
+} ARCHIVE_SHARE;
+
+/*
+ Version for file format.
+ 1 - Initial Version
+*/
+#define ARCHIVE_VERSION 1
+
+class ha_archive: public handler
+{
+ THR_LOCK_DATA lock; /* MySQL lock */
+ ARCHIVE_SHARE *share; /* Shared lock info */
+ gzFile archive; /* Archive file we are working with */
+ z_off_t current_position; /* The position of the row we just read */
+ byte byte_buffer[IO_SIZE]; /* Initial buffer for our string */
+ String buffer; /* Buffer used for blob storage */
+ unsigned int version; /* Used for recording version */
+
+public:
+ ha_archive(TABLE *table): handler(table)
+ {
+ /* Set our original buffer from pre-allocated memory */
+ buffer.set(byte_buffer, IO_SIZE, system_charset_info);
+
+ /* The size of the offset value we will use for position() */
+ ref_length = sizeof(z_off_t);
+ }
+ ~ha_archive()
+ {
+ }
+ const char *table_type() const { return "ARCHIVE"; }
+ const char *index_type(uint inx) { return "NONE"; }
+ const char **bas_ext() const;
+ ulong table_flags() const
+ {
+ return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT | HA_NO_WRITE_DELAYED |
+ HA_NO_AUTO_INCREMENT );
+ }
+ ulong index_flags(uint inx) const
+ {
+ return 0;
+ }
+ /*
+ This is just a default, there is no real limit as far as
+ archive is concerned.
+ */
+ uint max_record_length() const { return HA_MAX_REC_LENGTH; }
+ uint max_keys() const { return 0; }
+ uint max_key_parts() const { return 0; }
+ uint max_key_length() const { return 0; }
+ /*
+ Called in test_quick_select to determine if indexes should be used.
+ */
+ virtual double scan_time() { return (double) (records+deleted) / 20.0+10; }
+ /* The next method will never be called */
+ virtual double read_time(ha_rows rows) { return (double) rows / 20.0+1; }
+ virtual bool fast_key_read() { return 1;}
+
+ int open(const char *name, int mode, uint test_if_locked);
+ int close(void);
+ int write_row(byte * buf);
+ int update_row(const byte * old_data, byte * new_data);
+ int delete_row(const byte * buf);
+ int index_read(byte * buf, const byte * key,
+ uint key_len, enum ha_rkey_function find_flag);
+ int index_read_idx(byte * buf, uint idx, const byte * key,
+ uint key_len, enum ha_rkey_function find_flag);
+ int index_next(byte * buf);
+ int index_prev(byte * buf);
+ int index_first(byte * buf);
+ int index_last(byte * buf);
+ int rnd_init(bool scan=1);
+ int rnd_next(byte *buf);
+ int rnd_pos(byte * buf, byte *pos);
+  int read_row(byte *buf);
+ void position(const byte *record);
+ void info(uint);
+ int extra(enum ha_extra_function operation);
+ int reset(void);
+ int external_lock(THD *thd, int lock_type);
+ ha_rows records_in_range(int inx, const byte *start_key,uint start_key_len,
+ enum ha_rkey_function start_search_flag,
+ const byte *end_key,uint end_key_len,
+ enum ha_rkey_function end_search_flag);
+ int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
+
+ THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type);
+};
diff --git a/sql/handler.cc b/sql/handler.cc
index 97abc11abe3..d0dad5dcb9e 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -35,6 +35,9 @@
#ifdef HAVE_EXAMPLE_DB
#include "examples/ha_example.h"
#endif
+#ifdef HAVE_ARCHIVE_DB
+#include "examples/ha_archive.h"
+#endif
#ifdef HAVE_INNOBASE_DB
#include "ha_innodb.h"
#else
@@ -81,6 +84,8 @@ struct show_table_type_st sys_table_types[]=
"Alias for BDB", DB_TYPE_BERKELEY_DB},
{"EXAMPLE",&have_example_db,
"Example storage engine", DB_TYPE_EXAMPLE_DB},
+ {"ARCHIVE",&have_archive_db,
+ "Archive storage engine", DB_TYPE_ARCHIVE_DB},
{NullS, NULL, NullS, DB_TYPE_UNKNOWN}
};
@@ -181,6 +186,10 @@ handler *get_new_handler(TABLE *table, enum db_type db_type)
case DB_TYPE_EXAMPLE_DB:
return new ha_example(table);
#endif
+#ifdef HAVE_ARCHIVE_DB
+ case DB_TYPE_ARCHIVE_DB:
+ return new ha_archive(table);
+#endif
case DB_TYPE_HEAP:
return new ha_heap(table);
default: // should never happen
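Registering an engine in this era touches three places: the DB_TYPE_* enum (handler.h, next hunk), the sys_table_types[] row behind the have_* SHOW variable, and the get_new_handler() factory above. The factory's shape, reduced to a standalone sketch (all demo names are illustrative, not the server's types):

    /* Sketch of the dispatch shape used in get_new_handler() above */
    enum demo_db_type { DEMO_TYPE_HEAP, DEMO_TYPE_ARCHIVE };

    struct demo_handler { virtual ~demo_handler() {} };
    struct demo_heap : demo_handler {};
    struct demo_archive : demo_handler {};

    demo_handler *demo_new_handler(demo_db_type t)
    {
      switch (t)
      {
      case DEMO_TYPE_ARCHIVE:
        return new demo_archive;       /* only reachable if compiled in */
      case DEMO_TYPE_HEAP:
      default:                         /* unknown types fall back */
        return new demo_heap;
      }
    }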
diff --git a/sql/handler.h b/sql/handler.h
index 9d39eff1301..0a79bf96fa5 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -146,7 +146,7 @@ enum db_type { DB_TYPE_UNKNOWN=0,DB_TYPE_DIAB_ISAM=1,
DB_TYPE_RMS_ISAM, DB_TYPE_HEAP, DB_TYPE_ISAM,
DB_TYPE_MRG_ISAM, DB_TYPE_MYISAM, DB_TYPE_MRG_MYISAM,
DB_TYPE_BERKELEY_DB, DB_TYPE_INNODB, DB_TYPE_GEMINI,
- DB_TYPE_EXAMPLE_DB, DB_TYPE_DEFAULT };
+ DB_TYPE_EXAMPLE_DB, DB_TYPE_ARCHIVE_DB, DB_TYPE_DEFAULT };
struct show_table_type_st {
const char *type;
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index c1b796d19c7..467251c2358 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -908,7 +908,8 @@ extern struct my_option my_long_options[];
/* optional things, have_* variables */
-extern SHOW_COMP_OPTION have_isam, have_innodb, have_berkeley_db, have_example_db;
+extern SHOW_COMP_OPTION have_isam, have_innodb, have_berkeley_db;
+extern SHOW_COMP_OPTION have_example_db, have_archive_db;
extern SHOW_COMP_OPTION have_raid, have_openssl, have_symlink;
extern SHOW_COMP_OPTION have_query_cache, have_berkeley_db, have_innodb;
extern SHOW_COMP_OPTION have_crypt;
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 585c28f3959..c3b9ab42fc6 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -370,7 +370,7 @@ KEY_CACHE *sql_key_cache;
CHARSET_INFO *system_charset_info, *files_charset_info ;
CHARSET_INFO *national_charset_info, *table_alias_charset;
-SHOW_COMP_OPTION have_berkeley_db, have_innodb, have_isam, have_example_db;
+SHOW_COMP_OPTION have_berkeley_db, have_innodb, have_isam, have_example_db, have_archive_db;
SHOW_COMP_OPTION have_raid, have_openssl, have_symlink, have_query_cache;
SHOW_COMP_OPTION have_crypt, have_compress;
@@ -5126,6 +5126,11 @@ static void mysql_init_variables(void)
#else
have_example_db= SHOW_OPTION_NO;
#endif
+#ifdef HAVE_ARCHIVE_DB
+ have_archive_db= SHOW_OPTION_YES;
+#else
+ have_archive_db= SHOW_OPTION_NO;
+#endif
#ifdef USE_RAID
have_raid=SHOW_OPTION_YES;
#else