path: root/sql
author     unknown <brian@avenger.(none)>  2004-08-12 20:57:18 -0700
committer  unknown <brian@avenger.(none)>  2004-08-12 20:57:18 -0700
commit     89987fa1044438f01c4a2efdf5563ec1c50a2102 (patch)
tree       b1e231e4026c122f6cbe3584c1bee687679422a6 /sql
parent     325de4ebf110d2ed45ff7e215c2e835e7503d2b8 (diff)
download   mariadb-git-89987fa1044438f01c4a2efdf5563ec1c50a2102.tar.gz
This is the addition of the CSV engine, aka "tina". It's an example engine that stores its data in a plain text file.
acconfig.h: Adding the HAVE_CSV_DB rule
acinclude.m4: Build option for the CSV engine
configure.in: Update for building CSV
sql/Makefile.am: Added files for the CSV build
sql/handler.cc: Options needed for the CSV handler to be created
sql/handler.h: CSV engine type added to the db_type enum
sql/mysql_priv.h: Addition of the have_csv_db variable
sql/mysqld.cc: Code to show the csv option
sql/set_var.cc: Adding "have" show variables for both csv and example
Diffstat (limited to 'sql')
-rw-r--r--   sql/Makefile.am            6
-rw-r--r--   sql/examples/ha_tina.cc  846
-rw-r--r--   sql/examples/ha_tina.h   132
-rw-r--r--   sql/handler.cc             9
-rw-r--r--   sql/handler.h              2
-rw-r--r--   sql/mysql_priv.h           2
-rw-r--r--   sql/mysqld.cc              7
-rw-r--r--   sql/set_var.cc             2
8 files changed, 1001 insertions(+), 5 deletions(-)
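Before diving into the diff: ha_tina keeps the whole .CSV data file mmap()ed for reads and appends new rows through the plain file descriptor, remapping afterwards so the new data is visible (see get_mmap(), write_row() and rnd_end() below). The following is a rough, standalone POSIX sketch of that access pattern, not part of the patch; the file name and the remap() helper are made up for illustration.

/* Standalone sketch of the mmap-read / fd-append / remap pattern used by ha_tina. */
#include <cstdio>
#include <cstring>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

/* Re-stat and re-mmap the file, the way get_mmap() does after every append. */
static char *remap(int fd, char *old_map, size_t old_size, size_t *new_size)
{
  if (old_map)
    munmap(old_map, old_size);

  struct stat st;
  if (fstat(fd, &st) == -1 || st.st_size == 0)
  {
    *new_size= 0;
    return NULL;
  }
  *new_size= (size_t) st.st_size;
  char *map= (char *) mmap(NULL, *new_size, PROT_READ, MAP_PRIVATE, fd, 0);
  return map == MAP_FAILED ? NULL : map;
}

int main()
{
  const char *path= "example.CSV";            /* made-up file name */
  int fd= open(path, O_RDWR | O_CREAT, 0644);
  if (fd == -1)
    return 1;

  size_t size= 0;
  char *map= remap(fd, NULL, 0, &size);

  /* Append one row through the descriptor, the way write_row() does. */
  const char *row= "\"hello\",\"world\"\n";
  if (lseek(fd, 0, SEEK_END) == (off_t) -1 ||
      write(fd, row, strlen(row)) == -1)
    perror("append");

  /* Remap so the freshly appended row is visible through the mapping. */
  map= remap(fd, map, size, &size);
  if (map)
  {
    fwrite(map, 1, size, stdout);
    munmap(map, size);
  }
  close(fd);
  unlink(path);
  return 0;
}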
diff --git a/sql/Makefile.am b/sql/Makefile.am
index 007239f2e8c..e2d857aaa96 100644
--- a/sql/Makefile.am
+++ b/sql/Makefile.am
@@ -59,7 +59,8 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
log_event.h sql_repl.h slave.h \
stacktrace.h sql_sort.h sql_cache.h set_var.h \
spatial.h gstream.h client_settings.h tzfile.h \
- tztime.h examples/ha_example.h examples/ha_archive.h
+ tztime.h examples/ha_example.h examples/ha_archive.h \
+ examples/ha_tina.h
mysqld_SOURCES = sql_lex.cc sql_handler.cc \
item.cc item_sum.cc item_buff.cc item_func.cc \
item_cmpfunc.cc item_strfunc.cc item_timefunc.cc \
@@ -90,7 +91,8 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \
stacktrace.c repl_failsafe.h repl_failsafe.cc \
gstream.cc spatial.cc sql_help.cc protocol_cursor.cc \
tztime.cc my_time.c \
- examples/ha_example.cc examples/ha_archive.cc
+ examples/ha_example.cc examples/ha_archive.cc \
+ examples/ha_tina.cc
gen_lex_hash_SOURCES = gen_lex_hash.cc
gen_lex_hash_LDADD = $(LDADD) $(CXXLDFLAGS)
diff --git a/sql/examples/ha_tina.cc b/sql/examples/ha_tina.cc
new file mode 100644
index 00000000000..728af469bb0
--- /dev/null
+++ b/sql/examples/ha_tina.cc
@@ -0,0 +1,846 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
+ Make sure to look at ha_tina.h for more details.
+
+ First off, this is a plaything for me; there are a number of things wrong with it:
+ *) It was designed for CSV and therefore its performance is highly questionable.
+ *) Indexes have not been implemented. This is because the files can be traded in
+ and out of the table directory without having to worry about rebuilding anything.
+ *) NULLs and "" are treated equally (like a spreadsheet).
+ *) In the beginning there was no point in anyone other than me seeing this, so there
+ is a good chance that I haven't documented it very well.
+ *) Less design, more "make it work"
+
+ Now there are a few cool things with it:
+ *) Errors can result in corrupted data files.
+ *) Data files can be read by spreadsheets directly.
+
+TODO:
+ *) Move to a block system for larger files
+ *) Error recovery; it's all there, it just needs to be finished.
+ *) Document how the chains work.
+
+ -Brian
+*/
+
+#ifdef __GNUC__
+#pragma implementation // gcc: Class implementation
+#endif
+
+#include "mysql_priv.h"
+#include "ha_tina.h"
+#include <sys/mman.h>
+
+/* Stuff for shares */
+pthread_mutex_t tina_mutex;
+static HASH tina_open_tables;
+static int tina_init= 0;
+
+/*****************************************************************************
+ ** TINA tables
+ *****************************************************************************/
+
+/*
+ Used for sorting chains.
+*/
+int sort_set (tina_set *a, tina_set *b)
+{
+ return ( a->begin > b->begin ? 1 : ( a->begin < b->begin ? -1 : 0 ) );
+}
+
+static byte* tina_get_key(TINA_SHARE *share,uint *length,
+ my_bool not_used __attribute__((unused)))
+{
+ *length=share->table_name_length;
+ return (byte*) share->table_name;
+}
+
+/*
+ Reloads the mmap file.
+*/
+int get_mmap(TINA_SHARE *share, int write)
+{
+ DBUG_ENTER("ha_tina::get_mmap");
+ if (share->mapped_file && munmap(share->mapped_file, share->file_stat.st_size))
+ DBUG_RETURN(1);
+
+ if (my_fstat(share->data_file, &share->file_stat, MYF(MY_WME)) == -1)
+ DBUG_RETURN(1);
+
+ if (share->file_stat.st_size)
+ {
+ if (write)
+ share->mapped_file= (byte *)mmap(NULL, share->file_stat.st_size,
+ PROT_READ|PROT_WRITE, MAP_SHARED,
+ share->data_file, 0);
+ else
+ share->mapped_file= (byte *)mmap(NULL, share->file_stat.st_size,
+ PROT_READ, MAP_PRIVATE,
+ share->data_file, 0);
+ if ((share->mapped_file ==(caddr_t)-1))
+ {
+ /*
+ Bad idea, you think? See, the problem is that nothing actually checks
+ the return value of ::rnd_init(), so raising an error here is about
+ all we can do.
+ Never going to happen, right? :)
+ */
+ my_message(errno, "Woops, blew up opening a mapped file", 0);
+ DBUG_ASSERT(0);
+ DBUG_RETURN(1);
+ }
+ }
+ else
+ share->mapped_file= NULL;
+
+ DBUG_RETURN(0);
+}
+
+/*
+ Simple lock controls.
+*/
+static TINA_SHARE *get_share(const char *table_name, TABLE *table)
+{
+ TINA_SHARE *share;
+ char *tmp_name;
+ uint length;
+
+ if (!tina_init)
+ {
+ /* Hijack a mutex for init'ing the storage engine */
+ pthread_mutex_lock(&LOCK_mysql_create_db);
+ if (!tina_init)
+ {
+ tina_init++;
+ VOID(pthread_mutex_init(&tina_mutex,MY_MUTEX_INIT_FAST));
+ (void) hash_init(&tina_open_tables,system_charset_info,32,0,0,
+ (hash_get_key) tina_get_key,0,0);
+ }
+ pthread_mutex_unlock(&LOCK_mysql_create_db);
+ }
+ pthread_mutex_lock(&tina_mutex);
+ length=(uint) strlen(table_name);
+ if (!(share=(TINA_SHARE*) hash_search(&tina_open_tables,
+ (byte*) table_name,
+ length)))
+ {
+ char data_file_name[FN_REFLEN];
+ if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
+ &share, sizeof(*share),
+ &tmp_name, length+1,
+ NullS))
+ {
+ pthread_mutex_unlock(&tina_mutex);
+ return NULL;
+ }
+
+ share->use_count=0;
+ share->table_name_length=length;
+ share->table_name=tmp_name;
+ strmov(share->table_name,table_name);
+ fn_format(data_file_name, table_name, "", ".CSV",MY_REPLACE_EXT|MY_UNPACK_FILENAME);
+ if (my_hash_insert(&tina_open_tables, (byte*) share))
+ goto error;
+ thr_lock_init(&share->lock);
+ pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST);
+
+ if ((share->data_file= my_open(data_file_name, O_RDWR, MYF(0))) == -1)
+ goto error2;
+
+ /* We only use share->data_file for writing, so we seek to the end to append */
+ if (my_seek(share->data_file, 0, SEEK_END, MYF(0)) == MY_FILEPOS_ERROR)
+ goto error2;
+
+ share->mapped_file= NULL; // We don't know the state since we just allocated it
+ if (get_mmap(share, 0) > 0)
+ goto error3;
+ }
+ share->use_count++;
+ pthread_mutex_unlock(&tina_mutex);
+
+ return share;
+
+error3:
+ my_close(share->data_file,MYF(0));
+error2:
+ thr_lock_delete(&share->lock);
+ pthread_mutex_destroy(&share->mutex);
+error:
+ pthread_mutex_unlock(&tina_mutex);
+ my_free((gptr) share, MYF(0));
+
+ return NULL;
+}
+
+
+/*
+ Free lock controls.
+*/
+static int free_share(TINA_SHARE *share)
+{
+ DBUG_ENTER("ha_tina::free_share");
+ pthread_mutex_lock(&tina_mutex);
+ int result_code= 0;
+ if (!--share->use_count){
+ /* Drop the mapped file */
+ if (share->mapped_file)
+ munmap(share->mapped_file, share->file_stat.st_size);
+ result_code= my_close(share->data_file,MYF(0));
+ hash_delete(&tina_open_tables, (byte*) share);
+ thr_lock_delete(&share->lock);
+ pthread_mutex_destroy(&share->mutex);
+ my_free((gptr) share, MYF(0));
+ }
+ pthread_mutex_unlock(&tina_mutex);
+
+ DBUG_RETURN(result_code);
+}
+
+
+/*
+ Finds the end of a line.
+ Currently only supports files written on a UNIX OS.
+*/
+byte * find_eoln(byte *data, off_t begin, off_t end)
+{
+ for (off_t x= begin; x < end; x++)
+ if (data[x] == '\n')
+ return data + x;
+
+ return 0;
+}
+
+/*
+ Encode a buffer into the quoted format.
+*/
+int ha_tina::encode_quote(byte *buf)
+{
+ char attribute_buffer[1024];
+ String attribute(attribute_buffer, sizeof(attribute_buffer), &my_charset_bin);
+
+ buffer.length(0);
+ for (Field **field=table->field ; *field ; field++)
+ {
+ const char *ptr;
+ const char *end_ptr;
+
+ (*field)->val_str(&attribute,&attribute);
+ ptr= attribute.ptr();
+ end_ptr= attribute.length() + ptr;
+
+ buffer.append('"');
+
+ while (ptr < end_ptr)
+ {
+ if (*ptr == '"')
+ {
+ buffer.append('\\');
+ buffer.append('"');
+ ptr++;
+ }
+ else if (*ptr == '\r')
+ {
+ buffer.append('\\');
+ buffer.append('r');
+ ptr++;
+ }
+ else if (*ptr == '\\')
+ {
+ buffer.append('\\');
+ buffer.append('\\');
+ ptr++;
+ }
+ else if (*ptr == '\n')
+ {
+ buffer.append('\\');
+ buffer.append('n');
+ ptr++;
+ }
+ else
+ buffer.append(*ptr++);
+ }
+ buffer.append('"');
+ buffer.append(',');
+ }
+ // Remove the comma, add a line feed
+ buffer.length(buffer.length() - 1);
+ buffer.append('\n');
+ //buffer.replace(buffer.length(), 0, "\n", 1);
+
+ return (buffer.length());
+}
+
+/*
+ chain_append() adds delete positions to the chain that we use to keep track of space.
+*/
+int ha_tina::chain_append()
+{
+ if ( chain_ptr != chain && (chain_ptr -1)->end == current_position)
+ (chain_ptr -1)->end= next_position;
+ else
+ {
+ /* We set up for the next position */
+ if ((off_t)(chain_ptr - chain) == (chain_size -1))
+ {
+ off_t location= chain_ptr - chain;
+ chain_size += DEFAULT_CHAIN_LENGTH;
+ if (chain_alloced)
+ {
+ /* Must cast since my_malloc, unlike malloc, doesn't return a void pointer */
+ if ((chain= (tina_set *)my_realloc((gptr)chain,chain_size,MYF(MY_WME))) == NULL)
+ return -1;
+ }
+ else
+ {
+ tina_set *ptr= (tina_set *)my_malloc(chain_size * sizeof(tina_set),MYF(MY_WME));
+ memcpy(ptr, chain, DEFAULT_CHAIN_LENGTH * sizeof(tina_set));
+ chain= ptr;
+ chain_alloced++;
+ }
+ chain_ptr= chain + location;
+ }
+ chain_ptr->begin= current_position;
+ chain_ptr->end= next_position;
+ chain_ptr++;
+ }
+
+ return 0;
+}
+
+
+/*
+ Scans for a row.
+*/
+int ha_tina::find_current_row(byte *buf)
+{
+ byte *mapped_ptr= (byte *)share->mapped_file + current_position;
+ byte *end_ptr;
+ DBUG_ENTER("ha_tina::find_current_row");
+
+ /* EOF should be counted as new line */
+ if ((end_ptr= find_eoln(share->mapped_file, current_position, share->file_stat.st_size)) == 0)
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+
+ for (Field **field=table->field ; *field ; field++)
+ {
+ int x;
+ buffer.length(0);
+ mapped_ptr++; // Increment past the first quote
+ for(;mapped_ptr != end_ptr; mapped_ptr++)
+ {
+ //Need to convert line feeds!
+ if (*mapped_ptr == '"' &&
+ (((mapped_ptr[1] == ',') && (mapped_ptr[2] == '"')) || (mapped_ptr == end_ptr -1 )))
+ {
+ mapped_ptr += 2; // Move past the , and the "
+ break;
+ }
+ if (*mapped_ptr == '\\' && mapped_ptr != (end_ptr - 1))
+ {
+ mapped_ptr++;
+ if (*mapped_ptr == 'r')
+ buffer.append('\r');
+ else if (*mapped_ptr == 'n' )
+ buffer.append('\n');
+ else if ((*mapped_ptr == '\\') || (*mapped_ptr == '"'))
+ buffer.append(*mapped_ptr);
+ else /* This could only happen with an externally created file */
+ {
+ buffer.append('\\');
+ buffer.append(*mapped_ptr);
+ }
+ }
+ else
+ buffer.append(*mapped_ptr);
+ }
+ (*field)->store(buffer.ptr(), buffer.length(), system_charset_info);
+ }
+ next_position= (end_ptr - share->mapped_file)+1;
+ /* Maybe use \N for null? */
+ memset(buf, 0, table->null_bytes); /* We do not implement nulls! */
+
+ DBUG_RETURN(0);
+}
+
+/*
+ If frm_error() is called in table.cc this is called to find out what file
+ extensions exist for this handler.
+*/
+const char **ha_tina::bas_ext() const
+{ static const char *ext[]= { ".CSV", NullS }; return ext; }
+
+
+/*
+ Open a database file. Keep in mind that tables are caches, so
+ this will not be called for every request. Any sort of positions
+ that need to be reset should be kept in the ::extra() call.
+*/
+int ha_tina::open(const char *name, int mode, uint test_if_locked)
+{
+ DBUG_ENTER("ha_tina::open");
+
+ if (!(share= get_share(name, table)))
+ DBUG_RETURN(1);
+ thr_lock_data_init(&share->lock,&lock,NULL);
+ ref_length=sizeof(off_t);
+
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Close a database file. We remove ourselves from the shared structure.
+ If it is empty we destroy it and free the mapped file.
+*/
+int ha_tina::close(void)
+{
+ DBUG_ENTER("ha_tina::close");
+ DBUG_RETURN(free_share(share));
+}
+
+/*
+ This is an INSERT. At the moment this handler just seeks to the end
+ of the file and appends the data. In an error case it really should
+ just truncate to the original position (this is not done yet).
+*/
+int ha_tina::write_row(byte * buf)
+{
+ int size;
+ DBUG_ENTER("ha_tina::write_row");
+
+ statistic_increment(ha_write_count,&LOCK_status);
+
+ if (table->timestamp_default_now)
+ update_timestamp(buf+table->timestamp_default_now-1);
+
+ size= encode_quote(buf);
+
+ if (my_write(share->data_file, buffer.ptr(), size, MYF(MY_WME | MY_NABP)))
+ DBUG_RETURN(-1);
+
+ /*
+ Ok, this means that we will be doing potentially bad things
+ during a bulk insert on some OSes. What we need is a cleanup
+ call for ::write_row that would let us fix up everything after the bulk
+ insert. The archive handler does this with an extra mutex call, which
+ might be a solution for this.
+ */
+ if (get_mmap(share, 0) > 0)
+ DBUG_RETURN(-1);
+ DBUG_RETURN(0);
+}
+
+
+/*
+ This is called for an update.
+ Make sure you put in code to update the auto_increment value and
+ any timestamp data. Currently the auto_increment value is not being
+ handled, since auto_increment has yet to be added to this table handler.
+ In a table scan this will be called right after the ::rnd_next() call
+ that found this row.
+*/
+int ha_tina::update_row(const byte * old_data, byte * new_data)
+{
+ int size;
+ DBUG_ENTER("ha_tina::update_row");
+
+ statistic_increment(ha_update_count,&LOCK_status);
+
+ if (table->timestamp_default_now)
+ update_timestamp(new_data+table->timestamp_default_now-1);
+
+ size= encode_quote(new_data);
+
+ if (chain_append())
+ DBUG_RETURN(-1);
+
+ if (my_write(share->data_file, buffer.ptr(), size, MYF(MY_WME | MY_NABP)))
+ DBUG_RETURN(-1);
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Deletes a row. First the database will find the row, and then call this method.
+ In the case of a table scan, the previous call to this will be the ::rnd_next()
+ that found this row.
+ The exception to this is an ORDER BY. This will cause the table handler to walk
+ the table noting the positions of all rows that match the query. The rows will
+ then be deleted/positioned based on the ORDER (so RANDOM, DESC, ASC).
+*/
+int ha_tina::delete_row(const byte * buf)
+{
+ DBUG_ENTER("ha_tina::delete_row");
+ statistic_increment(ha_delete_count,&LOCK_status);
+
+ if (chain_append())
+ DBUG_RETURN(-1);
+
+ --records;
+
+ DBUG_RETURN(0);
+}
+
+/*
+ Fill buf with the value from key. Simply put, this is used for a single index read
+ with a key.
+*/
+int ha_tina::index_read(byte * buf, const byte * key,
+ uint key_len __attribute__((unused)),
+ enum ha_rkey_function find_flag
+ __attribute__((unused)))
+{
+ DBUG_ENTER("ha_tina::index_read");
+ DBUG_ASSERT(0);
+ DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+}
+
+/*
+ Fill buf with the value from key. Simply put, this is used for a single index read
+ with a key.
+ Whatever the current key is, we will use it. This is what will be in "index".
+*/
+int ha_tina::index_read_idx(byte * buf, uint index, const byte * key,
+ uint key_len __attribute__((unused)),
+ enum ha_rkey_function find_flag
+ __attribute__((unused)))
+{
+ DBUG_ENTER("ha_tina::index_read_idx");
+ DBUG_ASSERT(0);
+ DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+}
+
+
+/*
+ Read the next position in the index.
+*/
+int ha_tina::index_next(byte * buf)
+{
+ DBUG_ENTER("ha_tina::index_next");
+ DBUG_ASSERT(0);
+ DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+}
+
+/*
+ Read the previous position in the index.
+*/
+int ha_tina::index_prev(byte * buf)
+{
+ DBUG_ENTER("ha_tina::index_prev");
+ DBUG_ASSERT(0);
+ DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+}
+
+/*
+ Read the first position in the index
+*/
+int ha_tina::index_first(byte * buf)
+{
+ DBUG_ENTER("ha_tina::index_first");
+ DBUG_ASSERT(0);
+ DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+}
+
+/*
+ Read the last position in the index.
+ With this we don't need to do a filesort() when using the index;
+ we just read the last row and call previous.
+*/
+int ha_tina::index_last(byte * buf)
+{
+ DBUG_ENTER("ha_tina::index_last");
+ DBUG_ASSERT(0);
+ DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED);
+}
+
+/*
+ All table scans call this first.
+ The order of a table scan is:
+
+ ha_tina::store_lock
+ ha_tina::external_lock
+ ha_tina::info
+ ha_tina::rnd_init
+ ha_tina::extra
+ ENUM HA_EXTRA_CACHE Cache record in HA_rrnd()
+ ha_tina::rnd_next
+ ha_tina::rnd_next
+ ha_tina::rnd_next
+ ha_tina::rnd_next
+ ha_tina::rnd_next
+ ha_tina::rnd_next
+ ha_tina::rnd_next
+ ha_tina::rnd_next
+ ha_tina::rnd_next
+ ha_tina::extra
+ ENUM HA_EXTRA_NO_CACHE End caching of records (def)
+ ha_tina::external_lock
+ ha_tina::extra
+ ENUM HA_EXTRA_RESET Reset database to after open
+
+ Each call to ::rnd_next() represents a row returned in the scan. When no more
+ rows can be returned, rnd_next() returns a value of HA_ERR_END_OF_FILE.
+ The ::info() call is just for the optimizer.
+
+*/
+
+int ha_tina::rnd_init(bool scan)
+{
+ DBUG_ENTER("ha_tina::rnd_init");
+
+ current_position= next_position= 0;
+ records= 0;
+ chain_ptr= chain;
+ (void)madvise(share->mapped_file,share->file_stat.st_size,MADV_SEQUENTIAL);
+
+ DBUG_RETURN(0);
+}
+
+/*
+ ::rnd_next() does all the heavy lifting for a table scan. You will need to populate *buf
+ with the correct field data. You can walk the fields to determine at what position you
+ should store the data (take a look at how ::find_current_row() works). The structure
+ is something like:
+ 0Foo Dog Friend
+ The first offset is for the first attribute. All space before that is reserved for null count.
+ Basically this works as a mask for which fields are nulled (compared to just empty).
+ This table handler doesn't do nulls and does not know the difference between NULL and "". This
+ is ok since this table handler is for spreadsheets and they don't know about them either :)
+*/
+int ha_tina::rnd_next(byte *buf)
+{
+ DBUG_ENTER("ha_tina::rnd_next");
+
+ statistic_increment(ha_read_rnd_next_count,&LOCK_status);
+
+ current_position= next_position;
+ if (!share->mapped_file)
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+ if (HA_ERR_END_OF_FILE == find_current_row(buf) )
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+
+ records++;
+ DBUG_RETURN(0);
+}
+
+/*
+ In the case of an ORDER BY, rows will need to be sorted.
+ ::position() is called after each call to ::rnd_next();
+ the data it stores goes into a byte array. You can store this
+ data via ha_store_ptr(). ref_length is a variable defined in the
+ class that is the sizeof() of the position being stored. In our case
+ it's just a position. Look at the bdb code if you want to see a case
+ where something other than a number is stored.
+*/
+void ha_tina::position(const byte *record)
+{
+ DBUG_ENTER("ha_tina::position");
+ ha_store_ptr(ref, ref_length, current_position);
+ DBUG_VOID_RETURN;
+}
+
+
+/*
+ Used to fetch a row from a position stored with ::position().
+ ha_get_ptr() retrieves the data for you.
+*/
+
+int ha_tina::rnd_pos(byte * buf, byte *pos)
+{
+ DBUG_ENTER("ha_tina::rnd_pos");
+ statistic_increment(ha_read_rnd_count,&LOCK_status);
+ current_position= ha_get_ptr(pos,ref_length);
+ DBUG_RETURN(find_current_row(buf));
+}
+
+/*
+ ::info() is used to return information to the optimizer.
+ Currently this table handler doesn't implement most of the fields
+ that are really needed. SHOW also makes use of this data.
+*/
+void ha_tina::info(uint flag)
+{
+ DBUG_ENTER("ha_tina::info");
+ /* This is a lie, but you don't want the optimizer to see zero or 1 */
+ if (records < 2)
+ records= 2;
+ DBUG_VOID_RETURN;
+}
+
+/*
+ Grab bag of flags that are sent to the table handler every so often.
+ HA_EXTRA_RESET and HA_EXTRA_RESET_STATE are the most frequently called.
+ You are not required to implement any of these.
+*/
+int ha_tina::extra(enum ha_extra_function operation)
+{
+ DBUG_ENTER("ha_tina::extra");
+ DBUG_RETURN(0);
+}
+
+/*
+ This is no longer used.
+*/
+int ha_tina::reset(void)
+{
+ DBUG_ENTER("ha_tina::reset");
+ ha_tina::extra(HA_EXTRA_RESET);
+ DBUG_RETURN(0);
+}
+
+
+/*
+ Called after deletes, inserts, and updates. This is where we clean up all of
+ the dead space we have collected while writing the file.
+*/
+int ha_tina::rnd_end()
+{
+ DBUG_ENTER("ha_tina::rnd_end");
+
+ /* First position will be truncate position, second will be increment */
+ if ((chain_ptr - chain) > 0)
+ {
+ tina_set *ptr;
+ off_t length;
+
+ /*
+ Set up a writable map; after the get_mmap call it will contain all of
+ the data that we have added to the file.
+ */
+ if (get_mmap(share, 1) > 0)
+ DBUG_RETURN(-1);
+ length= share->file_stat.st_size;
+
+ /*
+ The sort handles updates/deletes that arrive in random order.
+ It also sorts so that we move the final blocks to the
+ beginning, moving the smallest amount of data possible.
+ */
+ qsort(chain, (size_t)(chain_ptr - chain), sizeof(tina_set), (qsort_cmp)sort_set);
+ for (ptr= chain; ptr < chain_ptr; ptr++)
+ printf("Chain %d, %d\n", (int)ptr->begin, (int)ptr->end);
+ for (ptr= chain; ptr < chain_ptr; ptr++)
+ {
+ //memmove(share->mapped_file + ptr->begin, share->mapped_file
+ //+ ptr->end, length - (size_t)ptr->end);
+ /* We peek ahead to see if this is the last chain */
+ printf("Delete %d, %d, %d\n", (int)ptr->begin, (int)ptr->end, (int)length);
+ if (ptr+1 == chain_ptr)
+ {
+ printf("Shiftina(end) %d(%d) to %d\n", (int)ptr->end, (int)(length - (size_t)ptr->end), (int)ptr->begin);
+ memmove(share->mapped_file + ptr->begin, share->mapped_file + ptr->end,
+ length - (size_t)ptr->end);
+ }
+ else
+ {
+ printf("Shifting %d(%d) to %d\n", (int)ptr->end, (int)((ptr++)->begin - (size_t)ptr->end), (int)ptr->begin);
+ memmove(share->mapped_file + ptr->begin, share->mapped_file + ptr->end,
+ (size_t)(ptr++)->begin - (size_t)ptr->end);
+ }
+ length= length - (size_t)(ptr->end - ptr->begin);
+ }
+ printf("Buffer %s\n",share->mapped_file);
+
+ /* Truncate the file to the new size */
+ if (my_chsize(share->data_file, length, 0, MYF(MY_WME)))
+ DBUG_RETURN(-1);
+
+ if (munmap(share->mapped_file, length))
+ DBUG_RETURN(-1);
+
+ /* We set it to null so that get_mmap() won't try to unmap it */
+ share->mapped_file= NULL;
+ if (get_mmap(share, 0) > 0)
+ DBUG_RETURN(-1);
+ }
+
+ DBUG_RETURN(0);
+}
+
+/*
+ TRUNCATE TABLE and others of its ilk call this.
+*/
+int ha_tina::delete_all_rows()
+{
+ DBUG_ENTER("ha_tina::delete_all_rows");
+
+ int rc= my_chsize(share->data_file, 0, 0, MYF(MY_WME));
+
+ if (get_mmap(share, 0) > 0)
+ DBUG_RETURN(-1);
+
+ DBUG_RETURN(rc);
+}
+
+/*
+ Always called at the start of a transaction (or by "lock tables").
+*/
+int ha_tina::external_lock(THD *thd, int lock_type)
+{
+ DBUG_ENTER("ha_tina::external_lock");
+ DBUG_RETURN(0); // No external locking
+}
+
+/*
+ Called by the database to lock the table. Keep in mind that this
+ is an internal lock.
+*/
+THR_LOCK_DATA **ha_tina::store_lock(THD *thd,
+ THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type)
+{
+ if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
+ lock.type=lock_type;
+ *to++= &lock;
+ return to;
+}
+
+/*
+ Range optimizer calls this.
+ I need to update the information on this.
+*/
+ha_rows ha_tina::records_in_range(int inx,
+ const byte *start_key,uint start_key_len,
+ enum ha_rkey_function start_search_flag,
+ const byte *end_key,uint end_key_len,
+ enum ha_rkey_function end_search_flag)
+{
+ DBUG_ENTER("ha_tina::records_in_range ");
+ DBUG_RETURN(records); // Good guess
+}
+
+
+/*
+ Create a table. You do not want to leave the table open after a call to
+ this (the database will call ::open() if it needs to).
+*/
+
+int ha_tina::create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info)
+{
+ char name_buff[FN_REFLEN];
+ File create_file;
+ DBUG_ENTER("ha_tina::create");
+
+ if ((create_file= my_create(fn_format(name_buff,name,"",".CSV",MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
+ O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
+ DBUG_RETURN(-1);
+
+ my_close(create_file,MYF(0));
+
+ DBUG_RETURN(0);
+}
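The row format produced by encode_quote() and parsed back by find_current_row() above is simple enough to reproduce outside the server: every field is wrapped in double quotes, double quotes, carriage returns, newlines and backslashes are backslash-escaped, fields are separated by commas, and the row ends with a line feed. Below is a minimal standalone C++ sketch of that quoting scheme; the quote_field()/quote_row() helpers and the sample values are hypothetical, not part of the patch.

// Standalone sketch of the quoting scheme used by ha_tina::encode_quote().
#include <cstdio>
#include <string>
#include <vector>

// Quote one field the way encode_quote() does: wrap in double quotes and
// backslash-escape '"', '\r', '\n' and '\\'.
static std::string quote_field(const std::string &value)
{
  std::string out = "\"";
  for (char c : value)
  {
    switch (c)
    {
    case '"':  out += "\\\""; break;
    case '\r': out += "\\r";  break;
    case '\n': out += "\\n";  break;
    case '\\': out += "\\\\"; break;
    default:   out += c;
    }
  }
  out += '"';
  return out;
}

// Build a whole row: comma-separated quoted fields, newline-terminated,
// mirroring the "remove the comma, add a line feed" step in encode_quote().
static std::string quote_row(const std::vector<std::string> &fields)
{
  std::string row;
  for (size_t i = 0; i < fields.size(); i++)
  {
    row += quote_field(fields[i]);
    row += (i + 1 < fields.size()) ? "," : "\n";
  }
  return row;
}

int main()
{
  std::vector<std::string> fields = {"Foo", "Dog", "a \"quoted\" value\nwith a newline"};
  printf("%s", quote_row(fields).c_str());
  return 0;
}

As the header comment notes, NULL and "" both come out as "" in this format, which is why the engine treats them as equal.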
diff --git a/sql/examples/ha_tina.h b/sql/examples/ha_tina.h
new file mode 100644
index 00000000000..67a907fddb6
--- /dev/null
+++ b/sql/examples/ha_tina.h
@@ -0,0 +1,132 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <my_dir.h>
+
+#define DEFAULT_CHAIN_LENGTH 512
+
+typedef struct st_tina_share {
+ char *table_name;
+ byte *mapped_file; /* mapped region of file */
+ uint table_name_length,use_count;
+ MY_STAT file_stat; /* Stat information for the data file */
+ File data_file; /* Current open data file */
+ pthread_mutex_t mutex;
+ THR_LOCK lock;
+} TINA_SHARE;
+
+typedef struct tina_set {
+ off_t begin;
+ off_t end;
+} tina_set;
+
+class ha_tina: public handler
+{
+ THR_LOCK_DATA lock; /* MySQL lock */
+ TINA_SHARE *share; /* Shared lock info */
+ off_t current_position; /* Current position in the file during a file scan */
+ off_t next_position; /* Next position in the file scan */
+ byte byte_buffer[IO_SIZE];
+ String buffer;
+ tina_set chain_buffer[DEFAULT_CHAIN_LENGTH];
+ tina_set *chain;
+ tina_set *chain_ptr;
+ byte chain_alloced;
+ uint32 chain_size;
+
+ public:
+ ha_tina(TABLE *table): handler(table),
+ /*
+ These definitions are found in handler.h.
+ These are probably not completely right.
+ */
+ current_position(0), next_position(0), chain_alloced(0), chain_size(DEFAULT_CHAIN_LENGTH)
+ {
+ /* Set our original buffers from pre-allocated memory */
+ buffer.set(byte_buffer, IO_SIZE, system_charset_info);
+ chain = chain_buffer;
+ }
+ ~ha_tina()
+ {
+ if (chain_alloced)
+ my_free((gptr)chain,0);
+ }
+ const char *table_type() const { return "CSV"; }
+ const char *index_type(uint inx) { return "NONE"; }
+ const char **bas_ext() const;
+ ulong table_flags() const
+ {
+ return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT |
+ HA_NO_AUTO_INCREMENT );
+ }
+ ulong index_flags(uint idx, uint part, bool all_parts) const
+ {
+ /* We will never have indexes, so this will never be called (aka we return zero) */
+ return 0;
+ }
+ uint max_record_length() const { return HA_MAX_REC_LENGTH; }
+ uint max_keys() const { return 0; }
+ uint max_key_parts() const { return 0; }
+ uint max_key_length() const { return 0; }
+ /*
+ Called in test_quick_select to determine if indexes should be used.
+ */
+ virtual double scan_time() { return (double) (records+deleted) / 20.0+10; }
+ /* The next method will never be called */
+ virtual double read_time(ha_rows rows) { DBUG_ASSERT(0); return((double) rows / 20.0+1); }
+ virtual bool fast_key_read() { return 1;}
+
+ int open(const char *name, int mode, uint test_if_locked);
+ int close(void);
+ int write_row(byte * buf);
+ int update_row(const byte * old_data, byte * new_data);
+ int delete_row(const byte * buf);
+ int index_read(byte * buf, const byte * key,
+ uint key_len, enum ha_rkey_function find_flag);
+ int index_read_idx(byte * buf, uint idx, const byte * key,
+ uint key_len, enum ha_rkey_function find_flag);
+ int index_next(byte * buf);
+ int index_prev(byte * buf);
+ int index_first(byte * buf);
+ int index_last(byte * buf);
+ int rnd_init(bool scan=1);
+ int rnd_next(byte *buf);
+ int rnd_pos(byte * buf, byte *pos);
+ int rnd_end();
+ void position(const byte *record);
+ void info(uint);
+ int extra(enum ha_extra_function operation);
+ int reset(void);
+ int external_lock(THD *thd, int lock_type);
+ int delete_all_rows(void);
+ ha_rows records_in_range(int inx, const byte *start_key,uint start_key_len,
+ enum ha_rkey_function start_search_flag,
+ const byte *end_key,uint end_key_len,
+ enum ha_rkey_function end_search_flag);
+// int delete_table(const char *from);
+// int rename_table(const char * from, const char * to);
+ int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
+
+ THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
+ enum thr_lock_type lock_type);
+
+ /* The following methods were added just for TINA */
+ int encode_quote(byte *buf);
+ int find_current_row(byte *buf);
+ int chain_append();
+};
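The chain/chain_ptr members declared above hold the [begin, end) offsets of deleted or replaced rows that chain_append() records and that rnd_end() later compacts out of the mapped file. What follows is a rough standalone sketch of that compaction idea over an in-memory buffer, assuming sorted, non-overlapping ranges; the dead_range struct and compact() helper are illustrative stand-ins for tina_set and the rnd_end() loop, not the engine's code.

// Standalone sketch of the dead-space compaction performed by ha_tina::rnd_end().
#include <algorithm>
#include <cstdio>
#include <cstring>
#include <string>
#include <vector>

struct dead_range { size_t begin, end; };   // analogue of tina_set: a deleted [begin, end) span

// Slide live bytes over the dead spans, the way rnd_end() memmove()s the
// mapped file, and return the new logical length.
static size_t compact(std::vector<char> &buf, std::vector<dead_range> ranges)
{
  std::sort(ranges.begin(), ranges.end(),
            [](const dead_range &a, const dead_range &b) { return a.begin < b.begin; });

  size_t write_pos = 0;   // next byte to write in the compacted buffer
  size_t read_pos = 0;    // next live byte to read from the original buffer
  for (const dead_range &r : ranges)
  {
    size_t live = r.begin - read_pos;                        // live bytes before this hole
    std::memmove(buf.data() + write_pos, buf.data() + read_pos, live);
    write_pos += live;
    read_pos = r.end;                                        // skip the hole
  }
  size_t tail = buf.size() - read_pos;                       // live bytes after the last hole
  std::memmove(buf.data() + write_pos, buf.data() + read_pos, tail);
  buf.resize(write_pos + tail);                              // like my_chsize() truncating the file
  return buf.size();
}

int main()
{
  std::string rows = "\"a\",\"1\"\n\"b\",\"2\"\n\"c\",\"3\"\n";
  std::vector<char> buf(rows.begin(), rows.end());
  compact(buf, {{8, 16}});                                   // pretend the middle row was deleted
  printf("%.*s", (int)buf.size(), buf.data());               // prints the first and last rows only
  return 0;
}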
diff --git a/sql/handler.cc b/sql/handler.cc
index 41a252e3088..7010b5284b8 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -38,6 +38,9 @@
#ifdef HAVE_ARCHIVE_DB
#include "examples/ha_archive.h"
#endif
+#ifdef HAVE_CSV_DB
+#include "examples/ha_tina.h"
+#endif
#ifdef HAVE_INNOBASE_DB
#include "ha_innodb.h"
#endif
@@ -91,6 +94,8 @@ struct show_table_type_st sys_table_types[]=
"Example storage engine", DB_TYPE_EXAMPLE_DB},
{"ARCHIVE",&have_archive_db,
"Archive storage engine", DB_TYPE_ARCHIVE_DB},
+ {"CSV",&have_csv_db,
+ "CSV storage engine", DB_TYPE_CSV_DB},
{NullS, NULL, NullS, DB_TYPE_UNKNOWN}
};
@@ -196,6 +201,10 @@ handler *get_new_handler(TABLE *table, enum db_type db_type)
case DB_TYPE_ARCHIVE_DB:
return new ha_archive(table);
#endif
+#ifdef HAVE_CSV_DB
+ case DB_TYPE_CSV_DB:
+ return new ha_tina(table);
+#endif
#ifdef HAVE_NDBCLUSTER_DB
case DB_TYPE_NDBCLUSTER:
return new ha_ndbcluster(table);
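Outside of ha_tina itself, the patch wires the engine into the server the same way EXAMPLE and ARCHIVE are wired in: a DB_TYPE_CSV_DB enum value, a have_csv_db availability flag, a sys_table_types descriptor row, and a case in get_new_handler(), all guarded by HAVE_CSV_DB. The compressed, self-contained sketch below shows that pattern; the types and names here (handler, ha_tina_stub, etc.) are simplified stand-ins, not the server's real classes.

// Self-contained sketch of the engine-registration pattern this patch follows.
#include <cstdio>

#define HAVE_CSV_DB 1   // in the real build, set by configure when the CSV engine option is enabled

enum db_type { DB_TYPE_MYISAM, DB_TYPE_CSV_DB, DB_TYPE_UNKNOWN };
enum show_comp_option { SHOW_OPTION_YES, SHOW_OPTION_NO };

struct handler { virtual const char *table_type() const = 0; virtual ~handler() {} };
struct ha_tina_stub : handler { const char *table_type() const { return "CSV"; } };
struct ha_myisam_stub : handler { const char *table_type() const { return "MyISAM"; } };

#ifdef HAVE_CSV_DB
show_comp_option have_csv_db= SHOW_OPTION_YES;   // what SHOW VARIABLES reports as have_csv
#else
show_comp_option have_csv_db= SHOW_OPTION_NO;
#endif

// The equivalent of the new case added to get_new_handler().
handler *get_new_handler(db_type type)
{
  switch (type)
  {
#ifdef HAVE_CSV_DB
  case DB_TYPE_CSV_DB: return new ha_tina_stub();
#endif
  default:             return new ha_myisam_stub();
  }
}

int main()
{
  handler *h= get_new_handler(DB_TYPE_CSV_DB);
  printf("engine: %s\n", h->table_type());       // prints "CSV" when HAVE_CSV_DB is defined
  delete h;
  return 0;
}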
diff --git a/sql/handler.h b/sql/handler.h
index 28b0b8df6e2..542229dcaf2 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -147,7 +147,7 @@ enum db_type
DB_TYPE_MRG_ISAM, DB_TYPE_MYISAM, DB_TYPE_MRG_MYISAM,
DB_TYPE_BERKELEY_DB, DB_TYPE_INNODB,
DB_TYPE_GEMINI, DB_TYPE_NDBCLUSTER,
- DB_TYPE_EXAMPLE_DB, DB_TYPE_ARCHIVE_DB,
+ DB_TYPE_EXAMPLE_DB, DB_TYPE_ARCHIVE_DB, DB_TYPE_CSV_DB,
DB_TYPE_DEFAULT // Must be last
};
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 8707bc205df..2f2fc156af1 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -930,7 +930,7 @@ extern struct my_option my_long_options[];
/* optional things, have_* variables */
extern SHOW_COMP_OPTION have_isam, have_innodb, have_berkeley_db;
-extern SHOW_COMP_OPTION have_example_db, have_archive_db;
+extern SHOW_COMP_OPTION have_example_db, have_archive_db, have_csv_db;
extern SHOW_COMP_OPTION have_raid, have_openssl, have_symlink;
extern SHOW_COMP_OPTION have_query_cache, have_berkeley_db, have_innodb;
extern SHOW_COMP_OPTION have_geometry, have_rtree_keys;
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 4fd13d33bab..4f0a2f63a7f 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -366,7 +366,7 @@ CHARSET_INFO *system_charset_info, *files_charset_info ;
CHARSET_INFO *national_charset_info, *table_alias_charset;
SHOW_COMP_OPTION have_berkeley_db, have_innodb, have_isam, have_ndbcluster,
- have_example_db, have_archive_db;
+ have_example_db, have_archive_db, have_csv_db;
SHOW_COMP_OPTION have_raid, have_openssl, have_symlink, have_query_cache;
SHOW_COMP_OPTION have_geometry, have_rtree_keys;
SHOW_COMP_OPTION have_crypt, have_compress;
@@ -5456,6 +5456,11 @@ static void mysql_init_variables(void)
#else
have_archive_db= SHOW_OPTION_NO;
#endif
+#ifdef HAVE_CSV_DB
+ have_csv_db= SHOW_OPTION_YES;
+#else
+ have_csv_db= SHOW_OPTION_NO;
+#endif
#ifdef HAVE_NDBCLUSTER_DB
have_ndbcluster=SHOW_OPTION_DISABLED;
#else
diff --git a/sql/set_var.cc b/sql/set_var.cc
index e1cfb77d297..93123b12c38 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -652,6 +652,8 @@ struct show_var_st init_vars[]= {
{"have_bdb", (char*) &have_berkeley_db, SHOW_HAVE},
{"have_compress", (char*) &have_compress, SHOW_HAVE},
{"have_crypt", (char*) &have_crypt, SHOW_HAVE},
+ {"have_csv", (char*) &have_csv_db, SHOW_HAVE},
+ {"have_example", (char*) &have_example_db, SHOW_HAVE},
{"have_innodb", (char*) &have_innodb, SHOW_HAVE},
{"have_isam", (char*) &have_isam, SHOW_HAVE},
{"have_geometry", (char*) &have_geometry, SHOW_HAVE},