Diffstat (limited to 'sql')
-rw-r--r--  sql/Makefile.am                32
-rw-r--r--  sql/ha_archive.cc            1499
-rw-r--r--  sql/ha_archive.h              136
-rw-r--r--  sql/ha_berkeley.cc             18
-rw-r--r--  sql/ha_blackhole.cc           252
-rw-r--r--  sql/ha_blackhole.h             88
-rw-r--r--  sql/ha_federated.cc            19
-rw-r--r--  sql/ha_heap.cc                 13
-rw-r--r--  sql/ha_innodb.cc               17
-rw-r--r--  sql/ha_myisam.cc               16
-rw-r--r--  sql/ha_myisammrg.cc            15
-rw-r--r--  sql/ha_ndbcluster.cc           19
-rw-r--r--  sql/ha_ndbcluster_binlog.cc     2
-rw-r--r--  sql/ha_partition.cc            19
-rw-r--r--  sql/handler.cc                153
-rw-r--r--  sql/handler.h                   5
-rw-r--r--  sql/handlerton-win.cc          72
-rw-r--r--  sql/handlerton.cc.in           14
-rw-r--r--  sql/log.cc                     15
-rw-r--r--  sql/mysqld.cc                  30
-rw-r--r--  sql/partition_info.cc           3
-rw-r--r--  sql/sql_builtin.cc.in          13
-rw-r--r--  sql/sql_delete.cc               2
-rw-r--r--  sql/sql_plugin.cc              75
-rw-r--r--  sql/sql_show.cc                45
-rw-r--r--  sql/sql_yacc.yy                 2
26 files changed, 365 insertions, 2209 deletions
diff --git a/sql/Makefile.am b/sql/Makefile.am
index 60e7891931f..2665e3fcfd5 100644
--- a/sql/Makefile.am
+++ b/sql/Makefile.am
@@ -30,10 +30,7 @@ libexec_PROGRAMS = mysqld
noinst_PROGRAMS = gen_lex_hash
bin_PROGRAMS = mysql_tzinfo_to_sql
gen_lex_hash_LDFLAGS = @NOINST_LDFLAGS@
-LDADD = $(top_builddir)/storage/myisam/libmyisam.a \
- $(top_builddir)/storage/myisammrg/libmyisammrg.a \
- $(top_builddir)/storage/heap/libheap.a \
- $(top_builddir)/vio/libvio.a \
+LDADD = $(top_builddir)/vio/libvio.a \
$(top_builddir)/mysys/libmysys.a \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/regex/libregex.a \
@@ -41,7 +38,7 @@ LDADD = $(top_builddir)/storage/myisam/libmyisam.a \
mysqld_LDADD = @MYSQLD_EXTRA_LDFLAGS@ \
@pstack_libs@ \
- @mysql_se_objs@ @mysql_se_libs@ \
+ @mysql_plugin_libs@ \
$(LDADD) $(CXXLDFLAGS) $(WRAPLIBS) @LIBDL@ \
@yassl_libs@ @openssl_libs@
noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
@@ -53,6 +50,9 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
sql_manager.h sql_map.h sql_string.h unireg.h \
sql_error.h field.h handler.h mysqld_suffix.h \
ha_heap.h ha_myisam.h ha_myisammrg.h ha_partition.h \
+ ha_innodb.h ha_berkeley.h ha_federated.h \
+ ha_ndbcluster.h ha_ndbcluster_binlog.h \
+ ha_ndbcluster_tables.h
opt_range.h protocol.h rpl_tblmap.h \
log.h sql_show.h rpl_rli.h \
sql_select.h structs.h table.h sql_udf.h hash_filo.h\
@@ -61,12 +61,12 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
rpl_injector.h \
stacktrace.h sql_sort.h sql_cache.h set_var.h \
spatial.h gstream.h client_settings.h tzfile.h \
- tztime.h my_decimal.h\
+ tztime.h my_decimal.h\
sp_head.h sp_pcontext.h sp_rcontext.h sp.h sp_cache.h \
parse_file.h sql_view.h sql_trigger.h \
sql_array.h sql_cursor.h event.h event_priv.h \
sql_plugin.h authors.h sql_partition.h \
- partition_info.h partition_element.h
+ partition_info.h partition_element.h
mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \
item.cc item_sum.cc item_buff.cc item_func.cc \
item_cmpfunc.cc item_strfunc.cc item_timefunc.cc \
@@ -79,7 +79,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \
mysqld.cc password.c hash_filo.cc hostname.cc \
set_var.cc sql_parse.cc sql_yacc.yy \
sql_base.cc table.cc sql_select.cc sql_insert.cc \
- sql_prepare.cc sql_error.cc \
+ sql_prepare.cc sql_error.cc \
sql_update.cc sql_delete.cc uniques.cc sql_do.cc \
procedure.cc item_uniq.cc sql_test.cc \
log.cc log_event.cc init.cc derror.cc sql_acl.cc \
@@ -87,6 +87,9 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \
discover.cc time.cc opt_range.cc opt_sum.cc \
records.cc filesort.cc handler.cc \
ha_heap.cc ha_myisam.cc ha_myisammrg.cc \
+ ha_partition.cc ha_innodb.cc ha_berkeley.cc \
+ ha_federated.cc \
+ ha_ndbcluster.cc ha_ndbcluster_binlog.cc \
sql_db.cc sql_table.cc sql_rename.cc sql_crypt.cc \
sql_load.cc mf_iocache.cc field_conv.cc sql_show.cc \
sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \
@@ -102,15 +105,9 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \
sp_cache.cc parse_file.cc sql_trigger.cc \
event_executor.cc event.cc event_timed.cc \
sql_plugin.cc sql_binlog.cc \
- handlerton.cc sql_tablespace.cc partition_info.cc
-EXTRA_mysqld_SOURCES = ha_innodb.cc ha_berkeley.cc ha_archive.cc \
- ha_innodb.h ha_berkeley.h ha_archive.h \
- ha_blackhole.cc ha_federated.cc ha_ndbcluster.cc \
- ha_blackhole.h ha_federated.h ha_ndbcluster.h \
- ha_ndbcluster_binlog.cc ha_ndbcluster_binlog.h \
- ha_ndbcluster_tables.h \
- ha_partition.cc ha_partition.h
-mysqld_DEPENDENCIES = @mysql_se_objs@
+ sql_builtin.cc sql_tablespace.cc partition_info.cc
+
+
gen_lex_hash_SOURCES = gen_lex_hash.cc
gen_lex_hash_LDADD = $(LDADD) $(CXXLDFLAGS)
mysql_tzinfo_to_sql_SOURCES = mysql_tzinfo_to_sql.cc
@@ -162,6 +159,7 @@ sql_yacc.o: sql_yacc.cc sql_yacc.h $(HEADERS)
lex_hash.h: gen_lex_hash$(EXEEXT)
./gen_lex_hash$(EXEEXT) > $@
+# the following three should eventually be moved out of this directory
ha_berkeley.o: ha_berkeley.cc ha_berkeley.h
$(CXXCOMPILE) @bdb_includes@ $(LM_CFLAGS) -c $<
diff --git a/sql/ha_archive.cc b/sql/ha_archive.cc
deleted file mode 100644
index 403855b6a01..00000000000
--- a/sql/ha_archive.cc
+++ /dev/null
@@ -1,1499 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifdef USE_PRAGMA_IMPLEMENTATION
-#pragma implementation // gcc: Class implementation
-#endif
-
-#include "mysql_priv.h"
-
-#include "ha_archive.h"
-#include <my_dir.h>
-
-/*
- First, if you want to understand storage engines you should look at
- ha_example.cc and ha_example.h.
- This example was written as a test case for a customer who needed
- a storage engine without indexes that could compress data very well.
- So, welcome to a completely compressed storage engine. This storage
- engine only does inserts. No replace, deletes, or updates. All reads are
- complete table scans. Compression is done through azip (bzip compresses
- better, but only marginally, if someone asks I could add support for
- it too, but beaware that it costs a lot more in CPU time then azip).
-
- We keep a file pointer open for each instance of ha_archive for each read
- but for writes we keep one open file handle just for that. We flush it
- only if we have a read occur. azip handles compressing lots of records
- at once much better then doing lots of little records between writes.
- It is possible to not lock on writes but this would then mean we couldn't
- handle bulk inserts as well (that is if someone was trying to read at
- the same time since we would want to flush).
-
- A "meta" file is kept alongside the data file. This file serves two purpose.
- The first purpose is to track the number of rows in the table. The second
- purpose is to determine if the table was closed properly or not. When the
- meta file is first opened it is marked as dirty. It is opened when the table
- itself is opened for writing. When the table is closed the new count for rows
- is written to the meta file and the file is marked as clean. If the meta file
- is opened and it is marked as dirty, it is assumed that a crash occured. At
- this point an error occurs and the user is told to rebuild the file.
- A rebuild scans the rows and rewrites the meta file. If corruption is found
- in the data file then the meta file is not repaired.
-
- At some point a recovery method for such a drastic case needs to be divised.
-
- Locks are row level, and you will get a consistant read.
-
- For performance as far as table scans go it is quite fast. I don't have
- good numbers but locally it has out performed both Innodb and MyISAM. For
- Innodb the question will be if the table can be fit into the buffer
- pool. For MyISAM its a question of how much the file system caches the
- MyISAM file. With enough free memory MyISAM is faster. Its only when the OS
- doesn't have enough memory to cache entire table that archive turns out
- to be any faster. For writes it is always a bit slower then MyISAM. It has no
- internal limits though for row length.
-
- Examples between MyISAM (packed) and Archive.
-
- Table with 76695844 identical rows:
- 29680807 a_archive.ARZ
- 920350317 a.MYD
-
-
- Table with 8991478 rows (all of Slashdot's comments):
- 1922964506 comment_archive.ARZ
- 2944970297 comment_text.MYD
-
-
- TODO:
- Add bzip optional support.
- Allow users to set compression level.
- Add truncate table command.
- Implement versioning, should be easy.
- Allow for errors, find a way to mark bad rows.
- Talk to the azip guys, come up with a writable format so that updates are doable
- without switching to a block method.
- Add optional feature so that rows can be flushed at interval (which will cause less
- compression but may speed up ordered searches).
- Checkpoint the meta file to allow for faster rebuilds.
- Dirty open (right now the meta file is repaired if a crash occured).
- Option to allow for dirty reads, this would lower the sync calls, which would make
- inserts a lot faster, but would mean highly arbitrary reads.
-
- -Brian
-*/
-/*
- Notes on file formats.
- The Meta file is layed out as:
- check - Just an int of 254 to make sure that the the file we are opening was
- never corrupted.
- version - The current version of the file format.
- rows - This is an unsigned long long which is the number of rows in the data
- file.
- check point - Reserved for future use
- auto increment - MAX value for autoincrement
- dirty - Status of the file, whether or not its values are the latest. This
- flag is what causes a repair to occur
-
- The data file:
- check - Just an int of 254 to make sure that the the file we are opening was
- never corrupted.
- version - The current version of the file format.
- data - The data is stored in a "row +blobs" format.
-*/
-
-/* If the archive storage engine has been inited */
-static bool archive_inited= FALSE;
-/* Variables for archive share methods */
-pthread_mutex_t archive_mutex;
-static HASH archive_open_tables;
-
-/* The file extension */
-#define ARZ ".ARZ" // The data file
-#define ARN ".ARN" // Files used during an optimize call
-#define ARM ".ARM" // Meta file
-/*
- uchar + uchar + ulonglong + ulonglong + ulonglong + ulonglong + uchar
-*/
-#define META_BUFFER_SIZE sizeof(uchar) + sizeof(uchar) + sizeof(ulonglong) \
- + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(uchar)
-
-/*
- uchar + uchar
-*/
-#define DATA_BUFFER_SIZE 2 // Size of the data used in the data file
-#define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption
-
-/* Static declarations for handerton */
-static handler *archive_create_handler(TABLE_SHARE *table);
-/*
- Number of rows that will force a bulk insert.
-*/
-#define ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT 2
-
-
-/* dummy handlerton - only to have something to return from archive_db_init */
-handlerton archive_hton = {
- MYSQL_HANDLERTON_INTERFACE_VERSION,
- "ARCHIVE",
- SHOW_OPTION_YES,
- "Archive storage engine",
- DB_TYPE_ARCHIVE_DB,
- archive_db_init,
- 0, /* slot */
- 0, /* savepoint size. */
- NULL, /* close_connection */
- NULL, /* savepoint */
- NULL, /* rollback to savepoint */
- NULL, /* releas savepoint */
- NULL, /* commit */
- NULL, /* rollback */
- NULL, /* prepare */
- NULL, /* recover */
- NULL, /* commit_by_xid */
- NULL, /* rollback_by_xid */
- NULL, /* create_cursor_read_view */
- NULL, /* set_cursor_read_view */
- NULL, /* close_cursor_read_view */
- archive_create_handler, /* Create a new handler */
- NULL, /* Drop a database */
- archive_db_end, /* Panic call */
- NULL, /* Start Consistent Snapshot */
- NULL, /* Flush logs */
- NULL, /* Show status */
- NULL, /* Partition flags */
- NULL, /* Alter table flags */
- NULL, /* Alter interface */
- NULL, /* fill_files_table */
- HTON_NO_FLAGS,
- NULL, /* binlog_func */
- NULL, /* binlog_log_query */
- NULL /* release_temporary_latches */
-
-};
-
-static handler *archive_create_handler(TABLE_SHARE *table)
-{
- return new ha_archive(table);
-}
-
-/*
- Used for hash table that tracks open tables.
-*/
-static byte* archive_get_key(ARCHIVE_SHARE *share,uint *length,
- my_bool not_used __attribute__((unused)))
-{
- *length=share->table_name_length;
- return (byte*) share->table_name;
-}
-
-
-/*
- Initialize the archive handler.
-
- SYNOPSIS
- archive_db_init()
- void
-
- RETURN
- FALSE OK
- TRUE Error
-*/
-
-bool archive_db_init()
-{
- DBUG_ENTER("archive_db_init");
- if (pthread_mutex_init(&archive_mutex, MY_MUTEX_INIT_FAST))
- goto error;
- if (hash_init(&archive_open_tables, system_charset_info, 32, 0, 0,
- (hash_get_key) archive_get_key, 0, 0))
- {
- VOID(pthread_mutex_destroy(&archive_mutex));
- }
- else
- {
- archive_inited= TRUE;
- DBUG_RETURN(FALSE);
- }
-error:
- have_archive_db= SHOW_OPTION_DISABLED; // If we couldn't use handler
- DBUG_RETURN(TRUE);
-}
-
-/*
- Release the archive handler.
-
- SYNOPSIS
- archive_db_end()
- void
-
- RETURN
- FALSE OK
-*/
-
-int archive_db_end(ha_panic_function type)
-{
- if (archive_inited)
- {
- hash_free(&archive_open_tables);
- VOID(pthread_mutex_destroy(&archive_mutex));
- }
- archive_inited= 0;
- return 0;
-}
-
-ha_archive::ha_archive(TABLE_SHARE *table_arg)
- :handler(&archive_hton, table_arg), delayed_insert(0), bulk_insert(0)
-{
- /* Set our original buffer from pre-allocated memory */
- buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info);
-
- /* The size of the offset value we will use for position() */
- ref_length = sizeof(my_off_t);
-}
-
-/*
- This method reads the header of a datafile and returns whether or not it was successful.
-*/
-int ha_archive::read_data_header(azio_stream *file_to_read)
-{
- uchar data_buffer[DATA_BUFFER_SIZE];
- DBUG_ENTER("ha_archive::read_data_header");
-
- if (azrewind(file_to_read) == -1)
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
-
- if (azread(file_to_read, data_buffer, DATA_BUFFER_SIZE) != DATA_BUFFER_SIZE)
- DBUG_RETURN(errno ? errno : -1);
-
- DBUG_PRINT("ha_archive::read_data_header", ("Check %u", data_buffer[0]));
- DBUG_PRINT("ha_archive::read_data_header", ("Version %u", data_buffer[1]));
-
- if ((data_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) &&
- (data_buffer[1] != (uchar)ARCHIVE_VERSION))
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
-
- DBUG_RETURN(0);
-}
-
-/*
- This method writes out the header of a datafile and returns whether or not it was successful.
-*/
-int ha_archive::write_data_header(azio_stream *file_to_write)
-{
- uchar data_buffer[DATA_BUFFER_SIZE];
- DBUG_ENTER("ha_archive::write_data_header");
-
- data_buffer[0]= (uchar)ARCHIVE_CHECK_HEADER;
- data_buffer[1]= (uchar)ARCHIVE_VERSION;
-
- if (azwrite(file_to_write, &data_buffer, DATA_BUFFER_SIZE) !=
- DATA_BUFFER_SIZE)
- goto error;
- DBUG_PRINT("ha_archive::write_data_header", ("Check %u", (uint)data_buffer[0]));
- DBUG_PRINT("ha_archive::write_data_header", ("Version %u", (uint)data_buffer[1]));
-
- DBUG_RETURN(0);
-error:
- DBUG_RETURN(errno);
-}
-
-/*
- This method reads the header of a meta file and returns whether or not it was successful.
- *rows will contain the current number of rows in the data file upon success.
-*/
-int ha_archive::read_meta_file(File meta_file, ha_rows *rows,
- ulonglong *auto_increment,
- ulonglong *forced_flushes)
-{
- uchar meta_buffer[META_BUFFER_SIZE];
- uchar *ptr= meta_buffer;
- ulonglong check_point;
-
- DBUG_ENTER("ha_archive::read_meta_file");
-
- VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
- if (my_read(meta_file, (byte*)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE)
- DBUG_RETURN(-1);
-
- /*
- Parse out the meta data, we ignore version at the moment
- */
-
- ptr+= sizeof(uchar)*2; // Move past header
- *rows= (ha_rows)uint8korr(ptr);
- ptr+= sizeof(ulonglong); // Move past rows
- check_point= uint8korr(ptr);
- ptr+= sizeof(ulonglong); // Move past check_point
- *auto_increment= uint8korr(ptr);
- ptr+= sizeof(ulonglong); // Move past auto_increment
- *forced_flushes= uint8korr(ptr);
- ptr+= sizeof(ulonglong); // Move past forced_flush
-
- DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0]));
- DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1]));
- DBUG_PRINT("ha_archive::read_meta_file", ("Rows %llu", *rows));
- DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %llu", check_point));
- DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %llu", *auto_increment));
- DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %llu", *forced_flushes));
- DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr)));
-
- if ((meta_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) ||
- ((bool)(*ptr)== TRUE))
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
-
- my_sync(meta_file, MYF(MY_WME));
-
- DBUG_RETURN(0);
-}
-
-/*
- This method writes out the header of a meta file and returns whether or not it was successful.
- By setting dirty you say whether or not the file represents the actual state of the data file.
- Upon ::open() we set to dirty, and upon ::close() we set to clean.
-*/
-int ha_archive::write_meta_file(File meta_file, ha_rows rows,
- ulonglong auto_increment,
- ulonglong forced_flushes,
- bool dirty)
-{
- uchar meta_buffer[META_BUFFER_SIZE];
- uchar *ptr= meta_buffer;
- ulonglong check_point= 0; //Reserved for the future
-
- DBUG_ENTER("ha_archive::write_meta_file");
-
- *ptr= (uchar)ARCHIVE_CHECK_HEADER;
- ptr += sizeof(uchar);
- *ptr= (uchar)ARCHIVE_VERSION;
- ptr += sizeof(uchar);
- int8store(ptr, (ulonglong)rows);
- ptr += sizeof(ulonglong);
- int8store(ptr, check_point);
- ptr += sizeof(ulonglong);
- int8store(ptr, auto_increment);
- ptr += sizeof(ulonglong);
- int8store(ptr, forced_flushes);
- ptr += sizeof(ulonglong);
- *ptr= (uchar)dirty;
- DBUG_PRINT("ha_archive::write_meta_file", ("Check %d",
- (uint)ARCHIVE_CHECK_HEADER));
- DBUG_PRINT("ha_archive::write_meta_file", ("Version %d",
- (uint)ARCHIVE_VERSION));
- DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", (ulonglong)rows));
- DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", check_point));
- DBUG_PRINT("ha_archive::write_meta_file", ("Auto Increment %llu",
- auto_increment));
- DBUG_PRINT("ha_archive::write_meta_file", ("Forced Flushes %llu",
- forced_flushes));
- DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));
-
- VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
- if (my_write(meta_file, (byte *)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE)
- DBUG_RETURN(-1);
-
- my_sync(meta_file, MYF(MY_WME));
-
- DBUG_RETURN(0);
-}
-
-
-/*
- We create the shared memory space that we will use for the open table.
- No matter what we try to get or create a share. This is so that a repair
- table operation can occur.
-
- See ha_example.cc for a longer description.
-*/
-ARCHIVE_SHARE *ha_archive::get_share(const char *table_name,
- TABLE *table, int *rc)
-{
- ARCHIVE_SHARE *share;
- char meta_file_name[FN_REFLEN];
- uint length;
- char *tmp_name;
- DBUG_ENTER("ha_archive::get_share");
-
- pthread_mutex_lock(&archive_mutex);
- length=(uint) strlen(table_name);
-
- if (!(share=(ARCHIVE_SHARE*) hash_search(&archive_open_tables,
- (byte*) table_name,
- length)))
- {
- if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
- &share, sizeof(*share),
- &tmp_name, length+1,
- NullS))
- {
- pthread_mutex_unlock(&archive_mutex);
- *rc= HA_ERR_OUT_OF_MEM;
- DBUG_RETURN(NULL);
- }
-
- share->use_count= 0;
- share->table_name_length= length;
- share->table_name= tmp_name;
- share->crashed= FALSE;
- fn_format(share->data_file_name,table_name,"",ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
- fn_format(meta_file_name,table_name,"",ARM,MY_REPLACE_EXT|MY_UNPACK_FILENAME);
- strmov(share->table_name,table_name);
- /*
- We will use this lock for rows.
- */
- VOID(pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST));
- if ((share->meta_file= my_open(meta_file_name, O_RDWR, MYF(0))) == -1)
- share->crashed= TRUE;
-
- /*
- After we read, we set the file to dirty. When we close, we will do the
- opposite. If the meta file will not open we assume it is crashed and
- leave it up to the user to fix.
- */
- if (read_meta_file(share->meta_file, &share->rows_recorded,
- &share->auto_increment_value,
- &share->forced_flushes))
- share->crashed= TRUE;
- else
- (void)write_meta_file(share->meta_file, share->rows_recorded,
- share->auto_increment_value,
- share->forced_flushes,
- TRUE);
- /*
- It is expensive to open and close the data files and since you can't have
- a gzip file that can be both read and written we keep a writer open
- that is shared amoung all open tables.
- */
- if (!(azopen(&(share->archive_write), share->data_file_name,
- O_WRONLY|O_APPEND|O_BINARY)))
- {
- DBUG_PRINT("info", ("Could not open archive write file"));
- share->crashed= TRUE;
- }
- VOID(my_hash_insert(&archive_open_tables, (byte*) share));
- thr_lock_init(&share->lock);
- }
- share->use_count++;
- DBUG_PRINT("info", ("archive table %.*s has %d open handles now",
- share->table_name_length, share->table_name,
- share->use_count));
- if (share->crashed)
- *rc= HA_ERR_CRASHED_ON_USAGE;
- pthread_mutex_unlock(&archive_mutex);
-
- DBUG_RETURN(share);
-}
-
-
-/*
- Free the share.
- See ha_example.cc for a description.
-*/
-int ha_archive::free_share(ARCHIVE_SHARE *share)
-{
- int rc= 0;
- DBUG_ENTER("ha_archive::free_share");
- DBUG_PRINT("info", ("archive table %.*s has %d open handles on entrance",
- share->table_name_length, share->table_name,
- share->use_count));
-
- pthread_mutex_lock(&archive_mutex);
- if (!--share->use_count)
- {
- hash_delete(&archive_open_tables, (byte*) share);
- thr_lock_delete(&share->lock);
- VOID(pthread_mutex_destroy(&share->mutex));
- /*
- We need to make sure we don't reset the crashed state.
- If we open a crashed file, wee need to close it as crashed unless
- it has been repaired.
- Since we will close the data down after this, we go on and count
- the flush on close;
- */
- share->forced_flushes++;
- (void)write_meta_file(share->meta_file, share->rows_recorded,
- share->auto_increment_value,
- share->forced_flushes,
- share->crashed ? TRUE :FALSE);
- if (azclose(&(share->archive_write)))
- rc= 1;
- if (my_close(share->meta_file, MYF(0)))
- rc= 1;
- my_free((gptr) share, MYF(0));
- }
- pthread_mutex_unlock(&archive_mutex);
-
- DBUG_RETURN(rc);
-}
-
-
-/*
- We just implement one additional file extension.
-*/
-static const char *ha_archive_exts[] = {
- ARZ,
- ARM,
- NullS
-};
-
-const char **ha_archive::bas_ext() const
-{
- return ha_archive_exts;
-}
-
-
-/*
- When opening a file we:
- Create/get our shared structure.
- Init out lock.
- We open the file we will read from.
-*/
-int ha_archive::open(const char *name, int mode, uint open_options)
-{
- int rc= 0;
- DBUG_ENTER("ha_archive::open");
-
- DBUG_PRINT("info", ("archive table was opened for crash %s",
- (open_options & HA_OPEN_FOR_REPAIR) ? "yes" : "no"));
- share= get_share(name, table, &rc);
-
- if (rc == HA_ERR_CRASHED_ON_USAGE && !(open_options & HA_OPEN_FOR_REPAIR))
- {
- free_share(share);
- DBUG_RETURN(rc);
- }
- else if (rc == HA_ERR_OUT_OF_MEM)
- {
- DBUG_RETURN(rc);
- }
-
- thr_lock_data_init(&share->lock,&lock,NULL);
-
- if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY)))
- {
- if (errno == EROFS || errno == EACCES)
- DBUG_RETURN(my_errno= errno);
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
- }
-
- DBUG_PRINT("info", ("archive table was crashed %s",
- rc == HA_ERR_CRASHED_ON_USAGE ? "yes" : "no"));
- if (rc == HA_ERR_CRASHED_ON_USAGE && open_options & HA_OPEN_FOR_REPAIR)
- {
- DBUG_RETURN(0);
- }
- else
- DBUG_RETURN(rc);
-}
-
-
-/*
- Closes the file.
-
- SYNOPSIS
- close();
-
- IMPLEMENTATION:
-
- We first close this storage engines file handle to the archive and
- then remove our reference count to the table (and possibly free it
- as well).
-
- RETURN
- 0 ok
- 1 Error
-*/
-
-int ha_archive::close(void)
-{
- int rc= 0;
- DBUG_ENTER("ha_archive::close");
-
- /* First close stream */
- if (azclose(&archive))
- rc= 1;
- /* then also close share */
- rc|= free_share(share);
-
- DBUG_RETURN(rc);
-}
-
-
-/*
- We create our data file here. The format is pretty simple.
- You can read about the format of the data file above.
- Unlike other storage engines we do not "pack" our data. Since we
- are about to do a general compression, packing would just be a waste of
- CPU time. If the table has blobs they are written after the row in the order
- of creation.
-*/
-
-int ha_archive::create(const char *name, TABLE *table_arg,
- HA_CREATE_INFO *create_info)
-{
- File create_file; // We use to create the datafile and the metafile
- char name_buff[FN_REFLEN];
- int error;
- DBUG_ENTER("ha_archive::create");
-
- auto_increment_value= (create_info->auto_increment_value ?
- create_info->auto_increment_value -1 :
- (ulonglong) 0);
-
- if ((create_file= my_create(fn_format(name_buff,name,"",ARM,
- MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
- O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
- {
- error= my_errno;
- goto error;
- }
-
- for (uint key= 0; key < table_arg->s->keys; key++)
- {
- KEY *pos= table_arg->key_info+key;
- KEY_PART_INFO *key_part= pos->key_part;
- KEY_PART_INFO *key_part_end= key_part + pos->key_parts;
-
- for (; key_part != key_part_end; key_part++)
- {
- Field *field= key_part->field;
-
- if (!(field->flags & AUTO_INCREMENT_FLAG))
- {
- error= -1;
- goto error;
- }
- }
- }
-
- write_meta_file(create_file, 0, auto_increment_value, 0, FALSE);
- my_close(create_file,MYF(0));
-
- /*
- We reuse name_buff since it is available.
- */
- if ((create_file= my_create(fn_format(name_buff,name,"",ARZ,
- MY_REPLACE_EXT|MY_UNPACK_FILENAME),0,
- O_RDWR | O_TRUNC,MYF(MY_WME))) < 0)
- {
- error= my_errno;
- goto error;
- }
- if (!azdopen(&archive, create_file, O_WRONLY|O_BINARY))
- {
- error= errno;
- goto error2;
- }
- if (write_data_header(&archive))
- {
- error= errno;
- goto error3;
- }
-
- if (azclose(&archive))
- {
- error= errno;
- goto error2;
- }
-
- DBUG_RETURN(0);
-
-error3:
- /* We already have an error, so ignore results of azclose. */
- (void)azclose(&archive);
-error2:
- my_close(create_file, MYF(0));
- delete_table(name);
-error:
- /* Return error number, if we got one */
- DBUG_RETURN(error ? error : -1);
-}
-
-/*
- This is where the actual row is written out.
-*/
-int ha_archive::real_write_row(byte *buf, azio_stream *writer)
-{
- my_off_t written;
- uint *ptr, *end;
- DBUG_ENTER("ha_archive::real_write_row");
-
- written= azwrite(writer, buf, table->s->reclength);
- DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %d",
- written, table->s->reclength));
- if (!delayed_insert || !bulk_insert)
- share->dirty= TRUE;
-
- if (written != (my_off_t)table->s->reclength)
- DBUG_RETURN(errno ? errno : -1);
- /*
- We should probably mark the table as damagaged if the record is written
- but the blob fails.
- */
- for (ptr= table->s->blob_field, end= ptr + table->s->blob_fields ;
- ptr != end ;
- ptr++)
- {
- char *data_ptr;
- uint32 size= ((Field_blob*) table->field[*ptr])->get_length();
-
- if (size)
- {
- ((Field_blob*) table->field[*ptr])->get_ptr(&data_ptr);
- written= azwrite(writer, data_ptr, (unsigned)size);
- if (written != (my_off_t)size)
- DBUG_RETURN(errno ? errno : -1);
- }
- }
- DBUG_RETURN(0);
-}
-
-
-/*
- Look at ha_archive::open() for an explanation of the row format.
- Here we just write out the row.
-
- Wondering about start_bulk_insert()? We don't implement it for
- archive since it optimizes for lots of writes. The only save
- for implementing start_bulk_insert() is that we could skip
- setting dirty to true each time.
-*/
-int ha_archive::write_row(byte *buf)
-{
- int rc;
- byte *read_buf= NULL;
- ulonglong temp_auto;
- DBUG_ENTER("ha_archive::write_row");
-
- if (share->crashed)
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
-
- statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
- if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT)
- table->timestamp_field->set_time();
- pthread_mutex_lock(&share->mutex);
-
- if (table->next_number_field)
- {
- KEY *mkey= &table->s->key_info[0]; // We only support one key right now
- update_auto_increment();
- temp_auto= table->next_number_field->val_int();
-
- /*
- Bad news, this will cause a search for the unique value which is very
- expensive since we will have to do a table scan which will lock up
- all other writers during this period. This could perhaps be optimized
- in the future.
- */
- if (temp_auto == share->auto_increment_value &&
- mkey->flags & HA_NOSAME)
- {
- rc= HA_ERR_FOUND_DUPP_KEY;
- goto error;
- }
-
- if (temp_auto < share->auto_increment_value &&
- mkey->flags & HA_NOSAME)
- {
- /*
- First we create a buffer that we can use for reading rows, and can pass
- to get_row().
- */
- if (!(read_buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
- {
- rc= HA_ERR_OUT_OF_MEM;
- goto error;
- }
- /*
- All of the buffer must be written out or we won't see all of the
- data
- */
- azflush(&(share->archive_write), Z_SYNC_FLUSH);
- share->forced_flushes++;
- /*
- Set the position of the local read thread to the beginning postion.
- */
- if (read_data_header(&archive))
- {
- rc= HA_ERR_CRASHED_ON_USAGE;
- goto error;
- }
-
- /*
- Now we read and check all of the rows.
- if (!memcmp(table->next_number_field->ptr, mfield->ptr, mfield->max_length()))
- if ((longlong)temp_auto ==
- mfield->val_int((char*)(read_buf + mfield->offset())))
- */
- Field *mfield= table->next_number_field;
-
- while (!(get_row(&archive, read_buf)))
- {
- if (!memcmp(read_buf + mfield->offset(), table->next_number_field->ptr,
- mfield->max_length()))
- {
- rc= HA_ERR_FOUND_DUPP_KEY;
- goto error;
- }
- }
- }
- else
- {
- if (temp_auto > share->auto_increment_value)
- auto_increment_value= share->auto_increment_value= temp_auto;
- }
- }
-
- /*
- Notice that the global auto_increment has been increased.
- In case of a failed row write, we will never try to reuse the value.
- */
-
- share->rows_recorded++;
- rc= real_write_row(buf, &(share->archive_write));
-error:
- pthread_mutex_unlock(&share->mutex);
- if (read_buf)
- my_free((gptr) read_buf, MYF(0));
-
- DBUG_RETURN(rc);
-}
-
-
-ulonglong ha_archive::get_auto_increment()
-{
- return share->auto_increment_value + 1;
-}
-
-/* Initialized at each key walk (called multiple times unlike rnd_init()) */
-int ha_archive::index_init(uint keynr, bool sorted)
-{
- DBUG_ENTER("ha_archive::index_init");
- active_index= keynr;
- DBUG_RETURN(0);
-}
-
-
-/*
- No indexes, so if we get a request for an index search since we tell
- the optimizer that we have unique indexes, we scan
-*/
-int ha_archive::index_read(byte *buf, const byte *key,
- uint key_len, enum ha_rkey_function find_flag)
-{
- int rc;
- DBUG_ENTER("ha_archive::index_read");
- rc= index_read_idx(buf, active_index, key, key_len, find_flag);
- DBUG_RETURN(rc);
-}
-
-
-int ha_archive::index_read_idx(byte *buf, uint index, const byte *key,
- uint key_len, enum ha_rkey_function find_flag)
-{
- int rc= 0;
- bool found= 0;
- KEY *mkey= &table->s->key_info[index];
- current_k_offset= mkey->key_part->offset;
- current_key= key;
- current_key_len= key_len;
-
-
- DBUG_ENTER("ha_archive::index_read_idx");
-
- /*
- All of the buffer must be written out or we won't see all of the
- data
- */
- pthread_mutex_lock(&share->mutex);
- azflush(&(share->archive_write), Z_SYNC_FLUSH);
- share->forced_flushes++;
- pthread_mutex_unlock(&share->mutex);
-
- /*
- Set the position of the local read thread to the beginning postion.
- */
- if (read_data_header(&archive))
- {
- rc= HA_ERR_CRASHED_ON_USAGE;
- goto error;
- }
-
- while (!(get_row(&archive, buf)))
- {
- if (!memcmp(current_key, buf + current_k_offset, current_key_len))
- {
- found= 1;
- break;
- }
- }
-
- if (found)
- DBUG_RETURN(0);
-
-error:
- DBUG_RETURN(rc ? rc : HA_ERR_END_OF_FILE);
-}
-
-
-int ha_archive::index_next(byte * buf)
-{
- bool found= 0;
-
- DBUG_ENTER("ha_archive::index_next");
-
- while (!(get_row(&archive, buf)))
- {
- if (!memcmp(current_key, buf+current_k_offset, current_key_len))
- {
- found= 1;
- break;
- }
- }
-
- DBUG_RETURN(found ? 0 : HA_ERR_END_OF_FILE);
-}
-
-/*
- All calls that need to scan the table start with this method. If we are told
- that it is a table scan we rewind the file to the beginning, otherwise
- we assume the position will be set.
-*/
-
-int ha_archive::rnd_init(bool scan)
-{
- DBUG_ENTER("ha_archive::rnd_init");
-
- if (share->crashed)
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
-
- /* We rewind the file so that we can read from the beginning if scan */
- if (scan)
- {
- scan_rows= share->rows_recorded;
- DBUG_PRINT("info", ("archive will retrieve %llu rows", scan_rows));
- records= 0;
-
- /*
- If dirty, we lock, and then reset/flush the data.
- I found that just calling azflush() doesn't always work.
- */
- if (share->dirty == TRUE)
- {
- pthread_mutex_lock(&share->mutex);
- if (share->dirty == TRUE)
- {
- DBUG_PRINT("info", ("archive flushing out rows for scan"));
- azflush(&(share->archive_write), Z_SYNC_FLUSH);
- share->forced_flushes++;
- share->dirty= FALSE;
- }
- pthread_mutex_unlock(&share->mutex);
- }
-
- if (read_data_header(&archive))
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
- }
-
- DBUG_RETURN(0);
-}
-
-
-/*
- This is the method that is used to read a row. It assumes that the row is
- positioned where you want it.
-*/
-int ha_archive::get_row(azio_stream *file_to_read, byte *buf)
-{
- int read; // Bytes read, azread() returns int
- uint *ptr, *end;
- char *last;
- size_t total_blob_length= 0;
- DBUG_ENTER("ha_archive::get_row");
-
- read= azread(file_to_read, buf, table->s->reclength);
- DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %d", read,
- table->s->reclength));
-
- if (read == Z_STREAM_ERROR)
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
-
- /* If we read nothing we are at the end of the file */
- if (read == 0)
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-
- /*
- If the record is the wrong size, the file is probably damaged, unless
- we are dealing with a delayed insert or a bulk insert.
- */
- if ((ulong) read != table->s->reclength)
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-
- /* Calculate blob length, we use this for our buffer */
- for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ;
- ptr != end ;
- ptr++)
- {
- if (ha_get_bit_in_read_set(((Field_blob*) table->field[*ptr])->fieldnr))
- total_blob_length += ((Field_blob*) table->field[*ptr])->get_length();
- }
-
- /* Adjust our row buffer if we need be */
- buffer.alloc(total_blob_length);
- last= (char *)buffer.ptr();
-
- /* Loop through our blobs and read them */
- for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ;
- ptr != end ;
- ptr++)
- {
- size_t size= ((Field_blob*) table->field[*ptr])->get_length();
- if (size)
- {
- if (ha_get_bit_in_read_set(((Field_blob*) table->field[*ptr])->fieldnr))
- {
- read= azread(file_to_read, last, size);
- if ((size_t) read != size)
- DBUG_RETURN(HA_ERR_END_OF_FILE);
- ((Field_blob*) table->field[*ptr])->set_ptr(size, last);
- last += size;
- }
- else
- {
- (void)azseek(file_to_read, size, SEEK_CUR);
- }
- }
- }
- DBUG_RETURN(0);
-}
-
-
-/*
- Called during ORDER BY. Its position is either from being called sequentially
- or by having had ha_archive::rnd_pos() called before it is called.
-*/
-
-int ha_archive::rnd_next(byte *buf)
-{
- int rc;
- DBUG_ENTER("ha_archive::rnd_next");
-
- if (share->crashed)
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
-
- if (!scan_rows)
- DBUG_RETURN(HA_ERR_END_OF_FILE);
- scan_rows--;
-
- statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
- &LOCK_status);
- current_position= aztell(&archive);
- rc= get_row(&archive, buf);
-
-
- if (rc != HA_ERR_END_OF_FILE)
- records++;
-
- DBUG_RETURN(rc);
-}
-
-
-/*
- Thanks to the table flag HA_REC_NOT_IN_SEQ this will be called after
- each call to ha_archive::rnd_next() if an ordering of the rows is
- needed.
-*/
-
-void ha_archive::position(const byte *record)
-{
- DBUG_ENTER("ha_archive::position");
- my_store_ptr(ref, ref_length, current_position);
- DBUG_VOID_RETURN;
-}
-
-
-/*
- This is called after a table scan for each row if the results of the
- scan need to be ordered. It will take *pos and use it to move the
- cursor in the file so that the next row that is called is the
- correctly ordered row.
-*/
-
-int ha_archive::rnd_pos(byte * buf, byte *pos)
-{
- DBUG_ENTER("ha_archive::rnd_pos");
- statistic_increment(table->in_use->status_var.ha_read_rnd_next_count,
- &LOCK_status);
- current_position= (my_off_t)my_get_ptr(pos, ref_length);
- (void)azseek(&archive, current_position, SEEK_SET);
-
- DBUG_RETURN(get_row(&archive, buf));
-}
-
-/*
- This method repairs the meta file. It does this by walking the datafile and
- rewriting the meta file. Currently it does this by calling optimize with
- the extended flag.
-*/
-int ha_archive::repair(THD* thd, HA_CHECK_OPT* check_opt)
-{
- DBUG_ENTER("ha_archive::repair");
- check_opt->flags= T_EXTEND;
- int rc= optimize(thd, check_opt);
-
- if (rc)
- DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR);
-
- share->crashed= FALSE;
- DBUG_RETURN(0);
-}
-
-/*
- The table can become fragmented if data was inserted, read, and then
- inserted again. What we do is open up the file and recompress it completely.
-*/
-int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
-{
- DBUG_ENTER("ha_archive::optimize");
- int rc;
- azio_stream writer;
- char writer_filename[FN_REFLEN];
-
- /* Flush any waiting data */
- azflush(&(share->archive_write), Z_SYNC_FLUSH);
- share->forced_flushes++;
-
- /* Lets create a file to contain the new data */
- fn_format(writer_filename, share->table_name, "", ARN,
- MY_REPLACE_EXT|MY_UNPACK_FILENAME);
-
- if (!(azopen(&writer, writer_filename, O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)))
- DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
-
- /*
- An extended rebuild is a lot more effort. We open up each row and re-record it.
- Any dead rows are removed (aka rows that may have been partially recorded).
- */
-
- if (check_opt->flags == T_EXTEND)
- {
- DBUG_PRINT("info", ("archive extended rebuild"));
- byte *buf;
-
- /*
- First we create a buffer that we can use for reading rows, and can pass
- to get_row().
- */
- if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
- {
- rc= HA_ERR_OUT_OF_MEM;
- goto error;
- }
-
- /*
- Now we will rewind the archive file so that we are positioned at the
- start of the file.
- */
- rc= read_data_header(&archive);
-
- /*
- Assuming now error from rewinding the archive file, we now write out the
- new header for out data file.
- */
- if (!rc)
- rc= write_data_header(&writer);
-
- /*
- On success of writing out the new header, we now fetch each row and
- insert it into the new archive file.
- */
- if (!rc)
- {
- share->rows_recorded= 0;
- auto_increment_value= share->auto_increment_value= 0;
- while (!(rc= get_row(&archive, buf)))
- {
- real_write_row(buf, &writer);
- if (table->found_next_number_field)
- {
- Field *field= table->found_next_number_field;
- ulonglong auto_value=
- (ulonglong) field->val_int((char*)(buf + field->offset()));
- if (share->auto_increment_value < auto_value)
- auto_increment_value= share->auto_increment_value=
- auto_value;
- }
- share->rows_recorded++;
- }
- }
- DBUG_PRINT("info", ("recovered %llu archive rows", share->rows_recorded));
-
- my_free((char*)buf, MYF(0));
- if (rc && rc != HA_ERR_END_OF_FILE)
- goto error;
- }
- else
- {
- DBUG_PRINT("info", ("archive quick rebuild"));
- /*
- The quick method is to just read the data raw, and then compress it directly.
- */
- int read; // Bytes read, azread() returns int
- char block[IO_SIZE];
- if (azrewind(&archive) == -1)
- {
- rc= HA_ERR_CRASHED_ON_USAGE;
- DBUG_PRINT("info", ("archive HA_ERR_CRASHED_ON_USAGE"));
- goto error;
- }
-
- while ((read= azread(&archive, block, IO_SIZE)) > 0)
- azwrite(&writer, block, read);
- }
-
- azclose(&writer);
- share->dirty= FALSE;
- share->forced_flushes= 0;
- azclose(&(share->archive_write));
- DBUG_PRINT("info", ("Reopening archive data file"));
- if (!(azopen(&(share->archive_write), share->data_file_name,
- O_WRONLY|O_APPEND|O_BINARY)))
- {
- DBUG_PRINT("info", ("Could not open archive write file"));
- rc= HA_ERR_CRASHED_ON_USAGE;
- goto error;
- }
-
- my_rename(writer_filename,share->data_file_name,MYF(0));
-
- /*
- Now we need to reopen our read descriptor since it has changed.
- */
- azclose(&archive);
- if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY)))
- {
- rc= HA_ERR_CRASHED_ON_USAGE;
- goto error;
- }
-
-
- DBUG_RETURN(0);
-
-error:
- azclose(&writer);
-
- DBUG_RETURN(rc);
-}
-
-/*
- Below is an example of how to setup row level locking.
-*/
-THR_LOCK_DATA **ha_archive::store_lock(THD *thd,
- THR_LOCK_DATA **to,
- enum thr_lock_type lock_type)
-{
- if (lock_type == TL_WRITE_DELAYED)
- delayed_insert= TRUE;
- else
- delayed_insert= FALSE;
-
- if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
- {
- /*
- Here is where we get into the guts of a row level lock.
- If TL_UNLOCK is set
- If we are not doing a LOCK TABLE or DISCARD/IMPORT
- TABLESPACE, then allow multiple writers
- */
-
- if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
- lock_type <= TL_WRITE) && !thd->in_lock_tables
- && !thd->tablespace_op)
- lock_type = TL_WRITE_ALLOW_WRITE;
-
- /*
- In queries of type INSERT INTO t1 SELECT ... FROM t2 ...
- MySQL would use the lock TL_READ_NO_INSERT on t2, and that
- would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts
- to t2. Convert the lock to a normal read lock to allow
- concurrent inserts to t2.
- */
-
- if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables)
- lock_type = TL_READ;
-
- lock.type=lock_type;
- }
-
- *to++= &lock;
-
- return to;
-}
-
-void ha_archive::update_create_info(HA_CREATE_INFO *create_info)
-{
- ha_archive::info(HA_STATUS_AUTO | HA_STATUS_CONST);
- if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
- {
- create_info->auto_increment_value=auto_increment_value;
- }
-}
-
-
-/*
- Hints for optimizer, see ha_tina for more information
-*/
-void ha_archive::info(uint flag)
-{
- DBUG_ENTER("ha_archive::info");
- /*
- This should be an accurate number now, though bulk and delayed inserts can
- cause the number to be inaccurate.
- */
- records= share->rows_recorded;
- deleted= 0;
- /* Costs quite a bit more to get all information */
- if (flag & HA_STATUS_TIME)
- {
- MY_STAT file_stat; // Stat information for the data file
-
- VOID(my_stat(share->data_file_name, &file_stat, MYF(MY_WME)));
-
- mean_rec_length= table->s->reclength + buffer.alloced_length();
- data_file_length= file_stat.st_size;
- create_time= file_stat.st_ctime;
- update_time= file_stat.st_mtime;
- max_data_file_length= share->rows_recorded * mean_rec_length;
- }
- delete_length= 0;
- index_file_length=0;
-
- if (flag & HA_STATUS_AUTO)
- auto_increment_value= share->auto_increment_value;
-
- DBUG_VOID_RETURN;
-}
-
-
-/*
- This method tells us that a bulk insert operation is about to occur. We set
- a flag which will keep write_row from saying that its data is dirty. This in
- turn will keep selects from causing a sync to occur.
- Basically, yet another optimizations to keep compression working well.
-*/
-void ha_archive::start_bulk_insert(ha_rows rows)
-{
- DBUG_ENTER("ha_archive::start_bulk_insert");
- if (!rows || rows >= ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT)
- bulk_insert= TRUE;
- DBUG_VOID_RETURN;
-}
-
-
-/*
- Other side of start_bulk_insert, is end_bulk_insert. Here we turn off the bulk insert
- flag, and set the share dirty so that the next select will call sync for us.
-*/
-int ha_archive::end_bulk_insert()
-{
- DBUG_ENTER("ha_archive::end_bulk_insert");
- bulk_insert= FALSE;
- share->dirty= TRUE;
- DBUG_RETURN(0);
-}
-
-/*
- We cancel a truncate command. The only way to delete an archive table is to drop it.
- This is done for security reasons. In a later version we will enable this by
- allowing the user to select a different row format.
-*/
-int ha_archive::delete_all_rows()
-{
- DBUG_ENTER("ha_archive::delete_all_rows");
- DBUG_RETURN(0);
-}
-
-/*
- We just return state if asked.
-*/
-bool ha_archive::is_crashed() const
-{
- DBUG_ENTER("ha_archive::is_crashed");
- DBUG_RETURN(share->crashed);
-}
-
-/*
- Simple scan of the tables to make sure everything is ok.
-*/
-
-int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt)
-{
- int rc= 0;
- byte *buf;
- const char *old_proc_info=thd->proc_info;
- ha_rows count= share->rows_recorded;
- DBUG_ENTER("ha_archive::check");
-
- thd->proc_info= "Checking table";
- /* Flush any waiting data */
- azflush(&(share->archive_write), Z_SYNC_FLUSH);
- share->forced_flushes++;
-
- /*
- First we create a buffer that we can use for reading rows, and can pass
- to get_row().
- */
- if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME))))
- rc= HA_ERR_OUT_OF_MEM;
-
- /*
- Now we will rewind the archive file so that we are positioned at the
- start of the file.
- */
- if (!rc)
- read_data_header(&archive);
-
- if (!rc)
- while (!(rc= get_row(&archive, buf)))
- count--;
-
- my_free((char*)buf, MYF(0));
-
- thd->proc_info= old_proc_info;
-
- if ((rc && rc != HA_ERR_END_OF_FILE) || count)
- {
- share->crashed= FALSE;
- DBUG_RETURN(HA_ADMIN_CORRUPT);
- }
- else
- {
- DBUG_RETURN(HA_ADMIN_OK);
- }
-}
-
-/*
- Check and repair the table if needed.
-*/
-bool ha_archive::check_and_repair(THD *thd)
-{
- HA_CHECK_OPT check_opt;
- DBUG_ENTER("ha_archive::check_and_repair");
-
- check_opt.init();
-
- DBUG_RETURN(repair(thd, &check_opt));
-}
diff --git a/sql/ha_archive.h b/sql/ha_archive.h
deleted file mode 100644
index 9b351b7e8da..00000000000
--- a/sql/ha_archive.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifdef USE_PRAGMA_INTERFACE
-#pragma interface /* gcc class implementation */
-#endif
-
-#include <zlib.h>
-#include "../storage/archive/azlib.h"
-
-/*
- Please read ha_archive.cc first. If you are looking for more general
- answers on how storage engines work, look at ha_example.cc and
- ha_example.h.
-*/
-
-typedef struct st_archive_share {
- char *table_name;
- char data_file_name[FN_REFLEN];
- uint table_name_length,use_count;
- pthread_mutex_t mutex;
- THR_LOCK lock;
- File meta_file; /* Meta file we use */
- azio_stream archive_write; /* Archive file we are working with */
- bool dirty; /* Flag for if a flush should occur */
- bool crashed; /* Meta file is crashed */
- ha_rows rows_recorded; /* Number of rows in tables */
- ulonglong auto_increment_value;
- ulonglong forced_flushes;
- ulonglong mean_rec_length;
-} ARCHIVE_SHARE;
-
-/*
- Version for file format.
- 1 - Initial Version
-*/
-#define ARCHIVE_VERSION 2
-
-class ha_archive: public handler
-{
- THR_LOCK_DATA lock; /* MySQL lock */
- ARCHIVE_SHARE *share; /* Shared lock info */
- azio_stream archive; /* Archive file we are working with */
- my_off_t current_position; /* The position of the row we just read */
- byte byte_buffer[IO_SIZE]; /* Initial buffer for our string */
- String buffer; /* Buffer used for blob storage */
- ha_rows scan_rows; /* Number of rows left in scan */
- bool delayed_insert; /* If the insert is delayed */
- bool bulk_insert; /* If we are performing a bulk insert */
- const byte *current_key;
- uint current_key_len;
- uint current_k_offset;
-
-public:
- ha_archive(TABLE_SHARE *table_arg);
- ~ha_archive()
- {
- }
- const char *table_type() const { return "ARCHIVE"; }
- const char *index_type(uint inx) { return "NONE"; }
- const char **bas_ext() const;
- ulong table_flags() const
- {
- return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT | HA_CAN_BIT_FIELD |
- HA_FILE_BASED | HA_CAN_INSERT_DELAYED | HA_CAN_GEOMETRY);
- }
- ulong index_flags(uint idx, uint part, bool all_parts) const
- {
- return HA_ONLY_WHOLE_INDEX;
- }
- ulonglong get_auto_increment();
- uint max_supported_keys() const { return 1; }
- uint max_supported_key_length() const { return sizeof(ulonglong); }
- uint max_supported_key_part_length() const { return sizeof(ulonglong); }
- int index_init(uint keynr, bool sorted);
- virtual int index_read(byte * buf, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- virtual int index_read_idx(byte * buf, uint index, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_next(byte * buf);
- int open(const char *name, int mode, uint test_if_locked);
- int close(void);
- int write_row(byte * buf);
- int real_write_row(byte *buf, azio_stream *writer);
- int delete_all_rows();
- int rnd_init(bool scan=1);
- int rnd_next(byte *buf);
- int rnd_pos(byte * buf, byte *pos);
- int get_row(azio_stream *file_to_read, byte *buf);
- int read_meta_file(File meta_file, ha_rows *rows,
- ulonglong *auto_increment,
- ulonglong *forced_flushes);
- int write_meta_file(File meta_file, ha_rows rows,
- ulonglong auto_increment,
- ulonglong forced_flushes,
- bool dirty);
- ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table, int *rc);
- int free_share(ARCHIVE_SHARE *share);
- bool auto_repair() const { return 1; } // For the moment we just do this
- int read_data_header(azio_stream *file_to_read);
- int write_data_header(azio_stream *file_to_write);
- void position(const byte *record);
- void info(uint);
- void update_create_info(HA_CREATE_INFO *create_info);
- int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
- int optimize(THD* thd, HA_CHECK_OPT* check_opt);
- int repair(THD* thd, HA_CHECK_OPT* check_opt);
- void start_bulk_insert(ha_rows rows);
- int end_bulk_insert();
- enum row_type get_row_type() const
- {
- return ROW_TYPE_COMPRESSED;
- }
- THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
- enum thr_lock_type lock_type);
- bool is_crashed() const;
- int check(THD* thd, HA_CHECK_OPT* check_opt);
- bool check_and_repair(THD *thd);
-};
-
-bool archive_db_init(void);
-int archive_db_end(ha_panic_function type);
-
diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc
index 6ea4cc9aeb5..0fabd00faa3 100644
--- a/sql/ha_berkeley.cc
+++ b/sql/ha_berkeley.cc
@@ -56,10 +56,14 @@
#include <m_ctype.h>
#include <myisampack.h>
#include <hash.h>
+
+#ifdef WITH_BERKELEY_STORAGE_ENGINE
#include "ha_berkeley.h"
#include "sql_manager.h"
#include <stdarg.h>
+#include <mysql/plugin.h>
+
#define HA_BERKELEY_ROWS_IN_TABLE 10000 /* to get optimization right */
#define HA_BERKELEY_RANGE_COUNT 100
#define HA_BERKELEY_MAX_ROWS 10000000 /* Max rows in table */
@@ -2725,3 +2729,17 @@ bool ha_berkeley::check_if_incompatible_data(HA_CREATE_INFO *info,
}
+mysql_declare_plugin(berkeley)
+{
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ &berkeley_hton,
+ berkeley_hton.name,
+ "MySQL AB",
+ "BerkeleyDB Storage Engine",
+ NULL, /* Plugin Init */
+ NULL, /* Plugin Deinit */
+ 0x0100 /* 1.0 */,
+}
+mysql_declare_plugin_end;
+
+#endif
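The mysql_declare_plugin block added to ha_berkeley.cc above is the registration pattern this changeset applies to each engine that stays compiled into mysqld: the engine's handlerton is wrapped in a plugin descriptor so the plugin layer can treat built-in and loadable engines uniformly. A minimal sketch of the same pattern for a hypothetical engine — the example_hton symbol, author, and description strings below are illustrative assumptions, not part of this diff:

  #include <mysql/plugin.h>

  /* handlerton defined elsewhere in the engine's source,
     just as berkeley_hton is in the hunk above */
  extern handlerton example_hton;

  mysql_declare_plugin(example)
  {
    MYSQL_STORAGE_ENGINE_PLUGIN,  /* plugin type                        */
    &example_hton,                /* pointer to the engine's handlerton */
    example_hton.name,            /* name reported by the server        */
    "MySQL AB",                   /* author                             */
    "Example storage engine",     /* description                        */
    NULL,                         /* plugin init                        */
    NULL,                         /* plugin deinit                      */
    0x0100 /* 1.0 */,             /* version                            */
  }
  mysql_declare_plugin_end;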
diff --git a/sql/ha_blackhole.cc b/sql/ha_blackhole.cc
deleted file mode 100644
index 2f5e8ee0abc..00000000000
--- a/sql/ha_blackhole.cc
+++ /dev/null
@@ -1,252 +0,0 @@
-/* Copyright (C) 2005 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-
-#ifdef USE_PRAGMA_IMPLEMENTATION
-#pragma implementation // gcc: Class implementation
-#endif
-
-#include "mysql_priv.h"
-#include "ha_blackhole.h"
-
-/* Static declarations for handlerton */
-
-static handler *blackhole_create_handler(TABLE_SHARE *table);
-
-
-/* Blackhole storage engine handlerton */
-
-handlerton blackhole_hton= {
- MYSQL_HANDLERTON_INTERFACE_VERSION,
- "BLACKHOLE",
- SHOW_OPTION_YES,
- "/dev/null storage engine (anything you write to it disappears)",
- DB_TYPE_BLACKHOLE_DB,
- NULL,
- 0, /* slot */
- 0, /* savepoint size. */
- NULL, /* close_connection */
- NULL, /* savepoint */
- NULL, /* rollback to savepoint */
- NULL, /* release savepoint */
- NULL, /* commit */
- NULL, /* rollback */
- NULL, /* prepare */
- NULL, /* recover */
- NULL, /* commit_by_xid */
- NULL, /* rollback_by_xid */
- NULL, /* create_cursor_read_view */
- NULL, /* set_cursor_read_view */
- NULL, /* close_cursor_read_view */
- blackhole_create_handler, /* Create a new handler */
- NULL, /* Drop a database */
- NULL, /* Panic call */
- NULL, /* Start Consistent Snapshot */
- NULL, /* Flush logs */
- NULL, /* Show status */
- NULL, /* Partition flags */
- NULL, /* Alter table flags */
- NULL, /* Alter Tablespace */
- NULL, /* Fill FILES table */
- HTON_CAN_RECREATE,
- NULL, /* binlog_func */
- NULL, /* binlog_log_query */
- NULL /* release_temporary_latches */
-};
-
-
-static handler *blackhole_create_handler(TABLE_SHARE *table)
-{
- return new ha_blackhole(table);
-}
-
-
-/*****************************************************************************
-** BLACKHOLE tables
-*****************************************************************************/
-
-ha_blackhole::ha_blackhole(TABLE_SHARE *table_arg)
- :handler(&blackhole_hton, table_arg)
-{}
-
-
-static const char *ha_blackhole_exts[] = {
- NullS
-};
-
-const char **ha_blackhole::bas_ext() const
-{
- return ha_blackhole_exts;
-}
-
-int ha_blackhole::open(const char *name, int mode, uint test_if_locked)
-{
- DBUG_ENTER("ha_blackhole::open");
- thr_lock_init(&thr_lock);
- thr_lock_data_init(&thr_lock,&lock,NULL);
- DBUG_RETURN(0);
-}
-
-int ha_blackhole::close(void)
-{
- DBUG_ENTER("ha_blackhole::close");
- thr_lock_delete(&thr_lock);
- DBUG_RETURN(0);
-}
-
-int ha_blackhole::create(const char *name, TABLE *table_arg,
- HA_CREATE_INFO *create_info)
-{
- DBUG_ENTER("ha_blackhole::create");
- DBUG_RETURN(0);
-}
-
-const char *ha_blackhole::index_type(uint key_number)
-{
- DBUG_ENTER("ha_blackhole::index_type");
- DBUG_RETURN((table_share->key_info[key_number].flags & HA_FULLTEXT) ?
- "FULLTEXT" :
- (table_share->key_info[key_number].flags & HA_SPATIAL) ?
- "SPATIAL" :
- (table_share->key_info[key_number].algorithm ==
- HA_KEY_ALG_RTREE) ? "RTREE" : "BTREE");
-}
-
-int ha_blackhole::write_row(byte * buf)
-{
- DBUG_ENTER("ha_blackhole::write_row");
- DBUG_RETURN(0);
-}
-
-int ha_blackhole::rnd_init(bool scan)
-{
- DBUG_ENTER("ha_blackhole::rnd_init");
- DBUG_RETURN(0);
-}
-
-
-int ha_blackhole::rnd_next(byte *buf)
-{
- DBUG_ENTER("ha_blackhole::rnd_next");
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-}
-
-
-int ha_blackhole::rnd_pos(byte * buf, byte *pos)
-{
- DBUG_ENTER("ha_blackhole::rnd_pos");
- DBUG_ASSERT(0);
- DBUG_RETURN(0);
-}
-
-
-void ha_blackhole::position(const byte *record)
-{
- DBUG_ENTER("ha_blackhole::position");
- DBUG_ASSERT(0);
- DBUG_VOID_RETURN;
-}
-
-
-void ha_blackhole::info(uint flag)
-{
- DBUG_ENTER("ha_blackhole::info");
-
- records= 0;
- deleted= 0;
- errkey= 0;
- mean_rec_length= 0;
- data_file_length= 0;
- index_file_length= 0;
- max_data_file_length= 0;
- delete_length= 0;
- if (flag & HA_STATUS_AUTO)
- auto_increment_value= 1;
- DBUG_VOID_RETURN;
-}
-
-int ha_blackhole::external_lock(THD *thd, int lock_type)
-{
- DBUG_ENTER("ha_blackhole::external_lock");
- DBUG_RETURN(0);
-}
-
-
-uint ha_blackhole::lock_count(void) const
-{
- DBUG_ENTER("ha_blackhole::lock_count");
- DBUG_RETURN(0);
-}
-
-THR_LOCK_DATA **ha_blackhole::store_lock(THD *thd,
- THR_LOCK_DATA **to,
- enum thr_lock_type lock_type)
-{
- DBUG_ENTER("ha_blackhole::store_lock");
- DBUG_RETURN(to);
-}
-
-
-int ha_blackhole::index_read(byte * buf, const byte * key,
- uint key_len, enum ha_rkey_function find_flag)
-{
- DBUG_ENTER("ha_blackhole::index_read");
- DBUG_RETURN(0);
-}
-
-
-int ha_blackhole::index_read_idx(byte * buf, uint idx, const byte * key,
- uint key_len, enum ha_rkey_function find_flag)
-{
- DBUG_ENTER("ha_blackhole::index_read_idx");
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-}
-
-
-int ha_blackhole::index_read_last(byte * buf, const byte * key, uint key_len)
-{
- DBUG_ENTER("ha_blackhole::index_read_last");
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-}
-
-
-int ha_blackhole::index_next(byte * buf)
-{
- DBUG_ENTER("ha_blackhole::index_next");
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-}
-
-
-int ha_blackhole::index_prev(byte * buf)
-{
- DBUG_ENTER("ha_blackhole::index_prev");
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-}
-
-
-int ha_blackhole::index_first(byte * buf)
-{
- DBUG_ENTER("ha_blackhole::index_first");
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-}
-
-
-int ha_blackhole::index_last(byte * buf)
-{
- DBUG_ENTER("ha_blackhole::index_last");
- DBUG_RETURN(HA_ERR_END_OF_FILE);
-}
-
diff --git a/sql/ha_blackhole.h b/sql/ha_blackhole.h
deleted file mode 100644
index 15e12659aa0..00000000000
--- a/sql/ha_blackhole.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* Copyright (C) 2005 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifdef USE_PRAGMA_INTERFACE
-#pragma interface /* gcc class implementation */
-#endif
-
-/*
- Class definition for the blackhole storage engine
- "Dumbest named feature ever"
-*/
-class ha_blackhole: public handler
-{
- THR_LOCK_DATA lock; /* MySQL lock */
- THR_LOCK thr_lock;
-
-public:
- ha_blackhole(TABLE_SHARE *table_arg);
- ~ha_blackhole()
- {
- }
- /* The name that will be used for display purposes */
- const char *table_type() const { return "BLACKHOLE"; }
- /*
- The name of the index type that will be used for display
- don't implement this method unless you really have indexes
- */
- const char *index_type(uint key_number);
- const char **bas_ext() const;
- ulong table_flags() const
- {
- return(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
- HA_DUPP_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
- HA_FILE_BASED | HA_CAN_GEOMETRY | HA_READ_RND_SAME |
- HA_CAN_INSERT_DELAYED);
- }
- ulong index_flags(uint inx, uint part, bool all_parts) const
- {
- return ((table_share->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
- 0 : HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE |
- HA_READ_ORDER | HA_KEYREAD_ONLY);
- }
- /* The following defines can be increased if necessary */
-#define BLACKHOLE_MAX_KEY 64 /* Max allowed keys */
-#define BLACKHOLE_MAX_KEY_SEG 16 /* Max segments for key */
-#define BLACKHOLE_MAX_KEY_LENGTH 1000
- uint max_supported_keys() const { return BLACKHOLE_MAX_KEY; }
- uint max_supported_key_length() const { return BLACKHOLE_MAX_KEY_LENGTH; }
- uint max_supported_key_part_length() const { return BLACKHOLE_MAX_KEY_LENGTH; }
- int open(const char *name, int mode, uint test_if_locked);
- int close(void);
- int write_row(byte * buf);
- int rnd_init(bool scan);
- int rnd_next(byte *buf);
- int rnd_pos(byte * buf, byte *pos);
- int index_read(byte * buf, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_read_idx(byte * buf, uint idx, const byte * key,
- uint key_len, enum ha_rkey_function find_flag);
- int index_read_last(byte * buf, const byte * key, uint key_len);
- int index_next(byte * buf);
- int index_prev(byte * buf);
- int index_first(byte * buf);
- int index_last(byte * buf);
- void position(const byte *record);
- void info(uint flag);
- int external_lock(THD *thd, int lock_type);
- uint lock_count(void) const;
- int create(const char *name, TABLE *table_arg,
- HA_CREATE_INFO *create_info);
- THR_LOCK_DATA **store_lock(THD *thd,
- THR_LOCK_DATA **to,
- enum thr_lock_type lock_type);
- bool has_transactions() { return 1; }
-};
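
[Editor's note] The blackhole handler and its header are dropped from sql/ in this change; under the plugin interface the rest of the diff introduces, an engine advertises itself through a mysql_declare_plugin block instead of an entry in sys_table_types[]. The sketch below is only an assumption about how the relocated BLACKHOLE engine would re-declare itself, modeled on the federated/heap/myisam declarations added later in this same diff; the author string and the use of blackhole_hton.comment are guesses, not taken from the moved source.

/* Hypothetical sketch: BLACKHOLE registered via the new plugin interface.
   Mirrors the pattern used by the other engines in this change. */
#include <mysql/plugin.h>

extern handlerton blackhole_hton;     /* defined alongside the handler code */

mysql_declare_plugin(blackhole)
{
  MYSQL_STORAGE_ENGINE_PLUGIN,        /* plugin type                        */
  &blackhole_hton,                    /* type-specific descriptor           */
  blackhole_hton.name,                /* name shown by SHOW PLUGINS         */
  "MySQL AB",                         /* author (assumed)                   */
  blackhole_hton.comment,             /* description (assumed)              */
  NULL,                               /* plugin init                        */
  NULL,                               /* plugin deinit                      */
  0x0100 /* 1.0 */,
}
mysql_declare_plugin_end;
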
diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc
index 129a44b5721..f1d2b0025a9 100644
--- a/sql/ha_federated.cc
+++ b/sql/ha_federated.cc
@@ -351,9 +351,13 @@
#pragma implementation // gcc: Class implementation
#endif
+#ifdef WITH_FEDERATED_STORAGE_ENGINE
#include "ha_federated.h"
#include "m_string.h"
+
+#include <mysql/plugin.h>
+
/* Variables for federated share methods */
static HASH federated_open_tables; // To track open tables
pthread_mutex_t federated_mutex; // To init the hash
@@ -2804,3 +2808,18 @@ int ha_federated::execute_simple_query(const char *query, int len)
DBUG_RETURN(0);
}
+
+mysql_declare_plugin(federated)
+{
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ &federated_hton,
+ federated_hton.name,
+ "Patrick Galbraith and Brian Aker, MySQL AB",
+ "Federated Storage Engine",
+ NULL, /* Plugin Init */
+ NULL, /* Plugin Deinit */
+ 0x0100 /* 1.0 */,
+}
+mysql_declare_plugin_end;
+
+#endif
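
[Editor's note] This is the first of several identical mysql_declare_plugin blocks added in this diff. Judging from the builtin_*_plugin arrays referenced by sql_builtin.cc.in further down and the fields the removed ha_register_builtin_plugins() used to fill in, the macro pair plausibly expands to a NULL-terminated array of st_mysql_plugin descriptors; the exact expansion lives in <mysql/plugin.h> and is not shown here, so treat this as a sketch, not the real macro.

/* Rough expansion sketch (assumption): what mysql_declare_plugin(federated)
   ... mysql_declare_plugin_end might produce for a built-in build. */
struct st_mysql_plugin builtin_federated_plugin[]=
{
  {
    MYSQL_STORAGE_ENGINE_PLUGIN,                  /* plugin type            */
    &federated_hton,                              /* type descriptor (info) */
    federated_hton.name,                          /* name                   */
    "Patrick Galbraith and Brian Aker, MySQL AB", /* author                 */
    "Federated Storage Engine",                   /* description            */
    NULL,                                         /* init                   */
    NULL,                                         /* deinit                 */
    0x0100                                        /* version 1.0            */
  },
  {0, NULL, NULL, NULL, NULL, NULL, NULL, 0}      /* terminator: info == NULL
                                                     ends the registration
                                                     loop in plugin_init()  */
};
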
diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc
index f20dfe259fb..1223de37af8 100644
--- a/sql/ha_heap.cc
+++ b/sql/ha_heap.cc
@@ -706,3 +706,16 @@ bool ha_heap::check_if_incompatible_data(HA_CREATE_INFO *info,
return COMPATIBLE_DATA_NO;
return COMPATIBLE_DATA_YES;
}
+
+mysql_declare_plugin(heap)
+{
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ &heap_hton,
+ heap_hton.name,
+ NULL,
+ heap_hton.comment,
+ NULL,
+ NULL,
+ 0
+}
+mysql_declare_plugin_end;
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 1b0f4c34acc..e94b08cf4fb 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -42,6 +42,7 @@ have disables the InnoDB inlining in this file. */
#define MAX_ULONG_BIT ((ulong) 1 << (sizeof(ulong)*8-1))
+#ifdef WITH_INNOBASE_STORAGE_ENGINE
#include "ha_innodb.h"
pthread_mutex_t innobase_share_mutex, /* to protect innobase_open_files */
@@ -7533,3 +7534,19 @@ bool ha_innobase::check_if_incompatible_data(
return COMPATIBLE_DATA_YES;
}
+
+
+mysql_declare_plugin(innobase)
+{
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ &innobase_hton,
+ innobase_hton.name,
+ "Innobase OY",
+ "InnoDB Storage Engine",
+ NULL, /* Plugin Init */
+ NULL, /* Plugin Deinit */
+ 0x0100 /* 1.0 */,
+}
+mysql_declare_plugin_end;
+
+#endif
diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc
index ec39ee00efc..9a0a4a9896f 100644
--- a/sql/ha_myisam.cc
+++ b/sql/ha_myisam.cc
@@ -31,6 +31,8 @@
#include "../storage/myisam/rt_index.h"
#endif
+#include <mysql/plugin.h>
+
ulong myisam_recover_options= HA_RECOVER_NONE;
/* bits in myisam_recover_options */
@@ -1787,3 +1789,17 @@ bool ha_myisam::check_if_incompatible_data(HA_CREATE_INFO *info,
return COMPATIBLE_DATA_NO;
return COMPATIBLE_DATA_YES;
}
+
+
+mysql_declare_plugin(myisam)
+{
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ &myisam_hton,
+ myisam_hton.name,
+ "MySQL AB",
+ "MyISAM Storage Engine",
+ NULL, /* Plugin Init */
+ NULL, /* Plugin Deinit */
+ 0x0100 /* 1.0 */,
+}
+mysql_declare_plugin_end;
diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc
index 0ce4e1d8bcb..8130a5d939a 100644
--- a/sql/ha_myisammrg.cc
+++ b/sql/ha_myisammrg.cc
@@ -28,6 +28,8 @@
#include "../storage/myisammrg/myrg_def.h"
#endif
+#include <mysql/plugin.h>
+
/*****************************************************************************
** MyISAM MERGE tables
*****************************************************************************/
@@ -573,3 +575,16 @@ bool ha_myisammrg::check_if_incompatible_data(HA_CREATE_INFO *info,
*/
return COMPATIBLE_DATA_NO;
}
+
+mysql_declare_plugin(myisammrg)
+{
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ &myisammrg_hton,
+ myisammrg_hton.name,
+ "MySQL AB",
+ "MyISAMMRG Storage Engine",
+ NULL, /* Plugin Init */
+ NULL, /* Plugin Deinit */
+ 0x0100 /* 1.0 */,
+}
+mysql_declare_plugin_end;
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 28e026b8a10..83367fa4bee 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -27,6 +27,7 @@
#include "mysql_priv.h"
#include <my_dir.h>
+#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
#include "ha_ndbcluster.h"
#include <ndbapi/NdbApi.hpp>
#include <ndbapi/NdbScanFilter.hpp>
@@ -36,6 +37,8 @@
#include "ha_ndbcluster_binlog.h"
#include "ha_ndbcluster_tables.h"
+#include <mysql/plugin.h>
+
#ifdef ndb_dynamite
#undef assert
#define assert(x) do { if(x) break; ::printf("%s %d: assert failed: %s\n", __FILE__, __LINE__, #x); ::fflush(stdout); ::signal(SIGABRT,SIG_DFL); ::abort(); ::kill(::getpid(),6); ::kill(::getpid(),9); } while (0)
@@ -10146,3 +10149,19 @@ static int ndbcluster_fill_files_table(THD *thd, TABLE_LIST *tables, COND *cond)
}
DBUG_RETURN(0);
}
+
+
+mysql_declare_plugin(ndbcluster)
+{
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ &ndbcluster_hton,
+ ndbcluster_hton.name,
+ "MySQL AB",
+ "NDB Storage Engine",
+ NULL, /* Plugin Init */
+ NULL, /* Plugin Deinit */
+ 0x0100 /* 1.0 */,
+}
+mysql_declare_plugin_end;
+
+#endif
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index 60ccb661703..b248bb8534a 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -16,6 +16,7 @@
*/
#include "mysql_priv.h"
+#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
#include "ha_ndbcluster.h"
#ifdef HAVE_NDB_BINLOG
@@ -3510,3 +3511,4 @@ ndbcluster_show_status_binlog(THD* thd, stat_print_fn *stat_print,
}
#endif /* HAVE_NDB_BINLOG */
+#endif
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 3ee9a2954eb..fc9985cf87f 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -54,8 +54,11 @@
#include "mysql_priv.h"
+#ifdef WITH_PARTITION_STORAGE_ENGINE
#include "ha_partition.h"
+#include <mysql/plugin.h>
+
static const char *ha_par_ext= ".par";
#ifdef NOT_USED
static int free_share(PARTITION_SHARE * share);
@@ -5487,3 +5490,19 @@ static int free_share(PARTITION_SHARE *share)
return 0;
}
#endif /* NOT_USED */
+
+
+mysql_declare_plugin(partition)
+{
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ &partition_hton,
+ partition_hton.name,
+ "Mikael Ronstrom, MySQL AB",
+ "Partitioning Engine",
+ NULL, /* Plugin Init */
+ NULL, /* Plugin Deinit */
+ 0x0100 /* 1.0 */,
+}
+mysql_declare_plugin_end;
+
+#endif
diff --git a/sql/handler.cc b/sql/handler.cc
index 808dd0841c5..56938f2eff7 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -35,6 +35,7 @@
#define NDB_MAX_ATTRIBUTES_IN_TABLE 128
#include "ha_ndbcluster.h"
#endif
+
#ifdef WITH_PARTITION_STORAGE_ENGINE
#include "ha_partition.h"
#endif
@@ -43,7 +44,7 @@
#include "ha_innodb.h"
#endif
-extern handlerton *sys_table_types[];
+static handlerton *installed_htons[128];
#define BITMAP_STACKBUF_SIZE (128/8)
@@ -138,30 +139,8 @@ handlerton *ha_resolve_by_name(THD *thd, LEX_STRING *name)
}
-struct plugin_find_dbtype_st
-{
- enum legacy_db_type db_type;
- handlerton *hton;
-};
-
-
-static my_bool plugin_find_dbtype(THD *unused, st_plugin_int *plugin,
- void *arg)
-{
- handlerton *types= (handlerton *) plugin->plugin->info;
- if (types->db_type == ((struct plugin_find_dbtype_st *)arg)->db_type)
- {
- ((struct plugin_find_dbtype_st *)arg)->hton= types;
- return TRUE;
- }
- return FALSE;
-}
-
-
const char *ha_get_storage_engine(enum legacy_db_type db_type)
{
- struct plugin_find_dbtype_st info;
-
switch (db_type)
{
case DB_TYPE_DEFAULT:
@@ -169,13 +148,10 @@ const char *ha_get_storage_engine(enum legacy_db_type db_type)
case DB_TYPE_UNKNOWN:
return "UNKNOWN";
default:
- info.db_type= db_type;
-
- if (!plugin_foreach(NULL, plugin_find_dbtype,
- MYSQL_STORAGE_ENGINE_PLUGIN, &info))
+ if (db_type > DB_TYPE_UNKNOWN && db_type < DB_TYPE_DEFAULT &&
+ installed_htons[db_type])
+ return installed_htons[db_type]->name;
return "*NONE*";
-
- return info.hton->name;
}
}
@@ -190,8 +166,6 @@ static handler *create_default(TABLE_SHARE *table)
handlerton *ha_resolve_by_legacy_type(THD *thd, enum legacy_db_type db_type)
{
- struct plugin_find_dbtype_st info;
-
switch (db_type)
{
case DB_TYPE_DEFAULT:
@@ -202,12 +176,9 @@ handlerton *ha_resolve_by_legacy_type(THD *thd, enum legacy_db_type db_type)
case DB_TYPE_UNKNOWN:
return NULL;
default:
- info.db_type= db_type;
- if (!plugin_foreach(NULL, plugin_find_dbtype,
- MYSQL_STORAGE_ENGINE_PLUGIN, &info))
+ if (db_type > DB_TYPE_UNKNOWN && db_type < DB_TYPE_DEFAULT)
+ return installed_htons[db_type];
return NULL;
-
- return info.hton;
}
}
@@ -394,32 +365,77 @@ static int ha_finish_errors(void)
}
-static void ha_was_inited_ok(handlerton *ht)
+int ha_finalize_handlerton(st_plugin_int *plugin)
{
- uint tmp= ht->savepoint_offset;
- ht->savepoint_offset= savepoint_alloc_size;
- savepoint_alloc_size+= tmp;
- ht->slot= total_ha++;
- if (ht->prepare)
- total_ha_2pc++;
+ handlerton *hton;
+ DBUG_ENTER("ha_finalize_handlerton");
+
+ if (!(hton= (handlerton *) plugin->plugin->info))
+ DBUG_RETURN(1);
+
+ switch (hton->state)
+ {
+ case SHOW_OPTION_NO:
+ case SHOW_OPTION_DISABLED:
+ break;
+ case SHOW_OPTION_YES:
+ if (hton->panic && hton->panic(HA_PANIC_CLOSE))
+ DBUG_RETURN(1);
+ if (installed_htons[hton->db_type] == hton)
+ installed_htons[hton->db_type]= NULL;
+ break;
+ };
+ DBUG_RETURN(0);
}
-int ha_initialize_handlerton(handlerton *hton)
+int ha_initialize_handlerton(st_plugin_int *plugin)
{
+ handlerton *hton;
DBUG_ENTER("ha_initialize_handlerton");
- if (hton == NULL)
+ if (!(hton= (handlerton *) plugin->plugin->info))
DBUG_RETURN(1);
- switch (hton->state)
- {
+ /* for the sake of sanity, we set the handlerton name to be the
+ same as the plugin name */
+ hton->name= plugin->name.str;
+
+
+ switch (hton->state) {
case SHOW_OPTION_NO:
break;
case SHOW_OPTION_YES:
if (!hton->init || !hton->init())
{
- ha_was_inited_ok(hton);
+ uint tmp= hton->savepoint_offset;
+ hton->savepoint_offset= savepoint_alloc_size;
+ savepoint_alloc_size+= tmp;
+ hton->slot= total_ha++;
+ if (hton->prepare)
+ total_ha_2pc++;
+
+ /* now check the db_type for conflict */
+ if (hton->db_type <= DB_TYPE_UNKNOWN ||
+ hton->db_type >= DB_TYPE_DEFAULT ||
+ installed_htons[hton->db_type])
+ {
+ int idx= (int) DB_TYPE_FIRST_DYNAMIC;
+
+ while (idx < (int) DB_TYPE_DEFAULT && installed_htons[idx])
+ idx++;
+
+ if (idx == (int) DB_TYPE_DEFAULT)
+ {
+ sql_print_warning("Too many storage engines!");
+ DBUG_RETURN(1);
+ }
+ if (hton->db_type != DB_TYPE_UNKNOWN)
+ sql_print_warning("Storage engine '%s' has conflicting typecode. "
+ "Assigning value %d.", hton->name, idx);
+ hton->db_type= (enum legacy_db_type) idx;
+ }
+ installed_htons[hton->db_type]= hton;
break;
}
/* fall through */
@@ -436,7 +452,7 @@ static my_bool init_handlerton(THD *unused1, st_plugin_int *plugin,
{
if (plugin->state == PLUGIN_IS_UNINITIALIZED)
{
- ha_initialize_handlerton((handlerton *) plugin->plugin->info);
+ ha_initialize_handlerton(plugin);
plugin->state= PLUGIN_IS_READY;
}
return FALSE;
@@ -447,12 +463,15 @@ int ha_init()
{
int error= 0;
total_ha= savepoint_alloc_size= 0;
+ DBUG_ENTER("ha_init");
+
+ bzero(installed_htons, sizeof(installed_htons));
if (ha_init_errors())
- return 1;
+ DBUG_RETURN(1);
if (plugin_foreach(NULL, init_handlerton, MYSQL_STORAGE_ENGINE_PLUGIN, 0))
- return 1;
+ DBUG_RETURN(1);
DBUG_ASSERT(total_ha < MAX_HA);
/*
@@ -462,37 +481,7 @@ int ha_init()
*/
opt_using_transactions= total_ha>(ulong)opt_bin_log;
savepoint_alloc_size+= sizeof(SAVEPOINT);
- return error;
-}
-
-
-int ha_register_builtin_plugins()
-{
- handlerton **hton;
- uint size= 0;
- struct st_mysql_plugin *plugin;
- DBUG_ENTER("ha_register_builtin_plugins");
-
- for (hton= sys_table_types; *hton; hton++)
- size+= sizeof(struct st_mysql_plugin);
-
- if (!(plugin= (struct st_mysql_plugin *)
- my_once_alloc(size, MYF(MY_WME | MY_ZEROFILL))))
- DBUG_RETURN(1);
-
- for (hton= sys_table_types; *hton; hton++, plugin++)
- {
- plugin->type= MYSQL_STORAGE_ENGINE_PLUGIN;
- plugin->info= *hton;
- plugin->version= 0;
- plugin->name= (*hton)->name;
- plugin->author= NULL;
- plugin->descr= (*hton)->comment;
-
- if (plugin_register_builtin(plugin))
- DBUG_RETURN(1);
- }
- DBUG_RETURN(0);
+ DBUG_RETURN(error);
}
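
[Editor's note] The new ha_initialize_handlerton() keeps a fixed installed_htons[128] table indexed by legacy_db_type, so ha_resolve_by_legacy_type() and ha_get_storage_engine() become array lookups instead of plugin_foreach scans. When an engine claims a typecode that is out of range or already taken, it is pushed into the dynamic range that starts at DB_TYPE_FIRST_DYNAMIC (32). Below is a condensed restatement of that assignment logic for clarity; assign_db_type is a hypothetical helper name, everything else follows the patch.

/* Condensed restatement of the typecode-conflict handling added above.
   installed_htons[] is indexed by enum legacy_db_type; DB_TYPE_DEFAULT
   (127) is the last value and never assigned to a real engine. */
static enum legacy_db_type assign_db_type(handlerton *hton,
                                          handlerton *installed_htons[])
{
  if (hton->db_type <= DB_TYPE_UNKNOWN ||
      hton->db_type >= DB_TYPE_DEFAULT ||
      installed_htons[hton->db_type])              /* typecode clash        */
  {
    int idx= (int) DB_TYPE_FIRST_DYNAMIC;          /* first dynamic slot    */
    while (idx < (int) DB_TYPE_DEFAULT && installed_htons[idx])
      idx++;                                       /* find a free slot      */
    if (idx == (int) DB_TYPE_DEFAULT)
      return DB_TYPE_UNKNOWN;                      /* table full: give up   */
    hton->db_type= (enum legacy_db_type) idx;      /* reassign the engine   */
  }
  installed_htons[hton->db_type]= hton;
  return hton->db_type;
}
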
diff --git a/sql/handler.h b/sql/handler.h
index e93fdfe67e3..90bee61dc14 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -233,6 +233,7 @@ enum legacy_db_type
DB_TYPE_BLACKHOLE_DB,
DB_TYPE_PARTITION_DB,
DB_TYPE_BINLOG,
+ DB_TYPE_FIRST_DYNAMIC=32,
DB_TYPE_DEFAULT=127 // Must be last
};
@@ -1545,8 +1546,8 @@ static inline bool ha_storage_engine_is_enabled(const handlerton *db_type)
/* basic stuff */
int ha_init(void);
-int ha_register_builtin_plugins();
-int ha_initialize_handlerton(handlerton *hton);
+int ha_initialize_handlerton(st_plugin_int *plugin);
+int ha_finalize_handlerton(st_plugin_int *plugin);
TYPELIB *ha_known_exts(void);
int ha_panic(enum ha_panic_function flag);
diff --git a/sql/handlerton-win.cc b/sql/handlerton-win.cc
deleted file mode 100644
index 9ce4eab2444..00000000000
--- a/sql/handlerton-win.cc
+++ /dev/null
@@ -1,72 +0,0 @@
-#include "mysql_priv.h"
-
-extern handlerton heap_hton;
-extern handlerton myisam_hton;
-extern handlerton myisammrg_hton;
-extern handlerton binlog_hton;
-#ifdef WITH_INNOBASE_STORAGE_ENGINE
-extern handlerton innobase_hton;
-#endif
-#ifdef WITH_BERKELEY_STORAGE_ENGINE
-extern handlerton berkeley_hton;
-#endif
-#ifdef WITH_EXAMPLE_STORAGE_ENGINE
-extern handlerton example_hton;
-#endif
-#ifdef WITH_ARCHIVE_STORAGE_ENGINE
-extern handlerton archive_hton;
-#endif
-#ifdef WITH_CSV_STORAGE_ENGINE
-extern handlerton tina_hton;
-#endif
-#ifdef WITH_BLACKHOLE_STORAGE_ENGINE
-extern handlerton blackhole_hton;
-#endif
-#ifdef WITH_FEDERATED_STORAGE_ENGINE
-extern handlerton federated_hton;
-#endif
-#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
-extern handlerton ndbcluster_hton;
-#endif
-#ifdef WITH_PARTITION_STORAGE_ENGINE
-extern handlerton partition_hton;
-#endif
-
-/*
- This array is used for processing compiled in engines.
-*/
-handlerton *sys_table_types[]=
-{
- &heap_hton,
- &myisam_hton,
-#ifdef WITH_INNOBASE_STORAGE_ENGINE
- &innobase_hton,
-#endif
-#ifdef WITH_BERKELEY_STORAGE_ENGINE
- &berkeley_hton,
-#endif
-#ifdef WITH_EXAMPLE_STORAGE_ENGINE
- &example_hton,
-#endif
-#ifdef WITH_ARCHIVE_STORAGE_ENGINE
- &archive_hton,
-#endif
-#ifdef WITH_CSV_STORAGE_ENGINE
- &tina_hton,
-#endif
-#ifdef WITH_BLACKHOLE_STORAGE_ENGINE
- &blackhole_hton,
-#endif
-#ifdef WITH_FEDERATED_STORAGE_ENGINE
- &federated_hton,
-#endif
-#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
- &ndbcluster_hton,
-#endif
-#ifdef WITH_PARTITION_STORAGE_ENGINE
- &partition_hton,
-#endif
- &myisammrg_hton,
- &binlog_hton,
- NULL
-};
diff --git a/sql/handlerton.cc.in b/sql/handlerton.cc.in
deleted file mode 100644
index 55af8cdd8cf..00000000000
--- a/sql/handlerton.cc.in
+++ /dev/null
@@ -1,14 +0,0 @@
-
-#include "mysql_priv.h"
-
-extern handlerton heap_hton,myisam_hton,myisammrg_hton,
- binlog_hton@mysql_se_decls@;
-
-/*
- This array is used for processing compiled in engines.
-*/
-handlerton *sys_table_types[]=
-{
- &heap_hton,&myisam_hton@mysql_se_htons@,&myisammrg_hton,&binlog_hton,NULL
-};
-
diff --git a/sql/log.cc b/sql/log.cc
index 5c67443d238..82f430f968f 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -34,6 +34,8 @@
#include "message.h"
#endif
+#include <mysql/plugin.h>
+
/* max size of the log message */
#define MAX_LOG_BUFFER_SIZE 1024
#define MAX_USER_HOST_SIZE 512
@@ -4331,3 +4333,16 @@ err1:
return 1;
}
+
+mysql_declare_plugin(binlog)
+{
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ &binlog_hton,
+ binlog_hton.name,
+ "MySQL AB",
+ "Binlog Engine",
+ NULL, /* Plugin Init */
+ NULL, /* Plugin Deinit */
+ 0x0100 /* 1.0 */,
+}
+mysql_declare_plugin_end;
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 71067630535..0c7908c6b1a 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -2703,12 +2703,6 @@ static int init_common_variables(const char *conf_file_name, int argc,
return 1;
}
- if (ha_register_builtin_plugins())
- {
- sql_print_error("Failed to register built-in storage engines.");
- return 1;
- }
-
load_defaults(conf_file_name, groups, &argc, &argv);
defaults_argv=argv;
get_options(argc,argv);
@@ -3077,6 +3071,19 @@ static int init_server_components()
}
}
+ if (xid_cache_init())
+ {
+ sql_print_error("Out of memory");
+ unireg_abort(1);
+ }
+
+ /* We have to initialize the storage engines before CSV logging */
+ if (ha_init())
+ {
+ sql_print_error("Can't init databases");
+ unireg_abort(1);
+ }
+
#ifdef WITH_CSV_STORAGE_ENGINE
if (opt_bootstrap)
log_output_options= LOG_FILE;
@@ -3240,17 +3247,6 @@ server.");
using_update_log=1;
}
- if (xid_cache_init())
- {
- sql_print_error("Out of memory");
- unireg_abort(1);
- }
- if (ha_init())
- {
- sql_print_error("Can't init databases");
- unireg_abort(1);
- }
-
/*
Check that the default storage engine is actually available.
*/
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index e2bf37d6ef3..9646e913851 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -21,9 +21,10 @@
#endif
#include "mysql_priv.h"
-#include "ha_partition.h"
#ifdef WITH_PARTITION_STORAGE_ENGINE
+#include "ha_partition.h"
+
partition_info *partition_info::get_clone()
{
diff --git a/sql/sql_builtin.cc.in b/sql/sql_builtin.cc.in
new file mode 100644
index 00000000000..18705aa3dfb
--- /dev/null
+++ b/sql/sql_builtin.cc.in
@@ -0,0 +1,13 @@
+
+#include <mysql/plugin.h>
+
+typedef struct st_mysql_plugin builtin_plugin[];
+
+extern builtin_plugin
+ builtin_binlog_plugin@mysql_plugin_defs@;
+
+struct st_mysql_plugin *mysqld_builtins[]=
+{
+ builtin_binlog_plugin@mysql_plugin_defs@,(struct st_mysql_plugin *)0
+};
+
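
[Editor's note] sql_builtin.cc.in replaces handlerton.cc.in: instead of a sys_table_types[] array of handlertons, configure substitutes @mysql_plugin_defs@ with the builtin_*_plugin symbols of whichever engines are compiled in, and plugin_init() in sql_plugin.cc walks the resulting mysqld_builtins array. As an assumption about that substitution, a build with MyISAM and InnoDB enabled might generate roughly the following (the exact symbol list depends on configure):

/* Hypothetical post-configure output of sql_builtin.cc.in for a build with
   the MyISAM and InnoDB engines compiled in. */
#include <mysql/plugin.h>

typedef struct st_mysql_plugin builtin_plugin[];

extern builtin_plugin
  builtin_binlog_plugin, builtin_myisam_plugin, builtin_innobase_plugin;

struct st_mysql_plugin *mysqld_builtins[]=
{
  builtin_binlog_plugin, builtin_myisam_plugin, builtin_innobase_plugin,
  (struct st_mysql_plugin *)0                    /* end marker             */
};
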
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 4365d5b04ce..44b0fe1a2f1 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -21,7 +21,9 @@
*/
#include "mysql_priv.h"
+#ifdef WITH_INNOBASE_STORAGE_ENGINE
#include "ha_innodb.h"
+#endif
#include "sql_select.h"
#include "sp_head.h"
#include "sql_trigger.h"
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index 3d42bfea104..70dfb8bded9 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -19,6 +19,8 @@
#define REPORT_TO_LOG 1
#define REPORT_TO_USER 2
+extern struct st_mysql_plugin *mysqld_builtins[];
+
char *opt_plugin_dir_ptr;
char opt_plugin_dir[FN_REFLEN];
LEX_STRING plugin_type_names[]=
@@ -540,6 +542,53 @@ err:
DBUG_RETURN(1);
}
+static int plugin_finalize(THD *thd, struct st_plugin_int *plugin)
+{
+ int rc;
+ DBUG_ENTER("plugin_finalize");
+
+ if (plugin->ref_count)
+ {
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 0,
+ "Plugin is busy and will be uninstalled on shutdown");
+ goto err;
+ }
+
+ switch (plugin->plugin->type)
+ {
+ case MYSQL_STORAGE_ENGINE_PLUGIN:
+ if (ha_finalize_handlerton(plugin))
+ {
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 0,
+ "Storage engine shutdown failed. "
+ "It will be uninstalled on shutdown");
+ sql_print_warning("Storage engine '%s' shutdown failed. "
+ "It will be uninstalled on shutdown", plugin->name.str);
+ goto err;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (plugin->plugin->deinit)
+ {
+ if ((rc= plugin->plugin->deinit()))
+ {
+ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 0,
+ "Plugin deinit failed. "
+ "It will be uninstalled on shutdown");
+ sql_print_warning("Plugin '%s' deinit failed. "
+ "It will be uninstalled on shutdown", plugin->name.str);
+ goto err;
+ }
+ }
+
+ DBUG_RETURN(0);
+err:
+ DBUG_RETURN(1);
+}
+
static void plugin_call_initializer(void)
{
uint i;
@@ -598,6 +647,8 @@ static byte *get_hash_key(const byte *buff, uint *length,
int plugin_init(void)
{
int i;
+ struct st_mysql_plugin **builtins;
+ struct st_mysql_plugin *plugin;
DBUG_ENTER("plugin_init");
if (initialized)
@@ -617,6 +668,16 @@ int plugin_init(void)
get_hash_key, NULL, 0))
goto err;
}
+
+ /* Register all the built-in plugins */
+ for (builtins= mysqld_builtins; *builtins; builtins++)
+ {
+ for (plugin= *builtins; plugin->info; plugin++)
+ {
+ if (plugin_register_builtin(plugin))
+ goto err;
+ }
+ }
initialized= 1;
@@ -823,18 +884,10 @@ my_bool mysql_uninstall_plugin(THD *thd, LEX_STRING *name)
goto err;
}
- if (plugin->ref_count)
- {
- plugin->state= PLUGIN_IS_DELETED;
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 0,
- "Plugin is not deleted, waiting on tables.");
- }
- else
- {
- if (plugin->plugin->deinit)
- plugin->plugin->deinit();
+ if (!plugin_finalize(thd, plugin))
plugin_del(name);
- }
+ else
+ plugin->state= PLUGIN_IS_DELETED;
table->field[0]->store(name->str, name->length, system_charset_info);
table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS);
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 95433828a1e..042c0397be3 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -111,6 +111,10 @@ static my_bool show_plugins(THD *thd, st_plugin_int *plugin,
CHARSET_INFO *cs= system_charset_info;
char version_buf[20];
+ /* we normally hide all the built-in plugins */
+ if (!plugin->plugin_dl && !thd->lex->verbose)
+ return 0;
+
restore_record(table, s->default_values);
table->field[0]->store(plugin->name.str, plugin->name.length, cs);
@@ -3000,46 +3004,53 @@ int fill_schema_charsets(THD *thd, TABLE_LIST *tables, COND *cond)
}
-int fill_schema_engines(THD *thd, TABLE_LIST *tables, COND *cond)
+static my_bool iter_schema_engines(THD *thd, st_plugin_int *plugin,
+ void *ptable)
{
+ TABLE *table= (TABLE *) ptable;
+ handlerton *hton= (handlerton *) plugin->plugin->info;
const char *wild= thd->lex->wild ? thd->lex->wild->ptr() : NullS;
- TABLE *table= tables->table;
CHARSET_INFO *scs= system_charset_info;
- handlerton **types;
+ DBUG_ENTER("iter_schema_engines");
- DBUG_ENTER("fill_schema_engines");
-
- for (types= sys_table_types; *types; types++)
+ if (!(hton->flags & HTON_HIDDEN))
{
- if ((*types)->flags & HTON_HIDDEN)
- continue;
-
if (!(wild && wild[0] &&
- wild_case_compare(scs, (*types)->name,wild)))
+ wild_case_compare(scs, hton->name,wild)))
{
const char *tmp;
restore_record(table, s->default_values);
- table->field[0]->store((*types)->name, strlen((*types)->name), scs);
- tmp= (*types)->state ? "DISABLED" : "ENABLED";
+ table->field[0]->store(hton->name, strlen(hton->name), scs);
+ tmp= hton->state ? "DISABLED" : "ENABLED";
table->field[1]->store( tmp, strlen(tmp), scs);
- table->field[2]->store((*types)->comment, strlen((*types)->comment), scs);
- tmp= (*types)->commit ? "YES" : "NO";
+ table->field[2]->store(hton->comment, strlen(hton->comment), scs);
+ tmp= hton->commit ? "YES" : "NO";
table->field[3]->store( tmp, strlen(tmp), scs);
- tmp= (*types)->prepare ? "YES" : "NO";
+ tmp= hton->prepare ? "YES" : "NO";
table->field[4]->store( tmp, strlen(tmp), scs);
- tmp= (*types)->savepoint_set ? "YES" : "NO";
+ tmp= hton->savepoint_set ? "YES" : "NO";
table->field[5]->store( tmp, strlen(tmp), scs);
if (schema_table_store_record(thd, table))
DBUG_RETURN(1);
}
}
-
DBUG_RETURN(0);
}
+int fill_schema_engines(THD *thd, TABLE_LIST *tables, COND *cond)
+{
+ const char *wild= thd->lex->wild ? thd->lex->wild->ptr() : NullS;
+ TABLE *table= tables->table;
+ CHARSET_INFO *scs= system_charset_info;
+
+ return plugin_foreach(thd, iter_schema_engines,
+ MYSQL_STORAGE_ENGINE_PLUGIN, table);
+}
+
+
int fill_schema_collation(THD *thd, TABLE_LIST *tables, COND *cond)
{
CHARSET_INFO **cs;
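
[Editor's note] fill_schema_engines() now goes through the same plugin_foreach() callback mechanism that handler.cc uses for init_handlerton(), and show_plugins() hides built-in plugins unless the statement was SHOW FULL PLUGINS. From the two callbacks in this patch, the contract appears to be: the callback receives the st_plugin_int and an opaque argument, and returning TRUE aborts the scan (which plugin_foreach propagates to its caller). A minimal sketch of a callback in that style, assuming exactly that contract; count_engines is a made-up example, not part of the patch:

/* Sketch of the plugin_foreach() callback pattern used by this change. */
static my_bool count_engines(THD *thd, st_plugin_int *plugin, void *arg)
{
  uint *count= (uint *) arg;
  handlerton *hton= (handlerton *) plugin->plugin->info;
  if (!(hton->flags & HTON_HIDDEN))    /* skip hidden engines like binlog  */
    (*count)++;
  return FALSE;                        /* FALSE: keep iterating            */
}

  /* usage: count the visible storage engines */
  uint engines= 0;
  plugin_foreach(thd, count_engines, MYSQL_STORAGE_ENGINE_PLUGIN, &engines);
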
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index f570cbcd782..6a8a4f745ba 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -8173,7 +8173,7 @@ show_param:
if (prepare_schema_table(YYTHD, lex, 0, SCH_OPEN_TABLES))
YYABORT;
}
- | PLUGIN_SYM
+ | opt_full PLUGIN_SYM
{
LEX *lex= Lex;
WARN_DEPRECATED(yythd, "5.2", "SHOW PLUGIN", "'SHOW PLUGINS'");