diff options
author | unknown <acurtis@poseidon.ndb.mysql.com> | 2005-11-07 16:25:06 +0100 |
---|---|---|
committer | unknown <acurtis@poseidon.ndb.mysql.com> | 2005-11-07 16:25:06 +0100 |
commit | 6b3a9caef9b8cf42f8a706f778bba72db89cdd2b (patch) | |
tree | 2b8ffe29d899326e4ad244ac3f67d4fcf29bfae6 /sql | |
parent | df33aacd87ff08c27fd371a0bb348fe3986e6f95 (diff) | |
download | mariadb-git-6b3a9caef9b8cf42f8a706f778bba72db89cdd2b.tar.gz |
Make storage engines "pluggable", handlerton work
Makefile.am:
Changes to autoconf subst
config/ac-macros/ha_berkeley.m4:
simplify
config/ac-macros/ha_ndbcluster.m4:
simplify
config/ac-macros/ha_partition.m4:
simplify
configure.in:
strip configure of storage engine specific cruft and simplify
extra/Makefile.am:
changes to autoconf/automake subst
libmysqld/Makefile.am:
only compile storage engines if required.
make find object file a little smarter
libmysqld/examples/Makefile.am:
changes to autoconf subst
mysql-test/Makefile.am:
remove storage engine specific cruft
mysql-test/r/ps_1general.result:
cannot guarantee order of results from 'show storage engines'
mysql-test/r/show_check.result:
fix test - frm file fails to be deleted if it is invalid
mysql-test/r/sql_mode.result:
isam does not exist, test may need to be redone/fixed in 5.0
mysql-test/r/warnings.result:
isam no longer exists
mysql-test/t/ps_1general.test:
cannot guarantee order of results from 'show storage engines'
mysql-test/t/show_check.test:
fix test - frm file fails to be deleted if it is invalid
mysql-test/t/sql_mode.test:
isam does not exist, test may need to be redone/fixed in 5.0
mysql-test/t/system_mysql_db_fix.test:
change isam to myisam
mysql-test/t/view.test:
change isam to myisam
mysql-test/t/warnings.test:
isam no longer exists
sql/Makefile.am:
Make storage engines "pluggable" stage 1
only compile storage engines if included
sql/examples/ha_example.cc:
handlerton work
sql/examples/ha_example.h:
handlerton work
sql/examples/ha_tina.cc:
handlerton work
sql/examples/ha_tina.h:
handlerton work
sql/ha_archive.cc:
handlerton work
sql/ha_archive.h:
handlerton work
sql/ha_berkeley.cc:
handlerton work
sql/ha_berkeley.h:
handlerton work
sql/ha_blackhole.cc:
handlerton work
sql/ha_federated.cc:
handlerton work
sql/ha_federated.h:
handlerton work
sql/ha_heap.cc:
handlerton work
sql/ha_innodb.cc:
handlerton work
sql/ha_innodb.h:
handlerton work
sql/ha_myisam.cc:
handlerton work
sql/ha_myisammrg.cc:
handlerton work
sql/ha_ndbcluster.cc:
handlerton work
sql/ha_ndbcluster.h:
handlerton work
sql/ha_partition.cc:
handlerton work
sql/handler.cc:
start removing storage engine specific cruft
sql/handler.h:
start removing storage engine specific cruft
db_type for binlog handlerton
handlerton flag for not-user-selectable storage engines
sql/lex.h:
start removing storage engine specific cruft
sql/log.cc:
handlerton work
give binlog handlerton a 'real' db_type
sql/mysql_priv.h:
start removing storage engine specific cruft
sql/mysqld.cc:
start removing storage engine specific cruft
sql/set_var.cc:
start removing storage engine specific cruft
sql/sp_head.cc:
start removing storage engine specific cruft
sql/sql_class.cc:
start removing storage engine specific cruft
sql/sql_class.h:
start removing storage engine specific cruft
sql/sql_lex.h:
start removing storage engine specific cruft
sql/sql_manager.cc:
start removing storage engine specific cruft
sql/sql_manager.h:
start removing storage engine specific cruft
sql/sql_parse.cc:
start removing storage engine specific cruft
sql/sql_partition.cc:
start removing storage engine specific cruft
sql/sql_prepare.cc:
start removing storage engine specific cruft
sql/sql_show.cc:
start removing storage engine specific cruft
sql/sql_table.cc:
changed define from HAVE_PARTITION_DB to WITH_PARTITION_STORAGE_ENGINE
start removing storage engine specific cruft
sql/sql_update.cc:
changed define from HAVE_PARTITION_DB to WITH_PARTITION_STORAGE_ENGINE
sql/sql_yacc.yy:
start removing storage engine specific cruft
test if we should throw error
sql/table.cc:
changed define from HAVE_PARTITION_DB to WITH_PARTITION_STORAGE_ENGINE
sql/table.h:
changed define from HAVE_PARTITION_DB to WITH_PARTITION_STORAGE_ENGINE
sql/unireg.cc:
changed define from HAVE_PARTITION_DB to WITH_PARTITION_STORAGE_ENGINE
storage/ndb/include/kernel/kernel_types.h:
added my_config.h
storage/ndb/include/ndb_global.h.in:
added my_config.h
storage/ndb/include/ndb_types.h.in:
added my_config.h
config/ac-macros/storage.m4:
New BitKeeper file ``config/ac-macros/storage.m4''
sql/handlerton.cc.in:
New BitKeeper file ``sql/handlerton.cc.in''
Diffstat (limited to 'sql')
44 files changed, 989 insertions, 758 deletions
diff --git a/sql/Makefile.am b/sql/Makefile.am index 5516f3b6964..1ce8379b64a 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -21,7 +21,6 @@ MYSQLSHAREdir = $(pkgdatadir) MYSQLBASEdir= $(prefix) MYSQLLIBdir= $(pkglibdir) INCLUDES = @ZLIB_INCLUDES@ \ - @bdb_includes@ @innodb_includes@ @ndbcluster_includes@ \ -I$(top_builddir)/include -I$(top_srcdir)/include \ -I$(top_srcdir)/regex -I$(srcdir) $(yassl_includes) \ $(openssl_includes) @@ -38,12 +37,11 @@ LDADD = $(top_builddir)/storage/myisam/libmyisam.a \ $(top_builddir)/mysys/libmysys.a \ $(top_builddir)/dbug/libdbug.a \ $(top_builddir)/regex/libregex.a \ - $(top_builddir)/strings/libmystrings.a @ZLIB_LIBS@ @NDB_SCI_LIBS@ + $(top_builddir)/strings/libmystrings.a @ZLIB_LIBS@ mysqld_LDADD = @MYSQLD_EXTRA_LDFLAGS@ \ - @bdb_libs@ @innodb_libs@ @pstack_libs@ \ - @innodb_system_libs@ \ - @ndbcluster_libs@ @ndbcluster_system_libs@ \ + @pstack_libs@ \ + @mysql_se_objs@ @mysql_se_libs@ \ $(LDADD) $(CXXLDFLAGS) $(WRAPLIBS) @LIBDL@ \ @yassl_libs@ @openssl_libs@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \ @@ -53,9 +51,8 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \ procedure.h sql_class.h sql_lex.h sql_list.h \ sql_manager.h sql_map.h sql_string.h unireg.h \ sql_error.h field.h handler.h mysqld_suffix.h \ - ha_myisammrg.h\ - ha_heap.h ha_myisam.h ha_berkeley.h ha_innodb.h \ - ha_ndbcluster.h opt_range.h protocol.h \ + ha_heap.h ha_myisam.h ha_myisammrg.h ha_partition.h \ + opt_range.h protocol.h \ sql_select.h structs.h table.h sql_udf.h hash_filo.h\ lex.h lex_symbol.h sql_acl.h sql_crypt.h \ log_event.h sql_repl.h slave.h rpl_filter.h \ @@ -65,11 +62,8 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \ sp_head.h sp_pcontext.h sp_rcontext.h sp.h sp_cache.h \ parse_file.h sql_view.h sql_trigger.h \ sql_array.h sql_cursor.h \ - examples/ha_example.h ha_archive.h \ - examples/ha_tina.h ha_blackhole.h \ - ha_federated.h ha_partition.h \ sql_plugin.h -mysqld_SOURCES = 
sql_lex.cc sql_handler.cc \ +mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \ item.cc item_sum.cc item_buff.cc item_func.cc \ item_cmpfunc.cc item_strfunc.cc item_timefunc.cc \ thr_malloc.cc item_create.cc item_subselect.cc \ @@ -88,9 +82,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \ unireg.cc des_key_file.cc \ discover.cc time.cc opt_range.cc opt_sum.cc \ records.cc filesort.cc handler.cc \ - ha_heap.cc ha_myisam.cc ha_myisammrg.cc \ - ha_berkeley.cc ha_innodb.cc \ - ha_ndbcluster.cc \ + ha_heap.cc ha_myisam.cc ha_myisammrg.cc \ sql_db.cc sql_table.cc sql_rename.cc sql_crypt.cc \ sql_load.cc mf_iocache.cc field_conv.cc sql_show.cc \ sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \ @@ -103,11 +95,13 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \ tztime.cc my_time.c my_decimal.cc\ sp_head.cc sp_pcontext.cc sp_rcontext.cc sp.cc \ sp_cache.cc parse_file.cc sql_trigger.cc \ - examples/ha_example.cc ha_archive.cc \ - examples/ha_tina.cc ha_blackhole.cc \ - ha_partition.cc sql_partition.cc \ - ha_federated.cc \ - sql_plugin.cc + sql_plugin.cc\ + handlerton.cc +EXTRA_mysqld_SOURCES = ha_innodb.cc ha_berkeley.cc ha_archive.cc \ + ha_blackhole.cc ha_federated.cc ha_ndbcluster.cc \ + ha_partition.cc \ + examples/ha_tina.cc examples/ha_example.cc +mysqld_DEPENDENCIES = @mysql_se_objs@ gen_lex_hash_SOURCES = gen_lex_hash.cc gen_lex_hash_LDADD = $(LDADD) $(CXXLDFLAGS) mysql_tzinfo_to_sql_SOURCES = mysql_tzinfo_to_sql.cc @@ -156,6 +150,16 @@ sql_yacc.o: sql_yacc.cc sql_yacc.h $(HEADERS) lex_hash.h: gen_lex_hash$(EXEEXT) ./gen_lex_hash$(EXEEXT) > $@ +ha_berkeley.o: ha_berkeley.cc ha_berkeley.h + $(CXXCOMPILE) @bdb_includes@ $(LM_CFLAGS) -c $< + +ha_ndbcluster.o:ha_ndbcluster.cc ha_ndbcluster.h + $(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $< + +#Until we can get rid of dependencies on ha_ndbcluster.h +handler.o: handler.cc ha_ndbcluster.h + $(CXXCOMPILE) @ndbcluster_includes@ $(CXXFLAGS) -c $< + # For testing of udf_example.so; Works on platforms 
with gcc # (This is not part of our build process but only provided as an example) udf_example.so: udf_example.cc diff --git a/sql/examples/ha_example.cc b/sql/examples/ha_example.cc index d340b9289ec..68aed7c6483 100644 --- a/sql/examples/ha_example.cc +++ b/sql/examples/ha_example.cc @@ -69,9 +69,9 @@ #include "../mysql_priv.h" -#ifdef HAVE_EXAMPLE_DB #include "ha_example.h" +static handler* example_create_handler(TABLE *table); handlerton example_hton= { "EXAMPLE", @@ -94,6 +94,15 @@ handlerton example_hton= { NULL, /* create_cursor_read_view */ NULL, /* set_cursor_read_view */ NULL, /* close_cursor_read_view */ + example_create_handler, /* Create a new handler */ + NULL, /* Drop a database */ + NULL, /* Panic call */ + NULL, /* Release temporary latches */ + NULL, /* Update Statistics */ + NULL, /* Start Consistent Snapshot */ + NULL, /* Flush logs */ + NULL, /* Show status */ + NULL, /* Replication Report Sent Binlog */ HTON_CAN_RECREATE }; @@ -204,6 +213,12 @@ static int free_share(EXAMPLE_SHARE *share) } +static handler* example_create_handler(TABLE *table) +{ + return new ha_example(table); +} + + ha_example::ha_example(TABLE *table_arg) :handler(&example_hton, table_arg) {} @@ -696,4 +711,3 @@ int ha_example::create(const char *name, TABLE *table_arg, /* This is not implemented but we want someone to be able that it works. 
*/ DBUG_RETURN(0); } -#endif /* HAVE_EXAMPLE_DB */ diff --git a/sql/examples/ha_example.h b/sql/examples/ha_example.h index 37f38fe5210..d2ec83a5837 100644 --- a/sql/examples/ha_example.h +++ b/sql/examples/ha_example.h @@ -152,3 +152,4 @@ public: THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); //required }; + diff --git a/sql/examples/ha_tina.cc b/sql/examples/ha_tina.cc index 2c193f4ce84..46636b93d21 100644 --- a/sql/examples/ha_tina.cc +++ b/sql/examples/ha_tina.cc @@ -48,8 +48,6 @@ TODO: #include "mysql_priv.h" -#ifdef HAVE_CSV_DB - #include "ha_tina.h" #include <sys/mman.h> @@ -57,6 +55,7 @@ TODO: pthread_mutex_t tina_mutex; static HASH tina_open_tables; static int tina_init= 0; +static handler* tina_create_handler(TABLE *table); handlerton tina_hton= { "CSV", @@ -79,6 +78,15 @@ handlerton tina_hton= { NULL, /* create_cursor_read_view */ NULL, /* set_cursor_read_view */ NULL, /* close_cursor_read_view */ + tina_create_handler, /* Create a new handler */ + NULL, /* Drop a database */ + tina_end, /* Panic call */ + NULL, /* Release temporary latches */ + NULL, /* Update Statistics */ + NULL, /* Start Consistent Snapshot */ + NULL, /* Flush logs */ + NULL, /* Show status */ + NULL, /* Replication Report Sent Binlog */ HTON_CAN_RECREATE }; @@ -247,7 +255,7 @@ static int free_share(TINA_SHARE *share) DBUG_RETURN(result_code); } -bool tina_end() +int tina_end(ha_panic_function type) { if (tina_init) { @@ -255,7 +263,7 @@ bool tina_end() VOID(pthread_mutex_destroy(&tina_mutex)); } tina_init= 0; - return FALSE; + return 0; } /* @@ -272,6 +280,12 @@ byte * find_eoln(byte *data, off_t begin, off_t end) } +static handler* tina_create_handler(TABLE *table) +{ + return new ha_tina(table); +} + + ha_tina::ha_tina(TABLE *table_arg) :handler(&tina_hton, table_arg), /* @@ -909,4 +923,3 @@ int ha_tina::create(const char *name, TABLE *table_arg, DBUG_RETURN(0); } -#endif /* enable CSV */ diff --git a/sql/examples/ha_tina.h 
b/sql/examples/ha_tina.h index 0ac90a05812..2de6d8c8257 100644 --- a/sql/examples/ha_tina.h +++ b/sql/examples/ha_tina.h @@ -125,5 +125,5 @@ public: int chain_append(); }; -bool tina_end(); +int tina_end(ha_panic_function type); diff --git a/sql/ha_archive.cc b/sql/ha_archive.cc index c4801de5fb2..39f4c6667a0 100644 --- a/sql/ha_archive.cc +++ b/sql/ha_archive.cc @@ -20,7 +20,6 @@ #include "mysql_priv.h" -#ifdef HAVE_ARCHIVE_DB #include "ha_archive.h" #include <my_dir.h> @@ -135,6 +134,10 @@ static HASH archive_open_tables; #define DATA_BUFFER_SIZE 2 // Size of the data used in the data file #define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption +/* Static declarations for handerton */ +static handler *archive_create_handler(TABLE *table); + + /* dummy handlerton - only to have something to return from archive_db_init */ handlerton archive_hton = { "ARCHIVE", @@ -157,9 +160,22 @@ handlerton archive_hton = { NULL, /* create_cursor_read_view */ NULL, /* set_cursor_read_view */ NULL, /* close_cursor_read_view */ + archive_create_handler, /* Create a new handler */ + NULL, /* Drop a database */ + archive_db_end, /* Panic call */ + NULL, /* Release temporary latches */ + NULL, /* Update Statistics */ + NULL, /* Start Consistent Snapshot */ + NULL, /* Flush logs */ + NULL, /* Show status */ + NULL, /* Replication Report Sent Binlog */ HTON_NO_FLAGS }; +static handler *archive_create_handler(TABLE *table) +{ + return new ha_archive(table); +} /* Used for hash table that tracks open tables. 
@@ -215,7 +231,7 @@ error: FALSE OK */ -bool archive_db_end() +int archive_db_end(ha_panic_function type) { if (archive_inited) { @@ -223,7 +239,7 @@ bool archive_db_end() VOID(pthread_mutex_destroy(&archive_mutex)); } archive_inited= 0; - return FALSE; + return 0; } ha_archive::ha_archive(TABLE *table_arg) @@ -1129,4 +1145,3 @@ bool ha_archive::check_and_repair(THD *thd) DBUG_RETURN(HA_ADMIN_OK); } } -#endif /* HAVE_ARCHIVE_DB */ diff --git a/sql/ha_archive.h b/sql/ha_archive.h index 56a4b9d1e27..6d7a8c05ac9 100644 --- a/sql/ha_archive.h +++ b/sql/ha_archive.h @@ -109,5 +109,5 @@ public: }; bool archive_db_init(void); -bool archive_db_end(void); +int archive_db_end(ha_panic_function type); diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc index fa8cda44866..eeca6cb1657 100644 --- a/sql/ha_berkeley.cc +++ b/sql/ha_berkeley.cc @@ -53,7 +53,6 @@ #include "mysql_priv.h" -#ifdef HAVE_BERKELEY_DB #include <m_ctype.h> #include <myisampack.h> #include <hash.h> @@ -72,6 +71,9 @@ #define STATUS_ROW_COUNT_INIT 2 #define STATUS_BDB_ANALYZE 4 +const u_int32_t bdb_DB_TXN_NOSYNC= DB_TXN_NOSYNC; +const u_int32_t bdb_DB_RECOVER= DB_RECOVER; +const u_int32_t bdb_DB_PRIVATE= DB_PRIVATE; const char *ha_berkeley_ext=".db"; bool berkeley_shared_data=0; u_int32_t berkeley_init_flags= DB_PRIVATE | DB_RECOVER, berkeley_env_flags=0, @@ -107,6 +109,7 @@ static void berkeley_noticecall(DB_ENV *db_env, db_notices notice); static int berkeley_close_connection(THD *thd); static int berkeley_commit(THD *thd, bool all); static int berkeley_rollback(THD *thd, bool all); +static handler *berkeley_create_handler(TABLE *table); handlerton berkeley_hton = { "BerkeleyDB", @@ -129,9 +132,23 @@ handlerton berkeley_hton = { NULL, /* create_cursor_read_view */ NULL, /* set_cursor_read_view */ NULL, /* close_cursor_read_view */ - HTON_CLOSE_CURSORS_AT_COMMIT + berkeley_create_handler, /* Create a new handler */ + NULL, /* Drop a database */ + berkeley_end, /* Panic call */ + NULL, /* Release temporary 
latches */ + NULL, /* Update Statistics */ + NULL, /* Start Consistent Snapshot */ + berkeley_flush_logs, /* Flush logs */ + berkeley_show_status, /* Show status */ + NULL, /* Replication Report Sent Binlog */ + HTON_CLOSE_CURSORS_AT_COMMIT | HTON_FLUSH_AFTER_RENAME }; +handler *berkeley_create_handler(TABLE *table) +{ + return new ha_berkeley(table); +} + typedef struct st_berkeley_trx_data { DB_TXN *all; DB_TXN *stmt; @@ -215,18 +232,19 @@ error: } -bool berkeley_end(void) +int berkeley_end(ha_panic_function type) { - int error; + int error= 0; DBUG_ENTER("berkeley_end"); - if (!db_env) - return 1; /* purecov: tested */ - berkeley_cleanup_log_files(); - error=db_env->close(db_env,0); // Error is logged - db_env=0; - hash_free(&bdb_open_tables); - pthread_mutex_destroy(&bdb_mutex); - DBUG_RETURN(error != 0); + if (db_env) + { + berkeley_cleanup_log_files(); + error= db_env->close(db_env,0); // Error is logged + db_env= 0; + hash_free(&bdb_open_tables); + pthread_mutex_destroy(&bdb_mutex); + } + DBUG_RETURN(error); } static int berkeley_close_connection(THD *thd) @@ -280,7 +298,7 @@ static int berkeley_rollback(THD *thd, bool all) } -int berkeley_show_logs(Protocol *protocol) +static bool berkeley_show_logs(THD *thd, stat_print_fn *stat_print) { char **all_logs, **free_logs, **a, **f; int error=1; @@ -307,21 +325,19 @@ int berkeley_show_logs(Protocol *protocol) { for (a = all_logs, f = free_logs; *a; ++a) { - protocol->prepare_for_resend(); - protocol->store(*a, system_charset_info); - protocol->store("BDB", 3, system_charset_info); + const char *status; if (f && *f && strcmp(*a, *f) == 0) { - f++; - protocol->store(SHOW_LOG_STATUS_FREE, system_charset_info); + f++; + status= SHOW_LOG_STATUS_FREE; } else - protocol->store(SHOW_LOG_STATUS_INUSE, system_charset_info); - - if (protocol->write()) + status= SHOW_LOG_STATUS_INUSE; + + if (stat_print(thd, berkeley_hton.name, *a, status)) { - error=1; - goto err; + error=1; + goto err; } } } @@ -331,6 +347,16 @@ err: 
DBUG_RETURN(error); } +bool berkeley_show_status(THD *thd, stat_print_fn *stat_print, + enum ha_stat_type stat_type) +{ + switch (stat_type) { + case HA_ENGINE_LOGS: + return berkeley_show_logs(thd, stat_print); + default: + return FALSE; + } +} static void berkeley_print_error(const DB_ENV *db_env, const char *db_errpfx, const char *buffer) @@ -344,9 +370,7 @@ static void berkeley_noticecall(DB_ENV *db_env, db_notices notice) switch (notice) { case DB_NOTICE_LOGFILE_CHANGED: /* purecov: tested */ - pthread_mutex_lock(&LOCK_manager); - manager_status |= MANAGER_BERKELEY_LOG_CLEANUP; - pthread_mutex_unlock(&LOCK_manager); + mysql_manager_submit(berkeley_cleanup_log_files); pthread_cond_signal(&COND_manager); break; } @@ -2669,4 +2693,3 @@ bool ha_berkeley::check_if_incompatible_data(HA_CREATE_INFO *info, } -#endif /* HAVE_BERKELEY_DB */ diff --git a/sql/ha_berkeley.h b/sql/ha_berkeley.h index b3ce72d374b..9de56e6bf96 100644 --- a/sql/ha_berkeley.h +++ b/sql/ha_berkeley.h @@ -157,6 +157,9 @@ class ha_berkeley: public handler bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes); }; +extern const u_int32_t bdb_DB_TXN_NOSYNC; +extern const u_int32_t bdb_DB_RECOVER; +extern const u_int32_t bdb_DB_PRIVATE; extern bool berkeley_shared_data; extern u_int32_t berkeley_init_flags,berkeley_env_flags, berkeley_lock_type, berkeley_lock_types[]; @@ -166,6 +169,6 @@ extern long berkeley_lock_scan_time; extern TYPELIB berkeley_lock_typelib; bool berkeley_init(void); -bool berkeley_end(void); +int berkeley_end(ha_panic_function type); bool berkeley_flush_logs(void); -int berkeley_show_logs(Protocol *protocol); +bool berkeley_show_status(THD *thd, stat_print_fn *print, enum ha_stat_type); diff --git a/sql/ha_blackhole.cc b/sql/ha_blackhole.cc index 2505919af39..3503f5bec1b 100644 --- a/sql/ha_blackhole.cc +++ b/sql/ha_blackhole.cc @@ -20,9 +20,12 @@ #endif #include "mysql_priv.h" -#ifdef HAVE_BLACKHOLE_DB #include "ha_blackhole.h" +/* Static declarations for 
handlerton */ + +static handler *blackhole_create_handler(TABLE *table); + /* Blackhole storage engine handlerton */ @@ -47,9 +50,25 @@ handlerton blackhole_hton= { NULL, /* create_cursor_read_view */ NULL, /* set_cursor_read_view */ NULL, /* close_cursor_read_view */ + blackhole_create_handler, /* Create a new handler */ + NULL, /* Drop a database */ + NULL, /* Panic call */ + NULL, /* Release temporary latches */ + NULL, /* Update Statistics */ + NULL, /* Start Consistent Snapshot */ + NULL, /* Flush logs */ + NULL, /* Show status */ + NULL, /* Replication Report Sent Binlog */ HTON_CAN_RECREATE }; + +static handler *blackhole_create_handler(TABLE *table) +{ + return new ha_blackhole(table); +} + + /***************************************************************************** ** BLACKHOLE tables *****************************************************************************/ @@ -227,4 +246,3 @@ int ha_blackhole::index_last(byte * buf) DBUG_RETURN(HA_ERR_END_OF_FILE); } -#endif /* HAVE_BLACKHOLE_DB */ diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc index 703646f8bf3..8a26b9d8fb3 100644 --- a/sql/ha_federated.cc +++ b/sql/ha_federated.cc @@ -351,7 +351,6 @@ #pragma implementation // gcc: Class implementation #endif -#ifdef HAVE_FEDERATED_DB #include "ha_federated.h" #include "m_string.h" @@ -363,6 +362,11 @@ pthread_mutex_t federated_mutex; // This is the mutex we use to static int federated_init= FALSE; // Variable for checking the // init state of hash +/* Static declaration for handerton */ + +static handler *federated_create_handler(TABLE *table); + + /* Federated storage engine handlerton */ handlerton federated_hton= { @@ -386,10 +390,25 @@ handlerton federated_hton= { NULL, /* create_cursor_read_view */ NULL, /* set_cursor_read_view */ NULL, /* close_cursor_read_view */ + federated_create_handler, /* Create a new handler */ + NULL, /* Drop a database */ + federated_db_end, /* Panic call */ + NULL, /* Release temporary latches */ + NULL, /* Update 
Statistics */ + NULL, /* Start Consistent Snapshot */ + NULL, /* Flush logs */ + NULL, /* Show status */ + NULL, /* Replication Report Sent Binlog */ HTON_ALTER_NOT_SUPPORTED }; +static handler *federated_create_handler(TABLE *table) +{ + return new ha_federated(table); +} + + /* Function we use in the creation of our hash to get key. */ static byte *federated_get_key(FEDERATED_SHARE *share, uint *length, @@ -443,7 +462,7 @@ error: FALSE OK */ -bool federated_db_end() +int federated_db_end(ha_panic_function type) { if (federated_init) { @@ -451,7 +470,7 @@ bool federated_db_end() VOID(pthread_mutex_destroy(&federated_mutex)); } federated_init= 0; - return FALSE; + return 0; } /* @@ -2614,4 +2633,3 @@ bool ha_federated::get_error_message(int error, String* buf) DBUG_RETURN(FALSE); } -#endif /* HAVE_FEDERATED_DB */ diff --git a/sql/ha_federated.h b/sql/ha_federated.h index e4ac695b068..52f4fad9a27 100644 --- a/sql/ha_federated.h +++ b/sql/ha_federated.h @@ -301,4 +301,4 @@ public: }; bool federated_db_init(void); -bool federated_db_end(void); +int federated_db_end(ha_panic_function type); diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc index adce6bdc102..0b91a57b889 100644 --- a/sql/ha_heap.cc +++ b/sql/ha_heap.cc @@ -23,6 +23,9 @@ #include <myisampack.h> #include "ha_heap.h" + +static handler *heap_create_handler(TABLE *table); + handlerton heap_hton= { "MEMORY", SHOW_OPTION_YES, @@ -44,9 +47,24 @@ handlerton heap_hton= { NULL, /* create_cursor_read_view */ NULL, /* set_cursor_read_view */ NULL, /* close_cursor_read_view */ + heap_create_handler, /* Create a new handler */ + NULL, /* Drop a database */ + heap_panic, /* Panic call */ + NULL, /* Release temporary latches */ + NULL, /* Update Statistics */ + NULL, /* Start Consistent Snapshot */ + NULL, /* Flush logs */ + NULL, /* Show status */ + NULL, /* Replication Report Sent Binlog */ HTON_CAN_RECREATE }; +static handler *heap_create_handler(TABLE *table) +{ + return new ha_heap(table); +} + + 
/***************************************************************************** ** HEAP tables *****************************************************************************/ diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc index 1fdc36c84fe..916b9ae49b4 100644 --- a/sql/ha_innodb.cc +++ b/sql/ha_innodb.cc @@ -34,7 +34,6 @@ have disables the InnoDB inlining in this file. */ #include "mysql_priv.h" #include "slave.h" -#ifdef HAVE_INNOBASE_DB #include <m_ctype.h> #include <hash.h> #include <myisampack.h> @@ -205,6 +204,7 @@ static int innobase_rollback(THD* thd, bool all); static int innobase_rollback_to_savepoint(THD* thd, void *savepoint); static int innobase_savepoint(THD* thd, void *savepoint); static int innobase_release_savepoint(THD* thd, void *savepoint); +static handler *innobase_create_handler(TABLE *table); handlerton innobase_hton = { "InnoDB", @@ -227,9 +227,29 @@ handlerton innobase_hton = { innobase_create_cursor_view, innobase_set_cursor_view, innobase_close_cursor_view, + innobase_create_handler, /* Create a new handler */ + innobase_drop_database, /* Drop a database */ + innobase_end, /* Panic call */ + innobase_release_temporary_latches, /* Release temporary latches */ + innodb_export_status, /* Update Statistics */ + innobase_start_trx_and_assign_read_view, /* Start Consistent Snapshot */ + innobase_flush_logs, /* Flush logs */ + innobase_show_status, /* Show status */ +#ifdef HAVE_REPLICATION + innobase_repl_report_sent_binlog, /* Replication Report Sent Binlog */ +#else + NULL, +#endif HTON_NO_FLAGS }; + +static handler *innobase_create_handler(TABLE *table) +{ + return new ha_innobase(table); +} + + /********************************************************************* Commits a transaction in an InnoDB database. */ @@ -390,7 +410,7 @@ Call this function when mysqld passes control to the client. That is to avoid deadlocks on the adaptive hash S-latch possibly held by thd. For more documentation, see handler.cc. 
*/ -void +int innobase_release_temporary_latches( /*===============================*/ THD *thd) @@ -399,7 +419,7 @@ innobase_release_temporary_latches( if (!innodb_inited) { - return; + return 0; } trx = (trx_t*) thd->ha_data[innobase_hton.slot]; @@ -407,6 +427,7 @@ innobase_release_temporary_latches( if (trx) { innobase_release_stat_resources(trx); } + return 0; } /************************************************************************ @@ -1430,8 +1451,8 @@ error: /*********************************************************************** Closes an InnoDB database. */ -bool -innobase_end(void) +int +innobase_end(ha_panic_function type) /*==============*/ /* out: TRUE if error */ { @@ -5051,7 +5072,7 @@ ha_innobase::delete_table( /********************************************************************* Removes all tables in the named database inside InnoDB. */ -int +void innobase_drop_database( /*===================*/ /* out: error number */ @@ -5117,10 +5138,13 @@ innobase_drop_database( innobase_commit_low(trx); trx_free_for_mysql(trx); - +#ifdef NO_LONGER_INTERESTED_IN_DROP_DB_ERROR error = convert_error_code_to_mysql(error, NULL); return(error); +#else + return; +#endif } /************************************************************************* @@ -6425,11 +6449,12 @@ ha_innobase::transactional_table_lock( /**************************************************************************** Here we export InnoDB status variables to MySQL. */ -void +int innodb_export_status(void) /*======================*/ { srv_export_innodb_status(); + return 0; } /**************************************************************************** @@ -6439,9 +6464,9 @@ Monitor to the client. */ bool innodb_show_status( /*===============*/ - THD* thd) /* in: the MySQL query thread of the caller */ + THD* thd, /* in: the MySQL query thread of the caller */ + stat_print_fn *stat_print) { - Protocol* protocol = thd->protocol; trx_t* trx; static const char truncated_msg[] = "... 
truncated...\n"; const long MAX_STATUS_SIZE = 64000; @@ -6451,10 +6476,7 @@ innodb_show_status( DBUG_ENTER("innodb_show_status"); if (have_innodb != SHOW_OPTION_YES) { - my_message(ER_NOT_SUPPORTED_YET, - "Cannot call SHOW INNODB STATUS because skip-innodb is defined", - MYF(0)); - DBUG_RETURN(TRUE); + DBUG_RETURN(FALSE); } trx = check_trx_exists(thd); @@ -6516,28 +6538,14 @@ innodb_show_status( mutex_exit_noninline(&srv_monitor_file_mutex); - List<Item> field_list; - - field_list.push_back(new Item_empty_string("Status", flen)); + bool result = FALSE; - if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | - Protocol::SEND_EOF)) { - my_free(str, MYF(0)); - - DBUG_RETURN(TRUE); + if (stat_print(thd, innobase_hton.name, "", str)) { + result= TRUE; } + my_free(str, MYF(0)); - protocol->prepare_for_resend(); - protocol->store(str, flen, system_charset_info); - my_free(str, MYF(0)); - - if (protocol->write()) { - - DBUG_RETURN(TRUE); - } - send_eof(thd); - - DBUG_RETURN(FALSE); + DBUG_RETURN(FALSE); } /**************************************************************************** @@ -6546,10 +6554,10 @@ Implements the SHOW MUTEX STATUS command. . 
*/ bool innodb_mutex_show_status( /*===============*/ - THD* thd) /* in: the MySQL query thread of the caller */ + THD* thd, /* in: the MySQL query thread of the caller */ + stat_print_fn *stat_print) { - Protocol *protocol= thd->protocol; - List<Item> field_list; + char buf1[IO_SIZE], buf2[IO_SIZE]; mutex_t* mutex; ulint rw_lock_count= 0; ulint rw_lock_count_spin_loop= 0; @@ -6559,19 +6567,6 @@ innodb_mutex_show_status( ulonglong rw_lock_wait_time= 0; DBUG_ENTER("innodb_mutex_show_status"); - field_list.push_back(new Item_empty_string("Mutex", FN_REFLEN)); - field_list.push_back(new Item_empty_string("Module", FN_REFLEN)); - field_list.push_back(new Item_uint("Count", 21)); - field_list.push_back(new Item_uint("Spin_waits", 21)); - field_list.push_back(new Item_uint("Spin_rounds", 21)); - field_list.push_back(new Item_uint("OS_waits", 21)); - field_list.push_back(new Item_uint("OS_yields", 21)); - field_list.push_back(new Item_uint("OS_waits_time", 21)); - - if (protocol->send_fields(&field_list, - Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) - DBUG_RETURN(TRUE); - #ifdef MUTEX_PROTECT_TO_BE_ADDED_LATER mutex_enter(&mutex_list_mutex); #endif @@ -6584,17 +6579,16 @@ innodb_mutex_show_status( { if (mutex->count_using > 0) { - protocol->prepare_for_resend(); - protocol->store(mutex->cmutex_name, system_charset_info); - protocol->store(mutex->cfile_name, system_charset_info); - protocol->store((ulonglong)mutex->count_using); - protocol->store((ulonglong)mutex->count_spin_loop); - protocol->store((ulonglong)mutex->count_spin_rounds); - protocol->store((ulonglong)mutex->count_os_wait); - protocol->store((ulonglong)mutex->count_os_yield); - protocol->store((ulonglong)mutex->lspent_time/1000); - - if (protocol->write()) + my_snprintf(buf1, sizeof(buf1), "%s:%s", + mutex->cmutex_name, mutex->cfile_name); + my_snprintf(buf2, sizeof(buf2), + "count=%lu, spin_waits=%lu, spin_rounds=%lu, " + "os_waits=%lu, os_yields=%lu, os_wait_times=%lu", + mutex->count_using, 
mutex->count_spin_loop, + mutex->count_spin_rounds, + mutex->count_os_wait, mutex->count_os_yield, + mutex->lspent_time/1000); + if (stat_print(thd, innobase_hton.name, buf1, buf2)) { #ifdef MUTEX_PROTECT_TO_BE_ADDED_LATER mutex_exit(&mutex_list_mutex); @@ -6616,17 +6610,15 @@ innodb_mutex_show_status( mutex = UT_LIST_GET_NEXT(list, mutex); } - protocol->prepare_for_resend(); - protocol->store("rw_lock_mutexes", system_charset_info); - protocol->store("", system_charset_info); - protocol->store((ulonglong)rw_lock_count); - protocol->store((ulonglong)rw_lock_count_spin_loop); - protocol->store((ulonglong)rw_lock_count_spin_rounds); - protocol->store((ulonglong)rw_lock_count_os_wait); - protocol->store((ulonglong)rw_lock_count_os_yield); - protocol->store((ulonglong)rw_lock_wait_time/1000); - - if (protocol->write()) + my_snprintf(buf2, sizeof(buf2), + "count=%lu, spin_waits=%lu, spin_rounds=%lu, " + "os_waits=%lu, os_yields=%lu, os_wait_times=%lu", + rw_lock_count, rw_lock_count_spin_loop, + rw_lock_count_spin_rounds, + rw_lock_count_os_wait, rw_lock_count_os_yield, + rw_lock_wait_time/1000); + + if (stat_print(thd, innobase_hton.name, "rw_lock_mutexes", buf2)) { DBUG_RETURN(1); } @@ -6634,10 +6626,23 @@ innodb_mutex_show_status( #ifdef MUTEX_PROTECT_TO_BE_ADDED_LATER mutex_exit(&mutex_list_mutex); #endif - send_eof(thd); DBUG_RETURN(FALSE); } +bool innobase_show_status(THD* thd, stat_print_fn* stat_print, + enum ha_stat_type stat_type) +{ + switch (stat_type) { + case HA_ENGINE_STATUS: + return innodb_show_status(thd, stat_print); + case HA_ENGINE_MUTEX: + return innodb_mutex_show_status(thd, stat_print); + default: + return FALSE; + } +} + + /**************************************************************************** Handling the shared INNOBASE_SHARE structure that is needed to provide table locking. 
@@ -7470,4 +7475,3 @@ bool ha_innobase::check_if_incompatible_data(HA_CREATE_INFO *info, return COMPATIBLE_DATA_YES; } -#endif /* HAVE_INNOBASE_DB */ diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h index d687e8bf160..32de25c78c6 100644 --- a/sql/ha_innodb.h +++ b/sql/ha_innodb.h @@ -254,7 +254,7 @@ extern ulong srv_commit_concurrency; extern TYPELIB innobase_lock_typelib; bool innobase_init(void); -bool innobase_end(void); +int innobase_end(ha_panic_function type); bool innobase_flush_logs(void); uint innobase_get_free_space(void); @@ -272,12 +272,11 @@ int innobase_commit_complete(void* trx_handle); void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset); #endif -int innobase_drop_database(char *path); -bool innodb_show_status(THD* thd); -bool innodb_mutex_show_status(THD* thd); -void innodb_export_status(void); +void innobase_drop_database(char *path); +bool innobase_show_status(THD* thd, stat_print_fn*, enum ha_stat_type); +int innodb_export_status(void); -void innobase_release_temporary_latches(THD *thd); +int innobase_release_temporary_latches(THD *thd); void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset); diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index d786dd75148..f03434c74e6 100644 --- a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -50,6 +50,8 @@ TYPELIB myisam_stats_method_typelib= { ** MyISAM tables *****************************************************************************/ +static handler *myisam_create_handler(TABLE *table); + /* MyISAM handlerton */ handlerton myisam_hton= { @@ -77,9 +79,25 @@ handlerton myisam_hton= { MyISAM doesn't support transactions and doesn't have transaction-dependent context: cursors can survive a commit. 
*/ + myisam_create_handler, /* Create a new handler */ + NULL, /* Drop a database */ + mi_panic,/* Panic call */ + NULL, /* Release temporary latches */ + NULL, /* Update Statistics */ + NULL, /* Start Consistent Snapshot */ + NULL, /* Flush logs */ + NULL, /* Show status */ + NULL, /* Replication Report Sent Binlog */ HTON_CAN_RECREATE }; + +static handler *myisam_create_handler(TABLE *table) +{ + return new ha_myisam(table); +} + + // collect errors printed by mi_check routines static void mi_check_print_msg(MI_CHECK *param, const char* msg_type, diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc index cdea97bcea3..61dbc315185 100644 --- a/sql/ha_myisammrg.cc +++ b/sql/ha_myisammrg.cc @@ -32,6 +32,8 @@ ** MyISAM MERGE tables *****************************************************************************/ +static handler *myisammrg_create_handler(TABLE *table); + /* MyISAM MERGE handlerton */ handlerton myisammrg_hton= { @@ -55,9 +57,23 @@ handlerton myisammrg_hton= { NULL, /* create_cursor_read_view */ NULL, /* set_cursor_read_view */ NULL, /* close_cursor_read_view */ + myisammrg_create_handler, /* Create a new handler */ + NULL, /* Drop a database */ + myrg_panic, /* Panic call */ + NULL, /* Release temporary latches */ + NULL, /* Update Statistics */ + NULL, /* Start Consistent Snapshot */ + NULL, /* Flush logs */ + NULL, /* Show status */ + NULL, /* Replication Report Sent Binlog */ HTON_CAN_RECREATE }; +static handler *myisammrg_create_handler(TABLE *table) +{ + return new ha_myisammrg(table); +} + ha_myisammrg::ha_myisammrg(TABLE *table_arg) :handler(&myisammrg_hton, table_arg), file(0) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index fc31f4854ab..c7b168ee9fd 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -26,7 +26,6 @@ #include "mysql_priv.h" -#ifdef HAVE_NDBCLUSTER_DB #include <my_dir.h> #include "ha_ndbcluster.h" #include <ndbapi/NdbApi.hpp> @@ -35,9 +34,14 @@ // options from from mysqld.cc extern my_bool 
opt_ndb_optimized_node_selection; -extern enum ndb_distribution opt_ndb_distribution_id; extern const char *opt_ndbcluster_connectstring; +const char *ndb_distribution_names[]= {"KEYHASH", "LINHASH", NullS}; +TYPELIB ndb_distribution_typelib= { array_elements(ndb_distribution_names)-1, + "", ndb_distribution_names, NULL }; +const char *opt_ndb_distribution= ndb_distribution_names[ND_KEYHASH]; +enum ndb_distribution opt_ndb_distribution_id= ND_KEYHASH; + // Default value for parallelism static const int parallelism= 0; @@ -51,6 +55,7 @@ static const char share_prefix[]= "./"; static int ndbcluster_close_connection(THD *thd); static int ndbcluster_commit(THD *thd, bool all); static int ndbcluster_rollback(THD *thd, bool all); +static handler* ndbcluster_create_handler(TABLE *table); handlerton ndbcluster_hton = { "ndbcluster", @@ -73,9 +78,23 @@ handlerton ndbcluster_hton = { NULL, /* create_cursor_read_view */ NULL, /* set_cursor_read_view */ NULL, /* close_cursor_read_view */ + ndbcluster_create_handler, /* Create a new handler */ + ndbcluster_drop_database, /* Drop a database */ + ndbcluster_end, /* Panic call */ + NULL, /* Release temporary latches */ + NULL, /* Update Statistics */ + NULL, /* Start Consistent Snapshot */ + NULL, /* Flush logs */ + ndbcluster_show_status, /* Show status */ + NULL, /* Replication Report Sent Binlog */ HTON_NO_FLAGS }; +static handler *ndbcluster_create_handler(TABLE *table) +{ + return new ha_ndbcluster(table); +} + #define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8 #define NDB_FAILED_AUTO_INCREMENT ~(Uint64)0 @@ -4629,9 +4648,10 @@ extern "C" byte* tables_get_key(const char *entry, uint *length, /* Drop a database in NDB Cluster - */ + NOTE add a dummy void function, since stupid handlerton is returning void instead of int... 
+*/ -int ndbcluster_drop_database(const char *path) +int ndbcluster_drop_database_impl(const char *path) { DBUG_ENTER("ndbcluster_drop_database"); THD *thd= current_thd; @@ -4646,13 +4666,13 @@ int ndbcluster_drop_database(const char *path) DBUG_PRINT("enter", ("db: %s", dbname)); if (!(ndb= check_ndb_in_thd(thd))) - DBUG_RETURN(HA_ERR_NO_CONNECTION); + DBUG_RETURN(-1); // List tables in NDB NDBDICT *dict= ndb->getDictionary(); if (dict->listObjects(list, NdbDictionary::Object::UserTable) != 0) - ERR_RETURN(dict->getNdbError()); + DBUG_RETURN(-1); for (i= 0 ; i < list.count ; i++) { NdbDictionary::Dictionary::List::Element& elmt= list.elements[i]; @@ -4685,6 +4705,10 @@ int ndbcluster_drop_database(const char *path) DBUG_RETURN(ret); } +void ndbcluster_drop_database(char *path) +{ + ndbcluster_drop_database_impl(path); +} /* find all tables in ndb and discover those needed */ @@ -5057,7 +5081,7 @@ ndbcluster_init_error: ndbcluster_init() */ -bool ndbcluster_end() +int ndbcluster_end(ha_panic_function type) { DBUG_ENTER("ndbcluster_end"); @@ -7941,44 +7965,33 @@ ha_ndbcluster::generate_scan_filter(Ndb_cond_stack *ndb_cond_stack, /* Implements the SHOW NDB STATUS command. 
*/ -int -ndbcluster_show_status(THD* thd) +bool +ndbcluster_show_status(THD* thd, stat_print_fn *stat_print, + enum ha_stat_type stat_type) { - Protocol *protocol= thd->protocol; - + char buf[IO_SIZE]; DBUG_ENTER("ndbcluster_show_status"); if (have_ndbcluster != SHOW_OPTION_YES) { - my_message(ER_NOT_SUPPORTED_YET, - "Cannot call SHOW NDBCLUSTER STATUS because skip-ndbcluster is defined", - MYF(0)); - DBUG_RETURN(TRUE); + DBUG_RETURN(FALSE); + } + if (stat_type != HA_ENGINE_STATUS) + { + DBUG_RETURN(FALSE); } - List<Item> field_list; - field_list.push_back(new Item_empty_string("free_list", 255)); - field_list.push_back(new Item_return_int("created", 10,MYSQL_TYPE_LONG)); - field_list.push_back(new Item_return_int("free", 10,MYSQL_TYPE_LONG)); - field_list.push_back(new Item_return_int("sizeof", 10,MYSQL_TYPE_LONG)); - - if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) - DBUG_RETURN(TRUE); - if (get_thd_ndb(thd) && get_thd_ndb(thd)->ndb) { Ndb* ndb= (get_thd_ndb(thd))->ndb; Ndb::Free_list_usage tmp; tmp.m_name= 0; while (ndb->get_free_list_usage(&tmp)) { - protocol->prepare_for_resend(); - - protocol->store(tmp.m_name, &my_charset_bin); - protocol->store((uint)tmp.m_created); - protocol->store((uint)tmp.m_free); - protocol->store((uint)tmp.m_sizeof); - if (protocol->write()) - DBUG_RETURN(TRUE); + my_snprintf(buf, sizeof(buf), + "created=%u, free=%u, sizeof=%u", + tmp.m_created, tmp.m_free, tmp.m_sizeof); + if (stat_print(thd, ndbcluster_hton.name, tmp.m_name, buf)) + DBUG_RETURN(TRUE); } } send_eof(thd); @@ -8192,4 +8205,3 @@ bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info, return COMPATIBLE_DATA_YES; } -#endif /* HAVE_NDBCLUSTER_DB */ diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 5a9b435121d..40c9450809c 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -25,7 +25,7 @@ #pragma interface /* gcc class implementation */ #endif -#include <ndbapi/NdbApi.hpp> +#include <NdbApi.hpp> 
#include <ndbapi_limits.h> class Ndb; // Forward declaration @@ -136,7 +136,6 @@ struct negated_function_mapping NDB_FUNC_TYPE neg_fun; }; -enum ndb_distribution { ND_KEYHASH= 0, ND_LINHASH= 1 }; /* Define what functions can be negated in condition pushdown. @@ -615,7 +614,7 @@ static void set_tabname(const char *pathname, char *tabname); const char *tabname, bool global); private: - friend int ndbcluster_drop_database(const char *path); + friend int ndbcluster_drop_database_impl(const char *path); int alter_table_name(const char *to); static int delete_table(ha_ndbcluster *h, Ndb *ndb, const char *path, @@ -772,7 +771,7 @@ private: extern struct show_var_st ndb_status_variables[]; bool ndbcluster_init(void); -bool ndbcluster_end(void); +int ndbcluster_end(ha_panic_function flag); int ndbcluster_discover(THD* thd, const char* dbname, const char* name, const void** frmblob, uint* frmlen); @@ -780,9 +779,9 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, const char *wild, bool dir, List<char> *files); int ndbcluster_table_exists_in_engine(THD* thd, const char *db, const char *name); -int ndbcluster_drop_database(const char* path); +void ndbcluster_drop_database(char* path); void ndbcluster_print_error(int error, const NdbOperation *error_op); -int ndbcluster_show_status(THD*); +bool ndbcluster_show_status(THD*,stat_print_fn *,enum ha_stat_type); diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index a7cf6eea49c..aadcb5a664d 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -54,7 +54,6 @@ #include <mysql_priv.h> -#ifdef HAVE_PARTITION_DB #include "ha_partition.h" static const char *ha_par_ext= ".par"; @@ -67,12 +66,14 @@ static PARTITION_SHARE *get_share(const char *table_name, TABLE * table); MODULE create/delete handler object ****************************************************************************/ +static handler* partition_create_handler(TABLE *table); + handlerton partition_hton = { "partition", SHOW_OPTION_YES, - 
"Partition engine", /* A comment used by SHOW to describe an engine */ + "Partition Storage Engine Helper", /* A comment used by SHOW to describe an engine */ DB_TYPE_PARTITION_DB, - 0, /* Method that initizlizes a storage engine */ + 0, /* Method that initializes a storage engine */ 0, /* slot */ 0, /* savepoint size */ NULL /*ndbcluster_close_connection*/, @@ -88,9 +89,23 @@ handlerton partition_hton = { NULL, NULL, NULL, - HTON_NO_FLAGS + partition_create_handler, /* Create a new handler */ + NULL, /* Drop a database */ + NULL, /* Panic call */ + NULL, /* Release temporary latches */ + NULL, /* Update Statistics */ + NULL, /* Start Consistent Snapshot */ + NULL, /* Flush logs */ + NULL, /* Show status */ + NULL, /* Replication Report Sent Binlog */ + HTON_NOT_USER_SELECTABLE }; +static handler* partition_create_handler(TABLE *table) +{ + return new ha_partition(table); +} + ha_partition::ha_partition(TABLE *table) :handler(&partition_hton, table), m_part_info(NULL), m_create_handler(FALSE), m_is_sub_partitioned(0) @@ -947,6 +962,8 @@ int ha_partition::close(void) { handler **file; DBUG_ENTER("ha_partition::close"); + + delete_queue(&queue); file= m_file; do { @@ -3252,4 +3269,3 @@ static int free_share(PARTITION_SHARE *share) return 0; } #endif /* NOT_USED */ -#endif /* HAVE_PARTITION_DB */ diff --git a/sql/handler.cc b/sql/handler.cc index fd1be7638b0..3db2f76aef8 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -27,112 +27,19 @@ #include "ha_myisammrg.h" -/* - We have dummy hanldertons in case the handler has not been compiled - in. This will be removed in 5.1. 
-*/ -#ifdef HAVE_BERKELEY_DB -#include "ha_berkeley.h" -extern handlerton berkeley_hton; -#else -handlerton berkeley_hton = { "BerkeleyDB", SHOW_OPTION_NO, - "Supports transactions and page-level locking", DB_TYPE_BERKELEY_DB, NULL, - 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, HTON_NO_FLAGS }; -#endif -#ifdef HAVE_BLACKHOLE_DB -#include "ha_blackhole.h" -extern handlerton blackhole_hton; -#else -handlerton blackhole_hton = { "BLACKHOLE", SHOW_OPTION_NO, - "/dev/null storage engine (anything you write to it disappears)", - DB_TYPE_BLACKHOLE_DB, NULL, 0, 0, NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - HTON_NO_FLAGS }; -#endif -#ifdef HAVE_EXAMPLE_DB -#include "examples/ha_example.h" -extern handlerton example_hton; -#else -handlerton example_hton = { "EXAMPLE", SHOW_OPTION_NO, - "Example storage engine", - DB_TYPE_EXAMPLE_DB, NULL, 0, 0, NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - HTON_NO_FLAGS }; -#endif -#ifdef HAVE_PARTITION_DB -#include "ha_partition.h" -extern handlerton partition_hton; -#else -handlerton partition_hton = { "partition", SHOW_OPTION_NO, - "Partition engine", - DB_TYPE_EXAMPLE_DB, NULL, 0, 0, NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - HTON_NO_FLAGS }; -#endif -#ifdef HAVE_ARCHIVE_DB -#include "ha_archive.h" -extern handlerton archive_hton; -#else -handlerton archive_hton = { "ARCHIVE", SHOW_OPTION_NO, - "Archive storage engine", DB_TYPE_ARCHIVE_DB, NULL, 0, 0, NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - HTON_NO_FLAGS }; -#endif -#ifdef HAVE_CSV_DB -#include "examples/ha_tina.h" -extern handlerton tina_hton; -#else -handlerton tina_hton = { "CSV", SHOW_OPTION_NO, "CSV storage engine", - DB_TYPE_CSV_DB, NULL, 0, 0, NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - HTON_NO_FLAGS }; -#endif -#ifdef HAVE_INNOBASE_DB -#include 
"ha_innodb.h" -extern handlerton innobase_hton; -#else -handlerton innobase_hton = { "InnoDB", SHOW_OPTION_NO, - "Supports transactions, row-level locking, and foreign keys", - DB_TYPE_INNODB, NULL, 0, 0, NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - HTON_NO_FLAGS }; -#endif -#ifdef HAVE_NDBCLUSTER_DB +#include <myisampack.h> +#include <errno.h> + +#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE +#define NDB_MAX_ATTRIBUTES_IN_TABLE 128 #include "ha_ndbcluster.h" -extern handlerton ndbcluster_hton; -#else -handlerton ndbcluster_hton = { "ndbcluster", SHOW_OPTION_NO, - "Clustered, fault-tolerant, memory-based tables", - DB_TYPE_NDBCLUSTER, NULL, 0, 0, NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - HTON_NO_FLAGS }; #endif -#ifdef HAVE_FEDERATED_DB -#include "ha_federated.h" -extern handlerton federated_hton; -#else -handlerton federated_hton = { "FEDERATED", SHOW_OPTION_NO, - "Federated MySQL storage engine", DB_TYPE_FEDERATED_DB, NULL, 0, 0, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - HTON_NO_FLAGS }; +#ifdef WITH_PARTITION_STORAGE_ENGINE +#include "ha_partition.h" #endif -#include <myisampack.h> -#include <errno.h> - -extern handlerton myisam_hton; -extern handlerton myisammrg_hton; -extern handlerton heap_hton; -extern handlerton binlog_hton; - -/* - Obsolete -*/ -handlerton isam_hton = { "ISAM", SHOW_OPTION_NO, "Obsolete storage engine", - DB_TYPE_ISAM, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, HTON_NO_FLAGS }; - +extern handlerton *sys_table_types[]; + /* static functions defined in this file */ static SHOW_COMP_OPTION have_yes= SHOW_OPTION_YES; @@ -144,28 +51,6 @@ ulong total_ha_2pc; /* size of savepoint storage area (see ha_init) */ ulong savepoint_alloc_size; -/* - This array is used for processing compiled in engines. 
-*/ -handlerton *sys_table_types[]= -{ - &myisam_hton, - &heap_hton, - &innobase_hton, - &berkeley_hton, - &blackhole_hton, - &example_hton, - &archive_hton, - &tina_hton, - &ndbcluster_hton, - &federated_hton, - &myisammrg_hton, - &binlog_hton, - &isam_hton, - &partition_hton, - NULL -}; - struct show_table_alias_st sys_table_aliases[]= { {"INNOBASE", "InnoDB"}, @@ -203,9 +88,11 @@ enum db_type ha_resolve_by_name(const char *name, uint namelen) retest: for (types= sys_table_types; *types; types++) { - if (!my_strnncoll(&my_charset_latin1, - (const uchar *)name, namelen, - (const uchar *)(*types)->name, strlen((*types)->name))) + if ((!my_strnncoll(&my_charset_latin1, + (const uchar *)name, namelen, + (const uchar *)(*types)->name, + strlen((*types)->name))) && + !((*types)->flags & HTON_NOT_USER_SELECTABLE)) return (enum db_type) (*types)->db_type; } @@ -258,9 +145,8 @@ my_bool ha_storage_engine_is_enabled(enum db_type database_type) handlerton **types; for (types= sys_table_types; *types; types++) { - if ((database_type == (*types)->db_type) && - ((*types)->state == SHOW_OPTION_YES)) - return TRUE; + if (database_type == (*types)->db_type) + return ((*types)->state == SHOW_OPTION_YES) ? 
TRUE : FALSE; } return FALSE; } @@ -273,13 +159,6 @@ enum db_type ha_checktype(THD *thd, enum db_type database_type, { if (ha_storage_engine_is_enabled(database_type)) return database_type; -#ifdef HAVE_PARTITION_DB - /* - Partition handler is not in the list of handlers shown since it is an internal handler - */ - if (database_type == DB_TYPE_PARTITION_DB) - return database_type; -#endif if (no_substitute) { if (report_error) @@ -312,81 +191,33 @@ enum db_type ha_checktype(THD *thd, enum db_type database_type, handler *get_new_handler(TABLE *table, MEM_ROOT *alloc, enum db_type db_type) { - handler *file; - switch (db_type) { -#ifndef NO_HASH - case DB_TYPE_HASH: - file= new (alloc) ha_hash(table); - break; -#endif - case DB_TYPE_MRG_ISAM: - file= new (alloc) ha_myisammrg(table); - break; -#ifdef HAVE_BERKELEY_DB - case DB_TYPE_BERKELEY_DB: - file= new (alloc) ha_berkeley(table); - break; -#endif -#ifdef HAVE_INNOBASE_DB - case DB_TYPE_INNODB: - file= new (alloc) ha_innobase(table); - break; -#endif -#ifdef HAVE_EXAMPLE_DB - case DB_TYPE_EXAMPLE_DB: - file= new ha_example(table); - break; -#endif -#ifdef HAVE_PARTITION_DB - case DB_TYPE_PARTITION_DB: + handler *file= NULL; + handlerton **types; + /* + handlers are allocated with new in the handlerton create() function + we need to set the thd mem_root for these to be allocated correctly + */ + THD *thd= current_thd; + MEM_ROOT *thd_save_mem_root= thd->mem_root; + thd->mem_root= alloc; + for (types= sys_table_types; *types; types++) { - file= new (alloc) ha_partition(table); - break; + if (db_type == (*types)->db_type && (*types)->create) + { + file= ((*types)->state == SHOW_OPTION_YES) ? 
+ (*types)->create(table) : NULL; + break; + } } -#endif -#ifdef HAVE_ARCHIVE_DB - case DB_TYPE_ARCHIVE_DB: - file= new (alloc) ha_archive(table); - break; -#endif -#ifdef HAVE_BLACKHOLE_DB - case DB_TYPE_BLACKHOLE_DB: - file= new (alloc) ha_blackhole(table); - break; -#endif -#ifdef HAVE_FEDERATED_DB - case DB_TYPE_FEDERATED_DB: - file= new (alloc) ha_federated(table); - break; -#endif -#ifdef HAVE_CSV_DB - case DB_TYPE_CSV_DB: - file= new (alloc) ha_tina(table); - break; -#endif -#ifdef HAVE_NDBCLUSTER_DB - case DB_TYPE_NDBCLUSTER: - file= new (alloc) ha_ndbcluster(table); - break; -#endif - case DB_TYPE_HEAP: - file= new (alloc) ha_heap(table); - break; - default: // should never happen + thd->mem_root= thd_save_mem_root; + + if (!file) { enum db_type def=(enum db_type) current_thd->variables.table_type; /* Try first with 'default table type' */ if (db_type != def) return get_new_handler(table, alloc, def); } - /* Fall back to MyISAM */ - case DB_TYPE_MYISAM: - file= new (alloc) ha_myisam(table); - break; - case DB_TYPE_MRG_MYISAM: - file= new (alloc) ha_myisammrg(table); - break; - } if (file) { if (file->ha_initialise()) @@ -399,7 +230,7 @@ handler *get_new_handler(TABLE *table, MEM_ROOT *alloc, enum db_type db_type) } -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE handler *get_ha_partition(partition_info *part_info) { ha_partition *partition; @@ -557,40 +388,13 @@ int ha_init() int ha_panic(enum ha_panic_function flag) { int error=0; -#ifndef NO_HASH - error|=h_panic(flag); /* fix hash */ -#endif -#ifdef HAVE_ISAM - error|=mrg_panic(flag); - error|=nisam_panic(flag); -#endif - error|=heap_panic(flag); - error|=mi_panic(flag); - error|=myrg_panic(flag); -#ifdef HAVE_BERKELEY_DB - if (have_berkeley_db == SHOW_OPTION_YES) - error|=berkeley_end(); -#endif -#ifdef HAVE_INNOBASE_DB - if (have_innodb == SHOW_OPTION_YES) - error|=innobase_end(); -#endif -#ifdef HAVE_NDBCLUSTER_DB - if (have_ndbcluster == SHOW_OPTION_YES) - error|=ndbcluster_end(); 
-#endif -#ifdef HAVE_FEDERATED_DB - if (have_federated_db == SHOW_OPTION_YES) - error|= federated_db_end(); -#endif -#ifdef HAVE_ARCHIVE_DB - if (have_archive_db == SHOW_OPTION_YES) - error|= archive_db_end(); -#endif -#ifdef HAVE_CSV_DB - if (have_csv_db == SHOW_OPTION_YES) - error|= tina_end(); -#endif + handlerton **types; + + for (types= sys_table_types; *types; types++) + { + if ((*types)->state == SHOW_OPTION_YES && (*types)->panic) + error|= (*types)->panic(flag); + } if (ha_finish_errors()) error= 1; return error; @@ -598,14 +402,13 @@ int ha_panic(enum ha_panic_function flag) void ha_drop_database(char* path) { -#ifdef HAVE_INNOBASE_DB - if (have_innodb == SHOW_OPTION_YES) - innobase_drop_database(path); -#endif -#ifdef HAVE_NDBCLUSTER_DB - if (have_ndbcluster == SHOW_OPTION_YES) - ndbcluster_drop_database(path); -#endif + handlerton **types; + + for (types= sys_table_types; *types; types++) + { + if ((*types)->state == SHOW_OPTION_YES && (*types)->drop_database) + (*types)->drop_database(path); + } } /* don't bother to rollback here, it's done already */ @@ -613,7 +416,8 @@ void ha_close_connection(THD* thd) { handlerton **types; for (types= sys_table_types; *types; types++) - if (thd->ha_data[(*types)->slot]) + /* XXX Maybe do a rollback if close_connection == NULL ? 
*/ + if (thd->ha_data[(*types)->slot] && (*types)->close_connection) (*types)->close_connection(thd); } @@ -1190,10 +994,14 @@ bool mysql_xa_recover(THD *thd) int ha_release_temporary_latches(THD *thd) { -#ifdef HAVE_INNOBASE_DB - if (opt_innodb) - innobase_release_temporary_latches(thd); -#endif + handlerton **types; + + for (types= sys_table_types; *types; types++) + { + if ((*types)->state == SHOW_OPTION_YES && + (*types)->release_temporary_latches) + (*types)->release_temporary_latches(thd); + } return 0; } @@ -1205,10 +1013,13 @@ int ha_release_temporary_latches(THD *thd) int ha_update_statistics() { -#ifdef HAVE_INNOBASE_DB - if (opt_innodb) - innodb_export_status(); -#endif + handlerton **types; + + for (types= sys_table_types; *types; types++) + { + if ((*types)->state == SHOW_OPTION_YES && (*types)->update_statistics) + (*types)->update_statistics(); + } return 0; } @@ -1317,35 +1128,45 @@ int ha_release_savepoint(THD *thd, SAVEPOINT *sv) int ha_start_consistent_snapshot(THD *thd) { -#ifdef HAVE_INNOBASE_DB - if ((have_innodb == SHOW_OPTION_YES) && - !innobase_start_trx_and_assign_read_view(thd)) - return 0; -#endif + bool warn= true; + handlerton **types; + + for (types= sys_table_types; *types; types++) + { + if ((*types)->state == SHOW_OPTION_YES && + (*types)->start_consistent_snapshot) + { + (*types)->start_consistent_snapshot(thd); + warn= false; /* hope user is using engine */ + } + } /* Same idea as when one wants to CREATE TABLE in one engine which does not exist: */ - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, - "This MySQL server does not support any " - "consistent-read capable storage engine"); + if (warn) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, + "This MySQL server does not support any " + "consistent-read capable storage engine"); return 0; } -bool ha_flush_logs() +bool ha_flush_logs(enum db_type db_type) { bool result=0; -#ifdef HAVE_BERKELEY_DB - if ((have_berkeley_db == SHOW_OPTION_YES) 
&& - berkeley_flush_logs()) - result=1; -#endif -#ifdef HAVE_INNOBASE_DB - if ((have_innodb == SHOW_OPTION_YES) && - innobase_flush_logs()) - result=1; -#endif + handlerton **types; + + for (types= sys_table_types; *types; types++) + { + if ((*types)->state == SHOW_OPTION_YES && + (db_type == DB_TYPE_DEFAULT || db_type == (*types)->db_type) && + (*types)->flush_logs) + { + if ((*types)->flush_logs()) + result= 1; + } + } return result; } @@ -2326,7 +2147,7 @@ int ha_discover(THD *thd, const char *db, const char *name, DBUG_PRINT("enter", ("db: %s, name: %s", db, name)); if (is_prefix(name,tmp_file_prefix)) /* skip temporary tables */ DBUG_RETURN(error); -#ifdef HAVE_NDBCLUSTER_DB +#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE if (have_ndbcluster == SHOW_OPTION_YES) error= ndbcluster_discover(thd, db, name, frmblob, frmlen); #endif @@ -2350,7 +2171,7 @@ ha_find_files(THD *thd,const char *db,const char *path, DBUG_ENTER("ha_find_files"); DBUG_PRINT("enter", ("db: %s, path: %s, wild: %s, dir: %d", db, path, wild, dir)); -#ifdef HAVE_NDBCLUSTER_DB +#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE if (have_ndbcluster == SHOW_OPTION_YES) error= ndbcluster_find_files(thd, db, path, wild, dir, files); #endif @@ -2372,7 +2193,7 @@ int ha_table_exists_in_engine(THD* thd, const char* db, const char* name) int error= 0; DBUG_ENTER("ha_table_exists_in_engine"); DBUG_PRINT("enter", ("db: %s, name: %s", db, name)); -#ifdef HAVE_NDBCLUSTER_DB +#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE if (have_ndbcluster == SHOW_OPTION_YES) error= ndbcluster_table_exists_in_engine(thd, db, name); #endif @@ -2699,6 +2520,54 @@ TYPELIB *ha_known_exts(void) return &known_extensions; } +static bool stat_print(THD *thd, const char *type, const char *file, + const char *status) +{ + Protocol *protocol= thd->protocol; + protocol->prepare_for_resend(); + protocol->store(type, system_charset_info); + protocol->store(file, system_charset_info); + protocol->store(status, system_charset_info); + if (protocol->write()) + return 
TRUE; + return FALSE; +} + +bool ha_show_status(THD *thd, enum db_type db_type, enum ha_stat_type stat) +{ + handlerton **types; + List<Item> field_list; + Protocol *protocol= thd->protocol; + + field_list.push_back(new Item_empty_string("Type",10)); + field_list.push_back(new Item_empty_string("Name",FN_REFLEN)); + field_list.push_back(new Item_empty_string("Status",10)); + + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + return TRUE; + + for (types= sys_table_types; *types; types++) + { + if ((*types)->state == SHOW_OPTION_YES && + (db_type == DB_TYPE_DEFAULT || db_type == (*types)->db_type) && + (*types)->show_status) + { + if ((*types)->show_status(thd, stat_print, stat)) + return TRUE; + } + else if (db_type == (*types)->db_type && + (*types)->state != SHOW_OPTION_YES) + { + if (stat_print(thd, (*types)->name, "", "DISABLED")) + return TRUE; + } + } + + send_eof(thd); + return FALSE; +} + #ifdef HAVE_REPLICATION /* @@ -2722,11 +2591,19 @@ TYPELIB *ha_known_exts(void) int ha_repl_report_sent_binlog(THD *thd, char *log_file_name, my_off_t end_offset) { -#ifdef HAVE_INNOBASE_DB - return innobase_repl_report_sent_binlog(thd,log_file_name,end_offset); -#else - return 0; -#endif + int result= 0; + handlerton **types; + + for (types= sys_table_types; *types; types++) + { + if ((*types)->state == SHOW_OPTION_YES && + (*types)->repl_report_sent_binlog) + { + (*types)->repl_report_sent_binlog(thd,log_file_name,end_offset); + result= 0; + } + } + return result; } diff --git a/sql/handler.h b/sql/handler.h index 2db4b224698..3b903b1ce2a 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -28,10 +28,7 @@ #define NO_HASH /* Not yet implemented */ #endif -#if defined(HAVE_BERKELEY_DB) || defined(HAVE_INNOBASE_DB) || \ - defined(HAVE_NDBCLUSTER_DB) #define USING_TRANSACTIONS -#endif // the following is for checking tables @@ -191,6 +188,7 @@ enum db_type DB_TYPE_FEDERATED_DB, DB_TYPE_BLACKHOLE_DB, DB_TYPE_PARTITION_DB, + 
DB_TYPE_BINLOG, DB_TYPE_DEFAULT // Must be last }; @@ -308,6 +306,16 @@ typedef struct xid_t XID; #define MAX_XID_LIST_SIZE (1024*128) #endif +/* The handler for a table type. Will be included in the TABLE structure */ + +struct st_table; +typedef struct st_table TABLE; +struct st_foreign_key_info; +typedef struct st_foreign_key_info FOREIGN_KEY_INFO; +typedef bool (stat_print_fn)(THD *thd, const char *type, const char *file, + const char *status); +enum ha_stat_type { HA_ENGINE_STATUS, HA_ENGINE_LOGS, HA_ENGINE_MUTEX }; + /* handlerton is a singleton structure - one instance per storage engine - to provide access to storage engine functionality that works on the @@ -402,6 +410,16 @@ typedef struct void *(*create_cursor_read_view)(); void (*set_cursor_read_view)(void *); void (*close_cursor_read_view)(void *); + handler *(*create)(TABLE *table); + void (*drop_database)(char* path); + int (*panic)(enum ha_panic_function flag); + int (*release_temporary_latches)(THD *thd); + int (*update_statistics)(); + int (*start_consistent_snapshot)(THD *thd); + bool (*flush_logs)(); + bool (*show_status)(THD *thd, stat_print_fn *print, enum ha_stat_type stat); + int (*repl_report_sent_binlog)(THD *thd, char *log_file_name, + my_off_t end_offset); uint32 flags; /* global handler flags */ } handlerton; @@ -415,6 +433,8 @@ struct show_table_alias_st { #define HTON_CLOSE_CURSORS_AT_COMMIT (1 << 0) #define HTON_ALTER_NOT_SUPPORTED (1 << 1) #define HTON_CAN_RECREATE (1 << 2) +#define HTON_FLUSH_AFTER_RENAME (1 << 3) +#define HTON_NOT_USER_SELECTABLE (1 << 4) typedef struct st_thd_trans { @@ -430,6 +450,8 @@ enum enum_tx_isolation { ISO_READ_UNCOMMITTED, ISO_READ_COMMITTED, ISO_REPEATABLE_READ, ISO_SERIALIZABLE}; +enum ndb_distribution { ND_KEYHASH= 0, ND_LINHASH= 1 }; + typedef struct { uint32 start_part; uint32 end_part; @@ -608,7 +630,7 @@ public: }; -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE /* Answers the question if subpartitioning is used for a certain table 
SYNOPSIS @@ -670,12 +692,6 @@ typedef struct st_ha_create_information } HA_CREATE_INFO; -/* The handler for a table type. Will be included in the TABLE structure */ - -struct st_table; -typedef struct st_table TABLE; -struct st_foreign_key_info; -typedef struct st_foreign_key_info FOREIGN_KEY_INFO; typedef struct st_savepoint SAVEPOINT; extern ulong savepoint_alloc_size; @@ -693,7 +709,7 @@ typedef struct st_ha_check_opt } HA_CHECK_OPT; -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE bool is_partition_in_list(char *part_name, List<char> list_part_names); bool is_partitions_in_table(partition_info *new_part_info, partition_info *old_part_info); @@ -743,7 +759,7 @@ typedef struct st_handler_buffer class handler :public Sql_alloc { -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE friend class ha_partition; #endif protected: @@ -1246,7 +1262,7 @@ public: virtual const char **bas_ext() const =0; virtual ulong table_flags(void) const =0; virtual ulong alter_table_flags(void) const { return 0; } -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE virtual ulong partition_flags(void) const { return 0;} virtual int get_default_no_partitions(ulonglong max_rows) { return 1;} #endif @@ -1402,13 +1418,16 @@ int ha_panic(enum ha_panic_function flag); int ha_update_statistics(); void ha_close_connection(THD* thd); my_bool ha_storage_engine_is_enabled(enum db_type database_type); -bool ha_flush_logs(void); +bool ha_flush_logs(enum db_type db_type=DB_TYPE_DEFAULT); void ha_drop_database(char* path); int ha_create_table(const char *name, HA_CREATE_INFO *create_info, bool update_create_info); int ha_delete_table(THD *thd, enum db_type db_type, const char *path, const char *alias, bool generate_warning); +/* statistics and info */ +bool ha_show_status(THD *thd, enum db_type db_type, enum ha_stat_type stat); + /* discovery */ int ha_create_table_from_engine(THD* thd, const char *db, const char *name); int ha_discover(THD* thd, const char* 
dbname, const char* name, diff --git a/sql/handlerton.cc.in b/sql/handlerton.cc.in new file mode 100644 index 00000000000..55af8cdd8cf --- /dev/null +++ b/sql/handlerton.cc.in @@ -0,0 +1,14 @@ + +#include "mysql_priv.h" + +extern handlerton heap_hton,myisam_hton,myisammrg_hton, + binlog_hton@mysql_se_decls@; + +/* + This array is used for processing compiled in engines. +*/ +handlerton *sys_table_types[]= +{ + &heap_hton,&myisam_hton@mysql_se_htons@,&myisammrg_hton,&binlog_hton,NULL +}; + diff --git a/sql/lex.h b/sql/lex.h index a610a628ef4..cfafd98fe20 100644 --- a/sql/lex.h +++ b/sql/lex.h @@ -373,9 +373,7 @@ static SYMBOL symbols[] = { { "PACK_KEYS", SYM(PACK_KEYS_SYM)}, { "PARSER", SYM(PARSER_SYM)}, { "PARTIAL", SYM(PARTIAL)}, -#ifdef HAVE_PARTITION_DB { "PARTITION", SYM(PARTITION_SYM)}, -#endif { "PARTITIONS", SYM(PARTITIONS_SYM)}, { "PASSWORD", SYM(PASSWORD)}, { "PHASE", SYM(PHASE_SYM)}, diff --git a/sql/log.cc b/sql/log.cc index 3341b0265eb..c958196c466 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -51,7 +51,7 @@ handlerton binlog_hton = { "binlog", SHOW_OPTION_YES, "This is a meta storage engine to represent the binlog in a transaction", - DB_TYPE_UNKNOWN, /* IGNORE for now */ + DB_TYPE_BINLOG, /* IGNORE for now */ binlog_init, 0, sizeof(my_off_t), /* savepoint size = binlog offset */ @@ -68,9 +68,19 @@ handlerton binlog_hton = { NULL, /* create_cursor_read_view */ NULL, /* set_cursor_read_view */ NULL, /* close_cursor_read_view */ - HTON_NO_FLAGS + NULL, /* Create a new handler */ + NULL, /* Drop a database */ + NULL, /* Panic call */ + NULL, /* Release temporary latches */ + NULL, /* Update Statistics */ + NULL, /* Start Consistent Snapshot */ + NULL, /* Flush logs */ + NULL, /* Show status */ + NULL, /* Replication Report Sent Binlog */ + HTON_NOT_USER_SELECTABLE }; + /* this function is mostly a placeholder. 
conceptually, binlog initialization (now mostly done in MYSQL_LOG::open) diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index c4031be1752..90c4f52e51d 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -179,11 +179,6 @@ extern CHARSET_INFO *national_charset_info, *table_alias_charset; #define FLUSH_TIME 0 /* Don't flush tables */ #define MAX_CONNECT_ERRORS 10 // errors before disabling host -#ifdef HAVE_INNOBASE_DB -#define IF_INNOBASE_DB(A, B) (A) -#else -#define IF_INNOBASE_DB(A, B) (B) -#endif #ifdef __NETWARE__ #define IF_NETWARE(A,B) (A) #else @@ -1064,6 +1059,8 @@ extern ulong volatile manager_status; extern bool volatile manager_thread_in_use, mqh_used; extern pthread_t manager_thread; pthread_handler_t handle_manager(void *arg); +bool mysql_manager_submit(void (*action)()); + /* sql_test.cc */ #ifndef DBUG_OFF @@ -1253,17 +1250,67 @@ extern const LEX_STRING view_type; /* optional things, have_* variables */ -extern SHOW_COMP_OPTION have_isam, have_innodb, have_berkeley_db; -extern SHOW_COMP_OPTION have_example_db, have_archive_db, have_csv_db; +#ifdef WITH_INNOBASE_STORAGE_ENGINE +extern handlerton innobase_hton; +#define have_innodb innobase_hton.state +#else +extern SHOW_COMP_OPTION have_innodb; +#endif +#ifdef WITH_BERKELEY_STORAGE_ENGINE +extern handlerton berkeley_hton; +#define have_berkeley_db berkeley_hton.state +#else +extern SHOW_COMP_OPTION have_berkeley_db; +#endif +#ifdef WITH_EXAMPLE_STORAGE_ENGINE +extern handlerton example_hton; +#define have_example_db example_hton.state +#else +extern SHOW_COMP_OPTION have_example_db; +#endif +#ifdef WITH_ARCHIVE_STORAGE_ENGINE +extern handlerton archive_hton; +#define have_archive_db archive_hton.state +#else +extern SHOW_COMP_OPTION have_archive_db; +#endif +#ifdef WITH_CSV_STORAGE_ENGINE +extern handlerton tina_hton; +#define have_csv_db tina_hton.state +#else +extern SHOW_COMP_OPTION have_csv_db; +#endif +#ifdef WITH_FEDERATED_STORAGE_ENGINE +extern handlerton federated_hton; +#define 
have_federated_db federated_hton.state +#else extern SHOW_COMP_OPTION have_federated_db; +#endif +#ifdef WITH_BLACKHOLE_STORAGE_ENGINE +extern handlerton blackhole_hton; +#define have_blackhole_db blackhole_hton.state +#else extern SHOW_COMP_OPTION have_blackhole_db; +#endif +#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE +extern handlerton ndbcluster_hton; +#define have_ndbcluster ndbcluster_hton.state +#else extern SHOW_COMP_OPTION have_ndbcluster; +#endif +#ifdef WITH_PARTITION_STORAGE_ENGINE +extern handlerton partition_hton; +#define have_partition_db partition_hton.state +#else +extern SHOW_COMP_OPTION have_partition_db; +#endif + +extern SHOW_COMP_OPTION have_isam; extern SHOW_COMP_OPTION have_raid, have_openssl, have_symlink; extern SHOW_COMP_OPTION have_query_cache; extern SHOW_COMP_OPTION have_geometry, have_rtree_keys; extern SHOW_COMP_OPTION have_crypt; extern SHOW_COMP_OPTION have_compress; -extern SHOW_COMP_OPTION have_partition_db; #ifndef __WIN__ extern pthread_t signal_thread; diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 37cfdba1937..b039ce9c3d9 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -24,24 +24,16 @@ #include "stacktrace.h" #include "mysqld_suffix.h" #include "mysys_err.h" -#ifdef HAVE_BERKELEY_DB -#include "ha_berkeley.h" -#endif -#ifdef HAVE_INNOBASE_DB -#include "ha_innodb.h" -#endif + #include "ha_myisam.h" -#ifdef HAVE_NDBCLUSTER_DB -#include "ha_ndbcluster.h" -#endif -#ifdef HAVE_INNOBASE_DB +#ifdef WITH_INNOBASE_STORAGE_ENGINE #define OPT_INNODB_DEFAULT 1 #else #define OPT_INNODB_DEFAULT 0 #endif #define OPT_BDB_DEFAULT 0 -#ifdef HAVE_NDBCLUSTER_DB +#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE #define OPT_NDBCLUSTER_DEFAULT 0 #if defined(NOT_ENOUGH_TESTED) \ && defined(NDB_SHM_TRANSPORTER) && MYSQL_VERSION_ID >= 50000 @@ -330,7 +322,7 @@ static I_List<THD> thread_cache; static pthread_cond_t COND_thread_cache, COND_flush_thread_cache; -#ifdef HAVE_BERKELEY_DB +#ifdef WITH_BERKELEY_STORAGE_ENGINE static my_bool opt_sync_bdb_logs; #endif @@ 
-355,7 +347,59 @@ my_bool opt_safe_user_create = 0, opt_no_mix_types = 0; my_bool opt_show_slave_auth_info, opt_sql_bin_update = 0; my_bool opt_log_slave_updates= 0; my_bool opt_innodb; -#ifdef HAVE_NDBCLUSTER_DB +#ifdef WITH_INNOBASE_STORAGE_ENGINE +extern struct show_var_st innodb_status_variables[]; +extern uint innobase_init_flags, innobase_lock_type; +extern uint innobase_flush_log_at_trx_commit; +extern ulong innobase_cache_size, innobase_fast_shutdown; +extern ulong innobase_large_page_size; +extern char *innobase_home, *innobase_tmpdir, *innobase_logdir; +extern long innobase_lock_scan_time; +extern long innobase_mirrored_log_groups, innobase_log_files_in_group; +extern long innobase_log_file_size, innobase_log_buffer_size; +extern long innobase_buffer_pool_size, innobase_additional_mem_pool_size; +extern long innobase_buffer_pool_awe_mem_mb; +extern long innobase_file_io_threads, innobase_lock_wait_timeout; +extern long innobase_force_recovery; +extern long innobase_open_files; +extern char *innobase_data_home_dir, *innobase_data_file_path; +extern char *innobase_log_group_home_dir, *innobase_log_arch_dir; +extern char *innobase_unix_file_flush_method; +/* The following variables have to be my_bool for SHOW VARIABLES to work */ +extern my_bool innobase_log_archive, + innobase_use_doublewrite, + innobase_use_checksums, + innobase_use_large_pages, + innobase_use_native_aio, + innobase_file_per_table, innobase_locks_unsafe_for_binlog, + innobase_create_status_file; +extern my_bool innobase_very_fast_shutdown; /* set this to 1 just before + calling innobase_end() if you want + InnoDB to shut down without + flushing the buffer pool: this + is equivalent to a 'crash' */ +extern "C" { +extern ulong srv_max_buf_pool_modified_pct; +extern ulong srv_max_purge_lag; +extern ulong srv_auto_extend_increment; +extern ulong srv_n_spin_wait_rounds; +extern ulong srv_n_free_tickets_to_enter; +extern ulong srv_thread_sleep_delay; +extern ulong srv_thread_concurrency; +extern 
ulong srv_commit_concurrency; +} +#endif +#ifdef WITH_BERKELEY_STORAGE_ENGINE +extern const u_int32_t bdb_DB_TXN_NOSYNC, bdb_DB_RECOVER, bdb_DB_PRIVATE; +extern bool berkeley_shared_data; +extern u_int32_t berkeley_init_flags,berkeley_env_flags, berkeley_lock_type, + berkeley_lock_types[]; +extern ulong berkeley_cache_size, berkeley_max_lock, berkeley_log_buffer_size; +extern char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir; +extern long berkeley_lock_scan_time; +extern TYPELIB berkeley_lock_typelib; +#endif +#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE const char *opt_ndbcluster_connectstring= 0; const char *opt_ndb_connectstring= 0; char opt_ndb_constrbuf[1024]; @@ -365,11 +409,11 @@ ulong opt_ndb_cache_check_time; const char *opt_ndb_mgmd; ulong opt_ndb_nodeid; -const char *ndb_distribution_names[]= {"KEYHASH", "LINHASH", NullS}; -TYPELIB ndb_distribution_typelib= { array_elements(ndb_distribution_names)-1, - "", ndb_distribution_names, NULL }; -const char *opt_ndb_distribution= ndb_distribution_names[ND_KEYHASH]; -enum ndb_distribution opt_ndb_distribution_id= ND_KEYHASH; +extern struct show_var_st ndb_status_variables[]; +extern const char *ndb_distribution_names[]; +extern TYPELIB ndb_distribution_typelib; +extern const char *opt_ndb_distribution; +extern enum ndb_distribution opt_ndb_distribution_id; #endif my_bool opt_readonly, use_temp_pool, relay_log_purge; my_bool opt_sync_frm, opt_allow_suspicious_udfs; @@ -474,14 +518,9 @@ MY_BITMAP temp_pool; CHARSET_INFO *system_charset_info, *files_charset_info ; CHARSET_INFO *national_charset_info, *table_alias_charset; -SHOW_COMP_OPTION have_berkeley_db, have_innodb, have_isam, have_ndbcluster, - have_example_db, have_archive_db, have_csv_db; -SHOW_COMP_OPTION have_federated_db; -SHOW_COMP_OPTION have_partition_db; SHOW_COMP_OPTION have_raid, have_openssl, have_symlink, have_query_cache; SHOW_COMP_OPTION have_geometry, have_rtree_keys; SHOW_COMP_OPTION have_crypt, have_compress; -SHOW_COMP_OPTION 
have_blackhole_db; /* Thread specific variables */ @@ -2465,7 +2504,7 @@ pthread_handler_t handle_shutdown(void *arg) static const char *load_default_groups[]= { -#ifdef HAVE_NDBCLUSTER_DB +#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE "mysql_cluster", #endif "mysqld","server", MYSQL_BASE_VERSION, 0, 0}; @@ -2585,7 +2624,7 @@ static int init_common_variables(const char *conf_file_name, int argc, { my_use_large_pages= 1; my_large_page_size= opt_large_page_size; -#ifdef HAVE_INNOBASE_DB +#ifdef WITH_INNOBASE_STORAGE_ENGINE innobase_use_large_pages= 1; innobase_large_page_size= opt_large_page_size; #endif @@ -3130,7 +3169,7 @@ server."); static void create_maintenance_thread() { if ( -#ifdef HAVE_BERKELEY_DB +#ifdef WITH_BERKELEY_STORAGE_ENGINE (have_berkeley_db == SHOW_OPTION_YES) || #endif (flush_time && flush_time != ~(ulong) 0L)) @@ -4629,7 +4668,7 @@ struct my_option my_long_options[] = Disable with --skip-bdb (will save memory).", (gptr*) &opt_bdb, (gptr*) &opt_bdb, 0, GET_BOOL, NO_ARG, OPT_BDB_DEFAULT, 0, 0, 0, 0, 0}, -#ifdef HAVE_BERKELEY_DB +#ifdef WITH_BERKELEY_STORAGE_ENGINE {"bdb-home", OPT_BDB_HOME, "Berkeley home directory.", (gptr*) &berkeley_home, (gptr*) &berkeley_home, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"bdb-lock-detect", OPT_BDB_LOCK, @@ -4650,7 +4689,7 @@ Disable with --skip-bdb (will save memory).", {"bdb-tmpdir", OPT_BDB_TMP, "Berkeley DB tempfile name.", (gptr*) &berkeley_tmpdir, (gptr*) &berkeley_tmpdir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, -#endif /* HAVE_BERKELEY_DB */ +#endif /* WITH_BERKELEY_STORAGE_ENGINE */ {"big-tables", OPT_BIG_TABLES, "Allow big result sets by saving all temporary sets on file (Solves most 'table full' errors).", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -4786,7 +4825,7 @@ Disable with --skip-large-pages.", Disable with --skip-innodb (will save memory).", (gptr*) &opt_innodb, (gptr*) &opt_innodb, 0, GET_BOOL, NO_ARG, OPT_INNODB_DEFAULT, 0, 0, 0, 0, 0}, -#ifdef HAVE_INNOBASE_DB +#ifdef 
WITH_INNOBASE_STORAGE_ENGINE {"innodb_checksums", OPT_INNODB_CHECKSUMS, "Enable InnoDB checksums validation (enabled by default). \ Disable with --skip-innodb-checksums.", (gptr*) &innobase_use_checksums, (gptr*) &innobase_use_checksums, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, @@ -4794,7 +4833,7 @@ Disable with --skip-innodb-checksums.", (gptr*) &innobase_use_checksums, {"innodb_data_file_path", OPT_INNODB_DATA_FILE_PATH, "Path to individual files and their sizes.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, -#ifdef HAVE_INNOBASE_DB +#ifdef WITH_INNOBASE_STORAGE_ENGINE {"innodb_data_home_dir", OPT_INNODB_DATA_HOME_DIR, "The common part for InnoDB table spaces.", (gptr*) &innobase_data_home_dir, (gptr*) &innobase_data_home_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, @@ -4865,7 +4904,7 @@ Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite, (gptr*) &global_system_variables.innodb_support_xa, (gptr*) &global_system_variables.innodb_support_xa, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, -#endif /* End HAVE_INNOBASE_DB */ +#endif /* End WITH_INNOBASE_STORAGE_ENGINE */ {"isam", OPT_ISAM, "Obsolete. ISAM storage engine is no longer supported.", (gptr*) &opt_isam, (gptr*) &opt_isam, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -5025,7 +5064,7 @@ master-ssl", Disable with --skip-ndbcluster (will save memory).", (gptr*) &opt_ndbcluster, (gptr*) &opt_ndbcluster, 0, GET_BOOL, NO_ARG, OPT_NDBCLUSTER_DEFAULT, 0, 0, 0, 0, 0}, -#ifdef HAVE_NDBCLUSTER_DB +#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE {"ndb-connectstring", OPT_NDB_CONNECTSTRING, "Connect string for ndbcluster.", (gptr*) &opt_ndb_connectstring, @@ -5356,7 +5395,7 @@ log and this option does nothing anymore.", "The number of outstanding connection requests MySQL can have. 
This comes into play when the main MySQL thread gets very many connection requests in a very short time.", (gptr*) &back_log, (gptr*) &back_log, 0, GET_ULONG, REQUIRED_ARG, 50, 1, 65535, 0, 1, 0 }, -#ifdef HAVE_BERKELEY_DB +#ifdef WITH_BERKELEY_STORAGE_ENGINE { "bdb_cache_size", OPT_BDB_CACHE_SIZE, "The buffer that is allocated to cache index and rows for BDB tables.", (gptr*) &berkeley_cache_size, (gptr*) &berkeley_cache_size, 0, GET_ULONG, @@ -5373,7 +5412,7 @@ log and this option does nothing anymore.", "The maximum number of locks you can have active on a BDB table.", (gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG, REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0}, -#endif /* HAVE_BERKELEY_DB */ +#endif /* WITH_BERKELEY_STORAGE_ENGINE */ {"binlog_cache_size", OPT_BINLOG_CACHE_SIZE, "The size of the cache to hold the SQL statements for the binary log during a transaction. If you often use big, multi-statement transactions you can increase this to get more performance.", (gptr*) &binlog_cache_size, (gptr*) &binlog_cache_size, 0, GET_ULONG, @@ -5449,7 +5488,7 @@ log and this option does nothing anymore.", (gptr*) &global_system_variables.group_concat_max_len, (gptr*) &max_system_variables.group_concat_max_len, 0, GET_ULONG, REQUIRED_ARG, 1024, 4, (long) ~0, 0, 1, 0}, -#ifdef HAVE_INNOBASE_DB +#ifdef WITH_INNOBASE_STORAGE_ENGINE {"innodb_additional_mem_pool_size", OPT_INNODB_ADDITIONAL_MEM_POOL_SIZE, "Size of a memory pool InnoDB uses to store data dictionary information and other internal data structures.", (gptr*) &innobase_additional_mem_pool_size, @@ -5526,7 +5565,7 @@ log and this option does nothing anymore.", (gptr*) &srv_thread_sleep_delay, (gptr*) &srv_thread_sleep_delay, 0, GET_LONG, REQUIRED_ARG, 10000L, 0L, ~0L, 0, 1L, 0}, -#endif /* HAVE_INNOBASE_DB */ +#endif /* WITH_INNOBASE_STORAGE_ENGINE */ {"interactive_timeout", OPT_INTERACTIVE_TIMEOUT, "The number of seconds the server waits for activity on an interactive connection before 
closing it.", (gptr*) &global_system_variables.net_interactive_timeout, @@ -5846,12 +5885,12 @@ The minimum value for this variable is 4096.", (gptr*) &max_system_variables.sortbuff_size, 0, GET_ULONG, REQUIRED_ARG, MAX_SORT_MEMORY, MIN_SORT_MEMORY+MALLOC_OVERHEAD*2, ~0L, MALLOC_OVERHEAD, 1, 0}, -#ifdef HAVE_BERKELEY_DB +#ifdef WITH_BERKELEY_STORAGE_ENGINE {"sync-bdb-logs", OPT_BDB_SYNC, "Synchronously flush Berkeley DB logs. Enabled by default", (gptr*) &opt_sync_bdb_logs, (gptr*) &opt_sync_bdb_logs, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, -#endif /* HAVE_BERKELEY_DB */ +#endif /* WITH_BERKELEY_STORAGE_ENGINE */ {"sync-binlog", OPT_SYNC_BINLOG, "Synchronously flush binary log to disk after every #th event. " "Use 0 (default) to disable synchronous flushing.", @@ -6003,14 +6042,14 @@ struct show_var_st status_vars[]= { {"Com_show_create_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE_DB]), SHOW_LONG_STATUS}, {"Com_show_create_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE]), SHOW_LONG_STATUS}, {"Com_show_databases", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_DATABASES]), SHOW_LONG_STATUS}, + {"Com_show_engine_logs", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ENGINE_LOGS]), SHOW_LONG_STATUS}, + {"Com_show_engine_mutex", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ENGINE_MUTEX]), SHOW_LONG_STATUS}, + {"Com_show_engine_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ENGINE_STATUS]), SHOW_LONG_STATUS}, {"Com_show_errors", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ERRORS]), SHOW_LONG_STATUS}, {"Com_show_fields", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_FIELDS]), SHOW_LONG_STATUS}, {"Com_show_grants", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_GRANTS]), SHOW_LONG_STATUS}, - {"Com_show_innodb_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_INNODB_STATUS]), SHOW_LONG_STATUS}, {"Com_show_keys", (char*) 
offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_KEYS]), SHOW_LONG_STATUS}, - {"Com_show_logs", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_LOGS]), SHOW_LONG_STATUS}, {"Com_show_master_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_MASTER_STAT]), SHOW_LONG_STATUS}, - {"Com_show_ndb_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_NDBCLUSTER_STATUS]), SHOW_LONG_STATUS}, {"Com_show_new_master", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_NEW_MASTER]), SHOW_LONG_STATUS}, {"Com_show_open_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_OPEN_TABLES]), SHOW_LONG_STATUS}, {"Com_show_privileges", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PRIVILEGES]), SHOW_LONG_STATUS}, @@ -6065,9 +6104,9 @@ struct show_var_st status_vars[]= { {"Handler_savepoint_rollback",(char*) offsetof(STATUS_VAR, ha_savepoint_rollback_count), SHOW_LONG_STATUS}, {"Handler_update", (char*) offsetof(STATUS_VAR, ha_update_count), SHOW_LONG_STATUS}, {"Handler_write", (char*) offsetof(STATUS_VAR, ha_write_count), SHOW_LONG_STATUS}, -#ifdef HAVE_INNOBASE_DB +#ifdef WITH_INNOBASE_STORAGE_ENGINE {"Innodb_", (char*) &innodb_status_variables, SHOW_VARS}, -#endif /*HAVE_INNOBASE_DB*/ +#endif /* WITH_INNOBASE_STORAGE_ENGINE */ {"Key_blocks_not_flushed", (char*) &dflt_key_cache_var.global_blocks_changed, SHOW_KEY_CACHE_LONG}, {"Key_blocks_unused", (char*) &dflt_key_cache_var.blocks_unused, SHOW_KEY_CACHE_CONST_LONG}, {"Key_blocks_used", (char*) &dflt_key_cache_var.blocks_used, SHOW_KEY_CACHE_CONST_LONG}, @@ -6077,9 +6116,9 @@ struct show_var_st status_vars[]= { {"Key_writes", (char*) &dflt_key_cache_var.global_cache_write, SHOW_KEY_CACHE_LONGLONG}, {"Last_query_cost", (char*) offsetof(STATUS_VAR, last_query_cost), SHOW_DOUBLE_STATUS}, {"Max_used_connections", (char*) &max_used_connections, SHOW_LONG}, -#ifdef HAVE_NDBCLUSTER_DB +#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE {"Ndb_", (char*) &ndb_status_variables, SHOW_VARS}, 
-#endif /*HAVE_NDBCLUSTER_DB*/ +#endif /* WITH_NDBCLUSTER_STORAGE_ENGINE */ {"Not_flushed_delayed_rows", (char*) &delayed_rows_in_use, SHOW_LONG_CONST}, {"Open_files", (char*) &my_file_opened, SHOW_LONG_CONST}, {"Open_streams", (char*) &my_stream_opened, SHOW_LONG_CONST}, @@ -6343,48 +6382,7 @@ static void mysql_init_variables(void) "d:t:i:o,/tmp/mysqld.trace"); #endif opt_error_log= IF_WIN(1,0); -#ifdef HAVE_BERKELEY_DB - have_berkeley_db= SHOW_OPTION_YES; -#else - have_berkeley_db= SHOW_OPTION_NO; -#endif -#ifdef HAVE_INNOBASE_DB - have_innodb=SHOW_OPTION_YES; -#else - have_innodb=SHOW_OPTION_NO; -#endif - have_isam=SHOW_OPTION_NO; -#ifdef HAVE_EXAMPLE_DB - have_example_db= SHOW_OPTION_YES; -#else - have_example_db= SHOW_OPTION_NO; -#endif -#ifdef HAVE_PARTITION_DB - have_partition_db= SHOW_OPTION_YES; -#else - have_partition_db= SHOW_OPTION_NO; -#endif -#ifdef HAVE_ARCHIVE_DB - have_archive_db= SHOW_OPTION_YES; -#else - have_archive_db= SHOW_OPTION_NO; -#endif -#ifdef HAVE_BLACKHOLE_DB - have_blackhole_db= SHOW_OPTION_YES; -#else - have_blackhole_db= SHOW_OPTION_NO; -#endif -#ifdef HAVE_FEDERATED_DB - have_federated_db= SHOW_OPTION_YES; -#else - have_federated_db= SHOW_OPTION_NO; -#endif -#ifdef HAVE_CSV_DB - have_csv_db= SHOW_OPTION_YES; -#else - have_csv_db= SHOW_OPTION_NO; -#endif -#ifdef HAVE_NDBCLUSTER_DB +#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE have_ndbcluster=SHOW_OPTION_DISABLED; global_system_variables.ndb_index_stat_enable=TRUE; max_system_variables.ndb_index_stat_enable=TRUE; @@ -6803,19 +6801,19 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), global_system_variables.tx_isolation= (type-1); break; } -#ifdef HAVE_BERKELEY_DB +#ifdef WITH_BERKELEY_STORAGE_ENGINE case OPT_BDB_NOSYNC: /* Deprecated option */ opt_sync_bdb_logs= 0; /* Fall through */ case OPT_BDB_SYNC: if (!opt_sync_bdb_logs) - berkeley_env_flags|= DB_TXN_NOSYNC; + berkeley_env_flags|= bdb_DB_TXN_NOSYNC; else - berkeley_env_flags&= ~DB_TXN_NOSYNC; + 
berkeley_env_flags&= ~bdb_DB_TXN_NOSYNC; break; case OPT_BDB_NO_RECOVER: - berkeley_init_flags&= ~(DB_RECOVER); + berkeley_init_flags&= ~(bdb_DB_RECOVER); break; case OPT_BDB_LOCK: { @@ -6839,12 +6837,12 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), break; } case OPT_BDB_SHARED: - berkeley_init_flags&= ~(DB_PRIVATE); + berkeley_init_flags&= ~(bdb_DB_PRIVATE); berkeley_shared_data= 1; break; -#endif /* HAVE_BERKELEY_DB */ +#endif /* WITH_BERKELEY_STORAGE_ENGINE */ case OPT_BDB: -#ifdef HAVE_BERKELEY_DB +#ifdef WITH_BERKELEY_STORAGE_ENGINE if (opt_bdb) have_berkeley_db= SHOW_OPTION_YES; else @@ -6852,14 +6850,14 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), #endif break; case OPT_NDBCLUSTER: -#ifdef HAVE_NDBCLUSTER_DB +#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE if (opt_ndbcluster) have_ndbcluster= SHOW_OPTION_YES; else have_ndbcluster= SHOW_OPTION_DISABLED; #endif break; -#ifdef HAVE_NDBCLUSTER_DB +#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE case OPT_NDB_MGMD: case OPT_NDB_NODEID: { @@ -6899,7 +6897,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), break; #endif case OPT_INNODB: -#ifdef HAVE_INNOBASE_DB +#ifdef WITH_INNOBASE_STORAGE_ENGINE if (opt_innodb) have_innodb= SHOW_OPTION_YES; else @@ -6907,15 +6905,15 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), #endif break; case OPT_INNODB_DATA_FILE_PATH: -#ifdef HAVE_INNOBASE_DB +#ifdef WITH_INNOBASE_STORAGE_ENGINE innobase_data_file_path= argument; #endif break; -#ifdef HAVE_INNOBASE_DB +#ifdef WITH_INNOBASE_STORAGE_ENGINE case OPT_INNODB_LOG_ARCHIVE: innobase_log_archive= argument ? 
test(atoi(argument)) : 1; break; -#endif /* HAVE_INNOBASE_DB */ +#endif /* WITH_INNOBASE_STORAGE_ENGINE */ case OPT_MYISAM_RECOVER: { if (!argument || !argument[0]) @@ -7061,19 +7059,19 @@ static void get_options(int argc,char **argv) get_one_option))) exit(ho_error); -#ifndef HAVE_NDBCLUSTER_DB +#ifndef WITH_NDBCLUSTER_STORAGE_ENGINE if (opt_ndbcluster) sql_print_warning("this binary does not contain NDBCLUSTER storage engine"); #endif -#ifndef HAVE_INNOBASE_DB +#ifndef WITH_INNOBASE_STORAGE_ENGINE if (opt_innodb) sql_print_warning("this binary does not contain INNODB storage engine"); #endif -#ifndef HAVE_ISAM +#ifndef WITH_ISAM_STORAGE_ENGINE if (opt_isam) sql_print_warning("this binary does not contain ISAM storage engine"); #endif -#ifndef HAVE_BERKELEY_DB +#ifndef WITH_BERKELEY_STORAGE_ENGINE if (opt_bdb) sql_print_warning("this binary does not contain BDB storage engine"); #endif @@ -7386,6 +7384,70 @@ static void create_pid_file() /***************************************************************************** + Instantiate have_xyx for missing storage engines +*****************************************************************************/ +#undef have_isam +#undef have_berkeley_db +#undef have_innodb +#undef have_ndbcluster +#undef have_example_db +#undef have_archive_db +#undef have_csv_db +#undef have_federated_db +#undef have_partition_db +#undef have_blackhole_db + +SHOW_COMP_OPTION have_berkeley_db= SHOW_OPTION_NO; +SHOW_COMP_OPTION have_innodb= SHOW_OPTION_NO; +SHOW_COMP_OPTION have_isam= SHOW_OPTION_NO; +SHOW_COMP_OPTION have_ndbcluster= SHOW_OPTION_NO; +SHOW_COMP_OPTION have_example_db= SHOW_OPTION_NO; +SHOW_COMP_OPTION have_archive_db= SHOW_OPTION_NO; +SHOW_COMP_OPTION have_csv_db= SHOW_OPTION_NO; +SHOW_COMP_OPTION have_federated_db= SHOW_OPTION_NO; +SHOW_COMP_OPTION have_partition_db= SHOW_OPTION_NO; +SHOW_COMP_OPTION have_blackhole_db= SHOW_OPTION_NO; + +#ifndef WITH_BERKELEY_STORAGE_ENGINE +bool berkeley_shared_data; +ulong berkeley_cache_size, 
berkeley_max_lock, berkeley_log_buffer_size; +char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir; +#endif + +#ifndef WITH_INNOBASE_STORAGE_ENGINE +uint innobase_flush_log_at_trx_commit; +ulong innobase_fast_shutdown; +long innobase_mirrored_log_groups, innobase_log_files_in_group; +long innobase_log_file_size, innobase_log_buffer_size; +long innobase_buffer_pool_size, innobase_additional_mem_pool_size; +long innobase_buffer_pool_awe_mem_mb; +long innobase_file_io_threads, innobase_lock_wait_timeout; +long innobase_force_recovery; +long innobase_open_files; +char *innobase_data_home_dir, *innobase_data_file_path; +char *innobase_log_group_home_dir, *innobase_log_arch_dir; +char *innobase_unix_file_flush_method; +my_bool innobase_log_archive, + innobase_use_doublewrite, + innobase_use_checksums, + innobase_file_per_table, + innobase_locks_unsafe_for_binlog; + +ulong srv_max_buf_pool_modified_pct; +ulong srv_max_purge_lag; +ulong srv_auto_extend_increment; +ulong srv_n_spin_wait_rounds; +ulong srv_n_free_tickets_to_enter; +ulong srv_thread_sleep_delay; +ulong srv_thread_concurrency; +ulong srv_commit_concurrency; +#endif + +#ifndef WITH_NDBCLUSTER_STORAGE_ENGINE +ulong ndb_cache_check_time; +#endif + +/***************************************************************************** Instantiate templates *****************************************************************************/ @@ -7399,3 +7461,5 @@ template class I_List<NAMED_LIST>; template class I_List<Statement>; template class I_List_iterator<Statement>; #endif + + diff --git a/sql/set_var.cc b/sql/set_var.cc index a8cd7235c53..40f7f8813cf 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -58,15 +58,46 @@ #include <my_getopt.h> #include <thr_alarm.h> #include <myisam.h> -#ifdef HAVE_BERKELEY_DB -#include "ha_berkeley.h" -#endif -#ifdef HAVE_INNOBASE_DB -#include "ha_innodb.h" -#endif -#ifdef HAVE_NDBCLUSTER_DB -#include "ha_ndbcluster.h" -#endif + +/* WITH_BERKELEY_STORAGE_ENGINE */ +extern bool 
berkeley_shared_data; +extern ulong berkeley_cache_size, berkeley_max_lock, berkeley_log_buffer_size; +extern char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir; + +/* WITH_INNOBASE_STORAGE_ENGINE */ +extern uint innobase_flush_log_at_trx_commit; +extern ulong innobase_fast_shutdown; +extern long innobase_mirrored_log_groups, innobase_log_files_in_group; +extern long innobase_log_file_size, innobase_log_buffer_size; +extern long innobase_buffer_pool_size, innobase_additional_mem_pool_size; +extern long innobase_buffer_pool_awe_mem_mb; +extern long innobase_file_io_threads, innobase_lock_wait_timeout; +extern long innobase_force_recovery; +extern long innobase_open_files; +extern char *innobase_data_home_dir, *innobase_data_file_path; +extern char *innobase_log_group_home_dir, *innobase_log_arch_dir; +extern char *innobase_unix_file_flush_method; +/* The following variables have to be my_bool for SHOW VARIABLES to work */ +extern my_bool innobase_log_archive, + innobase_use_doublewrite, + innobase_use_checksums, + innobase_file_per_table, + innobase_locks_unsafe_for_binlog; + +extern ulong srv_max_buf_pool_modified_pct; +extern ulong srv_max_purge_lag; +extern ulong srv_auto_extend_increment; +extern ulong srv_n_spin_wait_rounds; +extern ulong srv_n_free_tickets_to_enter; +extern ulong srv_thread_sleep_delay; +extern ulong srv_thread_concurrency; +extern ulong srv_commit_concurrency; + +/* WITH_NDBCLUSTER_STORAGE_ENGINE */ +extern ulong ndb_cache_check_time; + + + static HASH system_variable_hash; const char *bool_type_names[]= { "OFF", "ON", NullS }; @@ -398,7 +429,6 @@ sys_var_bool_ptr sys_timed_mutexes("timed_mutexes", sys_var_thd_ulong sys_net_wait_timeout("wait_timeout", &SV::net_wait_timeout); -#ifdef HAVE_INNOBASE_DB sys_var_long_ptr sys_innodb_fast_shutdown("innodb_fast_shutdown", &innobase_fast_shutdown); sys_var_long_ptr sys_innodb_max_dirty_pages_pct("innodb_max_dirty_pages_pct", @@ -421,14 +451,12 @@ sys_var_long_ptr 
sys_innodb_thread_concurrency("innodb_thread_concurrency", &srv_thread_concurrency); sys_var_long_ptr sys_innodb_commit_concurrency("innodb_commit_concurrency", &srv_commit_concurrency); -#endif /* Condition pushdown to storage engine */ sys_var_thd_bool sys_engine_condition_pushdown("engine_condition_pushdown", &SV::engine_condition_pushdown); -#ifdef HAVE_NDBCLUSTER_DB /* ndb thread specific variable settings */ sys_var_thd_ulong sys_ndb_autoincrement_prefetch_sz("ndb_autoincrement_prefetch_sz", @@ -450,7 +478,6 @@ sys_ndb_index_stat_cache_entries("ndb_index_stat_cache_entries", sys_var_thd_ulong sys_ndb_index_stat_update_freq("ndb_index_stat_update_freq", &SV::ndb_index_stat_update_freq); -#endif /* Time/date/datetime formats */ @@ -593,7 +620,6 @@ struct show_var_st init_vars[]= { {sys_automatic_sp_privileges.name,(char*) &sys_automatic_sp_privileges, SHOW_SYS}, {"back_log", (char*) &back_log, SHOW_LONG}, {"basedir", mysql_home, SHOW_CHAR}, -#ifdef HAVE_BERKELEY_DB {"bdb_cache_size", (char*) &berkeley_cache_size, SHOW_LONG}, {"bdb_home", (char*) &berkeley_home, SHOW_CHAR_PTR}, {"bdb_log_buffer_size", (char*) &berkeley_log_buffer_size, SHOW_LONG}, @@ -601,7 +627,6 @@ struct show_var_st init_vars[]= { {"bdb_max_lock", (char*) &berkeley_max_lock, SHOW_LONG}, {"bdb_shared_data", (char*) &berkeley_shared_data, SHOW_BOOL}, {"bdb_tmpdir", (char*) &berkeley_tmpdir, SHOW_CHAR_PTR}, -#endif {sys_binlog_cache_size.name,(char*) &sys_binlog_cache_size, SHOW_SYS}, {sys_bulk_insert_buff_size.name,(char*) &sys_bulk_insert_buff_size,SHOW_SYS}, {sys_character_set_client.name,(char*) &sys_character_set_client, SHOW_SYS}, @@ -658,7 +683,6 @@ struct show_var_st init_vars[]= { {"init_connect", (char*) &sys_init_connect, SHOW_SYS}, {"init_file", (char*) &opt_init_file, SHOW_CHAR_PTR}, {"init_slave", (char*) &sys_init_slave, SHOW_SYS}, -#ifdef HAVE_INNOBASE_DB {"innodb_additional_mem_pool_size", (char*) &innobase_additional_mem_pool_size, SHOW_LONG }, 
{sys_innodb_autoextend_increment.name, (char*) &sys_innodb_autoextend_increment, SHOW_SYS}, {"innodb_buffer_pool_awe_mem_mb", (char*) &innobase_buffer_pool_awe_mem_mb, SHOW_LONG }, @@ -692,7 +716,6 @@ struct show_var_st init_vars[]= { {sys_innodb_table_locks.name, (char*) &sys_innodb_table_locks, SHOW_SYS}, {sys_innodb_thread_concurrency.name, (char*) &sys_innodb_thread_concurrency, SHOW_SYS}, {sys_innodb_thread_sleep_delay.name, (char*) &sys_innodb_thread_sleep_delay, SHOW_SYS}, -#endif {sys_interactive_timeout.name,(char*) &sys_interactive_timeout, SHOW_SYS}, {sys_join_buffer_size.name, (char*) &sys_join_buffer_size, SHOW_SYS}, {sys_key_buffer_size.name, (char*) &sys_key_buffer_size, SHOW_SYS}, @@ -757,7 +780,6 @@ struct show_var_st init_vars[]= { #ifdef __NT__ {"named_pipe", (char*) &opt_enable_named_pipe, SHOW_MY_BOOL}, #endif -#ifdef HAVE_NDBCLUSTER_DB {sys_ndb_autoincrement_prefetch_sz.name, (char*) &sys_ndb_autoincrement_prefetch_sz, SHOW_SYS}, {sys_ndb_cache_check_time.name,(char*) &sys_ndb_cache_check_time, SHOW_SYS}, @@ -767,7 +789,6 @@ struct show_var_st init_vars[]= { {sys_ndb_index_stat_update_freq.name, (char*) &sys_ndb_index_stat_update_freq, SHOW_SYS}, {sys_ndb_use_exact_count.name,(char*) &sys_ndb_use_exact_count, SHOW_SYS}, {sys_ndb_use_transactions.name,(char*) &sys_ndb_use_transactions, SHOW_SYS}, -#endif {sys_net_buffer_length.name,(char*) &sys_net_buffer_length, SHOW_SYS}, {sys_net_read_timeout.name, (char*) &sys_net_read_timeout, SHOW_SYS}, {sys_net_retry_count.name, (char*) &sys_net_retry_count, SHOW_SYS}, @@ -865,9 +886,6 @@ struct show_var_st init_vars[]= { {sys_updatable_views_with_limit.name, (char*) &sys_updatable_views_with_limit,SHOW_SYS}, {"version", server_version, SHOW_CHAR}, -#ifdef HAVE_BERKELEY_DB - {"version_bdb", (char*) DB_VERSION_STRING, SHOW_CHAR}, -#endif {"version_comment", (char*) MYSQL_COMPILATION_COMMENT, SHOW_CHAR}, {"version_compile_machine", (char*) MACHINE_TYPE, SHOW_CHAR}, {sys_os.name, (char*) &sys_os, SHOW_SYS}, 
diff --git a/sql/sp_head.cc b/sql/sp_head.cc index abc66ce0b21..fe3bac83eda 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -87,11 +87,11 @@ sp_get_flags_for_command(LEX *lex) case SQLCOM_SHOW_ERRORS: case SQLCOM_SHOW_FIELDS: case SQLCOM_SHOW_GRANTS: - case SQLCOM_SHOW_INNODB_STATUS: + case SQLCOM_SHOW_ENGINE_STATUS: + case SQLCOM_SHOW_ENGINE_LOGS: + case SQLCOM_SHOW_ENGINE_MUTEX: case SQLCOM_SHOW_KEYS: - case SQLCOM_SHOW_LOGS: case SQLCOM_SHOW_MASTER_STAT: - case SQLCOM_SHOW_MUTEX_STATUS: case SQLCOM_SHOW_NEW_MASTER: case SQLCOM_SHOW_OPEN_TABLES: case SQLCOM_SHOW_PRIVILEGES: diff --git a/sql/sql_class.cc b/sql/sql_class.cc index fc9df020b6c..de5be091968 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -287,7 +287,7 @@ void THD::init(void) variables.date_format); variables.datetime_format= date_time_format_copy((THD*) 0, variables.datetime_format); -#ifdef HAVE_NDBCLUSTER_DB +#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE variables.ndb_use_transactions= 1; #endif pthread_mutex_unlock(&LOCK_global_system_variables); @@ -902,7 +902,7 @@ bool select_send::send_data(List<Item> &items) return 0; } -#ifdef HAVE_INNOBASE_DB +#ifdef WITH_INNOBASE_STORAGE_ENGINE /* We may be passing the control from mysqld to the client: release the InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved @@ -938,7 +938,7 @@ bool select_send::send_data(List<Item> &items) bool select_send::send_eof() { -#ifdef HAVE_INNOBASE_DB +#ifdef WITH_INNOBASE_STORAGE_ENGINE /* We may be passing the control from mysqld to the client: release the InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved by thd */ diff --git a/sql/sql_class.h b/sql/sql_class.h index 2efcc620914..62e5620f609 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -563,11 +563,8 @@ struct system_variables ulong sync_replication_slave_id; ulong sync_replication_timeout; #endif /* HAVE_REPLICATION */ -#ifdef HAVE_INNOBASE_DB my_bool innodb_table_locks; my_bool innodb_support_xa; -#endif /* 
HAVE_INNOBASE_DB */ -#ifdef HAVE_NDBCLUSTER_DB ulong ndb_autoincrement_prefetch_sz; my_bool ndb_force_send; my_bool ndb_use_exact_count; @@ -575,7 +572,6 @@ struct system_variables my_bool ndb_index_stat_enable; ulong ndb_index_stat_cache_entries; ulong ndb_index_stat_update_freq; -#endif /* HAVE_NDBCLUSTER_DB */ my_bool old_alter_table; my_bool old_passwords; diff --git a/sql/sql_lex.h b/sql/sql_lex.h index f164bb6809b..b82ac754e26 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -53,8 +53,8 @@ enum enum_sql_command { SQLCOM_DELETE, SQLCOM_TRUNCATE, SQLCOM_DROP_TABLE, SQLCOM_DROP_INDEX, SQLCOM_SHOW_DATABASES, SQLCOM_SHOW_TABLES, SQLCOM_SHOW_FIELDS, - SQLCOM_SHOW_KEYS, SQLCOM_SHOW_VARIABLES, SQLCOM_SHOW_LOGS, SQLCOM_SHOW_STATUS, - SQLCOM_SHOW_INNODB_STATUS, SQLCOM_SHOW_NDBCLUSTER_STATUS, SQLCOM_SHOW_MUTEX_STATUS, + SQLCOM_SHOW_KEYS, SQLCOM_SHOW_VARIABLES, SQLCOM_SHOW_STATUS, + SQLCOM_SHOW_ENGINE_LOGS, SQLCOM_SHOW_ENGINE_STATUS, SQLCOM_SHOW_ENGINE_MUTEX, SQLCOM_SHOW_PROCESSLIST, SQLCOM_SHOW_MASTER_STAT, SQLCOM_SHOW_SLAVE_STAT, SQLCOM_SHOW_GRANTS, SQLCOM_SHOW_CREATE, SQLCOM_SHOW_CHARSETS, SQLCOM_SHOW_COLLATIONS, SQLCOM_SHOW_CREATE_DB, SQLCOM_SHOW_TABLE_STATUS, diff --git a/sql/sql_manager.cc b/sql/sql_manager.cc index 1d3acd1696c..f580bcb16d9 100644 --- a/sql/sql_manager.cc +++ b/sql/sql_manager.cc @@ -32,12 +32,43 @@ pthread_t manager_thread; pthread_mutex_t LOCK_manager; pthread_cond_t COND_manager; +struct handler_cb { + struct handler_cb *next; + void (*action)(void); +}; + +static struct handler_cb * volatile cb_list; + +bool mysql_manager_submit(void (*action)()) +{ + bool result= FALSE; + struct handler_cb * volatile *cb; + pthread_mutex_lock(&LOCK_manager); + cb= &cb_list; + while (*cb && (*cb)->action != action) + cb= &(*cb)->next; + if (!*cb) + { + *cb= (struct handler_cb *)my_malloc(sizeof(struct handler_cb), MYF(MY_WME)); + if (!*cb) + result= TRUE; + else + { + (*cb)->next= NULL; + (*cb)->action= action; + } + } + pthread_mutex_unlock(&LOCK_manager); + 
return result; +} + pthread_handler_t handle_manager(void *arg __attribute__((unused))) { int error = 0; ulong status; struct timespec abstime; bool reset_flush_time = TRUE; + struct handler_cb *cb= NULL; my_thread_init(); DBUG_ENTER("handle_manager"); @@ -68,6 +99,11 @@ pthread_handler_t handle_manager(void *arg __attribute__((unused))) } status = manager_status; manager_status = 0; + if (cb == NULL) + { + cb= cb_list; + cb_list= NULL; + } pthread_mutex_unlock(&LOCK_manager); if (abort_loop) @@ -80,13 +116,13 @@ pthread_handler_t handle_manager(void *arg __attribute__((unused))) reset_flush_time = TRUE; } -#ifdef HAVE_BERKELEY_DB - if (status & MANAGER_BERKELEY_LOG_CLEANUP) + while (cb) { - berkeley_cleanup_log_files(); - status &= ~MANAGER_BERKELEY_LOG_CLEANUP; + struct handler_cb *next= cb->next; + cb->action(); + my_free((gptr)cb, MYF(0)); + cb= next; } -#endif if (status) DBUG_PRINT("error", ("manager did not handle something: %lx", status)); diff --git a/sql/sql_manager.h b/sql/sql_manager.h index 35704705820..d42deb8ff81 100644 --- a/sql/sql_manager.h +++ b/sql/sql_manager.h @@ -14,6 +14,6 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifdef HAVE_BERKELEY_DB +#ifdef WITH_BERKELEY_STORAGE_ENGINE void berkeley_cleanup_log_files(void); -#endif /* HAVE_BERKELEY_DB */ +#endif /* WITH_BERKELEY_STORAGE_ENGINE */ diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 4c5f0018bb5..9b3b7ffad69 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -22,14 +22,6 @@ #include <myisam.h> #include <my_dir.h> -#ifdef HAVE_INNOBASE_DB -#include "ha_innodb.h" -#endif - -#ifdef HAVE_NDBCLUSTER_DB -#include "ha_ndbcluster.h" -#endif - #include "sp_head.h" #include "sp.h" #include "sp_cache.h" @@ -1767,8 +1759,9 @@ bool dispatch_command(enum enum_server_command command, THD *thd, TABLE_LIST table_list; LEX_STRING conv_name; /* Saved variable value */ - my_bool old_innodb_table_locks= - 
IF_INNOBASE_DB(thd->variables.innodb_table_locks, FALSE); + my_bool old_innodb_table_locks= thd->variables.innodb_table_locks; + + /* used as fields initializator */ lex_start(thd, 0, 0); @@ -2685,29 +2678,20 @@ mysql_execute_command(THD *thd) res = load_master_data(thd); break; #endif /* HAVE_REPLICATION */ -#ifdef HAVE_NDBCLUSTER_DB - case SQLCOM_SHOW_NDBCLUSTER_STATUS: - { - res = ndbcluster_show_status(thd); - break; - } -#endif -#ifdef HAVE_INNOBASE_DB - case SQLCOM_SHOW_INNODB_STATUS: + case SQLCOM_SHOW_ENGINE_STATUS: { if (check_global_access(thd, SUPER_ACL)) - goto error; - res = innodb_show_status(thd); + goto error; + res = ha_show_status(thd, lex->create_info.db_type, HA_ENGINE_STATUS); break; } - case SQLCOM_SHOW_MUTEX_STATUS: + case SQLCOM_SHOW_ENGINE_MUTEX: { if (check_global_access(thd, SUPER_ACL)) goto error; - res = innodb_mutex_show_status(thd); + res = ha_show_status(thd, lex->create_info.db_type, HA_ENGINE_MUTEX); break; } -#endif #ifdef HAVE_REPLICATION case SQLCOM_LOAD_MASTER_TABLE: { @@ -3431,7 +3415,7 @@ end_with_restore_list: case SQLCOM_SHOW_COLUMN_TYPES: res= mysqld_show_column_types(thd); break; - case SQLCOM_SHOW_LOGS: + case SQLCOM_SHOW_ENGINE_LOGS: #ifdef DONT_ALLOW_SHOW_COMMANDS my_message(ER_NOT_ALLOWED_COMMAND, ER(ER_NOT_ALLOWED_COMMAND), MYF(0)); /* purecov: inspected */ @@ -3440,7 +3424,7 @@ end_with_restore_list: { if (grant_option && check_access(thd, FILE_ACL, any_db,0,0,0,0)) goto error; - res= mysqld_show_logs(thd); + res= ha_show_status(thd, lex->create_info.db_type, HA_ENGINE_LOGS); break; } #endif diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index ce73061271c..d34c509270c 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -38,7 +38,7 @@ #include <m_ctype.h> #include "md5.h" -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE /* Partition related functions declarations and some static constants; */ @@ -101,8 +101,8 @@ uint32 get_partition_id_linear_key_sub(partition_info *part_info); 
TRUE Yes, it is part of a management partition command FALSE No, not a management partition command DESCRIPTION - This needs to be outside of HAVE_PARTITION_DB since it is used from the - sql parser that doesn't have any #ifdef's + This needs to be outside of WITH_PARTITION_STORAGE_ENGINE since it is + used from the sql parser that doesn't have any #ifdef's */ my_bool is_partition_management(LEX *lex) @@ -112,7 +112,7 @@ my_bool is_partition_management(LEX *lex) lex->alter_info.flags == ALTER_REORGANISE_PARTITION)); } -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE /* A support function to check if a partition name is in a list of strings SYNOPSIS diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 6e6961080cd..fd03913a715 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -1726,7 +1726,9 @@ static bool check_prepared_statement(Prepared_statement *stmt, case SQLCOM_SHOW_COLUMN_TYPES: case SQLCOM_SHOW_STATUS: case SQLCOM_SHOW_VARIABLES: - case SQLCOM_SHOW_LOGS: + case SQLCOM_SHOW_ENGINE_LOGS: + case SQLCOM_SHOW_ENGINE_STATUS: + case SQLCOM_SHOW_ENGINE_MUTEX: case SQLCOM_SHOW_TABLES: case SQLCOM_SHOW_OPEN_TABLES: case SQLCOM_SHOW_CHARSETS: diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 568a0a56eda..2548e77449b 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -25,9 +25,6 @@ #include "sql_trigger.h" #include <my_dir.h> -#ifdef HAVE_BERKELEY_DB -#include "ha_berkeley.h" // For berkeley_show_logs -#endif static const char *grant_names[]={ "select","insert","update","delete","create","drop","reload","shutdown", @@ -512,29 +509,6 @@ bool mysqld_show_create_db(THD *thd, char *dbname, DBUG_RETURN(FALSE); } -bool -mysqld_show_logs(THD *thd) -{ - List<Item> field_list; - Protocol *protocol= thd->protocol; - DBUG_ENTER("mysqld_show_logs"); - - field_list.push_back(new Item_empty_string("File",FN_REFLEN)); - field_list.push_back(new Item_empty_string("Type",10)); - field_list.push_back(new Item_empty_string("Status",10)); - - if 
(protocol->send_fields(&field_list, - Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) - DBUG_RETURN(TRUE); - -#ifdef HAVE_BERKELEY_DB - if ((have_berkeley_db == SHOW_OPTION_YES) && berkeley_show_logs(protocol)) - DBUG_RETURN(TRUE); -#endif - - send_eof(thd); - DBUG_RETURN(FALSE); -} /**************************************************************************** @@ -974,7 +948,7 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet) packet->append(" TYPE=", 6); else packet->append(" ENGINE=", 8); -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE if (table->s->part_info) packet->append(ha_get_storage_engine( table->s->part_info->default_engine_type)); @@ -1054,7 +1028,7 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet) append_directory(thd, packet, "DATA", create_info.data_file_name); append_directory(thd, packet, "INDEX", create_info.index_file_name); } -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE { /* Partition syntax for CREATE TABLE is at the end of the syntax. 
diff --git a/sql/sql_table.cc b/sql/sql_table.cc index dacc614cd80..94590f6faf2 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -17,9 +17,6 @@ /* drop and alter of tables */ #include "mysql_priv.h" -#ifdef HAVE_BERKELEY_DB -#include "ha_berkeley.h" -#endif #include <hash.h> #include <myisam.h> #include <my_dir.h> @@ -1607,7 +1604,7 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name, my_error(ER_OUTOFMEMORY, MYF(0), 128);//128 bytes invented DBUG_RETURN(TRUE); } -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE partition_info *part_info= thd->lex->part_info; if (part_info) { @@ -3392,7 +3389,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, uint db_create_options, used_fields; enum db_type old_db_type,new_db_type; uint need_copy_table= 0; -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE bool online_add_empty_partition= FALSE; bool online_drop_partition= FALSE; bool partition_changed= FALSE; @@ -3474,7 +3471,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, if (create_info->db_type == DB_TYPE_DEFAULT) create_info->db_type= old_db_type; -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE /* We need to handle both partition management command such as Add Partition and others here as well as an ALTER TABLE that completely changes the @@ -4251,7 +4248,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, set_table_default_charset(thd, create_info, db); -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE if (thd->variables.old_alter_table || partition_changed) #else if (thd->variables.old_alter_table) @@ -4270,7 +4267,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, if (!need_copy_table) create_info->frm_only= 1; -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE if (partition_changed) { if (online_drop_partition) @@ -4626,12 +4623,11 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, 
write_bin_log(thd, TRUE); VOID(pthread_cond_broadcast(&COND_refresh)); VOID(pthread_mutex_unlock(&LOCK_open)); -#ifdef HAVE_BERKELEY_DB /* TODO RONM: This problem needs to handled for Berkeley DB partitions as well */ - if (old_db_type == DB_TYPE_BERKELEY_DB) + if (ha_check_storage_engine_flag(old_db_type,HTON_FLUSH_AFTER_RENAME)) { /* For the alter table to be properly flushed to the logs, we @@ -4647,11 +4643,10 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, my_free((char*) table, MYF(0)); } else - sql_print_warning("Could not open BDB table %s.%s after rename\n", + sql_print_warning("Could not open table %s.%s after rename\n", new_db,table_name); - (void) berkeley_flush_logs(); + ha_flush_logs(old_db_type); } -#endif table_list->table=0; // For query cache query_cache_invalidate3(thd, table_list, 0); diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 41961b66f0f..f6834117d24 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -291,7 +291,7 @@ int mysql_update(THD *thd, used_key_is_modified= check_if_key_used(table, used_index, fields); } -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE if (used_key_is_modified || order || partition_key_modified(table, fields)) #else diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 11f8192809f..7d670d8988c 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -3255,7 +3255,9 @@ storage_engines: ident_or_text { $$ = ha_resolve_by_name($1.str,$1.length); - if ($$ == DB_TYPE_UNKNOWN) { + if ($$ == DB_TYPE_UNKNOWN && + test(YYTHD->variables.sql_mode & MODE_NO_ENGINE_SUBSTITUTION)) + { my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), $1.str); YYABORT; } @@ -7110,6 +7112,9 @@ show_param: | ENGINE_SYM storage_engines { Lex->create_info.db_type= $2; } show_engine_param + | ENGINE_SYM ALL + { Lex->create_info.db_type= DB_TYPE_DEFAULT; } + show_engine_param | opt_full COLUMNS from_or_in table_ident opt_db wild_and_where { LEX *lex= Lex; @@ -7192,9 +7197,19 @@ show_param: YYABORT; } | INNOBASE_SYM 
STATUS_SYM - { Lex->sql_command = SQLCOM_SHOW_INNODB_STATUS; WARN_DEPRECATED("SHOW INNODB STATUS", "SHOW ENGINE INNODB STATUS"); } + { + LEX *lex= Lex; + lex->sql_command = SQLCOM_SHOW_ENGINE_STATUS; + lex->create_info.db_type= DB_TYPE_INNODB; + WARN_DEPRECATED("SHOW INNODB STATUS", "SHOW ENGINE INNODB STATUS"); + } | MUTEX_SYM STATUS_SYM - { Lex->sql_command = SQLCOM_SHOW_MUTEX_STATUS; } + { + LEX *lex= Lex; + lex->sql_command = SQLCOM_SHOW_ENGINE_MUTEX; + lex->create_info.db_type= DB_TYPE_INNODB; + WARN_DEPRECATED("SHOW MUTEX STATUS", "SHOW ENGINE INNODB MUTEX"); + } | opt_full PROCESSLIST_SYM { Lex->sql_command= SQLCOM_SHOW_PROCESSLIST;} | opt_var_type VARIABLES wild_and_where @@ -7223,9 +7238,19 @@ show_param: YYABORT; } | BERKELEY_DB_SYM LOGS_SYM - { Lex->sql_command= SQLCOM_SHOW_LOGS; WARN_DEPRECATED("SHOW BDB LOGS", "SHOW ENGINE BDB LOGS"); } + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SHOW_ENGINE_LOGS; + lex->create_info.db_type= DB_TYPE_BERKELEY_DB; + WARN_DEPRECATED("SHOW BDB LOGS", "SHOW ENGINE BDB LOGS"); + } | LOGS_SYM - { Lex->sql_command= SQLCOM_SHOW_LOGS; WARN_DEPRECATED("SHOW LOGS", "SHOW ENGINE BDB LOGS"); } + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SHOW_ENGINE_LOGS; + lex->create_info.db_type= DB_TYPE_BERKELEY_DB; + WARN_DEPRECATED("SHOW LOGS", "SHOW ENGINE BDB LOGS"); + } | GRANTS { LEX *lex=Lex; @@ -7324,30 +7349,11 @@ show_param: show_engine_param: STATUS_SYM - { - switch (Lex->create_info.db_type) { - case DB_TYPE_NDBCLUSTER: - Lex->sql_command = SQLCOM_SHOW_NDBCLUSTER_STATUS; - break; - case DB_TYPE_INNODB: - Lex->sql_command = SQLCOM_SHOW_INNODB_STATUS; - break; - default: - my_error(ER_NOT_SUPPORTED_YET, MYF(0), "STATUS"); - YYABORT; - } - } + { Lex->sql_command= SQLCOM_SHOW_ENGINE_STATUS; } + | MUTEX_SYM + { Lex->sql_command= SQLCOM_SHOW_ENGINE_MUTEX; } | LOGS_SYM - { - switch (Lex->create_info.db_type) { - case DB_TYPE_BERKELEY_DB: - Lex->sql_command = SQLCOM_SHOW_LOGS; - break; - default: - my_error(ER_NOT_SUPPORTED_YET, 
MYF(0), "LOGS"); - YYABORT; - } - }; + { Lex->sql_command= SQLCOM_SHOW_ENGINE_LOGS; }; master_or_binary: MASTER_SYM diff --git a/sql/table.cc b/sql/table.cc index 39792b405c6..ab80fd1f7fb 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -341,7 +341,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, str_db_type_length, next_chunk + 2, share->db_type)); } -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE else { if (!strncmp(next_chunk + 2, "partition", str_db_type_length)) @@ -361,7 +361,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, part_info_len= uint4korr(next_chunk); if (part_info_len > 0) { -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE if (mysql_unpack_partition(thd, (uchar *)(next_chunk + 4), part_info_len, outparam, default_part_db_type)) @@ -974,7 +974,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, /* Fix the partition functions and ensure they are not constant functions*/ if (part_info_len > 0) -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE if (fix_partition_func(thd,name,outparam)) #endif goto err; @@ -1044,7 +1044,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, if (! 
error_reported) frm_error(error,outparam,name,ME_ERROR+ME_WAITTANG, errarg); delete outparam->file; -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE if (outparam->s->part_info) { free_items(outparam->s->part_info->item_free_list); @@ -1088,7 +1088,7 @@ int closefrm(register TABLE *table) table->field= 0; } delete table->file; -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE if (table->s->part_info) { free_items(table->s->part_info->item_free_list); diff --git a/sql/table.h b/sql/table.h index 40c01a1c051..401144e4687 100644 --- a/sql/table.h +++ b/sql/table.h @@ -107,7 +107,7 @@ class Table_triggers_list; typedef struct st_table_share { -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE partition_info *part_info; /* Partition related information */ #endif /* hash of field names (contains pointers to elements of field array) */ diff --git a/sql/unireg.cc b/sql/unireg.cc index 32f533a6072..a91be308cce 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -85,11 +85,11 @@ bool mysql_create_frm(THD *thd, my_string file_name, TYPELIB formnames; uchar *screen_buff; char buff[5]; -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE partition_info *part_info= thd->lex->part_info; #endif DBUG_ENTER("mysql_create_frm"); -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE thd->lex->part_info= NULL; #endif @@ -132,7 +132,7 @@ bool mysql_create_frm(THD *thd, my_string file_name, 2 + create_info->connect_string.length); /* Partition */ create_info->extra_size+= 5; -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE if (part_info) create_info->extra_size+= part_info->part_info_len; #endif @@ -166,7 +166,7 @@ bool mysql_create_frm(THD *thd, my_string file_name, 60); forminfo[46]=(uchar) strlen((char*)forminfo+47); // Length of comment -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE if (part_info) fileinfo[61]= (uchar) part_info->default_engine_type; #endif @@ -194,7 +194,7 @@ bool 
mysql_create_frm(THD *thd, my_string file_name, str_db_type.length, MYF(MY_NABP))) goto err; -#ifdef HAVE_PARTITION_DB +#ifdef WITH_PARTITION_STORAGE_ENGINE if (part_info) { int4store(buff, part_info->part_info_len); |