diff options
author | unknown <acurtis@poseidon.ndb.mysql.com> | 2005-11-07 16:25:06 +0100 |
---|---|---|
committer | unknown <acurtis@poseidon.ndb.mysql.com> | 2005-11-07 16:25:06 +0100 |
commit | 6b3a9caef9b8cf42f8a706f778bba72db89cdd2b (patch) | |
tree | 2b8ffe29d899326e4ad244ac3f67d4fcf29bfae6 /sql/ha_ndbcluster.cc | |
parent | df33aacd87ff08c27fd371a0bb348fe3986e6f95 (diff) | |
download | mariadb-git-6b3a9caef9b8cf42f8a706f778bba72db89cdd2b.tar.gz |
Make storage engines "pluggable", handlerton work
Makefile.am:
Changes to autoconf subst
config/ac-macros/ha_berkeley.m4:
simplify
config/ac-macros/ha_ndbcluster.m4:
simplify
config/ac-macros/ha_partition.m4:
simplify
configure.in:
strip configure of storage engine specific cruft and simplify
extra/Makefile.am:
changes to autoconf/automake subst
libmysqld/Makefile.am:
only compile storage engines if required.
make find object file a little smarter
libmysqld/examples/Makefile.am:
changes to autoconf subst
mysql-test/Makefile.am:
remove storage engine specific cruft
mysql-test/r/ps_1general.result:
cannot guarantee order of results from 'show storage engines'
mysql-test/r/show_check.result:
fix test - frm file fails to be deleted if it is invalid
mysql-test/r/sql_mode.result:
isam does not exist, test may need to be redone/fixed in 5.0
mysql-test/r/warnings.result:
isam no longer exists
mysql-test/t/ps_1general.test:
cannot guarantee order of results from 'show storage engines'
mysql-test/t/show_check.test:
fix test - frm file fails to be deleted if it is invalid
mysql-test/t/sql_mode.test:
isam does not exist, test may need to be redone/fixed in 5.0
mysql-test/t/system_mysql_db_fix.test:
change isam to myisam
mysql-test/t/view.test:
change isam to myisam
mysql-test/t/warnings.test:
isam no longer exists
sql/Makefile.am:
Make storage engines "pluggable" stage 1
only compile storage engines if included
sql/examples/ha_example.cc:
handlerton work
sql/examples/ha_example.h:
handlerton work
sql/examples/ha_tina.cc:
handlerton work
sql/examples/ha_tina.h:
handlerton work
sql/ha_archive.cc:
handlerton work
sql/ha_archive.h:
handlerton work
sql/ha_berkeley.cc:
handlerton work
sql/ha_berkeley.h:
handlerton work
sql/ha_blackhole.cc:
handlerton work
sql/ha_federated.cc:
handlerton work
sql/ha_federated.h:
handlerton work
sql/ha_heap.cc:
handlerton work
sql/ha_innodb.cc:
handlerton work
sql/ha_innodb.h:
handlerton work
sql/ha_myisam.cc:
handlerton work
sql/ha_myisammrg.cc:
handlerton work
sql/ha_ndbcluster.cc:
handlerton work
sql/ha_ndbcluster.h:
handlerton work
sql/ha_partition.cc:
handlerton work
sql/handler.cc:
start removing storage engine specific cruft
sql/handler.h:
start removing storage engine specific cruft
db_type for binlog handlerton
handlerton flag for not-user-selectable storage engines
sql/lex.h:
start removing storage engine specific cruft
sql/log.cc:
handlerton work
give binlog handlerton a 'real' db_type
sql/mysql_priv.h:
start removing storage engine specific cruft
sql/mysqld.cc:
start removing storage engine specific cruft
sql/set_var.cc:
start removing storage engine specific cruft
sql/sp_head.cc:
start removing storage engine specific cruft
sql/sql_class.cc:
start removing storage engine specific cruft
sql/sql_class.h:
start removing storage engine specific cruft
sql/sql_lex.h:
start removing storage engine specific cruft
sql/sql_manager.cc:
start removing storage engine specific cruft
sql/sql_manager.h:
start removing storage engine specific cruft
sql/sql_parse.cc:
start removing storage engine specific cruft
sql/sql_partition.cc:
start removing storage engine specific cruft
sql/sql_prepare.cc:
start removing storage engine specific cruft
sql/sql_show.cc:
start removing storage engine specific cruft
sql/sql_table.cc:
changed define from HAVE_PARTITION_DB to WITH_PARTITION_STORAGE_ENGINE
start removing storage engine specific cruft
sql/sql_update.cc:
changed define from HAVE_PARTITION_DB to WITH_PARTITION_STORAGE_ENGINE
sql/sql_yacc.yy:
start removing storage engine specific cruft
test if we should throw error
sql/table.cc:
changed define from HAVE_PARTITION_DB to WITH_PARTITION_STORAGE_ENGINE
sql/table.h:
changed define from HAVE_PARTITION_DB to WITH_PARTITION_STORAGE_ENGINE
sql/unireg.cc:
changed define from HAVE_PARTITION_DB to WITH_PARTITION_STORAGE_ENGINE
storage/ndb/include/kernel/kernel_types.h:
added my_config.h
storage/ndb/include/ndb_global.h.in:
added my_config.h
storage/ndb/include/ndb_types.h.in:
added my_config.h
config/ac-macros/storage.m4:
New BitKeeper file ``config/ac-macros/storage.m4''
sql/handlerton.cc.in:
New BitKeeper file ``sql/handlerton.cc.in''
Diffstat (limited to 'sql/ha_ndbcluster.cc')
-rw-r--r-- | sql/ha_ndbcluster.cc | 78 |
1 files changed, 45 insertions, 33 deletions
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index fc31f4854ab..c7b168ee9fd 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -26,7 +26,6 @@ #include "mysql_priv.h" -#ifdef HAVE_NDBCLUSTER_DB #include <my_dir.h> #include "ha_ndbcluster.h" #include <ndbapi/NdbApi.hpp> @@ -35,9 +34,14 @@ // options from from mysqld.cc extern my_bool opt_ndb_optimized_node_selection; -extern enum ndb_distribution opt_ndb_distribution_id; extern const char *opt_ndbcluster_connectstring; +const char *ndb_distribution_names[]= {"KEYHASH", "LINHASH", NullS}; +TYPELIB ndb_distribution_typelib= { array_elements(ndb_distribution_names)-1, + "", ndb_distribution_names, NULL }; +const char *opt_ndb_distribution= ndb_distribution_names[ND_KEYHASH]; +enum ndb_distribution opt_ndb_distribution_id= ND_KEYHASH; + // Default value for parallelism static const int parallelism= 0; @@ -51,6 +55,7 @@ static const char share_prefix[]= "./"; static int ndbcluster_close_connection(THD *thd); static int ndbcluster_commit(THD *thd, bool all); static int ndbcluster_rollback(THD *thd, bool all); +static handler* ndbcluster_create_handler(TABLE *table); handlerton ndbcluster_hton = { "ndbcluster", @@ -73,9 +78,23 @@ handlerton ndbcluster_hton = { NULL, /* create_cursor_read_view */ NULL, /* set_cursor_read_view */ NULL, /* close_cursor_read_view */ + ndbcluster_create_handler, /* Create a new handler */ + ndbcluster_drop_database, /* Drop a database */ + ndbcluster_end, /* Panic call */ + NULL, /* Release temporary latches */ + NULL, /* Update Statistics */ + NULL, /* Start Consistent Snapshot */ + NULL, /* Flush logs */ + ndbcluster_show_status, /* Show status */ + NULL, /* Replication Report Sent Binlog */ HTON_NO_FLAGS }; +static handler *ndbcluster_create_handler(TABLE *table) +{ + return new ha_ndbcluster(table); +} + #define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8 #define NDB_FAILED_AUTO_INCREMENT ~(Uint64)0 @@ -4629,9 +4648,10 @@ extern "C" byte* tables_get_key(const char *entry, uint 
*length, /* Drop a database in NDB Cluster - */ + NOTE add a dummy void function, since stupid handlerton is returning void instead of int... +*/ -int ndbcluster_drop_database(const char *path) +int ndbcluster_drop_database_impl(const char *path) { DBUG_ENTER("ndbcluster_drop_database"); THD *thd= current_thd; @@ -4646,13 +4666,13 @@ int ndbcluster_drop_database(const char *path) DBUG_PRINT("enter", ("db: %s", dbname)); if (!(ndb= check_ndb_in_thd(thd))) - DBUG_RETURN(HA_ERR_NO_CONNECTION); + DBUG_RETURN(-1); // List tables in NDB NDBDICT *dict= ndb->getDictionary(); if (dict->listObjects(list, NdbDictionary::Object::UserTable) != 0) - ERR_RETURN(dict->getNdbError()); + DBUG_RETURN(-1); for (i= 0 ; i < list.count ; i++) { NdbDictionary::Dictionary::List::Element& elmt= list.elements[i]; @@ -4685,6 +4705,10 @@ int ndbcluster_drop_database(const char *path) DBUG_RETURN(ret); } +void ndbcluster_drop_database(char *path) +{ + ndbcluster_drop_database_impl(path); +} /* find all tables in ndb and discover those needed */ @@ -5057,7 +5081,7 @@ ndbcluster_init_error: ndbcluster_init() */ -bool ndbcluster_end() +int ndbcluster_end(ha_panic_function type) { DBUG_ENTER("ndbcluster_end"); @@ -7941,44 +7965,33 @@ ha_ndbcluster::generate_scan_filter(Ndb_cond_stack *ndb_cond_stack, /* Implements the SHOW NDB STATUS command. 
*/ -int -ndbcluster_show_status(THD* thd) +bool +ndbcluster_show_status(THD* thd, stat_print_fn *stat_print, + enum ha_stat_type stat_type) { - Protocol *protocol= thd->protocol; - + char buf[IO_SIZE]; DBUG_ENTER("ndbcluster_show_status"); if (have_ndbcluster != SHOW_OPTION_YES) { - my_message(ER_NOT_SUPPORTED_YET, - "Cannot call SHOW NDBCLUSTER STATUS because skip-ndbcluster is defined", - MYF(0)); - DBUG_RETURN(TRUE); + DBUG_RETURN(FALSE); + } + if (stat_type != HA_ENGINE_STATUS) + { + DBUG_RETURN(FALSE); } - List<Item> field_list; - field_list.push_back(new Item_empty_string("free_list", 255)); - field_list.push_back(new Item_return_int("created", 10,MYSQL_TYPE_LONG)); - field_list.push_back(new Item_return_int("free", 10,MYSQL_TYPE_LONG)); - field_list.push_back(new Item_return_int("sizeof", 10,MYSQL_TYPE_LONG)); - - if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) - DBUG_RETURN(TRUE); - if (get_thd_ndb(thd) && get_thd_ndb(thd)->ndb) { Ndb* ndb= (get_thd_ndb(thd))->ndb; Ndb::Free_list_usage tmp; tmp.m_name= 0; while (ndb->get_free_list_usage(&tmp)) { - protocol->prepare_for_resend(); - - protocol->store(tmp.m_name, &my_charset_bin); - protocol->store((uint)tmp.m_created); - protocol->store((uint)tmp.m_free); - protocol->store((uint)tmp.m_sizeof); - if (protocol->write()) - DBUG_RETURN(TRUE); + my_snprintf(buf, sizeof(buf), + "created=%u, free=%u, sizeof=%u", + tmp.m_created, tmp.m_free, tmp.m_sizeof); + if (stat_print(thd, ndbcluster_hton.name, tmp.m_name, buf)) + DBUG_RETURN(TRUE); } } send_eof(thd); @@ -8192,4 +8205,3 @@ bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info, return COMPATIBLE_DATA_YES; } -#endif /* HAVE_NDBCLUSTER_DB */ |